Compare commits (4 commits): 561331b825 ... 6ec2d299fc

| SHA1 |
|---|
| 6ec2d299fc |
| 3cfc5f2bfd |
| 588a6482e9 |
| b4bde14809 |
@@ -1,25 +1,16 @@
gitea_version: 1.24.3

gitea_partition: |-
  {{
    hard_disk__partitions | dict2items | selectattr(
      'value', 'contains', 'gitea'
    ) | map(attribute='key') | first
  }}

gitea_database:
  db_name: gitea
  db_user: gitea
  db_password: gitea

gitea:
  partition: "{{ gitea_partition }}"
  database:
  dockercompose:
    name: arcodange_factory
    networks:
      gitea:
        name: arcodange_factory_gitea
      postgres:
        name: arcodange_factory_postgres
        external: true
    services:
      gitea:
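For orientation, here is a small worked example of how the `gitea_partition` expression above resolves against the `hard_disk__partitions` mapping that appears later in this diff; the playbook below is illustrative only and not part of this commit range.

```yaml
# Illustration only: with this assumed inventory, the selectattr/map chain
# returns the key whose value list contains 'gitea', i.e. "gitea_data".
- hosts: localhost
  gather_facts: false
  vars:
    hard_disk__partitions:
      nfs: []
      gitea_data: [gitea]
      pg_data: [postgres]
    gitea_partition: >-
      {{ hard_disk__partitions | dict2items
         | selectattr('value', 'contains', 'gitea')
         | map(attribute='key') | first }}
  tasks:
    - ansible.builtin.debug:
        var: gitea_partition  # prints "gitea_data"
```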
@@ -45,12 +36,18 @@ gitea:
        GITEA__server__SSH_PORT: 2222
        GITEA__server__SSH_DOMAIN: "{{ lookup('dig', groups.gitea[0]) }}"
        GITEA__server__SSH_LISTEN_PORT: 22
        GITEA_server__DOMAIN: localhost
        GITEA_server__HTTP_PORT: 3000
        GITEA_server__ROOT_URL: https://gitea.arcodange.duckdns.org/
        GITEA_server__START_SSH_SERVER: true
        GITEA_server__OFFLINE_MODE: true
        GITEA_service__DISABLE_REGISTRATION: true
      networks:
        - gitea
        - postgres
      ports:
        - "3000:3000"
        - "2222:22"
      volumes:
        - /arcodange/{{gitea_partition}}/gitea/data:/data
        - /home/pi/arcodange/docker_composes/gitea/data:/data
        - /etc/timezone:/etc/timezone:ro
        - /etc/localtime:/etc/localtime:ro
@@ -0,0 +1,14 @@
$ANSIBLE_VAULT;1.1;AES256
32306161663530376161333835626633326334356137363366643838346132613666356566383638
3263386238353438376432313332393134333339306336640a366130313661316166383436346364
39613364326533376530623636636334316532343330376366333338626130333533343937623165
6538333530376132350a383934336566623866366338323034613965623237653564366435666464
33303064356161316564396439383533333139653632393332663336356430383866643337363766
30646663663737336162383663383664633030653039313565626164313134326433653965306262
39393361643639333166623631316465316564393639643764336133306663346261303137376333
30333930613062383465613139646562383836633431366637616166666131366232623065396238
35313334633064313234383537663632356466326133333238636335383666323839393930633565
36663130343035383731303332396436333333353863376461376131393834666232336138323666
38346566616137323830346433303535343030623364353364653731353233373337363633626638
32613661633962643030333662386333323035656265316537633961373537373961303134353936
33633632633835666364663964383661383830336631333531623131633763333733
@@ -1,21 +0,0 @@
# to add/mount a partition, use the gparted utility to create it beforehand with the matching name/label
hard_disk__partitions:
  nfs: []
  gitea_data:
    - gitea
  pg_data:
    - postgres

hard_disk__applications:
  postgres: "{{ postgres }}"
  gitea: "{{ gitea }}"

hard_disk__postgres_databases:
  gitea: "{{ gitea_database }}"
  webapp:
    db_name: webapp

hard_disk__nfs:
  server_ip: "{{ ansible_host }}"
  ks_namespace: kube-system
  export_directory: /arcodange/nfs
@@ -1,13 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
66376231363631663639623736353861383337333863623761303438643831653061373338306366
3762316261326433316166393132663034373636313935660a353962653931643131306134663264
64636264393338366363333932366163393036326362353630656132326534663239306639336531
3239373433386332640a653262633333653037646236366362333838356534623935613534376465
66633335636235323035656332356566343738363661363066653239653037643539323533643534
38376465663637646637326436306631663135333361666635303936643562356365616164636565
39313231623630386332363262376364383935353534663465333362356631383334396366643463
65616130613936343035643736623137313665373462353531326365396638633165326139343233
31313933313161343265373865643638616134303834396563623366633136616333613433323035
32643336343438646361616364336466366165363464323466363034373531323839363863396236
34343731386364613739666461633564646135306231366135396562383565383562396639316164
33626266643765653765
@@ -1,15 +1,8 @@
postgres_partition: |-
  {{
    hard_disk__partitions | dict2items | selectattr(
      'value', 'contains', 'postgres'
    ) | map(attribute='key') | first
  }}
postgres:
  partition: "{{ postgres_partition }}"
  dockercompose:
    name: arcodange_factory
    networks:
      gitea:
      postgres:
        external: false
    services:
      postgres:
@@ -21,11 +14,11 @@ postgres:
        POSTGRES_PASSWORD: postgres
        POSTGRES_DB: postgres
      networks:
        - gitea
        - postgres
      ports:
        - "5432:5432"
      volumes:
        - /arcodange/{{postgres_partition}}/postgres/data:/var/lib/postgresql/data
        - /home/pi/arcodange/docker_composes/postgres/data:/var/lib/postgresql/data

pgbouncer:
  auth_user: &pgbouncer_auth pgbouncer_auth
@@ -31,17 +31,13 @@ local:
    pi2:
    pi3:

hard_disk:
  hosts:
    pi2: # 4TB Toshiba external hard drive (/dev/sda)

postgres:
  children:
    hard_disk:
  hosts:
    pi2:

gitea:
  children:
    hard_disk:
    postgres:

all:
  children:
@@ -1,5 +1,8 @@
---

- name: Prepare disks for longhorn
  ansible.builtin.import_playbook: ./prepare_disks.yml

- name: System Docker
  hosts: raspberries:&local
  gather_facts: yes
@@ -46,6 +49,50 @@

#---

- name: Install iSCSI client for Longhorn on Raspberry Pi
  hosts: raspberries:&local
  become: yes
  tasks:
    - name: Install open-iscsi
      ansible.builtin.apt:
        name: open-iscsi
        state: present
        update_cache: yes

    - name: Enable and start iSCSI service
      ansible.builtin.service:
        name: iscsid
        state: started
        enabled: yes

    - name: Install cryptsetup
      ansible.builtin.apt:
        name: cryptsetup
        state: present
        update_cache: yes

    - name: Load the dm_crypt kernel module
      ansible.builtin.modprobe:
        name: dm_crypt
        state: present

    - name: Ensure the dm_crypt module is loaded at boot
      ansible.builtin.lineinfile:
        path: /etc/modules
        line: dm_crypt
        state: present

    - name: Create the longhorn directory
      ansible.builtin.file:
        path: /mnt/arcodange/longhorn
        state: directory
        owner: pi
        group: docker
        mode: '0774'
      ignore_errors: true

#---

- name: System K3S
  hosts: raspberries:&local
  tags: never
@@ -72,11 +119,11 @@
- name: k3s
  tags: never
  # ansible.builtin.import_playbook: k3s.orchestration.site
  ansible.builtin.import_playbook: k3s.orchestration.upgrade
  ansible.builtin.import_playbook: k3s.orchestration.site
  # ansible.builtin.import_playbook: k3s.orchestration.upgrade
  # ansible.builtin.import_playbook: k3s.orchestration.reset
  vars:
    k3s_version: v1.32.2+k3s1
    k3s_version: v1.32.7+k3s1
    extra_server_args: "--docker --disable traefik"
    extra_agent_args: "--docker"
    api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
@@ -98,6 +145,42 @@
      path: ~/.kube/config
      regexp: 'server: https://127.0.0.1:6443'
      replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'

# - name: setup hard disk
#   tags: never
#   ansible.builtin.import_playbook: ./setup/hard_disk_v2.yml
# #   vars:
# #     hard_disk__partitions:
# #       nfs: []

- name: setup longhorn for volumes https://docs.k3s.io/helm
  become: true
  ansible.builtin.copy:
    dest: /var/lib/rancher/k3s/server/manifests/longhorn-install.yaml
    content: |-
      apiVersion: helm.cattle.io/v1
      kind: HelmChart
      metadata:
        annotations:
          helmcharts.cattle.io/managed-by: helm-controller
        finalizers:
          - wrangler.cattle.io/on-helm-chart-remove
        generation: 1
        name: longhorn-install
        namespace: kube-system
      spec:
        version: v1.9.1
        chart: longhorn
        repo: https://charts.longhorn.io
        failurePolicy: abort
        targetNamespace: longhorn-system
        createNamespace: true
        valuesContent: |-
          defaultSettings:
            defaultDataPath: /mnt/arcodange/longhorn
  vars:
    longhorn_helm_values: {} # https://github.com/longhorn/longhorn/blob/master/chart/values.yaml

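The README later in this diff says Longhorn should also export its data to an external backup store. A minimal sketch of how that could be added to the same `valuesContent`, assuming an S3-compatible bucket and a pre-created credential secret (bucket, region and secret name are assumptions, not part of this commit range):

```yaml
# Sketch only: bucket, region and secret name are assumed, not taken from this diff.
valuesContent: |-
  defaultSettings:
    defaultDataPath: /mnt/arcodange/longhorn
    backupTarget: s3://arcodange-longhorn-backups@eu-west-1/
    backupTargetCredentialSecret: longhorn-backup-credentials
```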
- name: customize k3s traefik configuration https://docs.k3s.io/helm
  block:
    - name: Get my public IP
@@ -173,6 +256,13 @@
        traefik_helm_values:
          deployment:
            kind: "Deployment"
            initContainers:
              - name: volume-permissions
                image: busybox:latest
                command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"]
                volumeMounts:
                  - name: data
                    mountPath: /data
          # default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
          # current is https://github.com/traefik/traefik-helm-chart/blob/v30.1.0/traefik/values.yaml
          nodeSelector:
@@ -206,6 +296,11 @@
          access:
            enabled: true
            # format: json
          podSecurityContext:
            runAsGroup: 65532
            runAsNonRoot: true
            runAsUser: 65532
            fsGroup: 65532 # else the persistent volume might be owned by root and be unwriteable
          persistence:
            # -- Enable persistence using Persistent Volume Claims
            # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
@@ -215,7 +310,7 @@
            # existingClaim: ""
            accessMode: ReadWriteOnce
            size: 128Mi
            storageClass: "nfs-client"
            storageClass: "longhorn"
            # volumeName: ""
            path: /data
            annotations: {}
@@ -235,6 +330,12 @@
              dnsChallenge:
                # requires env variable DUCKDNS_TOKEN
                provider: duckdns
                propagation:
                  delayBeforeChecks: 120
                  disableChecks: true
                resolvers:
                  - "1.1.1.1:53"
                  - "8.8.8.8:53"
              httpChallenge:
                entryPoint: "web"
            # It has to match the path with a persistent volume
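The comment above notes that the duckdns resolver needs the DUCKDNS_TOKEN environment variable. One way it could be wired into the same traefik_helm_values, assuming the token lives in a Kubernetes secret named `duckdns` (the secret name and key are assumptions, not part of this commit range):

```yaml
# Sketch only: exposes DUCKDNS_TOKEN to the Traefik pod from an assumed secret.
env:
  - name: DUCKDNS_TOKEN
    valueFrom:
      secretKeyRef:
        name: duckdns   # assumed secret, created separately
        key: token
```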
@@ -252,39 +353,6 @@

# ---

- name: setup hard disk
  tags: never
  ansible.builtin.import_playbook: ./setup/hard_disk.yml
  vars:
    hard_disk__partitions:
      nfs: []

- name: Deploy NFS Subdir External Provisioner and alter default traefik deployment
  tags: never
  hosts: localhost
  tasks:
    - name: Deploy NFS Subdir External Provisioner
      block:
        - name: Add Helm repository for NFS Subdir External Provisioner
          kubernetes.core.helm_repository:
            name: nfs-subdir-external-provisioner
            repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
            force_update: yes

        - name: Install NFS Subdir External Provisioner using Helm
          # debug:
          #   var: hard_disk__nfs
          kubernetes.core.helm:
            name: nfs-subdir-external-provisioner
            chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
            release_namespace: "{{ hard_disk__nfs.ks_namespace }}"
            values:
              nfs:
                server: "{{ hard_disk__nfs.server_ip }}"
                path: "{{ hard_disk__nfs.export_directory }}"
      vars:
        hard_disk__nfs: "{{ hostvars[groups.hard_disk[0]].hard_disk__nfs }}"

- name: redeploy traefik
  hosts: localhost
@@ -10,7 +10,7 @@

- name: Fetch Gitea Token for Action Runner registration
  delegate_to: "{{ groups.gitea[0] }}"
  delegate_facts: true
  delegate_facts: false
  ansible.builtin.command:
    docker exec gitea su git -c "gitea actions generate-runner-token"
  register: gitea_runner_token_cmd
@@ -30,7 +30,7 @@
      GITEA_INSTANCE_URL: >-
        http://{{ hostvars[groups.gitea[0]].ansible_host }}:3000
      GITEA_RUNNER_REGISTRATION_TOKEN: "{{ gitea_runner_token_cmd.stdout }}"
      GITEA_RUNNER_NAME: arcodange_global_runner
      GITEA_RUNNER_NAME: arcodange_global_runner_{{ inventory_hostname }}
      # GITEA_RUNNER_LABELS: host={{ansible_host}},env=any
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
@@ -54,7 +54,7 @@
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
capacity: 2
# Extra environment variables to run jobs.
envs:
  A_TEST_ENV_NAME_1: a_test_env_value_1
ansible/arcodange/factory/playbooks/prepare_disks.yml (new file, 123 lines)
@@ -0,0 +1,123 @@
---
- name: Automatically prepare the external disk as ext4 with a label
  hosts: raspberries:&local
  become: yes
  vars:
    mount_point: /mnt/arcodange
    disk_label: arcodange_500

  tasks:

    - name: List all partitions with their labels
      command: "lsblk -o NAME,LABEL,SIZE,TYPE,MOUNTPOINT -J -b"
      register: lsblk_info
      changed_when: false

    - name: Extract all partitions
      set_fact:
        all_partitions: >-
          {{
            (lsblk_info.stdout | from_json).blockdevices
            | map(attribute='children', default=[]) | flatten
            | selectattr('type', 'equalto', 'part')
            | list
          }}

    - name: Check whether the label already exists
      set_fact:
        labeled_partition: >-
          {{
            all_partitions
            | selectattr('label', 'equalto', disk_label)
            | list | first | default(None)
          }}

    - name: Pick a candidate partition (excluding the mmcblk0 system disk)
      set_fact:
        target_partition: "{{ (all_partitions | rejectattr('name', 'search', '^mmcblk0')) | sort(attribute='size') | last }}"
      when: labeled_partition == None

    - name: Set target_device from the existing label or the candidate partition
      set_fact:
        target_device: >-
          {{
            '/dev/' + (
              (labeled_partition.name | default(''))
              if labeled_partition != None
              else target_partition.name
            )
          }}

    - name: Check whether the partition is already mounted at the expected mount point
      set_fact:
        partition_mounted_correctly: >-
          {{
            (labeled_partition != None and labeled_partition.mountpoint == mount_point)
            or (target_partition != None and target_partition.mountpoint == mount_point)
          }}

    - debug:
        var: partition_mounted_correctly

    - name: Ask for confirmation before formatting if the label does not exist
      run_once: true
      when:
        - labeled_partition == None
        - not partition_mounted_correctly
      pause:
        prompt: |
          WARNING: partition {{ target_device }} will be FORMATTED as ext4
          and will receive the label {{ disk_label }}.
          Type 'oui' to continue:
      register: user_confirm

    - name: Abort if the user did not confirm
      fail:
        msg: "Formatting cancelled."
      when:
        - labeled_partition == None
        - not partition_mounted_correctly
        - user_confirm.user_input | lower != 'oui'

    - name: Unmount the partition if it is mounted somewhere else
      mount:
        path: "{{ (labeled_partition.mountpoint if labeled_partition != None else target_partition.mountpoint) }}"
        state: unmounted
      when:
        - not partition_mounted_correctly
        - (labeled_partition != None and labeled_partition.mountpoint not in [mount_point, '', None])
          or (target_partition != None and target_partition.mountpoint not in [mount_point, '', None])

    - name: Format with the label if necessary
      filesystem:
        fstype: ext4
        dev: "{{ target_device }}"
        force: true
        opts: "-L {{ disk_label }}"
      when:
        - not partition_mounted_correctly
        - labeled_partition == None

    - name: Create the mount point if it is missing
      file:
        path: "{{ mount_point }}"
        state: directory
        owner: root
        group: root
        mode: '0755'

    - name: Mount the disk by label (idempotent)
      mount:
        path: "{{ mount_point }}"
        src: "LABEL={{ disk_label }}"
        fstype: ext4
        opts: defaults,nofail
        state: mounted

    - name: Ensure persistence in fstab
      mount:
        path: "{{ mount_point }}"
        src: "LABEL={{ disk_label }}"
        fstype: ext4
        opts: defaults,nofail
        state: present
@@ -1,5 +1,18 @@
# Setup factory services

## Upcoming changes (V2)

The NFS share was a single point of failure: if the hard drive died, everything became corrupted and unusable. The 4 TB disk will therefore no longer be used.

Instead, the Longhorn replication tool will be used through K3s on 500 GB hard drives. The NFS storage class will no longer be available, in favour of Longhorn.

To stop suffering from corruption of the postgres database, which runs outside of k3s, the following solution is put in place:
a persistent volume with ReadWriteMany access and the Longhorn storage class exposes an NFS volume managed by Longhorn. This volume serves as the directory where postgres and gitea backups are dropped periodically (once a day).

In addition, [Longhorn will export the data to external storage](https://longhorn.io/docs/1.9.1/snapshots-and-backups/backup-and-restore/set-backup-target/#set-up-gcp-cloud-storage-backupstore).

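The daily backup drop described above is not implemented in this commit range; a minimal sketch of what it could look like, assuming pg_dump is run through the postgres container on a host that mounts the RWX volume at /mnt/backups (container name, database, user and schedule are assumptions):

```yaml
# Sketch only: daily postgres dump into the Longhorn-backed RWX mount.
- name: Schedule a daily postgres backup into /mnt/backups (illustrative)
  hosts: raspberries:&local
  become: yes
  tasks:
    - name: Daily pg_dump of the gitea database
      ansible.builtin.cron:
        name: "pg_dump gitea into backups volume"
        minute: "0"
        hour: "3"
        job: >-
          docker exec postgres
          pg_dump -U gitea gitea
          > /mnt/backups/gitea-$(date +\%F).sql
```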
## V1

```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'base', 'rough':true } }%%
flowchart
ansible/arcodange/factory/playbooks/setup/backup_nfs.yml (new file, 168 lines)
@@ -0,0 +1,168 @@
---
- name: Create a Longhorn RWX volume for backups
  hosts: localhost
  connection: local
  gather_facts: no
  vars:
    namespace_longhorn: longhorn-system
    backup_volume_name: backups-rwx
    backup_size: 50Gi
    access_mode: ReadWriteMany
    storage_class: longhorn

  tasks:
    - name: Create the RWX PVC in longhorn-system
      tags: never
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: PersistentVolumeClaim
          metadata:
            name: "{{ backup_volume_name }}"
            namespace: "{{ namespace_longhorn }}"
          spec:
            accessModes:
              - "{{ access_mode }}"
            resources:
              requests:
                storage: "{{ backup_size }}"
            storageClassName: "{{ storage_class }}"

    - name: Fetch PVC information
      kubernetes.core.k8s_info:
        api_version: v1
        kind: PersistentVolumeClaim
        namespace: "{{ namespace_longhorn }}"
        name: "{{ backup_volume_name }}"
      register: pvc_info

    - name: Extract the volume name
      set_fact:
        pvc_internal_name: "{{ pvc_info.resources[0].spec.volumeName }}"

    - name: Start a temporary pod to trigger the NFS share
      tags: never
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: Pod
          metadata:
            name: rwx-nfs
            namespace: "{{ namespace_longhorn }}"
          spec:
            containers:
              - name: busybox
                image: busybox
                command: ["sleep", "infinity"]
                # command: ["sh", "-c", "sleep 600"]
                volumeMounts:
                  - mountPath: "/mnt/backups"
                    name: backup-vol
            volumes:
              - name: backup-vol
                persistentVolumeClaim:
                  claimName: "{{ backup_volume_name }}"

    - name: Wait until the rwx-nfs pod is Running
      tags: never
      kubernetes.core.k8s_info:
        api_version: v1
        kind: Pod
        namespace: "{{ namespace_longhorn }}"
        name: rwx-nfs
      register: pod_info
      until: pod_info.resources[0].status.phase == "Running"
      retries: 30
      delay: 5

    - name: Check whether an NFS Service already exists (by labels)
      kubernetes.core.k8s_info:
        api_version: v1
        kind: Service
        namespace: "{{ namespace_longhorn }}"
        label_selectors:
          - "longhorn.io/managed-by=longhorn-manager"
          - "longhorn.io/share-manager={{ pvc_internal_name }}"
      register: existing_nfs_service

    - name: Use the name of the existing Service if one was found
      set_fact:
        nfs_service_name: "{{ existing_nfs_service.resources[0].metadata.name }}"
      when: existing_nfs_service.resources | length > 0

    - name: Create a stable NFS Service if none was found
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: "nfs-{{ backup_volume_name }}"
            namespace: "{{ namespace_longhorn }}"
          spec:
            selector:
              longhorn.io/share-manager: "{{ pvc_internal_name }}"
            ports:
              - name: nfs
                protocol: TCP
                port: 2049
                targetPort: 2049
            type: ClusterIP
      when: existing_nfs_service.resources | length == 0

    - name: Use the name of the created NFS Service if needed
      set_fact:
        nfs_service_name: "nfs-{{ backup_volume_name }}"
      when: existing_nfs_service.resources | length == 0

    - name: Fetch information about the selected NFS Service
      kubernetes.core.k8s_info:
        api_version: v1
        kind: Service
        name: "{{ nfs_service_name }}"
        namespace: "{{ namespace_longhorn }}"
      register: nfs_service

    - name: Store the NFS connection information
      set_fact:
        nfs_info:
          ip: "{{ nfs_service.resources[0].spec.clusterIP }}"
          path: "/{{ pvc_internal_name }}/"

- name: Mount the Longhorn RWX volume on the Raspberry Pis
  hosts: raspberries:&local
  become: yes
  vars:
    backup_mount: "/mnt/backups"
  tasks:
    - name: Install the NFS client
      apt:
        name: nfs-common
        state: present
        update_cache: yes

    - name: Create the mount point
      file:
        path: "{{ backup_mount }}"
        state: directory
        mode: '0755'

    - name: Mount the backup volume
      mount:
        path: "{{ backup_mount }}"
        src: "{{ hostvars['localhost'].nfs_info.ip }}:{{ hostvars['localhost'].nfs_info.path }}"
        fstype: nfs
        opts: vers=4.1,rw,nofail,_netdev,x-systemd.automount
        state: mounted

    - name: Add an fstab entry for automatic mounting
      mount:
        path: "{{ backup_mount }}"
        src: "{{ hostvars['localhost'].nfs_info.ip }}:{{ hostvars['localhost'].nfs_info.path }}"
        fstype: nfs
        opts: vers=4.1,rw,nofail,_netdev,x-systemd.automount
        state: present
@@ -1,44 +1,36 @@
---
- name: Setup Gitea
  hosts: gitea:&hard_disk
  hosts: gitea
  gather_facts: yes
  become: false
  run_once: true

  vars:
    applications: "{{ hard_disk__applications }}"
    app: "{{ gitea }}"
    app_name: gitea

  tasks:
    - name: Deploy gitea Docker Compose configuration
      include_role:
        name: arcodange.factory.deploy_docker_compose
      vars:
        app_name: "{{ app.name }}"
        dockercompose_content: "{{ app.conf.dockercompose }}"
        partition: "{{ app.conf.partition }}"
        app_owner: "{{ app.conf.owner | default('pi') }}"
        app_group: "{{ app.conf.group | default('docker') }}"
      loop: "{{ applications | dict2items(key_name='name', value_name='conf') }}"
      loop_control:
        loop_var: app
        label: "{{ app.name }}"
      when: app.name == 'gitea'
        dockercompose_content: "{{ app.dockercompose }}"
        app_owner: "{{ app.owner | default('pi') }}"
        app_group: "{{ app.group | default('docker') }}"

    - name: Deploy Gitea
      include_role:
        name: deploy_gitea
      vars:
        app_name: gitea
        partition: "{{ applications.gitea.partition }}"
        gitea_container_name: "{{ applications.gitea.dockercompose.services.gitea.container_name }}"
        gitea_container_name: "{{ gitea.dockercompose.services.gitea.container_name }}"
        postgres_host: |-
          {{ applications.gitea.dockercompose.services.gitea.environment.GITEA__database__HOST }}
          {{ gitea.dockercompose.services.gitea.environment.GITEA__database__HOST }}
        postgres_db: |-
          {{ applications.gitea.dockercompose.services.gitea.environment.GITEA__database__NAME }}
          {{ gitea.dockercompose.services.gitea.environment.GITEA__database__NAME }}
        postgres_user: |-
          {{ applications.gitea.dockercompose.services.gitea.environment.GITEA__database__USER }}
          {{ gitea.dockercompose.services.gitea.environment.GITEA__database__USER }}
        postgres_password: |-
          {{ applications.gitea.dockercompose.services.gitea.environment.GITEA__database__PASSWD }}
          {{ gitea.dockercompose.services.gitea.environment.GITEA__database__PASSWD }}

    - name: Create admin user
      block:
@@ -54,7 +46,7 @@
        - name: List admin users
          ansible.builtin.shell:
            cmd: >-
              docker exec -u git {{ applications.gitea.dockercompose.services.gitea.container_name }}
              docker exec -u git {{ gitea.dockercompose.services.gitea.container_name }}
              gitea admin user list --admin
              | awk '{print $2}'
              | tail -n +2
@@ -65,7 +57,7 @@
        - name: Create admin user
          when: gitea_user.name not in gitea_admin_users_list_cmd.stdout.split()
          ansible.builtin.command: >-
            docker exec -u git {{ applications.gitea.dockercompose.services.gitea.container_name }}
            docker exec -u git {{ gitea.dockercompose.services.gitea.container_name }}
            gitea admin user create
            --username {{ gitea_user.name }}
            --email {{ gitea_user.email }}
@@ -1,72 +0,0 @@
# awesome commands:
# sudo fdisk -l
# sudo parted -l
# sudo gparted -- graphical partitioning <-- used to create the partition and give it the matching name/label 'gitea_data'
# sudo testdisk -- data recovery
# sudo blkid -- partition UUIDs, for configuring fstab (auto mount)
# lsblk -fe7 -- partition UUIDs
---
- name: Setup Hard Disk
  hosts: hard_disk
  gather_facts: yes
  become: yes

  vars:
    mount_points: |
      {{
        (
          hard_disk__partitions
          | default( {
            'gitea_data':[],
            'pg_data':[]
          } )
        ).keys() | list
      }}
    verify_partitions: true # Change this to false if you don't want to verify partitions

  tasks:
    - name: Setup partitions
      include_role:
        name: setup_partition
      loop: "{{ mount_points }}"
      loop_control:
        loop_var: mount_point

    - name: Setup NFS
      include_role:
        name: nfs_setup

    - name: Set permissions for group docker on /arcodange
      ansible.builtin.file:
        path: "/arcodange/{{ subdir }}"
        state: directory
        owner: pi
        group: docker
        mode: u=rwX,g=rX,o=r
      loop: "{{ [''] + mount_points }}"
      loop_control:
        loop_var: subdir

    - name: Set ACL for group docker on /arcodange
      ansible.posix.acl:
        path: "/arcodange/{{ subdir }}"
        entity: "docker"
        etype: "group"
        permissions: "rwx"
        state: present
      loop: "{{ [''] + mount_points }}"
      loop_control:
        loop_var: subdir

- name: Mount NFS
  hosts: raspberries:&local
  become: yes

  tasks:
    - name: Setup NFS
      include_role:
        name: nfs_setup
        tasks_from: mount
      vars:
        nfs_setup_export_directory: "{{ hard_disk__nfs.export_directory | default(hostvars[groups.hard_disk[0]].hard_disk__nfs.export_directory) }}"
        nfs_setup_server_ip: "{{ hard_disk__nfs.server_ip | default(hostvars[groups.hard_disk[0]].ansible_host) }}"
@@ -1,37 +1,29 @@
---
- name: Setup Postgres
  hosts: hard_disk
  hosts: postgres
  gather_facts: yes
  become: false

  vars:
    applications: "{{ hard_disk__applications }}"
    applications_databases: "{{ hard_disk__postgres_databases }}"
    postgres_container_name: "{{ applications.postgres.dockercompose.services.postgres.container_name }}"
    app: "{{ postgres }}"
    app_name: postgres
    postgres_container_name: "{{ postgres.dockercompose.services.postgres.container_name }}"

  tasks:
    - name: Deploy postgres Docker Compose configuration
      include_role:
        name: arcodange.factory.deploy_docker_compose
      vars:
        app_name: "{{ app.name }}"
        dockercompose_content: "{{ app.conf.dockercompose }}"
        partition: "{{ app.conf.partition }}"
        app_owner: "{{ app.conf.owner | default('pi') }}"
        app_group: "{{ app.conf.group | default('docker') }}"
      loop: "{{ applications | dict2items(key_name='name', value_name='conf') }}"
      loop_control:
        loop_var: app
        label: "{{ app.name }}"
      when: app.name == 'postgres'
        dockercompose_content: "{{ app.dockercompose }}"
        app_owner: "{{ app.owner | default('pi') }}"
        app_group: "{{ app.group | default('docker') }}"

    - name: Deploy PostgreSQL
      include_role:
        name: deploy_postgresql
      vars:
        app_name: postgres
        partition: "{{ applications.postgres.partition }}"
        # applications_databases: "{{ applications_databases }}" # kept for documentation purposes
        applications_databases:
          gitea: "{{ gitea_database }}"

    - name: Create auth_user for pgbouncer (connection pool component)
      ansible.builtin.shell: |
@@ -62,4 +54,4 @@
      loop_control:
        loop_var: database__pg_instruction
      loop:
        "{{ applications_databases.values() | map(attribute='db_name') | product(pg_instructions) }}"
        "{{ ['postgres', 'gitea'] | product(pg_instructions) }}"
@@ -2,9 +2,9 @@
app_owner: pi
app_group: docker
app_name: gitea
partition: gitea_data
config_path: /arcodange/{{ partition }}/{{ app_name }}/config
data_path: /arcodange/{{ partition }}/{{ app_name }}/data
partition: docker_composes
config_path: /home/pi/arcodange/{{ partition }}/{{ app_name }}/config
data_path: /home/pi/arcodange/{{ partition }}/{{ app_name }}/data
gitea_user:
  name: arcodange
  email: arcodange@gmail.com
@@ -21,7 +21,7 @@

- name: Deploy Gitea with Docker Compose
  community.docker.docker_compose_v2:
    project_src: "/arcodange/{{ partition }}/{{ app_name }}"
    project_src: "/home/pi/arcodange/{{ partition }}/{{ app_name }}"
    pull: missing
    state: present
  register: deploy_result
@@ -7,7 +7,7 @@

- name: Deploy PostgreSQL with Docker Compose
  community.docker.docker_compose_v2:
    project_src: "/arcodange/{{ partition }}/{{ app_name }}"
    project_src: "/home/pi/arcodange/docker_composes/{{ app_name }}"
    pull: missing
    state: present
  register: deploy_result
@@ -1,3 +0,0 @@
---
nfs_setup_export_directory: /arcodange/nfs
# nfs_setup_server_ip: "{{ hostvars['pi2'].ansible_default_ipv4.address }}"
@@ -1,23 +0,0 @@
---
- name: Install Avahi and related packages
  ansible.builtin.apt: # https://www.baeldung.com/linux/conflicting-values-error-resolution
    name: "{{ item }}"
    state: present
    update_cache: yes
  with_items:
    - avahi-daemon
    - avahi-utils

- name: Create Avahi service file for NFS
  template:
    src: nfs.service.j2
    dest: /etc/avahi/services/nfs.service
    owner: root
    group: root
    mode: '0644'

- name: Restart Avahi daemon
  service:
    name: avahi-daemon
    state: restarted
    enabled: yes
@@ -1,39 +0,0 @@
---
- name: Install NFS server package
  ansible.builtin.apt: # https://www.baeldung.com/linux/conflicting-values-error-resolution
    name: nfs-kernel-server
    state: present
    update_cache: yes

- name: Create export directory
  ansible.builtin.file:
    path: "{{ nfs_setup_export_directory }}"
    state: directory
    owner: root
    group: root
    mode: '0755'

- name: Configure /etc/exports
  ansible.builtin.lineinfile:
    path: /etc/exports
    line: "{{ nfs_setup_export_directory }} 192.168.1.0/24(rw,sync,no_subtree_check,anonuid=1000,anongid=1000)"
    create: yes
    state: present

- name: Ensure NFS service is running and enabled
  ansible.builtin.service:
    name: nfs-kernel-server
    state: started
    enabled: yes

- name: Export the shared directories
  ansible.builtin.command: exportfs -ra

- name: Verify NFS exports
  ansible.builtin.command: exportfs -v
  register: nfs_exports

- ansible.builtin.debug:
    msg: "NFS Exports: {{ nfs_exports.stdout }}"

- include_tasks: announce.yml
@@ -1,24 +0,0 @@
---
- name: Install NFS client package
  ansible.builtin.apt: # https://www.baeldung.com/linux/conflicting-values-error-resolution
    name: nfs-common
    state: present
    update_cache: yes

- name: Create local mount directory
  ansible.builtin.file:
    path: /mnt/nfs
    state: directory
    owner: pi
    group: docker
    mode: '0774'
  ignore_errors: true

- name: Mount NFS share
  mount:
    src: "{{ nfs_setup_server_ip }}:{{ nfs_setup_export_directory }}"
    path: /mnt/nfs
    fstype: nfs
    opts: rw,vers=4
    state: mounted
  ignore_errors: true
@@ -1,9 +0,0 @@
<?xml version="1.0" standalone='no'?>
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<service-group>
  <name replace-wildcards="yes">%h NFS</name>
  <service>
    <type>_nfs._tcp</type>
    <port>2049</port>
  </service>
</service-group>
@@ -1,38 +0,0 @@
Role Name
=========

A brief description of the role goes here.

Requirements
------------

Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.

Role Variables
--------------

A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.

Dependencies
------------

A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.

Example Playbook
----------------

Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:

    - hosts: servers
      roles:
        - { role: username.rolename, x: 42 }

License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
@@ -1,4 +0,0 @@
---
# defaults file for roles/setup_partition
mount_points: []
verify_partitions: false
@@ -1,44 +0,0 @@
---
- name: Optionally verify partition existence
  ansible.builtin.set_fact:
    device: "/dev/sda"
  when: verify_partitions | default(false)

- name: Read device information
  community.general.parted:
    device: "{{ device }}"
    unit: GiB
  register: device_info
  when: verify_partitions | default(false)

- name: Select partition
  ansible.builtin.set_fact:
    disk: |-
      {{ device + (
        device_info | to_json | from_json
        | community.general.json_query(jmes_path) | string
      )
      }}
  vars:
    jmes_path: partitions[?name == '{{ mount_point }}'].num | [0]
  failed_when: disk.endswith('None')
  when: verify_partitions | default(false)

- name: Check if mount directory exists
  stat:
    path: "/arcodange/{{ mount_point }}"
  register: mount_dir_stat

- name: Create the mount directory
  ansible.builtin.file:
    path: "/arcodange/{{ mount_point }}"
    state: directory
  when: not mount_dir_stat.stat.exists

- name: Declare mount point
  ansible.builtin.lineinfile:
    path: /etc/fstab
    line: "LABEL={{ mount_point }} /arcodange/{{ mount_point }} ext4 defaults 0 0"

- name: Use updated mount list
  ansible.builtin.command: mount -a
@@ -5,8 +5,8 @@
  tasks:
    - ansible.builtin.ping:

- name: setup hard disk
  ansible.builtin.import_playbook: hard_disk.yml
- name: prepare backups shared directory
  ansible.builtin.import_playbook: backup_nfs.yml
  tags: never

- name: setup factory postgres
@@ -37,6 +37,13 @@
      register: last_tofu_command
      loop:
        - tofu init -no-color
        # - >-
        #   tofu destroy -auto-approve -no-color
        #   -var='gitea_app={{ gitea_app | to_json }}'
        #   -var='vault_address={{ vault_address }}'
        #   -var='vault_token={{ vault_root_token }}'
        #   -var='postgres_admin_credentials={{ postgres_admin_credentials | to_json }}'
        #   -var='gitea_admin_token={{ gitea_admin_token }}'
        - >-
          tofu apply -auto-approve -no-color
          -var='gitea_app={{ gitea_app | to_json }}'
@@ -2,6 +2,7 @@ app_name: "{{ (dockercompose_content | from_yaml).name }}"
app_owner: pi
app_group: docker
partition: docker_composes
hard_disk_root_path: /arcodange
no_hard_disk_root_path: /home/pi/arcodange
root_path: "{{ ('hard_disk' in group_names) | ansible.builtin.ternary(hard_disk_root_path, no_hard_disk_root_path) }}"
# hard_disk_root_path: /arcodange
# no_hard_disk_root_path: /home/pi/arcodange
# root_path: "{{ ('hard_disk' in group_names) | ansible.builtin.ternary(hard_disk_root_path, no_hard_disk_root_path) }}"
root_path: /home/pi/arcodange
@@ -4,5 +4,5 @@ gitea_token_scopes: write:admin,write:organization,write:package,write:repositor
gitea_token_fact_name: gitea_api_token
gitea_base_url: 'http://{{ groups.gitea[0] }}:3000'
gitea_token_replace: false
gitea_token_name: ansible-{{ ansible_date_time.iso8601 }} # require gathering facts
gitea_token_name: ansible-{{ ansible_date_time.iso8601 }}-{{ inventory_hostname }} # require gathering facts
gitea_token_delete: false # only delete token
iac/README.md (new file, 5 lines)
@@ -0,0 +1,5 @@
#

Provisions a gitea user "tofu_module_reader",
authorized to read certain projects; it is used by the CI to fetch terraform blueprints
via its ssh key stored in vault.
@@ -9,7 +9,7 @@ resource "gitea_user" "tofu" {
  email                = "tofu-module-reader@arcodange.fake"
  must_change_password = false
  full_name            = "restricted CI user"
  prohibit_login       = true
  prohibit_login       = false
  restricted           = true
  visibility           = "private"
}
@@ -1,6 +1,6 @@
resource "random_password" "credentials_editor" {
  length           = 24
  override_special = "-:!+<>$"
  override_special = "-:!+<>"
}

resource "postgresql_role" "credentials_editor" {