refactor storage and set up shared backup directory

2025-08-27 17:26:05 +02:00
parent 588a6482e9
commit 3cfc5f2bfd
20 changed files with 223 additions and 389 deletions

View File

@@ -1,25 +1,16 @@
gitea_version: 1.24.3
gitea_partition: |-
{{
hard_disk__partitions | dict2items | selectattr(
'value', 'contains', 'gitea'
) | map(attribute='key') | first
}}
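# e.g. with hard_disk__partitions: {nfs: [], gitea_data: [gitea], pg_data: [postgres]},
# dict2items | selectattr('value', 'contains', 'gitea') keeps only the gitea_data item,
# so the whole expression resolves to the key 'gitea_data'.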
gitea_database:
db_name: gitea
db_user: gitea
db_password: gitea
gitea:
partition: "{{ gitea_partition }}"
database:
dockercompose:
name: arcodange_factory
networks:
gitea:
name: arcodange_factory_gitea
postgres:
name: arcodange_factory_postgres
external: true
services:
gitea:
@@ -45,12 +36,18 @@ gitea:
GITEA__server__SSH_PORT: 2222
GITEA__server__SSH_DOMAIN: "{{ lookup('dig', groups.gitea[0]) }}"
GITEA__server__SSH_LISTEN_PORT: 22
GITEA__server__DOMAIN: localhost
GITEA__server__HTTP_PORT: 3000
GITEA__server__ROOT_URL: https://gitea.arcodange.duckdns.org/
GITEA__server__START_SSH_SERVER: true
GITEA__server__OFFLINE_MODE: true
GITEA__service__DISABLE_REGISTRATION: true
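# Gitea's environment-to-ini convention maps GITEA__section__KEY variables to KEY in
# the [section] block of app.ini; the double underscore after the GITEA prefix is required.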
networks:
- gitea
- postgres
ports:
- "3000:3000"
- "2222:22"
volumes:
- /arcodange/{{gitea_partition}}/gitea/data:/data
- /home/pi/arcodange/docker_composes/gitea/data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro

View File

@@ -1,21 +0,0 @@
# to add/mount a partition, use the gparted utility to create it beforehand with the matching name/label
hard_disk__partitions:
nfs: []
gitea_data:
- gitea
pg_data:
- postgres
hard_disk__applications:
postgres: "{{ postgres }}"
gitea: "{{ gitea }}"
hard_disk__postgres_databases:
gitea: "{{ gitea_database }}"
webapp:
db_name: webapp
hard_disk__nfs:
server_ip: "{{ ansible_host }}"
ks_namespace: kube-system
export_directory: /arcodange/nfs

View File

@@ -1,15 +1,8 @@
postgres_partition: |-
{{
hard_disk__partitions | dict2items | selectattr(
'value', 'contains', 'postgres'
) | map(attribute='key') | first
}}
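# same key-by-membership lookup as gitea_partition above; with the default
# hard_disk__partitions this resolves to 'pg_data'.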
postgres:
partition: "{{ postgres_partition }}"
dockercompose:
name: arcodange_factory
networks:
gitea:
postgres:
external: false
services:
postgres:
@@ -21,11 +14,11 @@ postgres:
POSTGRES_PASSWORD: postgres
POSTGRES_DB: postgres
networks:
- gitea
- postgres
ports:
- "5432:5432"
volumes:
- /arcodange/{{postgres_partition}}/postgres/data:/var/lib/postgresql/data
- /home/pi/arcodange/docker_composes/postgres/data:/var/lib/postgresql/data
pgbouncer:
auth_user: &pgbouncer_auth pgbouncer_auth

View File

@@ -31,17 +31,13 @@ local:
pi2:
pi3:
hard_disk:
hosts:
pi2: # 4TB Toshiba external hard drive (/dev/sda)
postgres:
children:
hard_disk:
hosts:
pi2:
gitea:
children:
hard_disk:
postgres:
all:
children:

View File

@@ -1,5 +1,8 @@
---
- name: Prepare disks for longhorn
ansible.builtin.import_playbook: ./prepare_disks.yml
- name: System Docker
hosts: raspberries:&local
gather_facts: yes

View File

@@ -0,0 +1,123 @@
---
- name: Automatically prepare the external disk as ext4 with a label
hosts: raspberries:&local
become: yes
vars:
mount_point: /mnt/arcodange
disk_label: arcodange_500
tasks:
- name: List all partitions with their labels
command: "lsblk -o NAME,LABEL,SIZE,TYPE,MOUNTPOINT -J -b"
register: lsblk_info
changed_when: false
- name: Extract all partitions
set_fact:
all_partitions: >-
{{
(lsblk_info.stdout | from_json).blockdevices
| map(attribute='children', default=[]) | flatten
| selectattr('type', 'equalto', 'part')
| list
}}
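# lsblk -J returns JSON roughly shaped like (illustrative values):
# {"blockdevices": [{"name": "sda", "type": "disk", "children": [
#   {"name": "sda1", "label": "arcodange_500", "size": 500107862016, "type": "part", "mountpoint": null}]}]}
# The chain above flattens every device's children and keeps only the type == 'part' entries.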
- name: Check whether the label already exists
set_fact:
labeled_partition: >-
{{
all_partitions
| selectattr('label', 'equalto', disk_label)
| list | first | default(None)
}}
- name: Pick a candidate partition (excluding the system disk mmcblk0)
set_fact:
target_partition: "{{ (all_partitions | rejectattr('name', 'search', '^mmcblk0')) | sort(attribute='size') | last }}"
when: labeled_partition == None
- name: Set target_device from the existing label or the candidate partition
set_fact:
target_device: >-
{{
'/dev/' + (
(labeled_partition.name | default(''))
if labeled_partition != None
else target_partition.name
)
}}
- name: Check whether the partition is already mounted at the expected mount point
set_fact:
partition_mounted_correctly: >-
{{
(labeled_partition != None and labeled_partition.mountpoint == mount_point)
or (target_partition != None and target_partition.mountpoint == mount_point)
}}
- debug:
var: partition_mounted_correctly
- name: Ask for confirmation before formatting when the label does not exist
run_once: true
when:
- labeled_partition == None
- not partition_mounted_correctly
pause:
prompt: |
WARNING: partition {{ target_device }} will be FORMATTED as ext4
and will receive the label {{ disk_label }}.
Type 'yes' to continue:
register: user_confirm
- name: Abort if the user did not confirm
fail:
msg: "Formatting aborted."
when:
- labeled_partition == None
- not partition_mounted_correctly
- user_confirm.user_input | lower != 'yes'
- name: Unmount the partition if it is mounted elsewhere
mount:
path: "{{ (labeled_partition.mountpoint if labeled_partition != None else target_partition.mountpoint) }}"
state: unmounted
when:
- not partition_mounted_correctly
- (labeled_partition != None and labeled_partition.mountpoint not in [mount_point, '', None])
or (target_partition != None and target_partition.mountpoint not in [mount_point, '', None])
- name: Format with the label if needed
filesystem:
fstype: ext4
dev: "{{ target_device }}"
force: true
opts: "-L {{ disk_label }}"
when:
- not partition_mounted_correctly
- labeled_partition == None
- name: Create the mount point if missing
file:
path: "{{ mount_point }}"
state: directory
owner: root
group: root
mode: '0755'
- name: Mount the disk by label (idempotent)
mount:
path: "{{ mount_point }}"
src: "LABEL={{ disk_label }}"
fstype: ext4
opts: defaults,nofail
state: mounted
- name: Ensure persistence in fstab
mount:
path: "{{ mount_point }}"
src: "LABEL={{ disk_label }}"
fstype: ext4
opts: defaults,nofail
state: present
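# A typical run (hypothetical inventory path shown); this playbook is also pulled in
# via the ansible.builtin.import_playbook of ./prepare_disks.yml above:
#   ansible-playbook -i inventory.yml prepare_disks.yml
# When a partition already carries the label, the confirmation and formatting tasks
# are skipped and only the mount/fstab tasks apply, so re-runs are idempotent.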

View File

@@ -76,23 +76,59 @@
until: pod_info.resources[0].status.phase == "Running"
retries: 30
delay: 5
- name: Fetch the Longhorn NFS Endpoints
- name: Check whether an NFS Service already exists (by labels)
kubernetes.core.k8s_info:
api_version: v1
kind: Endpoints
kind: Service
namespace: "{{ namespace_longhorn }}"
name: "{{ pvc_internal_name }}"
register: nfs_endpoint
- name: Extract the NFS pod IP
label_selectors:
- "longhorn.io/managed-by=longhorn-manager"
- "longhorn.io/share-manager={{ pvc_internal_name }}"
register: existing_nfs_service
- name: Set the existing Service name if one was found
set_fact:
backup_nfs_ip: "{{ nfs_endpoint.resources[0].subsets[0].addresses[0].ip }}"
nfs_service_name: "{{ existing_nfs_service.resources[0].metadata.name }}"
when: existing_nfs_service.resources | length > 0
- name: Create a stable NFS Service if none was found
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: "nfs-{{ backup_volume_name }}"
namespace: "{{ namespace_longhorn }}"
spec:
selector:
longhorn.io/share-manager: "{{ pvc_internal_name }}"
ports:
- name: nfs
protocol: TCP
port: 2049
targetPort: 2049
type: ClusterIP
when: existing_nfs_service.resources | length == 0
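# Unlike the share-manager pod IP previously read from the Endpoints object, this
# Service's ClusterIP survives pod restarts, giving the NFS mounts below a stable address.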
- name: Set the name of the created NFS Service if needed
set_fact:
nfs_service_name: "nfs-{{ backup_volume_name }}"
when: existing_nfs_service.resources | length == 0
- name: Fetch info for the selected NFS Service
kubernetes.core.k8s_info:
api_version: v1
kind: Service
name: "{{ nfs_service_name }}"
namespace: "{{ namespace_longhorn }}"
register: nfs_service
- name: Save NFS info
set_fact:
nfs_info:
ip: "{{ backup_nfs_ip }}"
ip: "{{ nfs_service.resources[0].spec.clusterIP }}"
path: "/{{ pvc_internal_name }}/"
- name: Mount the Longhorn RWX volume on the Raspberry Pi
@@ -118,7 +154,7 @@
path: "{{ backup_mount }}"
src: "{{ hostvars['localhost'].nfs_info.ip }}:{{ hostvars['localhost'].nfs_info.path }}"
fstype: nfs
opts: vers=4.1,rw
opts: vers=4.1,rw,nofail,_netdev,x-systemd.automount
state: mounted
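# nofail keeps boot from hanging when the NFS server is unreachable, _netdev defers the
# mount until the network is up, and x-systemd.automount mounts lazily on first access.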
- name: Add an fstab entry for automatic mounting
@@ -126,7 +162,7 @@
path: "{{ backup_mount }}"
src: "{{ hostvars['localhost'].nfs_info.ip }}:{{ hostvars['localhost'].nfs_info.path }}"
fstype: nfs
opts: rw
opts: vers=4.1,rw,nofail,_netdev,x-systemd.automount
state: present

View File

@@ -1,44 +1,36 @@
---
- name: Setup Gitea
hosts: gitea:&hard_disk
hosts: gitea
gather_facts: yes
become: false
run_once: true
vars:
applications: "{{ hard_disk__applications }}"
app: "{{ gitea }}"
app_name: gitea
tasks:
- name: Deploy gitea Docker Compose configuration
include_role:
name: arcodange.factory.deploy_docker_compose
vars:
app_name: "{{ app.name }}"
dockercompose_content: "{{ app.conf.dockercompose }}"
partition: "{{ app.conf.partition }}"
app_owner: "{{ app.conf.owner | default('pi') }}"
app_group: "{{ app.conf.group | default('docker') }}"
loop: "{{ applications | dict2items(key_name='name', value_name='conf') }}"
loop_control:
loop_var: app
label: "{{ app.name }}"
when: app.name == 'gitea'
dockercompose_content: "{{ app.dockercompose }}"
app_owner: "{{ app.owner | default('pi') }}"
app_group: "{{ app.group | default('docker') }}"
- name: Deploy Gitea
include_role:
name: deploy_gitea
vars:
app_name: gitea
partition: "{{ applications.gitea.partition }}"
gitea_container_name: "{{ applications.gitea.dockercompose.services.gitea.container_name }}"
gitea_container_name: "{{ gitea.dockercompose.services.gitea.container_name }}"
postgres_host: |-
{{ applications.gitea.dockercompose.services.gitea.environment.GITEA__database__HOST }}
{{ gitea.dockercompose.services.gitea.environment.GITEA__database__HOST }}
postgres_db: |-
{{ applications.gitea.dockercompose.services.gitea.environment.GITEA__database__NAME }}
{{ gitea.dockercompose.services.gitea.environment.GITEA__database__NAME }}
postgres_user: |-
{{ applications.gitea.dockercompose.services.gitea.environment.GITEA__database__USER }}
{{ gitea.dockercompose.services.gitea.environment.GITEA__database__USER }}
postgres_password: |-
{{ applications.gitea.dockercompose.services.gitea.environment.GITEA__database__PASSWD }}
{{ gitea.dockercompose.services.gitea.environment.GITEA__database__PASSWD }}
- name: Create admin user
block:
@@ -54,7 +46,7 @@
- name: List admin users
ansible.builtin.shell:
cmd: >-
docker exec -u git {{ applications.gitea.dockercompose.services.gitea.container_name }}
docker exec -u git {{ gitea.dockercompose.services.gitea.container_name }}
gitea admin user list --admin
| awk '{print $2}'
| tail -n +2
@@ -65,7 +57,7 @@
- name: Create admin user
when: gitea_user.name not in gitea_admin_users_list_cmd.stdout.split()
ansible.builtin.command: >-
docker exec -u git {{ applications.gitea.dockercompose.services.gitea.container_name }}
docker exec -u git {{ gitea.dockercompose.services.gitea.container_name }}
gitea admin user create
--username {{ gitea_user.name }}
--email {{ gitea_user.email }}

View File

@@ -1,72 +0,0 @@
# awesome commands:
# sudo fdisk -l
# sudo parted -l
# sudo gparted -- graphical partitioning <-- used to create the partition and give it the name+label 'gitea_data'
# sudo testdisk -- data recovery
# sudo blkid -- partition UUIDs for configuring fstab (auto-mount)
# lsblk -fe7 -- partition UUIDs
---
- name: Setup Hard Disk
hosts: hard_disk
gather_facts: yes
become: yes
vars:
mount_points: |
{{
(
hard_disk__partitions
| default( {
'gitea_data':[],
'pg_data':[]
} )
).keys() | list
}}
verify_partitions: true # Change this to false if you don't want to verify partitions
tasks:
- name: Setup partitions
include_role:
name: setup_partition
loop: "{{ mount_points }}"
loop_control:
loop_var: mount_point
- name: Setup NFS
include_role:
name: nfs_setup
- name: Set permissions for group docker on /arcodange
ansible.builtin.file:
path: "/arcodange/{{ subdir }}"
state: directory
owner: pi
group: docker
mode: u=rwX,g=rX,o=r
loop: "{{ [''] + mount_points }}"
loop_control:
loop_var: subdir
- name: Set ACL for group docker on /arcodange
ansible.posix.acl:
path: "/arcodange/{{ subdir }}"
entity: "docker"
etype: "group"
permissions: "rwx"
state: present
loop: "{{ [''] + mount_points }}"
loop_control:
loop_var: subdir
- name: Mount NFS
hosts: raspberries:&local
become: yes
tasks:
- name: Setup NFS
include_role:
name: nfs_setup
tasks_from: mount
vars:
nfs_setup_export_directory: "{{ hard_disk__nfs.export_directory | default(hostvars[groups.hard_disk[0]].hard_disk__nfs.export_directory) }}"
nfs_setup_server_ip: "{{ hard_disk__nfs.server_ip | default(hostvars[groups.hard_disk[0]].ansible_host) }}"

View File

@@ -1,120 +0,0 @@
---
- name: Automatically prepare the external disk as ext4
hosts: raspberries:&local
become: yes
vars:
mount_point: /mnt/arcodange
tasks:
- name: List all partitions in bytes (lsblk -b)
command: lsblk -b -J -o NAME,SIZE,TYPE,MOUNTPOINT
register: lsblk_json
changed_when: false
- name: Extract all child partitions
set_fact:
all_partitions: >-
{{
lsblk_json.stdout | from_json | json_query("blockdevices[].children")
| flatten
}}
- name: Filter out partitions on the system disk (regardless of mountpoint)
set_fact:
candidate_partitions: >-
{{
all_partitions
| selectattr('name', 'search', '^((?!mmcblk0).)*$')
| list
}}
- name: Ensure at least one candidate partition exists
fail:
msg: "No external partition found."
when: candidate_partitions | length == 0
- name: Pick the largest of the candidate partitions
block:
- set_fact:
target_partition: "{{ (candidate_partitions | sort(attribute='size'))[-1] }}"
- set_fact:
target_device: "/dev/{{ target_partition.name }}"
- name: Check whether the partition is already mounted at the desired mount point
set_fact:
partition_mounted_correctly: "{{ target_partition.mountpoint == mount_point }}"
- debug:
var: partition_mounted_correctly
- name: Ask for confirmation before formatting and (un)mounting
run_once: true
pause:
prompt: |
Warning: partition {{ target_device }} will be formatted and (un)mounted.
Type 'yes' to continue, anything else to abort:
register: user_confirm
- name: Abort if the user did not confirm
fail:
msg: "Formatting aborted by the user."
when: user_confirm.user_input | lower != 'yes'
- name: Unmount the partition if mounted elsewhere
mount:
path: "{{ target_partition.mountpoint }}"
state: unmounted # use absent if not in fstab, but for a new, unconfigured disk that is normally not the case
when:
- target_partition.mountpoint is defined
- target_partition.mountpoint != ''
- target_partition.mountpoint != mount_point
- name: Format the partition as ext4 if not mounted at the right point
filesystem:
fstype: ext4
dev: "{{ target_device }}"
force: true
when: not partition_mounted_correctly
- name: Unmount the partition if mounted elsewhere
mount:
path: "{{ target_partition.mountpoint }}"
state: absent
when:
- target_partition.mountpoint is defined
- target_partition.mountpoint != ''
- target_partition.mountpoint != mount_point
- name: Format the partition as ext4 if not mounted at the right point
filesystem:
fstype: ext4
dev: "{{ target_device }}"
when: not partition_mounted_correctly
- name: Create the mount point if missing
file:
path: "{{ mount_point }}"
state: directory
owner: root
group: root
mode: '0755'
- name: Mount the partition with idempotent options
mount:
path: "{{ mount_point }}"
src: "{{ target_device }}"
fstype: ext4
opts: defaults
state: mounted
when: not partition_mounted_correctly
- name: Add fstab entry if missing (ensures persistence)
mount:
path: "{{ mount_point }}"
src: "{{ target_device }}"
fstype: ext4
opts: defaults
state: present
when: not partition_mounted_correctly

View File

@@ -1,37 +1,29 @@
---
- name: Setup Postgres
hosts: hard_disk
hosts: postgres
gather_facts: yes
become: false
vars:
applications: "{{ hard_disk__applications }}"
applications_databases: "{{ hard_disk__postgres_databases }}"
postgres_container_name: "{{ applications.postgres.dockercompose.services.postgres.container_name }}"
app: "{{ postgres }}"
app_name: postgres
postgres_container_name: "{{ postgres.dockercompose.services.postgres.container_name }}"
tasks:
- name: Deploy postgres Docker Compose configuration
include_role:
name: arcodange.factory.deploy_docker_compose
vars:
app_name: "{{ app.name }}"
dockercompose_content: "{{ app.conf.dockercompose }}"
partition: "{{ app.conf.partition }}"
app_owner: "{{ app.conf.owner | default('pi') }}"
app_group: "{{ app.conf.group | default('docker') }}"
loop: "{{ applications | dict2items(key_name='name', value_name='conf') }}"
loop_control:
loop_var: app
label: "{{ app.name }}"
when: app.name == 'postgres'
dockercompose_content: "{{ app.dockercompose }}"
app_owner: "{{ app.owner | default('pi') }}"
app_group: "{{ app.group | default('docker') }}"
- name: Deploy PostgreSQL
include_role:
name: deploy_postgresql
vars:
app_name: postgres
partition: "{{ applications.postgres.partition }}"
# applications_databases: "{{ applications_databases }}" # kept for documentation purposes
applications_databases:
gitea: "{{ gitea_database }}"
- name: Create auth_user for pgbouncer (connection pool component)
ansible.builtin.shell: |
@@ -62,4 +54,4 @@
loop_control:
loop_var: database__pg_instruction
loop:
"{{ ( ['postgres'] + ( applications_databases.values() | map(attribute='db_name') ) ) | product(pg_instructions) }}"
"{{ ['postgres', 'gitea'] | product(pg_instructions) }}"

View File

@@ -2,9 +2,9 @@
app_owner: pi
app_group: docker
app_name: gitea
partition: gitea_data
config_path: /arcodange/{{ partition }}/{{ app_name }}/config
data_path: /arcodange/{{ partition }}/{{ app_name }}/data
partition: docker_composes
config_path: /home/pi/arcodange/{{ partition }}/{{ app_name }}/config
data_path: /home/pi/arcodange/{{ partition }}/{{ app_name }}/data
gitea_user:
name: arcodange
email: arcodange@gmail.com

View File

@@ -21,7 +21,7 @@
- name: Deploy Gitea with Docker Compose
community.docker.docker_compose_v2:
project_src: "/arcodange/{{ partition }}/{{ app_name }}"
project_src: "/home/pi/arcodange/{{ partition }}/{{ app_name }}"
pull: missing
state: present
register: deploy_result

View File

@@ -7,7 +7,7 @@
- name: Deploy PostgreSQL with Docker Compose
community.docker.docker_compose_v2:
project_src: "/arcodange/{{ partition }}/{{ app_name }}"
project_src: "/home/pi/arcodange/docker_composes/{{ app_name }}"
pull: missing
state: present
register: deploy_result

View File

@@ -1,38 +0,0 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regard to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@@ -1,4 +0,0 @@
---
# defaults file for roles/setup_partition
mount_points: []
verify_partitions: false

View File

@@ -1,44 +0,0 @@
---
- name: Optionally verify partition existence
ansible.builtin.set_fact:
device: "/dev/sda"
when: verify_partitions | default(false)
- name: Read device information
community.general.parted:
device: "{{ device }}"
unit: GiB
register: device_info
when: verify_partitions | default(false)
- name: Select partition
ansible.builtin.set_fact:
disk: |-
{{ device + (
device_info | to_json | from_json
| community.general.json_query(jmes_path) | string
)
}}
vars:
jmes_path: partitions[?name == '{{ mount_point }}'].num | [0]
failed_when: disk.endswith('None')
when: verify_partitions | default(false)
- name: Check if mount directory exists
stat:
path: "/arcodange/{{ mount_point }}"
register: mount_dir_stat
- name: Create the mount directory
ansible.builtin.file:
path: "/arcodange/{{ mount_point }}"
state: directory
when: not mount_dir_stat.stat.exists
- name: Declare mount point
ansible.builtin.lineinfile:
path: /etc/fstab
line: "LABEL={{ mount_point }} /arcodange/{{ mount_point }} ext4 defaults 0 0"
- name: Use updated mount list
ansible.builtin.command: mount -a

View File

@@ -5,8 +5,8 @@
tasks:
- ansible.builtin.ping:
- name: setup hard disk
ansible.builtin.import_playbook: hard_disk_v2.yml
- name: prepare shared backup directory
ansible.builtin.import_playbook: backup_nfs.yml
tags: never
- name: setup factory postgres

View File

@@ -2,6 +2,7 @@ app_name: "{{ (dockercompose_content | from_yaml).name }}"
app_owner: pi
app_group: docker
partition: docker_composes
hard_disk_root_path: /arcodange
no_hard_disk_root_path: /home/pi/arcodange
root_path: "{{ ('hard_disk' in group_names) | ansible.builtin.ternary(hard_disk_root_path, no_hard_disk_root_path) }}"
# hard_disk_root_path: /arcodange
# no_hard_disk_root_path: /home/pi/arcodange
# root_path: "{{ ('hard_disk' in group_names) | ansible.builtin.ternary(hard_disk_root_path, no_hard_disk_root_path) }}"
root_path: /home/pi/arcodange