Fix host lookups and set preferred_ip since the new interface is eth0

This commit is contained in:
2025-10-09 17:27:42 +02:00
parent 83410d9eb1
commit 9b09e6bd86
9 changed files with 62 additions and 29 deletions

View File

@@ -34,7 +34,8 @@ gitea:
GITEA__mailer__SMTP_PORT: 465 GITEA__mailer__SMTP_PORT: 465
GITEA__mailer__PASSWD: '{{ gitea_vault.GITEA__mailer__PASSWD }}' GITEA__mailer__PASSWD: '{{ gitea_vault.GITEA__mailer__PASSWD }}'
GITEA__server__SSH_PORT: 2222 GITEA__server__SSH_PORT: 2222
GITEA__server__SSH_DOMAIN: "{{ lookup('dig', groups.gitea[0]) }}" GITEA__server__SSH_DOMAIN: "{{ hostvars[groups.gitea[0]]['preferred_ip'] }}"
# GITEA__server__SSH_DOMAIN: "{{ lookup('dig', groups.gitea[0]) }}" # might work again if deactivate rpi wifi
GITEA__server__SSH_LISTEN_PORT: 22 GITEA__server__SSH_LISTEN_PORT: 22
GITEA_server__DOMAIN: localhost GITEA_server__DOMAIN: localhost
GITEA_server__HTTP_PORT: 3000 GITEA_server__HTTP_PORT: 3000

View File

@@ -2,12 +2,15 @@ raspberries:
hosts: hosts:
pi1: pi1:
ansible_host: pi1.home # setup http://192.168.1.1/ Réseau/DNS ansible_host: pi1.home # setup http://192.168.1.1/ Réseau/DNS
preferred_ip: 192.168.1.201
ansible_ssh_extra_args: '-o StrictHostKeyChecking=no' ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
pi2: pi2:
ansible_host: pi2.home ansible_host: pi2.home
preferred_ip: 192.168.1.202
ansible_ssh_extra_args: '-o StrictHostKeyChecking=no' ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
pi3: pi3:
ansible_host: pi3.home ansible_host: pi3.home
preferred_ip: 192.168.1.203
ansible_ssh_extra_args: '-o StrictHostKeyChecking=no' ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
internetPi1: internetPi1:

View File

@@ -118,12 +118,12 @@
run_once: true run_once: true
- name: k3s - name: k3s
tags: never tags: never,k3s
ansible.builtin.import_playbook: k3s.orchestration.site ansible.builtin.import_playbook: k3s.orchestration.site
# ansible.builtin.import_playbook: k3s.orchestration.upgrade # ansible.builtin.import_playbook: k3s.orchestration.upgrade
# ansible.builtin.import_playbook: k3s.orchestration.reset # ansible.builtin.import_playbook: k3s.orchestration.reset
vars: vars:
k3s_version: v1.32.7+k3s1 k3s_version: v1.34.1+k3s1
extra_server_args: "--docker --disable traefik" extra_server_args: "--docker --disable traefik"
extra_agent_args: "--docker" extra_agent_args: "--docker"
api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}" api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
@@ -217,8 +217,23 @@
gitea: gitea:
loadBalancer: loadBalancer:
servers: servers:
- url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000" - url: "http://{{ hostvars[groups.gitea[0]]['preferred_ip'] }}:3000"
# - url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000" # might work again if deactivate rpi wifi
routers: routers:
dashboard:
rule: Host(`traefik.arcodange.duckdns.org`)
service: api@internal
middlewares:
- localIp
tls:
certResolver: letsencrypt
domains:
- main: "arcodange.duckdns.org"
sans:
- "traefik.arcodange.duckdns.org"
entryPoints:
- websecure
- web
acme-challenge: acme-challenge:
rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`) rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
service: acme-http@internal service: acme-http@internal
@@ -266,7 +281,7 @@
# default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`) # default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
# current is https://github.com/traefik/traefik-helm-chart/blob/v30.1.0/traefik/values.yaml # current is https://github.com/traefik/traefik-helm-chart/blob/v30.1.0/traefik/values.yaml
nodeSelector: nodeSelector:
node-role.kubernetes.io/master: 'true' # make predictible choice of node to direct https traffic to this node and avoid NAT/loss of client IP node-role.kubernetes.io/control-plane: 'true' # make predictible choice of node to direct https traffic to this node and avoid NAT/loss of client IP
service: service:
spec: spec:
externalTrafficPolicy: Local externalTrafficPolicy: Local

View File

@@ -203,7 +203,7 @@
gitea_token_replace: true gitea_token_replace: true
- name: Figure out k3s master node - name: Figure out k3s master node
shell: shell:
kubectl get nodes -l node-role.kubernetes.io/master=true -o name | sed s'#node/##' kubectl get nodes -l node-role.kubernetes.io/control-plane=true -o name | sed s'#node/##'
register: get_k3s_master_node register: get_k3s_master_node
changed_when: false changed_when: false
- name: Get kubernetes server internal url - name: Get kubernetes server internal url

View File

@@ -35,6 +35,7 @@
content: | content: |
#!/bin/bash #!/bin/bash
set -e set -e
mkdir -p {{ backup_dir }}
{{ backup_cmd }} > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).gitea.gz {{ backup_cmd }} > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).gitea.gz
find {{ backup_dir }} -type f -name 'backup_*.gitea.gz' -mtime +{{ keep_days }} -delete find {{ backup_dir }} -type f -name 'backup_*.gitea.gz' -mtime +{{ keep_days }} -delete

View File

@@ -33,6 +33,7 @@
content: | content: |
#!/bin/bash #!/bin/bash
set -e set -e
mkdir -p {{ backup_dir }}
{{ backup_cmd }} | gzip > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).sql.gz {{ backup_cmd }} | gzip > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).sql.gz
find {{ backup_dir }} -type f -name 'backup_*.sql.gz' -mtime +{{ keep_days }} -delete find {{ backup_dir }} -type f -name 'backup_*.sql.gz' -mtime +{{ keep_days }} -delete

View File

@@ -37,6 +37,9 @@
namespace: "{{ namespace_longhorn }}" namespace: "{{ namespace_longhorn }}"
name: "{{ backup_volume_name }}" name: "{{ backup_volume_name }}"
register: pvc_info register: pvc_info
retries: 3
delay: 3
until: pvc_info.resources is defined
- name: Extraire le nom du volume - name: Extraire le nom du volume
set_fact: set_fact:
@@ -75,29 +78,37 @@
path: "/metadata/labels/recurring-job.longhorn.io~1{{ recurring_job }}" path: "/metadata/labels/recurring-job.longhorn.io~1{{ recurring_job }}"
value: "enabled" value: "enabled"
- name: Lancer un pod temporaire pour déclencher NFS - name: Lancer un Deployment pour déclencher NFS
tags: never tags: never
kubernetes.core.k8s: kubernetes.core.k8s:
state: present state: present
definition: definition:
apiVersion: v1 apiVersion: apps/v1
kind: Pod kind: Deployment
metadata: metadata:
name: rwx-nfs name: rwx-nfs
namespace: "{{ namespace_longhorn }}" namespace: "{{ namespace_longhorn }}"
spec: spec:
containers: replicas: 1
- name: busybox selector:
image: busybox matchLabels:
command: ["sleep", "infinity"] app: rwx-nfs
# command: ["sh", "-c", "sleep 600"] template:
volumeMounts: metadata:
- mountPath: "/mnt/backups" labels:
name: backup-vol app: rwx-nfs
volumes: spec:
- name: backup-vol containers:
persistentVolumeClaim: - name: busybox
claimName: "{{ backup_volume_name }}" image: busybox
command: ["sleep", "infinity"]
volumeMounts:
- mountPath: "/mnt/backups"
name: backup-vol
volumes:
- name: backup-vol
persistentVolumeClaim:
claimName: "{{ backup_volume_name }}"
- name: Attendre que le pod rwx-nfs soit Running - name: Attendre que le pod rwx-nfs soit Running
tags: never tags: never
@@ -105,7 +116,8 @@
api_version: v1 api_version: v1
kind: Pod kind: Pod
namespace: "{{ namespace_longhorn }}" namespace: "{{ namespace_longhorn }}"
name: rwx-nfs label_selectors:
- app = rwx-nfs
register: pod_info register: pod_info
until: pod_info.resources[0].status.phase == "Running" until: pod_info.resources[0].status.phase == "Running"
retries: 30 retries: 30

View File

@@ -35,12 +35,12 @@
password: '{{ pg_conf.POSTGRES_PASSWORD }}' password: '{{ pg_conf.POSTGRES_PASSWORD }}'
gitea_admin_token: '{{ vault_GITEA_ADMIN_TOKEN }}' gitea_admin_token: '{{ vault_GITEA_ADMIN_TOKEN }}'
- name: share VAULT CA # - name: share VAULT CA
block: # block:
- name: read traefik CA # - name: read traefik CA
include_role: # include_role:
name: arcodange.factory.traefik_certs # name: arcodange.factory.traefik_certs
post_tasks: post_tasks:
- include_role: - include_role:

View File

@@ -15,8 +15,8 @@
- include_role: - include_role:
name: arcodange.factory.playwright name: arcodange.factory.playwright
- include_role: # - include_role:
name: arcodange.factory.traefik_certs # name: arcodange.factory.traefik_certs
- set_fact: - set_fact:
gitea_app: '{{ playwright_job.stdout | from_json }}' gitea_app: '{{ playwright_job.stdout | from_json }}'