Fixes: set preferred_ip, since eth0 is the new interface

This commit is contained in:
2025-10-09 17:27:42 +02:00
parent 83410d9eb1
commit 9b09e6bd86
9 changed files with 62 additions and 29 deletions

View File

@@ -34,7 +34,8 @@ gitea:
GITEA__mailer__SMTP_PORT: 465
GITEA__mailer__PASSWD: '{{ gitea_vault.GITEA__mailer__PASSWD }}'
GITEA__server__SSH_PORT: 2222
GITEA__server__SSH_DOMAIN: "{{ lookup('dig', groups.gitea[0]) }}"
GITEA__server__SSH_DOMAIN: "{{ hostvars[groups.gitea[0]]['preferred_ip'] }}"
# GITEA__server__SSH_DOMAIN: "{{ lookup('dig', groups.gitea[0]) }}" # might work again if deactivate rpi wifi
GITEA__server__SSH_LISTEN_PORT: 22
GITEA_server__DOMAIN: localhost
GITEA_server__HTTP_PORT: 3000

View File

@@ -2,12 +2,15 @@ raspberries:
hosts:
pi1:
ansible_host: pi1.home # setup http://192.168.1.1/ Réseau/DNS
preferred_ip: 192.168.1.201
ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
pi2:
ansible_host: pi2.home
preferred_ip: 192.168.1.202
ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
pi3:
ansible_host: pi3.home
preferred_ip: 192.168.1.203
ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
internetPi1:

View File

@@ -118,12 +118,12 @@
run_once: true
- name: k3s
tags: never
tags: never,k3s
ansible.builtin.import_playbook: k3s.orchestration.site
# ansible.builtin.import_playbook: k3s.orchestration.upgrade
# ansible.builtin.import_playbook: k3s.orchestration.reset
vars:
k3s_version: v1.32.7+k3s1
k3s_version: v1.34.1+k3s1
extra_server_args: "--docker --disable traefik"
extra_agent_args: "--docker"
api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
@@ -217,8 +217,23 @@
gitea:
loadBalancer:
servers:
- url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000"
- url: "http://{{ hostvars[groups.gitea[0]]['preferred_ip'] }}:3000"
# - url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000" # might work again if deactivate rpi wifi
routers:
dashboard:
rule: Host(`traefik.arcodange.duckdns.org`)
service: api@internal
middlewares:
- localIp
tls:
certResolver: letsencrypt
domains:
- main: "arcodange.duckdns.org"
sans:
- "traefik.arcodange.duckdns.org"
entryPoints:
- websecure
- web
acme-challenge:
rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
service: acme-http@internal
@@ -266,7 +281,7 @@
# default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
# current is https://github.com/traefik/traefik-helm-chart/blob/v30.1.0/traefik/values.yaml
nodeSelector:
node-role.kubernetes.io/master: 'true' # make predictible choice of node to direct https traffic to this node and avoid NAT/loss of client IP
node-role.kubernetes.io/control-plane: 'true' # make predictible choice of node to direct https traffic to this node and avoid NAT/loss of client IP
service:
spec:
externalTrafficPolicy: Local

View File

@@ -203,7 +203,7 @@
gitea_token_replace: true
- name: Figure out k3s master node
shell:
kubectl get nodes -l node-role.kubernetes.io/master=true -o name | sed s'#node/##'
kubectl get nodes -l node-role.kubernetes.io/control-plane=true -o name | sed s'#node/##'
register: get_k3s_master_node
changed_when: false
- name: Get kubernetes server internal url

View File

@@ -35,6 +35,7 @@
content: |
#!/bin/bash
set -e
mkdir -p {{ backup_dir }}
{{ backup_cmd }} > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).gitea.gz
find {{ backup_dir }} -type f -name 'backup_*.gitea.gz' -mtime +{{ keep_days }} -delete

View File

@@ -33,6 +33,7 @@
content: |
#!/bin/bash
set -e
mkdir -p {{ backup_dir }}
{{ backup_cmd }} | gzip > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).sql.gz
find {{ backup_dir }} -type f -name 'backup_*.sql.gz' -mtime +{{ keep_days }} -delete

View File

@@ -37,6 +37,9 @@
namespace: "{{ namespace_longhorn }}"
name: "{{ backup_volume_name }}"
register: pvc_info
retries: 3
delay: 3
until: pvc_info.resources is defined
- name: Extraire le nom du volume
set_fact:
@@ -75,29 +78,37 @@
path: "/metadata/labels/recurring-job.longhorn.io~1{{ recurring_job }}"
value: "enabled"
- name: Lancer un pod temporaire pour déclencher NFS
- name: Lancer un Deployment pour déclencher NFS
tags: never
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Pod
apiVersion: apps/v1
kind: Deployment
metadata:
name: rwx-nfs
namespace: "{{ namespace_longhorn }}"
spec:
containers:
- name: busybox
image: busybox
command: ["sleep", "infinity"]
# command: ["sh", "-c", "sleep 600"]
volumeMounts:
- mountPath: "/mnt/backups"
name: backup-vol
volumes:
- name: backup-vol
persistentVolumeClaim:
claimName: "{{ backup_volume_name }}"
replicas: 1
selector:
matchLabels:
app: rwx-nfs
template:
metadata:
labels:
app: rwx-nfs
spec:
containers:
- name: busybox
image: busybox
command: ["sleep", "infinity"]
volumeMounts:
- mountPath: "/mnt/backups"
name: backup-vol
volumes:
- name: backup-vol
persistentVolumeClaim:
claimName: "{{ backup_volume_name }}"
- name: Attendre que le pod rwx-nfs soit Running
tags: never
@@ -105,7 +116,8 @@
api_version: v1
kind: Pod
namespace: "{{ namespace_longhorn }}"
name: rwx-nfs
label_selectors:
- app = rwx-nfs
register: pod_info
until: pod_info.resources[0].status.phase == "Running"
retries: 30

View File

@@ -35,12 +35,12 @@
password: '{{ pg_conf.POSTGRES_PASSWORD }}'
gitea_admin_token: '{{ vault_GITEA_ADMIN_TOKEN }}'
- name: share VAULT CA
block:
# - name: share VAULT CA
# block:
- name: read traefik CA
include_role:
name: arcodange.factory.traefik_certs
# - name: read traefik CA
# include_role:
# name: arcodange.factory.traefik_certs
post_tasks:
- include_role:

View File

@@ -15,8 +15,8 @@
- include_role:
name: arcodange.factory.playwright
- include_role:
name: arcodange.factory.traefik_certs
# - include_role:
# name: arcodange.factory.traefik_certs
- set_fact:
gitea_app: '{{ playwright_job.stdout | from_json }}'