use self-signed cert for internal domain arcodange.lab

2025-12-31 17:38:04 +01:00
parent 91219c49f1
commit 5b3c896a25
48 changed files with 1549 additions and 675 deletions

View File

@@ -19,10 +19,10 @@ concurrency:
.vault_step: &vault_step
name: read vault secret
uses: https://gitea.arcodange.duckdns.org/arcodange-org/vault-action.git@main
uses: https://gitea.arcodange.lab/arcodange-org/vault-action.git@main
id: vault-secrets
with:
url: https://vault.arcodange.duckdns.org
url: https://vault.arcodange.lab
jwtGiteaOIDC: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
role: gitea_cicd
method: jwt

View File

@@ -17,10 +17,10 @@ concurrency:
.vault_step: &vault_step
name: read vault secret
uses: https://gitea.arcodange.duckdns.org/arcodange-org/vault-action.git@main
uses: https://gitea.arcodange.lab/arcodange-org/vault-action.git@main
id: vault-secrets
with:
url: https://vault.arcodange.duckdns.org
url: https://vault.arcodange.lab
jwtGiteaOIDC: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
role: gitea_cicd
method: jwt
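For context, `.vault_step: &vault_step` defines a YAML anchor; anchors only resolve within a single file, which is why each workflow re-declares the step before splicing it into a job with a merge key. A minimal sketch of how such an anchored step is typically consumed (the job name and layout are illustrative, not part of this commit):

jobs:
  deploy:
    needs: gitea_vault_auth
    steps:
      # merge the anchored step definition into this job's step list
      - <<: *vault_step
      # later steps can read the secrets exposed by the step with id: vault-secrets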

View File

@@ -39,7 +39,7 @@ gitea:
GITEA__server__SSH_LISTEN_PORT: 22
GITEA__server__DOMAIN: localhost
GITEA__server__HTTP_PORT: 3000
GITEA__server__ROOT_URL: https://gitea.arcodange.duckdns.org/
GITEA__server__ROOT_URL: https://gitea.arcodange.lab/
GITEA__server__START_SSH_SERVER: true
GITEA__server__OFFLINE_MODE: true
GITEA__service__DISABLE_REGISTRATION: true

View File

@@ -0,0 +1,9 @@
step_ca_primary: pi1
step_ca_fqdn: ssl-ca.arcodange.lab
step_ca_user: step
step_ca_home: /home/step
step_ca_dir: /home/step/.step
step_ca_listen_address: ":8443"
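These group variables feed the step_ca role defaults added later in this commit. In particular, the CA URL is built by plain concatenation of the FQDN and the listen address, so these values resolve to the endpoint that cert-manager will use:

# from the step_ca role defaults (later in this commit)
step_ca_url: "https://{{ step_ca_fqdn }}{{ step_ca_listen_address }}"
# => https://ssl-ca.arcodange.lab:8443 with the values above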

View File

@@ -0,0 +1,13 @@
$ANSIBLE_VAULT;1.1;AES256
35633437343661363030323466313735373033373566643530653539633133623462333337393037
6336653635366439363031616637313339373465666433320a653936396438373132623264386665
66623330343439613636353963373139363531613761613864623262623661666565373137306461
3062646337353331300a636164643462343163303931646538653537323831623736393634343137
39376139306165356138383664373334353364316435303265643965386135356561316130316239
64393436363436393339393130383764353231333361313565333934313136666234356433626437
35656666386538653963653334393262366562656631376636353538383661386661366438366133
64346338666666323562313363363836613439633931306437393132616134666230613936623634
34383366663031336236316566626666303764323631363239636461396366323733393731376563
65356630326536333133393335383766616631323732333262396464326165366532383066363761
37303033316135616661623431623836313965373930376361656334323336656561643336616265
36666235623564383132
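This vaulted file presumably holds the CA passwords that the role defaults below dereference; when the playbook runs with the vault password available (for instance via `--ask-vault-pass`), the variables decrypt transparently. The wiring, as it appears in the role defaults:

step_ca_password: "{{ vault_step_ca_password }}"
step_ca_jwk_password: "{{ vault_step_ca_jwk_password }}"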

View File

@@ -47,6 +47,12 @@ pihole:
pi1:
pi3:
step_ca:
hosts:
pi1:
pi2:
pi3:
all:
children:
raspberries:

View File

@@ -1,429 +1,2 @@
---
- name: Prepare disks for longhorn
ansible.builtin.import_playbook: ./prepare_disks.yml
- name: System Docker
hosts: raspberries:&local
gather_facts: yes
tags: never
become: yes
pre_tasks:
- name: set hostname
ansible.builtin.hostname:
name: "{{ inventory_hostname }}"
become: yes
when: inventory_hostname != ansible_hostname
- name: Prevent apt source conflict
ansible.builtin.file:
state: absent
path: /etc/apt/sources.list.d/docker.list
become: yes
- name: Install role geerlingguy.docker
community.general.ansible_galaxy_install:
type: role
name: geerlingguy.docker
run_once: true
delegate_to: localhost
become: false
- ansible.builtin.debug:
var: ansible_facts.machine
tasks:
- include_role:
name: geerlingguy.docker
post_tasks:
- name: adding existing user '{{ ansible_user }}' to group docker
user:
name: '{{ ansible_user }}'
groups: docker
append: yes
become: yes
#---
- name: Install iSCSI client for Longhorn on Raspberry Pi
hosts: raspberries:&local
become: yes
tasks:
- name: Install open-iscsi
ansible.builtin.apt:
name: open-iscsi
state: present
update_cache: yes
- name: Enable and start iSCSI service
ansible.builtin.service:
name: iscsid
state: started
enabled: yes
- name: Install cryptsetup
ansible.builtin.apt:
name: cryptsetup
state: present
update_cache: yes
- name: Load the dm_crypt kernel module
ansible.builtin.modprobe:
name: dm_crypt
state: present
- name: Ensure the dm_crypt module is loaded at boot
ansible.builtin.lineinfile:
path: /etc/modules
line: dm_crypt
state: present
- name: Create the longhorn directory
ansible.builtin.file:
path: /mnt/arcodange/longhorn
state: directory
owner: pi
group: docker
mode: '0774'
ignore_errors: true
#---
- name: System K3S
hosts: raspberries:&local
tags: never
tasks:
- name: prepare inventory for k3s external playbook
tags: always
ansible.builtin.add_host:
hostname: "{{ item }}"
groups:
- k3s_cluster
- "{{ ansible_loop.first | ternary('server', 'agent') }}"
loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
loop_control:
extended: true
extended_allitems: false
- name: Install collection k3s.orchestration
local_action:
module: community.general.ansible_galaxy_install
type: collection
name: git+https://github.com/k3s-io/k3s-ansible
run_once: true
- name: Install socat for kubectl port forwarding
ansible.builtin.apt:
name: socat
state: present
update_cache: yes
- name: k3s
tags: never,k3s
ansible.builtin.import_playbook: k3s.orchestration.site
# ansible.builtin.import_playbook: k3s.orchestration.upgrade
# ansible.builtin.import_playbook: k3s.orchestration.reset
vars:
k3s_version: v1.34.1+k3s1
extra_server_args: "--docker --disable traefik"
extra_agent_args: "--docker"
api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
- name: how to reach k3s
hosts: server
tasks:
- name: copy /etc/rancher/k3s/k3s.yaml to ~/.kube/config from the k3s server and replace 127.0.0.1 with the server ip or hostname
run_once: true
block:
- ansible.builtin.fetch:
src: /etc/rancher/k3s/k3s.yaml
dest: ~/.kube/config
flat: true
become: true
run_once: true
- local_action:
module: ansible.builtin.replace
path: ~/.kube/config
regexp: 'server: https://127.0.0.1:6443'
replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'
# - name: setup hard disk
# tags: never
# ansible.builtin.import_playbook: ./setup/hard_disk_v2.yml
# # vars:
# # hard_disk__partitions:
# # nfs: []
- name: setup longhorn for volumes https://docs.k3s.io/helm
become: true
ansible.builtin.copy:
dest: /var/lib/rancher/k3s/server/manifests/longhorn-install.yaml
content: |-
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
annotations:
helmcharts.cattle.io/managed-by: helm-controller
finalizers:
- wrangler.cattle.io/on-helm-chart-remove
generation: 1
name: longhorn-install
namespace: kube-system
spec:
version: v1.9.1
chart: longhorn
repo: https://charts.longhorn.io
failurePolicy: abort
targetNamespace: longhorn-system
createNamespace: true
valuesContent: |-
defaultSettings:
defaultDataPath: /mnt/arcodange/longhorn
vars:
longhorn_helm_values: {} # https://github.com/longhorn/longhorn/blob/master/chart/values.yaml
- name: customize k3s traefik configuration https://docs.k3s.io/helm
block:
- name: Get my public IP
community.general.ipify_facts:
- become: true
ansible.builtin.copy:
dest: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
content: |-
apiVersion: v1
data:
dynamic.yaml: |-
{{ traefik_config_yaml | to_nice_yaml | indent( width=4 ) }}
kind: ConfigMap
metadata:
name: traefik-configmap
namespace: kube-system
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: traefik
namespace: kube-system
spec:
repo: https://traefik.github.io/charts
chart: traefik
version: v37.4.0
targetNamespace: kube-system
valuesContent: |-
{{ traefik_helm_values | to_nice_yaml | indent( width=4 ) }}
---
apiVersion: v1
kind: Service
metadata:
name: gitea-external
namespace: kube-system
spec:
type: ExternalName
externalName: {{ hostvars[groups.gitea[0]]['preferred_ip'] }}
ports:
- port: 3000
targetPort: 3000
vars:
traefik_config_yaml:
http:
services:
gitea:
loadBalancer:
servers:
- url: "http://{{ hostvars[groups.gitea[0]]['preferred_ip'] }}:3000"
# - url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000" # might work again if the rpi wifi is deactivated
routers:
dashboard:
rule: Host(`traefik.arcodange.duckdns.org`)
service: api@internal
middlewares:
- localIp
tls:
certResolver: letsencrypt
domains:
- main: "arcodange.duckdns.org"
sans:
- "traefik.arcodange.duckdns.org"
entryPoints:
- websecure
- web
acme-challenge:
rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
service: acme-http@internal
tls:
certResolver: letsencrypt
domains:
- main: "arcodange.duckdns.org"
sans:
- "*.arcodange.duckdns.org"
entryPoints:
- websecure
- web
gitea:
rule: Host(`gitea.arcodange.duckdns.org`)
service: gitea
middlewares:
- localIp
tls:
certResolver: letsencrypt
domains:
- main: "arcodange.duckdns.org"
sans:
- "gitea.arcodange.duckdns.org"
entryPoints:
- websecure
middlewares:
localIp:
ipAllowList:
sourceRange:
- "172.16.0.0/12"
- "10.42.0.0/16"
- "192.168.1.0/24"
- "{{ ipify_public_ip }}/32"
# - "0.0.0.0/0"
# ipStrategy:
# depth: 1
traefik_helm_values:
deployment:
kind: "Deployment"
initContainers:
- name: volume-permissions
image: busybox:latest
command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"]
volumeMounts:
- name: data
mountPath: /data
# default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
# current is https://github.com/traefik/traefik-helm-chart/blob/v37.4.0/traefik/values.yaml
nodeSelector:
node-role.kubernetes.io/control-plane: 'true' # make a predictable choice of node to direct https traffic to this node and avoid NAT/loss of client IP
service:
spec:
externalTrafficPolicy: Local
ports:
traefik:
expose:
default: true
web:
forwardedHeaders:
trustedIPs: ["10.42.0.0/16"] #default k3s cidr
ingressRoute:
dashboard:
enabled: true
globalArguments: [] # deactivate --global.sendanonymoususage
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LEGO_DISABLE_CNAME_SUPPORT
value: 'true'
logs:
general:
level: INFO
# format: json
access:
enabled: true
timezone: Europe/Paris
# format: json
podSecurityContext:
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
fsGroup: 65532 # else the persistent volume might be owned by root and be unwritable
persistence:
# -- Enable persistence using Persistent Volume Claims
# ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
# It can be used to store TLS certificates, see `storage` in certResolvers
enabled: true
name: data
# existingClaim: ""
accessMode: ReadWriteOnce
size: 128Mi
storageClass: "longhorn"
# volumeName: ""
path: /data
annotations: {}
volumes:
- name: traefik-configmap
mountPath: /config
type: configMap
experimental:
plugins:
crowdsec-bouncer:
moduleName: github.com/maxlerebourg/crowdsec-bouncer-traefik-plugin #https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin
version: v1.3.3
additionalArguments:
- '--providers.file.filename=/config/dynamic.yaml'
- '--providers.kubernetesingress.ingressendpoint.publishedservice=kube-system/traefik'
- "--providers.kubernetescrd.allowcrossnamespace=true"
- "--providers.kubernetescrd.allowExternalNameServices=true"
certificatesResolvers:
letsencrypt:
acme:
# for challenge options cf. https://doc.traefik.io/traefik/https/acme/
email: arcodange@gmail.com
tlsChallenge: true
dnsChallenge:
# requires env variable DUCKDNS_TOKEN
provider: duckdns
propagation:
delayBeforeChecks: 120
disableChecks: true
resolvers:
- "1.1.1.1:53"
- "8.8.8.8:53"
httpChallenge:
entryPoint: "web"
# It has to match the path with a persistent volume
storage: /data/acme.json
envFrom:
- secretRef:
name: traefik-duckdns-token
# MY_TOKEN=<my token (see https://www.duckdns.org/domains)>
# kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system
- name: touch manifests/traefik-v3.yaml to trigger update
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
state: touch
become: true
# ---
- name: redeploy traefik
hosts: localhost
tasks:
- name: delete old traefik deployment
kubernetes.core.k8s:
api_version: v1
name: traefik
kind: Deployment
namespace: kube-system
state: "absent"
- name: delete old deployment job so the k3s helm controller redeploy with our new configuration
kubernetes.core.k8s:
api_version: batch/v1
name: helm-install-traefik
kind: Job
namespace: kube-system
state: "absent"
- name: get traefik deployment
kubernetes.core.k8s_info:
api_version: v1
name: traefik
kind: Deployment
namespace: kube-system
wait: true
register: traefik_deployment
- ansible.builtin.debug:
var: traefik_deployment
- name: system
ansible.builtin.import_playbook: ./system/system.yml

View File

@@ -36,8 +36,6 @@
- /var/run/docker.sock:/var/run/docker.sock
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
extra_hosts:
gitea.arcodange.duckdns.org: '{{ lookup("dig", "gitea.arcodange.duckdns.org") }}'
configs:
- config.yaml
configs:
@@ -217,6 +215,15 @@
- set_fact:
k3s_master_node: "{{ get_k3s_master_node.stdout }}"
k3s_internal_server_url: "{{ get_k3s_internal_server_url.stdout }}"
- name: Read Step CA root certificate from k3s master
become: true
delegate_to: "{{ k3s_master_node }}"
slurp:
src: /home/step/.step/certs/root_ca.crt
register: step_ca_root_cert
- name: Decode Step CA root certificate
set_fact:
step_ca_root_cert_pem: "{{ step_ca_root_cert.content | b64decode }}"
- name: Install Argo CD
become: true
delegate_to: "{{ k3s_master_node }}"
@@ -226,7 +233,7 @@
password: "{{ argocd_token }}"
argocd_helm_values: # https://github.com/argoproj/argo-helm/blob/main/charts/argo-cd/values.yaml
global:
domain: argocd.arcodange.duckdns.org
domain: argocd.arcodange.lab
configs:
cm:
kustomize.buildOptions: "--enable-helm"
@@ -242,6 +249,15 @@
metadata:
name: argocd
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-tls-certs-cm
namespace: argocd
data:
gitea.arcodange.lab: |
{{ step_ca_root_cert_pem | indent(4) }}
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
@@ -264,12 +280,12 @@
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
traefik.ingress.kubernetes.io/router.tls.certresolver: letsencrypt
traefik.ingress.kubernetes.io/router.tls.domains.0.main: arcodange.duckdns.org
traefik.ingress.kubernetes.io/router.tls.domains.0.sans: argocd.arcodange.duckdns.org
traefik.ingress.kubernetes.io/router.tls.domains.0.main: arcodange.lab
traefik.ingress.kubernetes.io/router.tls.domains.0.sans: argocd.arcodange.lab
traefik.ingress.kubernetes.io/router.middlewares: localIp@file
spec:
rules:
- host: argocd.arcodange.duckdns.org
- host: argocd.arcodange.lab
http:
paths:
- path: /
@@ -289,7 +305,7 @@
argocd.argoproj.io/secret-type: repository
stringData:
type: git
url: https://gitea.arcodange.duckdns.org/arcodange-org/factory
url: https://gitea.arcodange.lab/arcodange-org/factory
---
apiVersion: v1
kind: Secret
@@ -300,7 +316,7 @@
argocd.argoproj.io/secret-type: repo-creds
stringData:
type: git
url: https://gitea.arcodange.duckdns.org/arcodange-org
url: https://gitea.arcodange.lab/arcodange-org
password: {{ gitea_credentials.password }}
username: {{ gitea_credentials.username }}
---
@@ -312,7 +328,7 @@
spec:
project: default
source:
repoURL: https://gitea.arcodange.duckdns.org/arcodange-org/factory
repoURL: https://gitea.arcodange.lab/arcodange-org/factory
targetRevision: HEAD
path: argocd
destination:
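Once applied, Argo CD should trust the internal CA for the repository host. A quick verification sketch using kubernetes.core.k8s_info (the module family already used in this repo); the play itself is illustrative:

- name: Verify Argo CD trusts the internal CA
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Read the argocd-tls-certs-cm ConfigMap
      kubernetes.core.k8s_info:
        api_version: v1
        kind: ConfigMap
        name: argocd-tls-certs-cm
        namespace: argocd
      register: tls_cm
    - name: Show the PEM trusted for gitea.arcodange.lab
      ansible.builtin.debug:
        msg: "{{ tls_cm.resources[0].data['gitea.arcodange.lab'] }}"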

View File

@@ -1,3 +0,0 @@
---
- name: dns
ansible.builtin.import_playbook: ./dns/dns.yml

View File

@@ -1,213 +1,11 @@
---
- name: Install and configure Pi-hole on pi1
hosts: raspberries:&local #pihole # change with pihole group
hosts: raspberries:&local
become: yes
vars:
pihole_custom_dns:
".arcodange.duckdns.org": "{{ hostvars['pi1'].preferred_ip }}"
".arcodange.lab": "{{ hostvars['pi1'].preferred_ip }}"
roles:
- pihole
# tasks:
# - name: Suggest the manual Pi-hole installation command
# debug:
# msg: |
# Please install Pi-hole manually on this host with the following command:
# # curl -sSL https://install.pi-hole.net | sudo bash
# The installation will be verified automatically within the next 10 minutes.
# - name: Wait for Pi-hole to be installed (check for service and config file)
# wait_for:
# path: /etc/pihole/pihole-FTL.db
# state: present
# timeout: 600 # 10 minutes
# register: pihole_config_ready
# - name: Check that the pihole-FTL service is active
# wait_for:
# port: 53 # default internal Pi-hole port
# state: started
# timeout: 60
# when: pihole_config_ready is succeeded
# - name: Edit /etc/pihole/pihole.toml to change the port
# replace:
# path: /etc/pihole/pihole.toml
# regexp: '^\s*port\s*=\s*".*"'
# replace: ' port = "{{ pihole_ports }}"'
# - name: Edit /etc/pihole/pihole.toml to allow all DNS origins
# replace:
# path: /etc/pihole/pihole.toml
# regexp: '^\s*listeningMode\s*=\s*".*"'
# replace: ' listeningMode = "ALL"'
# - name: Enable loading of /etc/dnsmasq.d/ files in Pi-hole
# lineinfile:
# path: /etc/pihole/pihole.toml
# regexp: '^\s*etc_dnsmasq_d\s*='
# line: ' etc_dnsmasq_d = true'
# state: present
# create: yes
# - name: Create the dnsmasq file for the wildcard
# copy:
# dest: /etc/dnsmasq.d/10-arcodange-wildcard.conf
# content: |
# address=/{{ pihole_wildcard_rule }}/{{ pihole_dns_ip }}
# owner: root
# group: root
# mode: '0644'
# - name: Create local DNS entries for the RPis (pi*.home)
# copy:
# dest: /etc/dnsmasq.d/20-rpis.conf
# owner: root
# group: root
# mode: '0644'
# content: |
# # Generated by Ansible Raspberry Pi local DNS
# {% for host in groups['raspberries']
# if host is match('^pi[0-9]+$')
# and hostvars[host].preferred_ip is defined %}
# address=/{{ host }}.home/{{ hostvars[host].preferred_ip }}
# {% endfor %}
# - name: Configure resolv.conf for crossed DNS (pi1 and pi3 resolve via each other)
# copy:
# dest: /etc/resolv.conf
# owner: root
# group: root
# mode: '0644'
# content: |
# {% if inventory_hostname == 'pi1' %}
# nameserver {{ hostvars['pi3'].preferred_ip }}
# {% elif inventory_hostname == 'pi3' %}
# nameserver {{ hostvars['pi1'].preferred_ip }}
# {% endif %}
# - name: Restart the Pi-hole FTL service
# service:
# name: pihole-FTL
# state: restarted
# ############################################################
# # 2. DNS configuration of the Raspberry Pi clients
# ############################################################
# - name: Configure Docker via a systemd override (Pi-hole DNS)
# hosts: raspberries:&local
# become: yes
# vars:
# docker_dns_servers:
# - "{{ hostvars['pi1']['preferred_ip'] }}"
# - "8.8.8.8"
# - "1.1.1.1"
# docker_override_dir: /etc/systemd/system/docker.service.d
# docker_override_file: /etc/systemd/system/docker.service.d/override.conf
# tasks:
# - name: Create the systemd override directory for Docker
# file:
# path: "{{ docker_override_dir }}"
# state: directory
# owner: root
# group: root
# mode: '0755'
# tags: docker, override
# - name: Deploy the systemd override for dockerd (DNS + IPv4 only)
# copy:
# dest: "{{ docker_override_file }}"
# owner: root
# group: root
# mode: '0644'
# content: |
# [Service]
# ExecStart=
# ExecStart=/usr/bin/dockerd \
# -H fd:// \
# --containerd=/run/containerd/containerd.sock \
# {% for dns in docker_dns_servers %}
# --dns={{ dns }} \
# {% endfor %}
# --ipv6=false
# notify:
# - Reexec systemd
# - Restart Docker
# - Restart k3s
# tags: docker, override
# # -------- ROLLBACK --------
# - name: Rollback - Remove the Docker systemd override
# file:
# path: "{{ docker_override_file }}"
# state: absent
# notify:
# - Reexec systemd
# - Restart Docker
# - Restart k3s
# when: "'rollbacks' in ansible_run_tags"
# handlers:
# - name: Reexec systemd
# command: systemctl daemon-reexec
# - name: Restart Docker
# service:
# name: docker
# state: restarted
# - name: Restart k3s
# service:
# name: k3s
# state: restarted
# ignore_errors: yes
# - name: Restart k3s-agent
# service:
# name: k3s-agent
# state: restarted
# ignore_errors: yes
## 3. configure traefik
# apiVersion: v1
# kind: Service
# metadata:
# name: pihole-external
# namespace: kube-system
# spec:
# type: ExternalName
# externalName: {{ hostvars[groups.pihole[0]]['preferred_ip'] }}
# ports:
# - port: 3000
# targetPort: 3000
# ---
# apiVersion: traefik.io/v1alpha1
# kind: IngressRoute
# metadata:
# name: pihole
# namespace: kube-system
# spec:
# entryPoints:
# - web
# routes:
# - match: Host(`gitea.arcodange.fr`)
# kind: Rule
# middlewares:
# - name: crowdsec
# namespace: kube-system
# services:
# - kind: Service
# name: gitea-external
# namespace: kube-system
# port: 3000
- pihole

View File

@@ -24,4 +24,52 @@
insertafter: '^search'
line: "nameserver {{ item }}"
state: present
loop: "{{ pihole_dns_servers }}"
# 3⃣ Define DNS priorities per interface
- name: Set DNS priority mapping
set_fact:
interface_dns_priority:
eth0: 50
wlan0: 100
# 5⃣ Configure the Pi-hole DNS servers on all active interfaces
- name: Get active connections
command: nmcli -t -f NAME,DEVICE connection show --active
register: active_connections
changed_when: false
- name: Get current DNS for each active interface
vars:
iface_name: "{{ item.split(':')[1] }}"
conn_name: "{{ item.split(':')[0] }}"
loop: "{{ active_connections.stdout_lines }}"
when: item.split(':')[1] in interface_dns_priority
command: nmcli -g IP4.DNS connection show "{{ conn_name }}"
register: current_dns
changed_when: false
- name: Apply Pi-hole DNS if different
vars:
iface_name: "{{ item.split(':')[1] }}"
conn_name: "{{ item.split(':')[0] }}"
loop: "{{ active_connections.stdout_lines }}"
when: item.split(':')[1] in interface_dns_priority
command: >
nmcli connection modify "{{ conn_name }}"
ipv4.dns "{{ pihole_dns_servers | join(' ') }}"
ipv4.ignore-auto-dns yes
ipv4.dns-priority "{{ interface_dns_priority[iface_name] }}"
register: dns_changed
changed_when: dns_changed is defined and dns_changed.stdout != ""
- name: Reactivate interface if DNS changed
vars:
iface_name: "{{ item.split(':')[1] }}"
conn_name: "{{ item.split(':')[0] }}"
loop: "{{ active_connections.stdout_lines }}"
command: nmcli connection up "{{ conn_name }}"
when:
- item.split(':')[1] in interface_dns_priority
- dns_changed is defined and dns_changed.changed

View File

@@ -126,14 +126,14 @@
debug:
msg: >-
SSH key added successfully.
Visit https://gitea.arcodange.duckdns.org/user/settings/keys?verify_ssh={{ add_ssh_key_result.json.fingerprint }}
Visit https://gitea.arcodange.lab/user/settings/keys?verify_ssh={{ add_ssh_key_result.json.fingerprint }}
to verify your commit signatures with this key.
- set_fact:
gitea_org_name: arcodange-org
gitea_org_full_name: Arcodange
gitea_org_description: '🏹💻🪽'
gitea_org_website: https://www.arcodange.duckdns.org
gitea_org_website: https://www.arcodange.fr
gitea_org_location: Paris
gitea_org_avatar_img_path: '{{ inventory_dir }}/../img/arcodange-org.jpeg'

View File

@@ -3,7 +3,7 @@ APP_NAME = Arcodange repositories
[server]
DOMAIN = localhost
HTTP_PORT = 3000
ROOT_URL = https://gitea.arcodange.duckdns.org/
ROOT_URL = https://gitea.arcodange.lab/
DISABLE_SSH = false
SSH_PORT = 22
START_SSH_SERVER = true

View File

@@ -0,0 +1,21 @@
step_ca_primary: pi1
step_ca_user: step
step_ca_home: /home/step
step_ca_dir: /home/step/.step
step_ca_name: "Arcodange Lab CA"
step_ca_fqdn: ssl-ca.arcodange.lab
step_ca_listen_address: ":8443"
step_ca_password: "{{ vault_step_ca_password }}"
step_ca_force_reinit: false
step_ca_provisioner_name: cert-manager
step_ca_provisioner_type: JWK
step_ca_jwk_dir: "{{ step_ca_dir }}/provisioners"
step_ca_jwk_key: "{{ step_ca_jwk_dir }}/cert-manager.jwk"
step_ca_jwk_password: "{{ vault_step_ca_jwk_password }}"
step_ca_jwk_password_file: "{{ step_ca_dir }}/secrets/cert-manager.jwk.pass"
step_ca_url: "https://{{ step_ca_fqdn }}{{ step_ca_listen_address }}"
step_ca_root: "{{ step_ca_dir }}/certs/root_ca.crt"

View File

@@ -0,0 +1,4 @@
- name: restart step-ca
systemd:
name: step-ca
state: restarted

View File

@@ -0,0 +1,67 @@
# can be called with -e step_ca_force_reinit=true
# 1⃣ Check whether the CA is already initialized
- name: Check if CA already initialized
stat:
path: "{{ step_ca_dir }}/config/ca.json"
register: step_ca_initialized
when: inventory_hostname == step_ca_primary
# 2⃣ Stop step-ca if a reinit is forced
- name: Stop step-ca service (reinit)
systemd:
name: step-ca
state: stopped
when:
- inventory_hostname == step_ca_primary
- step_ca_force_reinit | bool
ignore_errors: true
# 3⃣ Fully wipe the CA data if a reinit is forced
- name: Wipe existing step-ca data
file:
path: "{{ step_ca_dir }}"
state: absent
when:
- inventory_hostname == step_ca_primary
- step_ca_force_reinit | bool
# 4⃣ Cleanly recreate the CA directory
- name: Recreate step-ca directory
file:
path: "{{ step_ca_dir }}"
state: directory
owner: "{{ step_ca_user }}"
group: "{{ step_ca_user }}"
mode: "0700"
when:
- inventory_hostname == step_ca_primary
- step_ca_force_reinit | bool
# 5⃣ Install the password file
- name: Install step-ca password file
copy:
dest: "{{ step_ca_home }}/.step-pass"
content: "{{ step_ca_password }}"
owner: "{{ step_ca_user }}"
group: "{{ step_ca_user }}"
mode: "0600"
when: inventory_hostname == step_ca_primary
# 6⃣ Initialize step-ca (non-interactive)
- name: Initialize step-ca
become: true
become_user: "{{ step_ca_user }}"
command: >
step ca init
--name "{{ step_ca_name }}"
--dns "{{ step_ca_fqdn }}"
--address "{{ step_ca_listen_address }}"
--provisioner admin
--password-file {{ step_ca_home }}/.step-pass
args:
creates: "{{ step_ca_dir }}/config/ca.json"
when:
- inventory_hostname == step_ca_primary
- step_ca_force_reinit | bool or not step_ca_initialized.stat.exists
notify: restart step-ca

View File

@@ -0,0 +1,51 @@
- name: Install base packages
apt:
name:
- curl
- vim
- gpg
- ca-certificates
state: present
update_cache: yes
install_recommends: no
- name: Download Smallstep apt signing key
get_url:
url: https://packages.smallstep.com/keys/apt/repo-signing-key.gpg
dest: /etc/apt/trusted.gpg.d/smallstep.asc
mode: "0644"
- name: Add Smallstep apt repository
copy:
dest: /etc/apt/sources.list.d/smallstep.list
mode: "0644"
content: |
deb [signed-by=/etc/apt/trusted.gpg.d/smallstep.asc] https://packages.smallstep.com/stable/debian debs main
- name: Update apt cache
apt:
update_cache: yes
- name: Install step-cli and step-ca
apt:
name:
- step-cli
- step-ca
state: present
- name: Create step user
user:
name: "{{ step_ca_user }}"
system: true
shell: /usr/sbin/nologin
home: "{{ step_ca_home }}"
- name: Secure step directory
file:
path: "{{ step_ca_dir }}"
owner: "{{ step_ca_user }}"
group: "{{ step_ca_user }}"
mode: "0700"
recurse: yes

View File

@@ -0,0 +1,5 @@
- import_tasks: install.yml
- import_tasks: init.yml
- import_tasks: sync.yml
- import_tasks: systemd.yml
- import_tasks: provisioners.yml

View File

@@ -0,0 +1,73 @@
- name: Ensure provisioner directory exists
file:
path: "{{ step_ca_jwk_dir }}"
state: directory
owner: "{{ step_ca_user }}"
group: "{{ step_ca_user }}"
mode: "0700"
when: inventory_hostname == step_ca_primary
- name: Check if JWK provisioner already exists
command: >
step ca provisioner list
--ca-url {{ step_ca_url }}
--root {{ step_ca_root }}
register: step_ca_provisioners
changed_when: false
become: true
become_user: "{{ step_ca_user }}"
when: inventory_hostname == step_ca_primary
- name: Check if cert-manager provisioner exists
set_fact:
step_ca_provisioner_exists: >-
{{
(step_ca_provisioners.stdout | from_json
| selectattr('name', 'equalto', step_ca_provisioner_name)
| list
| length) > 0
}}
when: inventory_hostname == step_ca_primary
- name: Install JWK password file
copy:
dest: "{{ step_ca_jwk_password_file }}"
content: "{{ step_ca_jwk_password }}"
owner: "{{ step_ca_user }}"
group: "{{ step_ca_user }}"
mode: "0400"
when: inventory_hostname == step_ca_primary
- name: Generate JWK key for cert-manager
command: >
step crypto jwk create
{{ step_ca_jwk_key }}.pub
{{ step_ca_jwk_key }}
--password-file "{{ step_ca_jwk_password_file }}"
args:
creates: "{{ step_ca_jwk_key }}"
become: true
become_user: "{{ step_ca_user }}"
when: inventory_hostname == step_ca_primary
- name: Add JWK provisioner to step-ca
command: >
step ca provisioner add {{ step_ca_provisioner_name }}
--type JWK
--public-key {{ step_ca_jwk_key }}.pub
--private-key {{ step_ca_jwk_key }}
become: true
become_user: "{{ step_ca_user }}"
when:
- inventory_hostname == step_ca_primary
- not (step_ca_provisioner_exists | bool)
notify: restart step-ca
- name: Secure JWK keys permissions
file:
path: "{{ step_ca_jwk_dir }}"
owner: "{{ step_ca_user }}"
group: "{{ step_ca_user }}"
mode: "0700"
recurse: yes
when: inventory_hostname == step_ca_primary
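To validate the new provisioner end to end, a short-lived test certificate can be requested from the running CA. A sketch using the standard `step ca certificate` flags; the subject name and output paths are illustrative:

- name: Request a test certificate via the cert-manager provisioner
  become: true
  become_user: "{{ step_ca_user }}"
  ansible.builtin.command: >
    step ca certificate test.arcodange.lab /tmp/test.crt /tmp/test.key
    --ca-url {{ step_ca_url }}
    --root {{ step_ca_root }}
    --provisioner {{ step_ca_provisioner_name }}
    --provisioner-password-file {{ step_ca_jwk_password_file }}
  args:
    creates: /tmp/test.crt
  when: inventory_hostname == step_ca_primary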

View File

@@ -0,0 +1,121 @@
# 1⃣ Lock on the primary (avoids concurrent double syncs)
- name: Create sync lock on primary
file:
path: "{{ step_ca_dir }}/.sync.lock"
state: touch
owner: "{{ step_ca_user }}"
group: "{{ step_ca_user }}"
mode: "0600"
delegate_to: "{{ step_ca_primary }}"
run_once: true
# 2⃣ Compute the CA checksum on the primary
- name: Compute deterministic checksum of CA directory on primary
shell: |
set -o pipefail
tar --sort=name \
--mtime='UTC 1970-01-01' \
--owner=0 --group=0 --numeric-owner \
-cf - {{ step_ca_dir }} \
| sha256sum | awk '{print $1}'
args:
executable: /bin/bash
register: step_ca_primary_checksum
changed_when: false
delegate_to: "{{ step_ca_primary }}"
run_once: true
# 3⃣ Load the previous checksum (if it exists)
- name: Load previous checksum (controller)
slurp:
src: /tmp/step-ca-sync.checksum # stored outside the sync dir, which is wiped after every sync
register: step_ca_previous_checksum
failed_when: false
changed_when: false
run_once: true
become: false
delegate_to: localhost
# 4⃣ Decide whether a synchronization is needed
- name: Decide if sync is required
set_fact:
step_ca_sync_required: >-
{{
step_ca_previous_checksum.content | default('') | b64decode
!= step_ca_primary_checksum.stdout
}}
run_once: true
- name: Ensure temporary sync directory exists on controller
file:
path: /tmp/step-ca-sync
state: directory
mode: "0700"
delegate_to: localhost
become: false
run_once: true
# 5⃣ Pull from the primary to the controller
- name: Fetch CA data from primary to controller
synchronize:
rsync_path: "sudo -u {{ step_ca_user }} rsync"
src: "{{ step_ca_dir }}/"
dest: "/tmp/step-ca-sync/"
mode: pull
recursive: yes
delete: no
delegate_to: localhost
become: false
when: step_ca_sync_required
run_once: true
# 6⃣ Save the new checksum (controller)
- name: Save new checksum on controller
copy:
dest: /tmp/step-ca-sync.checksum # outside the sync dir so it survives the wipe below
content: "{{ step_ca_primary_checksum.stdout }}"
mode: "0600"
when: step_ca_sync_required
run_once: true
become: false
delegate_to: localhost
# 7⃣ Push to the standby nodes
- name: Push CA data to standby nodes
synchronize:
rsync_path: "sudo -u {{ step_ca_user }} rsync"
src: "/tmp/step-ca-sync/"
dest: "{{ step_ca_dir }}/"
mode: push
recursive: yes
delete: no
when:
- inventory_hostname != step_ca_primary
- step_ca_sync_required
- name: Wipe temporary CA sync directory on controller
file:
path: /tmp/step-ca-sync
state: absent
delegate_to: localhost
run_once: true
become: false
when: step_ca_sync_required
# 8⃣ Enforce correct permissions (security)
- name: Fix step directory permissions
file:
path: "{{ step_ca_dir }}"
owner: "{{ step_ca_user }}"
group: "{{ step_ca_user }}"
mode: "0700"
recurse: yes
notify: restart step-ca
# 9⃣ Remove the lock on the primary
- name: Remove sync lock on primary
file:
path: "{{ step_ca_dir }}/.sync.lock"
state: absent
delegate_to: "{{ step_ca_primary }}"
run_once: true

View File

@@ -0,0 +1,23 @@
- name: Install step-ca systemd service
template:
src: step-ca.service.j2
dest: /etc/systemd/system/step-ca.service
mode: "0644"
- name: Reload systemd
systemd:
daemon_reload: yes
- name: Enable step-ca on primary
systemd:
name: step-ca
enabled: yes
state: started
when: inventory_hostname == step_ca_primary
- name: Disable step-ca on standby nodes
systemd:
name: step-ca
enabled: no
state: stopped
when: inventory_hostname != step_ca_primary
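Failover is manual by design: step-ca runs only on the primary, while the sync tasks keep the standbys' copy of {{ step_ca_dir }} current. Promoting a standby is therefore a one-variable change followed by a re-run of the role; note that the ssl-ca.arcodange.lab DNS record must follow the new primary as well. An illustrative override:

# group_vars/step_ca.yml (illustrative failover)
step_ca_primary: pi2
# re-running the role stops/disables step-ca on pi1 and starts it on pi2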

View File

@@ -0,0 +1,15 @@
[Unit]
Description=Smallstep CA
After=network.target
[Service]
User={{ step_ca_user }}
Group={{ step_ca_user }}
ExecStart=/usr/bin/step-ca \
--password-file {{ step_ca_home }}/.step-pass \
{{ step_ca_dir }}/config/ca.json
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,65 @@
# - name: step-ca
# ansible.builtin.import_playbook: step-ca.yml
- name: Fetch Step-CA root certificate
hosts: localhost
gather_facts: false
vars:
step_ca_primary: pi1
step_ca_user: step
step_ca_root: "/home/step/.step/certs/root_ca.crt"
tmp_dir: "/tmp/step-ca-cert-manager"
tasks:
- name: Ensure local temp directory exists
file:
path: "{{ tmp_dir }}"
state: directory
mode: "0700"
- name: Fetch root CA from step_ca_primary
fetch:
src: "{{ step_ca_root }}"
dest: "{{ tmp_dir }}/root_ca.crt"
flat: true
delegate_to: "{{ step_ca_primary }}"
become: true
become_user: "{{ step_ca_user }}"
run_once: true
# - name: Distribute Step-CA root certificate
# hosts: all
# gather_facts: true
# become: true
# vars:
# root_ca_source: "/tmp/step-ca-cert-manager/root_ca.crt"
# root_ca_filename: "arcodange-root.crt"
# tasks:
# - name: Ensure root CA file is copied to correct location
# copy:
# src: "{{ root_ca_source }}"
# dest: "{{ ca_dest_path }}"
# owner: root
# group: root
# mode: '0644'
# vars:
# ca_dest_path: >-
# {% if ansible_facts['os_family'] == 'Debian' %}
# /usr/local/share/ca-certificates/{{ root_ca_filename }}
# {% elif ansible_facts['os_family'] in ['RedHat', 'Fedora'] %}
# /etc/pki/ca-trust/source/anchors/{{ root_ca_filename }}
# {% else %}
# /etc/ssl/certs/{{ root_ca_filename }}
# {% endif %}
# - name: Update CA trust store
# command: "{{ ca_update_command }}"
# vars:
# ca_update_command: >-
# {% if ansible_facts['os_family'] == 'Debian' %}
# update-ca-certificates
# {% elif ansible_facts['os_family'] in ['RedHat', 'Fedora'] %}
# update-ca-trust
# {% else %}
# echo 'Please update the CA trust manually'
# {% endif %}

View File

@@ -0,0 +1,6 @@
---
- name: Setup step-ca on raspberries
hosts: step_ca #raspberries:&local
become: yes
roles:
- step_ca

View File

@@ -0,0 +1,41 @@
- name: Install iSCSI client for Longhorn on Raspberry Pi
hosts: raspberries:&local
become: yes
tasks:
- name: Install open-iscsi
ansible.builtin.apt:
name: open-iscsi
state: present
update_cache: yes
- name: Enable and start iSCSI service
ansible.builtin.service:
name: iscsid
state: started
enabled: yes
- name: Install cryptsetup
ansible.builtin.apt:
name: cryptsetup
state: present
update_cache: yes
- name: Load the dm_crypt kernel module
ansible.builtin.modprobe:
name: dm_crypt
state: present
- name: Ensure the dm_crypt module is loaded at boot
ansible.builtin.lineinfile:
path: /etc/modules
line: dm_crypt
state: present
- name: Create the longhorn directory
ansible.builtin.file:
path: /mnt/arcodange/longhorn
state: directory
owner: pi
group: docker
mode: '0774'
ignore_errors: true

View File

@@ -0,0 +1,316 @@
---
- name: System K3S
hosts: raspberries:&local
tasks:
- name: prepare inventory for k3s external playbook
tags: always
ansible.builtin.add_host:
hostname: "{{ item }}"
groups:
- k3s_cluster
- "{{ ansible_loop.first | ternary('server', 'agent') }}"
loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
loop_control:
extended: true
extended_allitems: false
- name: configure k3s addons (longhorn, traefik)
hosts: server
tasks:
- name: setup longhorn for volumes https://docs.k3s.io/helm
become: true
ansible.builtin.copy:
dest: /var/lib/rancher/k3s/server/manifests/longhorn-install.yaml
content: |-
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
annotations:
helmcharts.cattle.io/managed-by: helm-controller
finalizers:
- wrangler.cattle.io/on-helm-chart-remove
generation: 1
name: longhorn-install
namespace: kube-system
spec:
version: v1.9.1
chart: longhorn
repo: https://charts.longhorn.io
failurePolicy: abort
targetNamespace: longhorn-system
createNamespace: true
valuesContent: |-
defaultSettings:
defaultDataPath: /mnt/arcodange/longhorn
vars:
longhorn_helm_values: {} # https://github.com/longhorn/longhorn/blob/master/chart/values.yaml
- name: customize k3s traefik configuration https://docs.k3s.io/helm
block:
- name: Get my public IP
community.general.ipify_facts:
- become: true
ansible.builtin.copy:
dest: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
content: |-
apiVersion: v1
data:
dynamic.yaml: |-
{{ traefik_config_yaml | to_nice_yaml | indent( width=4 ) }}
kind: ConfigMap
metadata:
name: traefik-configmap
namespace: kube-system
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: traefik
namespace: kube-system
spec:
repo: https://traefik.github.io/charts
chart: traefik
version: v37.4.0
targetNamespace: kube-system
valuesContent: |-
{{ traefik_helm_values | to_nice_yaml | indent( width=4 ) }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: wildcard-arcodange-lab
namespace: kube-system
spec:
secretName: wildcard-arcodange-lab
issuerRef:
name: step-issuer
kind: StepClusterIssuer
group: certmanager.step.sm
dnsNames:
- arcodange.lab
- "*.arcodange.lab"
---
apiVersion: traefik.io/v1alpha1
kind: TLSStore
metadata:
name: default
namespace: kube-system
spec:
defaultCertificate:
secretName: wildcard-arcodange-lab
---
apiVersion: v1
kind: Service
metadata:
name: gitea-external
namespace: kube-system
spec:
type: ExternalName
externalName: {{ hostvars[groups.gitea[0]]['preferred_ip'] }}
ports:
- port: 3000
targetPort: 3000
vars:
traefik_config_yaml:
http:
services:
gitea:
loadBalancer:
servers:
- url: "http://{{ hostvars[groups.gitea[0]]['preferred_ip'] }}:3000"
# - url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000" # might work again if the rpi wifi is deactivated
routers:
dashboard:
# rule: Host(`traefik.arcodange.duckdns.org`)
rule: Host(`traefik.arcodange.lab`)
service: api@internal
middlewares:
- localIp
# tls:
# certResolver: letsencrypt
# domains:
# - main: "arcodange.duckdns.org"
# sans:
# - "traefik.arcodange.duckdns.org"
entryPoints:
- websecure
- web
acme-challenge:
rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
service: acme-http@internal
tls:
certResolver: letsencrypt
domains:
- main: "arcodange.duckdns.org"
sans:
- "*.arcodange.duckdns.org"
entryPoints:
- websecure
- web
gitea:
# rule: Host(`gitea.arcodange.duckdns.org`)
rule: Host(`gitea.arcodange.lab`)
service: gitea
middlewares:
- localIp
# tls:
# certResolver: letsencrypt
# domains:
# - main: "arcodange.duckdns.org"
# sans:
# - "gitea.arcodange.duckdns.org"
entryPoints:
- websecure
middlewares:
localIp:
ipAllowList:
sourceRange:
- "172.16.0.0/12"
- "10.42.0.0/16"
- "192.168.1.0/24"
- "{{ ipify_public_ip }}/32"
# - "0.0.0.0/0"
# ipStrategy:
# depth: 1
traefik_helm_values:
deployment:
kind: "Deployment"
initContainers:
- name: volume-permissions
image: busybox:latest
command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"]
volumeMounts:
- name: data
mountPath: /data
# default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
# current is https://github.com/traefik/traefik-helm-chart/blob/v37.4.0/traefik/values.yaml
nodeSelector:
node-role.kubernetes.io/control-plane: 'true' # make a predictable choice of node to direct https traffic to this node and avoid NAT/loss of client IP
service:
spec:
externalTrafficPolicy: Local
ports:
traefik:
expose:
default: true
web:
forwardedHeaders:
trustedIPs: ["10.42.0.0/16"] #default k3s cidr
ingressRoute:
dashboard:
enabled: true
globalArguments: [] # deactivate --global.sendanonymoususage
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LEGO_DISABLE_CNAME_SUPPORT
value: 'true'
logs:
general:
level: INFO
# format: json
access:
enabled: true
timezone: Europe/Paris
# format: json
podSecurityContext:
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
fsGroup: 65532 # else the persistent volume might be owned by root and be unwritable
persistence:
# -- Enable persistence using Persistent Volume Claims
# ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
# It can be used to store TLS certificates, see `storage` in certResolvers
enabled: true
name: data
# existingClaim: ""
accessMode: ReadWriteOnce
size: 128Mi
storageClass: "longhorn"
# volumeName: ""
path: /data
annotations: {}
volumes:
- name: traefik-configmap
mountPath: /config
type: configMap
experimental:
plugins:
crowdsec-bouncer:
moduleName: github.com/maxlerebourg/crowdsec-bouncer-traefik-plugin #https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin
version: v1.3.3
additionalArguments:
- '--providers.file.filename=/config/dynamic.yaml'
- '--providers.kubernetesingress.ingressendpoint.publishedservice=kube-system/traefik'
- "--providers.kubernetescrd.allowcrossnamespace=true"
- "--providers.kubernetescrd.allowExternalNameServices=true"
certificatesResolvers:
letsencrypt:
acme:
# for challenge options cf. https://doc.traefik.io/traefik/https/acme/
email: arcodange@gmail.com
tlsChallenge: true
dnsChallenge:
# requires env variable DUCKDNS_TOKEN
provider: duckdns
propagation:
delayBeforeChecks: 120
disableChecks: true
resolvers:
- "1.1.1.1:53"
- "8.8.8.8:53"
httpChallenge:
entryPoint: "web"
# It has to match the path with a persistent volume
storage: /data/acme.json
envFrom:
- secretRef:
name: traefik-duckdns-token
# MY_TOKEN=<my token (see https://www.duckdns.org/domains)>
# kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system
- name: touch manifests/traefik-v3.yaml to trigger update
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
state: touch
become: true
# ---
- name: redeploy traefik
hosts: localhost
tasks:
- name: delete old traefik deployment
kubernetes.core.k8s:
api_version: v1
name: traefik
kind: Deployment
namespace: kube-system
state: "absent"
- name: delete old deployment job so the k3s helm controller redeploy with our new configuration
kubernetes.core.k8s:
api_version: batch/v1
name: helm-install-traefik
kind: Job
namespace: kube-system
state: "absent"
- name: get traefik deployment
kubernetes.core.k8s_info:
api_version: v1
name: traefik
kind: Deployment
namespace: kube-system
wait: true
register: traefik_deployment
- ansible.builtin.debug:
var: traefik_deployment

View File

@@ -0,0 +1,27 @@
# https://docs.k3s.io/advanced#coredns-custom-configuration-imports
---
- name: "Déclarer le ConfigMap coredns-custom pour arcodange.lab"
hosts: localhost
gather_facts: false
vars:
pihole_ip: "192.168.1.201"
coredns_namespace: "kube-system"
tasks:
- name: "Créer / mettre à jour le ConfigMap coredns-custom"
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns-custom
namespace: "{{ coredns_namespace }}"
data:
arcodange-lab.server: |
arcodange.lab:53 {
errors
cache 30
forward . {{ pihole_ip }}:53
}
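k3s's bundled CoreDNS imports `*.server` keys from this ConfigMap (see the linked docs), so in-cluster queries for arcodange.lab get forwarded to Pi-hole. A quick sanity check from the controller, sketched with the same dig lookup used elsewhere in this repo (requires dnspython):

- name: Check that Pi-hole answers for the internal zone
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Resolve gitea.arcodange.lab directly against Pi-hole
      ansible.builtin.debug:
        msg: "{{ lookup('community.general.dig', 'gitea.arcodange.lab', '@192.168.1.201') }}"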

View File

@@ -0,0 +1,165 @@
---
- name: System K3S
hosts: raspberries:&local
tasks:
- name: prepare inventory for k3s external playbook
tags: always
ansible.builtin.add_host:
hostname: "{{ item }}"
groups:
- k3s_cluster
- "{{ ansible_loop.first | ternary('server', 'agent') }}"
loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
loop_control:
extended: true
extended_allitems: false
# =========================
# Play 1 — Read step-ca PKI
# =========================
- name: Collect PKI material from step-ca
hosts: localhost
gather_facts: false
vars:
step_ca_primary: pi1
step_ca_user: step
step_ca_root: "/home/step/.step/certs/root_ca.crt"
tmp_dir: /tmp/step-ca-cert-manager
tasks:
- name: Ensure local temp directory exists
file:
path: "{{ tmp_dir }}"
state: directory
mode: "0700"
- name: Fetch root CA
fetch:
src: "{{ step_ca_root }}"
dest: "{{ tmp_dir }}/root_ca.crt"
flat: true
delegate_to: "{{ step_ca_primary }}"
become: true
become_user: "{{ step_ca_user }}"
run_once: true
- name: Read and decode PKI material
slurp:
src: "{{ item }}"
loop:
- "{{ tmp_dir }}/root_ca.crt"
register: pki_raw
- name: Set PKI facts
set_fact:
root_ca_b64: "{{ (pki_raw.results | selectattr('item','equalto', tmp_dir + '/root_ca.crt') | first).content }}"
# =========================
# Play 2 — Deploy to k3s
# =========================
- name: Deploy cert-manager and step-ca integration on k3s server
hosts: server
gather_facts: false
become: true
vars:
namespace: cert-manager
jwk_provisioner_name: cert-manager
jwk_secret_name: step-jwk-password
clusterissuer_name: step-ca
step_ca_url: "https://ssl-ca.arcodange.lab:8443"
cert_manager_version: v1.19.2
tasks:
- name: Get cert-manager provisioner info from step-ca
command: >
step ca provisioner list
register: provisioners_json
delegate_to: "{{ step_ca_primary }}"
become: true
become_user: "{{ step_ca_user }}"
run_once: true
- name: Set fact jwk_kid from provisioner
set_fact:
jwk_kid: >-
{{
(provisioners_json.stdout | from_json
| selectattr('name', 'equalto', jwk_provisioner_name) | list
| first).key.kid
}}
- name: Compute PKI checksum
set_fact:
pki_checksum: >-
{{
(hostvars['localhost'].root_ca_b64
~ jwk_kid
~ step_ca_url
~ cert_manager_version) | hash('sha256')
}}
- name: Install cert-manager and step-ca via k3s static manifest
copy:
dest: /var/lib/rancher/k3s/server/manifests/cert-manager-step-ca.yaml
mode: "0600"
content: |-
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
namespace: kube-system
annotations:
pki.arcodange.lab/checksum: "{{ pki_checksum }}"
spec:
chart: cert-manager
repo: https://charts.jetstack.io
version: {{ cert_manager_version }}
targetNamespace: cert-manager
createNamespace: true
valuesContent: |-
installCRDs: true
---
apiVersion: v1
kind: Secret
metadata:
name: {{ jwk_secret_name }}
namespace: {{ namespace }}
annotations:
pki.arcodange.lab/checksum: "{{ pki_checksum }}"
type: Opaque
stringData:
password: >-
{{ hostvars[step_ca_primary].vault_step_ca_jwk_password }}
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: step-issuer
namespace: kube-system
annotations:
pki.arcodange.lab/checksum: "{{ pki_checksum }}"
spec:
chart: step-issuer
repo: https://smallstep.github.io/helm-charts
version: 1.9.11
targetNamespace: {{ namespace }}
createNamespace: false
valuesContent: |-
certManager:
namespace: {{ namespace }}
stepClusterIssuer:
create: true
caUrl: "{{ step_ca_url }}"
caBundle: "{{ hostvars['localhost'].root_ca_b64 }}"
provisioner:
name: {{ jwk_provisioner_name }}
kid: "{{ jwk_kid }}"
passwordRef:
name: {{ jwk_secret_name }}
namespace: {{ namespace }}
key: password
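The k3s helm controller applies these manifests asynchronously, so a follow-up wait for the issuer can save debugging time. A sketch, assuming the CRD group/version certmanager.step.sm/v1beta1 (the group referenced elsewhere in this commit):

- name: Wait for the StepClusterIssuer to appear
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Poll for the step-issuer StepClusterIssuer
      kubernetes.core.k8s_info:
        api_version: certmanager.step.sm/v1beta1
        kind: StepClusterIssuer
        name: step-issuer
      register: issuer_info
      until: issuer_info.resources | length > 0
      retries: 10
      delay: 15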

View File

@@ -0,0 +1,161 @@
# PKI
Explanations generated by ChatGPT to describe the SSL setup via "step"
```mermaid
---
config:
logLevel: debug
theme: forest
---
flowchart TB
%% PKI
subgraph PKI["Step CA / PKI (Pi1)"]
style PKI fill:#ffe0b2,stroke:#ff8c00,stroke-width:2px
A[Primary Step CA]:::stepCA
B[JWK Provisioner]:::jwk
C[Root CA]:::root
end
%% Ansible controller
subgraph Controller["Ansible controller / Mac"]
style Controller fill:#e0f7fa,stroke:#00acc1,stroke-width:2px
D[Fetch JWK + Root CA]:::ansible
E[Secrets K8s: step-jwk, step-root-ca]:::k8sSecret
F[ClusterIssuer cert-manager]:::clusterIssuer
end
%% K3s Cluster + Traefik
subgraph K3sCluster["K3s Cluster"]
style K3sCluster fill:#f1f8e9,stroke:#558b2f,stroke-width:2px
T[Traefik Ingress Controller]:::traefik
H[Webapp Pods]:::webapp
G["Gitea Service (ExternalName → pi2.home:3000)"]:::gitea
end
Users[Clients / Browsers]:::clients
%% Arrows
%% PKI → Controller
A --> B
C --> D
B --> D
D --> E
E --> F
%% ClusterIssuer → Traefik services
F --> H
F --> G
%% Traefik exposes all services
T --> H
T --> G
Users -->|HTTPS / HTTP| T
%% PKI direct (optional, for clarity)
A -->|Sign initial cert| F
%% Styling classes
classDef stepCA fill:#fff3e0,stroke:#ff6f00,stroke-width:1px
classDef jwk fill:#fff9c4,stroke:#fbc02d,stroke-width:1px
classDef root fill:#ffe0b2,stroke:#ff8c00,stroke-width:1px
classDef ansible fill:#b2ebf2,stroke:#00acc1,stroke-width:1px
classDef k8sSecret fill:#b3e5fc,stroke:#0288d1,stroke-width:1px
classDef clusterIssuer fill:#81d4fa,stroke:#0277bd,stroke-width:1px
classDef gitea fill:#c8e6c9,stroke:#388e3c,stroke-width:1px
classDef webapp fill:#a5d6a7,stroke:#2e7d32,stroke-width:1px
classDef traefik fill:#ffe082,stroke:#ff8f00,stroke-width:1px
classDef clients fill:#eeeeee,stroke:#9e9e9e,stroke-width:1px
```
- 🔵 PKI (Step CA): the source of trust. All HTTPS certificates come from here.
- 🔵 JWK Provisioner: authorizes cert-manager to request certificates automatically.
- 🟢 Ansible controller: centralizes the keys, creates the K8s Secrets and the ClusterIssuer.
- 🟢 Secrets & ClusterIssuer: let cert-manager inside K3s authenticate and obtain TLS certificates.
- 🟢 Webapp Pods: obtain their certificates via cert-manager, and HTTPS works automatically.
- 🔵 Gitea: receives a certificate signed by Step CA directly and serves HTTPS outside K3s.
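Concretely, a workload certificate is just a cert-manager `Certificate` pointing at the step issuer; the Traefik manifest in this commit uses exactly this shape for the wildcard certificate:
```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-arcodange-lab
  namespace: kube-system
spec:
  secretName: wildcard-arcodange-lab # consumed by Traefik's default TLSStore
  issuerRef:
    name: step-issuer
    kind: StepClusterIssuer
    group: certmanager.step.sm
  dnsNames:
    - arcodange.lab
    - "*.arcodange.lab"
```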
```mermaid
flowchart TD
%% PKI
subgraph PKI["Step CA / PKI (Pi1)"]
style PKI fill:#ffe0b2,stroke:#ff8c00,stroke-width:2px
A[1⃣ Initialize the primary Step CA]:::stepCA
B[2⃣ Create the JWK Provisioner for K3s]:::jwk
C[Root CA]:::root
end
%% Ansible controller
subgraph Controller["Ansible controller / Mac"]
style Controller fill:#e0f7fa,stroke:#00acc1,stroke-width:2px
D[3⃣ Fetch JWK + Root CA from the Step CA]:::ansible
E[4⃣ Create / update the K8s Secrets]:::k8sSecret
F[5⃣ Create / update the cert-manager ClusterIssuer]:::clusterIssuer
end
%% K3s Cluster + Traefik
subgraph K3sCluster["K3s Cluster"]
style K3sCluster fill:#f1f8e9,stroke:#558b2f,stroke-width:2px
T[6⃣ Traefik Ingress Controller]:::traefik
H[7⃣ Webapp Pods]:::webapp
G["8⃣ Gitea Service (ExternalName → pi2.home:3000)"]:::gitea
end
Users[9⃣ Mac client / Browsers]:::clients
%% Flow
A --> B
C --> D
B --> D
D --> E
E --> F
F --> H
F --> G
T --> H
T --> G
Users -->|HTTPS / HTTP| T
%% Styling classes
classDef stepCA fill:#fff3e0,stroke:#ff6f00,stroke-width:1px
classDef jwk fill:#fff9c4,stroke:#fbc02d,stroke-width:1px
classDef root fill:#ffe0b2,stroke:#ff8c00,stroke-width:1px
classDef ansible fill:#b2ebf2,stroke:#00acc1,stroke-width:1px
classDef k8sSecret fill:#b3e5fc,stroke:#0288d1,stroke-width:1px
classDef clusterIssuer fill:#81d4fa,stroke:#0277bd,stroke-width:1px
classDef gitea fill:#c8e6c9,stroke:#388e3c,stroke-width:1px
classDef webapp fill:#a5d6a7,stroke:#2e7d32,stroke-width:1px
classDef traefik fill:#ffe082,stroke:#ff8f00,stroke-width:1px
classDef clients fill:#eeeeee,stroke:#9e9e9e,stroke-width:1px
```
```mermaid
flowchart TD
subgraph Cluster["Kubernetes Cluster (k3s)"]
subgraph CertManager["Cert-Manager"]
ClusterIssuer["ClusterIssuer\n(type: smallstep)"]
end
subgraph Traefik["Traefik (Ingress Controller)"]
TLSStore["TLSStore\n(Traefik v2+)"]
IngressRoute["IngressRoute\n(TLS: my-tls-store)"]
end
subgraph Apps["Applications"]
App1[Service: my-app]
App2[Service: my-api]
end
end
subgraph Smallstep["Smallstep PKI (step-ca)"]
StepCA["step-ca\n(internal CA)"]
end
%% Interactions
ClusterIssuer -- "1. Certificate request\n(CertificateRequest)" --> StepCA
StepCA -- "2. Issues a certificate\n(signed by the CA)" --> ClusterIssuer
ClusterIssuer -- "3. Stores the certificate\n(in a Kubernetes Secret)" --> Secret[(Secret: my-app-tls)]
Secret -- "4. Referenced by" --> TLSStore
TLSStore -- "5. Provides the certificate\n(TLS termination)" --> IngressRoute
IngressRoute -- "6. Routes HTTPS traffic\nto" --> App1
IngressRoute -- "6. Routes HTTPS traffic\nto" --> App2
```

View File

@@ -0,0 +1,13 @@
- name: Raspberry Pi general setup
hosts: raspberries:&local
gather_facts: yes
tags: never
become: yes
tasks:
- name: set hostname
ansible.builtin.hostname:
name: "{{ inventory_hostname }}"
become: yes
when: inventory_hostname != ansible_hostname

View File

@@ -0,0 +1,31 @@
---
- name: General setup of the rpis
ansible.builtin.import_playbook: rpi.yml
- name: dns
ansible.builtin.import_playbook: ../dns/dns.yml
- name: ssl
ansible.builtin.import_playbook: ../ssl/ssl.yml
- name: Prepare the disks for Longhorn
ansible.builtin.import_playbook: prepare_disks.yml
- name: Install and configure Docker
ansible.builtin.import_playbook: system_docker.yml
- name: Install the iSCSI client for Longhorn
ansible.builtin.import_playbook: iscsi_longhorn.yml
- name: Prepare the inventory and install K3s
ansible.builtin.import_playbook: system_k3s.yml
- name: Configure K3s CoreDNS
ansible.builtin.import_playbook: k3s_dns.yml
- name: Configure the K3s cert issuer
ansible.builtin.import_playbook: k3s_ssl.yml
- name: Configure K3s (kubeconfig, Longhorn, Traefik)
ansible.builtin.import_playbook: k3s_config.yml

View File

@@ -0,0 +1,88 @@
- name: System Docker
hosts: raspberries:&local
gather_facts: yes
tags: never
become: yes
pre_tasks:
- name: Prevent apt source conflict
ansible.builtin.file:
state: absent
path: /etc/apt/sources.list.d/docker.list
become: yes
- name: Install role geerlingguy.docker
community.general.ansible_galaxy_install:
type: role
name: geerlingguy.docker
run_once: true
delegate_to: localhost
become: false
- ansible.builtin.debug:
var: ansible_facts.machine
tasks:
- include_role:
name: geerlingguy.docker
- name: Create /etc/docker if it does not exist
ansible.builtin.file:
path: /etc/docker
state: directory
mode: '0755'
- name: Check whether /etc/docker/daemon.json exists
ansible.builtin.stat:
path: /etc/docker/daemon.json
register: docker_config_stat
- name: Read the existing Docker configuration
ansible.builtin.slurp:
src: /etc/docker/daemon.json
register: docker_config_raw
when: docker_config_stat.stat.exists
- name: Initialize the Docker config variable
ansible.builtin.set_fact:
docker_config: {}
- name: Parse the existing JSON if the file exists
ansible.builtin.set_fact:
docker_config: "{{ docker_config_raw.content | b64decode | from_json }}"
when: docker_config_stat.stat.exists
- name: Merge the log rotation settings into the config
ansible.builtin.set_fact:
docker_config: >-
{{ docker_config | combine({
'log-driver': 'json-file',
'log-opts': {
'max-size': '10m',
'max-file': '5'
}
}, recursive=True) }}
- name: Write the updated configuration
ansible.builtin.copy:
dest: /etc/docker/daemon.json
content: "{{ docker_config | to_nice_json(indent=2) }}"
mode: '0644'
notify: Restart Docker
handlers:
- name: Restart Docker
ansible.builtin.service:
name: docker
state: restarted
post_tasks:
- name: adding existing user '{{ ansible_user }}' to group docker
user:
name: '{{ ansible_user }}'
groups: docker
append: yes
become: yes

View File

@@ -0,0 +1,63 @@
- name: System K3S
hosts: raspberries:&local
tasks:
- name: prepare inventory for k3s external playbook
tags: always
ansible.builtin.add_host:
hostname: "{{ item }}"
groups:
- k3s_cluster
- "{{ ansible_loop.first | ternary('server', 'agent') }}"
loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
loop_control:
extended: true
extended_allitems: false
- name: Install collection k3s.orchestration
local_action:
module: community.general.ansible_galaxy_install
type: collection
name: git+https://github.com/k3s-io/k3s-ansible
run_once: true
- name: Install socat for kubectl port forwarding
ansible.builtin.apt:
name: socat
state: present
update_cache: yes
become: yes
- name: k3s
ansible.builtin.import_playbook: k3s.orchestration.site
# ansible.builtin.import_playbook: k3s.orchestration.upgrade
# ansible.builtin.import_playbook: k3s.orchestration.reset
vars:
k3s_version: v1.34.3+k3s1
extra_server_args: >-
--docker --disable traefik
--kubelet-arg="container-log-max-files=5"
--kubelet-arg="container-log-max-size=10Mi"
extra_agent_args: >-
--docker
--kubelet-arg="container-log-max-files=5"
--kubelet-arg="container-log-max-size=10Mi"
api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
- name: how to reach k3s
hosts: server
tasks:
- name: copy /etc/rancher/k3s/k3s.yaml to ~/.kube/config from the k3s server and replace 127.0.0.1 with the server ip or hostname
run_once: true
block:
- ansible.builtin.fetch:
src: /etc/rancher/k3s/k3s.yaml
dest: ~/.kube/config
flat: true
become: true
run_once: true
- local_action:
module: ansible.builtin.replace
path: ~/.kube/config
regexp: 'server: https://127.0.0.1:6443'
replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'

View File

@@ -2,7 +2,7 @@ vault_unseal_keys_path: ~/.arcodange/cluster-keys.json
vault_unseal_keys_shares: 1
vault_unseal_keys_key_threshold: 1 # keys_key_threshold <= keys_shares
vault_address: https://vault.arcodange.duckdns.org
vault_address: https://vault.arcodange.lab
vault_oidc_gitea_setupGiteaAppJS: '{{ role_path }}/files/playwright_setupGiteaApp.js'

View File

@@ -19,7 +19,7 @@ variable "admin_email" {
}
variable "gitea_app" {
type = object({
url = optional(string, "https://gitea.arcodange.duckdns.org/")
url = optional(string, "https://gitea.arcodange.lab/")
id = string
secret = string
description = optional(string, "Arcodange Gitea Auth")
@@ -39,7 +39,7 @@ variable "gitea_admin_token" {
sensitive = true
}
# kubectl -n kube-system exec $(kubectl -n kube-system get pod -l app.kubernetes.io/name=traefik -o jsonpath="{.items[0]['.metadata.name']}") -- cat /data/acme.json | jq '(.letsencrypt.Certificates | map(select(.domain.main=="arcodange.duckdns.org")))[0]' | jq '.certificate' -r | base64 -d | openssl x509
# kubectl -n kube-system exec $(kubectl -n kube-system get pod -l app.kubernetes.io/name=traefik -o jsonpath="{.items[0]['.metadata.name']}") -- cat /data/acme.json | jq '(.letsencrypt.Certificates | map(select(.domain.main=="arcodange.lab")))[0]' | jq '.certificate' -r | base64 -d | openssl x509
# variable "ca_pem" {
# type = string
# }

View File

@@ -7,7 +7,7 @@ const username = process.env.GITEA_USER;
const password = process.env.GITEA_PASSWORD;
const debug = Boolean(process.env.DEBUG);
const vaultAddress = process.env.VAULT_ADDRESS || 'http://localhost:8200';
const giteaAddress = process.env.GITEA_ADDRESS || 'https://gitea.arcodange.duckdns.org';
const giteaAddress = process.env.GITEA_ADDRESS || 'https://gitea.arcodange.lab';
if (!username || !password) {
console.error('Please set the GITEA_USER and GITEA_PASSWORD environment variables.');
@@ -75,7 +75,7 @@ async function setupApp() {
await applicationsPanel.locator('textarea[name="redirect_uris"]').fill([
'http://localhost:8250/oidc/callback', // for command line login
`${vaultAddress}/ui/vault/auth/gitea/oidc/callback`,
'https://webapp.arcodange.duckdns.org/oauth-callback',
'https://webapp.arcodange.lab/oauth-callback',
'https://webapp.arcodange.fr/oauth-callback',
].join('\n'));
await applicationsPanel.locator('form[action="/-/admin/applications/oauth2"] > button').dblclick()

View File

@@ -71,7 +71,7 @@
gitea_secret_name: vault_oauth__sh_b64
gitea_secret_value: >-
{{ lookup('ansible.builtin.template', 'oidc_jwt_token.sh.j2', template_vars = {
'GITEA_BASE_URL': 'https://gitea.arcodange.duckdns.org',
'GITEA_BASE_URL': 'https://gitea.arcodange.lab',
'OIDC_CLIENT_ID': gitea_app.id,
'OIDC_CLIENT_SECRET': gitea_app.secret,
}) | b64encode }}

View File

@@ -5,9 +5,9 @@ set -eu
CLIENT_ID="{{ OIDC_CLIENT_ID }}"
CLIENT_SECRET="{{ OIDC_CLIENT_SECRET }}"
REDIRECT_URI="{{ OIDC_CLIENT_CALLBACK | default('https://webapp.arcodange.fr/oauth-callback') }}" # Redirect here after authentication
AUTH_URL="{{ GITEA_BASE_URL | default('https://gitea.arcodange.duckdns.org') }}/login/oauth/authorize"
TOKEN_URL="{{ GITEA_BASE_URL | default('https://gitea.arcodange.duckdns.org') }}/login/oauth/access_token"
ISSUER="https://gitea.arcodange.duckdns.org/"
AUTH_URL="{{ GITEA_BASE_URL | default('https://gitea.arcodange.lab') }}/login/oauth/authorize"
TOKEN_URL="{{ GITEA_BASE_URL | default('https://gitea.arcodange.lab') }}/login/oauth/access_token"
ISSUER="https://gitea.arcodange.lab/"
# SCOPE="openid email profile groups" # Scope you want to obtain - profile groups
SCOPE="email openid read:user" # Scope you want to obtain - profile groups
set +u

View File

@@ -1,5 +1,5 @@
# to see generated tokens
# go to https://gitea.arcodange.duckdns.org/user/settings/applications
# go to https://gitea.arcodange.lab/user/settings/applications
- when: >-
lookup('ansible.builtin.varnames', '^' ~ gitea_token_fact_name ~ '$') | length == 0

View File

@@ -7,7 +7,7 @@ const username = process.env.GITEA_USER;
const password = process.env.GITEA_PASSWORD;
const debug = Boolean(process.env.DEBUG);
const vaultAddress = process.env.VAULT_ADDRESS || 'http://localhost:8200';
const giteaAddress = process.env.GITEA_ADDRESS || 'https://gitea.arcodange.duckdns.org';
const giteaAddress = process.env.GITEA_ADDRESS || 'https://gitea.arcodange.lab';
if (!username || !password) {
console.error('Please set the GITEA_USER and GITEA_PASSWORD environment variables.');

View File

@@ -4,7 +4,7 @@
kubectl -n kube-system exec
$(kubectl -n kube-system get pod -l app.kubernetes.io/name=traefik
-o jsonpath="{.items[0]['.metadata.name']}") --
cat /data/acme.json | jq '(.letsencrypt.Certificates | map(select(.domain.main=="*.arcodange.duckdns.org")))[0]'
cat /data/acme.json | jq '(.letsencrypt.Certificates | map(select(.domain.main=="*.arcodange.lab")))[0]'
| jq '.certificate' -r | base64 -d | openssl x509
register: traefik_certs_cmd
- set_fact:

View File

@@ -14,7 +14,7 @@ metadata:
spec:
project: default
source:
repoURL: https://gitea.arcodange.duckdns.org/arcodange-org/{{ $app_name }}
repoURL: https://gitea.arcodange.lab/arcodange-org/{{ $app_name }}
targetRevision: HEAD
path: chart
destination:

View File

@@ -8,19 +8,19 @@ gitea_applications:
annotations: {}
webapp:
annotations:
argocd-image-updater.argoproj.io/image-list: webapp=gitea.arcodange.duckdns.org/arcodange-org/webapp:latest
argocd-image-updater.argoproj.io/image-list: webapp=gitea.arcodange.lab/arcodange-org/webapp:latest
argocd-image-updater.argoproj.io/webapp.update-strategy: digest
erp:
annotations: {}
cms:
annotations:
argocd-image-updater.argoproj.io/image-list: cms=gitea.arcodange.duckdns.org/arcodange-org/cms:latest
argocd-image-updater.argoproj.io/image-list: cms=gitea.arcodange.lab/arcodange-org/cms:latest
argocd-image-updater.argoproj.io/cms.update-strategy: digest
argocd_image_updater_chart_values:
config:
argocd:
grpcWeb: false
serverAddress: "https://argocd.arcodange.duckdns.org/"
serverAddress: "https://argocd.arcodange.lab/"
insecure: true
plaintext: true

View File

@@ -9,7 +9,7 @@
>The unsealKey, the initial vaultRootToken and the terraform backend authentication are for now configured on the ansible controller (Macbook Pro).
>[!NOTE]
> Vault is deployed via [argo cd](https://gitea.arcodange.duckdns.org/arcodange-org/tools/src/branch/main/hashicorp-vault)
> Vault is deployed via [argo cd](https://gitea.arcodange.lab/arcodange-org/tools/src/branch/main/hashicorp-vault)
```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'base',

View File

@@ -24,12 +24,12 @@ terraform {
}
provider "gitea" { # https://registry.terraform.io/providers/go-gitea/gitea/latest/docs
base_url = "https://gitea.arcodange.duckdns.org"
base_url = "https://gitea.arcodange.lab"
# use GITEA_TOKEN env var
}
provider "vault" {
address = "https://vault.arcodange.duckdns.org"
address = "https://vault.arcodange.lab"
auth_login_jwt { # TERRAFORM_VAULT_AUTH_JWT environment variable
mount = "gitea_jwt"
role = "gitea_cicd"

View File

@@ -29,7 +29,7 @@ provider "postgresql" {
}
provider vault {
address = "https://vault.arcodange.duckdns.org"
address = "https://vault.arcodange.lab"
auth_login_jwt { # TERRAFORM_VAULT_AUTH_JWT environment variable
mount = "gitea_jwt"
role = "gitea_cicd"

ssl.md Normal file
View File

@@ -0,0 +1,58 @@
# Distributing the Step-CA Root CA
This guide explains how to install the Step-CA root certificate on every device so that TLS works with the internal PKI.
---
## Prerequisites
- The root certificate is fetched from `step_ca_primary` (pi1): `/home/step/.step/certs/root_ca.crt` (see the check after this list)
- The target machines are:
  - pi1, pi2, pi3 (Raspbian / Debian)
  - localhost (Mac)
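
Before distributing it, the root certificate can be inspected on pi1. A quick `openssl` check (fingerprint, subject, validity):

```bash
ssh pi1 "sudo openssl x509 -in /home/step/.step/certs/root_ca.crt -noout -sha256 -fingerprint -subject -dates"
```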
---
## 1. Copy the certificate to the RPis
```bash
scp pi1:/home/step/.step/certs/root_ca.crt /tmp/root_ca.crt
```
Then, on each Pi (idempotent):
```bash
for pi in pi1 pi2 pi3
do
ssh $pi "sudo cp /home/step/.step/certs/root_ca.crt /usr/local/share/ca-certificates/arcodange-root.crt && sudo chmod 644 /usr/local/share/ca-certificates/arcodange-root.crt && sudo update-ca-certificates"
ssh $pi 'sudo apt install -y libnss3-tools && certutil -d sql:/home/pi/.pki/nssdb -A -t "C,," -n "arcodange-root" -i /usr/local/share/ca-certificates/arcodange-root.crt'
done
```
Quick verification on each Pi:
```bash
ssh pi1 "sudo openssl verify /usr/local/share/ca-certificates/arcodange-root.crt"
ssh pi2 "sudo openssl verify /usr/local/share/ca-certificates/arcodange-root.crt"
ssh pi3 "sudo openssl verify /usr/local/share/ca-certificates/arcodange-root.crt"
```
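
The NSS user store (used by Chromium-based browsers) can be checked the same way, assuming the nssdb path used above:

```bash
for pi in pi1 pi2 pi3
do
  ssh $pi 'certutil -d sql:/home/pi/.pki/nssdb -L | grep arcodange-root'
done
```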
---
## 2. Copy the certificate to the Mac (localhost)
```bash
scp pi1:/home/step/.step/certs/root_ca.crt /tmp/root_ca.crt
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/root_ca.crt
```
Verification:
```bash
security verify-cert -c /tmp/root_ca.crt
```
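
Once the root is trusted, an end-to-end TLS check against an internal service validates the whole chain, for example (assuming `gitea.arcodange.lab` resolves through the internal DNS):

```bash
curl -sSI https://gitea.arcodange.lab/ | head -n 1   # should succeed without curl -k
```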
---
## 3. Restart TLS services if needed
On the RPis (optional, if you use Docker, containerd, or k3s):
```bash