Compare commits
32 Commits
c19cf7eced...vibe/batch
| SHA1 |
|---|
| f8009989fc |
| fc9164f11e |
| c751b621ba |
| 07a619b274 |
| 9931f81998 |
| 437fd506ed |
| 943915be74 |
| 8a82d14797 |
| 0285d171ff |
| 55d137132f |
| 451dfa5133 |
| 17e99db641 |
| 07e5ff460b |
| 5b3c896a25 |
| 91219c49f1 |
| 74b8676244 |
| 1fd47e9d97 |
| 0fbfbd589f |
| 8d6be311ae |
| 2b4aa30a64 |
| cd3c4d86ff |
| 45d39d13b4 |
| f4cb04c9c9 |
| 17a0f23bbb |
| f7bfe2f71d |
| 72628f0f0e |
| b6d240ce31 |
| 2d8f5de482 |
| 140dab4f1d |
| 9b09e6bd86 |
| 83410d9eb1 |
| fa5bc7e30e |
@@ -19,18 +19,20 @@ concurrency:

.vault_step: &vault_step
  name: read vault secret
-  uses: https://gitea.arcodange.duckdns.org/arcodange-org/vault-action.git@main
+  uses: https://gitea.arcodange.lab/arcodange-org/vault-action.git@main
  id: vault-secrets
  with:
-    url: https://vault.arcodange.duckdns.org
+    url: https://vault.arcodange.lab
    caCertificate: ${{ secrets.HOMELAB_CA_CERT }}
    jwtGiteaOIDC: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
    role: gitea_cicd
    method: jwt
    path: gitea_jwt
    secrets: |
      kvv1/google/credentials credentials | GOOGLE_CREDENTIALS ;
-      kvv1/admin/gitea token | GITEA_TOKEN
+      kvv1/admin/gitea token | GITEA_TOKEN ;
+      kvv1/admin/cloudflare iam_token | CLOUDFLARE_API_TOKEN ;
+      kvv1/admin/ovh/app * | OVH_ ;
jobs:
  gitea_vault_auth:
    name: Auth with gitea for vault
@@ -52,9 +54,12 @@ jobs:
    env:
      OPENTOFU_VERSION: 1.8.2
      TERRAFORM_VAULT_AUTH_JWT: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
+      VAULT_CACERT: "${{ github.workspace }}/homelab.pem"
    steps:
      - *vault_step
      - uses: actions/checkout@v4
+      - name: prepare vault self signed cert
+        run: echo -n "${{ secrets.HOMELAB_CA_CERT }}" | base64 -d > $VAULT_CACERT
      - name: terraform apply
        uses: dflook/terraform-apply@v1
        with:
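For readers skimming the diff: the `.vault_step: &vault_step` block works because Gitea Actions workflow files are plain YAML, so an anchor hung on an ignored top-level key can be aliased from any job. A minimal sketch of what the parser sees (step body abbreviated from the hunk above):

```yaml
# The dummy top-level key is ignored by the runner; only the anchor matters.
.vault_step: &vault_step
  name: read vault secret
  uses: https://gitea.arcodange.lab/arcodange-org/vault-action.git@main

jobs:
  example:
    runs-on: ubuntu-latest
    steps:
      - *vault_step   # expands to the mapping above at YAML parse time
      - run: echo "vault secrets are now available as step outputs"
```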
@@ -17,10 +17,11 @@ concurrency:

.vault_step: &vault_step
  name: read vault secret
-  uses: https://gitea.arcodange.duckdns.org/arcodange-org/vault-action.git@main
+  uses: https://gitea.arcodange.lab/arcodange-org/vault-action.git@main
  id: vault-secrets
  with:
-    url: https://vault.arcodange.duckdns.org
+    url: https://vault.arcodange.lab
    caCertificate: ${{ secrets.HOMELAB_CA_CERT }}
    jwtGiteaOIDC: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
    role: gitea_cicd
    method: jwt
@@ -50,9 +51,12 @@ jobs:
    env:
      OPENTOFU_VERSION: 1.8.2
      TERRAFORM_VAULT_AUTH_JWT: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
+      VAULT_CACERT: "${{ github.workspace }}/homelab.pem"
    steps:
      - *vault_step
      - uses: actions/checkout@v4
+      - name: prepare vault self signed cert
+        run: echo -n "${{ secrets.HOMELAB_CA_CERT }}" | base64 -d > $VAULT_CACERT
      - name: terraform apply
        uses: dflook/terraform-apply@v1
        with:
@@ -10,41 +10,68 @@ kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKE

```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'dark' } }%%
timeline
-    title playbook order
-    section Setup DNS, OS, ...
-        manual configuration
-        : install OS, reserve a static IP, configure SSH, VNC
-        : format and create partitions with gparted
-    section Docker & K3S
-        system
-        : install Docker
-        : install K3S working with docker
-        : configure Traefik
-
-    section Volume, NFS
-        setup hard_disk
-        : mount the partitions
-        : install NFS
-        system
-        : deploy the NFS provisioner
-
-    section postgres
-        setup
-        : postgres
-    section gitea
-        setup
-        : gitea
-    section gitea action runner
-        setup
-        : gitea action runner
-    section argo cd
-        argo_cd
-        : argo cd
-    section hello world app
-        setup git repository
-        : terraform
-        setup CI
-        deploy
-        : dev : list exposed deployments with label and port as a landing page
-        : expose (as ngrok ? direct ? port ? )
+    title Playbook Execution Sequence
+    section 01_system
+        rpi
+        : set hostname
+        dns
+        : install pi-hole
+        ssl
+        : step-ca
+        : fetch root certificate
+        : build docker image with CA
+        prepare_disks
+        : list partitions
+        : format disk
+        : mount disk
+        system_docker
+        : install docker
+        : configure docker storage
+        : restart docker
+        longhorn
+        : deploy longhorn
+        k3s
+        : prepare inventory
+        : install k3s collection
+        : install socat
+        : deploy k3s cluster
+        : configure kubeconfig
+        : configure traefik
+        : configure cert-manager
+    section 02_setup
+        backup_nfs
+        : create RWX volume
+        : create recurring job
+        : deploy NFS
+        : mount NFS
+        postgres
+        : create database
+        : create user
+        gitea
+        : deploy gitea
+        : create admin user
+        : create organization
+    section 03_cicd
+        cicd : CI/CD
+        gitea_token
+        : generate token
+        deploy_docker_compose
+        : deploy gitea action
+        argocd
+        : generate token
+        : deploy argocd
+    section 04_tools
+        Hashicorp Vault
+        : gitea_token
+        : hashicorp_vault
+        Crowdsec
+        : crowdsec
+    section 05_backup
+        Gitea Backup
+        : gitea
+        K3s PVC Backup
+        : k3s_pvc
+        Postgres Backup
+        : create backup script
+        : create restore script
```
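The new timeline doubles as a run order. A sketch of a top-level playbook that would chain the stages; only the stage names 01_system through 05_backup come from the timeline above, the file paths are assumptions:

```yaml
# Hypothetical site.yml mirroring the timeline stages; entry-playbook
# names are assumed, not taken from this diff.
- ansible.builtin.import_playbook: 01_system/site.yml
- ansible.builtin.import_playbook: 02_setup/site.yml
- ansible.builtin.import_playbook: 03_cicd/site.yml
- ansible.builtin.import_playbook: 04_tools/site.yml
- ansible.builtin.import_playbook: 05_backup/site.yml
```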
@@ -1,4 +1,4 @@
-gitea_version: 1.24.3
+gitea_version: 1.25.5

gitea_database:
  db_name: gitea
@@ -34,11 +34,11 @@ gitea:
    GITEA__mailer__SMTP_PORT: 465
    GITEA__mailer__PASSWD: '{{ gitea_vault.GITEA__mailer__PASSWD }}'
    GITEA__server__SSH_PORT: 2222
-    GITEA__server__SSH_DOMAIN: "{{ lookup('dig', groups.gitea[0]) }}"
+    GITEA__server__SSH_DOMAIN: "{{ hostvars[groups.gitea[0]]['preferred_ip'] }}"
    GITEA__server__SSH_LISTEN_PORT: 22
    GITEA_server__DOMAIN: localhost
    GITEA_server__HTTP_PORT: 3000
-    GITEA_server__ROOT_URL: https://gitea.arcodange.duckdns.org/
+    GITEA_server__ROOT_URL: https://gitea.arcodange.lab/
    GITEA_server__START_SSH_SERVER: true
    GITEA_server__OFFLINE_MODE: true
    GITEA_service__DISABLE_REGISTRATION: true
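The SSH_DOMAIN change trades a runtime DNS query for a static inventory value, so Gitea keeps a usable SSH domain even when the Pi-hole resolvers are down. A quick sketch to compare both expressions against the inventory shown later in this diff:

```yaml
# Minimal comparison playbook; the old expression needs the dig lookup
# (dnspython), the new one only needs the preferred_ip host var.
- hosts: localhost
  gather_facts: false
  tasks:
    - ansible.builtin.debug:
        msg:
          old_dns_lookup: "{{ lookup('dig', groups.gitea[0]) }}"               # resolved at run time
          new_inventory_ip: "{{ hostvars[groups.gitea[0]]['preferred_ip'] }}"  # static, read from the inventory
```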
@@ -0,0 +1,9 @@
step_ca_primary: pi1

step_ca_fqdn: ssl-ca.arcodange.lab

step_ca_user: step
step_ca_home: /home/step
step_ca_dir: /home/step/.step

step_ca_listen_address: ":8443"
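These defaults compose into the URL other plays use to fetch the root certificate; the commented HOMELAB_CA_CERT task later in this diff hard-codes the same https://ssl-ca.arcodange.lab:8443/roots.pem. A sketch of deriving it from the variables instead:

```yaml
# step-ca serves its root certificate at /roots.pem; validate_certs must
# be off for the very first fetch, since the CA is not trusted yet.
- name: Download homelab CA certificate
  ansible.builtin.uri:
    url: "https://{{ step_ca_fqdn }}{{ step_ca_listen_address }}/roots.pem"  # -> https://ssl-ca.arcodange.lab:8443/roots.pem
    return_content: yes
    validate_certs: no
  register: homelab_ca_cert
```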
@@ -0,0 +1,13 @@
$ANSIBLE_VAULT;1.1;AES256
35633437343661363030323466313735373033373566643530653539633133623462333337393037
6336653635366439363031616637313339373465666433320a653936396438373132623264386665
66623330343439613636353963373139363531613761613864623262623661666565373137306461
3062646337353331300a636164643462343163303931646538653537323831623736393634343137
39376139306165356138383664373334353364316435303265643965386135356561316130316239
64393436363436393339393130383764353231333361313565333934313136666234356433626437
35656666386538653963653334393262366562656631376636353538383661386661366438366133
64346338666666323562313363363836613439633931306437393132616134666230613936623634
34383366663031336236316566626666303764323631363239636461396366323733393731376563
65356630326536333133393335383766616631323732333262396464326165366532383066363761
37303033316135616661623431623836313965373930376361656334323336656561643336616265
36666235623564383132
@@ -2,12 +2,15 @@ raspberries:
  hosts:
    pi1:
      ansible_host: pi1.home # setup http://192.168.1.1/ Réseau/DNS
+      preferred_ip: 192.168.1.201
      ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
    pi2:
      ansible_host: pi2.home
+      preferred_ip: 192.168.1.202
      ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'
    pi3:
      ansible_host: pi3.home
+      preferred_ip: 192.168.1.203
      ansible_ssh_extra_args: '-o StrictHostKeyChecking=no'

internetPi1:
@@ -39,6 +42,17 @@ gitea:
  children:
    postgres:

+pihole:
+  hosts:
+    pi1:
+    pi3:
+
+step_ca:
+  hosts:
+    pi1:
+    pi2:
+    pi3:
+
all:
  children:
    raspberries:
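With pihole and step_ca added as groups, membership can be sanity-checked before the DNS and CA roles run; a minimal sketch:

```yaml
# Prints which of the new groups each Raspberry Pi landed in.
- hosts: raspberries
  gather_facts: false
  tasks:
    - ansible.builtin.debug:
        msg: "{{ inventory_hostname }} ({{ preferred_ip }}) is in {{ group_names }}"
```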
@@ -1,383 +1,2 @@
----
-
-- name: Prepare disks for longhorn
-  ansible.builtin.import_playbook: ./prepare_disks.yml
-
-- name: System Docker
-  hosts: raspberries:&local
-  gather_facts: yes
-  tags: never
-  become: yes
-
-  pre_tasks:
-
-    - name: set hostname
-      ansible.builtin.hostname:
-        name: "{{ inventory_hostname }}"
-      become: yes
-      when: inventory_hostname != ansible_hostname
-
-    - name: Prevent apt source conflict
-      ansible.builtin.file:
-        state: absent
-        path: /etc/apt/sources.list.d/docker.list
-      become: yes
-
-    - name: Install role geerlingguy.docker
-      community.general.ansible_galaxy_install:
-        type: role
-        name: geerlingguy.docker
-      run_once: true
-      delegate_to: localhost
-      become: false
-
-    - ansible.builtin.debug:
-        var: ansible_facts.machine
-
-  tasks:
-
-    - include_role:
-        name: geerlingguy.docker
-
-  post_tasks:
-    - name: adding existing user '{{ ansible_user }}' to group docker
-      user:
-        name: '{{ ansible_user }}'
-        groups: docker
-        append: yes
-      become: yes
-
-#---
-
-- name: Install iSCSI client for Longhorn on Raspberry Pi
-  hosts: raspberries:&local
-  become: yes
-  tasks:
-    - name: Install open-iscsi
-      ansible.builtin.apt:
-        name: open-iscsi
-        state: present
-        update_cache: yes
-
-    - name: Enable and start iSCSI service
-      ansible.builtin.service:
-        name: iscsid
-        state: started
-        enabled: yes
-
-    - name: Install cryptsetup
-      ansible.builtin.apt:
-        name: cryptsetup
-        state: present
-        update_cache: yes
-
-    - name: Load the dm_crypt kernel module
-      ansible.builtin.modprobe:
-        name: dm_crypt
-        state: present
-
-    - name: Ensure the dm_crypt module is loaded at boot
-      ansible.builtin.lineinfile:
-        path: /etc/modules
-        line: dm_crypt
-        state: present
-
-    - name: Create the longhorn directory
-      ansible.builtin.file:
-        path: /mnt/arcodange/longhorn
-        state: directory
-        owner: pi
-        group: docker
-        mode: '0774'
-      ignore_errors: true
-
-#---
-
-- name: System K3S
-  hosts: raspberries:&local
-  tags: never
-
-  tasks:
-    - name: prepare inventory for k3s external playbook
-      tags: always
-      ansible.builtin.add_host:
-        hostname: "{{ item }}"
-        groups:
-          - k3s_cluster
-          - "{{ ansible_loop.first | ternary('server', 'agent') }}"
-      loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
-      loop_control:
-        extended: true
-        extended_allitems: false
-
-    - name: Install collection k3s.orchestration
-      local_action:
-        module: community.general.ansible_galaxy_install
-        type: collection
-        name: git+https://github.com/k3s-io/k3s-ansible
-      run_once: true
-
-- name: k3s
-  tags: never
-  ansible.builtin.import_playbook: k3s.orchestration.site
-  # ansible.builtin.import_playbook: k3s.orchestration.upgrade
-  # ansible.builtin.import_playbook: k3s.orchestration.reset
-  vars:
-    k3s_version: v1.32.7+k3s1
-    extra_server_args: "--docker --disable traefik"
-    extra_agent_args: "--docker"
-    api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
-
-- name: how to reach k3s
-  hosts: server
-  tasks:
-    - name: copy /etc/rancher/k3s/k3s.yaml to ~/.kube/config from the k3s server and replace 127.0.0.1 with the server ip or hostname
-      run_once: true
-      block:
-        - ansible.builtin.fetch:
-            src: /etc/rancher/k3s/k3s.yaml
-            dest: ~/.kube/config
-            flat: true
-          become: true
-          run_once: true
-        - local_action:
-            module: ansible.builtin.replace
-            path: ~/.kube/config
-            regexp: 'server: https://127.0.0.1:6443'
-            replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'
-
-    # - name: setup hard disk
-    #   tags: never
-    #   ansible.builtin.import_playbook: ./setup/hard_disk_v2.yml
-    #   # vars:
-    #   #   hard_disk__partitions:
-    #   #     nfs: []
-
-    - name: setup longhorn for volumes https://docs.k3s.io/helm
-      become: true
-      ansible.builtin.copy:
-        dest: /var/lib/rancher/k3s/server/manifests/longhorn-install.yaml
-        content: |-
-          apiVersion: helm.cattle.io/v1
-          kind: HelmChart
-          metadata:
-            annotations:
-              helmcharts.cattle.io/managed-by: helm-controller
-            finalizers:
-              - wrangler.cattle.io/on-helm-chart-remove
-            generation: 1
-            name: longhorn-install
-            namespace: kube-system
-          spec:
-            version: v1.9.1
-            chart: longhorn
-            repo: https://charts.longhorn.io
-            failurePolicy: abort
-            targetNamespace: longhorn-system
-            createNamespace: true
-            valuesContent: |-
-              defaultSettings:
-                defaultDataPath: /mnt/arcodange/longhorn
-      vars:
-        longhorn_helm_values: {} # https://github.com/longhorn/longhorn/blob/master/chart/values.yaml
-
-    - name: customize k3s traefik configuration https://docs.k3s.io/helm
-      block:
-        - name: Get my public IP
-          community.general.ipify_facts:
-        - become: true
-          ansible.builtin.copy:
-            dest: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
-            content: |-
-              apiVersion: v1
-              data:
-                dynamic.yaml: |-
-                  {{ traefik_config_yaml | to_nice_yaml | indent( width=4 ) }}
-              kind: ConfigMap
-              metadata:
-                name: traefik-configmap
-                namespace: kube-system
-              ---
-              apiVersion: helm.cattle.io/v1
-              kind: HelmChart
-              metadata:
-                name: traefik
-                namespace: kube-system
-              spec:
-                repo: https://traefik.github.io/charts
-                chart: traefik
-                version: v37.0.0
-                targetNamespace: kube-system
-                valuesContent: |-
-                  {{ traefik_helm_values | to_nice_yaml | indent( width=4 ) }}
-      vars:
-        traefik_config_yaml:
-          http:
-            services:
-              gitea:
-                loadBalancer:
-                  servers:
-                    - url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000"
-            routers:
-              acme-challenge:
-                rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
-                service: acme-http@internal
-                tls:
-                  certResolver: letsencrypt
-                  domains:
-                    - main: "arcodange.duckdns.org"
-                      sans:
-                        - "*.arcodange.duckdns.org"
-                entryPoints:
-                  - websecure
-                  - web
-              gitea:
-                rule: Host(`gitea.arcodange.duckdns.org`)
-                service: gitea
-                middlewares:
-                  - localIp
-                tls:
-                  certResolver: letsencrypt
-                  domains:
-                    - main: "arcodange.duckdns.org"
-                      sans:
-                        - "gitea.arcodange.duckdns.org"
-                entrypoints:
-                  - websecure
-            middlewares:
-              localIp:
-                ipAllowList:
-                  sourceRange:
-                    - "192.168.1.0/24"
-                    - "{{ ipify_public_ip }}/32"
-                    # - "0.0.0.0/0"
-                  # ipStrategy:
-                  #   depth: 1
-        traefik_helm_values:
-          deployment:
-            kind: "Deployment"
-            initContainers:
-              - name: volume-permissions
-                image: busybox:latest
-                command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"]
-                volumeMounts:
-                  - name: data
-                    mountPath: /data
-          # default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
-          # current is https://github.com/traefik/traefik-helm-chart/blob/v30.1.0/traefik/values.yaml
-          nodeSelector:
-            node-role.kubernetes.io/master: 'true' # make predictable choice of node to direct https traffic to this node and avoid NAT/loss of client IP
-          service:
-            spec:
-              externalTrafficPolicy: Local
-          ports:
-            traefik:
-              expose:
-                default: true
-          ingressRoute:
-            dashboard:
-              enabled: true
-          globalArguments: [] # deactivate --global.sendanonymoususage
-          env:
-            - name: POD_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.name
-            - name: POD_NAMESPACE
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.namespace
-            - name: LEGO_DISABLE_CNAME_SUPPORT
-              value: 'true'
-          logs:
-            general:
-              level: DEBUG
-              # format: json
-            access:
-              enabled: true
-              # format: json
-          podSecurityContext:
-            runAsGroup: 65532
-            runAsNonRoot: true
-            runAsUser: 65532
-            fsGroup: 65532 # else the persistent volume might be owned by root and be unwriteable
-          persistence:
-            # -- Enable persistence using Persistent Volume Claims
-            # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
-            # It can be used to store TLS certificates, see `storage` in certResolvers
-            enabled: true
-            name: data
-            # existingClaim: ""
-            accessMode: ReadWriteOnce
-            size: 128Mi
-            storageClass: "longhorn"
-            # volumeName: ""
-            path: /data
-            annotations: {}
-          volumes:
-            - name: traefik-configmap
-              mountPath: /config
-              type: configMap
-          additionalArguments:
-            - '--providers.file.filename=/config/dynamic.yaml'
-            - '--providers.kubernetesingress.ingressendpoint.publishedservice=kube-system/traefik'
-          certificatesResolvers:
-            letsencrypt:
-              acme:
-                # for challenge options cf. https://doc.traefik.io/traefik/https/acme/
-                email: arcodange@gmail.com
-                tlsChallenge: true
-                dnsChallenge:
-                  # requires env variable DUCKDNS_TOKEN
-                  provider: duckdns
-                  propagation:
-                    delayBeforeChecks: 120
-                    disableChecks: true
-                  resolvers:
-                    - "1.1.1.1:53"
-                    - "8.8.8.8:53"
-                httpChallenge:
-                  entryPoint: "web"
-                # It has to match the path with a persistent volume
-                storage: /data/acme.json
-          envFrom:
-            - secretRef:
-                name: traefik-duckdns-token
-                # MY_TOKEN=<my token (see https://www.duckdns.org/domains)>
-                # kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system
-    - name: touch manifests/traefik.yaml to trigger update
-      ansible.builtin.file:
-        path: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
-        state: touch
-      become: true
-
-# ---
-
-- name: redeploy traefik
-  hosts: localhost
-  tasks:
-    - name: delete old traefik deployment
-      kubernetes.core.k8s:
-        api_version: v1
-        name: traefik
-        kind: Deployment
-        namespace: kube-system
-        state: "absent"
-    - name: delete old deployment job so the k3s helm controller redeploy with our new configuration
-      kubernetes.core.k8s:
-        api_version: batch/v1
-        name: helm-install-traefik
-        kind: Job
-        namespace: kube-system
-        state: "absent"
-    - name: get traefik deployment
-      kubernetes.core.k8s_info:
-        api_version: v1
-        name: traefik
-        kind: Deployment
-        namespace: kube-system
-        wait: true
-      register: traefik_deployment
-    - ansible.builtin.debug:
-        var: traefik_deployment
+- name: system
+  ansible.builtin.import_playbook: ./system/system.yml
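Most of the deleted play relied on one mechanism that survives elsewhere in the repo: k3s watches /var/lib/rancher/k3s/server/manifests/ and auto-applies whatever lands there, including HelmChart resources handled by its built-in helm controller. A stripped-down sketch of the pattern (manifest name and chart are placeholders):

```yaml
# Drop a manifest into the k3s server's manifests directory and it is
# applied automatically; touching the file later re-triggers a sync.
- hosts: server
  become: true
  tasks:
    - ansible.builtin.copy:
        dest: /var/lib/rancher/k3s/server/manifests/example.yaml   # hypothetical file name
        content: |
          apiVersion: helm.cattle.io/v1
          kind: HelmChart
          metadata:
            name: example
            namespace: kube-system
          spec:
            repo: https://charts.example.invalid   # placeholder chart repo
            chart: example
    - ansible.builtin.file:
        path: /var/lib/rancher/k3s/server/manifests/example.yaml
        state: touch   # forces the helm controller to reconcile again
```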
@@ -27,17 +27,18 @@
      container_name: gitea_action
      restart: always
      environment:
        CONFIG_FILE: /config.yaml
        GITEA_INSTANCE_URL: >-
          http://{{ hostvars[groups.gitea[0]].ansible_host }}:3000
        GITEA_RUNNER_REGISTRATION_TOKEN: "{{ gitea_runner_token_cmd.stdout }}"
        GITEA_RUNNER_NAME: arcodange_global_runner_{{ inventory_hostname }}
        # GITEA_RUNNER_LABELS: host={{ansible_host}},env=any
+        GITEA_RUNNER_LABELS: ubuntu-latest:docker://gitea.arcodange.lab/arcodange-org/runner-images:ubuntu-latest-ca,ubuntu-latest-ca:docker://gitea.arcodange.lab/arcodange-org/runner-images:ubuntu-latest-ca
      volumes:
        - /var/run/docker.sock:/var/run/docker.sock
        - /etc/timezone:/etc/timezone:ro
        - /etc/localtime:/etc/localtime:ro
-      extra_hosts:
-        gitea.arcodange.duckdns.org: '{{ lookup("dig", "gitea.arcodange.duckdns.org") }}'
+        - /etc/ssl/certs:/etc/ssl/certs:ro
+        - /usr/local/share/ca-certificates/:/usr/local/share/ca-certificates/:ro
      configs:
        - config.yaml
    configs:
@@ -78,9 +79,8 @@
    # If it's empty when registering, it will ask for inputting labels.
    # If it's empty when execute `daemon`, will use labels in `.runner` file.
    labels:
-      - "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
-      - "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
-      - "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
+      - "ubuntu-latest:docker://gitea.arcodange.lab/arcodange-org/runner-images:ubuntu-latest-ca"
+      - "ubuntu-latest-ca:docker://gitea.arcodange.lab/arcodange-org/runner-images:ubuntu-latest-ca"

    cache:
      # Enable cache server to use actions/cache.
@@ -131,7 +131,7 @@
    # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
    docker_host: ""
    # Pull docker image(s) even if already present
-    force_pull: true
+    force_pull: false
    # Rebuild docker image(s) even if already present
    force_rebuild: false
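Both label entries point at the same CA-enabled image, so existing workflows keep working with `runs-on: ubuntu-latest` while new ones can opt in explicitly. A sketch of a workflow job picking the label:

```yaml
# The label left of the colon is what jobs request; the docker:// target
# is the image the runner actually starts. The image is expected to ship
# the homelab CA (an assumption based on the :ubuntu-latest-ca tag).
jobs:
  build:
    runs-on: ubuntu-latest-ca   # or ubuntu-latest; both resolve to the same image here
    steps:
      - uses: actions/checkout@v4
      - run: curl https://gitea.arcodange.lab   # TLS should verify against the bundled CA
```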
@@ -143,193 +143,240 @@
      community.docker.docker_compose_v2:
        project_src: "/home/pi/arcodange/docker_composes/arcodange_factory_gitea_action"
        pull: missing
-        state: present
+        state: "{{ docker_compose_down_then_up }}"
      register: deploy_result
+      loop: ["absent", "present"]
+      loop_control:
+        loop_var: docker_compose_down_then_up

-    - name: Set PACKAGES_TOKEN secret to upload packages from CI
-      run_once: True
-      block:
-        - name: Generate cicd PACKAGES_TOKEN
-          include_role:
-            name: arcodange.factory.gitea_token
-          vars:
-            gitea_token_name: PACKAGES_TOKEN
-            gitea_token_fact_name: cicd_PACKAGES_TOKEN
-            gitea_token_scopes: write:package
-            gitea_token_replace: true
+    # - name: Set PACKAGES_TOKEN secret to upload packages from CI
+    #   run_once: True
+    #   block:
+    #     - name: Generate cicd PACKAGES_TOKEN
+    #       include_role:
+    #         name: arcodange.factory.gitea_token
+    #       vars:
+    #         gitea_token_name: PACKAGES_TOKEN
+    #         gitea_token_fact_name: cicd_PACKAGES_TOKEN
+    #         gitea_token_scopes: write:package
+    #         gitea_token_replace: true

-        - name: Register cicd PACKAGES_TOKEN secrets
-          include_role:
-            name: arcodange.factory.gitea_secret
-          vars:
-            gitea_secret_name: PACKAGES_TOKEN
-            gitea_secret_value: "{{ cicd_PACKAGES_TOKEN }}"
-          loop: ["organization", "user"]
-          loop_control:
-            loop_var: gitea_owner_type # can be "user" or "organization"
+    #     - name: Register cicd PACKAGES_TOKEN secrets
+    #       include_role:
+    #         name: arcodange.factory.gitea_secret
+    #       vars:
+    #         gitea_secret_name: PACKAGES_TOKEN
+    #         gitea_secret_value: "{{ cicd_PACKAGES_TOKEN }}"
+    #       loop: ["organization", "user"]
+    #       loop_control:
+    #         loop_var: gitea_owner_type # can be "user" or "organization"

+    # - name: Set HOMELAB_CA_CERT secret to validate self signed ssl
+    #   run_once: True
+    #   block:
+    #     - name: Download homelab CA certificate
+    #       ansible.builtin.uri:
+    #         url: "https://ssl-ca.arcodange.lab:8443/roots.pem"
+    #         return_content: yes
+    #         validate_certs: no
+    #       register: homelab_ca_cert
+    #     - name: Debug cert
+    #       debug:
+    #         msg: "{{ homelab_ca_cert.content }}..."
+    #     - name: Register cicd HOMELAB_CA_CERT secrets
+    #       include_role:
+    #         name: arcodange.factory.gitea_secret
+    #       vars:
+    #         gitea_secret_name: HOMELAB_CA_CERT
+    #         gitea_secret_value: "{{ homelab_ca_cert.content | b64encode }}"
+    #       loop: ["organization", "user"]
+    #       loop_control:
+    #         loop_var: gitea_owner_type # can be "user" or "organization"

-  post_tasks:
-    - include_role:
-        name: arcodange.factory.gitea_token
-      vars:
-        gitea_token_delete: true
+  # post_tasks:
+  #   - include_role:
+  #       name: arcodange.factory.gitea_token
+  #     vars:
+  #       gitea_token_delete: true

-- name: Deploy Argo CD
-  hosts: localhost
-  roles:
-    - role: arcodange.factory.gitea_token # generate gitea_api_token used to replace generated token with set name if required
-      tags:
-        - gitea_sync
-  tasks:
-    - name: Set factory repo
-      include_role:
-        name: arcodange.factory.gitea_repo
-      vars:
-        gitea_repo_name: factory
-    - name: Sync other repos
-      tags: gitea_sync
-      include_role:
-        name: arcodange.factory.gitea_sync
-        apply:
-          tags: gitea_sync
-    - name: Generate Argo CD token
-      include_role:
-        name: arcodange.factory.gitea_token
-      vars:
-        gitea_token_name: ARGOCD_TOKEN
-        gitea_token_fact_name: argocd_token
-        gitea_token_scopes: read:repository,read:package
-        gitea_token_replace: true
-    - name: Figure out k3s master node
-      shell:
-        kubectl get nodes -l node-role.kubernetes.io/master=true -o name | sed s'#node/##'
-      register: get_k3s_master_node
-      changed_when: false
-    - name: Get kubernetes server internal url
-      command: >-
-        echo https://kubernetes.default.svc
-      # {%raw%}
-      # kubectl get svc/kubernetes -o template="{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}"
-      # {%endraw%}
-      register: get_k3s_internal_server_url
-      changed_when: false
-    - set_fact:
-        k3s_master_node: "{{ get_k3s_master_node.stdout }}"
-        k3s_internal_server_url: "{{ get_k3s_internal_server_url.stdout }}"
-    - name: Install Argo CD
-      become: true
-      delegate_to: "{{ k3s_master_node }}"
-      vars:
-        gitea_credentials:
-          username: arcodange
-          password: "{{ argocd_token }}"
-        argocd_helm_values: # https://github.com/argoproj/argo-helm/blob/main/charts/argo-cd/values.yaml
-          global:
-            domain: argocd.arcodange.duckdns.org
-          configs:
-            params:
-              server.insecure: true # let k3s traefik do TLS termination
-      ansible.builtin.copy:
-        dest: /var/lib/rancher/k3s/server/manifests/argocd.yaml
-        content: |-
-          apiVersion: v1
-          kind: Namespace
-          metadata:
-            name: argocd
-          ---
-          apiVersion: helm.cattle.io/v1
-          kind: HelmChart
-          metadata:
-            name: argocd
-            namespace: kube-system
-          spec:
-            repo: https://argoproj.github.io/argo-helm
-            chart: argo-cd
-            targetNamespace: argocd
-            valuesContent: |-
-              {{ argocd_helm_values | to_nice_yaml | indent( width=4 ) }}
-          ---
-          apiVersion: networking.k8s.io/v1
-          kind: Ingress
-          metadata:
-            name: argocd-server-ingress
-            namespace: argocd
-            annotations:
-              # For Traefik v2.x
-              traefik.ingress.kubernetes.io/router.entrypoints: websecure
-              traefik.ingress.kubernetes.io/router.tls: "true"
-              traefik.ingress.kubernetes.io/router.tls.certresolver: letsencrypt
-              traefik.ingress.kubernetes.io/router.tls.domains.0.main: arcodange.duckdns.org
-              traefik.ingress.kubernetes.io/router.tls.domains.0.sans: argocd.arcodange.duckdns.org
-              traefik.ingress.kubernetes.io/router.middlewares: localIp@file
-          spec:
-            rules:
-              - host: argocd.arcodange.duckdns.org
-                http:
-                  paths:
-                    - path: /
-                      pathType: Prefix
-                      backend:
-                        service:
-                          name: argocd-server
-                          port:
-                            number: 80 #TLS is terminated at Traefik
-          ---
-          apiVersion: v1
-          kind: Secret
-          metadata:
-            name: gitea-arcodangeorg-factory-repo
-            namespace: argocd
-            labels:
-              argocd.argoproj.io/secret-type: repository
-          stringData:
-            type: git
-            url: https://gitea.arcodange.duckdns.org/arcodange-org/factory
-          ---
-          apiVersion: v1
-          kind: Secret
-          metadata:
-            name: gitea-arcodangeorg-repo-creds
-            namespace: argocd
-            labels:
-              argocd.argoproj.io/secret-type: repo-creds
-          stringData:
-            type: git
-            url: https://gitea.arcodange.duckdns.org/arcodange-org
-            password: {{ gitea_credentials.password }}
-            username: {{ gitea_credentials.username }}
-          ---
-          apiVersion: argoproj.io/v1alpha1
-          kind: Application
-          metadata:
-            name: factory
-            namespace: argocd
-          spec:
-            project: default
-            source:
-              repoURL: https://gitea.arcodange.duckdns.org/arcodange-org/factory
-              targetRevision: HEAD
-              path: argocd
-            destination:
-              server: {{ k3s_internal_server_url }}
-              namespace: argocd
-            syncPolicy:
-              automated:
-                prune: true
-                selfHeal: true
-    - name: touch manifests/argocd.yaml to trigger update
-      delegate_to: "{{ k3s_master_node }}"
-      ansible.builtin.file:
-        path: /var/lib/rancher/k3s/server/manifests/argocd.yaml
-        state: touch
-      become: true
-  post_tasks:
-    - include_role:
-        name: arcodange.factory.gitea_token
-        apply:
-          tags: gitea_sync
-      tags:
-        - gitea_sync
-      vars:
-        gitea_token_delete: true
+# - name: Deploy Argo CD
+#   hosts: localhost
+#   roles:
+#     - role: arcodange.factory.gitea_token # generate gitea_api_token used to replace generated token with set name if required
+#       tags:
+#         - gitea_sync
+#   tasks:
+#     - name: Set factory repo
+#       include_role:
+#         name: arcodange.factory.gitea_repo
+#       vars:
+#         gitea_repo_name: factory
+#     - name: Sync other repos
+#       tags: gitea_sync
+#       include_role:
+#         name: arcodange.factory.gitea_sync
+#         apply:
+#           tags: gitea_sync
+#     - name: Generate Argo CD token
+#       include_role:
+#         name: arcodange.factory.gitea_token
+#       vars:
+#         gitea_token_name: ARGOCD_TOKEN
+#         gitea_token_fact_name: argocd_token
+#         gitea_token_scopes: read:repository,read:package
+#         gitea_token_replace: true
+#     - name: Figure out k3s master node
+#       shell:
+#         kubectl get nodes -l node-role.kubernetes.io/control-plane=true -o name | sed s'#node/##'
+#       register: get_k3s_master_node
+#       changed_when: false
+#     - name: Get kubernetes server internal url
+#       command: >-
+#         echo https://kubernetes.default.svc
+#       # {%raw%}
+#       # kubectl get svc/kubernetes -o template="{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}"
+#       # {%endraw%}
+#       register: get_k3s_internal_server_url
+#       changed_when: false
+#     - set_fact:
+#         k3s_master_node: "{{ get_k3s_master_node.stdout }}"
+#         k3s_internal_server_url: "{{ get_k3s_internal_server_url.stdout }}"
+#     - name: Read Step CA root certificate from k3s master
+#       become: true
+#       delegate_to: "{{ k3s_master_node }}"
+#       slurp:
+#         src: /home/step/.step/certs/root_ca.crt
+#       register: step_ca_root_cert
+#     - name: Decode Step CA root certificate
+#       set_fact:
+#         step_ca_root_cert_pem: "{{ step_ca_root_cert.content | b64decode }}"
+#     - name: Install Argo CD
+#       become: true
+#       delegate_to: "{{ k3s_master_node }}"
+#       vars:
+#         gitea_credentials:
+#           username: arcodange
+#           password: "{{ argocd_token }}"
+#         argocd_helm_values: # https://github.com/argoproj/argo-helm/blob/main/charts/argo-cd/values.yaml
+#           global:
+#             domain: argocd.arcodange.lab
+#           configs:
+#             cm:
+#               kustomize.buildOptions: "--enable-helm"
+#               helm.enablePostRenderer: "true"
+#               exec.enabled: "true"
+#             params:
+#               server.insecure: true # let k3s traefik do TLS termination
+#       ansible.builtin.copy:
+#         dest: /var/lib/rancher/k3s/server/manifests/argocd.yaml
+#         content: |-
+#           apiVersion: v1
+#           kind: Namespace
+#           metadata:
+#             name: argocd
+#           ---
+#           apiVersion: v1
+#           kind: ConfigMap
+#           metadata:
+#             name: argocd-tls-certs-cm
+#             namespace: argocd
+#           data:
+#             gitea.arcodange.lab: |
+#               {{ step_ca_root_cert_pem | indent(4) }}
+#           ---
+#           apiVersion: helm.cattle.io/v1
+#           kind: HelmChart
+#           metadata:
+#             name: argocd
+#             namespace: kube-system
+#           spec:
+#             repo: https://argoproj.github.io/argo-helm
+#             chart: argo-cd
+#             targetNamespace: argocd
+#             valuesContent: |-
+#               {{ argocd_helm_values | to_nice_yaml | indent( width=4 ) }}
+#           ---
+#           apiVersion: networking.k8s.io/v1
+#           kind: Ingress
+#           metadata:
+#             name: argocd-server-ingress
+#             namespace: argocd
+#             annotations:
+#               # For Traefik v2.x
+#               traefik.ingress.kubernetes.io/router.entrypoints: websecure
+#               traefik.ingress.kubernetes.io/router.tls: "true"
+#               traefik.ingress.kubernetes.io/router.tls.certresolver: letsencrypt
+#               traefik.ingress.kubernetes.io/router.tls.domains.0.main: arcodange.lab
+#               traefik.ingress.kubernetes.io/router.tls.domains.0.sans: argocd.arcodange.lab
+#               traefik.ingress.kubernetes.io/router.middlewares: localIp@file
+#           spec:
+#             rules:
+#               - host: argocd.arcodange.lab
+#                 http:
+#                   paths:
+#                     - path: /
+#                       pathType: Prefix
+#                       backend:
+#                         service:
+#                           name: argocd-server
+#                           port:
+#                             number: 80 #TLS is terminated at Traefik
+#           ---
+#           apiVersion: v1
+#           kind: Secret
+#           metadata:
+#             name: gitea-arcodangeorg-factory-repo
+#             namespace: argocd
+#             labels:
+#               argocd.argoproj.io/secret-type: repository
+#           stringData:
+#             type: git
+#             url: https://gitea.arcodange.lab/arcodange-org/factory
+#           ---
+#           apiVersion: v1
+#           kind: Secret
+#           metadata:
+#             name: gitea-arcodangeorg-repo-creds
+#             namespace: argocd
+#             labels:
+#               argocd.argoproj.io/secret-type: repo-creds
+#           stringData:
+#             type: git
+#             url: https://gitea.arcodange.lab/arcodange-org
+#             password: {{ gitea_credentials.password }}
+#             username: {{ gitea_credentials.username }}
+#           ---
+#           apiVersion: argoproj.io/v1alpha1
+#           kind: Application
+#           metadata:
+#             name: factory
+#             namespace: argocd
+#           spec:
+#             project: default
+#             source:
+#               repoURL: https://gitea.arcodange.lab/arcodange-org/factory
+#               targetRevision: HEAD
+#               path: argocd
+#             destination:
+#               server: {{ k3s_internal_server_url }}
+#               namespace: argocd
+#             syncPolicy:
+#               automated:
+#                 prune: true
+#                 selfHeal: true
+#     - name: touch manifests/argocd.yaml to trigger update
+#       delegate_to: "{{ k3s_master_node }}"
+#       ansible.builtin.file:
+#         path: /var/lib/rancher/k3s/server/manifests/argocd.yaml
+#         state: touch
+#       become: true
+#   post_tasks:
+#     - include_role:
+#         name: arcodange.factory.gitea_token
+#         apply:
+#           tags: gitea_sync
+#       tags:
+#         - gitea_sync
+#       vars:
+#         gitea_token_delete: true
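One detail worth calling out in the Argo CD manifests above: the repo-creds secret type matches repositories by URL prefix, so a single arcodange-org entry supplies credentials for the factory repo and any other repo under that organization, while the per-repo repository secret only registers the repo itself. A sketch of the prefix behaviour:

```yaml
# Prefix matching: these credentials apply to every repo URL that starts
# with the org prefix, e.g. .../arcodange-org/factory above.
apiVersion: v1
kind: Secret
metadata:
  name: gitea-arcodangeorg-repo-creds
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repo-creds
stringData:
  type: git
  url: https://gitea.arcodange.lab/arcodange-org   # a prefix, not a full repo URL
  username: arcodange
  password: "<ARGOCD_TOKEN>"   # placeholder for the generated token
```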
@@ -1,6 +1,6 @@
---
-- name: setup cron report
-  ansible.builtin.import_playbook: cron_report.yml
+# - name: setup cron report
+#   ansible.builtin.import_playbook: cron_report.yml

- name: postgres
  ansible.builtin.import_playbook: postgres.yml
@@ -12,4 +12,10 @@
  ansible.builtin.import_playbook: gitea.yml
  vars:
    backup_root_dir: "/mnt/backups"
-    backup_dirname: "gitea"
+    backup_dirname: "gitea"
+
+- name: k3s_pvc
+  ansible.builtin.import_playbook: k3s_pvc.yml
+  vars:
+    backup_root_dir: "/mnt/backups"
+    backup_dirname: "k3s_pvc"
@@ -24,7 +24,7 @@
        name:
          - postfix
          - msmtp
-          - msmtp-mta
+          # - msmtp-mta # conflicts with recent pi setup - may be required by pi2 with old setup
          - mailutils
        state: present
        update_cache: yes
@@ -9,7 +9,7 @@
    gitea_user: "git"
    backup_dir: "{{ backup_root_dir }}/{{ backup_dirname }}"
    scripts_dir: "/home/pi/arcodange/docker_composes/gitea/scripts"
-    keep_days: 15
+    keep_days: 3

  tasks:
    - name: Ensure the backup directory exists
@@ -22,7 +22,7 @@
      set_fact:
        backup_cmd: >-
          docker exec -u {{ gitea_user }} {{ gitea_container_name }}
-          gitea dump --skip-log --skip-db --type tar.gz -c /data/gitea/conf/app.ini -C /data/gitea/ -f -
+          gitea dump --skip-log --skip-db --skip-package-data --type tar.gz -c /data/gitea/conf/app.ini -C /data/gitea/ -f -

    - name: test backup_cmd
      ansible.builtin.shell: |
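Because `gitea dump -f -` writes the archive to stdout, the wrapper script can stream it straight into a dated file with no temporary storage inside the container. Rendered with the play's defaults it looks roughly like this sketch (`gitea` as the container name is an assumption; it is whatever gitea_container_name resolves to):

```yaml
# Rendered form of backup_cmd, for illustration only.
- name: example rendered backup command
  ansible.builtin.shell: |
    docker exec -u git gitea \
      gitea dump --skip-log --skip-db --skip-package-data --type tar.gz \
        -c /data/gitea/conf/app.ini -C /data/gitea/ -f - \
      > /mnt/backups/gitea/backup_$(date +%Y%m%d).gitea.gz
```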
@@ -35,6 +35,7 @@
        content: |
          #!/bin/bash
          set -e
+          mkdir -p {{ backup_dir }}
          {{ backup_cmd }} > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).gitea.gz
          find {{ backup_dir }} -type f -name 'backup_*.gitea.gz' -mtime +{{ keep_days }} -delete
ansible/arcodange/factory/playbooks/backup/k3s_pvc.yml (new file, 83 lines)
@@ -0,0 +1,83 @@
---
- name: Backup K3S Persistent Volumes
  hosts: pi1
  gather_facts: yes
  become: yes

  vars:
    backup_dir: "{{ backup_root_dir }}/{{ backup_dirname }}"
    scripts_dir: "/opt/k3s_volumes"
    keep_days: 3

  tasks:
    - name: Ensure the backup directory exists
      file:
        path: "{{ backup_dir }}"
        state: directory
        mode: '0755'

    - name: Ensure the scripts directory exists
      file:
        path: "{{ scripts_dir }}"
        state: directory
        mode: '0755'

    - name: define backup command
      set_fact:
        backup_cmd: |-
          echo "
          $(kubectl get -A pv -o yaml)
          ---
          $(kubectl get -A pvc -o yaml)
          "

    - name: test backup_cmd
      ansible.builtin.shell: |
        {{ backup_cmd }} > /dev/null

    - name: Create the backup script
      copy:
        dest: "{{ scripts_dir }}/backup.sh"
        mode: '0755'
        content: |
          #!/bin/bash
          set -e
          mkdir -p {{ backup_dir }}
          {{ backup_cmd }} > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).volumes
          find {{ backup_dir }} -type f -name 'backup_*.volumes' -mtime +{{ keep_days }} -delete

          SCRIPTS_DIR="$(dirname "$(realpath "${BASH_SOURCE[0]}")")"
          {{ backup_cmd }} > $SCRIPTS_DIR/backup.volumes

    - name: Add a cron job to back up the k3s volumes every day at 4 am
      cron:
        name: "Backup K3S Volumes"
        minute: "0"
        hour: "4"
        user: root
        job: "{{ scripts_dir }}/backup.sh"

    - name: Create the restore script
      copy:
        dest: "{{ scripts_dir }}/restore.sh"
        mode: '0755'
        content: |
          #!/bin/bash
          set -e

          BACKUP_DIR="{{ backup_dir }}"

          if [ -z "$1" ]; then
            FILE=$(ls -1t "$BACKUP_DIR"/backup_*.volumes | head -n 1)
            echo "No date given, restoring the latest dump: $FILE"
          else
            FILE="$BACKUP_DIR/backup_$1.volumes"
            if [ ! -f "$FILE" ]; then
              echo "File $FILE not found"
              exit 1
            fi
          fi

          kubectl apply -f "$FILE"

          echo "k3s volume restore complete."
@@ -9,7 +9,7 @@
    postgres_user: "{{ postgres.dockercompose.services.postgres.environment.POSTGRES_USER }}"
    backup_dir: "{{ backup_root_dir }}/{{ backup_dirname }}"
    scripts_dir: "/home/pi/arcodange/docker_composes/postgres/scripts"
-    keep_days: 15
+    keep_days: 3

  tasks:
    - name: Ensure the backup directory exists
@@ -33,6 +33,7 @@
        content: |
          #!/bin/bash
          set -e
+          mkdir -p {{ backup_dir }}
          {{ backup_cmd }} | gzip > {{ backup_dir }}/backup_$(date +\%Y\%m\%d).sql.gz
          find {{ backup_dir }} -type f -name 'backup_*.sql.gz' -mtime +{{ keep_days }} -delete
ansible/arcodange/factory/playbooks/dns/dns.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
- name: pihole
  ansible.builtin.import_playbook: pihole.yml
ansible/arcodange/factory/playbooks/dns/pihole.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
- name: Install and configure Pi-hole on pi1
  hosts: raspberries:&local
  become: yes
  vars:

    pihole_custom_dns:
      ".arcodange.duckdns.org": "{{ hostvars['pi1'].preferred_ip }}"
      ".arcodange.lab": "{{ hostvars['pi1'].preferred_ip }}"
  roles:
    - pihole
@@ -0,0 +1,7 @@
pihole_primary: pi1
pihole_user_gravity: pihole_gravity
pihole_gravity_home: /var/lib/pihole_gravity
pihole_dns_domain: lab
pihole_ports: '8081o,443os,[::]:8081o,[::]:443os' # web interface
pihole_gravity_conf: /etc/gravity-sync/gravity-sync.conf # should not be changed
pihole_custom_dns: {}
@@ -0,0 +1,5 @@
---
- name: Restart Pi-hole
  service:
    name: pihole-FTL
    state: restarted
@@ -0,0 +1,75 @@
---
- name: Build DNS server list (exclude self)
  set_fact:
    pihole_dns_servers: >-
      {{
        groups['pihole']
        | reject('equalto', inventory_hostname)
        | map('extract', hostvars, 'preferred_ip')
        | list
      }}

# 1️⃣ Remove any old Pi-hole entries
- name: Remove existing Pi-hole nameservers
  lineinfile:
    path: /etc/resolv.conf
    regexp: '^nameserver ({{ pihole_dns_servers | join("|") }})$'
    state: absent
  when: pihole_dns_servers | length > 0

# 2️⃣ Insert the Pi-holes right after the search line
- name: Insert Pi-hole nameservers with priority
  lineinfile:
    path: /etc/resolv.conf
    insertafter: '^search'
    line: "nameserver {{ item }}"
    state: present
  loop: "{{ pihole_dns_servers }}"

# 3️⃣ Define per-interface priorities
- name: Set DNS priority mapping
  set_fact:
    interface_dns_priority:
      eth0: 50
      wlan0: 100

# 5️⃣ Configure the Pi-hole DNS servers on all active interfaces

- name: Get active connections
  command: nmcli -t -f NAME,DEVICE connection show --active
  register: active_connections
  changed_when: false

- name: Get current DNS for each active interface
  vars:
    iface_name: "{{ item.split(':')[1] }}"
    conn_name: "{{ item.split(':')[0] }}"
  loop: "{{ active_connections.stdout_lines }}"
  when: item.split(':')[1] in interface_dns_priority
  command: nmcli -g IP4.DNS connection show "{{ conn_name }}"
  register: current_dns
  changed_when: false

- name: Apply Pi-hole DNS if different
  vars:
    iface_name: "{{ item.split(':')[1] }}"
    conn_name: "{{ item.split(':')[0] }}"
  loop: "{{ active_connections.stdout_lines }}"
  when: item.split(':')[1] in interface_dns_priority
  command: >
    nmcli connection modify "{{ conn_name }}"
    ipv4.dns "{{ pihole_dns_servers | join(' ') }}"
    ipv4.ignore-auto-dns yes
    ipv4.dns-priority "{{ interface_dns_priority[iface_name] }}"
  register: dns_changed
  changed_when: dns_changed is defined and dns_changed.stdout != ""

- name: Reactivate interface if DNS changed
  vars:
    iface_name: "{{ item.split(':')[1] }}"
    conn_name: "{{ item.split(':')[0] }}"
  loop: "{{ active_connections.stdout_lines }}"
  when:
    - item.split(':')[1] in interface_dns_priority
    - dns_changed is defined and dns_changed.changed
  command: nmcli connection up "{{ conn_name }}"
@@ -0,0 +1,153 @@
---
# -------------------------------------------------------------------
# Gravity Sync HA setup – final version with SSH key rotation
# -------------------------------------------------------------------

- name: Determine primary Pi-hole
  set_fact:
    pihole_primary: "{{ groups['pihole'] | first }}"

- name: Set secondary Pi-hole hosts
  set_fact:
    pihole_secondaries: "{{ groups['pihole'] | difference([pihole_primary]) }}"

#################################################################
# 1️⃣ Ensure gravity user exists on all Pi-hole nodes
#################################################################

- name: Ensure gravity user exists
  user:
    name: "{{ pihole_user_gravity }}"
    home: "{{ pihole_gravity_home }}"
    shell: /bin/bash
    system: yes
    create_home: yes

- name: Create .ssh directory for gravity user
  file:
    path: "{{ pihole_gravity_home }}/.ssh"
    state: directory
    owner: "{{ pihole_user_gravity }}"
    group: "{{ pihole_user_gravity }}"
    mode: '0700'

#################################################################
# 2️⃣ Generate SSH key for each host (rotation at each run)
#################################################################

- name: Generate SSH keypair for gravity user
  openssh_keypair:
    path: "{{ pihole_gravity_home }}/.ssh/id_ed25519"
    type: ed25519
    owner: "{{ pihole_user_gravity }}"
    group: "{{ pihole_user_gravity }}"
    mode: '0600'
  register: gravity_key
  no_log: true

- name: Set gravity key in hostvars
  set_fact:
    gravity_pubkey: "{{ gravity_key.public_key }}"

- name: Clean authorized_keys for gravity user
  file:
    path: "{{ pihole_gravity_home }}/.ssh/authorized_keys"
    state: absent

- name: Authorize SSH keys from other Pi-hole hosts
  authorized_key:
    user: "{{ pihole_user_gravity }}"
    key: "{{ hostvars[item].gravity_pubkey }}"
    state: present
  loop: "{{ groups['pihole'] }}"
  when: inventory_hostname != item

- name: Add all Pi-hole hosts to known_hosts
  known_hosts:
    path: "{{ pihole_gravity_home }}/.ssh/known_hosts"
    name: "{{ item }}"
    key: "{{ lookup('pipe', 'ssh-keyscan -t ed25519 ' ~ item) }}"
    state: present
  loop: "{{ groups['pihole'] }}"
  when: inventory_hostname != item
  become: yes
  become_user: "{{ pihole_user_gravity }}"

#################################################################
# Install Gravity Sync binary if absent
#################################################################

- name: Check if Gravity Sync binary exists
  stat:
    path: /usr/local/bin/gravity-sync
  register: gravity_sync_bin

- name: Download installer
  get_url:
    url: https://raw.githubusercontent.com/vmstan/gs-install/main/gs-install.sh
    dest: /tmp/gs-install.sh
    mode: '0755'
  when: not gravity_sync_bin.stat.exists

- name: Give full sudo to gravity user
  copy:
    dest: /etc/sudoers.d/gravity-sync
    mode: '0440'
    content: "{{ pihole_user_gravity }} ALL=(ALL) NOPASSWD: ALL"
  when: not gravity_sync_bin.stat.exists

- name: Execute Gravity Sync installer non-interactively
  command: bash /tmp/gs-install.sh
  become: yes
  become_user: "{{ pihole_user_gravity }}"
  environment:
    HOME: "{{ pihole_gravity_home }}"
  when: not gravity_sync_bin.stat.exists

#################################################################
# Generate gravity-sync.conf for non-interactive use
#################################################################

- name: Set remote host for gravity-sync.conf
  set_fact:
    remote_pihole: "{{ (inventory_hostname == pihole_primary) | ternary(pihole_secondaries[0] ~ '.home', pihole_primary ~ '.home') }}"

- name: Create gravity-sync.conf file
  copy:
    dest: "{{ pihole_gravity_conf }}"
    owner: "{{ pihole_user_gravity }}"
    group: "{{ pihole_user_gravity }}"
    mode: '0600'
    content: |
      # REQUIRED SETTINGS
      REMOTE_HOST='{{ remote_pihole }}'
      REMOTE_USER='{{ pihole_user_gravity }}'

      # CUSTOM VARIABLES
      # LOCAL_PIHOLE_DIRECTORY='/etc/pihole'
      # REMOTE_PIHOLE_DIRECTORY='/etc/pihole'
      # LOCAL_FILE_OWNER='{{ pihole_user_gravity }}'
      # REMOTE_FILE_OWNER='{{ pihole_user_gravity }}'

      # LOCAL_DOCKER_CONTAINER='' # optional
      # REMOTE_DOCKER_CONTAINER='' # optional

- name: Create symlink for gravity-sync.rsa
  file:
    src: "{{ pihole_gravity_home }}/.ssh/id_ed25519"
    dest: /etc/gravity-sync/gravity-sync.rsa
    owner: "{{ pihole_user_gravity }}"
    group: "{{ pihole_user_gravity }}"
    mode: '0600'
    state: link

#################################################################
# Execute Gravity Sync with non-interactive config
#################################################################

- name: Run Gravity Sync script
  command: bash /usr/local/bin/gravity-sync
  become: yes
  become_user: "{{ pihole_user_gravity }}"
  environment:
    HOME: "{{ pihole_gravity_home }}"
@@ -0,0 +1,100 @@
#################################################################
# Bootstrap Pi-hole (manual installation expected)
#################################################################

- name: Show the manual Pi-hole installation command
  debug:
    msg: |
      Please install Pi-hole manually on this host with the following command:
      ------------------------------------------------------------
      curl -sSL https://install.pi-hole.net | sudo bash
      ------------------------------------------------------------
      The installation will be verified automatically within the next 10 minutes.

#################################################################
# Verify the Pi-hole installation
#################################################################

- name: Wait for Pi-hole to be installed (FTL DB)
  wait_for:
    path: /etc/pihole/pihole-FTL.db
    state: present
    timeout: 600 # 10 minutes
  register: pihole_config_ready

- name: Check that the pihole-FTL service is up
  wait_for:
    port: 53
    state: started
    timeout: 60
  when: pihole_config_ready is succeeded

#################################################################
# Pi-hole configuration (shared HA settings)
#################################################################

- name: Change the Pi-hole listening port
  replace:
    path: /etc/pihole/pihole.toml
    regexp: '^\s*port\s*=\s*".*"'
    replace: '  port = "{{ pihole_ports }}"'
  notify: Restart Pi-hole

- name: Allow Pi-hole to listen on all interfaces
  replace:
    path: /etc/pihole/pihole.toml
    regexp: '^\s*listeningMode\s*=\s*".*"'
    replace: '  listeningMode = "ALL"'
  notify: Restart Pi-hole

- name: Enable loading of /etc/dnsmasq.d
  lineinfile:
    path: /etc/pihole/pihole.toml
    regexp: '^\s*etc_dnsmasq_d\s*='
    line: '  etc_dnsmasq_d = true'
    state: present
  notify: Restart Pi-hole

#################################################################
# Custom DNS (wildcards + local entries)
#################################################################

- name: Validate custom DNS IPs
  assert:
    that:
      - ip is match('^([0-9]{1,3}\.){3}[0-9]{1,3}$')
    fail_msg: "Invalid IP for {{ fqdn }}"
  loop: "{{ pihole_custom_dns | dict2items }}"
  loop_control:
    label: "{{ item.key }}"
  vars:
    fqdn: "{{ item.key }}"
    ip: "{{ item.value }}"

- name: Generate the custom DNS rules (wildcards + FQDNs)
  copy:
    dest: /etc/dnsmasq.d/10-custom-rules.conf
    owner: root
    group: root
    mode: '0644'
    content: |
      # Generated by Ansible – Pi-hole custom DNS rules
      {% for fqdn, ip in pihole_custom_dns.items() %}
      address=/{{ fqdn }}/{{ ip }}
      {% endfor %}
  when: pihole_custom_dns | length > 0
  notify: Restart Pi-hole

- name: Create local DNS entries for the RPis
  copy:
    dest: /etc/dnsmasq.d/20-rpis.conf
    owner: root
    group: root
    mode: '0644'
    content: |
      # Generated by Ansible – Raspberry Pi local DNS
      {% for host in groups['raspberries']
         if hostvars[host].preferred_ip is defined %}
      address=/{{ host }}.home/{{ hostvars[host].preferred_ip }}
      {% endfor %}
  notify: Restart Pi-hole
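With the pihole_custom_dns map defined in pihole.yml earlier in this diff, the generated /etc/dnsmasq.d/10-custom-rules.conf renders roughly as below (192.168.1.201 is pi1's preferred_ip from the inventory). dnsmasq's address=/ syntax matches the named domain and every subdomain under it, which is what gives the wildcard behaviour:

```
# Generated by Ansible – Pi-hole custom DNS rules
address=/.arcodange.duckdns.org/192.168.1.201
address=/.arcodange.lab/192.168.1.201
```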
@@ -0,0 +1,11 @@
---
- name: Setup Pi-hole HA
  include_tasks: ha_pihole_setup.yml
  when: "'pihole' in group_names"

- name: Setup Gravity Sync
  include_tasks: gravity_setup.yml
  when: "'pihole' in group_names"

- name: Setup DNS client
  include_tasks: client_setup.yml
@@ -37,6 +37,9 @@
        namespace: "{{ namespace_longhorn }}"
        name: "{{ backup_volume_name }}"
      register: pvc_info
+      retries: 3
+      delay: 3
+      until: pvc_info.resources is defined

    - name: Extract the volume name
      set_fact:
@@ -57,7 +60,7 @@
        name: "{{ recurring_job }}"
        groups: []
        task: backup
-        cron: "0 5 1,10,20 * *"
+        cron: "0 5 */2 * *"
        retain: 2
        concurrency: 1
@@ -75,29 +78,37 @@
        path: "/metadata/labels/recurring-job.longhorn.io~1{{ recurring_job }}"
        value: "enabled"

-    - name: Launch a temporary pod to trigger NFS
+    - name: Launch a Deployment to trigger NFS
      tags: never
      kubernetes.core.k8s:
        state: present
        definition:
-          apiVersion: v1
-          kind: Pod
+          apiVersion: apps/v1
+          kind: Deployment
          metadata:
            name: rwx-nfs
            namespace: "{{ namespace_longhorn }}"
          spec:
-            containers:
-              - name: busybox
-                image: busybox
-                command: ["sleep", "infinity"]
-                # command: ["sh", "-c", "sleep 600"]
-                volumeMounts:
-                  - mountPath: "/mnt/backups"
-                    name: backup-vol
-            volumes:
-              - name: backup-vol
-                persistentVolumeClaim:
-                  claimName: "{{ backup_volume_name }}"
+            replicas: 1
+            selector:
+              matchLabels:
+                app: rwx-nfs
+            template:
+              metadata:
+                labels:
+                  app: rwx-nfs
+              spec:
+                containers:
+                  - name: busybox
+                    image: busybox
+                    command: ["sleep", "infinity"]
+                    volumeMounts:
+                      - mountPath: "/mnt/backups"
+                        name: backup-vol
+                volumes:
+                  - name: backup-vol
+                    persistentVolumeClaim:
+                      claimName: "{{ backup_volume_name }}"

    - name: Wait for the rwx-nfs pod to be Running
      tags: never
@@ -105,7 +116,8 @@
|
||||
api_version: v1
|
||||
kind: Pod
|
||||
namespace: "{{ namespace_longhorn }}"
|
||||
name: rwx-nfs
|
||||
label_selectors:
|
||||
- app = rwx-nfs
|
||||
register: pod_info
|
||||
until: pod_info.resources[0].status.phase == "Running"
|
||||
retries: 30
|
||||
|
||||
@@ -126,14 +126,14 @@
  debug:
    msg: >-
      Clé SSH ajoutée avec succès.
      Visitez https://gitea.arcodange.duckdns.org/user/settings/keys?verify_ssh={{ add_ssh_key_result.json.fingerprint }}
      Visitez https://gitea.arcodange.lab/user/settings/keys?verify_ssh={{ add_ssh_key_result.json.fingerprint }}
      pour vérifier la signature de vos commits avec cette clé.

- set_fact:
    gitea_org_name: arcodange-org
    gitea_org_full_name: Arcodange
    gitea_org_description: '🏹💻🪽'
    gitea_org_website: https://www.arcodange.duckdns.org
    gitea_org_website: https://www.arcodange.fr
    gitea_org_location: Paris
    gitea_org_avatar_img_path: '{{ inventory_dir }}/../img/arcodange-org.jpeg'

@@ -55,3 +55,123 @@
        loop_var: database__pg_instruction
      loop:
        "{{ ['postgres', 'gitea'] | product(pg_instructions) }}"

# ---

- name: Change table owner (CronJob with dynamic roles and auto DB naming)
  hosts: localhost
  connection: local
  gather_facts: false

  collections:
    - kubernetes.core

  vars:

    namespace: kube-system
    cronjob_name: pg-fix-table-ownership

    pg_conf: >-
      {{ hostvars[groups.postgres[0]].postgres.dockercompose.services.postgres.environment }}
    postgres_admin_credentials:
      username: '{{ pg_conf.POSTGRES_USER }}'
      password: '{{ pg_conf.POSTGRES_PASSWORD }}'
    pg_host: "{{ hostvars[groups.postgres[0]]['preferred_ip'] }}"

  tasks:

    - name: Create Kubernetes Secret for PostgreSQL admin credentials
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: Secret
          metadata:
            name: postgres-admin-credentials
            namespace: "{{ namespace }}"
          type: Opaque
          data:
            username: "{{ postgres_admin_credentials.username | b64encode }}"
            password: "{{ postgres_admin_credentials.password | b64encode }}"

    - name: Create cronjob to change table owners (dynamic roles, auto DB)
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: batch/v1
          kind: CronJob
          metadata:
            name: "{{ cronjob_name }}"
            namespace: "{{ namespace }}"
          spec:
            schedule: "0 3 * * *" # Exécution quotidienne à 3h du matin
            successfulJobsHistoryLimit: 1
            failedJobsHistoryLimit: 3
            jobTemplate:
              spec:
                backoffLimit: 0
                template:
                  spec:
                    restartPolicy: Never
                    containers:
                      - name: psql
                        image: postgres:16.3
                        envFrom:
                          - secretRef:
                              name: postgres-admin-credentials
                        env:
                          - name: PGPASSWORD
                            valueFrom:
                              secretKeyRef:
                                name: postgres-admin-credentials
                                key: password
                        command:
                          - /bin/sh
                          - -c
                        args:
                          - |
                            set -eu

                            # Récupérer dynamiquement les rôles PostgreSQL
                            echo "Fetching roles from PostgreSQL..."
                            ROLES=$(psql \
                              -h {{ pg_host }} \
                              -U $username \
                              -d postgres \
                              -t -A \
                              -c "SELECT rolname FROM pg_roles WHERE rolname LIKE '%_role';")

                            echo "Roles found: $ROLES"

                            # Pour chaque rôle, changer le propriétaire des tables dans sa base associée
                            for role in $ROLES; do
                              # Déduire le nom de la base en retirant "_role"
                              DB_NAME="${role%_role}"
                              echo "Database for $role: $DB_NAME"

                              # Vérifier si la base existe
                              if psql -h {{ pg_host }} -U $username -d postgres -t -A -c "SELECT 1 FROM pg_database WHERE datname = '$DB_NAME';" | grep -q 1; then
                                echo "Changing owner to $role for all tables in $DB_NAME..."
                                psql \
                                  -h {{ pg_host }} \
                                  -U $username \
                                  -d "$DB_NAME" \
                                  -c "
                                    DO \$\$
                                    DECLARE
                                      r RECORD;
                                    BEGIN
                                      FOR r IN
                                        SELECT tablename
                                        FROM pg_tables
                                        WHERE schemaname = 'public'
                                      LOOP
                                        EXECUTE format('ALTER TABLE public.%I OWNER TO %I', r.tablename, '$role');
                                      END LOOP;
                                    END \$\$;
                                  "
                                echo "Owner changed for $role in $DB_NAME"
                              else
                                echo "Database $DB_NAME does not exist, skipping..."
                              fi
                            done

@@ -3,7 +3,7 @@ APP_NAME = Arcodange repositories
[server]
DOMAIN = localhost
HTTP_PORT = 3000
ROOT_URL = https://gitea.arcodange.duckdns.org/
ROOT_URL = https://gitea.arcodange.lab/
DISABLE_SSH = false
SSH_PORT = 22
START_SSH_SERVER = true

@@ -0,0 +1,21 @@
step_ca_primary: pi1
step_ca_user: step
step_ca_home: /home/step
step_ca_dir: /home/step/.step

step_ca_name: "Arcodange Lab CA"
step_ca_fqdn: ssl-ca.arcodange.lab
step_ca_listen_address: ":8443"

step_ca_password: "{{ vault_step_ca_password }}"
step_ca_force_reinit: false

step_ca_provisioner_name: cert-manager
step_ca_provisioner_type: JWK
step_ca_jwk_dir: "{{ step_ca_dir }}/provisioners"
step_ca_jwk_key: "{{ step_ca_jwk_dir }}/cert-manager.jwk"
step_ca_jwk_password: "{{ vault_step_ca_jwk_password }}"
step_ca_jwk_password_file: "{{ step_ca_dir }}/secrets/cert-manager.jwk.pass"

step_ca_url: "https://{{ step_ca_fqdn }}{{ step_ca_listen_address }}"
step_ca_root: "{{ step_ca_dir }}/certs/root_ca.crt"
@@ -0,0 +1,4 @@
- name: restart step-ca
  systemd:
    name: step-ca
    state: restarted
@@ -0,0 +1,67 @@
# can be called with -e step_ca_force_reinit=true

# 1️⃣ Vérifier si le CA est déjà initialisé
- name: Check if CA already initialized
  stat:
    path: "{{ step_ca_dir }}/config/ca.json"
  register: step_ca_initialized
  when: inventory_hostname == step_ca_primary

# 2️⃣ Arrêter step-ca si reinit forcée
- name: Stop step-ca service (reinit)
  systemd:
    name: step-ca
    state: stopped
  when:
    - inventory_hostname == step_ca_primary
    - step_ca_force_reinit | bool
  ignore_errors: true

# 3️⃣ Wipe complet du CA si reinit forcée
- name: Wipe existing step-ca data
  file:
    path: "{{ step_ca_dir }}"
    state: absent
  when:
    - inventory_hostname == step_ca_primary
    - step_ca_force_reinit | bool

# 4️⃣ Recréer le dossier CA proprement
- name: Recreate step-ca directory
  file:
    path: "{{ step_ca_dir }}"
    state: directory
    owner: "{{ step_ca_user }}"
    group: "{{ step_ca_user }}"
    mode: "0700"
  when:
    - inventory_hostname == step_ca_primary
    - step_ca_force_reinit | bool

# 5️⃣ Installer le fichier de mot de passe
- name: Install step-ca password file
  copy:
    dest: "{{ step_ca_home }}/.step-pass"
    content: "{{ step_ca_password }}"
    owner: "{{ step_ca_user }}"
    group: "{{ step_ca_user }}"
    mode: "0600"
  when: inventory_hostname == step_ca_primary

# 6️⃣ Initialiser step-ca (non interactif)
- name: Initialize step-ca
  become: true
  become_user: "{{ step_ca_user }}"
  command: >
    step ca init
    --name "{{ step_ca_name }}"
    --dns "{{ step_ca_fqdn }}"
    --address "{{ step_ca_listen_address }}"
    --provisioner admin
    --password-file {{ step_ca_home }}/.step-pass
  args:
    creates: "{{ step_ca_dir }}/config/ca.json"
  when:
    - inventory_hostname == step_ca_primary
    - step_ca_force_reinit | bool or not step_ca_initialized.stat.exists
  notify: restart step-ca
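To confirm the CA actually answers after initialization, a minimal sketch of a follow-up check (not part of the original role), assuming the defaults above are in scope and step-cli is installed on the node:

```yaml
# Hedged sketch: verify the freshly initialized CA responds
- name: Check step-ca health
  become: true
  become_user: "{{ step_ca_user }}"
  command: >
    step ca health
    --ca-url {{ step_ca_url }}
    --root {{ step_ca_root }}
  register: step_ca_health
  changed_when: false
  when: inventory_hostname == step_ca_primary
```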
@@ -0,0 +1,51 @@
- name: Install base packages
  apt:
    name:
      - curl
      - vim
      - gpg
      - ca-certificates
    state: present
    update_cache: yes
    install_recommends: no

- name: Download Smallstep apt signing key
  get_url:
    url: https://packages.smallstep.com/keys/apt/repo-signing-key.gpg
    dest: /etc/apt/trusted.gpg.d/smallstep.asc
    mode: "0644"

- name: Add Smallstep apt repository
  copy:
    dest: /etc/apt/sources.list.d/smallstep.list
    mode: "0644"
    content: |
      deb [signed-by=/etc/apt/trusted.gpg.d/smallstep.asc] https://packages.smallstep.com/stable/debian debs main

- name: Update apt cache
  apt:
    update_cache: yes

- name: Install step-cli and step-ca
  apt:
    name:
      - step-cli
      - step-ca
    state: present

- name: Create step user
  user:
    name: "{{ step_ca_user }}"
    system: true
    shell: /usr/sbin/nologin
    home: "{{ step_ca_home }}"

- name: Secure step directory
  file:
    path: "{{ step_ca_dir }}"
    owner: "{{ step_ca_user }}"
    group: "{{ step_ca_user }}"
    mode: "0700"
    recurse: yes
@@ -0,0 +1,5 @@
- import_tasks: install.yml
- import_tasks: init.yml
- import_tasks: sync.yml
- import_tasks: systemd.yml
- import_tasks: provisioners.yml
@@ -0,0 +1,73 @@
- name: Ensure provisioner directory exists
  file:
    path: "{{ step_ca_jwk_dir }}"
    state: directory
    owner: "{{ step_ca_user }}"
    group: "{{ step_ca_user }}"
    mode: "0700"
  when: inventory_hostname == step_ca_primary

- name: Check if JWK provisioner already exists
  command: >
    step ca provisioner list
    --ca-url {{ step_ca_url }}
    --root {{ step_ca_root }}
  register: step_ca_provisioners
  changed_when: false
  become: true
  become_user: "{{ step_ca_user }}"
  when: inventory_hostname == step_ca_primary

- name: Check if cert-manager provisioner exists
  set_fact:
    step_ca_provisioner_exists: >-
      {{
        (step_ca_provisioners.stdout | from_json
         | selectattr('name', 'equalto', step_ca_provisioner_name)
         | list
         | length) > 0
      }}
  when: inventory_hostname == step_ca_primary

- name: Install JWK password file
  copy:
    dest: "{{ step_ca_jwk_password_file }}"
    content: "{{ step_ca_jwk_password }}"
    owner: "{{ step_ca_user }}"
    group: "{{ step_ca_user }}"
    mode: "0400"
  when: inventory_hostname == step_ca_primary

- name: Generate JWK key for cert-manager
  command: >
    step crypto jwk create
    {{ step_ca_jwk_key }}.pub
    {{ step_ca_jwk_key }}
    --password-file "{{ step_ca_jwk_password_file }}"
  args:
    creates: "{{ step_ca_jwk_key }}"
  become: true
  become_user: "{{ step_ca_user }}"
  when: inventory_hostname == step_ca_primary

- name: Add JWK provisioner to step-ca
  command: >
    step ca provisioner add {{ step_ca_provisioner_name }}
    --type JWK
    --public-key {{ step_ca_jwk_key }}.pub
    --private-key {{ step_ca_jwk_key }}
  become: true
  become_user: "{{ step_ca_user }}"
  when:
    - inventory_hostname == step_ca_primary
    - step_ca_provisioner_name not in step_ca_provisioners.stdout
  notify: restart step-ca

- name: Secure JWK keys permissions
  file:
    path: "{{ step_ca_jwk_dir }}"
    owner: "{{ step_ca_user }}"
    group: "{{ step_ca_user }}"
    mode: "0700"
    recurse: yes
  when: inventory_hostname == step_ca_primary
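To sanity-check the provisioner end to end, a hedged sketch of a one-off certificate request against it; the subject `test.arcodange.lab` and the /tmp paths are illustrative, not part of the role:

```yaml
# Hedged sketch: request a throwaway certificate through the cert-manager JWK provisioner
- name: Request a test certificate via the JWK provisioner
  become: true
  become_user: "{{ step_ca_user }}"
  command: >
    step ca certificate test.arcodange.lab /tmp/test.crt /tmp/test.key
    --ca-url {{ step_ca_url }}
    --root {{ step_ca_root }}
    --provisioner {{ step_ca_provisioner_name }}
    --provisioner-password-file {{ step_ca_jwk_password_file }}
  args:
    creates: /tmp/test.crt
  when: inventory_hostname == step_ca_primary
```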
@@ -0,0 +1,121 @@
# 1️⃣ Lock sur le primaire (évite double sync concurrente)
- name: Create sync lock on primary
  file:
    path: "{{ step_ca_dir }}/.sync.lock"
    state: touch
    owner: "{{ step_ca_user }}"
    group: "{{ step_ca_user }}"
    mode: "0600"
  delegate_to: "{{ step_ca_primary }}"
  run_once: true

# 2️⃣ Calcul du checksum du CA sur le primaire
- name: Compute deterministic checksum of CA directory on primary
  shell: |
    set -o pipefail
    tar --sort=name \
        --mtime='UTC 1970-01-01' \
        --owner=0 --group=0 --numeric-owner \
        -cf - {{ step_ca_dir }} \
      | sha256sum | awk '{print $1}'
  args:
    executable: /bin/bash
  register: step_ca_primary_checksum
  changed_when: false
  delegate_to: "{{ step_ca_primary }}"
  run_once: true

# 3️⃣ Charger le checksum précédent (s'il existe)
# Le checksum vit hors de /tmp/step-ca-sync pour survivre au wipe final
- name: Load previous checksum (controller)
  slurp:
    src: /tmp/step-ca-sync.checksum
  register: step_ca_previous_checksum
  failed_when: false
  changed_when: false
  run_once: true
  become: false
  delegate_to: localhost

# 4️⃣ Décider si une synchronisation est nécessaire
- name: Decide if sync is required
  set_fact:
    step_ca_sync_required: >-
      {{
        step_ca_previous_checksum.content | default('') | b64decode
        != step_ca_primary_checksum.stdout
      }}
  run_once: true

- name: Ensure temporary sync directory exists on controller
  file:
    path: /tmp/step-ca-sync
    state: directory
    mode: "0700"
  delegate_to: localhost
  become: false
  run_once: true

# 5️⃣ Pull depuis le primaire vers le contrôleur
- name: Fetch CA data from primary to controller
  synchronize:
    rsync_path: "sudo -u {{ step_ca_user }} rsync"
    src: "{{ step_ca_dir }}/"
    dest: "/tmp/step-ca-sync/"
    mode: pull
    recursive: yes
    delete: no
  delegate_to: localhost
  become: false
  when: step_ca_sync_required
  run_once: true

# 6️⃣ Sauvegarder le nouveau checksum (controller)
- name: Save new checksum on controller
  copy:
    dest: /tmp/step-ca-sync.checksum
    content: "{{ step_ca_primary_checksum.stdout }}"
    mode: "0600"
  when: step_ca_sync_required
  run_once: true
  become: false
  delegate_to: localhost

# 7️⃣ Push vers les standby
- name: Push CA data to standby nodes
  synchronize:
    rsync_path: "sudo -u {{ step_ca_user }} rsync"
    src: "/tmp/step-ca-sync/"
    dest: "{{ step_ca_dir }}/"
    mode: push
    recursive: yes
    delete: no
  when:
    - inventory_hostname != step_ca_primary
    - step_ca_sync_required

- name: Wipe temporary CA sync directory on controller
  file:
    path: /tmp/step-ca-sync
    state: absent
  delegate_to: localhost
  run_once: true
  become: false
  when: step_ca_sync_required

# 8️⃣ Forcer permissions correctes (sécurité)
- name: Fix step directory permissions
  file:
    path: "{{ step_ca_dir }}"
    owner: "{{ step_ca_user }}"
    group: "{{ step_ca_user }}"
    mode: "0700"
    recurse: yes
  notify: restart step-ca

# 9️⃣ Retirer le lock sur le primaire
- name: Remove sync lock on primary
  file:
    path: "{{ step_ca_dir }}/.sync.lock"
    state: absent
  delegate_to: "{{ step_ca_primary }}"
  run_once: true
@@ -0,0 +1,23 @@
- name: Install step-ca systemd service
  template:
    src: step-ca.service.j2
    dest: /etc/systemd/system/step-ca.service
    mode: "0644"

- name: Reload systemd
  systemd:
    daemon_reload: yes

- name: Enable step-ca on primary
  systemd:
    name: step-ca
    enabled: yes
    state: started
  when: inventory_hostname == step_ca_primary

- name: Disable step-ca on standby nodes
  systemd:
    name: step-ca
    enabled: no
    state: stopped
  when: inventory_hostname != step_ca_primary
@@ -0,0 +1,15 @@
[Unit]
Description=Smallstep CA
After=network.target

[Service]
User={{ step_ca_user }}
Group={{ step_ca_user }}
ExecStart=/usr/bin/step-ca \
  --password-file {{ step_ca_home }}/.step-pass \
  {{ step_ca_dir }}/config/ca.json
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target

ansible/arcodange/factory/playbooks/ssl/ssl.yml
@@ -0,0 +1,98 @@
- name: step-ca
  ansible.builtin.import_playbook: step-ca.yml

- name: Fetch Step-CA root certificate
  hosts: localhost
  gather_facts: false
  vars:
    step_ca_primary: pi1
    step_ca_user: step
    step_ca_root: "/home/step/.step/certs/root_ca.crt"
    tmp_dir: "/tmp/step-ca-cert-manager"
  tasks:
    - name: Ensure local temp directory exists
      file:
        path: "{{ tmp_dir }}"
        state: directory
        mode: "0700"

    - name: Fetch root CA from step_ca_primary
      fetch:
        src: "{{ step_ca_root }}"
        dest: "{{ tmp_dir }}/root_ca.crt"
        flat: true
      delegate_to: "{{ step_ca_primary }}"
      become: true
      become_user: "{{ step_ca_user }}"
      run_once: true

    - name: Préparer le répertoire de build
      file:
        path: /tmp/gitea-runner-image
        state: directory
        mode: '0755'

    - name: Copier le root CA dans le contexte Docker
      copy:
        src: "{{ tmp_dir }}/root_ca.crt"
        dest: /tmp/gitea-runner-image/root_ca.crt
        mode: '0644'

    - name: Créer le Dockerfile pour l'image runner avec CA custom
      copy:
        dest: /tmp/gitea-runner-image/Dockerfile
        mode: '0644'
        content: |
          FROM gitea/runner-images:ubuntu-latest

          COPY root_ca.crt /usr/local/share/ca-certificates/root_ca.crt
          RUN update-ca-certificates

    - name: Builder l'image runner avec le CA
      community.docker.docker_image:
        name: gitea.arcodange.lab/arcodange-org/runner-images
        tag: ubuntu-latest-ca
        source: build
        build:
          path: /tmp/gitea-runner-image
        push: true

# - /etc/ssl/certs:/etc/ssl/certs:ro

# - name: Distribute Step-CA root certificate
#   hosts: all
#   gather_facts: true
#   become: true
#   vars:
#     root_ca_source: "/tmp/step-ca-cert-manager/root_ca.crt"
#     root_ca_filename: "arcodange-root.crt"

#   tasks:
#     - name: Ensure root CA file is copied to correct location
#       copy:
#         src: "{{ root_ca_source }}"
#         dest: "{{ ca_dest_path }}"
#         owner: root
#         group: root
#         mode: '0644'
#       vars:
#         ca_dest_path: >-
#           {% if ansible_facts['os_family'] == 'Debian' %}
#           /usr/local/share/ca-certificates/{{ root_ca_filename }}
#           {% elif ansible_facts['os_family'] in ['RedHat', 'Fedora'] %}
#           /etc/pki/ca-trust/source/anchors/{{ root_ca_filename }}
#           {% else %}
#           /etc/ssl/certs/{{ root_ca_filename }}
#           {% endif %}

#     - name: Update CA trust store
#       command: "{{ ca_update_command }}"
#       vars:
#         ca_update_command: >-
#           {% if ansible_facts['os_family'] == 'Debian' %}
#           update-ca-certificates
#           {% elif ansible_facts['os_family'] in ['RedHat', 'Fedora'] %}
#           update-ca-trust
#           {% else %}
#           echo 'Please update the CA trust manually'
#           {% endif %}

ansible/arcodange/factory/playbooks/ssl/step-ca.yml
@@ -0,0 +1,6 @@
---
- name: Setup step-ca on raspberries
  hosts: step_ca # raspberries:&local
  become: yes
  roles:
    - step_ca

@@ -0,0 +1,41 @@
- name: Install iSCSI client for Longhorn on Raspberry Pi
  hosts: raspberries:&local
  become: yes
  tasks:
    - name: Install open-iscsi
      ansible.builtin.apt:
        name: open-iscsi
        state: present
        update_cache: yes

    - name: Enable and start iSCSI service
      ansible.builtin.service:
        name: iscsid
        state: started
        enabled: yes

    - name: Installer cryptsetup
      ansible.builtin.apt:
        name: cryptsetup
        state: present
        update_cache: yes

    - name: Charger le module noyau dm_crypt
      ansible.builtin.modprobe:
        name: dm_crypt
        state: present

    - name: S'assurer que le module dm_crypt est chargé au démarrage
      ansible.builtin.lineinfile:
        path: /etc/modules
        line: dm_crypt
        state: present

    - name: Créer dossier longhorn
      ansible.builtin.file:
        path: /mnt/arcodange/longhorn
        state: directory
        owner: pi
        group: docker
        mode: '0774'
      ignore_errors: true

ansible/arcodange/factory/playbooks/system/k3s_config.yml
@@ -0,0 +1,315 @@
---

- name: System K3S
  hosts: raspberries:&local

  tasks:
    - name: prepare inventory for k3s external playbook
      tags: always
      ansible.builtin.add_host:
        hostname: "{{ item }}"
        groups:
          - k3s_cluster
          - "{{ ansible_loop.first | ternary('server', 'agent') }}"
      loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
      loop_control:
        extended: true
        extended_allitems: false

- name: how to reach k3s
  hosts: server
  tasks:

    - name: setup longhorn for volumes https://docs.k3s.io/helm
      become: true
      ansible.builtin.copy:
        dest: /var/lib/rancher/k3s/server/manifests/longhorn-install.yaml
        content: |-
          apiVersion: helm.cattle.io/v1
          kind: HelmChart
          metadata:
            annotations:
              helmcharts.cattle.io/managed-by: helm-controller
            finalizers:
              - wrangler.cattle.io/on-helm-chart-remove
            generation: 1
            name: longhorn-install
            namespace: kube-system
          spec:
            version: v1.9.1
            chart: longhorn
            repo: https://charts.longhorn.io
            failurePolicy: abort
            targetNamespace: longhorn-system
            createNamespace: true
            valuesContent: |-
              defaultSettings:
                defaultDataPath: /mnt/arcodange/longhorn
      vars:
        longhorn_helm_values: {} # https://github.com/longhorn/longhorn/blob/master/chart/values.yaml

    - name: customize k3s traefik configuration https://docs.k3s.io/helm
      block:
        - name: Get my public IP
          community.general.ipify_facts:
        - become: true
          ansible.builtin.copy:
            dest: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
            content: |-
              apiVersion: v1
              data:
                dynamic.yaml: |-
                  {{ traefik_config_yaml | to_nice_yaml | indent( width=4 ) }}
              kind: ConfigMap
              metadata:
                name: traefik-configmap
                namespace: kube-system
              ---
              apiVersion: helm.cattle.io/v1
              kind: HelmChart
              metadata:
                name: traefik
                namespace: kube-system
              spec:
                repo: https://traefik.github.io/charts
                chart: traefik
                version: v37.4.0
                targetNamespace: kube-system
                valuesContent: |-
                  {{ traefik_helm_values | to_nice_yaml | indent( width=4 ) }}
              ---
              apiVersion: cert-manager.io/v1
              kind: Certificate
              metadata:
                name: wildcard-arcodange-lab
                namespace: kube-system
              spec:
                secretName: wildcard-arcodange-lab
                issuerRef:
                  name: step-issuer
                  kind: StepClusterIssuer
                  group: certmanager.step.sm
                dnsNames:
                  - arcodange.lab
                  - "*.arcodange.lab"
              ---
              apiVersion: traefik.io/v1alpha1
              kind: TLSStore
              metadata:
                name: default
                namespace: kube-system
              spec:
                defaultCertificate:
                  secretName: wildcard-arcodange-lab
              ---
              apiVersion: v1
              kind: Service
              metadata:
                name: gitea-external
                namespace: kube-system
              spec:
                type: ExternalName
                externalName: {{ hostvars[groups.gitea[0]]['preferred_ip'] }}
                ports:
                  - port: 3000
                    targetPort: 3000
      vars:
        traefik_config_yaml:
          http:
            services:
              gitea:
                loadBalancer:
                  servers:
                    - url: "http://{{ hostvars[groups.gitea[0]]['preferred_ip'] }}:3000"
            routers:
              dashboard:
                # rule: Host(`traefik.arcodange.duckdns.org`)
                rule: Host(`traefik.arcodange.lab`)
                service: api@internal
                middlewares:
                  - localIp
                # tls:
                #   certResolver: letsencrypt
                #   domains:
                #     - main: "arcodange.duckdns.org"
                #       sans:
                #         - "traefik.arcodange.duckdns.org"
                entryPoints:
                  - websecure
                  - web
              acme-challenge:
                rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
                service: acme-http@internal
                tls:
                  certResolver: letsencrypt
                  domains:
                    - main: "arcodange.duckdns.org"
                      sans:
                        - "*.arcodange.duckdns.org"
                entryPoints:
                  - websecure
                  - web
              gitea:
                # rule: Host(`gitea.arcodange.duckdns.org`)
                rule: Host(`gitea.arcodange.lab`)
                service: gitea
                middlewares:
                  - localIp
                # tls:
                #   certResolver: letsencrypt
                #   domains:
                #     - main: "arcodange.duckdns.org"
                #       sans:
                #         - "gitea.arcodange.duckdns.org"
                entryPoints:
                  - websecure
            middlewares:
              localIp:
                ipAllowList:
                  sourceRange:
                    - "172.16.0.0/12"
                    - "10.42.0.0/16"
                    - "192.168.1.0/24"
                    - "{{ ipify_public_ip }}/32"
                    # - "0.0.0.0/0"
                  # ipStrategy:
                  #   depth: 1
        traefik_helm_values:
          deployment:
            kind: "Deployment"
            initContainers:
              - name: volume-permissions
                image: busybox:latest
                command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"]
                volumeMounts:
                  - name: data
                    mountPath: /data
          # default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
          # current is https://github.com/traefik/traefik-helm-chart/blob/v37.4.0/traefik/values.yaml
          nodeSelector:
            node-role.kubernetes.io/control-plane: 'true' # make a predictable choice of node so https traffic is directed to this node, avoiding NAT and loss of the client IP
          service:
            spec:
              externalTrafficPolicy: Local
          ports:
            traefik:
              expose:
                default: true
            web:
              forwardedHeaders:
                trustedIPs: ["10.42.0.0/16"] # default k3s cidr
          ingressRoute:
            dashboard:
              enabled: true
          globalArguments: [] # deactivate --global.sendanonymoususage
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LEGO_DISABLE_CNAME_SUPPORT
              value: 'true'
          logs:
            general:
              level: INFO
              # format: json
            access:
              enabled: true
              timezone: Europe/Paris
              # format: json
          podSecurityContext:
            runAsGroup: 65532
            runAsNonRoot: true
            runAsUser: 65532
            fsGroup: 65532 # else the persistent volume might be owned by root and be unwriteable
          persistence:
            # -- Enable persistence using Persistent Volume Claims
            # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
            # It can be used to store TLS certificates, see `storage` in certResolvers
            enabled: true
            name: data
            # existingClaim: ""
            accessMode: ReadWriteOnce
            size: 128Mi
            storageClass: "longhorn"
            # volumeName: ""
            path: /data
            annotations: {}
          volumes:
            - name: traefik-configmap
              mountPath: /config
              type: configMap
          experimental:
            plugins:
              crowdsec-bouncer:
                moduleName: github.com/maxlerebourg/crowdsec-bouncer-traefik-plugin # https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin
                version: v1.3.3
          additionalArguments:
            - '--providers.file.filename=/config/dynamic.yaml'
            - '--providers.kubernetesingress.ingressendpoint.publishedservice=kube-system/traefik'
            - "--providers.kubernetescrd.allowcrossnamespace=true"
            - "--providers.kubernetescrd.allowExternalNameServices=true"
          certificatesResolvers:
            letsencrypt:
              acme:
                # for challenge options cf. https://doc.traefik.io/traefik/https/acme/
                email: arcodange@gmail.com
                tlsChallenge: true
                dnsChallenge:
                  # requires env variable DUCKDNS_TOKEN
                  provider: duckdns
                  propagation:
                    delayBeforeChecks: 120
                    disableChecks: true
                  resolvers:
                    - "1.1.1.1:53"
                    - "8.8.8.8:53"
                httpChallenge:
                  entryPoint: "web"
                # It has to match the path with a persistent volume
                storage: /data/acme.json
          envFrom:
            - secretRef:
                name: traefik-duckdns-token
                # MY_TOKEN=<my token (see https://www.duckdns.org/domains)>
                # kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system

    - name: touch manifests/traefik-v3.yaml to trigger update
      ansible.builtin.file:
        path: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
        state: touch
      become: true

# ---

- name: redeploy traefik
  hosts: localhost
  tasks:
    - name: delete old traefik deployment
      kubernetes.core.k8s:
        api_version: apps/v1
        name: traefik
        kind: Deployment
        namespace: kube-system
        state: "absent"
    - name: delete old deployment job so the k3s helm controller redeploy with our new configuration
      kubernetes.core.k8s:
        api_version: batch/v1
        name: helm-install-traefik
        kind: Job
        namespace: kube-system
        state: "absent"
    - name: get traefik deployment
      kubernetes.core.k8s_info:
        api_version: apps/v1
        name: traefik
        kind: Deployment
        namespace: kube-system
        wait: true
      register: traefik_deployment
    - ansible.builtin.debug:
        var: traefik_deployment

ansible/arcodange/factory/playbooks/system/k3s_dns.yml
@@ -0,0 +1,27 @@
# https://docs.k3s.io/advanced#coredns-custom-configuration-imports
---
- name: "Déclarer le ConfigMap coredns-custom pour arcodange.lab"
  hosts: localhost
  gather_facts: false

  vars:
    pihole_ip: "192.168.1.201"
    coredns_namespace: "kube-system"

  tasks:
    - name: "Créer / mettre à jour le ConfigMap coredns-custom"
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: ConfigMap
          metadata:
            name: coredns-custom
            namespace: "{{ coredns_namespace }}"
          data:
            arcodange-lab.server: |
              arcodange.lab:53 {
                  errors
                  cache 30
                  forward . {{ pihole_ip }}:53
              }
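A hedged way to check the forwarding from inside the cluster; the pod name `dnstest` and the queried FQDN are examples, assuming kubectl is configured on the controller:

```yaml
# Hedged sketch: resolve a .lab name from a throwaway pod; not part of the playbook
- name: Verify arcodange.lab resolution through coredns-custom
  ansible.builtin.command: >
    kubectl run dnstest --rm -i --restart=Never --image=busybox:1.36
    -- nslookup gitea.arcodange.lab
  register: dns_check
  changed_when: false
```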
ansible/arcodange/factory/playbooks/system/k3s_ssl.yml
@@ -0,0 +1,172 @@
---
- name: System K3S
  hosts: raspberries:&local

  tasks:
    - name: prepare inventory for k3s external playbook
      tags: always
      ansible.builtin.add_host:
        hostname: "{{ item }}"
        groups:
          - k3s_cluster
          - "{{ ansible_loop.first | ternary('server', 'agent') }}"
      loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
      loop_control:
        extended: true
        extended_allitems: false

# =========================
# Play 1 — Read step-ca PKI
# =========================
- name: Collect PKI material from step-ca
  hosts: localhost
  gather_facts: false

  vars:
    step_ca_primary: pi1
    step_ca_user: step
    step_ca_root: "/home/step/.step/certs/root_ca.crt"
    tmp_dir: /tmp/step-ca-cert-manager

  tasks:
    - name: Ensure local temp directory exists
      file:
        path: "{{ tmp_dir }}"
        state: directory
        mode: "0700"

    - name: Fetch root CA
      fetch:
        src: "{{ step_ca_root }}"
        dest: "{{ tmp_dir }}/root_ca.crt"
        flat: true
      delegate_to: "{{ step_ca_primary }}"
      become: true
      become_user: "{{ step_ca_user }}"
      run_once: true

    - name: Read and decode PKI material
      slurp:
        src: "{{ item }}"
      loop:
        - "{{ tmp_dir }}/root_ca.crt"
      register: pki_raw

    - name: Set PKI facts
      set_fact:
        root_ca_b64: "{{ (pki_raw.results | selectattr('item','equalto', tmp_dir + '/root_ca.crt') | first).content }}"

# =========================
# Play 2 — Deploy to k3s
# =========================
- name: Deploy cert-manager and step-ca integration on k3s server
  hosts: server
  gather_facts: false
  become: true

  vars:
    step_ca_primary: pi1 # same values as Play 1; required by the delegated step-cli tasks below
    step_ca_user: step
    namespace: cert-manager
    jwk_provisioner_name: cert-manager
    jwk_secret_name: step-jwk-password
    clusterissuer_name: step-ca
    step_ca_url: "https://ssl-ca.arcodange.lab:8443"
    cert_manager_version: v1.19.2

  tasks:

    - name: Get cert-manager provisioner info from step-ca
      command: >
        step ca provisioner list
      register: provisioners_json
      delegate_to: "{{ step_ca_primary }}"
      become: true
      become_user: "{{ step_ca_user }}"
      run_once: true

    - name: Set fact jwk_kid from provisioner
      set_fact:
        jwk_kid: >-
          {{
            (provisioners_json.stdout | from_json
             | selectattr('name', 'equalto', jwk_provisioner_name) | list
             | first).key.kid
          }}

    - name: Compute PKI checksum
      set_fact:
        pki_checksum: >-
          {{
            (hostvars['localhost'].root_ca_b64
             ~ jwk_kid
             ~ step_ca_url
             ~ cert_manager_version) | hash('sha256')
          }}

    - name: Install cert-manager and step-ca via k3s static manifest
      copy:
        dest: /var/lib/rancher/k3s/server/manifests/cert-manager-step-ca.yaml
        mode: "0600"
        content: |-
          apiVersion: helm.cattle.io/v1
          kind: HelmChart
          metadata:
            name: cert-manager
            namespace: kube-system
            annotations:
              pki.arcodange.lab/checksum: "{{ pki_checksum }}"
          spec:
            chart: cert-manager
            repo: https://charts.jetstack.io
            version: {{ cert_manager_version }}
            targetNamespace: cert-manager
            createNamespace: true
            valuesContent: |-
              installCRDs: true
          ---
          apiVersion: v1
          kind: Secret
          metadata:
            name: {{ jwk_secret_name }}
            namespace: {{ namespace }}
            annotations:
              pki.arcodange.lab/checksum: "{{ pki_checksum }}"
          type: Opaque
          stringData:
            password: >-
              {{ hostvars[step_ca_primary].vault_step_ca_jwk_password }}
          ---
          apiVersion: helm.cattle.io/v1
          kind: HelmChart
          metadata:
            name: step-issuer
            namespace: kube-system
            annotations:
              pki.arcodange.lab/checksum: "{{ pki_checksum }}"
          spec:
            chart: step-issuer
            repo: https://smallstep.github.io/helm-charts
            version: 1.9.11
            targetNamespace: {{ namespace }}
            createNamespace: false
            valuesContent: |-
              certManager:
                namespace: {{ namespace }}
              stepClusterIssuer:
                create: true
                caUrl: "{{ step_ca_url }}"
                caBundle: "{{ hostvars['localhost'].root_ca_b64 }}"
                provisioner:
                  name: {{ jwk_provisioner_name }}
                  kid: "{{ jwk_kid }}"
                  passwordRef:
                    name: {{ jwk_secret_name }}
                    namespace: {{ namespace }}
                    key: password
              # Override kube-rbac-proxy image to use ARM64-compatible version.
              # Note: pi3 (ARM64) requires an ARM64-compatible image, while pi2 (ARMv7) may work with AMD64 images.
              # The default image (gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0) is AMD64-only and fails on pi3.
              kubeRBACproxy:
                image:
                  repository: quay.io/brancz/kube-rbac-proxy
                  tag: v0.15.0
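For orientation, a rough sketch of the StepClusterIssuer the step-issuer chart should render from the values above; field names follow the smallstep CRD, and the kid/caBundle values are placeholders:

```yaml
# Illustrative only: approximate resource generated by the step-issuer chart
apiVersion: certmanager.step.sm/v1beta1
kind: StepClusterIssuer
metadata:
  name: step-issuer
spec:
  url: https://ssl-ca.arcodange.lab:8443
  caBundle: <base64-encoded root_ca.crt>
  provisioner:
    name: cert-manager
    kid: <JWK key id read from `step ca provisioner list`>
    passwordRef:
      name: step-jwk-password
      namespace: cert-manager
      key: password
```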
ansible/arcodange/factory/playbooks/system/pki.md
@@ -0,0 +1,161 @@
# PKI

Explanations generated by ChatGPT to describe the SSL setup via "step".

```mermaid
---
config:
  logLevel: debug
  theme: forest
---
flowchart TB
    %% PKI
    subgraph PKI["Step CA / PKI (Pi1)"]
    style PKI fill:#ffe0b2,stroke:#ff8c00,stroke-width:2px
    A[Step CA primaire]:::stepCA
    B[JWK Provisioner]:::jwk
    C[Root CA]:::root
    end

    %% Contrôleur Ansible
    subgraph Controller["Contrôleur Ansible / Mac"]
    style Controller fill:#e0f7fa,stroke:#00acc1,stroke-width:2px
    D[Fetch JWK + Root CA]:::ansible
    E[Secrets K8s: step-jwk, step-root-ca]:::k8sSecret
    F[ClusterIssuer cert-manager]:::clusterIssuer
    end

    %% K3s Cluster + Traefik
    subgraph K3sCluster["K3s Cluster"]
    style K3sCluster fill:#f1f8e9,stroke:#558b2f,stroke-width:2px
    T[Traefik Ingress Controller]:::traefik
    H[Webapp Pods]:::webapp
    G["Gitea Service (ExternalName → pi2.home:3000)"]:::gitea
    end

    Users[Clients / Navigateurs]:::clients

    %% Flèches
    %% PKI → Controller
    A --> B
    C --> D
    B --> D
    D --> E
    E --> F

    %% ClusterIssuer → Traefik services
    F --> H
    F --> G

    %% Traefik expose tous les services
    T --> H
    T --> G
    Users -->|HTTPS / HTTP| T

    %% PKI direct (optional, for clarity)
    A -->|Sign initial cert| F

    %% Styling classes
    classDef stepCA fill:#fff3e0,stroke:#ff6f00,stroke-width:1px
    classDef jwk fill:#fff9c4,stroke:#fbc02d,stroke-width:1px
    classDef root fill:#ffe0b2,stroke:#ff8c00,stroke-width:1px
    classDef ansible fill:#b2ebf2,stroke:#00acc1,stroke-width:1px
    classDef k8sSecret fill:#b3e5fc,stroke:#0288d1,stroke-width:1px
    classDef clusterIssuer fill:#81d4fa,stroke:#0277bd,stroke-width:1px
    classDef gitea fill:#c8e6c9,stroke:#388e3c,stroke-width:1px
    classDef webapp fill:#a5d6a7,stroke:#2e7d32,stroke-width:1px
    classDef traefik fill:#ffe082,stroke:#ff8f00,stroke-width:1px
    classDef clients fill:#eeeeee,stroke:#9e9e9e,stroke-width:1px
```

- 🔵 PKI (Step CA): the source of trust. Every HTTPS certificate originates here.
- 🔵 JWK Provisioner: authorizes cert-manager to request certificates automatically.
- 🟢 Ansible controller: centralizes the keys and creates the K8s Secrets and the ClusterIssuer.
- 🟢 Secrets & ClusterIssuer: let cert-manager inside K3s authenticate and obtain TLS certificates.
- 🟢 Webapp pods: get their certificates through cert-manager, so HTTPS works automatically (see the example after this list).
- 🔵 Gitea: receives a certificate signed directly by Step CA and serves HTTPS outside of K3s.
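As a concrete example, this is what a per-application request looks like, mirroring the wildcard Certificate declared in k3s_config.yml; the name and dnsNames here are placeholders:

```yaml
# Hypothetical app certificate; the real cluster-wide wildcard lives in k3s_config.yml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: my-app-tls
  namespace: default
spec:
  secretName: my-app-tls
  issuerRef:
    name: step-issuer
    kind: StepClusterIssuer
    group: certmanager.step.sm
  dnsNames:
    - my-app.arcodange.lab
```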
```mermaid
flowchart TD
    %% PKI
    subgraph PKI["Step CA / PKI (Pi1)"]
    style PKI fill:#ffe0b2,stroke:#ff8c00,stroke-width:2px
    A[1️⃣ Initialisation Step CA primaire]:::stepCA
    B[2️⃣ Création JWK Provisioner pour K3s]:::jwk
    C[Root CA]:::root
    end

    %% Contrôleur Ansible
    subgraph Controller["Contrôleur Ansible / Mac"]
    style Controller fill:#e0f7fa,stroke:#00acc1,stroke-width:2px
    D[3️⃣ Fetch JWK + Root CA depuis Step CA]:::ansible
    E[4️⃣ Création / Mise à jour des Secrets K8s]:::k8sSecret
    F[5️⃣ Création / Mise à jour ClusterIssuer cert-manager]:::clusterIssuer
    end

    %% K3s Cluster + Traefik
    subgraph K3sCluster["K3s Cluster"]
    style K3sCluster fill:#f1f8e9,stroke:#558b2f,stroke-width:2px
    T[6️⃣ Traefik Ingress Controller]:::traefik
    H[7️⃣ Webapp Pods]:::webapp
    G["8️⃣ Gitea Service (ExternalName → pi2.home:3000)"]:::gitea
    end

    Users[9️⃣ Client Mac / Navigateurs]:::clients

    %% Flux
    A --> B
    C --> D
    B --> D
    D --> E
    E --> F
    F --> H
    F --> G
    T --> H
    T --> G
    Users -->|HTTPS / HTTP| T

    %% Styling classes
    classDef stepCA fill:#fff3e0,stroke:#ff6f00,stroke-width:1px
    classDef jwk fill:#fff9c4,stroke:#fbc02d,stroke-width:1px
    classDef root fill:#ffe0b2,stroke:#ff8c00,stroke-width:1px
    classDef ansible fill:#b2ebf2,stroke:#00acc1,stroke-width:1px
    classDef k8sSecret fill:#b3e5fc,stroke:#0288d1,stroke-width:1px
    classDef clusterIssuer fill:#81d4fa,stroke:#0277bd,stroke-width:1px
    classDef gitea fill:#c8e6c9,stroke:#388e3c,stroke-width:1px
    classDef webapp fill:#a5d6a7,stroke:#2e7d32,stroke-width:1px
    classDef traefik fill:#ffe082,stroke:#ff8f00,stroke-width:1px
    classDef clients fill:#eeeeee,stroke:#9e9e9e,stroke-width:1px
```

```mermaid
flowchart TD
    subgraph Cluster["Cluster Kubernetes (k3s)"]
        subgraph CertManager["Cert-Manager"]
            ClusterIssuer["ClusterIssuer\n(type: smallstep)"]
        end

        subgraph Traefik["Traefik (Ingress Controller)"]
            TLSStore["TLSStore\n(Traefik v2+)"]
            IngressRoute["IngressRoute\n(TLS: my-tls-store)"]
        end

        subgraph Apps["Applications"]
            App1[Service: my-app]
            App2[Service: my-api]
        end
    end

    subgraph Smallstep["Smallstep PKI (step-ca)"]
        StepCA["step-ca\n(CA interne)"]
    end

    %% Interactions
    ClusterIssuer -- "1. Demande de certificat\n(CertificateRequest)" --> StepCA
    StepCA -- "2. Émet un certificat\n(signé par la CA)" --> ClusterIssuer
    ClusterIssuer -- "3. Stocke le certificat\n(dans un Secret Kubernetes)" --> Secret[(Secret: my-app-tls)]
    Secret -- "4. Référencé par" --> TLSStore
    TLSStore -- "5. Fournit le certificat\n(TLS Termination)" --> IngressRoute
    IngressRoute -- "6. Route le trafic HTTPS\nvers" --> App1
    IngressRoute -- "6. Route le trafic HTTPS\nvers" --> App2
```

ansible/arcodange/factory/playbooks/system/rpi.yml
@@ -0,0 +1,13 @@
- name: Raspberry pi general setup
  hosts: raspberries:&local
  gather_facts: yes
  tags: never
  become: yes

  tasks:

    - name: set hostname
      ansible.builtin.hostname:
        name: "{{ inventory_hostname }}"
      become: yes
      when: inventory_hostname != ansible_hostname

ansible/arcodange/factory/playbooks/system/system.yml
@@ -0,0 +1,31 @@
---

- name: Setup général des rpis
  ansible.builtin.import_playbook: rpi.yml

- name: dns
  ansible.builtin.import_playbook: ../dns/dns.yml

- name: ssl
  ansible.builtin.import_playbook: ../ssl/ssl.yml

- name: Préparer les disques pour Longhorn
  ansible.builtin.import_playbook: prepare_disks.yml

- name: Installer et configurer Docker
  ansible.builtin.import_playbook: system_docker.yml

- name: Installer le client iSCSI pour Longhorn
  ansible.builtin.import_playbook: iscsi_longhorn.yml

- name: Préparer l'inventaire et installer K3s
  ansible.builtin.import_playbook: system_k3s.yml

- name: Configurer K3S Core DNS
  ansible.builtin.import_playbook: k3s_dns.yml

- name: Configurer K3S Cert Issuer
  ansible.builtin.import_playbook: k3s_ssl.yml

- name: Configurer K3s (kubeconfig, Longhorn, Traefik)
  ansible.builtin.import_playbook: k3s_config.yml

ansible/arcodange/factory/playbooks/system/system_docker.yml
@@ -0,0 +1,111 @@
- name: System Docker
  hosts: raspberries:&local
  gather_facts: yes
  tags: never
  become: yes

  pre_tasks:

    - name: Prevent apt source conflict
      ansible.builtin.file:
        state: absent
        path: /etc/apt/sources.list.d/docker.list
      become: yes

    - name: Install role geerlingguy.docker
      community.general.ansible_galaxy_install:
        type: role
        name: geerlingguy.docker
      run_once: true
      delegate_to: localhost
      become: false

    - ansible.builtin.debug:
        var: ansible_facts.machine

  tasks:

    - include_role:
        name: geerlingguy.docker

    - name: Créer le répertoire /etc/docker s'il n'existe pas
      ansible.builtin.file:
        path: /etc/docker
        state: directory
        mode: '0755'

    - name: Vérifier si /etc/docker/daemon.json existe
      ansible.builtin.stat:
        path: /etc/docker/daemon.json
      register: docker_daemon_json

    - name: Lire la configuration Docker existante
      ansible.builtin.command: "cat /etc/docker/daemon.json"
      register: docker_config_raw
      ignore_errors: yes
      changed_when: false
      when: docker_daemon_json.stat.exists

    - name: Initialiser la variable de config Docker
      ansible.builtin.set_fact:
        docker_config: {}

    - name: Parser le JSON existant si le fichier existe
      ansible.builtin.set_fact:
        docker_config: "{{ docker_config_raw.stdout | from_json }}"
      when: docker_config_raw.stdout is defined and docker_config_raw.stdout != ""

    - name: Mettre à jour la config du logger
      ansible.builtin.set_fact:
        docker_config: >
          {{ docker_config | combine({
            'log-driver': 'json-file',
            'log-opts': {
              'max-size': '10m',
              'max-file': '5'
            }
          }, recursive=True) }}

    - name: Ensure Docker storage directory exists on external disk
      ansible.builtin.file:
        path: /mnt/arcodange/docker
        state: directory
        mode: '0755'
        owner: root
        group: docker
      when: ansible_facts.mounts | selectattr('mount', 'equalto', '/mnt/arcodange') | list | length > 0

    - name: Configure Docker to use external storage
      ansible.builtin.set_fact:
        docker_config: >
          {{ docker_config | combine({
            'data-root': '/mnt/arcodange/docker',
            'storage-driver': 'overlay2'
          }, recursive=True) }}
      when: ansible_facts.mounts | selectattr('mount', 'equalto', '/mnt/arcodange') | list | length > 0

    - name: Ensure docker_config is a dictionary
      ansible.builtin.set_fact:
        docker_config: >
          {% if docker_config is mapping %}
          {{ docker_config }}
          {% else %}
          {}
          {% endif %}

    - name: Écrire la configuration mise à jour
      ansible.builtin.copy:
        dest: /etc/docker/daemon.json
        content: "{{ docker_config | to_nice_json(indent=2) }}"
        mode: '0644'
      notify: Redémarrer Docker

  handlers:
    - name: Redémarrer Docker
      ansible.builtin.service:
        name: docker
        state: restarted

  post_tasks:
    - name: adding existing user '{{ ansible_user }}' to group docker
      user:
        name: '{{ ansible_user }}'
        groups: docker
        append: yes
      become: yes
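For reference, a sketch of the daemon.json these tasks converge on when /mnt/arcodange is mounted; the values come straight from the combine() calls above (JSON shown here is also valid YAML):

```yaml
{
  "data-root": "/mnt/arcodange/docker",
  "log-driver": "json-file",
  "log-opts": {
    "max-file": "5",
    "max-size": "10m"
  },
  "storage-driver": "overlay2"
}
```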
ansible/arcodange/factory/playbooks/system/system_k3s.yml
@@ -0,0 +1,63 @@
- name: System K3S
  hosts: raspberries:&local

  tasks:
    - name: prepare inventory for k3s external playbook
      tags: always
      ansible.builtin.add_host:
        hostname: "{{ item }}"
        groups:
          - k3s_cluster
          - "{{ ansible_loop.first | ternary('server', 'agent') }}"
      loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
      loop_control:
        extended: true
        extended_allitems: false

    - name: Install collection k3s.orchestration
      local_action:
        module: community.general.ansible_galaxy_install
        type: collection
        name: git+https://github.com/k3s-io/k3s-ansible
      run_once: true

    - name: Install socat for kubectl port forwarding
      ansible.builtin.apt:
        name: socat
        state: present
        update_cache: yes
      become: yes

- name: k3s
  ansible.builtin.import_playbook: k3s.orchestration.site
  # ansible.builtin.import_playbook: k3s.orchestration.upgrade
  # ansible.builtin.import_playbook: k3s.orchestration.reset
  vars:
    k3s_version: v1.34.3+k3s1
    extra_server_args: >-
      --docker --disable traefik
      --kubelet-arg="container-log-max-files=5"
      --kubelet-arg="container-log-max-size=10Mi"
    extra_agent_args: >-
      --docker
      --kubelet-arg="container-log-max-files=5"
      --kubelet-arg="container-log-max-size=10Mi"
    api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"

- name: how to reach k3s
  hosts: server
  tasks:
    - name: copy /etc/rancher/k3s/k3s.yaml to ~/.kube/config from the k3s server and replace 127.0.0.1 with the server ip or hostname
      run_once: true
      block:
        - ansible.builtin.fetch:
            src: /etc/rancher/k3s/k3s.yaml
            dest: ~/.kube/config
            flat: true
          become: true
          run_once: true
        - local_action:
            module: ansible.builtin.replace
            path: ~/.kube/config
            regexp: 'server: https://127.0.0.1:6443'
            replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'
ansible/arcodange/factory/playbooks/tools/crowdsec.yml
@@ -0,0 +1,10 @@
---
- name: crowdsec
  # hosts: raspberries:&local
  hosts: localhost
  # debugger: on_failed

  tasks:
    - name: Setup crowdsec middleware for traefik
      include_role:
        name: crowdsec

@@ -35,12 +35,12 @@
      password: '{{ pg_conf.POSTGRES_PASSWORD }}'
      gitea_admin_token: '{{ vault_GITEA_ADMIN_TOKEN }}'

  - name: share VAULT CA
    block:
  # - name: share VAULT CA
  #   block:

      - name: read traefik CA
        include_role:
          name: arcodange.factory.traefik_certs
  #     - name: read traefik CA
  #       include_role:
  #         name: arcodange.factory.traefik_certs

  post_tasks:
    - include_role:

@@ -0,0 +1 @@
traefik_pvc_name: traefik
@@ -0,0 +1,94 @@
---
- name: Inject captcha.html into Traefik PVC
  block:

    # ---------------------
    # Scale to 0
    # ---------------------
    - name: Scale Traefik to 0
      kubernetes.core.k8s_scale:
        api_version: apps/v1
        kind: Deployment
        namespace: kube-system
        name: traefik
        replicas: 0

    # ---------------------
    # Create Job
    # ---------------------
    - name: Deploy captcha injection Job
      kubernetes.core.k8s:
        state: present
        namespace: kube-system
        definition:
          apiVersion: batch/v1
          kind: Job
          metadata:
            name: inject-captcha
          spec:
            backoffLimit: 0
            template:
              spec:
                restartPolicy: Never
                volumes:
                  - name: traefik-data
                    persistentVolumeClaim:
                      claimName: "{{ traefik_pvc_name }}"
                containers:
                  - name: write-captcha
                    image: alpine:3.20
                    command:
                      - /bin/sh
                      - -c
                      - |
                        echo "Writing captcha.html into PVC..."
                        cat << 'EOF' > /data/captcha.html
                        {{ lookup('template', 'captcha.html.j2') | indent(20) }}
                        EOF
                    volumeMounts:
                      - name: traefik-data
                        mountPath: /data

    # ---------------------
    # Wait for job success
    # ---------------------
    - name: Wait for Job completion
      kubernetes.core.k8s_info:
        api_version: batch/v1
        kind: Job
        name: inject-captcha
        namespace: kube-system
      register: job_status
      until: job_status.resources[0].status.succeeded | default(0) | int > 0
      retries: 20
      delay: 5

    # ---------------------
    # Clean Job
    # ---------------------
    - name: Remove captcha injection Job
      kubernetes.core.k8s:
        state: absent
        api_version: batch/v1
        kind: Job
        name: inject-captcha
        namespace: kube-system

  rescue:
    - name: Log failure
      ansible.builtin.debug:
        msg: "An error occurred during captcha injection. Traefik will still be scaled back up."

  always:
    # ---------------------
    # Ensure Traefik is scaled back to 1 NO MATTER WHAT
    # ---------------------
    - name: Ensure Traefik is scaled back to 1
      kubernetes.core.k8s_scale:
        api_version: apps/v1
        kind: Deployment
        namespace: kube-system
        name: traefik
        replicas: 1
        wait: yes
        wait_timeout: 300
@@ -0,0 +1,179 @@
- name: Create the ServiceAccount for Vault authentication
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: factory-ansible-tool-crowdsec-traefik-plugin
        namespace: kube-system
    wait: yes
    wait_timeout: 30

- name: Create the VaultAuth resource
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: secrets.hashicorp.com/v1beta1
      kind: VaultAuth
      metadata:
        name: factory-ansible-tool-crowdsec
        namespace: kube-system
      spec:
        method: kubernetes
        mount: kubernetes
        kubernetes:
          role: factory_crowdsec_conf
          serviceAccount: factory-ansible-tool-crowdsec-traefik-plugin
          audiences:
            - vault
    wait: yes
    wait_timeout: 30

- name: Create the VaultStaticSecret resource
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: secrets.hashicorp.com/v1beta1
      kind: VaultStaticSecret
      metadata:
        name: factory-ansible-tool-crowdsec-turnstile-secret
        namespace: kube-system
      spec:
        type: kv-v2
        mount: kvv2
        path: cms/factory/turnstile
        destination:
          name: factory-ansible-tool-crowdsec-traefik-plugin-captcha-params
          create: true
        refreshAfter: 30s
        vaultAuthRef: factory-ansible-tool-crowdsec
    wait: yes
    wait_timeout: 30

- name: Fetch the Kubernetes secret
  kubernetes.core.k8s_info:
    kind: Secret
    name: factory-ansible-tool-crowdsec-traefik-plugin-captcha-params
    namespace: kube-system
  register: crowdsec_captcha_secret

- name: Get the CrowdSec LAPI pod name
  kubernetes.core.k8s_info:
    kind: Pod
    namespace: tools
    label_selectors:
      - k8s-app = crowdsec
      - type = lapi
  register: crowdsec_lapi_pods

- name: Check that a pod was found
  ansible.builtin.assert:
    that: crowdsec_lapi_pods.resources | length > 0
    fail_msg: "No CrowdSec LAPI pod found in the 'tools' namespace with labels 'k8s-app=crowdsec, type=lapi'."

- name: Set the CrowdSec LAPI pod name
  ansible.builtin.set_fact:
    crowdsec_lapi_pod_name: "{{ crowdsec_lapi_pods.resources[0].metadata.name }}"

- name: Fetch the CrowdSec bouncer API key
  kubernetes.core.k8s_exec:
    namespace: tools
    pod: "{{ crowdsec_lapi_pod_name }}"
    container: crowdsec-lapi
    command: >
      cscli bouncers add traefik-plugin
  register: bouncer_key_result
  ignore_errors: yes

- name: Delete the existing bouncer on failure
  kubernetes.core.k8s_exec:
    namespace: tools
    pod: "{{ crowdsec_lapi_pod_name }}"
    container: crowdsec-lapi
    command: >
      cscli bouncers delete traefik-plugin
  when: bouncer_key_result.failed

- name: Retry fetching the API key
  kubernetes.core.k8s_exec:
    namespace: tools
    pod: "{{ crowdsec_lapi_pod_name }}"
    container: crowdsec-lapi
    command: >
      cscli bouncers add traefik-plugin
  # registered under a separate name: a skipped task would otherwise overwrite
  # bouncer_key_result and lose the stdout of the first, successful attempt
  register: bouncer_key_retry
  when: bouncer_key_result.failed

- name: Keep the retried key when the first attempt failed
  ansible.builtin.set_fact:
    bouncer_key_result: "{{ bouncer_key_retry }}"
  when: bouncer_key_result.failed

- name: Inject captcha.html into Traefik PVC
  include_tasks: inject_captcha_html.yml
  tags: never

- name: Create the Traefik Middleware for CrowdSec
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: traefik.io/v1alpha1
      kind: Middleware
      metadata:
        name: crowdsec
        namespace: kube-system
      spec:
        plugin:
          crowdsec-bouncer:
            enabled: true
            logLevel: DEBUG
            crowdsecMode: stream
            crowdsecLapiScheme: http
            crowdsecLapiHost: crowdsec-service.tools.svc.cluster.local:8080
            # the API key is expected on the third line of cscli's output
            crowdsecLapiKey: "{{ bouncer_key_result.stdout_lines[2].strip() }}"
            httpTimeoutSeconds: 60
            crowdsecAppsecEnabled: false
            crowdsecAppsecHost: crowdsec:7422
            crowdsecAppsecFailureBlock: true
            crowdsecAppsecUnreachableBlock: true
            forwardedHeadersTrustedIPs:
              - 10.0.10.23/32
              - 10.0.20.0/24
            clientTrustedIPs:
              - 192.168.1.0/24
              - 10.42.0.0/16
            captchaProvider: turnstile
            captchaSiteKey: "{{ crowdsec_captcha_secret.resources[0].data.sitekey | b64decode }}"
            captchaSecretKey: "{{ crowdsec_captcha_secret.resources[0].data.secret | b64decode }}"
            captchaHTMLFilePath: "/data/captcha.html"
            redisCacheEnabled: true
            redisCacheHost: "redis.tools:6379"
            redisCacheDatabase: "0"
            redisCacheUnreachableBlock: false

- name: Restart traefik to pick up the new middleware configuration
  block:
    # ---------------------
    # Scale to 0
    # ---------------------
    - name: Scale Traefik to 0
      kubernetes.core.k8s_scale:
        api_version: apps/v1
        kind: Deployment
        namespace: kube-system
        name: traefik
        replicas: 0
  rescue:
    - name: Log failure
      ansible.builtin.debug:
        msg: "An error occurred during traefik scale down. Traefik will still be scaled back up."

  always:
    # ---------------------
    # Ensure Traefik is scaled back to 1 NO MATTER WHAT
    # ---------------------
    - name: Ensure Traefik is scaled back to 1
      kubernetes.core.k8s_scale:
        api_version: apps/v1
        kind: Deployment
        namespace: kube-system
        name: traefik
        replicas: 1
        wait: yes
        wait_timeout: 300
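Not part of the play itself, but a quick manual check that the bouncer really registered against the LAPI can be run with the same namespace, labels and container name used by the tasks above:

```bash
# List registered bouncers inside the LAPI pod; 'traefik-plugin' should appear as validated.
kubectl -n tools exec \
  "$(kubectl -n tools get pod -l k8s-app=crowdsec,type=lapi -o jsonpath='{.items[0].metadata.name}')" \
  -c crowdsec-lapi -- cscli bouncers list
```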
@@ -0,0 +1,18 @@
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8" />
    <title>Captcha verification</title>
    <script src="https://challenges.cloudflare.com/turnstile/v0/api.js" async defer></script>
</head>
<body>
    <form method="POST">
        <div class="cf-turnstile"
             data-sitekey="{{ crowdsec_captcha_secret.resources[0].data.sitekey | b64decode }}"
             data-theme="auto"
             data-size="normal">
        </div>
        <button type="submit">Valider</button>
    </form>
</body>
</html>
@@ -2,7 +2,7 @@ vault_unseal_keys_path: ~/.arcodange/cluster-keys.json
vault_unseal_keys_shares: 1
vault_unseal_keys_key_threshold: 1 # keys_key_threshold <= keys_shares

vault_address: https://vault.arcodange.duckdns.org
vault_address: https://vault.arcodange.lab

vault_oidc_gitea_setupGiteaAppJS: '{{ role_path }}/files/playwright_setupGiteaApp.js'
@@ -19,7 +19,7 @@ variable "admin_email" {
}
variable "gitea_app" {
  type = object({
    url         = optional(string, "https://gitea.arcodange.duckdns.org/")
    url         = optional(string, "https://gitea.arcodange.lab")
    id          = string
    secret      = string
    description = optional(string, "Arcodange Gitea Auth")
@@ -39,10 +39,10 @@ variable "gitea_admin_token" {
  sensitive = true
}

# kubectl -n kube-system exec $(kubectl -n kube-system get pod -l app.kubernetes.io/name=traefik -o jsonpath="{.items[0]['.metadata.name']}") -- cat /data/acme.json | jq '(.letsencrypt.Certificates | map(select(.domain.main=="arcodange.duckdns.org")))[0]' | jq '.certificate' -r | base64 -d | openssl x509
# variable "ca_pem" {
#   type = string
# }
# same as vault CA
variable "ca_pem" {
  type = string
}
terraform {
  required_providers {
    vault = {
@@ -63,10 +63,10 @@ resource "vault_jwt_auth_backend" "gitea" {
  path               = "gitea"
  type               = "oidc"
  oidc_discovery_url = var.gitea_app.url
  # oidc_discovery_ca_pem = var.ca_pem
  oidc_discovery_ca_pem = file(var.ca_pem)
  oidc_client_id     = var.gitea_app.id
  oidc_client_secret = var.gitea_app.secret
  bound_issuer       = var.gitea_app.url
  bound_issuer       = trimsuffix(var.gitea_app.url, "/")

  tune {
    allowed_response_headers = []
@@ -91,7 +91,8 @@ resource "vault_jwt_auth_backend_role" "gitea" {
  allowed_redirect_uris = [
    "http://localhost:8250/oidc/callback", # for command line login
    "${var.vault_address}/ui/vault/auth/gitea/oidc/callback",
    "https://webapp.arcodange.duckdns.org/oauth-callback",
    "https://webapp.arcodange.fr/oauth-callback",
    "https://webapp.arcodange.lab/oauth-callback",
  ]
}

@@ -101,8 +102,8 @@ resource "vault_jwt_auth_backend" "gitea_jwt" {
  path               = "gitea_jwt"
  type               = "jwt"
  oidc_discovery_url = var.gitea_app.url
  # oidc_discovery_ca_pem = var.ca_pem
  bound_issuer       = var.gitea_app.url
  oidc_discovery_ca_pem = file(var.ca_pem)
  bound_issuer          = trimsuffix(var.gitea_app.url, "/")

  tune {
    allowed_response_headers = []
@@ -166,7 +167,7 @@ resource "vault_kv_secret" "google_credentials" {
  path = "${vault_mount.kvv1.path}/google/credentials"
  data_json = jsonencode(
    {
      credentials = file("~/.config/gcloud/application_default_credentials.json")
      credentials = file("/root/.config/gcloud/application_default_credentials.json")
    }
  )
}
@@ -7,7 +7,7 @@ const username = process.env.GITEA_USER;
const password = process.env.GITEA_PASSWORD;
const debug = Boolean(process.env.DEBUG);
const vaultAddress = process.env.VAULT_ADDRESS || 'http://localhost:8200';
const giteaAddress = process.env.GITEA_ADDRESS || 'https://gitea.arcodange.duckdns.org';
const giteaAddress = process.env.GITEA_ADDRESS || 'https://gitea.arcodange.lab';

if (!username || !password) {
  console.error('Please set the GITEA_USER and GITEA_PASSWORD environment variables.');
@@ -22,7 +22,7 @@ const browser = await chromium.launch({
    log: (name, severity, message, args) => console.warn(`${severity}| ${name} :: ${message} __ ${args}`)
  },
});
const context = await browser.newContext({locale: "gb-GB"});
const context = await browser.newContext({locale: "gb-GB", ignoreHTTPSErrors: true}); // Using self signed cert - could improve with NODE_EXTRA_CA_CERTS env variable
const page = await context.newPage();

async function doLogin() {
@@ -75,7 +75,8 @@ async function setupApp() {
  await applicationsPanel.locator('textarea[name="redirect_uris"]').fill([
    'http://localhost:8250/oidc/callback', // for command line login
    `${vaultAddress}/ui/vault/auth/gitea/oidc/callback`,
    'https://webapp.arcodange.duckdns.org/oauth-callback',
    'https://webapp.arcodange.lab/oauth-callback',
    'https://webapp.arcodange.fr/oauth-callback',
  ].join('\n'));
  await applicationsPanel.locator('form[action="/-/admin/applications/oauth2"] > button').dblclick()
@@ -11,18 +11,44 @@
    GITEA_USER: '{{ gitea_admin_user }}'
    GITEA_PASSWORD: '{{ gitea_admin_password }}'
    VAULT_ADDRESS: '{{ vault_address }}'
    NODE_EXTRA_CA_CERTS: ''

- include_role:
    name: arcodange.factory.playwright

- include_role:
    name: arcodange.factory.traefik_certs
# - include_role:
#     name: arcodange.factory.traefik_certs

- set_fact:
    gitea_app: '{{ playwright_job.stdout | from_json }}'

    volume_name: tofu-{{ ansible_date_time.iso8601.replace(':','-') }}

- name: Check SSL certificate for Gitea
  shell: >-
    openssl s_client -connect gitea.arcodange.lab:443 -CAfile /etc/ssl/certs/arcodange-root.pem -servername gitea.arcodange.lab < /dev/null 2>&1 | grep -E "Verify return code:|subject=|issuer="
  register: ssl_check
  ignore_errors: true

- name: Debug SSL certificate check
  debug:
    var: ssl_check.stdout_lines


- name: Delete existing Gitea OIDC backends if they exist
  include_tasks: vault_cmd.yml
  vars:
    vault_cmd: vault auth disable {{ backend_name }}
    vault_cmd_can_fail: true
    vault_cmd_json_attr: ''
    vault_cmd_output_var: false
  loop:
    - gitea
    - gitea_jwt
  loop_control:
    loop_var: backend_name

- name: use tofu to provision vault
  block:
    - shell: docker volume create {{ volume_name }}
@@ -31,6 +57,8 @@
        -v {{ volume_name }}:/tofu -w /tofu
        -v {{ role_path }}/files/hashicorp_vault.tf:/tofu/hashicorp_vault.tf
        -v ~/.config/gcloud:/root/.config/gcloud
        -v /etc/ssl/certs/arcodange-root.pem:/etc/ssl/custom/arcodange-root.pem:ro
        -e VAULT_CACERT=/etc/ssl/custom/arcodange-root.pem
        --entrypoint=''
        ghcr.io/opentofu/opentofu:latest
        {{ command }}
@@ -44,6 +72,7 @@
        # -var='vault_token={{ vault_root_token }}'
        # -var='postgres_admin_credentials={{ postgres_admin_credentials | to_json }}'
        # -var='gitea_admin_token={{ gitea_admin_token }}'
        # -var="ca_pem=/etc/ssl/custom/arcodange-root.pem"
      - >-
        tofu apply -auto-approve -no-color
        -var='gitea_app={{ gitea_app | to_json }}'
@@ -51,6 +80,7 @@
        -var='vault_token={{ vault_root_token }}'
        -var='postgres_admin_credentials={{ postgres_admin_credentials | to_json }}'
        -var='gitea_admin_token={{ gitea_admin_token }}'
        -var="ca_pem=/etc/ssl/custom/arcodange-root.pem"
      loop_control:
        loop_var: command
        extended: true
@@ -71,7 +101,7 @@
    gitea_secret_name: vault_oauth__sh_b64
    gitea_secret_value: >-
      {{ lookup('ansible.builtin.template', 'oidc_jwt_token.sh.j2', template_vars = {
        'GITEA_BASE_URL': 'https://gitea.arcodange.duckdns.org',
        'GITEA_BASE_URL': 'https://gitea.arcodange.lab',
        'OIDC_CLIENT_ID': gitea_app.id,
        'OIDC_CLIENT_SECRET': gitea_app.secret,
      }) | b64encode }}
@@ -4,10 +4,10 @@ set -eu
# Variables to adjust to your configuration
CLIENT_ID="{{ OIDC_CLIENT_ID }}"
CLIENT_SECRET="{{ OIDC_CLIENT_SECRET }}"
REDIRECT_URI="{{ OIDC_CLIENT_CALLBACK | default('https://webapp.arcodange.duckdns.org/oauth-callback') }}" # Redirected here after authentication
AUTH_URL="{{ GITEA_BASE_URL | default('https://gitea.arcodange.duckdns.org') }}/login/oauth/authorize"
TOKEN_URL="{{ GITEA_BASE_URL | default('https://gitea.arcodange.duckdns.org') }}/login/oauth/access_token"
ISSUER="https://gitea.arcodange.duckdns.org/"
REDIRECT_URI="{{ OIDC_CLIENT_CALLBACK | default('https://webapp.arcodange.lab/oauth-callback') }}" # Redirected here after authentication
AUTH_URL="{{ GITEA_BASE_URL | default('https://gitea.arcodange.lab') }}/login/oauth/authorize"
TOKEN_URL="{{ GITEA_BASE_URL | default('https://gitea.arcodange.lab') }}/login/oauth/access_token"
ISSUER="https://gitea.arcodange.lab/"
# SCOPE="openid email profile groups" # Scopes to request - profile groups
SCOPE="email openid read:user" # Scopes to request - profile groups
set +u
@@ -26,7 +26,7 @@ poll_state() {
    #echo "Attempt $attempt/$MAX_ATTEMPTS: GET request to the /retrieve endpoint for state=$STATE..."

    # Perform the GET request
    RESPONSE=$(curl -s -w "%{http_code}" -o /tmp/response_body "https://webapp.arcodange.duckdns.org/retrieve?state=$STATE")
    RESPONSE=$(curl -s -w "%{http_code}" -o /tmp/response_body "https://webapp.arcodange.lab/retrieve?state=$STATE")
    HTTP_CODE=$(tail -n1 <<< "$RESPONSE")

    if [ "$HTTP_CODE" == "200" ]; then
@@ -50,6 +50,9 @@ poll_state() {
    return 1
}

# 0. Install the arcodange.lab certificate (requires sudo)
# curl https://ssl-ca.arcodange.lab:8443/roots.pem -ks > /usr/local/share/ca-certificates/arcodange-root.crt && update-ca-certificates 2>/dev/null >/dev/null && export VAULT_CACERT=/usr/local/share/ca-certificates/arcodange-root.crt || echo "couldn't install self signed .crt" >&2

# 1. Redirect the user to the authentication URL
echo "Open the following link in your browser to authenticate with Gitea:"
echo "$AUTH_URL?client_id=$CLIENT_ID&redirect_uri=$REDIRECT_URI&response_type=code&scope=$(sed 's/ /%20/g' <<<$SCOPE)&state=$STATE"
@@ -1,3 +1,5 @@
---
- name: hashicorp_vault
  ansible.builtin.import_playbook: hashicorp_vault.yml
- name: crowdsec
  ansible.builtin.import_playbook: crowdsec.yml
@@ -1,5 +1,5 @@
# to see generated tokens
# go to https://gitea.arcodange.duckdns.org/user/settings/applications
# go to https://gitea.arcodange.lab/user/settings/applications

- when: >-
    lookup('ansible.builtin.varnames', '^' ~ gitea_token_fact_name ~ '$') | length == 0
@@ -7,7 +7,7 @@ const username = process.env.GITEA_USER;
const password = process.env.GITEA_PASSWORD;
const debug = Boolean(process.env.DEBUG);
const vaultAddress = process.env.VAULT_ADDRESS || 'http://localhost:8200';
const giteaAddress = process.env.GITEA_ADDRESS || 'https://gitea.arcodange.duckdns.org';
const giteaAddress = process.env.GITEA_ADDRESS || 'https://gitea.arcodange.lab';

if (!username || !password) {
  console.error('Please set the GITEA_USER and GITEA_PASSWORD environment variables.');

@@ -4,7 +4,7 @@
    kubectl -n kube-system exec
    $(kubectl -n kube-system get pod -l app.kubernetes.io/name=traefik
    -o jsonpath="{.items[0]['.metadata.name']}") --
    cat /data/acme.json | jq '(.letsencrypt.Certificates | map(select(.domain.main=="*.arcodange.duckdns.org")))[0]'
    cat /data/acme.json | jq '(.letsencrypt.Certificates | map(select(.domain.main=="*.arcodange.lab")))[0]'
    | jq '.certificate' -r | base64 -d | openssl x509
  register: traefik_certs_cmd
- set_fact:
@@ -3,8 +3,9 @@ roles:
  - name: geerlingguy.docker

collections:
  - name: community.general
  - name: community.docker
  - name: ansible.posix
  - name: community.crypto
  - name: community.docker
  - name: community.general
  - name: kubernetes.core
  - name: git+https://github.com/k3s-io/k3s-ansible.git
@@ -1,4 +1,5 @@
{{- range $app_name := .Values.gitea_applications -}}
{{- range $app_name, $app_attr := .Values.gitea_applications -}}
{{- $org := default "arcodange-org" $app_attr.org -}}
---
apiVersion: argoproj.io/v1alpha1
kind: Application
@@ -7,10 +8,14 @@ metadata:
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
  {{- with $app_attr.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  project: default
  source:
    repoURL: https://gitea.arcodange.duckdns.org/arcodange-org/{{ $app_name }}
    repoURL: https://gitea.arcodange.lab/{{ $org }}/{{ $app_name }}
    targetRevision: HEAD
    path: chart
  destination:
argocd/templates/argocd_image_updater.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
{{ with ( .Values.argocd_image_updater_chart_values ) }}
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: argocd-image-updater
  namespace: kube-system
spec:
  repo: https://argoproj.github.io/argo-helm
  chart: argocd-image-updater
  targetNamespace: argocd
  valuesContent: |-
    {{- toYaml . | nindent 4 }}
{{- end -}}
---
@@ -2,8 +2,30 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
gitea_applications:
  - url-shortener
  - tools
  - webapp
  - erp
  - cms
  url-shortener:
    annotations: {}
  tools:
    annotations: {}
  webapp:
    annotations:
      argocd-image-updater.argoproj.io/image-list: webapp=gitea.arcodange.lab/arcodange-org/webapp:latest
      argocd-image-updater.argoproj.io/webapp.update-strategy: digest
  erp:
    annotations: {}
  cms:
    annotations:
      argocd-image-updater.argoproj.io/image-list: cms=gitea.arcodange.lab/arcodange-org/cms:latest
      argocd-image-updater.argoproj.io/cms.update-strategy: digest
  dance-lessons-coach:
    org: arcodange
    annotations:
      argocd-image-updater.argoproj.io/image-list: dance-lessons-coach=gitea.arcodange.lab/arcodange/dance-lessons-coach:latest
      argocd-image-updater.argoproj.io/dance-lessons-coach.update-strategy: digest

argocd_image_updater_chart_values:
  config:
    argocd:
      grpcWeb: false
      serverAddress: "https://argocd.arcodange.lab/"
      insecure: true
      plaintext: true
@@ -9,7 +9,7 @@
> The unseal key, the initial vault root token and the terraform backend authentication are currently configured on the ansible controller (MacBook Pro).

>[!NOTE]
> Vault is deployed via [argo cd](https://gitea.arcodange.duckdns.org/arcodange-org/tools/src/branch/main/hashicorp-vault)
> Vault is deployed via [argo cd](https://gitea.arcodange.lab/arcodange-org/tools/src/branch/main/hashicorp-vault)

```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'base',
@@ -2,4 +2,7 @@

Provisions a gitea user "tofu_module_reader",
allowed to read certain projects; it is used by the CI to fetch terraform blueprints
via its SSH key stored in vault.
via its SSH key stored in vault.

#
Configures the OVH and Cloudflare tokens so that the other projects can manage cloud resources.
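As a rough sketch (not part of this diff), a CI job could pull that deploy key out of Vault before cloning; the kv path and repository name below are hypothetical and would need to match where the key actually lives:

```bash
# Hypothetical kv path and repo name: adjust to the real locations in Vault/Gitea.
vault kv get -field=private_key kvv1/gitea/tofu_module_reader > /tmp/id_tofu
chmod 600 /tmp/id_tofu
GIT_SSH_COMMAND="ssh -i /tmp/id_tofu" git clone git@gitea.arcodange.lab:arcodange-org/some-blueprint.git
```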
iac/cloudflare.tf (new file, 101 lines)
@@ -0,0 +1,101 @@
data "cloudflare_account" "arcodange" {
  filter = {
    name = "arcodange@gmail.com"
  }
}

locals {
  cloudflare_account_id = data.cloudflare_account.arcodange.account_id
}

resource "cloudflare_r2_bucket" "arcodange_tf" {
  account_id   = local.cloudflare_account_id
  name         = "arcodange-tf"
  jurisdiction = "eu"
}

module "cf_r2_arcodange_tf_token" {
  source     = "./modules/cloudflare_token"
  account_id = local.cloudflare_account_id
  bucket     = cloudflare_r2_bucket.arcodange_tf
  token_name = "r2_arcodange_tf_token"
  permissions = {
    bucket = [
      "account:Workers R2 Storage Read",
      "bucket:Workers R2 Storage Bucket Item Write",
    ]
    account = [
      "account:Account Settings Read",
    ]
  }
}
resource "vault_kv_secret" "cf_r2_arcodange_tf" {
  path = "kvv1/cloudflare/r2/arcodange-tf"
  data_json = jsonencode({
    S3_SECRET_ACCESS_KEY = module.cf_r2_arcodange_tf_token.r2_credentials.secret_access_key
    S3_ACCESS_KEY        = module.cf_r2_arcodange_tf_token.r2_credentials.access_key_id
    S3_ENDPOINT          = "https://${local.cloudflare_account_id}.eu.r2.cloudflarestorage.com"
  })
}

data "vault_policy_document" "cf_r2_arcodange_tf" {
  rule {
    path         = "kvv1/cloudflare/r2/arcodange-tf"
    capabilities = ["read"]
  }
  rule {
    path         = "kvv1/zoho/self_client" # zoho mail client is created manually
    capabilities = ["read"]
  }
}
resource "vault_policy" "cf_r2_arcodange_tf" {
  name   = "factory__cf_r2_arcodange_tf"
  policy = data.vault_policy_document.cf_r2_arcodange_tf.hcl
}

data "gitea_repo" "cms" {
  name     = "cms"
  username = "arcodange-org"
}
module "cf_arcodange_cms_token" {
  source     = "./modules/cloudflare_token"
  account_id = local.cloudflare_account_id
  bucket     = cloudflare_r2_bucket.arcodange_tf
  token_name = "cf_arcodange_cms_token"
  permissions = {
    account = [
      "account:Pages Write",
      "account:Account DNS Settings Write",
      "account:Account Settings Read",
      "zone:Zone Write",
      "zone:Zone Settings Write",
      "zone:DNS Write",
      "account:Cloudflare Tunnel Write",
      "account:Turnstile Sites Write",
    ]
  }
}
resource "gitea_repository_actions_secret" "cf_arcodange_cms_token" {
  repository       = data.gitea_repo.cms.name
  repository_owner = data.gitea_repo.cms.username
  secret_name      = "CLOUDFLARE_API_TOKEN"
  secret_value     = module.cf_arcodange_cms_token.token
}
resource "gitea_repository_actions_secret" "cf_account_id_cms" {
  repository       = data.gitea_repo.cms.name
  repository_owner = data.gitea_repo.cms.username
  secret_name      = "CLOUDFLARE_ACCOUNT_ID"
  secret_value     = local.cloudflare_account_id
}

output "token" {
  value     = module.cf_arcodange_cms_token.token
  sensitive = true
}

resource "vault_kv_secret" "cf_arcodange_cms_token" {
  path = "kvv1/cloudflare/cms/cf_arcodange_cms_token"
  data_json = jsonencode({
    token = module.cf_arcodange_cms_token.token
  })
}
@@ -35,9 +35,9 @@ resource "vault_kv_secret_v2" "longhorn_gcs_backup" {
  cas                 = 1
  delete_all_versions = true
  data_json = jsonencode({
    AWS_ACCESS_KEY_ID = google_storage_hmac_key.longhorn_backup.access_id
    AWS_ACCESS_KEY_ID     = google_storage_hmac_key.longhorn_backup.access_id
    AWS_SECRET_ACCESS_KEY = google_storage_hmac_key.longhorn_backup.secret
    AWS_ENDPOINTS: "https://storage.googleapis.com"
    AWS_ENDPOINTS : "https://storage.googleapis.com"
  })
}
iac/modules/cloudflare_token/main.tf (new file, 81 lines)
@@ -0,0 +1,81 @@
# Fetch all the available Cloudflare permissions
data "cloudflare_account_api_token_permission_groups_list" "all" {
  account_id = var.account_id
}

# Keep only the requested permissions
locals {
  # Shorten the Cloudflare scope (e.g. "account" from "com.cloudflare.api.account")
  permission_map = {
    for p in data.cloudflare_account_api_token_permission_groups_list.all.result :
    "${split(".", p.scopes[0])[length(split(".", p.scopes[0])) - 1]}:${p.name}" => p.id
  }
  permission_map_from_id = zipmap(values(local.permission_map), keys(local.permission_map))

  # Resolve the permissions (when present) for each category
  selected_account_permissions = var.permissions.account != null ? compact([
    for name in var.permissions.account : lookup(local.permission_map, name, null)
  ]) : []

  selected_bucket_permissions = var.bucket != null && try(var.permissions.bucket, null) != null ? compact([
    for name in var.permissions.bucket : lookup(local.permission_map, name, null)
  ]) : []

  # Collect the permissions that could not be resolved
  missing_permissions = concat(
    [for name in coalesce(var.permissions.account, []) : name if lookup(local.permission_map, name, null) == null],
    [for name in coalesce(var.permissions.bucket, []) : name if lookup(local.permission_map, name, null) == null]
  )

  # Target resources
  account_resource = {
    "com.cloudflare.api.account.${var.account_id}" = "*"
  }

  bucket_resource = var.bucket != null ? {
    "com.cloudflare.edge.r2.bucket.${var.account_id}_${var.bucket.jurisdiction}_${var.bucket.name}" = "*"
  } : {}

  # Policies built dynamically
  policies = [for policy in [
    length(local.selected_account_permissions) > 0 ? {
      effect            = "allow"
      permission_groups = [for id in local.selected_account_permissions : { id = id }]
      resources         = local.account_resource
    } : null,

    length(local.selected_bucket_permissions) > 0 ? {
      effect            = "allow"
      permission_groups = [for id in local.selected_bucket_permissions : { id = id }]
      resources         = local.bucket_resource
    } : null
  ] : policy if policy != null]

  error_message = length(local.missing_permissions) > 0 ? format("Unknown permissions: %s", join(", ", local.missing_permissions)) : ""
}

# Token creation
resource "cloudflare_account_token" "token" {
  account_id = var.account_id
  name       = var.token_name

  policies = local.policies

  expires_on = null

  lifecycle {
    ignore_changes       = [expires_on, policies]                           # permission ids are unstable, so ignore their drift
    replace_triggered_by = [null_resource.cloudflare_account_token_replace] # replace the token when permission names change
    precondition {
      condition     = length(local.missing_permissions) == 0
      error_message = local.error_message
    }
  }
}

resource "null_resource" "cloudflare_account_token_replace" { # replace token when permission names change
  triggers = {
    "account_permissions" = sha256(join("", sort([for p_id in local.selected_account_permissions : lookup(local.permission_map_from_id, p_id)])))
    "bucket_permissions"  = sha256(join("", sort([for p_id in local.selected_bucket_permissions : lookup(local.permission_map_from_id, p_id)])))
  }
}
iac/modules/cloudflare_token/outputs.tf (new file, 35 lines)
@@ -0,0 +1,35 @@
output "token" {
  description = "Value of the Cloudflare token"
  value       = cloudflare_account_token.token.value
  sensitive   = true
}

output "token_id" {
  description = "ID of the Cloudflare token (used as the Access Key ID for R2 when a bucket is set)"
  value       = cloudflare_account_token.token.id
}

output "token_sha256" {
  description = "SHA-256 of the Cloudflare token (used as the Secret Access Key for R2 when a bucket is set)"
  value       = sha256(cloudflare_account_token.token.value)
  sensitive   = true
}

output "r2_credentials" {
  description = "R2 credentials when a bucket is configured (AccessKeyId, SecretAccessKey)"
  value = var.bucket != null ? {
    access_key_id     = cloudflare_account_token.token.id
    secret_access_key = sha256(cloudflare_account_token.token.value)
  } : null
  sensitive = true
}

output "permissions" {
  description = "List of the resolved permission group ids assigned to the token"
  value       = compact(concat(local.selected_account_permissions, local.selected_bucket_permissions))
}

output "resources" {
  description = "Map of the resources assigned to the token"
  value       = keys(merge(local.account_resource, local.bucket_resource))
}
iac/modules/cloudflare_token/variables.tf (new file, 37 lines)
@@ -0,0 +1,37 @@
variable "account_id" {
  description = "Cloudflare account ID"
  type        = string
}

variable "token_name" {
  description = "Name of the Cloudflare token to create"
  type        = string
}

variable "permissions" {
  description = <<-EOT
    List of Cloudflare permissions (e.g. ["Pages Deploy", "Zone DNS Edit"]);
    you can check the required permissions per service at
    https://developers.cloudflare.com/api/node/
  EOT
  type = object({
    account = optional(list(string))
    bucket  = optional(list(string))
  })
}

variable "bucket" {
  description = <<-EOT
    Optional object describing an R2 bucket.
    Example:
    {
      name         = "mon-bucket"
      jurisdiction = "eu"
    }
  EOT
  type = object({
    name         = string
    jurisdiction = string
  })
  default = null
}
iac/ovh.tf (new file, 57 lines)
@@ -0,0 +1,57 @@
data "ovh_me" "account" {}
data "ovh_iam_reference_actions" "domain" {
  type = "domain"
}
locals {
  domain_read_permissions = [for a in data.ovh_iam_reference_actions.domain.actions : a if contains(a.categories, "READ")]
}

resource "ovh_me_api_oauth2_client" "cms" {
  name        = "cms repo"
  description = "arcodange.fr management"
  flow        = "CLIENT_CREDENTIALS"
}
resource "ovh_iam_policy" "cms" {
  name        = "cms_manager"
  description = "Permissions related to www.arcodange.fr domain"
  identities  = [ovh_me_api_oauth2_client.cms.identity]
  resources = [
    data.ovh_me.account.urn,
    # ovh_me_api_oauth2_client.cms.identity,
    "urn:v1:eu:resource:domain:arcodange.fr",
  ]
  # these are all the actions
  allow = concat([
    "account:apiovh:me/get",
    "account:apiovh:me/supportLevel/get",
    "account:apiovh:me/certificates/get",
    "account:apiovh:me/tag/get",
    "account:apiovh:services/get",
  ],
  local.domain_read_permissions[*].action,
  [
    "domain:apiovh:nameServer/edit",
  ])
}

resource "gitea_repository_actions_secret" "ovh_cms_client_id" {
  repository       = data.gitea_repo.cms.name
  repository_owner = data.gitea_repo.cms.username
  secret_name      = "OVH_CLIENT_ID"
  secret_value     = ovh_me_api_oauth2_client.cms.client_id
}
resource "gitea_repository_actions_secret" "ovh_cms_client_secret" {
  repository       = data.gitea_repo.cms.name
  repository_owner = data.gitea_repo.cms.username
  secret_name      = "OVH_CLIENT_SECRET"
  secret_value     = ovh_me_api_oauth2_client.cms.client_secret
}

resource "vault_kv_secret" "ovh_cms_token" {
  path = "kvv1/ovh/cms/app"
  data_json = jsonencode({
    client_id     = ovh_me_api_oauth2_client.cms.client_id
    client_secret = ovh_me_api_oauth2_client.cms.client_secret
    urn           = ovh_me_api_oauth2_client.cms.identity
  })
}
@@ -2,7 +2,7 @@ terraform {
  required_providers {
    gitea = {
      source  = "go-gitea/gitea"
      version = "0.5.1"
      version = "0.6.0"
    }
    vault = {
      source = "vault"
@@ -12,16 +12,24 @@ terraform {
      source  = "google"
      version = "7.0.1"
    }
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "~> 5"
    }
    ovh = {
      source  = "ovh/ovh"
      version = "2.8.0"
    }
  }
}

provider "gitea" { # https://registry.terraform.io/providers/go-gitea/gitea/latest/docs
  base_url = "https://gitea.arcodange.duckdns.org"
  base_url = "https://gitea.arcodange.lab"
  # use GITEA_TOKEN env var
}

provider "vault" {
  address = "https://vault.arcodange.duckdns.org"
  address = "https://vault.arcodange.lab"
  auth_login_jwt { # TERRAFORM_VAULT_AUTH_JWT environment variable
    mount = "gitea_jwt"
    role  = "gitea_cicd"
@@ -31,4 +39,10 @@ provider "vault" {
provider "google" {
  project = "arcodange"
  region  = "US-EAST1"
}

provider "cloudflare" {} # CLOUDFLARE_API_TOKEN environment variable required

provider "ovh" { # OVH_APPLICATION_KEY OVH_APPLICATION_SECRET OVH_CONSUMER_KEY
  endpoint = "ovh-eu"
}
@@ -29,7 +29,7 @@ provider "postgresql" {
}

provider vault {
  address = "https://vault.arcodange.duckdns.org"
  address = "https://vault.arcodange.lab"
  auth_login_jwt { # TERRAFORM_VAULT_AUTH_JWT environment variable
    mount = "gitea_jwt"
    role  = "gitea_cicd"

@@ -1,4 +1,6 @@
applications = [
  "webapp",
  "erp",
  "crowdsec",
  "plausible",
]
ssl.md (new file, 64 lines)
@@ -0,0 +1,64 @@
# Distributing the Step-CA Root CA

This guide explains how to install the Step-CA root certificate on every device so that TLS works with the internal PKI.

---

## Prerequisites

- The root certificate is fetched from `step_ca_primary` (pi1): `/home/step/.step/certs/root_ca.crt`
- The target machines are:
  - pi1, pi2, pi3 (Raspbian / Debian)
  - localhost (Mac)

---

## 1. Copy the certificate onto the RPis

```bash
scp pi1:/home/step/.step/certs/root_ca.crt /tmp/root_ca.crt
```

Then on each Pi (idempotent):
```bash
for pi in pi1 pi2 pi3
do
  ssh $pi "sudo cp /home/step/.step/certs/root_ca.crt /usr/local/share/ca-certificates/arcodange-root.crt && sudo chmod 644 /usr/local/share/ca-certificates/arcodange-root.crt && sudo update-ca-certificates"
  ssh $pi 'sudo apt install -y libnss3-tools && certutil -d sql:/home/pi/.pki/nssdb -A -t "C,," -n "arcodange-root" -i /usr/local/share/ca-certificates/arcodange-root.crt'
done
```

Quick check on each Pi:
```bash
ssh pi1 "sudo openssl verify /usr/local/share/ca-certificates/arcodange-root.crt"
ssh pi2 "sudo openssl verify /usr/local/share/ca-certificates/arcodange-root.crt"
ssh pi3 "sudo openssl verify /usr/local/share/ca-certificates/arcodange-root.crt"
```

---

## 2. Copy the certificate onto the Mac (localhost)

```bash
scp pi1:/home/step/.step/certs/root_ca.crt /tmp/root_ca.crt
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/root_ca.crt
sudo scp pi@pi1:/etc/ssl/certs/arcodange-root.pem /etc/ssl/certs/arcodange-root.pem
```

Verification:
```bash
security verify-cert -c /tmp/root_ca.crt
```
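Once the root CA is trusted, a quick end-to-end probe against any internal endpoint confirms the whole chain; `vault.arcodange.lab` below is just one example host:

```bash
# Should print 200 (or at least succeed at the TLS level) once the CA is trusted system-wide.
curl -sS -o /dev/null -w '%{http_code}\n' https://vault.arcodange.lab
# "Verify return code: 0 (ok)" confirms the chain against the system trust store.
openssl s_client -connect vault.arcodange.lab:443 -servername vault.arcodange.lab </dev/null 2>/dev/null | grep "Verify return code"
```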

---

## 3. Restart TLS services if needed

On the RPis (optional, e.g. if you run Docker, containerd or k3s); see the sketch below.
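A minimal sketch, assuming systemd units named `docker`, `k3s` and `k3s-agent` (run only the ones actually installed on each node):

```bash
# These runtimes load the system trust store at startup, so restart them
# after update-ca-certificates for the new root CA to take effect.
sudo systemctl restart docker     # Docker daemon
sudo systemctl restart k3s        # on the k3s server node
sudo systemctl restart k3s-agent  # on k3s agent nodes
```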

---

## 4. Another handy command

> `curl https://ssl-ca.arcodange.lab:8443/roots.pem -ks > /usr/local/share/ca-certificates/arcodange-root.crt && update-ca-certificates 2>/dev/null >/dev/null`