---
# Prepare block devices first: later plays assume the Longhorn data path exists.
- name: Prepare disks for longhorn
  ansible.builtin.import_playbook: ./prepare_disks.yml

# Install Docker on the Raspberry Pi hosts via the geerlingguy.docker role.
# Opt-in play: tagged "never", so it only runs when requested with --tags.
- name: System Docker
  hosts: raspberries:&local
  gather_facts: true
  tags: never
  become: true

  pre_tasks:
    - name: set hostname
      ansible.builtin.hostname:
        name: "{{ inventory_hostname }}"
      become: true
      when: inventory_hostname != ansible_hostname

    # The role manages its own apt repository entry; a pre-existing
    # docker.list would conflict with it.
    - name: Prevent apt source conflict
      ansible.builtin.file:
        state: absent
        path: /etc/apt/sources.list.d/docker.list
      become: true

    # Fetch the role once, on the controller only.
    - name: Install role geerlingguy.docker
      community.general.ansible_galaxy_install:
        type: role
        name: geerlingguy.docker
      run_once: true
      delegate_to: localhost
      become: false

    - name: Show target machine architecture
      ansible.builtin.debug:
        var: ansible_facts.machine

  tasks:
    - name: Install Docker
      ansible.builtin.include_role:
        name: geerlingguy.docker

  post_tasks:
    # Lets the connecting user run the docker CLI without sudo.
    - name: adding existing user '{{ ansible_user }}' to group docker
      ansible.builtin.user:
        name: '{{ ansible_user }}'
        groups: docker
        append: true
      become: true

# ---

# Longhorn node prerequisites: iSCSI initiator, cryptsetup/dm_crypt for
# encrypted volumes, and the default data directory.
- name: Install iSCSI client for Longhorn on Raspberry Pi
  hosts: raspberries:&local
  become: true

  tasks:
    - name: Install open-iscsi
      ansible.builtin.apt:
        name: open-iscsi
        state: present
        update_cache: true

    - name: Enable and start iSCSI service
      ansible.builtin.service:
        name: iscsid
        state: started
        enabled: true

    # Install cryptsetup (needed for Longhorn encrypted volumes).
    - name: Installer cryptsetup
      ansible.builtin.apt:
        name: cryptsetup
        state: present
        update_cache: true

    # Load the dm_crypt kernel module now.
    - name: Charger le module noyau dm_crypt
      ansible.builtin.modprobe:
        name: dm_crypt
        state: present

    # Ensure dm_crypt is also loaded on every boot.
    - name: S'assurer que le module dm_crypt est chargé au démarrage
      ansible.builtin.lineinfile:
        path: /etc/modules
        line: dm_crypt
        state: present

    # Default Longhorn data path. Best-effort: does not fail the play if the
    # mount point is missing or ownership cannot be applied on this host.
    - name: Créer dossier longhorn
      ansible.builtin.file:
        path: /mnt/arcodange/longhorn
        state: directory
        owner: pi
        group: docker
        mode: '0774'
      ignore_errors: true

# ---

# Build the inventory groups expected by the external k3s-ansible playbook:
# the first host (sorted order) becomes the server, the others agents.
- name: System K3S
  hosts: raspberries:&local
  tags: never

  tasks:
    - name: prepare inventory for k3s external playbook
      tags: always
      ansible.builtin.add_host:
        hostname: "{{ item }}"
        groups:
          - k3s_cluster
          - "{{ ansible_loop.first | ternary('server', 'agent') }}"
      loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
      loop_control:
        extended: true  # exposes ansible_loop.first
        extended_allitems: false

    # Fetch the k3s-ansible collection once, on the controller only.
    - name: Install collection k3s.orchestration
      community.general.ansible_galaxy_install:
        type: collection
        name: git+https://github.com/k3s-io/k3s-ansible
      delegate_to: localhost
      run_once: true

# Run the upstream k3s-ansible site playbook (only with --tags k3s).
- name: k3s
  tags: never,k3s
  ansible.builtin.import_playbook: k3s.orchestration.site
  # ansible.builtin.import_playbook: k3s.orchestration.upgrade
  # ansible.builtin.import_playbook: k3s.orchestration.reset
  vars:
    k3s_version: v1.34.1+k3s1
    # k3s reuses the host Docker daemon; bundled traefik is disabled because a
    # customized traefik chart is deployed later in this playbook.
    extra_server_args: "--docker --disable traefik"
    extra_agent_args: "--docker"
    api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"

- name: how to reach k3s
  hosts: server

  tasks:
    - name: copy /etc/rancher/k3s/k3s.yaml to ~/.kube/config from the k3s server and replace 127.0.0.1 with the server ip or hostname
      run_once: true
      block:
        - name: Fetch kubeconfig to the controller
          ansible.builtin.fetch:
            src: /etc/rancher/k3s/k3s.yaml
            dest: ~/.kube/config
            flat: true
          become: true
          run_once: true

        - name: Point kubeconfig at the server address instead of 127.0.0.1
          ansible.builtin.replace:
            path: ~/.kube/config
            regexp: 'server: https://127.0.0.1:6443'
            replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'
          delegate_to: localhost

    # - name: setup hard disk
    #   tags: never
    #   ansible.builtin.import_playbook: ./setup/hard_disk_v2.yml
    #   # vars:
    #   #   hard_disk__partitions:
    #   #     nfs: []

    # Dropping a HelmChart manifest into the k3s manifests directory makes the
    # embedded helm-controller install/upgrade the chart automatically.
    - name: setup longhorn for volumes https://docs.k3s.io/helm
      become: true
      ansible.builtin.copy:
        dest: /var/lib/rancher/k3s/server/manifests/longhorn-install.yaml
        content: |-
          apiVersion: helm.cattle.io/v1
          kind: HelmChart
          metadata:
            annotations:
              helmcharts.cattle.io/managed-by: helm-controller
            finalizers:
              - wrangler.cattle.io/on-helm-chart-remove
            generation: 1
            name: longhorn-install
            namespace: kube-system
          spec:
            version: v1.9.1
            chart: longhorn
            repo: https://charts.longhorn.io
            failurePolicy: abort
            targetNamespace: longhorn-system
            createNamespace: true
            valuesContent: |-
              defaultSettings:
                defaultDataPath: /mnt/arcodange/longhorn
      vars:
        longhorn_helm_values: {} # https://github.com/longhorn/longhorn/blob/master/chart/values.yaml

    - name: customize k3s traefik configuration https://docs.k3s.io/helm
      block:
        # ipify_public_ip is used below in the localIp allow-list.
        - name: Get my public IP
          community.general.ipify_facts:

        - name: Write traefik dynamic-config ConfigMap and HelmChart manifest
          become: true
          ansible.builtin.copy:
            dest: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
            content: |-
              apiVersion: v1
              data:
                dynamic.yaml: |-
                  {{ traefik_config_yaml | to_nice_yaml | indent( width=4 ) }}
              kind: ConfigMap
              metadata:
                name: traefik-configmap
                namespace: kube-system
              ---
              apiVersion: helm.cattle.io/v1
              kind: HelmChart
              metadata:
                name: traefik
                namespace: kube-system
              spec:
                repo: https://traefik.github.io/charts
                chart: traefik
                version: v37.0.0
                targetNamespace: kube-system
                valuesContent: |-
                  {{ traefik_helm_values | to_nice_yaml | indent( width=4 ) }}
      vars:
        # Traefik file-provider dynamic configuration, rendered into the
        # ConfigMap above.
        traefik_config_yaml:
          http:
            services:
              gitea:
                loadBalancer:
                  servers:
                    - url: "http://{{ hostvars[groups.gitea[0]]['preferred_ip'] }}:3000"
                    # - url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000" # might work again if deactivate rpi wifi
            routers:
              dashboard:
                rule: Host(`traefik.arcodange.duckdns.org`)
                service: api@internal
                middlewares:
                  - localIp
                tls:
                  certResolver: letsencrypt
                  domains:
                    - main: "arcodange.duckdns.org"
                      sans:
                        - "traefik.arcodange.duckdns.org"
                entryPoints:
                  - websecure
                  - web
              acme-challenge:
                rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
                service: acme-http@internal
                tls:
                  certResolver: letsencrypt
                  domains:
                    - main: "arcodange.duckdns.org"
                      sans:
                        - "*.arcodange.duckdns.org"
                entryPoints:
                  - websecure
                  - web
              gitea:
                rule: Host(`gitea.arcodange.duckdns.org`)
                service: gitea
                middlewares:
                  - localIp
                tls:
                  certResolver: letsencrypt
                  domains:
                    - main: "arcodange.duckdns.org"
                      sans:
                        - "gitea.arcodange.duckdns.org"
                # key case normalized from "entrypoints" to match the other routers
                entryPoints:
                  - websecure
            middlewares:
              localIp:
                ipAllowList:
                  sourceRange:
                    - "192.168.1.0/24"
                    - "{{ ipify_public_ip }}/32"
                    # - "0.0.0.0/0"
                  # ipStrategy:
                  #   depth: 1
        # Helm values for the traefik chart, rendered into valuesContent above.
        traefik_helm_values:
          deployment:
            kind: "Deployment"
            initContainers:
              # acme.json must exist with 0600 permissions before traefik starts.
              - name: volume-permissions
                image: busybox:latest
                command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"]
                volumeMounts:
                  - name: data
                    mountPath: /data
          # default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
          # current is https://github.com/traefik/traefik-helm-chart/blob/v30.1.0/traefik/values.yaml
          nodeSelector:
            node-role.kubernetes.io/control-plane: 'true' # make predictible choice of node to direct https traffic to this node and avoid NAT/loss of client IP
          service:
            spec:
              externalTrafficPolicy: Local
          ports:
            traefik:
              expose:
                default: true
          ingressRoute:
            dashboard:
              enabled: true
          globalArguments: [] # deactivate --global.sendanonymoususage
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LEGO_DISABLE_CNAME_SUPPORT
              value: 'true'
          logs:
            general:
              level: DEBUG
              # format: json
            access:
              enabled: true
              # format: json
          podSecurityContext:
            runAsGroup: 65532
            runAsNonRoot: true
            runAsUser: 65532
            fsGroup: 65532 # else the persistent volume might be owned by root and be unwriteable
          persistence:
            # -- Enable persistence using Persistent Volume Claims
            # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
            # It can be used to store TLS certificates, see `storage` in certResolvers
            enabled: true
            name: data
            # existingClaim: ""
            accessMode: ReadWriteOnce
            size: 128Mi
            storageClass: "longhorn"
            # volumeName: ""
            path: /data
            annotations: {}
          volumes:
            - name: traefik-configmap
              mountPath: /config
              type: configMap
          additionalArguments:
            - '--providers.file.filename=/config/dynamic.yaml'
            - '--providers.kubernetesingress.ingressendpoint.publishedservice=kube-system/traefik'
          certificatesResolvers:
            letsencrypt:
              acme:
                # for challenge options cf. https://doc.traefik.io/traefik/https/acme/
                # NOTE(review): three challenge types are declared below; an ACME
                # resolver normally uses a single challenge — confirm which one is intended.
                email: arcodange@gmail.com
                tlsChallenge: true
                dnsChallenge:
                  # requires env variable DUCKDNS_TOKEN
                  provider: duckdns
                  propagation:
                    delayBeforeChecks: 120
                    disableChecks: true
                  resolvers:
                    - "1.1.1.1:53"
                    - "8.8.8.8:53"
                httpChallenge:
                  entryPoint: "web"
                # It has to match the path with a persistent volume
                storage: /data/acme.json
          envFrom:
            - secretRef:
                name: traefik-duckdns-token
          # MY_TOKEN=<my token (see https://www.duckdns.org/domains)>
          # kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system

    # Touch the manifest so the k3s helm-controller notices a change and
    # re-applies the chart.
    - name: touch manifests/traefik.yaml to trigger update
      ansible.builtin.file:
        path: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
        state: touch
      become: true

# ---

- name: redeploy traefik
  hosts: localhost

  tasks:
    # Deployments live in the apps/v1 API group; "v1" (core) has no Deployment
    # kind, so the module would fail to resolve the resource. Fixed below.
    - name: delete old traefik deployment
      kubernetes.core.k8s:
        api_version: apps/v1
        name: traefik
        kind: Deployment
        namespace: kube-system
        state: "absent"

    - name: delete old deployment job so the k3s helm controller redeploy with our new configuration
      kubernetes.core.k8s:
        api_version: batch/v1
        name: helm-install-traefik
        kind: Job
        namespace: kube-system
        state: "absent"

    # Wait for the helm-controller to recreate the deployment, then show it.
    - name: get traefik deployment
      kubernetes.core.k8s_info:
        api_version: apps/v1
        name: traefik
        kind: Deployment
        namespace: kube-system
        wait: true
      register: traefik_deployment

    - name: Show recreated traefik deployment
      ansible.builtin.debug:
        var: traefik_deployment