---
# Cluster bring-up playbook: installs Docker, deploys k3s, customizes the
# bundled Traefik, provisions NFS-backed storage, then redeploys Traefik.
# Most plays are tagged `never` so they only run when explicitly requested.

- name: System Docker
  hosts: raspberries:&local
  gather_facts: true
  tags: never
  become: true
  pre_tasks:
    - name: Set hostname
      ansible.builtin.hostname:
        name: "{{ inventory_hostname }}"
      become: true
      # only rename when the live hostname differs from inventory
      when: inventory_hostname != ansible_hostname

    - name: Prevent apt source conflict
      ansible.builtin.file:
        state: absent
        path: /etc/apt/sources.list.d/docker.list
      become: true

    - name: Install role geerlingguy.docker
      community.general.ansible_galaxy_install:
        type: role
        name: geerlingguy.docker
      # the role is installed once, on the controller only
      run_once: true
      delegate_to: localhost
      become: false

    - name: Show detected machine architecture
      ansible.builtin.debug:
        var: ansible_facts.machine

  tasks:
    - name: Install Docker
      ansible.builtin.include_role:
        name: geerlingguy.docker

  post_tasks:
    - name: Adding existing user '{{ ansible_user }}' to group docker
      ansible.builtin.user:
        name: "{{ ansible_user }}"
        groups: docker
        append: true
      become: true

# ---
- name: System K3S
  hosts: raspberries:&local
  tags: never
  tasks:
    - name: Prepare inventory for k3s external playbook
      # always build the in-memory k3s_cluster/server/agent groups, even when
      # the rest of this play is skipped by tags
      tags: always
      ansible.builtin.add_host:
        hostname: "{{ item }}"
        groups:
          - k3s_cluster
          # first host (sorted) becomes the server, the rest become agents
          - "{{ ansible_loop.first | ternary('server', 'agent') }}"
      loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
      loop_control:
        # extended loop vars needed for ansible_loop.first
        extended: true
        extended_allitems: false

    - name: Install collection k3s.orchestration
      community.general.ansible_galaxy_install:
        type: collection
        name: git+https://github.com/k3s-io/k3s-ansible
      delegate_to: localhost
      run_once: true

- name: k3s
  tags: never
  # ansible.builtin.import_playbook: k3s.orchestration.site
  ansible.builtin.import_playbook: k3s.orchestration.upgrade
  # ansible.builtin.import_playbook: k3s.orchestration.reset
  vars:
    k3s_version: v1.32.2+k3s1
    # traefik is disabled here and redeployed below with a custom HelmChart
    extra_server_args: "--docker --disable traefik"
    extra_agent_args: "--docker"
    api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"

- name: how to reach k3s
  hosts: server
  tasks:
    - name: copy /etc/rancher/k3s/k3s.yaml to ~/.kube/config from the k3s server and replace 127.0.0.1 with the server ip or hostname
      run_once: true
      block:
        - name: Fetch kubeconfig from the k3s server
          ansible.builtin.fetch:
            src: /etc/rancher/k3s/k3s.yaml
            dest: ~/.kube/config
            flat: true
          become: true
          run_once: true

        - name: Point kubeconfig at the server address instead of loopback
          ansible.builtin.replace:
            path: ~/.kube/config
            regexp: 'server: https://127.0.0.1:6443'
            replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'
          delegate_to: localhost

    - name: customize k3s traefik configuration https://docs.k3s.io/helm
      block:
        - name: Get my public IP
          community.general.ipify_facts:

        - name: Write traefik ConfigMap and HelmChart manifest
          become: true
          ansible.builtin.copy:
            dest: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
            content: |-
              apiVersion: v1
              data:
                dynamic.yaml: |-
                  {{ traefik_config_yaml | to_nice_yaml | indent( width=4 ) }}
              kind: ConfigMap
              metadata:
                name: traefik-configmap
                namespace: kube-system
              ---
              apiVersion: helm.cattle.io/v1
              kind: HelmChart
              metadata:
                name: traefik
                namespace: kube-system
              spec:
                repo: https://traefik.github.io/charts
                chart: traefik
                version: v37.0.0
                targetNamespace: kube-system
                valuesContent: |-
                  {{ traefik_helm_values | to_nice_yaml | indent( width=4 ) }}
      vars:
        # Traefik dynamic (file-provider) configuration, rendered into the
        # ConfigMap above.
        traefik_config_yaml:
          http:
            services:
              gitea:
                loadBalancer:
                  servers:
                    - url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000"
            routers:
              acme-challenge:
                rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
                service: acme-http@internal
                tls:
                  certResolver: letsencrypt
                  domains:
                    - main: "arcodange.duckdns.org"
                      sans:
                        - "*.arcodange.duckdns.org"
                entryPoints:
                  - websecure
                  - web
              gitea:
                rule: Host(`gitea.arcodange.duckdns.org`)
                service: gitea
                middlewares:
                  - localIp
                tls:
                  certResolver: letsencrypt
                  domains:
                    - main: "arcodange.duckdns.org"
                      sans:
                        - "gitea.arcodange.duckdns.org"
                entryPoints:
                  - websecure
            middlewares:
              localIp:
                ipAllowList:
                  sourceRange:
                    - "0.0.0.0/0"
                    # - "192.168.1.0/24"
                    # - "{{ ipify_public_ip }}/32"
                  # ipStrategy:
                  #   depth: 2
        # Helm values for the traefik chart, rendered into valuesContent above.
        # default is https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
        # current is https://github.com/traefik/traefik-helm-chart/blob/v30.1.0/traefik/values.yaml
        traefik_helm_values:
          deployment:
            kind: "Deployment"
          # make predictible choice of node to direct https traffic to this
          # node and avoid NAT/loss of client IP
          nodeSelector:
            node-role.kubernetes.io/master: 'true'
          service:
            spec:
              externalTrafficPolicy: Local
          ports:
            traefik:
              expose:
                default: true
          ingressRoute:
            dashboard:
              enabled: true
          globalArguments: []  # deactivate --global.sendanonymoususage
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LEGO_DISABLE_CNAME_SUPPORT
              value: 'true'
          logs:
            general:
              level: DEBUG
              # format: json
            access:
              enabled: true
              # format: json
          persistence:
            # -- Enable persistence using Persistent Volume Claims
            # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
            # It can be used to store TLS certificates, see `storage` in certResolvers
            enabled: true
            name: data
            # existingClaim: ""
            accessMode: ReadWriteOnce
            size: 128Mi
            storageClass: "nfs-client"
            # volumeName: ""
            path: /data
            annotations: {}
          volumes:
            - name: traefik-configmap
              mountPath: /config
              type: configMap
          additionalArguments:
            - '--providers.file.filename=/config/dynamic.yaml'
            - '--providers.kubernetesingress.ingressendpoint.publishedservice=kube-system/traefik'
          certificatesResolvers:
            letsencrypt:
              acme:
                # for challenge options cf. https://doc.traefik.io/traefik/https/acme/
                email: arcodange@gmail.com
                tlsChallenge: true
                dnsChallenge:
                  # requires env variable DUCKDNS_TOKEN
                  provider: duckdns
                httpChallenge:
                  entryPoint: "web"
                # It has to match the path with a persistent volume
                storage: /data/acme.json
          envFrom:
            - secretRef:
                name: traefik-duckdns-token
                # MY_TOKEN=
                # kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system

    - name: touch manifests/traefik.yaml to trigger update
      ansible.builtin.file:
        path: /var/lib/rancher/k3s/server/manifests/traefik-v3.yaml
        state: touch
      become: true

# ---
- name: setup hard disk
  tags: never
  ansible.builtin.import_playbook: ./setup/hard_disk.yml
  vars:
    hard_disk__partitions:
      nfs: []

- name: Deploy NFS Subdir External Provisioner and alter default traefik deployment
  tags: never
  hosts: localhost
  tasks:
    - name: Deploy NFS Subdir External Provisioner
      block:
        - name: Add Helm repository for NFS Subdir External Provisioner
          kubernetes.core.helm_repository:
            name: nfs-subdir-external-provisioner
            repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
            force_update: true

        - name: Install NFS Subdir External Provisioner using Helm
          # debug:
          #   var: hard_disk__nfs
          kubernetes.core.helm:
            name: nfs-subdir-external-provisioner
            chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
            release_namespace: "{{ hard_disk__nfs.ks_namespace }}"
            values:
              nfs:
                server: "{{ hard_disk__nfs.server_ip }}"
                path: "{{ hard_disk__nfs.export_directory }}"
      vars:
        # NFS facts gathered by the hard-disk setup play on the first
        # hard_disk host
        hard_disk__nfs: "{{ hostvars[groups.hard_disk[0]].hard_disk__nfs }}"

- name: redeploy traefik
  hosts: localhost
  tasks:
    - name: delete old traefik deployment
      kubernetes.core.k8s:
        # Deployments are served by the apps/v1 API group, not core v1
        api_version: apps/v1
        name: traefik
        kind: Deployment
        namespace: kube-system
        state: absent

    - name: delete old deployment job so the k3s helm controller redeploy with our new configuration
      kubernetes.core.k8s:
        api_version: batch/v1
        name: helm-install-traefik
        kind: Job
        namespace: kube-system
        state: absent

    - name: get traefik deployment
      kubernetes.core.k8s_info:
        api_version: apps/v1
        name: traefik
        kind: Deployment
        namespace: kube-system
        # wait until the redeployed Deployment exists and is ready
        wait: true
      register: traefik_deployment

    - name: show traefik deployment
      ansible.builtin.debug:
        var: traefik_deployment