k3s setup and git action runner

This commit is contained in:
2024-08-12 21:45:16 +02:00
parent 3cfbc59f50
commit cb4d679d8b
29 changed files with 888 additions and 132 deletions

View File

@@ -72,4 +72,4 @@ classDef done fill:gold,stroke:indigo,stroke-width:4px,color:blue;
class prepare_hd,nodeId2 done;
```
🏹💻🪽
🏹💻🪽

View File

@@ -1,10 +1,11 @@
# docker build -f ansible/Dockerfile -t arcodange-ansible:0.0.0 ansible/
FROM python:slim
RUN apt update && apt install openssh-client socat gosu -y
RUN apt update && apt install openssh-client socat gosu git -y
COPY nonroot_ssh_proxy_setup.sh /usr/local/bin/nonroot_ssh_proxy_setup.sh
COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
COPY requirements.yml /tmp/requirements.yml
RUN chmod +x /usr/local/bin/*.sh
ENV SSH_AUTH_SOCK=/home/arcodange/.ssh/socket
@@ -13,10 +14,11 @@ USER 1000
WORKDIR /home/arcodange/code
ENV PATH=/home/arcodange/.local/bin:/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
RUN pip install ansible-core jmespath
RUN pip install ansible-core jmespath kubernetes dnspython
ENV GALAXY_SERVER=https://beta-galaxy.ansible.com/api/
RUN ansible-galaxy collection install --token 11bebd8fd1ad4009f700bdedbeb80b19743ce3d3 \
community.general community.docker ansible.posix
-r /tmp/requirements.yml
# community.general community.docker ansible.posix kubernetes.core
ENV ANSIBLE_HOST_KEY_CHECKING=False
ENV ANSIBLE_FORCE_COLOR=True

View File

@@ -19,7 +19,7 @@ git clone -q --depth 1 --branch master https://github.com/arcodange/ssh-agent.gi
docker run -d --name=ssh-agent docker-ssh-agent:latest
docker run --rm --volumes-from=ssh-agent -v ~/.ssh:/.ssh -it docker-ssh-agent:latest ssh-add /root/.ssh/id_rsa
docker run --rm -u root --name test --volumes-from=ssh-agent -v $PWD:/home/arcodange/code arcodange-ansible:0.0.0 \
ansible-playbook ansible/arcodange/factory/playbooks/setup/setup.yml -i ansible/arcodange/factory/inventory -vv
ansible-playbook ansible/arcodange/factory/playbooks/setup/01_system.yml -i ansible/arcodange/factory/inventory -vv
```
### a tool to reuse a ssh agent (not required)
@@ -43,4 +43,9 @@ ssh-add ~/.ssh/id_rsa
```sh
ansible -i ,localhost -c local localhost -m raw -a "echo hello world {{ inventory_hostname }} : {{ hostvars | to_nice_json | regex_replace(\"['\n]\",' ') }}"
```
```
### local python environment with pipx
#### add dependency
- `pipx runpip ansible-core install dnspython`

View File

@@ -1,3 +1,50 @@
# Ansible Collection - arcodange.factory
Documentation for the collection.
```sh
MY_TOKEN= #<my token (see https://www.duckdns.org/domains)>
kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system
```
```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'dark' } }%%
timeline
title ordre des playbooks
section Setup DNS, OS, ...
configuration manuelle
: installer OS, réserver IP statique, configurer SSH,VNC
: formater et créer des partitions avec gparted
section Docker & K3S
system
: install Docker
: install K3S working with docker
: configure Traefik
section Volume, NFS
setup hard_disk
: monter les partitions
: installer NFS
system
: déployer provisionner NFS
section postgres
setup
: postgres
section gitea
setup
: gitea
section gitea action runner
setup
: gitea action runner
section argo cd
argo_cd
: argo cd
section hello world app
setup git repository
: terraform
setup CI
deploy
: dev : list exposed deployments with label and port as a landing page
: expose (as ngrok ? direct ? port ? )
```

View File

@@ -1,5 +1,6 @@
# to add/mount a partition, use the gparted utility to create it beforehand with the matching name/label
hard_disk__partitions:
nfs: []
gitea_data:
- gitea
pg_data:
@@ -10,4 +11,9 @@ hard_disk__applications:
gitea: "{{ gitea }}"
hard_disk__postgres_databases:
gitea: "{{ gitea_database }}"
gitea: "{{ gitea_database }}"
hard_disk__nfs:
server_ip: "{{ ansible_host }}"
ks_namespace: kube-system
export_directory: /arcodange/nfs

View File

@@ -35,7 +35,7 @@ gitea:
networks:
- gitea
ports:
- "80:3000"
- "3000:3000"
- "2222:22"
volumes:
- /arcodange/{{gitea_partition}}/gitea/data:/data

View File

@@ -6,10 +6,10 @@ raspberries:
ansible_host: pi2.home
internetPi1:
ansible_host: rg-evry.changeip.co
ansible_host: arcodange.duckdns.org
ansible_port: 51022
internetPi2:
ansible_host: rg-evry.changeip.co
ansible_host: arcodange.duckdns.org
ansible_port: 52022
vars:
@@ -17,12 +17,15 @@ raspberries:
local:
hosts:
localhost:
ansible_connection: local
ansible_python_interpreter: /Users/gabrielradureau/.local/pipx/venvs/ansible-core/bin/python
pi1:
pi2:
hard_disk:
hosts:
pi2 # 4To toshiba external hard drive (/dev/sda)
pi2: # 4To toshiba external hard drive (/dev/sda)
postgres:
children:

View File

@@ -0,0 +1,275 @@
---
- name: System Docker
hosts: raspberries:&local
gather_facts: yes
tags: never
become: yes
pre_tasks:
- name: set hostname
ansible.builtin.hostname:
name: "{{ inventory_hostname }}"
become: yes
when: inventory_hostname != ansible_hostname
- name: Install role geerlingguy.docker
community.general.ansible_galaxy_install:
type: role
name: geerlingguy.docker
run_once: true
delegate_to: localhost
become: false
- ansible.builtin.debug:
var: ansible_facts.machine
tasks:
- include_role:
name: geerlingguy.docker
post_tasks:
- name: adding existing user '{{ ansible_user }}' to group docker
user:
name: '{{ ansible_user }}'
groups: docker
append: yes
become: yes
#---
- name: System K3S
hosts: raspberries:&local
tags: never
tasks:
- name: prepare inventory for k3s external playbook
tags: always
ansible.builtin.add_host:
hostname: "{{ item }}"
groups:
- k3s_cluster
- "{{ ansible_loop.first | ternary('server', 'agent') }}"
loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
loop_control:
extended: true
extended_allitems: false
- name: Install collection k3s.orchestration
local_action:
module: community.general.ansible_galaxy_install
type: collection
name: git+https://github.com/k3s-io/k3s-ansible
run_once: true
- name: k3s
tags: never
ansible.builtin.import_playbook: k3s.orchestration.site
# ansible.builtin.import_playbook: k3s.orchestration.reset
vars:
k3s_version: v1.30.3+k3s1
token: changeme!
extra_server_args: "--docker"
extra_agent_args: "--docker"
api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
- name: how to reach k3s
hosts: server
tasks:
- name: copy /etc/rancher/k3s/k3s.yaml to ~/.kube/config from the k3s server and replace 127.0.0.1 with the server ip or hostname
run_once: true
block:
- ansible.builtin.fetch:
src: /etc/rancher/k3s/k3s.yaml
dest: ~/.kube/config
flat: true
become: true
run_once: true
- local_action:
module: ansible.builtin.replace
path: ~/.kube/config
regexp: 'server: https://127.0.0.1:6443'
replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'
- name: customize k3s traefik configuration https://docs.k3s.io/helm
block:
- name: Get my public IP
community.general.ipify_facts:
- become: true
ansible.builtin.copy:
dest: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
content: |-
apiVersion: v1
data:
dynamic.yaml: |-
{{ traefik_config_yaml | to_nice_yaml | indent( width=4 ) }}
kind: ConfigMap
metadata:
name: traefik-configmap
namespace: kube-system
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
{{ traefik_helm_values | to_nice_yaml | indent( width=4 ) }}
vars:
traefik_config_yaml:
http:
services:
gitea:
loadBalancer:
servers:
- url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000"
routers:
acme-challenge:
rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
service: acme-http@internal
tls: &tls_opts
certResolver: letsencrypt
domains:
- main: "arcodange.duckdns.org"
sans:
- "*.arcodange.duckdns.org"
entryPoints:
- websecure
- web
gitea:
rule: Host(`gitea.arcodange.duckdns.org`)
service: gitea
middlewares:
- localIp
tls:
<<: *tls_opts
entrypoints:
- websecure
middlewares:
localIp:
ipWhiteList:
sourceRange:
- "192.168.1.0/24"
- "{{ ipify_public_ip }}/32"
traefik_helm_values: # https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
service:
spec:
externalTrafficPolicy: Local
ports:
traefik:
expose: true
globalArguments: [] # deactivate --global.sendanonymoususage
logs:
general:
level: TRACE
# format: json
access:
enabled: true
# format: json
persistence:
# -- Enable persistence using Persistent Volume Claims
# ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
# It can be used to store TLS certificates, see `storage` in certResolvers
enabled: true
name: data
# existingClaim: ""
accessMode: ReadWriteOnce
size: 128Mi
storageClass: "nfs-client"
# volumeName: ""
path: /data
annotations: {}
volumes:
- name: traefik-configmap
mountPath: /config
type: configMap
additionalArguments:
- '--providers.file.filename=/config/dynamic.yaml'
certResolvers:
letsencrypt:
# for challenge options cf. https://doc.traefik.io/traefik/https/acme/
email: arcodange@gmail.com
tlsChallenge: true
dnsChallenge:
# requires env variable DUCKDNS_TOKEN
provider: duckdns
httpChallenge:
entryPoint: "web"
# It has to match the path with a persistent volume
storage: /data/acme.json
envFrom:
- secretRef:
name: traefik-duckdns-token
# MY_TOKEN=<my token (see https://www.duckdns.org/domains)>
# kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system
- name: touch manifests/traefik-config.yaml to trigger update
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
state: touch
become: true
# ---
- name: setup hard disk
tags: never
ansible.builtin.import_playbook: ./setup/hard_disk.yml
vars:
hard_disk__partitions:
nfs: []
- name: Deploy NFS Subdir External Provisioner and alter default traefik deployment
tags: never
hosts: localhost
tasks:
- name: Deploy NFS Subdir External Provisioner
block:
- name: Add Helm repository for NFS Subdir External Provisioner
kubernetes.core.helm_repository:
name: nfs-subdir-external-provisioner
repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
force_update: yes
- name: Install NFS Subdir External Provisioner using Helm
# debug:
# var: hard_disk__nfs
kubernetes.core.helm:
name: nfs-subdir-external-provisioner
chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
release_namespace: "{{ hard_disk__nfs.ks_namespace }}"
values:
nfs:
server: "{{ hard_disk__nfs.server_ip }}"
path: "{{ hard_disk__nfs.export_directory }}"
vars:
hard_disk__nfs: "{{ hostvars[groups.hard_disk[0]].hard_disk__nfs }}"
- name: redeploy traefik
hosts: localhost
tasks:
- name: delete old traefik deployment
kubernetes.core.k8s:
api_version: v1
name: traefik
kind: Deployment
namespace: kube-system
state: "absent"
- name: delete old deployment job so the k3s helm controller redeploy with our new configuration
kubernetes.core.k8s:
api_version: batch/v1
name: helm-install-traefik
kind: Job
namespace: kube-system
state: "absent"
- name: get traefik deployment
kubernetes.core.k8s_info:
api_version: v1
name: traefik
kind: Deployment
namespace: kube-system
wait: true
register: traefik_deployment
- ansible.builtin.debug:
var: traefik_deployment

View File

@@ -0,0 +1,3 @@
---
# Entry-point playbook: delegates everything to the setup playbook
# (hard disk, postgres, gitea, action runner, ...).
- name: setup
  ansible.builtin.import_playbook: ./setup/setup.yml

View File

@@ -0,0 +1,140 @@
---
- name: Deploy Gitea Action
hosts: raspberries:&local:!gitea # do not deploy on machine with gitea instance
tasks:
- name: Fetch Gitea Token for Action Runner registration
delegate_to: "{{ groups.gitea[0] }}"
delegate_facts: true
ansible.builtin.command:
docker exec gitea su git -c "gitea actions generate-runner-token"
register: gitea_runner_token_cmd
- name: Deploy Gitea Action Docker Compose configuration
include_role:
name: arcodange.factory.deploy_docker_compose
vars:
dockercompose_content:
name: arcodange_factory_gitea_action
services:
gitea_action:
image: gitea/act_runner:latest
container_name: gitea_action
restart: always
environment:
GITEA_INSTANCE_URL: >-
http://{{ hostvars[groups.gitea[0]].ansible_host }}:3000
GITEA_RUNNER_REGISTRATION_TOKEN: "{{ gitea_runner_token_cmd.stdout }}"
GITEA_RUNNER_NAME: arcodange_global_runner
# GITEA_RUNNER_LABELS: host={{ansible_host}},env=any
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
configs:
config.yaml:
content: |
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: true
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: "{{ ansible_default_ipv4.address }}"
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:
- name: Deploy Gitea with Docker Compose
community.docker.docker_compose_v2:
project_src: "/home/pi/arcodange/docker_composes/arcodange_factory_gitea_action"
pull: missing
state: present
register: deploy_result

View File

@@ -35,8 +35,8 @@ flowchart
postgres_service <-.:5432.-> Out
gitea_service <-.:443,80.-> Out
net -. "https://rg-evry.changeip.co:5<u>2</u><i>443</i>" .- net_rules -. :<i>443</i> .-> Out
net -. "http://rg-evry.changeip.co:5<u>2</u>0<i>80</i>" .- net_rules -. :<i>80</i> .-> Out
net -. "https://(*.)arcodange.duckdns.org:5<u>2</u><i>443</i>" .- net_rules -. :<i>443</i> .-> Out
net -. "http://(*.)arcodange.duckdns.org:5<u>2</u>0<i>80</i>" .- net_rules -. :<i>80</i> .-> Out
subgraph scripts
dc><u>docker-compose.yml</u>\ndescribing docker container service\nexposed ports\nand data volume]

View File

@@ -32,20 +32,41 @@
loop_control:
loop_var: mount_point
- name: Setup NFS
include_role:
name: nfs_setup
- name: Set permissions for group docker on /arcodange
ansible.builtin.file:
path: /arcodange
path: "/arcodange/{{ subdir }}"
state: directory
recurse: yes
owner: pi
group: docker
mode: u=rwX,g=rX,o=rX
mode: u=rwX,g=rX,o=r
loop: "{{ [''] + mount_points }}"
loop_control:
loop_var: subdir
- name: Set ACL for group docker on /arcodange
ansible.posix.acl:
path: "/arcodange"
path: "/arcodange/{{ subdir }}"
entity: "docker"
etype: "group"
permissions: "rwx"
recursive: yes
state: present
state: present
loop: "{{ [''] + mount_points }}"
loop_control:
loop_var: subdir
- name: Mount NFS
hosts: raspberries:&local
become: yes
tasks:
- name: Setup NFS
include_role:
name: nfs_setup
tasks_from: mount
vars:
nfs_setup_export_directory: "{{ hard_disk__nfs.export_directory | default(hostvars[groups.hard_disk[0]].hard_disk__nfs.export_directory) }}"
nfs_setup_server_ip: "{{ hard_disk__nfs.server_ip | default(hostvars[groups.hard_disk[0]].ansible_host) }}"

View File

@@ -1,12 +1,12 @@
---
- name: Ensure Docker is running
service:
ansible.builtin.service:
name: docker
state: started
enabled: yes
- name: Create configuration directory
file:
ansible.builtin.file:
path: "{{ config_path }}"
state: directory
owner: "{{ app_owner }}"

View File

@@ -1,8 +1,9 @@
APP_NAME = Arcodange repositories
[server]
PROTOCOL = http
DOMAIN = localhost
HTTP_PORT = 3000
ROOT_URL = http://localhost:3000/
ROOT_URL = https://gitea.arcodange.duckdns.org/
DISABLE_SSH = false
SSH_PORT = 22
START_SSH_SERVER = true
@@ -16,3 +17,18 @@ USER = {{ postgres_user }}
PASSWD = {{ postgres_password }}
SSL_MODE = disable
PATH = data/gitea.db
[service]
DISABLE_REGISTRATION = true
REQUIRE_SIGNIN_VIEW = false
REGISTER_EMAIL_CONFIRM = false
ENABLE_NOTIFY_MAIL = false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = true
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.localhost
[mailer]
ENABLED = false

View File

@@ -1,6 +1,6 @@
---
- name: Ensure Docker is running
service:
ansible.builtin.service:
name: docker
state: started
enabled: yes

View File

@@ -0,0 +1,3 @@
---
# Role defaults for nfs_setup.
# Directory exported by the NFS server (also used as the client mount source).
nfs_setup_export_directory: /arcodange/nfs
# The NFS server address has no default: callers must supply it when mounting
# (see the Mount NFS play in hard_disk.yml), e.g.:
# nfs_setup_server_ip: "{{ hostvars['pi2'].ansible_default_ipv4.address }}"

View File

@@ -0,0 +1,23 @@
---
# Advertise this host's NFS service on the local network via Avahi (mDNS/Zeroconf)
# so clients can discover it without static configuration.
- name: Install Avahi and related packages
  ansible.builtin.apt:  # https://www.baeldung.com/linux/conflicting-values-error-resolution
    # Installing both packages in one transaction is faster and atomic
    # compared to looping the module per package.
    name:
      - avahi-daemon
      - avahi-utils
    state: present
    update_cache: true
- name: Create Avahi service file for NFS
  ansible.builtin.template:
    src: nfs.service.j2
    dest: /etc/avahi/services/nfs.service
    owner: root
    group: root
    mode: '0644'
- name: Restart Avahi daemon
  ansible.builtin.service:
    name: avahi-daemon
    state: restarted
    enabled: true

View File

@@ -0,0 +1,39 @@
---
# Install and configure an NFS server exporting {{ nfs_setup_export_directory }}
# to the 192.168.1.0/24 network, then advertise it over mDNS (announce.yml).
- name: Install NFS server package
  ansible.builtin.apt:  # https://www.baeldung.com/linux/conflicting-values-error-resolution
    name: nfs-kernel-server
    state: present
    update_cache: true
- name: Create export directory
  ansible.builtin.file:
    path: "{{ nfs_setup_export_directory }}"
    state: directory
    owner: root
    group: root
    mode: '0755'
- name: Configure /etc/exports
  # anonuid/anongid map anonymous access to uid/gid 1000 (the default "pi" user).
  ansible.builtin.lineinfile:
    path: /etc/exports
    line: "{{ nfs_setup_export_directory }} 192.168.1.0/24(rw,sync,no_subtree_check,anonuid=1000,anongid=1000)"
    create: true
    state: present
- name: Ensure NFS service is running and enabled
  ansible.builtin.service:
    name: nfs-kernel-server
    state: started
    enabled: true
- name: Export the shared directories
  # Re-reads /etc/exports; always reports changed since exportfs gives no
  # machine-readable change signal.
  ansible.builtin.command: exportfs -ra
- name: Verify NFS exports
  ansible.builtin.command: exportfs -v
  register: nfs_exports
  changed_when: false  # read-only inspection command
- ansible.builtin.debug:
    msg: "NFS Exports: {{ nfs_exports.stdout }}"
- name: Announce the NFS share over mDNS
  ansible.builtin.include_tasks: announce.yml

View File

@@ -0,0 +1,23 @@
---
# Mount the remote NFS export at /mnt/nfs on a client host.
# Requires nfs_setup_server_ip and nfs_setup_export_directory
# (supplied by the caller, see hard_disk.yml / role defaults).
- name: Install NFS client package
  ansible.builtin.apt:  # https://www.baeldung.com/linux/conflicting-values-error-resolution
    name: nfs-common
    state: present
    update_cache: true
- name: Create local mount directory
  ansible.builtin.file:
    path: /mnt/nfs
    state: directory
    owner: pi
    group: docker
    mode: '0774'
- name: Mount NFS share
  # FQCN: the mount module lives in the ansible.posix collection
  # (already listed in ansible/requirements.yml).
  ansible.posix.mount:
    src: "{{ nfs_setup_server_ip }}:{{ nfs_setup_export_directory }}"
    path: /mnt/nfs
    fstype: nfs
    opts: rw,vers=4
    state: mounted
  ignore_errors: true  # best-effort: server may be unreachable on first run

View File

@@ -0,0 +1,9 @@
<?xml version="1.0" standalone='no'?>
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<!-- Avahi/mDNS advertisement: publishes this host's NFS service (TCP 2049)
     as "<hostname> NFS" (%h expands to the hostname) so clients can discover
     the share via Zeroconf. Installed by announce.yml into
     /etc/avahi/services/nfs.service. -->
<service-group>
  <name replace-wildcards="yes">%h NFS</name>
  <service>
    <type>_nfs._tcp</type>
    <port>2049</port>
  </service>
</service-group>

View File

@@ -30,7 +30,7 @@
register: mount_dir_stat
- name: Create the mount directory
file:
ansible.builtin.file:
path: "/arcodange/{{ mount_point }}"
state: directory
when: not mount_dir_stat.stat.exists
@@ -41,4 +41,4 @@
line: "LABEL={{ mount_point }} /arcodange/{{ mount_point }} ext4 defaults 0 0"
- name: Use updated mount list
command: mount -a
ansible.builtin.command: mount -a

View File

@@ -0,0 +1,7 @@
# Role defaults for deploy_docker_compose.
# app_name is derived from the compose definition's top-level "name" key.
# NOTE(review): `from_yaml` expects a string, but at least one caller
# (deploy_gitea_action.yml) passes dockercompose_content as a mapping —
# confirm the type callers actually supply, or the filter may fail.
app_name: "{{ (dockercompose_content | from_yaml).name }}"
app_owner: pi
app_group: docker
partition: docker_composes
hard_disk_root_path: /arcodange
no_hard_disk_root_path: /home/pi/arcodange
# Hosts in the hard_disk group get the mounted-partition path; others a home dir.
root_path: "{{ ('hard_disk' in group_names) | ansible.builtin.ternary(hard_disk_root_path, no_hard_disk_root_path) }}"

View File

@@ -1,14 +1,14 @@
---
- name: Create application directory
file:
path: "/arcodange/{{ partition }}/{{ app_name }}"
ansible.builtin.file:
path: "{{root_path}}/{{ partition }}/{{ app_name }}"
state: directory
owner: "{{ app_owner }}"
group: "{{ app_group }}"
- name: Create data directory
file:
path: "/arcodange/{{ partition }}/{{ app_name }}/data"
ansible.builtin.file:
path: "{{root_path}}/{{ partition }}/{{ app_name }}/data"
state: directory
owner: "{{ app_owner }}"
group: "{{ app_group }}"
@@ -16,8 +16,8 @@
ignore_errors: true # app container might have set its own permissions on previous run
- name: Create scripts directory
file:
path: "/arcodange/{{ partition }}/{{ app_name }}/scripts"
ansible.builtin.file:
path: "{{root_path}}/{{ partition }}/{{ app_name }}/scripts"
state: directory
owner: "{{ app_owner }}"
group: "{{ app_group }}"
@@ -26,18 +26,19 @@
- name: Write docker-compose.yml
copy:
content: "{{ dockercompose_content | to_nice_yaml }}"
dest: "/arcodange/{{ partition }}/{{ app_name }}/docker-compose.yml"
dest: "{{root_path}}/{{ partition }}/{{ app_name }}/docker-compose.yml"
owner: "{{ app_owner }}"
group: "{{ app_group }}"
mode: '0644'
validate: 'docker compose -f %s config'
- name: Write docker-compose script
copy:
content: |
#!/bin/bash
set -ex
docker compose -f /arcodange/{{ partition }}/{{ app_name }}/docker-compose.yml "$@"
dest: "/arcodange/{{ partition }}/{{ app_name }}/scripts/docker-compose"
docker compose -f {{root_path}}/{{ partition }}/{{ app_name }}/docker-compose.yml "$@"
dest: "{{root_path}}/{{ partition }}/{{ app_name }}/scripts/docker-compose"
owner: "{{ app_owner }}"
group: "{{ app_group }}"
mode: '0755'

10
ansible/requirements.yml Normal file
View File

@@ -0,0 +1,10 @@
---
# Ansible Galaxy dependencies, installed at image build time by the
# `ansible-galaxy collection install ... -r /tmp/requirements.yml` step
# in ansible/Dockerfile.
roles:
  - name: geerlingguy.docker
collections:
  - name: community.general
  - name: community.docker
  - name: ansible.posix
  - name: kubernetes.core
  # k3s.orchestration collection, installed directly from git
  - name: git+https://github.com/k3s-io/k3s-ansible.git

55
doc/adr/00_dns_os_etc.md Normal file
View File

@@ -0,0 +1,55 @@
# [Bases](./README.md)
## DNS, OS, ...
```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'base' } }%%
flowchart TD
subgraph Internet
direction BT
pirates[pirates 🏴‍☠️]
robots[robots 🤖]
prospects[prospects 🕴️]
clients[clients 🧑‍💼]
world[le monde 🌍]
subgraph changeip.com
subdomain
end
subgraph home[🏠]
direction TB
subgraph livebox
ipPublique
dns
firewall
end
subgraph PC
end
subgraph pi1[pi1.home]
end
subgraph pi2[pi2.home]
end
dns -- 192.168.1.201 --->pi1
dns -- 192.168.1.202 --->pi2
end
subdomain -- (*.)arcodange.duckdns.org --> ipPublique
robots -.-> firewall
pirates -.-> firewall
prospects -.-> firewall
clients -.-> firewall
world -.-> firewall
end
```
### Mise en place
- Utiliser une image Raspbian à jour avec le SSID du réseau et son mot de passe préconfiguré.
- Se connecter à la console d'administration du réseau local http://livebox.home et affecter les adresses statiques.
- Donner accès VNC+SSH dans les raspberry-pis
- Enregistrer sa clé SSH puis désactiver la connexion avec mot de passe

10
doc/adr/01_docker_k3s.md Normal file
View File

@@ -0,0 +1,10 @@
# [Bases](./README.md)
## Docker & K3S
- [k3s-ansible](https://github.com/k3s-io/k3s-ansible/)
- [`ansible-playbook -i ansible/arcodange/factory/inventory/ ansible/arcodange/factory/playbooks/02_system.yml --tags='all,never'`](/ansible/arcodange/factory/playbooks/02_system.yml)
- scp pi1:/home/pi/.kube/config ~/.kube/config
- sed -i '' 's/127.0.0.1/pi1/g' ~/.kube/config # note the space+'' after -i for Mac
- sometime `ssh pi1 'sudo touch /var/lib/rancher/k3s/server/manifests/traefik-config.yaml'` is required to reload traefik

View File

@@ -0,0 +1,77 @@
# [Bases](./README.md)
## Main Components
### Setup Volume, NFS, K8S Provisioner
- [`ansible-playbook -i ansible/arcodange/factory/inventory/ ansible/arcodange/factory/playbooks/02_setup.yml --tags='all,never'`](/ansible/arcodange/factory/playbooks/02_setup.yml)
> [!IMPORTANT]
> Une partie du playbook est invoquée par le précédent [playbook system](/ansible/arcodange/factory/playbooks/01_system.yml) responsable du Provisioner K8S.
> Le NFS est également requis pour le Persistent Volume Claim (PVC) utilisé pour les certificats SSL de Traefik.
### Setup Postgres
Le container possède sa propre partition et ne tourne pas dans le cluster K3S (pour plus de simplicité).
>[!NOTE]
>Le role [`deploy_postgresql`](/ansible/arcodange/factory/playbooks/setup/roles/deploy_postgresql/tasks/create_db_and_user.yml) du playbook `setup` utilise la variable `applications_databases`: une liste de { db_name: str; db_user: str; db_password: str} à créer.
### Setup Gitea
Le container possède sa propre partition et ne tourne pas dans le cluster K3S (pour plus de simplicité). On déploiera un gitea action runner sur d'autres machines pour que l'instance principale gitea gère mieux les cas de famine de ressources potentiellement causés par ce dernier.
#### Setup mail alert
TODO
>[!WARNING]
>le template [app.ini.j2](/ansible/arcodange/factory/playbooks/setup/roles/deploy_gitea/templates/app.ini.j2) n'est pas utilisé. (voir [inventaire](/ansible/arcodange/factory/inventory/group_vars/hard_disk/gitea.yml))
> il faudrait peut-être utiliser [community.general.ini_file](https://docs.ansible.com/ansible/latest/collections/community/general/ini_file_module.html)
## Annexes
### Ordre d'execution des playbooks
```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'dark' } }%%
timeline
title ordre d'execution des playbooks
section Setup DNS, OS, ...
configuration manuelle
: installer OS, réserver IP statique, configurer SSH,VNC
: formater et créer des partitions avec gparted
section Docker & K3S
system
: install Docker
: install K3S working with docker
: configure Traefik
section Volume, NFS
setup hard_disk
: monter les partitions
: installer NFS
system
: déployer provisionner NFS
section postgres
setup
: postgres
section gitea
setup
: gitea
section gitea action runner
setup
: gitea action runner
section argo cd
argo_cd
: argo cd
section hello world app
setup git repository
: terraform
setup CI
deploy
: dev : list exposed deployments with label and port as a landing page
: expose (as ngrok ? direct ? port ? )
```

78
doc/adr/README.md Normal file
View File

@@ -0,0 +1,78 @@
# Bases
## Fondamentaux
- [x] [Setup DNS, OS, ...](./00_dns_os_etc.md)
- [x] [docker & k3s](./01_docker_k3s.md)
- [x] [main components](./02_main_components.md)
- [x] setup volume, nfs, k8s provisioner
- [x] setup postgres
- [x] setup gitea
- [ ] setup mail alert
- [ ] setup gitea runners, Argo CD
- [ ] setup hello world web app
> [!NOTE]
> Reference: [Arcodange _**Factory**_ Ansible Collection](/ansible/arcodange/factory/README.md)
> [!IMPORTANT]
> [duckdns.org](https://duckdns.org/) offre un sous domaine de duckdns.org gratuitement ainsi que des (sous-)sous domaines. Avec notre token on peut valider le challenge DNS de letsencrypt (pour du SSL gratuit) et configurer un cron job pour mettre à jour l'ip publique référencée par notre sous-domaine.
```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'base' } }%%
flowchart
net[Internet]
subgraph "Local Network (livebox)"
net_rules{network rules}
subgraph pi1
subgraph Docker_pi1[Docker]
%%subgraph Traefik
%% :web80
%% :websecure443
%%end
gitea_action_runner
end
subgraph k3s_server[K3S Master]
%% Traefik -.-> K3S_Traefik
subgraph K3S_Traefik
subdomain_git[/git.arcodange.duckdns.org/]
end
Pods[hello world web app]
ArgoCD
end
end
subgraph pi2
subgraph Docker_pi2[Docker]
subgraph gitea_network
subgraph postgres_service
end
subgraph gitea_service
end
end
end
subgraph HardDisk[Hard Disk]
pg_data[pg_data partition]
gitea_data[gitea_data partition]
nfs_data[NFS_data partition]
K3S_storage_provisioner --o nfs_data
end
subgraph k3s_agent[K3S Agent]
Pods'[hello world web app]
end
end
end
postgres_service --o pg_data
gitea_service --o gitea_data
net -. "http(s)://(*.)arcodange.duckdns.org" .- net_rules -. :<i>443/80</i> .-> K3S_Traefik
%% Traefik
subdomain_git -.-> gitea_service
classDef k3s fill:#fff,stroke:#f0f,stroke-width:1px;
classDef storage fill:#999,stroke:#000,stroke-width:2px;
class HardDisk storage;
class k3s_server,k3s_agent,K3S_storage_provisioner k3s;
```

View File

@@ -1,97 +0,0 @@
api:
dashboard: true
insecure: false
providers:
file:
filename: /etc/traefik/traefik.yml
certificatesResolvers:
myresolver:
acme:
email: arcodage@gmail.com
storage: acme.json
tlsChallenge: {}
entryPoints:
web:
address: ":80"
websecure:
address: ":443"
gitea:
address: ":60000"
http:
services:
gitea:
loadBalancer:
servers:
- url: "http://gitea.home"
routers:
acme-challenge:
rule: Host(`rg-evry.changeip.co`) && PathPrefix(`/.well-known/acme-challenge`)
service: api@internal
tls:
certResolver: myresolver
entryPoints:
- websecure
- web
main:
rule: Host(`rg-evry.changeip.co`) && ClientIP(`90.16.102.250`)
service: gitea
tls:
certResolver: myresolver
entrypoints:
- websecure
- web
middlewares:
- localIp
- redirectToGitea
- resetPath
dashboard:
rule: Host(`traefik.home`) && (Path(`/`) || PathPrefix(`/api`) || PathPrefix(`/dashboard`))
service: api@internal
middlewares:
- redirectToDashboard
gitea:
rule: Host(`rg-evry.changeip.co`) && ClientIP(`90.16.102.250`)
service: gitea
tls:
certResolver: myresolver
entryPoints:
- gitea
middlewares:
- localIp
middlewares:
localIp:
ipAllowList:
sourceRange:
- "192.168.1.0/24"
- "90.16.102.250/32"
redirectToDashboard:
replacePathRegex:
regex: ^(http:\/\/(\[[\w:.]+\]|[\w\._-]+)(:\d+)?)\/$
replacement: ${1}/dashboard/
resetPath:
replacePath:
path: "/"
redirectToGitea:
redirectScheme:
scheme: https
port: 60000
log:
level: TRACE
accesslog: {}