k3s setup and git action runner

This commit is contained in:
2024-08-12 21:45:16 +02:00
parent 3cfbc59f50
commit cb4d679d8b
29 changed files with 888 additions and 132 deletions

View File

@@ -1,10 +1,11 @@
# docker build -f ansible/Dockerfile -t arcodange-ansible:0.0.0 ansible/
FROM python:slim
RUN apt update && apt install openssh-client socat gosu -y
RUN apt update && apt install openssh-client socat gosu git -y
COPY nonroot_ssh_proxy_setup.sh /usr/local/bin/nonroot_ssh_proxy_setup.sh
COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
COPY requirements.yml /tmp/requirements.yml
RUN chmod +x /usr/local/bin/*.sh
ENV SSH_AUTH_SOCK=/home/arcodange/.ssh/socket
@@ -13,10 +14,11 @@ USER 1000
WORKDIR /home/arcodange/code
ENV PATH=/home/arcodange/.local/bin:/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
RUN pip install ansible-core jmespath
RUN pip install ansible-core jmespath kubernetes dnspython
ENV GALAXY_SERVER=https://beta-galaxy.ansible.com/api/
RUN ansible-galaxy collection install --token 11bebd8fd1ad4009f700bdedbeb80b19743ce3d3 \
community.general community.docker ansible.posix
-r /tmp/requirements.yml
# community.general community.docker ansible.posix kubernetes.core
ENV ANSIBLE_HOST_KEY_CHECKING=False
ENV ANSIBLE_FORCE_COLOR=True

View File

@@ -19,7 +19,7 @@ git clone -q --depth 1 --branch master https://github.com/arcodange/ssh-agent.gi
docker run -d --name=ssh-agent docker-ssh-agent:latest
docker run --rm --volumes-from=ssh-agent -v ~/.ssh:/.ssh -it docker-ssh-agent:latest ssh-add /root/.ssh/id_rsa
docker run --rm -u root --name test --volumes-from=ssh-agent -v $PWD:/home/arcodange/code arcodange-ansible:0.0.0 \
ansible-playbook ansible/arcodange/factory/playbooks/setup/setup.yml -i ansible/arcodange/factory/inventory -vv
ansible-playbook ansible/arcodange/factory/playbooks/setup/01_system.yml -i ansible/arcodange/factory/inventory -vv
```
### a tool to reuse a ssh agent (not required)
@@ -43,4 +43,9 @@ ssh-add ~/.ssh/id_rsa
```sh
ansible -i ,localhost -c local localhost -m raw -a "echo hello world {{ inventory_hostname }} : {{ hostvars | to_nice_json | regex_replace(\"['\n]\",' ') }}"
```
### local python environment with pipx
#### add dependency
- `pipx runpip ansible-core install dnspython`

View File

@@ -1,3 +1,50 @@
# Ansible Collection - arcodange.factory
Documentation for the collection.
```sh
MY_TOKEN= #<my token (see https://www.duckdns.org/domains)>
kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system
```
```mermaid
%%{init: { 'logLevel': 'debug', 'theme': 'dark' } }%%
timeline
title ordre des playbooks
section Setup DNS, OS, ...
configuration manuelle
: installer OS, réserver IP statique, configurer SSH,VNC
: formater et créer des partitions avec gparted
section Docker & K3S
system
: install Docker
: install K3S working with docker
: configure Traefik
section Volume, NFS
setup hard_disk
: monter les partitions
: installer NFS
system
: déployer provisionner NFS
section postgres
setup
: postgres
section gitea
setup
: gitea
section gitea action runner
setup
: gitea action runner
section argo cd
argo_cd
: argo cd
section hello world app
setup git repository
: terraform
setup CI
deploy
: dev : list exposed deployments with label and port as a landpage
: expose (as ngrock ? direct ? port ? )
```

View File

@@ -1,5 +1,6 @@
# to add/mount a partition, use the gparted utility to create it beforehand with the matching name/label
hard_disk__partitions:
nfs: []
gitea_data:
- gitea
pg_data:
@@ -10,4 +11,9 @@ hard_disk__applications:
gitea: "{{ gitea }}"
hard_disk__postgres_databases:
gitea: "{{ gitea_database }}"
gitea: "{{ gitea_database }}"
hard_disk__nfs:
server_ip: "{{ ansible_host }}"
ks_namespace: kube-system
export_directory: /arcodange/nfs

View File

@@ -35,7 +35,7 @@ gitea:
networks:
- gitea
ports:
- "80:3000"
- "3000:3000"
- "2222:22"
volumes:
- /arcodange/{{gitea_partition}}/gitea/data:/data

View File

@@ -6,10 +6,10 @@ raspberries:
ansible_host: pi2.home
internetPi1:
ansible_host: rg-evry.changeip.co
ansible_host: arcodange.duckdns.org
ansible_port: 51022
internetPi2:
ansible_host: rg-evry.changeip.co
ansible_host: arcodange.duckdns.org
ansible_port: 52022
vars:
@@ -17,12 +17,15 @@ raspberries:
local:
hosts:
localhost:
ansible_connection: local
ansible_python_interpreter: /Users/gabrielradureau/.local/pipx/venvs/ansible-core/bin/python
pi1:
pi2:
hard_disk:
hosts:
pi2 # 4To toshiba external hard drive (/dev/sda)
pi2: # 4To toshiba external hard drive (/dev/sda)
postgres:
children:

View File

@@ -0,0 +1,275 @@
---
- name: System Docker
hosts: raspberries:&local
gather_facts: yes
tags: never
become: yes
pre_tasks:
- name: set hostname
ansible.builtin.hostname:
name: "{{ inventory_hostname }}"
become: yes
when: inventory_hostname != ansible_hostname
- name: Install role geerlingguy.docker
community.general.ansible_galaxy_install:
type: role
name: geerlingguy.docker
run_once: true
delegate_to: localhost
become: false
- ansible.builtin.debug:
var: ansible_facts.machine
tasks:
- include_role:
name: geerlingguy.docker
post_tasks:
- name: adding existing user '{{ ansible_user }}' to group docker
user:
name: '{{ ansible_user }}'
groups: docker
append: yes
become: yes
#---
- name: System K3S
hosts: raspberries:&local
tags: never
tasks:
- name: prepare inventory for k3s external playbook
tags: always
ansible.builtin.add_host:
hostname: "{{ item }}"
groups:
- k3s_cluster
- "{{ ansible_loop.first | ternary('server', 'agent') }}"
loop: "{{ groups.raspberries | intersect(groups.local) | sort }}"
loop_control:
extended: true
extended_allitems: false
- name: Install collection k3s.orchestration
local_action:
module: community.general.ansible_galaxy_install
type: collection
name: git+https://github.com/k3s-io/k3s-ansible
run_once: true
- name: k3s
tags: never
ansible.builtin.import_playbook: k3s.orchestration.site
# ansible.builtin.import_playbook: k3s.orchestration.reset
vars:
k3s_version: v1.30.3+k3s1
token: changeme!
extra_server_args: "--docker"
extra_agent_args: "--docker"
api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
- name: how to reach k3s
hosts: server
tasks:
- name: copy /etc/rancher/k3s/k3s.yaml to ~/.kube/config from the k3s server and replace 127.0.0.1 with the server ip or hostname
run_once: true
block:
- ansible.builtin.fetch:
src: /etc/rancher/k3s/k3s.yaml
dest: ~/.kube/config
flat: true
become: true
run_once: true
- local_action:
module: ansible.builtin.replace
path: ~/.kube/config
regexp: 'server: https://127.0.0.1:6443'
replace: 'server: https://{{ ansible_default_ipv4.address }}:6443'
- name: customize k3s traefik configuration https://docs.k3s.io/helm
block:
- name: Get my public IP
community.general.ipify_facts:
- become: true
ansible.builtin.copy:
dest: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
content: |-
apiVersion: v1
data:
dynamic.yaml: |-
{{ traefik_config_yaml | to_nice_yaml | indent( width=4 ) }}
kind: ConfigMap
metadata:
name: traefik-configmap
namespace: kube-system
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
{{ traefik_helm_values | to_nice_yaml | indent( width=4 ) }}
vars:
traefik_config_yaml:
http:
services:
gitea:
loadBalancer:
servers:
- url: "http://{{ lookup('dig', groups.gitea[0]) }}:3000"
routers:
acme-challenge:
rule: Host(`arcodange.duckdns.org`) && PathPrefix(`/.well-known/acme-challenge`)
service: acme-http@internal
tls: &tls_opts
certResolver: letsencrypt
domains:
- main: "arcodange.duckdns.org"
sans:
- "*.arcodange.duckdns.org"
entryPoints:
- websecure
- web
gitea:
rule: Host(`gitea.arcodange.duckdns.org`)
service: gitea
middlewares:
- localIp
tls:
<<: *tls_opts
entrypoints:
- websecure
middlewares:
localIp:
ipWhiteList:
sourceRange:
- "192.168.1.0/24"
- "{{ ipify_public_ip }}/32"
traefik_helm_values: # https://github.com/traefik/traefik-helm-chart/blob/v25.0.0/traefik/values.yaml <- for v25 (`kubectl describe deployments.apps traefik -n kube-system | grep helm.sh/chart`)
service:
spec:
externalTrafficPolicy: Local
ports:
traefik:
expose: true
globalArguments: [] # deactivate --global.sendanonymoususage
logs:
general:
level: TRACE
# format: json
access:
enabled: true
# format: json
persistence:
# -- Enable persistence using Persistent Volume Claims
# ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
# It can be used to store TLS certificates, see `storage` in certResolvers
enabled: true
name: data
# existingClaim: ""
accessMode: ReadWriteOnce
size: 128Mi
storageClass: "nfs-client"
# volumeName: ""
path: /data
annotations: {}
volumes:
- name: traefik-configmap
mountPath: /config
type: configMap
additionalArguments:
- '--providers.file.filename=/config/dynamic.yaml'
certResolvers:
letsencrypt:
# for challenge options cf. https://doc.traefik.io/traefik/https/acme/
email: arcodange@gmail.com
tlsChallenge: true
dnsChallenge:
# requires env variable DUCKDNS_TOKEN
provider: duckdns
httpChallenge:
entryPoint: "web"
# It has to match the path with a persistent volume
storage: /data/acme.json
envFrom:
- secretRef:
name: traefik-duckdns-token
# MY_TOKEN=<my token (see https://www.duckdns.org/domains)>
# kubectl create secret generic traefik-duckdns-token --from-literal="DUCKDNS_TOKEN=$MY_TOKEN" -n kube-system
- name: touch manifests/traefik-config.yaml to trigger update
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
state: touch
become: true
# ---
- name: setup hard disk
tags: never
ansible.builtin.import_playbook: ./setup/hard_disk.yml
vars:
hard_disk__partitions:
nfs: []
- name: Deploy NFS Subdir External Provisioner and alter default traefik deployment
tags: never
hosts: localhost
tasks:
- name: Deploy NFS Subdir External Provisioner
block:
- name: Add Helm repository for NFS Subdir External Provisioner
kubernetes.core.helm_repository:
name: nfs-subdir-external-provisioner
repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
force_update: yes
- name: Install NFS Subdir External Provisioner using Helm
# debug:
# var: hard_disk__nfs
kubernetes.core.helm:
name: nfs-subdir-external-provisioner
chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
release_namespace: "{{ hard_disk__nfs.ks_namespace }}"
values:
nfs:
server: "{{ hard_disk__nfs.server_ip }}"
path: "{{ hard_disk__nfs.export_directory }}"
vars:
hard_disk__nfs: "{{ hostvars[groups.hard_disk[0]].hard_disk__nfs }}"
- name: redeploy traefik
hosts: localhost
tasks:
- name: delete old traefik deployment
kubernetes.core.k8s:
api_version: v1
name: traefik
kind: Deployment
namespace: kube-system
state: "absent"
- name: delete old deployment job so the k3s helm controller redeploy with our new configuration
kubernetes.core.k8s:
api_version: batch/v1
name: helm-install-traefik
kind: Job
namespace: kube-system
state: "absent"
- name: get traefik deployment
kubernetes.core.k8s_info:
api_version: v1
name: traefik
kind: Deployment
namespace: kube-system
wait: true
register: traefik_deployment
- ansible.builtin.debug:
var: traefik_deployment

View File

@@ -0,0 +1,3 @@
---
# Thin entry-point playbook: running site.yml simply runs the full setup sequence.
- name: setup
  ansible.builtin.import_playbook: ./setup/setup.yml

View File

@@ -0,0 +1,140 @@
---
- name: Deploy Gitea Action
hosts: raspberries:&local:!gitea # do not deploy on machine with gitea instance
tasks:
- name: Fetch Gitea Token for Action Runner registration
delegate_to: "{{ groups.gitea[0] }}"
delegate_facts: true
ansible.builtin.command:
docker exec gitea su git -c "gitea actions generate-runner-token"
register: gitea_runner_token_cmd
- name: Deploy Gitea Action Docker Compose configuration
include_role:
name: arcodange.factory.deploy_docker_compose
vars:
dockercompose_content:
name: arcodange_factory_gitea_action
services:
gitea_action:
image: gitea/act_runner:latest
container_name: gitea_action
restart: always
environment:
GITEA_INSTANCE_URL: >-
http://{{ hostvars[groups.gitea[0]].ansible_host }}:3000
GITEA_RUNNER_REGISTRATION_TOKEN: "{{ gitea_runner_token_cmd.stdout }}"
GITEA_RUNNER_NAME: arcodange_global_runner
# GITEA_RUNNER_LABELS: host={{ansible_host}},env=any
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
configs:
config.yaml:
content: |
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: true
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: "{{ ansible_default_ipv4.address }}"
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:
- name: Deploy Gitea with Docker Compose
community.docker.docker_compose_v2:
project_src: "/home/pi/arcodange/docker_composes/arcodange_factory_gitea_action"
pull: missing
state: present
register: deploy_result

View File

@@ -35,8 +35,8 @@ flowchart
postgres_service <-.:5432.-> Out
gitea_service <-.:443,80.-> Out
net -. "https://rg-evry.changeip.co:5<u>2</u><i>443</i>" .- net_rules -. :<i>443</i> .-> Out
net -. "http://rg-evry.changeip.co:5<u>2</u>0<i>80</i>" .- net_rules -. :<i>80</i> .-> Out
net -. "https://(*.)arcodange.duckdns.org:5<u>2</u><i>443</i>" .- net_rules -. :<i>443</i> .-> Out
net -. "http://(*.)arcodange.duckdns.org:5<u>2</u>0<i>80</i>" .- net_rules -. :<i>80</i> .-> Out
subgraph scripts
dc><u>docker-compose.yml</u>\ndescribing docker container service\nexposed ports\nand data volume]

View File

@@ -32,20 +32,41 @@
loop_control:
loop_var: mount_point
- name: Setup NFS
include_role:
name: nfs_setup
- name: Set permissions for group docker on /arcodange
ansible.builtin.file:
path: /arcodange
path: "/arcodange/{{ subdir }}"
state: directory
recurse: yes
owner: pi
group: docker
mode: u=rwX,g=rX,o=rX
mode: u=rwX,g=rX,o=r
loop: "{{ [''] + mount_points }}"
loop_control:
loop_var: subdir
- name: Set ACL for group docker on /arcodange
ansible.posix.acl:
path: "/arcodange"
path: "/arcodange/{{ subdir }}"
entity: "docker"
etype: "group"
permissions: "rwx"
recursive: yes
state: present
state: present
loop: "{{ [''] + mount_points }}"
loop_control:
loop_var: subdir
- name: Mount NFS
hosts: raspberries:&local
become: yes
tasks:
- name: Setup NFS
include_role:
name: nfs_setup
tasks_from: mount
vars:
nfs_setup_export_directory: "{{ hard_disk__nfs.export_directory | default(hostvars[groups.hard_disk[0]].hard_disk__nfs.export_directory) }}"
nfs_setup_server_ip: "{{ hard_disk__nfs.server_ip | default(hostvars[groups.hard_disk[0]].ansible_host) }}"

View File

@@ -1,12 +1,12 @@
---
- name: Ensure Docker is running
service:
ansible.builtin.service:
name: docker
state: started
enabled: yes
- name: Create configuration directory
file:
ansible.builtin.file:
path: "{{ config_path }}"
state: directory
owner: "{{ app_owner }}"

View File

@@ -1,8 +1,9 @@
APP_NAME = Arcodange repositories
[server]
PROTOCOL = http
DOMAIN = localhost
HTTP_PORT = 3000
ROOT_URL = http://localhost:3000/
ROOT_URL = https://gitea.arcodange.duckdns.org/
DISABLE_SSH = false
SSH_PORT = 22
START_SSH_SERVER = true
@@ -16,3 +17,18 @@ USER = {{ postgres_user }}
PASSWD = {{ postgres_password }}
SSL_MODE = disable
PATH = data/gitea.db
[service]
DISABLE_REGISTRATION = true
REQUIRE_SIGNIN_VIEW = false
REGISTER_EMAIL_CONFIRM = false
ENABLE_NOTIFY_MAIL = false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = true
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.localhost
[mailer]
ENABLED = false

View File

@@ -1,6 +1,6 @@
---
- name: Ensure Docker is running
service:
ansible.builtin.service:
name: docker
state: started
enabled: yes

View File

@@ -0,0 +1,3 @@
---
# Role defaults for nfs_setup: directory exported by the server and mounted by clients.
nfs_setup_export_directory: /arcodange/nfs
# Server address is normally supplied by the calling play; example fallback kept for reference:
# nfs_setup_server_ip: "{{ hostvars['pi2'].ansible_default_ipv4.address }}"

View File

@@ -0,0 +1,23 @@
---
# Advertise the NFS service on the local network via Avahi (mDNS/Zeroconf),
# so clients can discover the share without hard-coded addresses.
- name: Install Avahi and related packages
  ansible.builtin.apt: # https://www.baeldung.com/linux/conflicting-values-error-resolution
    # apt accepts a list directly; no per-item loop needed.
    name:
      - avahi-daemon
      - avahi-utils
    state: present
    update_cache: true
- name: Create Avahi service file for NFS
  # FQCN for consistency with the ansible.builtin.apt task above.
  ansible.builtin.template:
    src: nfs.service.j2
    dest: /etc/avahi/services/nfs.service
    owner: root
    group: root
    mode: "0644"
- name: Restart Avahi daemon
  # NOTE(review): restarts on every run; consider notifying a handler instead
  # so the restart only happens when the service file actually changes.
  ansible.builtin.service:
    name: avahi-daemon
    state: restarted
    enabled: true

View File

@@ -0,0 +1,39 @@
---
# Install and configure an NFS server exporting nfs_setup_export_directory to the
# local 192.168.1.0/24 network, then announce the share over Avahi/mDNS.
- name: Install NFS server package
  ansible.builtin.apt: # https://www.baeldung.com/linux/conflicting-values-error-resolution
    name: nfs-kernel-server
    state: present
    update_cache: true
- name: Create export directory
  ansible.builtin.file:
    path: "{{ nfs_setup_export_directory }}"
    state: directory
    owner: root
    group: root
    mode: "0755"
- name: Configure /etc/exports
  ansible.builtin.lineinfile:
    path: /etc/exports
    # anonuid/anongid map anonymous client access to uid/gid 1000 (the default user).
    line: "{{ nfs_setup_export_directory }} 192.168.1.0/24(rw,sync,no_subtree_check,anonuid=1000,anongid=1000)"
    create: true
    state: present
- name: Ensure NFS service is running and enabled
  ansible.builtin.service:
    name: nfs-kernel-server
    state: started
    enabled: true
- name: Export the shared directories
  # Re-exports everything in /etc/exports; cheap and safe to run unconditionally.
  ansible.builtin.command: exportfs -ra
- name: Verify NFS exports
  ansible.builtin.command: exportfs -v
  register: nfs_exports
  changed_when: false # read-only check; must not report "changed" on every run
- ansible.builtin.debug:
    msg: "NFS Exports: {{ nfs_exports.stdout }}"
- name: Announce the share over mDNS
  ansible.builtin.include_tasks: announce.yml

View File

@@ -0,0 +1,23 @@
---
# Mount the NFS share exported by the nfs_setup server on client machines.
- name: Install NFS client package
  ansible.builtin.apt: # https://www.baeldung.com/linux/conflicting-values-error-resolution
    name: nfs-common
    state: present
    update_cache: true
- name: Create local mount directory
  ansible.builtin.file:
    path: /mnt/nfs
    state: directory
    owner: pi
    group: docker
    mode: "0774"
- name: Mount NFS share
  # FQCN: the bare `mount` alias resolves to ansible.posix.mount (collection is
  # already listed in ansible/requirements.yml).
  ansible.posix.mount:
    src: "{{ nfs_setup_server_ip }}:{{ nfs_setup_export_directory }}"
    path: /mnt/nfs
    fstype: nfs
    opts: rw,vers=4
    state: mounted
  # Best-effort: the server may not be reachable yet during first provisioning.
  ignore_errors: true

View File

@@ -0,0 +1,9 @@
<?xml version="1.0" standalone='no'?>
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<!-- Avahi/mDNS advertisement of the NFS share; %h expands to the host name. -->
<service-group>
<name replace-wildcards="yes">%h NFS</name>
<service>
<type>_nfs._tcp</type>
<port>2049</port>
</service>
</service-group>

View File

@@ -30,7 +30,7 @@
register: mount_dir_stat
- name: Create the mount directory
file:
ansible.builtin.file:
path: "/arcodange/{{ mount_point }}"
state: directory
when: not mount_dir_stat.stat.exists
@@ -41,4 +41,4 @@
line: "LABEL={{ mount_point }} /arcodange/{{ mount_point }} ext4 defaults 0 0"
- name: Use updated mount list
command: mount -a
ansible.builtin.command: mount -a

View File

@@ -0,0 +1,7 @@
# Defaults for the deploy_docker_compose role.
# Compose project name is derived from the rendered docker-compose content.
app_name: "{{ (dockercompose_content | from_yaml).name }}"
# Files are owned by the pi user and shared with the docker group.
app_owner: pi
app_group: docker
# Disk partition (label) that hosts compose projects on hard_disk hosts.
partition: docker_composes
hard_disk_root_path: /arcodange
no_hard_disk_root_path: /home/pi/arcodange
# Hosts in the `hard_disk` group store data on the mounted disk; others fall back to $HOME.
root_path: "{{ ('hard_disk' in group_names) | ansible.builtin.ternary(hard_disk_root_path, no_hard_disk_root_path) }}"

View File

@@ -1,14 +1,14 @@
---
- name: Create application directory
file:
path: "/arcodange/{{ partition }}/{{ app_name }}"
ansible.builtin.file:
path: "{{root_path}}/{{ partition }}/{{ app_name }}"
state: directory
owner: "{{ app_owner }}"
group: "{{ app_group }}"
- name: Create data directory
file:
path: "/arcodange/{{ partition }}/{{ app_name }}/data"
ansible.builtin.file:
path: "{{root_path}}/{{ partition }}/{{ app_name }}/data"
state: directory
owner: "{{ app_owner }}"
group: "{{ app_group }}"
@@ -16,8 +16,8 @@
ignore_errors: true # app container might have set its own permissions on previous run
- name: Create scripts directory
file:
path: "/arcodange/{{ partition }}/{{ app_name }}/scripts"
ansible.builtin.file:
path: "{{root_path}}/{{ partition }}/{{ app_name }}/scripts"
state: directory
owner: "{{ app_owner }}"
group: "{{ app_group }}"
@@ -26,18 +26,19 @@
- name: Write docker-compose.yml
copy:
content: "{{ dockercompose_content | to_nice_yaml }}"
dest: "/arcodange/{{ partition }}/{{ app_name }}/docker-compose.yml"
dest: "{{root_path}}/{{ partition }}/{{ app_name }}/docker-compose.yml"
owner: "{{ app_owner }}"
group: "{{ app_group }}"
mode: '0644'
validate: 'docker compose -f %s config'
- name: Write docker-compose script
copy:
content: |
#!/bin/bash
set -ex
docker compose -f /arcodange/{{ partition }}/{{ app_name }}/docker-compose.yml "$@"
dest: "/arcodange/{{ partition }}/{{ app_name }}/scripts/docker-compose"
docker compose -f {{root_path}}/{{ partition }}/{{ app_name }}/docker-compose.yml "$@"
dest: "{{root_path}}/{{ partition }}/{{ app_name }}/scripts/docker-compose"
owner: "{{ app_owner }}"
group: "{{ app_group }}"
mode: '0755'

10
ansible/requirements.yml Normal file
View File

@@ -0,0 +1,10 @@
---
# Ansible Galaxy dependencies, installed by ansible/Dockerfile via
# `ansible-galaxy collection install -r /tmp/requirements.yml`.
roles:
- name: geerlingguy.docker
collections:
- name: community.general
- name: community.docker
- name: ansible.posix
- name: kubernetes.core
# k3s-ansible is not published on Galaxy; install it straight from git.
- name: git+https://github.com/k3s-io/k3s-ansible.git