Compare commits

..

3 Commits

Author SHA1 Message Date
f81a4b0889 --amend 2025-08-05 16:27:19 +02:00
68178b0dad add workflow_dispatch trigger 2025-08-05 15:49:35 +02:00
781a04b26f apply vault config from CI 2024-10-01 15:49:21 +02:00
78 changed files with 77 additions and 5298 deletions

View File

@@ -1,63 +0,0 @@
---
# template source: https://github.com/bretfisher/docker-build-workflow/blob/main/templates/call-docker-build.yaml
name: Crowdsec
on: #[push,pull_request]
workflow_dispatch: {}
push: &crowdsecPaths
paths:
- 'crowdsec/**/*.tf'
pull_request: *crowdsecPaths
# cancel any previously-started, yet still active runs of this workflow on the same branch
concurrency:
group: ${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
.vault_step: &vault_step
name: read vault secret
uses: https://gitea.arcodange.lab/arcodange-org/vault-action.git@main
id: vault-secrets
with:
url: https://vault.arcodange.lab
caCertificate: ${{ secrets.HOMELAB_CA_CERT }}
jwtGiteaOIDC: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
role: gitea_cicd_crowdsec
method: jwt
path: gitea_jwt
secrets: |
kvv1/google/credentials credentials | GOOGLE_BACKEND_CREDENTIALS ;
kvv1/gitea/tofu_module_reader ssh_private_key | TERRAFORM_SSH_KEY ;
jobs:
gitea_vault_auth:
name: Auth with gitea for vault
runs-on: ubuntu-latest
outputs:
gitea_vault_jwt: ${{steps.gitea_vault_jwt.outputs.id_token}}
steps:
- name: Auth with gitea for vault
id: gitea_vault_jwt
run: |
echo -n "${{ secrets.vault_oauth__sh_b64 }}" | base64 -d | bash
tofu:
name: Tofu - Crowdsec IAC
needs:
- gitea_vault_auth
runs-on: ubuntu-latest
env:
OPENTOFU_VERSION: 1.8.2
TERRAFORM_VAULT_AUTH_JWT: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
VAULT_CACERT: "${{ github.workspace }}/homelab.pem"
steps:
- *vault_step
- uses: actions/checkout@v4
- name: prepare vault self signed cert
run: echo -n "${{ secrets.HOMELAB_CA_CERT }}" | base64 -d > $VAULT_CACERT
- name: terraform apply
uses: dflook/terraform-apply@v1
with:
path: crowdsec/iac
auto_approve: true

View File

@@ -2,7 +2,7 @@
# template source: https://github.com/bretfisher/docker-build-workflow/blob/main/templates/call-docker-build.yaml
name: Helm Charts
on: [push,pull_request,workflow_dispatch]
on: [push,pull_request]
# push: &helmPaths # turns out gitea don't handle well the paths filter
# paths:
# - '*/\.yaml'
@@ -165,7 +165,7 @@ jobs:
chart_package=${chart}-${chart_version}.tgz
# helm package ${chart}
tar -X ${chart}/.helmignore -czf ${chart_package} ${chart}
curl --user ${{ github.actor }}:${{ secrets.PACKAGES_TOKEN }} -X POST --upload-file ./${chart_package} https://gitea.arcodange.lab/api/packages/${{ github.repository_owner }}/helm/api/charts
curl --user ${{ github.actor }}:${{ secrets.PACKAGES_TOKEN }} -X POST --upload-file ./${chart_package} https://gitea.arcodange.duckdns.org/api/packages/${{ github.repository_owner }}/helm/api/charts
application-charts:
<<: *charts-matrix-job

View File

@@ -1,63 +0,0 @@
---
# template source: https://github.com/bretfisher/docker-build-workflow/blob/main/templates/call-docker-build.yaml
name: Plausible
on: #[push,pull_request]
workflow_dispatch: {}
push: &plausiblePaths
paths:
- 'plausible/**/*.tf'
pull_request: *plausiblePaths
# cancel any previously-started, yet still active runs of this workflow on the same branch
concurrency:
group: ${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
.vault_step: &vault_step
name: read vault secret
uses: https://gitea.arcodange.lab/arcodange-org/vault-action.git@main
id: vault-secrets
with:
url: https://vault.arcodange.lab
caCertificate: ${{ secrets.HOMELAB_CA_CERT }}
jwtGiteaOIDC: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
role: gitea_cicd_plausible
method: jwt
path: gitea_jwt
secrets: |
kvv1/google/credentials credentials | GOOGLE_BACKEND_CREDENTIALS ;
kvv1/gitea/tofu_module_reader ssh_private_key | TERRAFORM_SSH_KEY ;
jobs:
gitea_vault_auth:
name: Auth with gitea for vault
runs-on: ubuntu-latest
outputs:
gitea_vault_jwt: ${{steps.gitea_vault_jwt.outputs.id_token}}
steps:
- name: Auth with gitea for vault
id: gitea_vault_jwt
run: |
echo -n "${{ secrets.vault_oauth__sh_b64 }}" | base64 -d | bash
tofu:
name: Tofu - plausible IAC
needs:
- gitea_vault_auth
runs-on: ubuntu-latest
env:
OPENTOFU_VERSION: 1.8.2
TERRAFORM_VAULT_AUTH_JWT: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
VAULT_CACERT: "${{ github.workspace }}/homelab.pem"
steps:
- *vault_step
- uses: actions/checkout@v4
- name: prepare vault self signed cert
run: echo -n "${{ secrets.HOMELAB_CA_CERT }}" | base64 -d > $VAULT_CACERT
- name: terraform apply
uses: dflook/terraform-apply@v1
with:
path: plausible/iac
auto_approve: true

View File

@@ -2,63 +2,47 @@
# template source: https://github.com/bretfisher/docker-build-workflow/blob/main/templates/call-docker-build.yaml
name: Hashicorp Vault
on: #[push,pull_request]
workflow_dispatch: {}
push: &vaultPaths
paths:
- 'hashicorp-vault/**/*.tf'
pull_request: *vaultPaths
on: [push,pull_request,workflow_dispatch]
# push: &helmPaths # turns out gitea don't handle well the paths filter
# paths:
# - '*/\.yaml'
# - '*/\.tpl'
# - '*/NOTES.txt'
# - '*/\.helmignore'
# pull_request: *helmPaths
# cancel any previously-started, yet still active runs of this workflow on the same branch
concurrency:
group: ${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
.vault_step: &vault_step
name: read vault secret
uses: https://gitea.arcodange.lab/arcodange-org/vault-action.git@main
id: vault-secrets
with:
url: https://vault.arcodange.lab
caCertificate: ${{ secrets.HOMELAB_CA_CERT }}
jwtGiteaOIDC: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
role: gitea_cicd
method: jwt
path: gitea_jwt
secrets: |
kvv1/google/credentials credentials | GOOGLE_BACKEND_CREDENTIALS ;
kvv1/gitea/app id | TF_VAR_gitea_app_id ;
kvv1/postgres/credentials_editor/credentials * | TF_VAR_POSTGRES_CREDENTIALS_EDITOR_ ;
jobs:
gitea_vault_auth:
name: Auth with gitea for vault
runs-on: ubuntu-latest
outputs:
gitea_vault_jwt: ${{steps.gitea_vault_jwt.outputs.id_token}}
gitea_vault_jwt: ${{steps.gitea_vault_jwt.outputs.access_token}}
steps:
- uses: actions/checkout@v4
- name: Auth with gitea for vault
id: gitea_vault_jwt
run: |
echo -n "${{ secrets.vault_oauth__sh_b64 }}" | base64 -d | bash
VAULT_AUTH_JWT=`echo -n "${{ secrets.get_gitea_vault_jwt__sh_b64 }}" | base64 -d | sh \
| tee /dev/tty | tail -n 1 | awk '{print $NF}'`
echo "access_token=$VAULT_AUTH_JWT" >> $GITHUB_OUTPUT
tofu:
name: Tofu - Vault IAC
name: Library charts ${{ matrix.chart }}
needs:
- gitea_vault_auth
runs-on: ubuntu-latest
container:
image: ghcr.io/opentofu/opentofu:latest
env:
OPENTOFU_VERSION: 1.8.2
TERRAFORM_VAULT_AUTH_JWT: ${{ needs.gitea_vault_auth.outputs.gitea_vault_jwt }}
VAULT_CACERT: "${{ github.workspace }}/homelab.pem"
TERRAFORM_VAULT_AUTH_JWT: ${{ needs.gitea_vault_auth.outputs.access_token }}
steps:
- *vault_step
- uses: actions/checkout@v4
- name: prepare vault self signed cert
run: echo -n "${{ secrets.HOMELAB_CA_CERT }}" | base64 -d > $VAULT_CACERT
- name: terraform apply
uses: dflook/terraform-apply@v1
with:
path: hashicorp-vault/iac
auto_approve: true
- tofu -chdir=hashicorp-vault/iac init
- tofu -chdir=hashicorp-vault/iac apply -no-color -auto-approve

2
.gitignore vendored
View File

@@ -1,5 +1,5 @@
.DS_Store
Chart.lock
**/charts/
*/charts/*.tgz
.terraform
.terraform.lock.hcl

View File

@@ -0,0 +1 @@
foo

View File

@@ -1,4 +1,4 @@
{{- range $app_name, $app := .Values.tools }}
{{- range $app_name := .Values.tools -}}
---
apiVersion: argoproj.io/v1alpha1
kind: Application
@@ -10,7 +10,7 @@ metadata:
spec:
project: tools
source:
repoURL: https://gitea.arcodange.lab/arcodange-org/tools
repoURL: https://gitea.arcodange.duckdns.org/arcodange-org/tools
targetRevision: HEAD
path: {{ $app_name }}
destination:
@@ -22,4 +22,4 @@ spec:
selfHeal: true
syncOptions:
- CreateNamespace=true
{{- end }}
{{ end }}

View File

@@ -10,7 +10,7 @@ metadata:
spec:
description: Arcodange tools (monitoring, cache, connection pool, secret management...)
sourceRepos:
- 'https://gitea.arcodange.lab/arcodange-org/tools'
- 'https://gitea.arcodange.duckdns.org/arcodange-org/tools'
# Only permit applications to deploy to the tools namespace in the same cluster
destinations:
- namespace: tools
@@ -23,5 +23,3 @@ spec:
kind: ClusterRole
- group: '*'
kind: MutatingWebhookConfiguration
- group: 'apiextensions.k8s.io'
kind: CustomResourceDefinition

View File

@@ -1,11 +1,5 @@
tools:
pgbouncer: {}
#pgcat # trop contraignant: lister tous les databases/users et auth_type md5 uniquement: {}
# prometheus: {}
hashicorp-vault: {}
crowdsec: {}
redis: {}
clickhouse: {}
grafana: {}
plausible: {}
prometheus: {}
- pgbouncer
#- pgcat # trop contraignant: lister tous les databases/users et auth_type md5 uniquement
# - prometheus
- hashicorp-vault

View File

@@ -1,2 +0,0 @@
charts/
!charts/databases/

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,24 +0,0 @@
apiVersion: v2
name: clickhouse databases
description: declare clickhouse databases
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "24.12.6.70-alpine"

View File

@@ -1,85 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: clickhouse-db-init
labels:
app.kubernetes.io/name: clickhouse-db-init
app.kubernetes.io/instance: {{ .Release.Name }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/init-sql-configmap.yaml") . | sha256sum }}
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: clickhouse-init
image: clickhouse/clickhouse-server:{{ .Chart.AppVersion }}
command: ["bash", "-c"]
args:
- |
echo "⏳ Waiting for ClickHouse..."
until clickhouse-client \
--host {{ .Values.clickhouse.host }} \
--port {{ .Values.clickhouse.port }} \
--user {{ .Values.clickhouse.adminUser }} \
--password "{{ .Values.clickhouse.adminPassword }}" \
-q "SELECT 1" >/dev/null 2>&1; do
sleep 2
done
echo "✅ ClickHouse ready"
{{- if .Values.databases }}
echo "➡️ Creating declared databases & users..."
clickhouse-client \
--host {{ .Values.clickhouse.host }} \
--port {{ .Values.clickhouse.port }} \
--user {{ .Values.clickhouse.adminUser }} \
--password "{{ .Values.clickhouse.adminPassword }}" \
--multiquery < /config/init.sql
{{- end }}
echo "➡️ Generating list of databases to drop..."
clickhouse-client \
--host {{ .Values.clickhouse.host }} \
--port {{ .Values.clickhouse.port }} \
--user {{ .Values.clickhouse.adminUser }} \
--password "{{ .Values.clickhouse.adminPassword }}" \
-q "
SELECT concat('DROP DATABASE IF EXISTS ', name, ';')
FROM system.databases
WHERE name NOT IN (
'system',
'information_schema',
'INFORMATION_SCHEMA',
'default'
{{- if .Values.databases }}
{{- range $db := .Values.databases }}
, '{{ $db }}'
{{- end }}
{{- end }}
);
" > /tmp/to_drop.sql
if [ -s /tmp/to_drop.sql ]; then
echo "➡️ Dropping leftover databases:"
cat /tmp/to_drop.sql
clickhouse-client \
--host {{ .Values.clickhouse.host }} \
--port {{ .Values.clickhouse.port }} \
--user {{ .Values.clickhouse.adminUser }} \
--password "{{ .Values.clickhouse.adminPassword }}" \
--multiquery < /tmp/to_drop.sql
else
echo "✔️ No databases to drop."
fi
echo "🎉 Initialization completed"
volumeMounts:
- name: init-sql
mountPath: /config
volumes:
- name: init-sql
configMap:
name: clickhouse-init-sql

View File

@@ -1,26 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: clickhouse-init-sql
data:
init.sql: |
-- This file is auto-generated by Helm
-- Databases and users initialization
{{- range $db := .Values.databases }}
-- Database: {{ $db }}
CREATE DATABASE IF NOT EXISTS {{ $db }};
-- User: {{ $db }}
CREATE USER IF NOT EXISTS {{ $db }}
IDENTIFIED BY '{{ $db }}arcodange';
-- Privileges
GRANT CREATE, SELECT, INSERT, ALTER, DROP
ON {{ $db }}.*
TO {{ $db }};
GRANT SELECT ON system.* TO {{ $db }};
{{- end }}

View File

@@ -1,8 +0,0 @@
clickhouse:
host: clickhouse.tools
port: 9000
adminUser: arcodange
adminPassword: clickhousearcodange
databases:
- plausible

View File

@@ -1,176 +0,0 @@
global: {}
image:
# -- The registry to pull the image from.
registry: docker.io
# -- The repository to pull the image from.
repository: clickhouse/clickhouse-server
# -- The docker tag, if left empty chart's appVersion will be used.
# @default -- `.Chart.AppVersion`
tag: ''
# -- The pull policy for the controller.
pullPolicy: IfNotPresent
nameOverride: ''
fullnameOverride: ''
controller:
# -- Create a workload for this chart.
enabled: true
# -- Type of the workload object.
kind: StatefulSet
# -- The number of replicas.
replicas: 1
# -- The controller update strategy. Currently only applies to controllers of kind `Deployment`.
updateStrategy: {}
# -- Additional annotations for the controller object.
annotations: {}
# -- Additional labels for the controller object.
labels: {}
service:
# -- Create a service for exposing this chart.
enabled: true
# -- The service type used.
type: ClusterIP
# -- ClusterIP used if service type is `ClusterIP`.
clusterIP: ''
# -- LoadBalancerIP if service type is `LoadBalancer`.
loadBalancerIP: ''
# -- Allowed addresses when service type is `LoadBalancer`.
loadBalancerSourceRanges: []
# -- Additional annotations for the service object.
annotations: {}
# -- Additional labels for the service object.
labels: {}
env:
# -- Timezone for the container.
- name: TZ
value: Europe/Paris
# -- List of extra arguments for the container.
extraArgs: []
# - --loglevel warning
ports:
rest:
# -- Enable the port inside the `Controller` and `Service` objects.
enabled: true
# -- The port used as internal port and cluster-wide port if `.service.type` == `ClusterIP`.
port: 8123
# -- The external port used if `.service.type` == `NodePort`.
nodePort: null
# -- The protocol used for the service.
protocol: TCP
rpc:
# -- Enable the port inside the `Controller` and `Service` objects.
enabled: true
# -- The port used as internal port and cluster-wide port if `.service.type` == `ClusterIP`.
port: 9000
# -- The external port used if `.service.type` == `NodePort`.
nodePort: null
# -- The protocol used for the service.
protocol: TCP
configMap:
# -- Create a new config map object.
create: true
# -- Mount path of the config map object.
mountPath: /etc/config
# -- Use an existing config map object.
existingConfigMap: ''
# -- Map of configuration files as strings.
files:
custom-users.xml: |
<clickhouse>
<users>
<default>
<networks>
<ip>::1</ip>
<ip>127.0.0.1</ip>
</networks>
</default>
<arcodange>
<password>clickhousearcodange</password>
<networks>
<ip>::/0</ip>
<ip>0.0.0.0/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
<access_management>1</access_management>
</arcodange>
</users>
</clickhouse>
# file1.yml: |
# # contents
# file2.yml: |
# # contents
# -- Additional annotations for the config map object.
annotations: {}
# -- Additional labels for the config map object.
labels: {}
persistentVolumeClaim:
# -- Create a new persistent volume claim object.
create: true
# -- Mount path of the persistent volume claim object.
mountPath: /var/lib/clickhouse
# -- Access mode of the persistent volume claim object.
accessMode: ReadWriteOnce
# -- Volume mode of the persistent volume claim object.
volumeMode: Filesystem
# -- Storage request size for the persistent volume claim object.
size: 16Gi
# -- Storage class name for the persistent volume claim object.
storageClassName: ''
# -- Use an existing persistent volume claim object.
existingPersistentVolumeClaim: ''
# -- Additional annotations for the persistent volume claim object.
annotations: {}
# -- Additional labels for the persistent volume claim object.
labels: {}
serviceAccount:
# -- Create a `ServiceAccount` object.
create: true
# -- Specify the service account used for the controller.
name: ''
# -- Additional annotations for the role and role binding objects.
annotations: {}
# -- Additional labels for the role and role binding objects.
labels: {}
# -- Pod-level security attributes. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context).
securityContext:
fsGroup: 101
runAsNonRoot: true
runAsGroup: 101
runAsUser: 101
# -- Compute resources used by the container. More info [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- Pod-level affinity. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: NotIn
values:
- pi2
# -- Pod-level tolerations. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
tolerations: []
# - key: node-role.kubernetes.io/control-plane
# operator: Exists
# effect: NoSchedule

View File

@@ -1,28 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: tools
helmGlobals:
chartHome: charts
helmCharts:
- name: clickhouse
repo: https://charts.pascaliske.dev
version: 0.4.0
releaseName: clickhouse
valuesFile: clickhouseValues.yaml
- name: databases
releaseName: clickhouse-databases
patches:
- target:
kind: StatefulSet
name: clickhouse
patch: |-
- op: add
path: /spec/template/spec/containers/0/volumeMounts/-
value:
name: config-volume
mountPath: /etc/clickhouse-server/users.d/custom-users.xml
subPath: custom-users.xml
readOnly: true

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,32 +0,0 @@
apiVersion: v2
name: crowdsec
description: A Helm chart for Kubernetes
dependencies:
- name: tool
version: 0.1.0
repository: https://gitea.arcodange.lab/api/packages/arcodange-org/helm
- name: crowdsec
version: 0.20.1
repository: https://crowdsecurity.github.io/helm-charts
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
# appVersion: "1.16.0"

View File

@@ -1,6 +0,0 @@
terraform {
backend "gcs" {
bucket = "arcodange-tf"
prefix = "tools/crowdsec/main"
}
}

View File

@@ -1,5 +0,0 @@
module "app_roles" {
source = "git::ssh://git@192.168.1.202:2222/arcodange-org/tools.git//hashicorp-vault/iac/modules/app_roles?depth=1&ref=main"
name = "crowdsec"
service_account_namespaces = ["tools"]
}

View File

@@ -1,16 +0,0 @@
terraform {
required_providers {
vault = {
source = "vault"
version = "4.4.0"
}
}
}
provider "vault" {
address = "https://vault.arcodange.lab"
auth_login_jwt { # TERRAFORM_VAULT_AUTH_JWT environment variable
mount = "gitea_jwt"
role = "gitea_cicd_crowdsec"
}
}

View File

@@ -1,3 +0,0 @@
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart-config.tpl" . -}}
{{- end -}}

View File

@@ -1,3 +0,0 @@
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart.tpl" . -}}
{{- end -}}

View File

@@ -1,6 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: crowdsec
namespace: {{ .Release.Namespace }}
automountServiceAccountToken: true

View File

@@ -1,14 +0,0 @@
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: crowdsec
namespace: {{ .Release.Namespace }}
spec:
vaultConnectionRef: default
method: kubernetes
mount: kubernetes
kubernetes:
role: crowdsec
serviceAccount: crowdsec
audiences:
- vault

View File

@@ -1,25 +0,0 @@
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultDynamicSecret
metadata:
name: crowdsec-db-credentials
namespace: {{ .Release.Namespace }}
spec:
# Mount path of the secrets backend
mount: postgres
# Path to the secret
path: creds/crowdsec
# Where to store the secrets, VSO will create the secret
destination:
create: true
name: crowdsec-db-credentials
# Restart these pods when secrets rotated
rolloutRestartTargets:
- kind: Deployment
name: crowdsec-lapi
# Name of the CRD to authenticate to Vault
vaultAuthRef: crowdsec

View File

@@ -1,95 +0,0 @@
crowdsec: &crowdsec_config
# for raw logs format: json or cri (docker|containerd)
container_runtime: docker
agent:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
# Specify each pod whose logs you want to process
acquisition:
# The namespace where the pod is located
- namespace: kube-system
# The pod name
podName: traefik-*
# as in crowdsec configuration, we need to specify the program name to find a matching parser
program: traefik
env:
- name: COLLECTIONS
value: "crowdsecurity/traefik crowdsecurity/http-cve"
- name: TZ
value: Europe/Paris
lapi:
strategy:
type: Recreate
env:
- name: TZ
value: Europe/Paris
# To enroll the Security Engine to the console
- name: ENROLL_KEY
value: "cmieq72i3000802jr1wx8kply"
- name: ENROLL_INSTANCE_NAME
value: "homelab"
- name: ENROLL_TAGS
value: "k3s rpi test"
- name: DB_USER
valueFrom:
secretKeyRef:
name: crowdsec-db-credentials
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: crowdsec-db-credentials
key: password
appsec:
enabled: true
acquisitions:
- appsec_config: crowdsecurity/appsec-default
labels:
type: appsec
listen_addr: 0.0.0.0:7422
path: /
source: appsec
env:
- name: TZ
value: Europe/Paris
- name: COLLECTIONS
value: "crowdsecurity/appsec-virtual-patching crowdsecurity/appsec-generic-rules"
resources:
limits:
cpu: "500m"
memory: "300Mi"
requests:
cpu: "100m"
memory: "200Mi"
config:
config.yaml.local: |
db_config:
type: postgresql
user: ${DB_USER}
password: ${DB_PASSWORD}
db_name: crowdsec
host: pgbouncer.tools
port: 5432
api:
server:
auto_registration: # Activate if not using TLS for authentication
enabled: true
token: "${REGISTRATION_TOKEN}" # /!\ do not change
allowed_ranges: # /!\ adapt to the pod IP ranges used by your cluster
- "127.0.0.1/32"
- "192.168.0.0/16"
- "10.42.0.0/16"
- "172.16.0.0/12"
tool:
# kind: 'SubChart' or 'HelmChart', if subchart then uncomment Chart.yaml dependency, else comment and use tool library with helm chart template
kind: 'SubChart'
repo: https://crowdsecurity.github.io/helm-charts
chart: crowdsec
version: 0.20.1
values: *crowdsec_config

View File

@@ -1,34 +0,0 @@
# Chart: keydb-custom
# Helm chart tailored for KeyDB (EqAlpha) on 2 Raspberry Pi 5 nodes
# - Mode: master (statefulset index 0) + replica (index 1)
# - Replica runs as replicaof master at startup
# - server-threads = 4
# - Config mounted via ConfigMap
# - Liveness / readiness probes included
# - Persistence via PersistentVolumeClaim (storageClass configurable)
# -----------------------------------------------------------------------------
# Chart.yaml
# -----------------------------------------------------------------------------
apiVersion: v2
name: grafana
description: A Helm chart for Kubernetes
dependencies:
- name: tool
version: 0.1.0
repository: https://gitea.arcodange.lab/api/packages/arcodange-org/helm
- name: grafana
version: 10.3.0
repository: https://grafana.github.io/helm-charts
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
version: 0.1.0
appVersion: "latest"

View File

@@ -1,3 +0,0 @@
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart-config.tpl" . -}}
{{- end -}}

View File

@@ -1,3 +0,0 @@
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart.tpl" . -}}
{{- end -}}

File diff suppressed because it is too large Load Diff

View File

@@ -22,6 +22,3 @@
*.tmproj
.vscode/
iac/
README.md
.terraform
.terraform.lock

View File

@@ -5,13 +5,10 @@ description: A Helm chart for Kubernetes
dependencies:
- name: tool
version: 0.1.0
repository: https://gitea.arcodange.lab/api/packages/arcodange-org/helm
repository: https://gitea.arcodange.duckdns.org/api/packages/arcodange-org/helm
- name: vault
version: 0.28.1
repository: https://helm.releases.hashicorp.com
- name: vault-secrets-operator
version: 0.9.0
repository: https://helm.releases.hashicorp.com
# A chart can be either an 'application' or a 'library' chart.
#

View File

@@ -0,0 +1 @@
foo

View File

@@ -1,151 +0,0 @@
# Vault
1. Les [playbooks ansible](https://gitea.arcodange.lab/arcodange-org/factory/src/branch/main/ansible/arcodange/factory/playbooks) configurent la base de données postgres et le minimum requis pour permettre au dépôt "tools" d'appliquer via un workflow gitea action [une configuration vault via tofu](./iac/).
2. Configuration des backend d'authentification et des roles pour postgres et kubernetes. Définition de rôles "${app}-ops" pour permettre au dépot d'une application de définir ses propres dépendances dans vault. Rotation de credentials postgres pour les applications.
3. [Le dépot de l'application webapp](https://gitea.arcodange.lab/arcodange-org/webapp) gère l'obtention de ses crédentials pour postgres.
```mermaid
flowchart LR
classDef playbook stroke:#0f0,fill:#440,stroke-width: 1px
classDef tofu stroke:#f00,fill:#404,stroke-width: 2px
classDef argocd stroke:#00f,fill:#044,stroke-width: 3px
classDef database stroke:#bb0,fill:#ff0,stroke-width: 5px,color: black
classDef secret stroke:#f00,fill:#f00,stroke-width: 5px,color: yellow
classDef secretOperator stroke:#f00,fill:DarkRed,stroke-width: 5px,color: Orange
subgraph git_code[factory.git]
subgraph ansible_collection
setup_playbook[playbook arcodange.factory.setup]:::playbook
tools_playbook[playbook arcodange.factory.tools]:::playbook
end
git_code_tofu_vault{{tofu}}:::tofu
end
subgraph git_tools[tools.git]
argocd_tools{{Argo CD Apps}}:::argocd
git_tools_tofu_vault{{tofu}}:::tofu
end
subgraph git_webapp[webapp.git]
webapp["Go(lang) web app"]
argocd_webapp{{Argo CD App}}:::argocd
git_webapp_tofu_vault{{tofu}}:::tofu
end
subgraph servers
subgraph k3s
subgraph k3s_ns_tools[ns:tools]
argocd{{Argo CD}}:::argocd
pgbouncer
subgraph vault
subgraph vault_auth[auth]
subgraph vault_auth_openid[openid]
end
vault_auth_jwt[jwt]
vault_auth_k8s[kubernetes]
vault_auth_jwt_role_gitea_cicd[gitea_cicd role]
vault_auth_jwt_role_gitea_cicd_webapp_ops[gitea_cicd_webapp ops role]
vault_auth_k8s_role_vso[vault-secret-operator role]
vault_auth_k8s_role_webapp[webapp role]
subgraph policies
policy_default[default]
policy_webapp[webapp]
policy_webapp_ops[webapp ops]
policy_admin[admin]
policy_vso[edit-vso-client-cache]
end
end
subgraph vault_secrets[secrets]
subgraph kvv2
google/credentials
webapp/config
end
end
subgraph vault_postgres[postgres]
creds/creds-editor
creds/webapp
end
subgraph vault_transit[transit]
end
end
vault-secret-operator:::secretOperator
end
subgraph k3s_ns_webapp[ns:webapp]
webapp_deployment[deployment:webapp]
webapp_postgres_creds_secret[secret:postgres creds]:::secret
webapp_config_secret[secret:config]:::secret
webapp_service_account[sa:webapp]
end
end
subgraph postgres
root_credentials
postgres_db[(postgres)]:::database
webapp_credentials:::secret
webapp_db[(webapp)]:::database
vault_creds_editor_role{{credentials_editor}}
end
end
setup_playbook -. setup postgres .-> postgres
tools_playbook -.-o git_code_tofu_vault
git_code_tofu_vault -..-> vault_auth_openid
git_code_tofu_vault -..-> vault_auth_jwt -- tofu:factory --- vault_auth_jwt_role_gitea_cicd
git_code_tofu_vault -..-> kvv2
git_code_tofu_vault -..-> google/credentials
linkStyle 0,1 stroke:#ff3,stroke-width:1px,color:DarkKhaki;
linkStyle 2,3,5,6 stroke:#f3f,stroke-width:2px,color:DarkOrange;
git_tools -.-o argocd_tools
argocd_tools -.-> pgbouncer
argocd_tools -.-> vault
argocd_tools -.-> vault-secret-operator
argocd_tools o--o argocd
linkStyle 7,8,9,10,11 stroke:#3ff,stroke-width:3px,color:DarkSlateBlue;
git_tools_tofu_vault -..-> vault_auth_k8s -- sa:vso --- vault_auth_k8s_role_vso
git_tools_tofu_vault -..-> webapp/config
git_tools_tofu_vault -..-> vault_transit
git_tools_tofu_vault -..-> vault_postgres
vault_auth_k8s ---> k3s
vault_postgres --> pgbouncer x==> postgres; webapp_deployment --> pgbouncer
linkStyle 12,14,15,16 stroke:#f3f,stroke-width:2px,color:DarkOrange;
linkStyle 18,19,20 stroke:gold,stroke-width:2px;
vault_transit x---x vault-secret-operator
vault-secret-operator x---x vault_auth_k8s_role_vso
vault_auth_jwt_role_gitea_cicd x--x policy_default
vault_auth_k8s_role_vso x--x policy_vso
creds/webapp -.-> webapp_credentials
creds/webapp -.-> vault-secret-operator
vault-secret-operator -.-> webapp_postgres_creds_secret
webapp/config -.-> vault-secret-operator
vault-secret-operator -.-> webapp_config_secret
argocd_webapp -.-> k3s_ns_webapp
webapp --o webapp_deployment
webapp_postgres_creds_secret --o webapp_deployment
webapp_deployment --> webapp_service_account
vault_auth_jwt -- tofu:tools --- vault_auth_jwt_role_gitea_cicd_webapp_ops
vault_auth_jwt_role_gitea_cicd_webapp_ops x--x policy_webapp_ops
vault_auth_k8s -- sa:webapp --- vault_auth_k8s_role_webapp x-- tofu:webapp --x policy_webapp
git_webapp_tofu_vault -.-> vault_auth_k8s_role_webapp
git_webapp_tofu_vault -.-> creds/webapp
root_credentials x--x postgres_db
webapp_credentials x--x webapp_db
tools_playbook --> vault_creds_editor_role
vault_creds_editor_role -. change password .-> webapp_credentials
vault_postgres x--x vault_creds_editor_role
```

View File

@@ -1,6 +0,0 @@
# Remote state for the hashicorp-vault Terraform root module.
# NOTE(review): GCS credentials are presumably injected by CI via
# GOOGLE_BACKEND_CREDENTIALS (see the vault step in the workflow) — confirm.
terraform {
  backend "gcs" {
    bucket = "arcodange-tf"
    prefix = "tools/hashicorp_vault/main"
  }
}

View File

@@ -1,25 +0,0 @@
locals {
  # ServiceAccount of the factory Ansible tooling that configures the
  # crowdsec traefik plugin; bound to the k8s auth role below.
  factory_crowdsec_conf_sa_name = "factory-ansible-tool-crowdsec-traefik-plugin"
}

# Read-only policy over the factory secrets under kvv2.
data "vault_policy_document" "factory_crowdsec_conf" {
  rule {
    path         = "kvv2/data/cms/factory/*" # cms.git//cloudflare/iac.tf
    capabilities = ["read", "list"]
  }
}

resource "vault_policy" "factory_crowdsec_conf" {
  name   = "factory_crowdsec_conf"
  policy = data.vault_policy_document.factory_crowdsec_conf.hcl
}

# Kubernetes auth role: lets the SA above (kube-system namespace) log in to
# Vault and receive the read-only policy with a 1h token TTL.
resource "vault_kubernetes_auth_backend_role" "factory_crowdsec_conf" {
  backend                          = vault_auth_backend.kubernetes.path
  role_name                        = "factory_crowdsec_conf"
  bound_service_account_names      = [local.factory_crowdsec_conf_sa_name]
  bound_service_account_namespaces = ["kube-system"]
  token_ttl                        = 3600
  token_policies                   = ["default", vault_policy.factory_crowdsec_conf.name]
  audience                         = "vault"
  alias_name_source                = "serviceaccount_name"
}

View File

@@ -1,85 +1,39 @@
resource "vault_auth_backend" "kubernetes" {
type = "kubernetes"
}
resource "vault_kubernetes_auth_backend_config" "config" {
backend = vault_auth_backend.kubernetes.path
kubernetes_host = "https://kubernetes.default.svc:443"
}
resource "vault_mount" "kvv2" {
path = "kvv2"
type = "kv"
options = { version = "2" }
description = "KV Version 2 secret engine mount"
}
resource "vault_mount" "postgres" {
path = "postgres"
type = "database"
}
resource "vault_database_secret_backend_connection" "postgres" {
backend = vault_mount.postgres.path
name = "postgres"
allowed_roles = ["*"]
root_rotation_statements = [
"ALTER USER \"{{name}}\" WITH PASSWORD '{{password}}';",
]
postgresql {
connection_url = "postgresql://{{username}}:{{password}}@pgbouncer.tools:5432/postgres?sslmode=disable"
username = var.POSTGRES_CREDENTIALS_EDITOR_USERNAME
password = var.POSTGRES_CREDENTIALS_EDITOR_PASSWORD
terraform {
backend "gcs" {
bucket = "arcodange-tf"
prefix = "tools/hashicorp_vault/main"
}
}
resource "vault_mount" "transit" {
path = "transit"
type = "transit"
description = "Pour le vault secret operator (vso) dans k3s en cas de redemarrage par exemple"
# default_lease_ttl_seconds = 3600
# max_lease_ttl_seconds = 86400
}
resource "vault_transit_secret_backend_key" "vso_client_cache" {
backend = vault_mount.transit.path
name = "vso-client-cache"
variable "vault_address" {
type = string
default = "http://127.0.0.1:8200"
}
data "vault_policy_document" "vso_client_cache" {
terraform {
required_providers {
vault = {
source = "vault"
version = "4.4.0"
}
}
}
provider vault {
address = var.vault_address
auth_login_jwt { # TERRAFORM_VAULT_AUTH_JWT environment variable
role = "admin"
}
}
data "vault_policy_document" "admin" {
rule {
path = "${vault_mount.transit.path}/encrypt/${vault_transit_secret_backend_key.vso_client_cache.name}"
capabilities = ["create", "update"]
}
rule {
path = "${vault_mount.transit.path}/decrypt/${vault_transit_secret_backend_key.vso_client_cache.name}"
capabilities = ["create", "update"]
path = "*"
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
description = "admin privileges"
}
}
resource "vault_policy" "vso_client_cache" {
name = "edit-vso-client-cache"
policy = data.vault_policy_document.vso_client_cache.hcl
}
resource "vault_kubernetes_auth_backend_role" "vso" {
backend = vault_auth_backend.kubernetes.path
role_name = "vault-secret-operator"
bound_service_account_names = ["hashicorp-vault-vault-secrets-operator-controller-manager"]
bound_service_account_namespaces = ["tools"]
token_ttl = 0
token_period = 120
token_policies = ["default", vault_policy.vso_client_cache.name]
audience = "vault"
alias_name_source = "serviceaccount_name"
}
module "app_policies" {
source = "./modules/app_policy"
for_each = { for app in var.applications : app.name => app }
name = each.value.name
ops_policies = each.value.policies
service_account_names = each.value.service_account_names
service_account_namespaces = each.value.service_account_namespaces
gitea_app_id = var.gitea_app_id
resource "vault_policy" "admin" {
name = "admin"
policy = data.vault_policy_document.admin.hcl
}

View File

@@ -1,214 +0,0 @@
# Modules
## app_policy
_Ce module à déclarer dans ce projet permet au projet subordonné de déclarer le module app_roles suivant._
Ce module Terraform associe un **projet Git** à un ensemble de ressources Vault :
- Une **policy `-ops`** pour la CI/CD du projet (dépôt Git).
- Une **policy `app`** pour le runtime applicatif (pods).
- Un **groupe Vault** lié au projet. (pour ajouter les utilisateurs vault associé à leur compte gitea)
- Un **rôle JWT Vault** lié à votre SCM (ex : Gitea).
- Les droits nécessaires pour gérer les rôles Kubernetes et Postgres associés au projet.
### 🚀 Usage
```hcl
module "webapp_vault" {
source = "./modules/vault_project"
name = "webapp"
gitea_app_id = "my-gitea-oauth-app-id" # secret récupéré via vault dans la CI
}
```
## app_roles
Ce module Terraform configure les rôles Vault nécessaires pour quune **application déployée dans Kubernetes** puisse :
- sauthentifier auprès de Vault via son `ServiceAccount`,
- obtenir des **identifiants Postgres dynamiques**,
- accéder à ses secrets dans Vault.
### 🚀 Usage
```hcl
module "webapp_vault_app" {
source = "./modules/vault_app"
name = "webapp"
database = "mydb" # optionnel, par défaut = name
}
```
## principe
```lua
+-----------------+
| Dépôt Git |
| (CI/CD Terraform|
+--------+--------+
|
[Auth via Vault JWT Role]
|
+----------v-----------+
| Vault (Policy -ops) |
| - Peut gérer |
| - Roles K8s |
| - Roles Postgres|
| - Secrets KV |
+----------+-----------+
|
[Token éphémère CI/CD]
|
+----------------v----------------+
| Kubernetes API |
| - Applique CRDs / Secrets |
| - Configure Longhorn / RBAC |
+---------------------------------+
--------------------------- Flux runtime ---------------------------
+-----------------+
| Pod App |
| (SA: webapp) |
+--------+--------+
|
[Auth via Vault K8s Role]
|
+----------v-----------+
| Vault (Policy app) |
| - Peut lire |
| - kvv2/data/... |
| - postgres/... |
+---------------------+
|
[Secrets dynamiques: PW DB, etc.]
|
+--------v---------+
| Postgres DB |
+------------------+
```
---
_documentation destinée aux dépots des applications:_
---
# 🔑 Gestion des secrets avec Vault Secrets Operator (VSO)
Ce repository utilise [Vault Secrets Operator](https://developer.hashicorp.com/vault/docs/platform/k8s/vso) pour gérer les secrets de lapplication (notamment les identifiants Postgres).
Lobjectif est déviter de stocker des credentials statiques, en déléguant la génération et la rotation à HashiCorp Vault.
---
## ⚙️ Architecture
1. **Terraform côté admin** configure Vault :
- un **backend Postgres** (`postgres/`) connecté à la base via pgbouncer,
- un **rôle Vault** `webapp` (`postgres/roles/webapp`) qui définit la manière dont les credentials dynamiques sont créés,
- un **rôle Kubernetes** `webapp` (`auth/kubernetes/role/webapp`) qui autorise le ServiceAccount `webapp` du namespace à sauthentifier auprès de Vault.
2. **Lapplication** (dans ce repo) déclare :
- un `VaultAuth` qui associe le **ServiceAccount `webapp`** au rôle Vault `webapp`,
- un `VaultDynamicSecret` qui demande un secret dynamique (`postgres/creds/webapp`),
- un `Secret` Kubernetes généré automatiquement par VSO (`vso-db-credentials`), injecté dans le Pod de lapplication.
---
## 🛠️ Ressources déployées
### `VaultConnection`
```yaml
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultConnection
metadata:
finalizers:
- vaultconnection.secrets.hashicorp.com/finalizer
labels:
name: default
namespace: {{ .Release.Namespace }}
spec:
address: http://hashicorp-vault.tools.svc.cluster.local:8200
skipTLSVerify: false
```
### `VaultAuth`
```yaml
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: auth
namespace: {{ .Release.Namespace }}
spec:
vaultConnectionRef: default
method: kubernetes
mount: kubernetes
kubernetes:
role: webapp
serviceAccount: {{ include "webapp.serviceAccountName" . }}
audiences:
- vault
```
Permet à VSO (et donc à lapp) de sauthentifier auprès de Vault avec le rôle webapp.
VaultDynamicSecret
```yaml
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultDynamicSecret
metadata:
name: vso-db
namespace: {{ .Release.Namespace }}
spec:
mount: postgres
path: creds/webapp # chemin du rôle dynamique Postgres
destination:
create: true
name: vso-db-credentials
rolloutRestartTargets:
- kind: Deployment
name: {{ include "webapp.fullname" . }}
vaultAuthRef: auth
```
Demande un secret dynamique Postgres depuis Vault et le stocke dans un Secret Kubernetes nommé vso-db-credentials.
Le Deployment de lapp est redémarré automatiquement à chaque rotation de credentials.
📦 Consommation du secret
Une fois VSO en place, les credentials Postgres sont disponibles dans le Secret Kubernetes :
```yaml
apiVersion: v1
kind: Pod
metadata:
name: example
spec:
containers:
- name: app
image: myapp:latest
env:
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: vso-db-credentials
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: vso-db-credentials
key: password
```
🔄 Rotation
Vault génère des identifiants éphémères (par défaut TTL = 1h).
VSO renouvelle ou régénère automatiquement ces credentials.
Lorsquun nouveau secret est émis, le Deployment ciblé est redémarré pour recharger les variables denvironnement.
✅ Résumé
Pas de secrets stockés en clair dans Git ou Helm.
Rotation automatique des credentials Postgres.
Intégration fluide avec Kubernetes via ServiceAccounts.

View File

@@ -1,155 +0,0 @@
# jwt cicd app ops
# Per-application CI/CD ("ops") policy: allows a project's pipeline to create
# - its vault role
# - its k8s auth backend role
# - its postgres role
# - its kvv2 secrets
locals {
  name = lower(var.name)
  # var.name is always included; extra entries are explicit whitelists.
  bound_service_account_names      = concat([var.name], var.service_account_names)
  bound_service_account_namespaces = concat([var.name], var.service_account_namespaces)
}

data "vault_policy_document" "ops" {
  # use terraform vault provider
  rule {
    path         = "auth/token/create"
    capabilities = ["create", "update"]
  }
  # check on mounted auth backend (such as k8s)
  rule {
    path         = "sys/mounts/auth/*"
    capabilities = ["read"]
  }
  # read google credentials for terraform gcs backend
  rule {
    path         = "kvv1/google/credentials"
    capabilities = ["read"]
  }
  # read cloudflare related secrets
  rule {
    path         = "kvv1/cloudflare/${local.name}*"
    capabilities = ["read", "list", "create", "update", "delete"]
  }
  # read ovh related secrets
  rule {
    path         = "kvv1/ovh/${local.name}*"
    capabilities = ["read", "list", "create", "update", "delete"]
  }
  # read tofu_module_reader gitea bot user ssh keys
  rule {
    path         = "kvv1/gitea/tofu_module_reader"
    capabilities = ["read"]
  }
  # edit postgres credentials access permissions
  rule {
    path         = "postgres/roles/${local.name}*"
    capabilities = ["read", "list", "create", "update", "delete"]
  }
  # edit k8s role, with parameter constraints so a project cannot bind
  # foreign service accounts or grant itself extra token policies
  rule {
    path         = "auth/kubernetes/role/${local.name}*"
    capabilities = ["read", "list", "create", "update", "delete"]
    # any other parameter is unrestricted...
    allowed_parameter {
      key   = "*"
      value = []
    }
    # ...but these three are pinned to the whitelisted values.
    # NOTE(review): values are jsonencode()d strings here and un-quoted again
    # by the replace() hack in vault_policy.ops — keep the two in sync.
    allowed_parameter {
      key   = "bound_service_account_names"
      value = [jsonencode(local.bound_service_account_names)]
    }
    allowed_parameter {
      key   = "bound_service_account_namespaces"
      value = [jsonencode(local.bound_service_account_namespaces)]
    }
    allowed_parameter {
      key = "token_policies"
      value = [
        jsonencode(["default", local.name]),
        jsonencode([local.name, "default"])
      ]
    }
  }
  # allow editing app secrets (kvv2 data + version management endpoints)
  rule {
    path         = "kvv2/data/${local.name}/*"
    capabilities = ["create", "update", "read", "delete"]
  }
  rule {
    path         = "kvv2/delete/${local.name}/*"
    capabilities = ["update"]
  }
  rule {
    path         = "kvv2/undelete/${local.name}/*"
    capabilities = ["update"]
  }
  rule {
    path         = "kvv2/destroy/${local.name}/*"
    capabilities = ["update"]
  }
  rule {
    path         = "kvv2/metadata/${local.name}/*"
    capabilities = ["read", "list", "delete"]
  }
  # allow edit vault role (risky ?)
}
# Render the ops policy. allowed_parameter values are jsonencode()d strings,
# which the policy document wraps in quotes; the nested replace() strips the
# stray quotes so the final HCL contains real JSON lists:
resource "vault_policy" "ops" {
  name = "${local.name}-ops"
  /*
  allowed_parameters = {
  - "bound_service_account_names" = ["["webapp"]"]
  + "bound_service_account_names" = [["webapp"]]
  }
  */
  policy = replace(
    replace(
      data.vault_policy_document.ops.hcl,
      "\"[\"", "[\""
    ),
    "\"]\"", "\"]"
  )
}

# Internal identity group carrying the ops policy; member entities (gitea
# users) are attached externally (external_member_entity_ids = true).
resource "vault_identity_group" "ops" {
  name                       = "${local.name}-ops"
  type                       = "internal"
  external_member_entity_ids = true
  policies                   = [vault_policy.ops.name]
}

# JWT auth mount (gitea OIDC) created outside this module.
data "vault_auth_backend" "gitea_jwt" {
  path = "gitea_jwt"
}

# CI login role: a Gitea OIDC token with the app's audience maps to an entity
# keyed on the email claim.
resource "vault_jwt_auth_backend_role" "gitea_jwt_cicd" {
  backend        = data.vault_auth_backend.gitea_jwt.path
  role_name      = "gitea_cicd_${local.name}"
  token_policies = concat(["default"], var.ops_policies) # give "${local.name}-ops" role to group of entities
  bound_audiences = [
    var.gitea_app_id,
  ]
  user_claim = "email"
  role_type  = "jwt"
}

# Runtime ("app") policy: read-only access to the app's kvv2 secrets and its
# dynamic postgres credentials.
data "vault_policy_document" "app" {
  rule {
    path         = "kvv2/data/${local.name}/*"
    capabilities = ["read", "list"]
  }
  rule {
    path         = "postgres/creds/${local.name}*"
    capabilities = ["read"]
  }
}

resource "vault_policy" "app" {
  name   = local.name
  policy = data.vault_policy_document.app.hcl
}

View File

@@ -1,20 +0,0 @@
variable "name" {
  # Project / application name; lowercased and used in policy and role names.
  type = string
}

variable "gitea_app_id" {
  # Gitea OAuth application (client) id — bound JWT audience for CI logins.
  type = string
}

variable "ops_policies" {
  # Extra policies attached to the CI token in addition to "default".
  type    = list(string)
  default = []
}

variable "service_account_names" {
  type        = list(string)
  default     = []
  description = "var.name will always be included by default - whitelist service account that can take this policy"
}

variable "service_account_namespaces" {
  type        = list(string)
  default     = []
  description = "var.name will always be included by default - whitelist service account namespaces that can take this policy"
}

View File

@@ -1,46 +0,0 @@
# Runtime roles for one application: a postgres dynamic-credentials role and
# a Kubernetes auth role bound to the app's ServiceAccount(s).
data "vault_auth_backend" "kubernetes" {
  path = "kubernetes"
}

locals {
  name = lower(var.name)
  # Fall back to the app name when no explicit database is given.
  database = var.database == null ? local.name : var.database
  # var.name is always allowed; extra SAs/namespaces are opt-in whitelists.
  bound_service_account_names      = concat([var.name], var.service_account_names)
  bound_service_account_namespaces = concat([var.name], var.service_account_namespaces)
  vault_mount_postgres = { path = "postgres" }
  vault_mount_kvv2     = { path = "kvv2" }
}

# The role became count-indexed when disable_database was introduced; keep
# existing state addresses valid.
moved {
  from = vault_database_secret_backend_role.role
  to   = vault_database_secret_backend_role.role[0]
}

# Dynamic postgres credentials: each lease creates a short-lived login role
# that inherits the app's "<name>_role" grants.
resource "vault_database_secret_backend_role" "role" {
  count   = var.disable_database ? 0 : 1
  backend = local.vault_mount_postgres.path
  name    = local.name
  db_name = "postgres"
  creation_statements = [
    "CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';",
    "GRANT ${local.name}_role TO \"{{name}}\";",
  ]
  revocation_statements = [
    "REASSIGN OWNED BY \"{{name}}\" TO ${local.name}_role;", # reassign must be executed in the database where the reassigned objects are - TODO (one connection per database/app)
    "REVOKE ALL ON DATABASE ${local.database} FROM \"{{name}}\";", # should we drop the role ? -> YES after fixing reassign
  ]
  renew_statements    = []
  rollback_statements = []
}

# Lets the app's ServiceAccount(s) log in to Vault and receive the app policy
# (named after the app) for one hour.
resource "vault_kubernetes_auth_backend_role" "role" {
  backend                          = data.vault_auth_backend.kubernetes.path
  role_name                        = local.name
  bound_service_account_names      = local.bound_service_account_names
  bound_service_account_namespaces = local.bound_service_account_namespaces
  token_ttl                        = 3600
  token_policies                   = ["default", local.name]
  audience                         = "vault"
  alias_name_source                = "serviceaccount_name"
}

View File

@@ -1,16 +0,0 @@
# Effective (lowercased) application name.
output "name" {
  value = local.name
}

# Database the dynamic role revokes privileges on (defaults to the app name).
output "database" {
  value = local.database
}

# Mount paths consumers need to build Vault API paths.
output "mount_paths" {
  value = {
    k8s  = data.vault_auth_backend.kubernetes.path
    pg   = local.vault_mount_postgres.path
    kvv2 = local.vault_mount_kvv2.path
  }
}

# Prefix under the kvv2 mount where this app's secrets live.
output "kvv2_path_prefix" {
  value = format("%s/", local.name)
}

View File

@@ -1,8 +0,0 @@
terraform {
  required_providers {
    vault = {
      # short source form; resolves to registry.terraform.io/hashicorp/vault
      source  = "vault"
      version = ">= 4.4.0"
    }
  }
}

View File

@@ -1,22 +0,0 @@
variable "name" {
  # Application name; lowercased and used to derive role and policy names.
  type = string
}

variable "database" {
  # Database targeted by the revocation statements; defaults to the app name.
  type     = string
  nullable = true
  default  = null
}

variable "disable_database" {
  # Skip the postgres dynamic role (create the k8s auth role only).
  type    = bool
  default = false
}

variable "service_account_names" {
  type        = list(string)
  default     = []
  description = "var.name will always be included by default - whitelist service account that can take this policy"
}

variable "service_account_namespaces" {
  type        = list(string)
  default     = []
  description = "var.name will always be included by default - whitelist service account namespaces that can take this policy"
}

View File

@@ -1,16 +0,0 @@
terraform {
  required_providers {
    vault = {
      # short source form; resolves to registry.terraform.io/hashicorp/vault
      source  = "vault"
      version = "4.4.0"
    }
  }
}

# CI authentication: exchanges the Gitea OIDC token (read from the
# TERRAFORM_VAULT_AUTH_JWT environment variable) for a Vault token.
provider "vault" {
  address = "https://vault.arcodange.lab"
  auth_login_jwt { # TERRAFORM_VAULT_AUTH_JWT environment variable
    mount = "gitea_jwt"
    role  = "gitea_cicd"
  }
}

View File

@@ -1,18 +0,0 @@
# Applications registered with the app_policies module. Each entry must match
# the object type of variable "applications" (attributes: name, policies,
# service_account_names, service_account_namespaces).
# FIX: the cms entry used `ops_policies`, which is not a declared attribute of
# the applications object type (the declared name is `policies`, forwarded to
# the module's ops_policies input); the undeclared attribute fails object-type
# conversion at plan time.
applications = [
  { name = "webapp" },
  { name = "erp" },
  { name = "dance-lessons-coach" },
  {
    # cms CI additionally needs the R2 bucket policy; its cloudflared tunnel
    # pod authenticates with its own ServiceAccount name.
    name                  = "cms"
    policies              = ["factory__cf_r2_arcodange_tf"]
    service_account_names = ["cloudflared"]
  },
  {
    name                       = "crowdsec"
    service_account_namespaces = ["tools"]
  },
  {
    name                       = "plausible"
    service_account_namespaces = ["tools"]
  },
]

View File

@@ -1,19 +0,0 @@
variable "gitea_app_id" {
  # Gitea OAuth application (client) id; bound JWT audience for CI logins.
  type = string
}

# Credentials Vault uses to connect to postgres and rotate user passwords.
variable "POSTGRES_CREDENTIALS_EDITOR_USERNAME" {
  type      = string
  sensitive = true
}

variable "POSTGRES_CREDENTIALS_EDITOR_PASSWORD" {
  type      = string
  sensitive = true
}

variable "applications" {
  # One entry per application managed by the app_policies module.
  # NOTE(review): the attribute name is `policies` (forwarded to the module's
  # `ops_policies` input) — tfvars entries must use that exact name.
  type = set(object({
    name                       = string
    policies                   = optional(list(string), [])
    service_account_names      = optional(list(string), [])
    service_account_namespaces = optional(list(string), [])
  }))
}

View File

@@ -5,9 +5,7 @@ vault: &vault_config
server:
enabled: true
logLevel: trace
auditStorage:
enabled: true
logLevel: debug
ingress:
enabled: true
@@ -15,11 +13,11 @@ vault: &vault_config
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
traefik.ingress.kubernetes.io/router.tls.certresolver: letsencrypt
traefik.ingress.kubernetes.io/router.tls.domains.0.main: arcodange.lab
traefik.ingress.kubernetes.io/router.tls.domains.0.sans: vault.arcodange.lab
traefik.ingress.kubernetes.io/router.tls.domains.0.main: arcodange.duckdns.org
traefik.ingress.kubernetes.io/router.tls.domains.0.sans: vault.arcodange.duckdns.org
traefik.ingress.kubernetes.io/router.middlewares: localIp@file
hosts:
- host: vault.arcodange.lab
- host: vault.arcodange.duckdns.org
paths: []
postStart: [] # https://github.com/hashicorp/vault-helm/blob/main/values.yaml
@@ -52,26 +50,6 @@ vault: &vault_config
enabled: true
annotations: {}
vault-secrets-operator:
defaultVaultConnection:
enabled: true
address: http://hashicorp-vault.tools.svc.cluster.local:8200
defaultAuthMethod:
enabled: true
controller:
manager:
clientCache:
persistenceModel: direct-encrypted
storageEncryption:
enabled: true
mount: vault-secret-operator
keyName: vso-client-cache
transitMount: transit
kubernetes:
role: edit-vso-client-cache
serviceAccount: hashicorp-vault-vault-secrets-operator-controller-manager
tool:
# kind: 'SubChart' or 'HelmChart', if subchart then uncomment Chart.yaml dependency, else comment and use tool library with helm chart template

View File

@@ -5,7 +5,7 @@ description: A Helm chart for Kubernetes
dependencies:
- name: tool
version: 0.1.0
repository: https://gitea.arcodange.lab/api/packages/arcodange-org/helm
repository: https://gitea.arcodange.duckdns.org/api/packages/arcodange-org/helm
- name: pgbouncer
version: 2.3.1
repository: https://icoretech.github.io/helm

View File

@@ -0,0 +1 @@
foo

View File

@@ -14,8 +14,6 @@ pgbouncer: &pgbouncer_config
auth_type: scram-sha-256
auth_query: SELECT uname, phash FROM user_lookup($1)
ignore_startup_parameters: extra_float_digits # unsupported jdbc extra_float_digits=2 argument
server_reset_query: DEALLOCATE ALL # fix prepared statement already exist (crowdsec)
server_idle_timeout: 7200
pgbouncerExporter:
enabled: false

View File

@@ -5,7 +5,7 @@ description: A Helm chart for Kubernetes
dependencies:
- name: tool
version: 0.1.0
repository: https://gitea.arcodange.lab/api/packages/arcodange-org/helm
repository: https://gitea.arcodange.duckdns.org/api/packages/arcodange-org/helm
- name: pgcat
version: 0.1.0
repository: https://improwised.github.io/charts/

View File

@@ -0,0 +1 @@
foo

View File

@@ -1,36 +0,0 @@
- op: add
path: /spec/template/spec/containers/0/volumeMounts/-
value:
name: generated-secrets
mountPath: /run/secrets
- op: add
path: /spec/template/spec/initContainers/0/volumeMounts
value:
- name: generated-secrets
mountPath: /run/secrets
- op: add
path: /spec/template/spec/initContainers/0
value:
name: build-database-url
image: alpine:3.19
command: ["/bin/sh", "-c"]
args:
- |
echo "postgres://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}" > /run/secrets/DATABASE_URL
volumeMounts:
- name: generated-secrets
mountPath: /run/secrets
env:
- name: DB_USER
valueFrom:
secretKeyRef:
name: plausible-db-credentials
key: username
- name: DB_PASS
valueFrom:
secretKeyRef:
name: plausible-db-credentials
key: password
envFrom:
- configMapRef:
name: plausible-config

View File

@@ -1,6 +0,0 @@
# Remote state for the plausible Terraform root module.
terraform {
  backend "gcs" {
    bucket = "arcodange-tf"
    prefix = "tools/plausible/main"
  }
}

View File

@@ -1,30 +0,0 @@
# Registers plausible's Vault roles (k8s auth + postgres dynamic creds) via
# the shared app_roles module from the tools repo.
module "app_roles" {
  source                     = "git::ssh://git@192.168.1.202:2222/arcodange-org/tools.git//hashicorp-vault/iac/modules/app_roles?depth=1&ref=main"
  name                       = "plausible"
  service_account_namespaces = ["tools"]
}

# https://github.com/plausible/community-edition/wiki/configuration#database
#SECRET_KEY_BASE (openssl rand -base64 48)
#
# One random password per required length: 48 for SECRET_KEY_BASE,
# 32 for TOTP_VAULT_KEY.
resource "random_password" "secret" {
  for_each = toset(["48", "32"])
  length   = tonumber(each.value)
  special  = false
}

locals {
  config = {
    SECRET_KEY_BASE = base64encode(random_password.secret["48"].result)
    TOTP_VAULT_KEY  = base64encode(random_password.secret["32"].result)
  }
}

resource "vault_kv_secret_v2" "config" {
  mount = "kvv2"
  name  = "plausible/config"
  # check-and-set guard — presumably to avoid clobbering a manually rotated
  # secret version; confirm intended version pinning.
  cas = 1
  # delete_all_versions = true
  data_json = jsonencode(local.config)
}

View File

@@ -1,16 +0,0 @@
terraform {
  required_providers {
    vault = {
      # short source form; resolves to registry.terraform.io/hashicorp/vault
      source  = "vault"
      version = "4.4.0"
    }
  }
}

# CI authentication: exchanges the Gitea OIDC token (read from the
# TERRAFORM_VAULT_AUTH_JWT environment variable) for a Vault token scoped to
# the plausible project.
provider "vault" {
  address = "https://vault.arcodange.lab"
  auth_login_jwt { # TERRAFORM_VAULT_AUTH_JWT environment variable
    mount = "gitea_jwt"
    role  = "gitea_cicd_plausible"
  }
}

View File

@@ -1,85 +0,0 @@
# Deploys Plausible (community edition) into the tools namespace: the upstream
# chart is inflated via helmCharts, then local VSO/ingress resources are added
# and the Deployment is patched for geoip, config injection and DATABASE_URL.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: tools
# https://kubectl.docs.kubernetes.io/references/kustomize/builtins/#_helmchartinflationgenerator_
helmCharts:
  - name: plausible
    repo: https://charts.pascaliske.dev
    version: 2.0.0
    releaseName: plausible
    valuesFile: plausibleValues.yaml
    namespace: tools
# Add Let's Encrypt TLS to the chart-generated internal route.
patches:
  - target:
      kind: IngressRoute
      name: plausible-route
    patch: |-
      - op: add
        path: /spec/tls
        value:
          certResolver: letsencrypt
          domains:
            - main: arcodange.lab
              sans:
                - analytics.arcodange.lab
resources:
  - resources/vaultauth.yaml
  - resources/vaultdynamicsecret.yaml
  - resources/vaultsecret.yaml
  - resources/configmap.yaml
  - resources/geoipsecret.yaml
  - resources/ingressroute.yaml
# NOTE(review): patchesJson6902 is deprecated in recent kustomize in favour of
# `patches`; the targets below also omit `group: apps` for Deployment —
# confirm they still match with the kustomize version in use.
patchesJson6902:
  - target:
      version: v1
      kind: Deployment
      name: plausible
    patch: |-
      # geoip sidecar (container 1): license key from the VSO-synced secret,
      # and both Country + City editions.
      - op: replace
        path: /spec/template/spec/containers/1/env/2
        value:
          name: GEOIPUPDATE_LICENSE_KEY
          valueFrom:
            secretKeyRef:
              name: plausible-geoip
              key: LICENSE_KEY
      - op: replace
        path: /spec/template/spec/containers/1/env/4
        value:
          name: GEOIPUPDATE_EDITION_IDS
          value: "GeoLite2-Country GeoLite2-City"
      # app container (container 0): point plausible at the City database.
      - op: add
        path: /spec/template/spec/containers/0/env/2
        value:
          name: IP_GEOLOCATION_DB
          value: /geoip/GeoLite2-City.mmdb
      # in-memory volume shared with the build-database-url init container
      # (see add-initcontainer.yaml).
      - op: add
        path: /spec/template/spec/volumes/-
        value:
          name: generated-secrets
          emptyDir:
            medium: Memory
      - op: add
        path: /spec/template/spec/containers/0/envFrom
        value:
          - configMapRef:
              name: plausible-config
      - op: add
        path: /spec/template/spec/initContainers/0/envFrom
        value:
          - configMapRef:
              name: plausible-config
      # give the DB a moment before running migrations.
      - op: replace
        path: /spec/template/spec/initContainers/0/args
        value:
          - >-
            sleep 10 && /entrypoint.sh db migrate
  - target:
      version: v1
      kind: Deployment
      name: plausible
    path: add-initcontainer.yaml

View File

@@ -1,180 +0,0 @@
image:
# -- The registry to pull the image from.
registry: ghcr.io
# -- The repository to pull the image from.
repository: plausible/community-edition
# -- The docker tag, if left empty chart's appVersion will be used.
# @default -- `.Chart.AppVersion`
tag: ''
# -- The pull policy for the controller.
pullPolicy: IfNotPresent
nameOverride: ''
fullnameOverride: ''
controller:
# -- Create a workload for this chart.
enabled: true
# -- Type of the workload object.
kind: Deployment
# -- The number of replicas.
replicas: 1
# -- Additional annotations for the controller object.
annotations: {}
# -- Additional labels for the controller object.
labels: {}
service:
# -- Create a service for exposing this chart.
enabled: true
# -- The service type used.
type: ClusterIP
# -- ClusterIP used if service type is `ClusterIP`.
clusterIP: ''
# -- LoadBalancerIP if service type is `LoadBalancer`.
loadBalancerIP: ''
# -- Allowed addresses when service type is `LoadBalancer`.
loadBalancerSourceRanges: []
# -- Additional annotations for the service object.
annotations: {}
# -- Additional labels for the service object.
labels: {}
serviceMonitor:
# -- Create a service monitor for prometheus operator.
enabled: false
# -- How frequently the exporter should be scraped.
interval: 30s
# -- Timeout value for individual scrapes.
timeout: 10s
# -- Additional annotations for the service monitor object.
annotations: {}
# -- Additional labels for the service monitor object.
labels: {}
ingressRoute:
# -- Create an IngressRoute object for exposing this chart.
create: true
# -- List of [entry points](https://doc.traefik.io/traefik/routing/routers/#entrypoints) on which the ingress route will be available.
entryPoints: [websecure]
# -- [Matching rule](https://doc.traefik.io/traefik/routing/routers/#rule) for the underlying router.
rule: Host(`analytics.arcodange.lab`)
# -- List of [middleware objects](https://doc.traefik.io/traefik/routing/providers/kubernetes-crd/#kind-middleware) for the ingress route.
middlewares:
- name: localIp@file
# -- Use an existing secret containing the TLS certificate.
tlsSecretName: ''
# -- Additional annotations for the ingress route object.
annotations: {}
# -- Additional labels for the ingress route object.
labels: {}
certificate:
# -- Create an Certificate object for the exposed chart.
create: false
# -- List of subject alternative names for the certificate.
dnsNames: []
# -- Name of the secret in which the certificate will be stored. Defaults to the first item in dnsNames.
secretName: ''
issuerRef:
# -- Type of the referenced certificate issuer. Can be "Issuer" or "ClusterIssuer".
kind: ClusterIssuer
# -- Name of the referenced certificate issuer.
name: ''
# -- Additional annotations for the certificate object.
annotations: {}
# -- Additional labels for the certificate object.
labels: {}
env:
# -- Timezone for the container.
- name: TZ
value: Europe/Paris
ports:
http:
# -- Enable the port inside the `Deployment` and `Service` objects.
enabled: true
# -- The port used as internal port and cluster-wide port if `.service.type` == `ClusterIP`.
port: 8000
# -- The external port used if `.service.type` == `NodePort`.
nodePort: null
# -- The protocol used for the service.
protocol: TCP
secret:
# -- Create a new secret object.
create: false
# -- Use an existing secret object.
existingSecret: 'plausible-config'
# -- Secret values used when not using an existing secret. Helm templates are supported for values.
values:
# -- Secret key for session tokens.
SECRET_KEY_BASE: '{{ randAlphaNum 42 | b64enc }}'
# -- Encryption token for TOTP secrets.
TOTP_VAULT_KEY: '{{ randAlphaNum 32 | b64enc }}'
# -- Additional annotations for the secret object.
annotations: {}
# -- Additional labels for the secret object.
labels: {}
geoip:
# -- Enable support for MaxMinds GeoLite2 database.
enabled: true
image:
# -- The repository for the geoip image.
repository: ghcr.io/maxmind/geoipupdate
# -- The docker tag for the geoip image.
tag: v7.1.1
# -- Required. MaxMind account ID.
accountId: '1266329'
# -- Required. Case-sensitive MaxMind license key.
# licenseKey: 'kvv2/data/plausible/geoip LICENSE_KEY'
# -- Optional. Database update frequency. Defaults to "168" which equals 7 days.
frequency: 168
# -- Optional. Specify the database mount path inside the containers.
mountPath: /geoip
serviceAccount:
# -- Create a `ServiceAccount` object.
create: true
# -- Specify the service account used for the controller.
name: ''
# -- Additional annotations for the service account object.
annotations: {}
# -- Additional labels for the service account object.
labels: {}
# -- Pod-level security attributes. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context).
securityContext: {}
# fsGroup: 1000
# runAsNonRoot: true
# runAsGroup: 1000
# runAsUser: 1000
# -- Compute resources used by the container. More info [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- Pod-level affinity. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/hostname
# operator: In
# values:
# - my-node-xyz
# -- Pod-level tolerations. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
tolerations: []
# - key: node-role.kubernetes.io/control-plane
# operator: Exists
# effect: NoSchedule

View File

@@ -1,21 +0,0 @@
# Non-secret runtime configuration for plausible.
apiVersion: v1
kind: ConfigMap
metadata:
  name: plausible-config
  namespace: tools
# Doc: https://github.com/plausible/community-edition/wiki/Configuration
data:
  DB_HOST: pgbouncer.tools
  DB_PORT: !!str 5432 # !!str forces a string; ConfigMap values must not be ints
  DB_NAME: plausible
  BASE_URL: https://analytics.arcodange.lab
  # NOTE(review): credentials embedded in a ConfigMap are visible to anyone
  # who can read it — consider moving this URL into a Secret.
  CLICKHOUSE_DATABASE_URL: http://plausible:plausiblearcodange@clickhouse.tools:8123/plausible
  DB_POOL_SIZE: "30"
  DB_QUEUE_TARGET: "10000" # 10 seconds
  DB_CONNECT_TIMEOUT: "30000" # 30 seconds
  DB_RECONNECT_ATTEMPTS: "5"
  DB_RECONNECT_DELAY: "5000"

View File

@@ -1,24 +0,0 @@
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: plausible-geoip
namespace: tools
spec:
type: kv-v2
# mount path
mount: kvv2
# path of the secret
path: plausible/geoip
# dest k8s secret
destination:
name: plausible-geoip
create: true
# static secret refresh interval
refreshAfter: 30s
# Name of the CRD to authenticate to Vault
vaultAuthRef: plausible

View File

@@ -1,20 +0,0 @@
# Internet-facing route: exposes only the event-ingest API and the tracking
# script paths on the public hostname, fronted by the crowdsec middleware.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: plausible-external
  labels:
    app.kubernetes.io/instance: plausible
    app.kubernetes.io/name: plausible
spec:
  entryPoints:
    - web
  routes:
    - kind: Rule
      match: Host(`analytics.arcodange.fr`) && (PathPrefix(`/api/event`) || PathPrefix(`/js/`))
      middlewares:
        - name: kube-system-crowdsec@kubernetescrd
      services:
        - kind: Service
          name: plausible-web
          namespace: tools
          port: 8000

View File

@@ -1,14 +0,0 @@
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
name: plausible
namespace: tools
spec:
vaultConnectionRef: default
method: kubernetes
mount: kubernetes
kubernetes:
role: plausible
serviceAccount: plausible
audiences:
- vault

View File

@@ -1,25 +0,0 @@
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultDynamicSecret
metadata:
name: plausible-db-credentials
namespace: tools
spec:
# Mount path of the secrets backend
mount: postgres
# Path to the secret
path: creds/plausible
# Where to store the secrets, VSO will create the secret
destination:
create: true
name: plausible-db-credentials
# Restart these pods when secrets rotated
rolloutRestartTargets:
- kind: Deployment
name: plausible
# Name of the CRD to authenticate to Vault
vaultAuthRef: plausible

View File

@@ -1,24 +0,0 @@
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
name: plausible
namespace: tools
spec:
type: kv-v2
# mount path
mount: kvv2
# path of the secret
path: plausible/config
# dest k8s secret
destination:
name: plausible-config
create: true
# static secret refresh interval
refreshAfter: 30s
# Name of the CRD to authenticate to Vault
vaultAuthRef: plausible

View File

@@ -1,23 +0,0 @@
apiVersion: v2
name: prometheus
description: A Helm chart for Kubernetes
dependencies:
  # In-house "tool" library chart (common templates), served from Gitea.
  - name: tool
    version: 0.1.0
    repository: https://gitea.arcodange.lab/api/packages/arcodange-org/helm
  # Upstream prometheus-community chart.
  - name: prometheus
    version: 28.13.0
    repository: https://prometheus-community.github.io/helm-charts
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
version: 0.1.0
appVersion: "v3.10.0"

View File

@@ -1,3 +0,0 @@
{{- /* Render the shared helm-chart-config template from the "tool" library only when tool.kind is "HelmChart". */ -}}
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart-config.tpl" . -}}
{{- end -}}

View File

@@ -1,3 +0,0 @@
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart.tpl" . -}}
{{- end -}}

File diff suppressed because it is too large Load Diff

View File

@@ -1,34 +0,0 @@
# Chart: keydb-custom
# Helm chart tailored for KeyDB (EqAlpha) on 2 Raspberry Pi 5 nodes
# - Mode: master (statefulset index 0) + replica (index 1)
# - Replica runs as replicaof master at startup
# - server-threads = 4
# - Config mounted via ConfigMap
# - Liveness / readiness probes included
# - Persistence via PersistentVolumeClaim (storageClass configurable)
# -----------------------------------------------------------------------------
# NOTE(review): the header above describes a custom KeyDB master/replica chart,
# but this Chart.yaml only declares a dependency on the upstream "redis" chart
# from charts.pascaliske.dev — confirm the header still matches reality.
# -----------------------------------------------------------------------------
# Chart.yaml
# -----------------------------------------------------------------------------
apiVersion: v2
name: redis
description: A Helm chart for Kubernetes
dependencies:
  # In-house "tool" library chart (common templates), served from Gitea.
  - name: tool
    version: 0.1.0
    repository: https://gitea.arcodange.lab/api/packages/arcodange-org/helm
  # Upstream redis chart.
  - name: redis
    version: 2.1.0
    repository: https://charts.pascaliske.dev
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
version: 0.1.0
appVersion: "latest"

View File

@@ -1,2 +0,0 @@
Run `kubectl port-forward -n tools svc/redis 6379:6379` and launch `Redis Insights`

View File

@@ -1,3 +0,0 @@
{{- /* Render the shared helm-chart-config template from the "tool" library only when tool.kind is "HelmChart". */ -}}
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart-config.tpl" . -}}
{{- end -}}

View File

@@ -1,3 +0,0 @@
{{- /* Render the shared helm-chart template from the "tool" library only when tool.kind is "HelmChart". */ -}}
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart.tpl" . -}}
{{- end -}}

View File

@@ -1,197 +0,0 @@
# Values for the upstream redis chart, anchored so the same mapping can be
# re-used verbatim by the "tool" section at the bottom of this file.
# NOTE(review): indentation reconstructed from the pascaliske/redis chart
# layout — in particular, securityContext/resources with UID 59000 are
# assumed to belong to the exporter sidecar, and the fsGroup-999 ones to the
# redis container itself; confirm against the chart's values schema.
redis: &redis_config
  image:
    # -- The repository to pull the image from.
    repository: redis
    # -- The docker tag, if left empty chart's appVersion will be used.
    # @default -- `.Chart.AppVersion`
    tag: ''
    # -- The pull policy for the controller.
    pullPolicy: IfNotPresent
  # -- Optionally supply image pull secrets.
  imagePullSecrets: []
  nameOverride: ''
  fullnameOverride: ''
  controller:
    # -- Create a workload for this chart.
    enabled: true
    # -- Type of the workload object.
    kind: StatefulSet
    # -- The number of replicas.
    replicas: 1
    # -- Additional annotations for the controller object.
    annotations: {}
    # -- Additional labels for the controller object.
    labels: {}
  service:
    # -- Create a service for exposing this chart.
    enabled: true
    # -- The service type used.
    type: ClusterIP
    # -- ClusterIP used if service type is `ClusterIP`.
    clusterIP: ''
    # -- LoadBalancerIP if service type is `LoadBalancer`.
    loadBalancerIP: ''
    # -- Allowed addresses when service type is `LoadBalancer`.
    loadBalancerSourceRanges: []
    # -- Additional annotations for the service object.
    annotations: {}
    # -- Additional labels for the service object.
    labels: {}
  serviceMonitor:
    # -- Create a service monitor for prometheus operator.
    enabled: false
    # -- How frequently the exporter should be scraped.
    interval: 30s
    # -- Timeout value for individual scrapes.
    timeout: 10s
    # -- Additional annotations for the service monitor object.
    annotations: {}
    # -- Additional labels for the service monitor object.
    labels: {}
  redisExporter:
    # -- Enable optional redis exporter instance as sidecar container.
    enabled: false
    # -- Image for the metric exporter
    image:
      # -- The repository to pull the image from.
      repository: oliver006/redis_exporter
      # -- The docker tag, if left empty latest will be used.
      # @default -- `latest`
      tag: 'latest'
      # -- The pull policy for the exporter.
      pullPolicy: IfNotPresent
    # -- Pod-level security attributes. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context).
    securityContext:
      runAsUser: 59000
      runAsGroup: 59000
      allowPrivilegeEscalation: false
      capabilities:
        drop:
          - ALL
    # -- Compute resources used by the container. More info [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
    resources:
      requests:
        cpu: 10m
        memory: 50Mi
      limits:
        cpu: 100m
        memory: 100Mi
  env:
    # -- Timezone for the container.
    - name: TZ
      value: Europe/Paris
  # -- List of extra arguments for the container.
  extraArgs: []
  # - --loglevel warning
  ports:
    redis:
      # -- Enable the port inside the `Controller` and `Service` objects.
      enabled: true
      # -- The port used as internal port and cluster-wide port if `.service.type` == `ClusterIP`.
      port: 6379
      # -- The external port used if `.service.type` == `NodePort`.
      nodePort: null
      # -- The protocol used for the service.
      protocol: TCP
      # -- The application protocol for this port. Used as hint for implementations to offer richer behavior.
      appProtocol: redis
  persistentVolumeClaim:
    # -- Create a new persistent volume claim object.
    create: true
    # -- Mount path of the persistent volume claim object.
    mountPath: /data
    # -- Access mode of the persistent volume claim object.
    accessMode: ReadWriteOnce
    # -- Volume mode of the persistent volume claim object.
    volumeMode: Filesystem
    # -- Storage request size for the persistent volume claim object.
    size: 1Gi
    # -- Storage class name for the persistent volume claim object.
    storageClassName: ''
    # -- Use an existing persistent volume claim object.
    existingPersistentVolumeClaim: ''
    # -- Additional annotations for the persistent volume claim object.
    annotations: {}
    # -- Additional labels for the persistent volume claim object.
    labels: {}
  serviceAccount:
    # -- Specify the service account used for the controller.
    name: ''
  # -- Optional priority class name to be used for pods.
  priorityClassName: ''
  # -- Pod-level security attributes. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context).
  securityContext:
    fsGroup: 999
    runAsNonRoot: true
    runAsGroup: 999
    runAsUser: 999
  # -- Compute resources used by the container. More info [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
  resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  # -- Pod-level affinity. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
  affinity: {}
  # nodeAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: kubernetes.io/hostname
  #             operator: In
  #             values:
  #               - my-node-xyz
  # -- Pod-level tolerations. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
  tolerations: []
  # - key: node-role.kubernetes.io/control-plane
  #   operator: Exists
  #   effect: NoSchedule
  # -- Pod-level node selector. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
  nodeSelector: {}
  # label: value
  # -- Specify any extra containers here as dictionary items - each should have its own key.
  extraContainers: {}
  # container:
  #   name: my-container
  #   image: my-org/my-image
  # -- Specify extra volume mounts for the default containers.
  extraVolumeMounts: []
  # - name: my-volume
  #   mountPath: /path/to/volume
  #   readOnly: false
  # -- Specify extra volumes for the workload.
  extraVolumes: []
  # - name: my-volume
  #   secret:
  #     secretName: my-secret
tool:
  # kind: 'SubChart' or 'HelmChart', if subchart then uncomment Chart.yaml dependency, else comment and use tool library with helm chart template
  kind: 'SubChart'
  repo: https://charts.pascaliske.dev
  chart: redis
  version: 2.1.0
  # Re-use the anchored redis mapping above as the values passed to the chart.
  values: *redis_config

1
tool/Chart.yaml.toremove Normal file
View File

@@ -0,0 +1 @@
foo

View File

@@ -1 +0,0 @@
Library chart