use pascaliske chart for redis
All checks were successful
Helm Charts / Detect changed charts (push) Successful in 12s
Helm Charts / Library charts tool (push) Has been skipped
Helm Charts / Application charts pgcat (push) Has been skipped

This commit is contained in:
2025-12-04 12:58:03 +01:00
parent c46c479dc5
commit 7676196b8a
10 changed files with 211 additions and 260 deletions

View File

@@ -10,8 +10,25 @@
# -----------------------------------------------------------------------------
# Chart.yaml
# -----------------------------------------------------------------------------
apiVersion: v2
name: redis
description: A Helm chart for Kubernetes
dependencies:
  # Shared library chart providing the helm-chart/helm-chart-config templates.
  - name: tool
    version: 0.1.0
    repository: https://gitea.arcodange.duckdns.org/api/packages/arcodange-org/helm
  # Upstream redis chart (pascaliske) used as subchart; its values live under
  # the `redis:` key in values.yaml.
  - name: redis
    version: 2.1.0
    repository: https://charts.pascaliske.dev
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
version: 0.1.0
appVersion: "latest"

View File

@@ -1,12 +0,0 @@
# -----------------------------------------------------------------------------
# templates/NOTES.txt — usage notes printed by `helm install` / `helm status`.
# NOTE(review): `{{""}}` renders as an empty string — presumably a placeholder
# kept across the chart's templates; confirm it is intentional.
# -----------------------------------------------------------------------------
{{""}}
1. Get the application URL by running these commands:
  export POD0=$(kubectl get pods -l app={{ include "keydb-custom.name" . }} -o jsonpath="{.items[0].metadata.name}")
  kubectl logs $POD0
2. To connect via keydb-cli from a pod:
  kubectl run -it --rm --image=eqalpha/keydb --restart=Never keydb-client -- /bin/sh
  # inside the pod:
  keydb-cli -h {{ include "keydb-custom.fullname" . }}-0.{{ include "keydb-custom.fullname" . }}-headless -a "{{ .Values.auth.password }}" PING

View File

@@ -1,11 +0,0 @@
# -----------------------------------------------------------------------------
# templates/_helpers.tpl
# -----------------------------------------------------------------------------
{{""}}
{{- /*
keydb-custom.name: chart-level name — .Values.nameOverride when set, otherwise
.Chart.Name, truncated to 63 characters (Kubernetes DNS label limit).
*/ -}}
{{- define "keydb-custom.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 -}}
{{- end -}}
{{- /*
keydb-custom.fullname: currently identical to keydb-custom.name (no release
prefix is applied — all objects share one flat name per namespace).
*/ -}}
{{- define "keydb-custom.fullname" -}}
{{- printf "%s" (include "keydb-custom.name" .) -}}
{{- end -}}

View File

@@ -1,24 +0,0 @@
# -----------------------------------------------------------------------------
# templates/configmap.yaml — renders keydb.conf from chart values; mounted by
# the StatefulSet at /etc/keydb.
# -----------------------------------------------------------------------------
{{""}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: '{{ include "keydb-custom.fullname" . }}-conf'
  labels:
    app: {{ include "keydb-custom.name" . }}
data:
  keydb.conf: |
    # Minimal keydb.conf generated from values
    server-threads {{ .Values.keydb.serverThreads }}
    # NOTE(review): default value is a percentage ("70%") — confirm KeyDB
    # accepts percentages for maxmemory; classic Redis expects bytes.
    maxmemory {{ .Values.keydb.maxmemory }}
    maxmemory-policy {{ .Values.keydb.maxmemoryPolicy }}
    appendonly {{ .Values.keydb.appendonly }}
    # Auth comes from requirepass set via environment (see note below), so
    # protected-mode is disabled for in-cluster access.
    protected-mode no
    bind 0.0.0.0
    port {{ .Values.service.port }}
    active-replica yes
    scratch-file-path /tmp/
    jemalloc-bg-thread no # doesn't work with rpi5 16ko page size instead of classic 4ko size
    # requirepass is handled via env (not in file)

View File

@@ -1,17 +0,0 @@
# -----------------------------------------------------------------------------
# templates/headless-svc.yaml — headless Service giving each StatefulSet pod a
# stable DNS record (<pod>.<fullname>-headless), used by replicas to find the
# master pod (ordinal 0).
# -----------------------------------------------------------------------------
{{""}}
apiVersion: v1
kind: Service
metadata:
  name: '{{ include "keydb-custom.fullname" . }}-headless'
  labels:
    app: {{ include "keydb-custom.name" . }}
spec:
  # clusterIP: None => headless service (per-pod DNS, no virtual IP).
  clusterIP: None
  selector:
    app: {{ include "keydb-custom.name" . }}
  ports:
    - port: {{ .Values.service.port }}
      name: keydb

View File

@@ -0,0 +1,3 @@
{{- /*
Rendered only when the chart is operated in "HelmChart" mode
(.Values.tool.kind); in "SubChart" mode this file produces no output.
NOTE(review): presumably emits a HelmChartConfig object via the `tool` library
chart — confirm against the library's "tool.helm-chart-config.tpl" template.
*/ -}}
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart-config.tpl" . -}}
{{- end -}}

View File

@@ -0,0 +1,3 @@
{{- /*
Rendered only when the chart is operated in "HelmChart" mode
(.Values.tool.kind); in "SubChart" mode this file produces no output.
NOTE(review): presumably emits a k3s-style HelmChart object via the `tool`
library chart — confirm against the library's "tool.helm-chart.tpl" template.
*/ -}}
{{- if eq .Values.tool.kind "HelmChart" -}}
{{- include "tool.helm-chart.tpl" . -}}
{{- end -}}

View File

@@ -1,11 +0,0 @@
# -----------------------------------------------------------------------------
# templates/secret-auth.yaml — Opaque Secret carrying the KeyDB password,
# consumed by the StatefulSet via secretKeyRef (KEYDB_PASSWORD env var).
# -----------------------------------------------------------------------------
{{""}}
apiVersion: v1
kind: Secret
metadata:
  name: '{{ include "keydb-custom.fullname" . }}-auth'
type: Opaque
stringData:
  # SECURITY: the default for .Values.auth.password is a plaintext value in
  # values.yaml committed to the repo — prefer overriding it at install time
  # (--set / external secret store).
  password: "{{ .Values.auth.password }}"

View File

@@ -1,135 +0,0 @@
# -----------------------------------------------------------------------------
# templates/statefulset.yaml
#
# KeyDB StatefulSet: pod ordinal 0 boots as master; higher ordinals wait for
# the master's headless DNS entry, then attach themselves with REPLICAOF.
# -----------------------------------------------------------------------------
{{""}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "keydb-custom.fullname" . }}
  labels:
    app: {{ include "keydb-custom.name" . }}
spec:
  serviceName: {{ include "keydb-custom.fullname" . }}-headless
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ include "keydb-custom.name" . }}
  template:
    metadata:
      labels:
        app: {{ include "keydb-custom.name" . }}
    spec:
      {{- /* Emit a single `affinity:` key. The previous version rendered the
             key once per enabled affinity kind, producing a duplicate mapping
             key (last-wins in most parsers) when both were enabled. */}}
      {{- if or .Values.nodeAffinity .Values.podAntiAffinity.enabled }}
      affinity:
        {{- if .Values.nodeAffinity }}
        {{- /* nindent 10 so the map nests under the key; the previous
               nindent 8 emitted the content at the key's own level,
               leaving nodeAffinity null. */}}
        nodeAffinity: {{- toYaml .Values.nodeAffinity | nindent 10 }}
        {{- end }}
        {{- if .Values.podAntiAffinity.enabled }}
        # Hard spread: at most one KeyDB pod per node (by hostname).
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - {{ include "keydb-custom.name" . }}
              topologyKey: "kubernetes.io/hostname"
        {{- end }}
      {{- end }}
      containers:
        - name: keydb
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: keydb
              containerPort: {{ .Values.service.port }}
          env:
            - name: KEYDB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "keydb-custom.fullname" . }}-auth
                  key: password
            - name: KEYDB_CONF_FILE
              value: "/etc/keydb/keydb.conf"
          volumeMounts:
            - name: keydb-conf
              mountPath: /etc/keydb
            # NOTE(review): keydb.conf (see configmap.yaml) sets no `dir`;
            # confirm the eqalpha/keydb image writes data under /bitnami/keydb,
            # otherwise this PVC persists nothing.
            - name: data
              mountPath: /bitnami/keydb
          resources: {{- toYaml .Values.resources | nindent 12 }}
          command:
            - sh
            - -c
            - |
              # Startup logic:
              # - pod index 0 starts as master
              # - pod index >0 will start and then configure REPLICAOF to master
              POD_NAME=$(hostname)
              # derive ordinal from pods named <release>-<ordinal>
              ORDINAL=${POD_NAME##*-}
              # start keydb-server in background to accept CONFIG/CLI commands
              keydb-server /etc/keydb/keydb.conf &
              sleep 1
              if [ "${ORDINAL}" != "0" ]; then
                # wait for master to be ready
                MASTER_HOST={{ include "keydb-custom.fullname" . }}-0.{{ include "keydb-custom.fullname" . }}-headless
                until nc -z ${MASTER_HOST} {{ .Values.service.port }}; do sleep 1; done
                # configure replication (use CLI to set replicaof)
                if [ -n "$KEYDB_PASSWORD" ]; then
                  keydb-cli -a "$KEYDB_PASSWORD" REPLICAOF ${MASTER_HOST} {{ .Values.service.port }}
                else
                  keydb-cli REPLICAOF ${MASTER_HOST} {{ .Values.service.port }}
                fi
                echo "Configured replicaof ${MASTER_HOST}:{{ .Values.service.port }}"
                # tail logs (block) to keep container running
                wait
              else
                # master: block on server process
                wait
              fi
          {{- if .Values.readinessProbe.enabled }}
          {{- /* Key moved inside the conditional: previously a bare
                 `readinessProbe:` (null value) was rendered when disabled. */}}
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - |
                  # quick PING check
                  if [ -n "$KEYDB_PASSWORD" ]; then
                    keydb-cli -a "$KEYDB_PASSWORD" PING > /dev/null 2>&1 && exit 0 || exit 1
                  else
                    keydb-cli PING > /dev/null 2>&1 && exit 0 || exit 1
                  fi
            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
          {{- end }}
          {{- if .Values.livenessProbe.enabled }}
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - |
                  if [ -n "$KEYDB_PASSWORD" ]; then
                    keydb-cli -a "$KEYDB_PASSWORD" PING > /dev/null 2>&1 && exit 0 || exit 1
                  else
                    keydb-cli PING > /dev/null 2>&1 && exit 0 || exit 1
                  fi
            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
          {{- end }}
      volumes:
        - name: keydb-conf
          configMap:
            name: {{ include "keydb-custom.fullname" . }}-conf
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: {{- toYaml .Values.persistence.accessModes | nindent 10 }}
        resources:
          requests:
            storage: {{ .Values.persistence.size }}
        storageClassName: "{{ .Values.persistence.storageClass }}"

View File

@@ -1,59 +1,197 @@
# -----------------------------------------------------------------------------
# values.yaml
#
# Values for the pascaliske `redis` subchart live under the `redis:` key and
# are also passed to the `tool` library (HelmChart mode) via *redis_config.
# -----------------------------------------------------------------------------
redis: &redis_config
  image:
    # -- The repository to pull the image from.
    repository: redis
    # -- The docker tag, if left empty chart's appVersion will be used.
    # @default -- `.Chart.AppVersion`
    tag: ''
    # -- The pull policy for the controller.
    pullPolicy: IfNotPresent

  # -- Optionally supply image pull secrets.
  imagePullSecrets: []

  nameOverride: ''
  fullnameOverride: ''

  controller:
    # -- Create a workload for this chart.
    enabled: true
    # -- Type of the workload object.
    kind: StatefulSet
    # -- The number of replicas.
    replicas: 1
    # -- Additional annotations for the controller object.
    annotations: {}
    # -- Additional labels for the controller object.
    labels: {}

  service:
    # -- Create a service for exposing this chart.
    enabled: true
    # -- The service type used.
    type: ClusterIP
    # -- ClusterIP used if service type is `ClusterIP`.
    clusterIP: ''
    # -- LoadBalancerIP if service type is `LoadBalancer`.
    loadBalancerIP: ''
    # -- Allowed addresses when service type is `LoadBalancer`.
    loadBalancerSourceRanges: []
    # -- Additional annotations for the service object.
    annotations: {}
    # -- Additional labels for the service object.
    labels: {}

  serviceMonitor:
    # -- Create a service monitor for prometheus operator.
    enabled: false
    # -- How frequently the exporter should be scraped.
    interval: 30s
    # -- Timeout value for individual scrapes.
    timeout: 10s
    # -- Additional annotations for the service monitor object.
    annotations: {}
    # -- Additional labels for the service monitor object.
    labels: {}

  redisExporter:
    # -- Enable optional redis exporter instance as sidecar container.
    enabled: false
    # -- Image for the metric exporter
    image:
      # -- The repository to pull the image from.
      repository: oliver006/redis_exporter
      # -- The docker tag, if left empty latest will be used.
      # @default -- `latest`
      tag: 'latest'
      # -- The pull policy for the exporter.
      pullPolicy: IfNotPresent
    # -- Pod-level security attributes. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context).
    securityContext:
      runAsUser: 59000
      runAsGroup: 59000
      allowPrivilegeEscalation: false
      capabilities:
        drop:
          - ALL
    # -- Compute resources used by the container. More info [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
    resources:
      requests:
        cpu: 10m
        memory: 50Mi
      limits:
        cpu: 100m
        memory: 100Mi

  env:
    # -- Timezone for the container.
    - name: TZ
      value: Europe/Paris

  # -- List of extra arguments for the container.
  extraArgs: []
  # - --loglevel warning

  ports:
    redis:
      # -- Enable the port inside the `Controller` and `Service` objects.
      enabled: true
      # -- The port used as internal port and cluster-wide port if `.service.type` == `ClusterIP`.
      port: 6379
      # -- The external port used if `.service.type` == `NodePort`.
      nodePort: null
      # -- The protocol used for the service.
      protocol: TCP
      # -- The application protocol for this port. Used as hint for implementations to offer richer behavior.
      appProtocol: redis

  persistentVolumeClaim:
    # -- Create a new persistent volume claim object.
    create: true
    # -- Mount path of the persistent volume claim object.
    mountPath: /data
    # -- Access mode of the persistent volume claim object.
    accessMode: ReadWriteOnce
    # -- Volume mode of the persistent volume claim object.
    volumeMode: Filesystem
    # -- Storage request size for the persistent volume claim object.
    size: 1Gi
    # -- Storage class name for the persistent volume claim object.
    storageClassName: ''
    # -- Use an existing persistent volume claim object.
    existingPersistentVolumeClaim: ''
    # -- Additional annotations for the persistent volume claim object.
    annotations: {}
    # -- Additional labels for the persistent volume claim object.
    labels: {}

  serviceAccount:
    # -- Specify the service account used for the controller.
    name: ''

  # -- Optional priority class name to be used for pods.
  priorityClassName: ''

  # -- Pod-level security attributes. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context).
  securityContext: {}
  # fsGroup: 1000
  # runAsNonRoot: true
  # runAsGroup: 1000
  # runAsUser: 1000

  # -- Compute resources used by the container. More info [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
  resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

  # -- Pod-level affinity. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
  affinity: {}
  # nodeAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: kubernetes.io/hostname
  #             operator: In
  #             values:
  #               - my-node-xyz

  # -- Pod-level tolerations. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
  tolerations: []
  # - key: node-role.kubernetes.io/control-plane
  #   operator: Exists
  #   effect: NoSchedule

  # -- Pod-level node selector. More info [here](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling).
  nodeSelector: {}
  # label: value

  # -- Specify any extra containers here as dictionary items - each should have its own key.
  extraContainers: {}
  # container:
  #   name: my-container
  #   image: my-org/my-image

  # -- Specify extra volume mounts for the default containers.
  extraVolumeMounts: []
  # - name: my-volume
  #   mountPath: /path/to/volume
  #   readOnly: false

  # -- Specify extra volumes for the workload.
  extraVolumes: []
  # - name: my-volume
  #   secret:
  #     secretName: my-secret

tool:
  # kind: 'SubChart' or 'HelmChart', if subchart then uncomment Chart.yaml dependency, else comment and use tool library with helm chart template
  kind: 'SubChart'
  repo: https://charts.pascaliske.dev
  chart: redis
  version: 2.1.0
  values: *redis_config