diff --git a/chart/values.yaml b/chart/values.yaml index 809102c..7405b4f 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -5,4 +5,5 @@ tools: - hashicorp-vault - crowdsec - redis - - clickhouse \ No newline at end of file + - clickhouse + - grafana \ No newline at end of file diff --git a/grafana/Chart.yaml b/grafana/Chart.yaml new file mode 100644 index 0000000..d0018f7 --- /dev/null +++ b/grafana/Chart.yaml @@ -0,0 +1,34 @@ +# Chart: grafana +# Helm chart wrapping the upstream Grafana chart for this cluster +# - Pulled in as a dependency alongside the local "tool" library chart +# - Exposed through a Traefik ingress (see values.yaml: ingress) +# - ClickHouse datasource and dashboard provisioned from values.yaml +# - Admin credentials set in values.yaml (adminUser / adminPassword) +# - Persistence disabled by default (persistence.enabled: false) +# - Modest resource requests/limits (100m CPU / 128Mi memory) +# ----------------------------------------------------------------------------- +# Chart.yaml +# ----------------------------------------------------------------------------- +apiVersion: v2 +name: grafana +description: A Helm chart for Kubernetes + +dependencies: +- name: tool + version: 0.1.0 + repository: https://gitea.arcodange.duckdns.org/api/packages/arcodange-org/helm +- name: grafana + version: 10.3.0 + repository: https://grafana.github.io/helm-charts + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. 
+type: application +version: 0.1.0 +appVersion: "latest" \ No newline at end of file diff --git a/grafana/templates/helm-chart-config.yaml b/grafana/templates/helm-chart-config.yaml new file mode 100644 index 0000000..31cb271 --- /dev/null +++ b/grafana/templates/helm-chart-config.yaml @@ -0,0 +1,3 @@ +{{- if eq .Values.tool.kind "HelmChart" -}} +{{- include "tool.helm-chart-config.tpl" . -}} +{{- end -}} \ No newline at end of file diff --git a/grafana/templates/helm-chart.yaml b/grafana/templates/helm-chart.yaml new file mode 100644 index 0000000..c6b793d --- /dev/null +++ b/grafana/templates/helm-chart.yaml @@ -0,0 +1,3 @@ +{{- if eq .Values.tool.kind "HelmChart" -}} +{{- include "tool.helm-chart.tpl" . -}} +{{- end -}} \ No newline at end of file diff --git a/grafana/values.yaml b/grafana/values.yaml new file mode 100644 index 0000000..84dcd7f --- /dev/null +++ b/grafana/values.yaml @@ -0,0 +1,1599 @@ +grafana: &grafana_config + global: + + rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-role + # useExistingClusterRole: name-of-some-clusterRole + pspEnabled: false + pspUseAppArmor: false + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + serviceAccount: + create: true + name: + nameTest: + ## ServiceAccount labels. + labels: {} + ## Service account annotations. Can be templated. 
+ # annotations: + # eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + + ## autoMount is deprecated in favor of automountServiceAccountToken + # autoMount: false + automountServiceAccountToken: false + + replicas: 1 + + ## Create a headless service for the deployment + headlessService: false + + ## Should the service account be auto mounted on the pod + automountServiceAccountToken: true + + ## Create HorizontalPodAutoscaler object for deployment type + # + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + + ## See `kubectl explain poddisruptionbudget.spec` for more + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + podDisruptionBudget: {} + # apiVersion: "" + # minAvailable: 1 + # maxUnavailable: 1 + # unhealthyPodEvictionPolicy: IfHealthyBudget + + ## See `kubectl explain deployment.spec.strategy` for more + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + deploymentStrategy: + type: RollingUpdate + + readinessProbe: + httpGet: + path: /api/health + port: 3000 + + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: "default-scheduler" + + image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/grafana + # Overrides the Grafana image tag whose default is the chart appVersion + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Can be templated. 
+ ## + pullSecrets: [] + # - myRegistrKeySecretName + + testFramework: + enabled: false + ## The type of Helm hook used to run this test. Defaults to test. + ## ref: https://helm.sh/docs/topics/charts_hooks/#the-available-hooks + ## + # hookType: test + image: + # -- The Docker registry + registry: docker.io + repository: bats/bats + tag: "v1.4.1" + imagePullPolicy: IfNotPresent + securityContext: {} + containerSecurityContext: {} + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # dns configuration for pod + dnsPolicy: ~ + dnsConfig: {} + # nameservers: + # - 8.8.8.8 + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + securityContext: + runAsNonRoot: true + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + # Enable creating the grafana configmap + createConfigmap: true + + # Extra configmaps to mount in grafana pods + # Values are templated. + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + # optional: false + + + extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + + # Apply extra labels to common labels. 
+ extraLabels: {} + + ## Assign a PriorityClassName to pods if set + # priorityClassName: + + downloadDashboardsImage: + # -- The Docker registry + registry: docker.io + repository: curlimages/curl + tag: 8.9.1 + sha: "" + pullPolicy: IfNotPresent + + downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + + ## Pod Annotations + # podAnnotations: {} + + ## ConfigMap Annotations + # configMapAnnotations: {} + # argocd.argoproj.io/sync-options: Replace=true + + ## Pod Labels + # podLabels: {} + + podPortName: grafana + gossipPortName: gossip + ## Deployment annotations + # annotations: {} + + ## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). + ## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. + ## ref: http://kubernetes.io/docs/user-guide/services/ + ## + service: + enabled: true + type: ClusterIP + # Set the ip family policy to configure dual-stack see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services) + ipFamilyPolicy: "" + # Sets the families that should be supported and the order in which they should be applied to ClusterIP as well. Can be IPv4 and/or IPv6. + ipFamilies: [] + loadBalancerIP: "" + loadBalancerClass: "" + loadBalancerSourceRanges: [] + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + ## Service annotations. Can be templated. + annotations: {} + labels: {} + portName: service + # Adds the appProtocol field to the service. This allows to work with istio protocol selection. 
Ex: "http" or "tcp" + appProtocol: "" + sessionAffinity: "" + + serviceMonitor: + ## If true, a ServiceMonitor CR is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 30s + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + metricRelabelings: [] + basicAuth: {} + targetLabels: [] + + extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + + # overrides pod.spec.hostAliases in the grafana deployment's pods + hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + + ingress: + enabled: true + annotations: + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" + traefik.ingress.kubernetes.io/router.tls.certresolver: letsencrypt + traefik.ingress.kubernetes.io/router.tls.domains.0.main: arcodange.duckdns.org + traefik.ingress.kubernetes.io/router.tls.domains.0.sans: grafana.arcodange.duckdns.org + traefik.ingress.kubernetes.io/router.middlewares: localIp@file + # NOTE: the grafana chart expects ingress.hosts as a list of hostname strings + hosts: + - grafana.arcodange.duckdns.org + + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + type: pvc + enabled: false + # storageClassName: default + ## (Optional) Use this to bind the claim to an existing PersistentVolume (PV) by name. + volumeName: "" + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + ## Sub-directory of the PV to mount. Can be templated. + # subPath: "" + ## Name of an existing PVC. Can be templated. + # existingClaim: + ## Extra labels to apply to a PVC. 
+ extraPvcLabels: {} + disableWarning: false + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + + ## If 'lookupVolumeName' is set to true, Helm will attempt to retrieve + ## the current value of 'spec.volumeName' and incorporate it into the template. + lookupVolumeName: true + + initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the grafana-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + # -- The Docker registry + registry: docker.io + repository: library/busybox + tag: "1.31.1" + sha: "" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + securityContext: + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + seccompProfile: + type: RuntimeDefault + capabilities: + add: + - CHOWN + drop: + - ALL + + # Administrator credentials when not using an existing secret (see below). SECURITY: this plaintext password is committed to VCS -- move it to a Kubernetes Secret and reference it via admin.existingSecret + adminUser: admin + adminPassword: grafanaarcodange + + # Use an existing secret for the admin user. + admin: + ## Name of the secret. Can be templated. 
+ existingSecret: "" + userKey: admin-user + passwordKey: admin-password + + ## Define command to be executed at startup by grafana container + ## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) + ## Default is "run.sh" as defined in grafana's Dockerfile + # command: + # - "sh" + # - "/run.sh" + + ## Optionally define args if command is used + ## Needed if using `hashicorp/envconsul` to manage secrets + ## By default no arguments are set + # args: + # - "-secret" + # - "secret/grafana" + # - "./grafana" + + ## Extra environment variables that will be pass onto deployment pods + ## + ## to provide grafana with access to CloudWatch on AWS EKS: + ## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) + ## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the + ## same oidc eks provider as noted before (same as the existing line) + ## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name + ## + ## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", + ## + ## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess + ## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) + ## + ## env: + ## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here + ## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token + ## AWS_REGION: us-east-1 + ## + ## 5. uncomment the EKS section in extraSecretMounts: below + ## 6. 
uncomment the annotation section in the serviceAccount: above + ## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + + env: {} + + ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + + ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment + ## This can be useful for auth tokens, etc. Value is templated. + envFromSecret: "" + + ## Sensible environment variables that will be rendered as new secret object + ## This can be useful for auth tokens, etc. + ## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm + ## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function + envRenderSecret: {} + + ## The names of secrets in the same kubernetes namespace which contain values to be added to the environment + ## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. + ## Name is templated. + envFromSecrets: [] + ## - name: secret-name + ## prefix: prefix + ## optional: true + + ## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment + ## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. + ## Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core + envFromConfigMaps: [] + ## - name: configmap-name + ## prefix: prefix + ## optional: true + + # Inject Kubernetes services as environment variables. 
+ # See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables + enableServiceLinks: true + + ## Additional grafana server secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # optional: false + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + + ## Additional grafana server volume mounts + # Defines additional volume mounts. + extraVolumeMounts: [] + # - name: extra-volume-0 + # mountPath: /mnt/volume0 + # readOnly: true + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # - name: grafana-secrets + # mountPath: /mnt/volume2 + + ## Additional Grafana server volumes + extraVolumes: [] + # - name: extra-volume-0 + # existingClaim: volume-claim + # - name: extra-volume-1 + # hostPath: + # path: /usr/shared/ + # type: "" + # - name: grafana-secrets + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "grafana-env-spc" + + ## Container Lifecycle Hooks. 
Execute a specific bash command or make an HTTP request + lifecycleHooks: {} + # postStart: + # exec: + # command: [] + + ## Pass the plugins you want installed as a list. + ## + plugins: + - grafana-clickhouse-datasource + # - digrich-bubblechart-panel + # - grafana-clock-panel + ## You can also use other plugin download URL, as long as they are valid zip files, + ## and specify the name of the plugin after the semicolon. Like this: + # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource + + ## Configure grafana datasources + ## ref: http://docs.grafana.org/administration/provisioning/#datasources + ## + datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: ClickHouse + type: grafana-clickhouse-datasource + access: proxy + isDefault: false + jsonData: + server: "http://clickhouse.tools.svc.cluster.local:8123" + defaultDatabase: "default" + secureJsonData: {} + # - name: Prometheus + # type: prometheus + # url: http://prometheus-prometheus-server + # access: proxy + # isDefault: true + # - name: CloudWatch + # type: cloudwatch + # access: proxy + # uid: cloudwatch + # editable: false + # jsonData: + # authType: default + # defaultRegion: us-east-1 + # deleteDatasources: [] + # - name: Prometheus + + ## Configure grafana alerting (can be templated) + ## ref: https://docs.grafana.com/alerting/set-up/provision-alerting-resources/file-provisioning/ + ## + alerting: {} + # policies.yaml: + # apiVersion: 1 + # policies: + # - orgId: 1 + # receiver: first_uid + # + # rules.yaml: + # apiVersion: 1 + # groups: + # - orgId: 1 + # name: '{{ .Chart.Name }}_my_rule_group' + # folder: my_first_folder + # interval: 60s + # rules: + # - uid: my_id_1 + # title: my_first_rule + # condition: A + # data: + # - refId: A + # datasourceUid: '-100' + # model: + # conditions: + # - evaluator: + # params: + # - 3 + # type: gt + # operator: + # type: and + # query: + # params: + # - A + # reducer: + # type: 
last + # type: query + # datasource: + # type: __expr__ + # uid: '-100' + # expression: 1==0 + # intervalMs: 1000 + # maxDataPoints: 43200 + # refId: A + # type: math + # dashboardUid: my_dashboard + # panelId: 123 + # noDataState: Alerting + # for: 60s + # annotations: + # some_key: some_value + # labels: + # team: sre_team_1 + # + # contactpoints.yaml: + # secret: + # apiVersion: 1 + # contactPoints: + # - orgId: 1 + # name: cp_1 + # receivers: + # - uid: first_uid + # type: pagerduty + # settings: + # integrationKey: XXX + # severity: critical + # class: ping failure + # component: Grafana + # group: app-stack + # summary: | + # {{ `{{ include "default.message" . }}` }} + # + # templates.yaml: + # apiVersion: 1 + # templates: + # - orgId: 1 + # name: my_first_template + # template: | + # {{ ` + # {{ define "my_first_template" }} + # Custom notification message + # {{ end }} + # ` }} + # + # mutetimes.yaml + # apiVersion: 1 + # muteTimes: + # - orgId: 1 + # name: mti_1 + # # refer to https://prometheus.io/docs/alerting/latest/configuration/#time_interval-0 + # time_intervals: {} + + ## Configure notifiers + ## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels + ## + notifiers: {} + # notifiers.yaml: + # notifiers: + # - name: email-notifier + # type: email + # uid: email1 + # # either: + # org_id: 1 + # # or + # org_name: Main Org. 
+ # is_default: true + # settings: + # addresses: an_email_address@example.com + # delete_notifiers: + + ## Configure grafana dashboard providers + ## ref: http://docs.grafana.org/administration/provisioning/#dashboards + ## + ## `path` must be /var/lib/grafana/dashboards/ + ## + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'ClickHouse' + folder: 'ClickHouse' + type: file + disableDeletion: false + options: + path: /var/lib/grafana/dashboards/clickhouse + # - name: 'default' + # orgId: 1 + # folder: '' + # type: file + # disableDeletion: false + # editable: true + # options: + # path: /var/lib/grafana/dashboards/default + + ## Configure how curl fetches remote dashboards. The beginning dash is required. + ## NOTE: This sets the default short flags for all dashboards, but these + ## defaults can be overridden individually for each dashboard by setting + ## curlOptions. See the example dashboards section below. + ## + ## -s - silent mode + ## -k - allow insecure (eg: non-TLS) connections + ## -f - fail fast + ## See the curl documentation for additional options + ## + defaultCurlOptions: "-skf" + + ## Configure grafana dashboard to import + ## NOTE: To use dashboards you must also enable/configure dashboardProviders + ## ref: https://grafana.com/dashboards + ## + ## dashboards per provider, use provider name as key. 
+ ## + dashboards: + ClickHouse: + clickhouse-query-analysis: + url: "https://grafana.com/api/dashboards/13500/revisions/1/download" + curlOptions: "-sLf" + datasource: ClickHouse + + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # curlOptions: "-sLf" + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + # local-dashboard-gitlab: + # url: https://example.com/repository/test-gitlab.json + # gitlabToken: '' + # local-dashboard-bitbucket: + # url: https://example.com/repository/test-bitbucket.json + # bearerToken: '' + # local-dashboard-azure: + # url: https://example.com/repository/test-azure.json + # basic: '' + # acceptHeader: '*/*' + + ## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. + ## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. + ## ConfigMap data example: + ## + ## data: + ## example-dashboard.json: | + ## RAW_JSON + ## + dashboardsConfigMaps: {} + # default: "" + + ## Grafana's primary configuration + ## NOTE: values in map will be converted to ini format + ## ref: http://docs.grafana.org/installation/configuration/ + ## + grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + server: + domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ tpl (.Values.ingress.hosts | first) . 
}}{{ else }}''{{ end }}" + ## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: + ## LDAP Authentication can be enabled with the following values on grafana.ini + ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + ## Grafana's alerting configuration + # unified_alerting: + # enabled: true + # rule_version_record_limit: "5" + + ## Grafana's LDAP configuration + ## Templated by the template in _helpers.tpl + ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled + ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap + ## ref: http://docs.grafana.org/installation/ldap/#configuration + ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. 
+ existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + + # When process namespace sharing is enabled, processes in a container are visible to all other containers in the same pod + # This parameter is added because the ldap reload api is not working https://grafana.com/docs/grafana/latest/developers/http_api/admin/#reload-ldap-configuration + # To allow an extraContainer to restart the Grafana container + shareProcessNamespace: false + + ## Grafana's SMTP configuration + ## NOTE: To enable, grafana.ini must be configured with smtp.enabled + ## ref: http://docs.grafana.org/installation/configuration/#smtp + smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. + existingSecret: "" + userKey: "user" + passwordKey: "password" + + ## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + ## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards + sidecar: + image: + # -- The Docker registry + registry: quay.io + repository: kiwigrid/k8s-sidecar + tag: 2.1.2 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + readinessProbe: {} + livenessProbe: {} + # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. 
Defaults to INFO + # logLevel: INFO + alerts: + enabled: false + # Additional environment variables for the alerts sidecar + env: {} + ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with alert are marked with (can be templated) + label: grafana_alert + # value of label that the configmaps with alert are set to (can be templated) + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for alert config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # + # resourceName: comma separated list of resource names to be fetched/checked by this sidecar. + # per default all resources of the type defined in {{ .Values.sidecar.alerts.resource }} will be checked. + # This e.g. allows stricter RBAC rules which are limited to the resources meant for the sidecars. + # resourceName: "secret/alerts-1,configmap/alerts-0" + resourceName: "" + # + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. 
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # maxTotalRetries: Total number of retries to allow for any http request. + # Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry. + # maxTotalRetries: 5 + # + # maxConnectRetries: How many connection-related errors to retry on for any http request. + # These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. + # maxConnectRetries: 10 + # + # maxReadRetries: How many times to retry on read errors for any http request + # These errors are raised after the request was sent to the server, so the request may have side-effects. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. + # maxReadRetries: 5 + # + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" + # Absolute path to a script to execute after a configmap got reloaded. + # It runs before calls to REQ_URI. If the file is not executable it will be passed to sh. + # Otherwise, it's executed as is. Shebangs known to work are #!/bin/sh and #!/usr/bin/env python + script: null + skipReload: false + # This is needed if skipReload is true, to load any alerts defined at startup time. + # Deploy the alert sidecar as an initContainer. 
+ initAlerts: false + # Use native sidecar https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/ + # restartPolicy: Always + # # only applies to native sidecars + # startupProbe: + # httpGet: + # path: /healthz + # port: 8080 + # initialDelaySeconds: 5 + # periodSeconds: 5 + # failureThreshold: 60 # 5 minutes + # Additional alerts sidecar volume mounts + extraMounts: [] + # Sets the size limit of the alert sidecar emptyDir volume + sizeLimit: "" + dashboards: + enabled: false + # Additional environment variables for the dashboards sidecar + env: {} + ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + SCProvider: true + # label that the configmaps with dashboards are marked with (can be templated) + label: grafana_dashboard + # value of label that the configmaps with dashboards are set to (can be templated) + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces. + searchNamespace: null + # Method to use to detect ConfigMap changes. 
With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. + folderAnnotation: null + # + # resourceName: comma separated list of resource names to be fetched/checked by this sidecar. + # per default all resources of the type defined in {{ .Values.sidecar.dashboards.resource }} will be checked. + # This e.g. allows stricter RBAC rules which are limited to the resources meant for the sidecars. + # resourceName: "secret/dashboards-0,configmap/dashboards-1" + resourceName: "" + # + # maxTotalRetries: Total number of retries to allow for any http request. + # Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry. + # maxTotalRetries: 5 + # + # maxConnectRetries: How many connection-related errors to retry on for any http request. + # These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. + # maxConnectRetries: 10 + # + # maxReadRetries: How many times to retry on read errors for any http request + # These errors are raised after the request was sent to the server, so the request may have side-effects. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. 
+ # maxReadRetries: 5 + # + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" + # Absolute path to a script to execute after a configmap got reloaded. + # It runs before calls to REQ_URI. If the file is not executable it will be passed to sh. + # Otherwise, it's executed as is. Shebangs known to work are #!/bin/sh and #!/usr/bin/env python + script: null + skipReload: false + # This is needed if skipReload is true, to load any dashboards defined at startup time. + # Deploy the dashboard sidecar as an initContainer. + initDashboards: false + # Use native sidecar https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/ + # restartPolicy: Always + # # only applies to native sidecars + # startupProbe: + # httpGet: + # path: /healthz + # port: 8080 + # initialDelaySeconds: 5 + # periodSeconds: 5 + # failureThreshold: 60 # 5 minutes + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # folder UID. 
will be automatically generated if not specified
+        folderUid: ''
+        # type of the provider
+        type: file
+        # disableDelete to activate an import-only behaviour
+        disableDelete: false
+        # allow updating provisioned dashboards from the UI
+        allowUiUpdates: false
+        # allow Grafana to replicate dashboard structure from filesystem
+        foldersFromFilesStructure: false
+      # Additional dashboards sidecar volume mounts
+      extraMounts: []
+      # Sets the size limit of the dashboard sidecar emptyDir volume
+      sizeLimit: ""
+    datasources:
+      enabled: true
+      # Additional environment variables for the datasources sidecar
+      env: {}
+      ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
+      ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
+      ## Renders in container spec as:
+      ##   env:
+      ##   ...
+      ##   - name:
+      ##     valueFrom:
+      ##
+      envValueFrom: {}
+      # ENV_NAME:
+      #   configMapKeyRef:
+      #     name: configmap-name
+      #     key: value_key
+      # Do not reprocess already processed unchanged resources on k8s API reconnect.
+      # ignoreAlreadyProcessed: true
+      # label that the configmaps with datasources are marked with (can be templated)
+      label: grafana_datasource
+      # value of label that the configmaps with datasources are set to (can be templated)
+      labelValue: ""
+      # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+      # logLevel: INFO
+      # If specified, the sidecar will search for datasource config-maps inside this namespace.
+      # Otherwise the namespace in which the sidecar is running will be used.
+      # It's also possible to specify ALL to search in all namespaces
+      searchNamespace: null
+      # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+ watchMethod: WATCH + # search in configmap, secret or both + resource: both + # + # resourceName: comma separated list of resource names to be fetched/checked by this sidecar. + # per default all resources of the type defined in {{ .Values.sidecar.datasources.resource }} will be checked. + # This e.g. allows stricter RBAC rules which are limited to the resources meant for the sidecars. + # resourceName: "secret/datasources-0,configmap/datasources-15" + resourceName: "" + # + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # maxTotalRetries: Total number of retries to allow for any http request. + # Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry. + # maxTotalRetries: 5 + # + # maxConnectRetries: How many connection-related errors to retry on for any http request. + # These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. + # maxConnectRetries: 10 + # + # maxReadRetries: How many times to retry on read errors for any http request + # These errors are raised after the request was sent to the server, so the request may have side-effects. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. 
+ # maxReadRetries: 5 + # + # Endpoint to send request to reload datasources + reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" + # Absolute path to a script to execute after a configmap got reloaded. + # It runs before calls to REQ_URI. If the file is not executable it will be passed to sh. + # Otherwise, it's executed as is. Shebangs known to work are #!/bin/sh and #!/usr/bin/env python + script: null + skipReload: false + # This is needed if skipReload is true, to load any datasources defined at startup time. + # Deploy the datasources sidecar as an initContainer. + initDatasources: false + # Use native sidecar https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/ + # restartPolicy: Always + # # only applies to native sidecars + # startupProbe: + # httpGet: + # path: /healthz + # port: 8080 + # initialDelaySeconds: 5 + # periodSeconds: 5 + # failureThreshold: 60 # 5 minutes + # Additional datasources sidecar volume mounts + extraMounts: [] + # Sets the size limit of the datasource sidecar emptyDir volume + sizeLimit: "" + plugins: + enabled: true + # Additional environment variables for the plugins sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with plugins are marked with (can be templated) + label: grafana_plugin + # value of label that the configmaps with plugins are set to (can be templated) + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for plugin config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. 
+ watchMethod: WATCH + # search in configmap, secret or both + resource: both + # + # resourceName: comma separated list of resource names to be fetched/checked by this sidecar. + # per default all resources of the type defined in {{ .Values.sidecar.plugins.resource }} will be checked. + # This e.g. allows stricter RBAC rules which are limited to the resources meant for the sidecars. + # resourceName: "secret/plugins-0,configmap/plugins-1" + resourceName: "" + # + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # maxTotalRetries: Total number of retries to allow for any http request. + # Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry. + # maxTotalRetries: 5 + # + # maxConnectRetries: How many connection-related errors to retry on for any http request. + # These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. + # maxConnectRetries: 10 + # + # maxReadRetries: How many times to retry on read errors for any http request + # These errors are raised after the request was sent to the server, so the request may have side-effects. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. 
+      # maxReadRetries: 5
+      #
+      # Endpoint to send request to reload plugins
+      reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
+      # Absolute path to a script to execute after a configmap got reloaded.
+      # It runs before calls to REQ_URI. If the file is not executable it will be passed to sh.
+      # Otherwise, it's executed as is. Shebangs known to work are #!/bin/sh and #!/usr/bin/env python
+      script: null
+      skipReload: false
+      # Deploy the plugin sidecar as an initContainer in addition to a container.
+      # This is needed if skipReload is true, to load any plugins defined at startup time.
+      initPlugins: false
+      # Additional plugins sidecar volume mounts
+      extraMounts: []
+      # Sets the size limit of the plugin sidecar emptyDir volume
+      sizeLimit: ""
+    notifiers:
+      enabled: false
+      # Additional environment variables for the notifiers sidecar
+      env: {}
+      # Do not reprocess already processed unchanged resources on k8s API reconnect.
+      # ignoreAlreadyProcessed: true
+      # label that the configmaps with notifiers are marked with (can be templated)
+      label: grafana_notifier
+      # value of label that the configmaps with notifiers are set to (can be templated)
+      labelValue: ""
+      # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+      # logLevel: INFO
+      # If specified, the sidecar will search for notifier config-maps inside this namespace.
+      # Otherwise the namespace in which the sidecar is running will be used.
+      # It's also possible to specify ALL to search in all namespaces
+      searchNamespace: null
+      # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+      watchMethod: WATCH
+      # search in configmap, secret or both
+      resource: both
+      #
+      # resourceName: comma separated list of resource names to be fetched/checked by this sidecar.
+      # per default all resources of the type defined in {{ .Values.sidecar.notifiers.resource }} will be checked.
+ # This e.g. allows stricter RBAC rules which are limited to the resources meant for the sidecars. + # resourceName: "secret/notifiers-2,configmap/notifiers-1" + resourceName: "" + # + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # maxTotalRetries: Total number of retries to allow for any http request. + # Takes precedence over other counts. Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry. + # maxTotalRetries: 5 + # + # maxConnectRetries: How many connection-related errors to retry on for any http request. + # These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. + # maxConnectRetries: 10 + # + # maxReadRetries: How many times to retry on read errors for any http request + # These errors are raised after the request was sent to the server, so the request may have side-effects. + # Applies to all requests to reloadURL and k8s api requests. + # Set to 0 to fail on the first retry of this type. + # maxReadRetries: 5 + # + # Endpoint to send request to reload notifiers + reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" + # Absolute path to a script to execute after a configmap got reloaded. + # It runs before calls to REQ_URI. If the file is not executable it will be passed to sh. + # Otherwise, it's executed as is. 
Shebangs known to work are #!/bin/sh and #!/usr/bin/env python + script: null + skipReload: false + # Deploy the notifier sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any notifiers defined at startup time. + initNotifiers: false + # Use native sidecar https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/ + # restartPolicy: Always + # # only applies to native sidecars + # startupProbe: + # httpGet: + # path: /healthz + # port: 8080 + # initialDelaySeconds: 5 + # periodSeconds: 5 + # failureThreshold: 60 # 5 minutes + # Additional notifiers sidecar volume mounts + extraMounts: [] + # Sets the size limit of the notifier sidecar emptyDir volume + sizeLimit: "" + + ## Override the deployment namespace + ## + namespaceOverride: "" + + ## Number of old ReplicaSets to retain + ## + revisionHistoryLimit: 10 + + ## Add a seperate remote image renderer deployment/service + imageRenderer: + deploymentStrategy: {} + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + # The url of remote image renderer if it is not in the same namespace with the grafana instance + serverURL: "" + # The callback url of grafana instances if it is not in the same namespace with the remote image renderer + renderingCallbackURL: "" + image: + # -- The Docker registry + registry: docker.io + # image-renderer Image repository + repository: grafana/grafana-image-renderer + # image-renderer Image tag + tag: latest + # image-renderer Image sha (optional) + sha: "" + # image-renderer Image pull secrets (optional) + pullSecrets: [] + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # Fixes "Error: Failed to launch the browser process!\nchrome_crashpad_handler: --database is required" + XDG_CONFIG_HOME: /tmp/.chromium 
+ XDG_CACHE_HOME: /tmp/.chromium + # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # IGNORE_HTTPS_ERRORS: true + + ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + + # image-renderer deployment serviceAccount + serviceAccountName: "" + automountServiceAccountToken: false + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment container securityContext + containerSecurityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: ['ALL'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ## image-renderer pod annotation + podAnnotations: {} + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # Enable the image-renderer service + enabled: true + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. 
Ex: "http" or "tcp" + appProtocol: "" + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels + targetLabels: [] + # - targetLabel1 + # - targetLabel2 + # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana + grafanaProtocol: http + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) + extraIngressSelectors: [] + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + ## Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## Affinity for pod assignment (evaluated as template) + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Use an alternate scheduler, e.g. "stork". 
+    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+    ##
+    # schedulerName: "default-scheduler"
+
+    # Extra configmaps to mount in image-renderer pods
+    extraConfigmapMounts: []
+
+    # Extra secrets to mount in image-renderer pods
+    extraSecretMounts: []
+
+    # Extra volumes to mount in image-renderer pods
+    extraVolumeMounts: []
+
+    # Extra volumes for image-renderer pods
+    extraVolumes: []
+
+  networkPolicy:
+    ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
+    ##
+    enabled: false
+    ## @param networkPolicy.ingress When true enables the creation of an ingress network policy
+    ##
+    ingress: true
+    ## @param networkPolicy.allowExternal Don't require client label for connections
+    ## The Policy model to apply. When set to false, only pods with the correct
+    ## client label will have network access to grafana port defined.
+    ## When true, grafana will accept connections from any source
+    ## (with the correct destination port).
+    ##
+    allowExternal: true
+    ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
+    ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
+    ## and that match other criteria, the ones that have the good label, can reach the grafana.
+    ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this
+    ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
+ ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## + ## + ## + ## + ## + ## + egress: + ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be + ## created allowing grafana to connect to external data sources from kubernetes cluster. + enabled: false + ## + ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked + ## for all pods in the grafana namespace. + blockDNSResolution: false + ## + ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress + ports: [] + ## Add ports to the egress by specifying - port: + ## E.X. + ## - port: 80 + ## - port: 443 + ## + ## @param networkPolicy.egress.to Allow egress traffic to specific destinations + to: [] + ## Add destinations to the egress by specifying - ipBlock: + ## E.X. + ## to: + ## - namespaceSelector: + ## matchExpressions: + ## - {key: role, operator: In, values: [grafana]} + ## + ## + ## + ## + ## + + # Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option + enableKubeBackwardCompatibility: false + useStatefulSet: false + + # extraObjects could be utilized to add dynamic manifests via values + extraObjects: [] + # Examples: + # extraObjects: + # - apiVersion: kubernetes-client.io/v1 + # kind: ExternalSecret + # metadata: + # name: grafana-secrets-{{ .Release.Name }} + # spec: + # backendType: gcpSecretsManager + # data: + # - key: grafana-admin-password + # name: adminPassword + # Alternatively, you can use strings, which lets you use additional templating features: + # extraObjects: + # - | + # apiVersion: kubernetes-client.io/v1 + # kind: ExternalSecret + # metadata: + # name: grafana-secrets-{{ .Release.Name }} + # spec: + # backendType: gcpSecretsManager + # data: + # - key: grafana-admin-password + # 
name: {{ include "some-other-template" }} + + # assertNoLeakedSecrets is a helper function defined in _helpers.tpl that checks if secret + # values are not exposed in the rendered grafana.ini configmap. It is enabled by default. + # + # To pass values into grafana.ini without exposing them in a configmap, use variable expansion: + # https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#variable-expansion + # + # Alternatively, if you wish to allow secret values to be exposed in the rendered grafana.ini configmap, + # you can disable this check by setting assertNoLeakedSecrets to false. + assertNoLeakedSecrets: true + +tool: + # kind: 'SubChart' or 'HelmChart', if subchart then uncomment Chart.yaml dependency, else comment and use tool library with helm chart template + kind: 'SubChart' + repo: https://grafana.github.io/helm-charts + chart: grafana + version: 10.3.0 + values: *grafana_config \ No newline at end of file