## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
image:
  repository: redis
  tag: 6.0.7-alpine
  pullPolicy: IfNotPresent

## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## This imagePullSecrets is only for redis images
##
imagePullSecrets: []
# - name: "image-pull-secret"

## replicas number for each component
replicas: 3

## read-only replicas
## indexed slaves get never promoted to be master
## index starts with 0 - which is master on init
## i.e. "8,9" means 8th and 9th slave will be replica with replica-priority=0
## see also: https://redis.io/topics/sentinel
ro_replicas: ""

## Kubernetes priorityClass name for the redis-ha-server pod
# priorityClassName: ""

## Custom labels for the redis pod
labels: {}

configmap:
  ## Custom labels for the redis configmap
  labels: {}

## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
  ## Specifies whether a ServiceAccount should be created
  ##
  create: true
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the redis-ha.fullname template
  # name:
  ## opt in/out of automounting API credentials into container
  ## https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  automountToken: false

## Enables a HA Proxy for better LoadBalancing / Sentinel Master support. Automatically proxies to Redis master.
## Recommend for externally exposed Redis clusters.
## ref: https://cbonte.github.io/haproxy-dconv/1.9/intro.html
haproxy:
  enabled: false
  # Enable if you want a dedicated port in haproxy for redis-slaves
  readOnly:
    enabled: false
    port: 6380
  replicas: 3
  image:
    repository: haproxy
    tag: 2.0.4
    pullPolicy: IfNotPresent

  ## Custom labels for the haproxy pod
  labels: {}

  ## Reference to one or more secrets to be used when pulling images
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  imagePullSecrets: []
  # - name: "image-pull-secret"

  annotations: {}
  resources: {}
  emptyDir: {}

  ## PodSecurityPolicy configuration
  ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
  ##
  podSecurityPolicy:
    ## Specifies whether a PodSecurityPolicy should be created
    ##
    create: false

  ## Enable sticky sessions to Redis nodes via HAProxy
  ## Very useful for long-living connections as in case of Sentry for example
  stickyBalancing: false

  ## Kubernetes priorityClass name for the haproxy pod
  # priorityClassName: ""

  ## Service type for HAProxy
  ##
  service:
    type: ClusterIP
    loadBalancerIP:
    # List of CIDR's allowed to connect to LoadBalancer
    # loadBalancerSourceRanges: []
    annotations: {}
  serviceAccount:
    create: true

  ## Official HAProxy embedded prometheus metrics settings.
  ## Ref: https://github.com/haproxy/haproxy/tree/master/contrib/prometheus-exporter
  ##
  metrics:
    enabled: false
    # prometheus port & scrape path
    port: 9101
    portName: http-exporter-port
    scrapePath: /metrics

    serviceMonitor:
      # When set true then use a ServiceMonitor to configure scraping
      enabled: false
      # Set the namespace the ServiceMonitor should be deployed
      # namespace: "monitoring"
      # Set how frequently Prometheus should scrape
      # interval: 30s
      # Set path to redis-exporter telemetry-path
      # telemetryPath: /metrics
      # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
      # labels: {}
      # Set timeout for scrape
      # timeout: 10s
  init:
    resources: {}
  timeout:
    connect: 4s
    server: 330s
    client: 330s
    check: 2s
  checkInterval: 1s
  securityContext:
    runAsUser: 1000
    fsGroup: 1000
    runAsNonRoot: true

  ## Whether the haproxy pods should be forced to run on separate nodes.
  hardAntiAffinity: true

  ## Additional affinities to add to the haproxy pods.
  additionalAffinities: {}

  ## Override all other affinity settings for the haproxy pods with a string.
  affinity: |

  ## Custom config-haproxy.cfg files used to override default settings. If this file is
  ## specified then the config-haproxy.cfg above will be ignored.
  # customConfig: |-
  # Define configuration here

  ## Place any additional configuration section to add to the default config-haproxy.cfg
  # extraConfig: |-
  # Define configuration here

  ## Container lifecycle hooks
  ## Ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
  lifecycle: {}

## PodSecurityPolicy configuration
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
  ## Specifies whether a PodSecurityPolicy should be created
  ##
  create: false

## Role Based Access
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
  create: true

# NOT RECOMMENDED: Additional container in which you can execute arbitrary commands to update sysctl parameters
# You can now use securityContext.sysctls to leverage this capability
# Ref: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/
##
sysctlImage:
  enabled: false
  command: []
  registry: docker.io
  repository: busybox
  tag: 1.31.1
  pullPolicy: Always
  mountHostSys: false
  resources: {}

## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:

## Redis specific configuration options
redis:
  port: 6379
  masterGroupName: "mymaster" # must match ^[\\w-\\.]+$ and can be templated

  ## Configures redis with tls-port parameter
  # tlsPort: 6385

  ## Configures redis with tls-replication parameter, if true sets "tls-replication yes" in redis.conf
  # tlsReplication: true

  ## It is possible to disable client side certificates authentication when "authClients" is set to "no"
  # authClients: "no"

  ## Increase terminationGracePeriodSeconds to allow writing large RDB snapshots. (k8s default is 30s)
  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination-forced
  terminationGracePeriodSeconds: 60

  # liveness probe parameters for redis container
  livenessProbe:
    initialDelaySeconds: 30
    periodSeconds: 15
    timeoutSeconds: 15
    successThreshold: 1
    failureThreshold: 5

  # readiness probe parameters for redis container
  readinessProbe:
    initialDelaySeconds: 30
    periodSeconds: 15
    timeoutSeconds: 15
    successThreshold: 1
    failureThreshold: 5

  config:
    ## Additional redis conf options can be added below
    ## For all available options see http://download.redis.io/redis-stable/redis.conf
    min-replicas-to-write: 1
    min-replicas-max-lag: 5 # Value in seconds
    maxmemory: "0" # Max memory to use for each redis instance. Default is unlimited.
    maxmemory-policy: "volatile-lru" # Max memory policy to use for each redis instance. Default is volatile-lru.
    # Determines if scheduled RDB backups are created. Default is false.
    # Please note that local (on-disk) RDBs will still be created when re-syncing with a new slave.
    # The only way to prevent this is to enable diskless replication.
    save: "900 1"
    # When enabled, directly sends the RDB over the wire to slaves, without using the disk as
    # intermediate storage. Default is false.
    repl-diskless-sync: "yes"
    rdbcompression: "yes"
    rdbchecksum: "yes"

  ## Custom redis.conf files used to override default settings. If this file is
  ## specified then the redis.config above will be ignored.
  # customConfig: |-
  # Define configuration here

  resources: {}
  # requests:
  #   memory: 200Mi
  #   cpu: 100m
  # limits:
  #   memory: 700Mi

  ## Container lifecycle hooks
  ## Ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
  lifecycle: {}

  ## annotations for the redis statefulset
  annotations: {}

  ## updateStrategy for Redis StatefulSet
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
  updateStrategy:
    type: RollingUpdate

## Sentinel specific configuration options
sentinel:
  port: 26379

  ## Configure the 'bind' directive to bind to a list of network interfaces
  # bind: 0.0.0.0

  ## Configures sentinel with tls-port parameter
  # tlsPort: 26385

  ## Configures sentinel with tls-replication parameter, if true sets "tls-replication yes" in sentinel.conf
  # tlsReplication: true

  ## It is possible to disable client side certificates authentication when "authClients" is set to "no"
  # authClients: "no"

  ## Configures sentinel with AUTH (requirepass params)
  auth: false
  # password: password

  ## Use existing secret containing key `authKey` (ignores sentinel.password)
  # existingSecret: sentinel-secret

  ## Defines the key holding the sentinel password in existing secret.
  authKey: sentinel-password

  # liveness probe parameters for sentinel container
  livenessProbe:
    initialDelaySeconds: 30
    periodSeconds: 15
    timeoutSeconds: 15
    successThreshold: 1
    failureThreshold: 5

  # readiness probe parameters for sentinel container
  readinessProbe:
    initialDelaySeconds: 30
    periodSeconds: 15
    timeoutSeconds: 15
    successThreshold: 3
    failureThreshold: 5

  quorum: 2

  config:
    ## Additional sentinel conf options can be added below. Only options that
    ## are expressed in the format similar to 'sentinel xxx mymaster xxx' will
    ## be properly templated, except the maxclients option.
    ## For available options see http://download.redis.io/redis-stable/sentinel.conf
    down-after-milliseconds: 10000
    ## Failover timeout value in milliseconds
    failover-timeout: 180000
    parallel-syncs: 5
    maxclients: 10000

  ## Custom sentinel.conf files used to override default settings. If this file is
  ## specified then the sentinel.config above will be ignored.
  # customConfig: |-
  # Define configuration here

  resources: {}
  # requests:
  #   memory: 200Mi
  #   cpu: 100m
  # limits:
  #   memory: 200Mi

  ## Container lifecycle hooks
  ## Ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
  lifecycle: {}

securityContext:
  runAsUser: 1000
  fsGroup: 1000
  runAsNonRoot: true

## Assuming your kubelet allows it, you can use the following instructions to configure
## specific sysctl parameters
##
# sysctls:
# - name: net.core.somaxconn
#   value: '10000'

## Node labels, affinity, and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
nodeSelector: {}

## Whether the Redis server pods should be forced to run on separate nodes.
## This is accomplished by setting their AntiAffinity with requiredDuringSchedulingIgnoredDuringExecution as opposed to preferred.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature
##
hardAntiAffinity: true

## Additional affinities to add to the Redis server pods.
##
## Example:
##   nodeAffinity:
##     preferredDuringSchedulingIgnoredDuringExecution:
##       - weight: 50
##         preference:
##           matchExpressions:
##             - key: spot
##               operator: NotIn
##               values:
##                 - "true"
##
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
additionalAffinities: {}

## Override all other affinity settings for the Redis server pods with a string.
##
## Example:
## affinity: |
##   podAntiAffinity:
##     requiredDuringSchedulingIgnoredDuringExecution:
##       - labelSelector:
##           matchLabels:
##             app: {{ template "redis-ha.name" . }}
##             release: {{ .Release.Name }}
##         topologyKey: kubernetes.io/hostname
##     preferredDuringSchedulingIgnoredDuringExecution:
##       - weight: 100
##         podAffinityTerm:
##           labelSelector:
##             matchLabels:
##               app: {{ template "redis-ha.name" . }}
##               release: {{ .Release.Name }}
##           topologyKey: failure-domain.beta.kubernetes.io/zone
##
affinity: |

# Prometheus exporter specific configuration options
exporter:
  enabled: false
  image: oliver006/redis_exporter
  tag: v1.15.1
  pullPolicy: IfNotPresent

  # prometheus port & scrape path
  port: 9121
  portName: exporter-port
  scrapePath: /metrics

  # Address/Host for Redis instance. Default: localhost
  # Exists to circumvent issues with IPv6 dns resolution that occurs on certain environments
  ##
  address: localhost

  ## Set this to true if you want to connect to redis tls port
  # sslEnabled: true

  # cpu/memory resource limits/requests
  resources: {}

  # Additional args for redis exporter
  extraArgs: {}

  # Used to mount a LUA-Script via config map and use it for metrics-collection
  # script: |
  #   -- Example script copied from: https://github.com/oliver006/redis_exporter/blob/master/contrib/sample_collect_script.lua
  #   -- Example collect script for -script option
  #   -- This returns a Lua table with alternating keys and values.
  #   -- Both keys and values must be strings, similar to a HGETALL result.
  #   -- More info about Redis Lua scripting: https://redis.io/commands/eval
  #
  #   local result = {}
  #
  #   -- Add all keys and values from some hash in db 5
  #   redis.call("SELECT", 5)
  #   local r = redis.call("HGETALL", "some-hash-with-stats")
  #   if r ~= nil then
  #     for _,v in ipairs(r) do
  #       table.insert(result, v) -- alternating keys and values
  #     end
  #   end
  #
  #   -- Set foo to 42
  #   table.insert(result, "foo")
  #   table.insert(result, "42") -- note the string, use tostring() if needed
  #
  #   return result

  serviceMonitor:
    # When set true then use a ServiceMonitor to configure scraping
    enabled: false
    # Set the namespace the ServiceMonitor should be deployed
    # namespace: "monitoring"
    # Set how frequently Prometheus should scrape
    # interval: 30s
    # Set path to redis-exporter telemetry-path
    # telemetryPath: /metrics
    # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
    # labels: {}
    # Set timeout for scrape
    # timeout: 10s

  # prometheus exporter SCANS redis db which can take some time
  # allow different probe settings to not let container crashloop
  livenessProbe:
    initialDelaySeconds: 15
    timeoutSeconds: 3
    periodSeconds: 15

  readinessProbe:
    initialDelaySeconds: 15
    timeoutSeconds: 3
    periodSeconds: 15
    successThreshold: 2

podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1

## Configures redis with AUTH (requirepass & masterauth conf params)
auth: false
# redisPassword:

## Use existing secret containing key `authKey` (ignores redisPassword)
# existingSecret:

## Defines the key holding the redis password in existing secret.
authKey: auth

persistentVolume:
  enabled: true
  ## redis-ha data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  # storageClass: "-"
  accessModes:
    - ReadWriteOnce
  size: 10Gi
  annotations: {}

init:
  resources: {}

# To use a hostPath for data, set persistentVolume.enabled to false
# and define hostPath.path.
# Warning: this might overwrite existing folders on the host system!
hostPath:
  ## path is evaluated as template so placeholders are replaced
  # path: "/data/{{ .Release.Name }}"

  # if chown is true, an init-container with root permissions is launched to
  # change the owner of the hostPath folder to the user defined in the
  # security context
  chown: true

emptyDir: {}

tls:
  ## Fill the name of secret if you want to use your own TLS certificates.
  ## The secret should contain keys named by "tls.certFile" - the certificate, "tls.keyFile" - the private key,
  ## "tls.caCertFile" - the certificate of CA and "tls.dhParamsFile" - the dh parameter file
  ## These secrets will be generated using files from the certs folder if the secretName is not set and redis.tlsPort is set
  # secretName: tls-secret

  ## Name of certificate file
  certFile: redis.crt
  ## Name of key file
  keyFile: redis.key
  ## Name of Diffie-Hellman (DH) key exchange parameters file
  # dhParamsFile: redis.dh
  ## Name of CA certificate file
  caCertFile: ca.crt

# restore init container is executed if restore.[s3|ssh].source is not false
# restore init container creates /data/dump.rdb_ from the original if it exists
# restore init container overrides /data/dump.rdb
# secrets are stored into the environment of the init container - stored encoded on k8s
# REQUIRED for s3 restore: AWS 'access_key' and 'secret_key'
# EXAMPLE source for s3 restore: 's3://bucket/dump.rdb'
# REQUIRED for ssh restore: 'key' should be in one line including CR i.e. '-----BEGIN RSA PRIVATE KEY-----\n...\n...\n...\n-----END RSA PRIVATE KEY-----'
# EXAMPLE source for ssh restore: 'user@server:/path/dump.rdb'
restore:
  timeout: 600
  s3:
    source: false
    access_key: ""
    secret_key: ""
    region: ""
  ssh:
    source: false
    key: ""