From 29951ea77e309395903c61d7d883e0f21bcf2bf8 Mon Sep 17 00:00:00 2001 From: nold Date: Wed, 24 Aug 2022 21:15:12 +0200 Subject: [PATCH] Fix(cilium): Values --- projects/core/values/cilium.yaml | 1975 +++++++++++++++++++++++++++++- 1 file changed, 1958 insertions(+), 17 deletions(-) diff --git a/projects/core/values/cilium.yaml b/projects/core/values/cilium.yaml index 2dcc677f..8ba17442 100644 --- a/projects/core/values/cilium.yaml +++ b/projects/core/values/cilium.yaml @@ -1,11 +1,940 @@ ---- -kubeProxyReplacement: strict -hubble: +# File generated by install/kubernetes/Makefile; DO NOT EDIT. +# This file is based on install/kubernetes/cilium/values.yaml.tmpl. + +# upgradeCompatibility helps users upgrading to ensure that the configMap for +# Cilium will not change critical values to ensure continued operation +# This is flag is not required for new installations. +# For example: 1.7, 1.8, 1.9 +# upgradeCompatibility: '1.8' + +debug: + # -- Enable debug logging + enabled: false + # verbose: + +rbac: + # -- Enable creation of Resource-Based Access Control configuration. + create: true + +# -- Configure image pull secrets for pulling container images +imagePullSecrets: +# - name: "image-pull-secret" + +# kubeConfigPath: ~/.kube/config +# k8sServiceHost: +# k8sServicePort: + +cluster: + # -- Name of the cluster. Only required for Cluster Mesh. + name: default + # -- (int) Unique ID of the cluster. Must be unique across all connected + # clusters and in the range of 1 to 255. Only required for Cluster Mesh, + # may be 0 if Cluster Mesh is not used. + id: 0 + +# -- Define serviceAccount names for components. +# @default -- Component's fully qualified name. +serviceAccounts: + cilium: + create: true + name: cilium + annotations: {} + etcd: + create: true + name: cilium-etcd-operator + annotations: {} + operator: + create: true + name: cilium-operator + annotations: {} + preflight: + create: true + name: cilium-pre-flight + annotations: {} relay: + create: true + name: hubble-relay + annotations: {} + ui: + create: true + name: hubble-ui + annotations: {} + clustermeshApiserver: + create: true + name: clustermesh-apiserver + annotations: {} + # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob + clustermeshcertgen: + create: true + name: clustermesh-apiserver-generate-certs + annotations: {} + # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob + hubblecertgen: + create: true + name: hubble-generate-certs + annotations: {} + +# -- Configure termination grace period for cilium-agent DaemonSet. +terminationGracePeriodSeconds: 1 + +# -- Install the cilium agent resources. +agent: true + +# -- Agent container name. +name: cilium + +# -- Roll out cilium agent pods automatically when configmap is updated. +rollOutCiliumPods: false + +# -- Agent container image. +image: + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.12.1" + pullPolicy: "IfNotPresent" + # cilium-digest + digest: "" + useDigest: false + +# -- Security context to be added to agent pods +securityContext: + # runAsUser: 0 + privileged: false + extraCapabilities: + # Allow discretionary access control (e.g. required for package installation) + - DAC_OVERRIDE + # Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation) + - FOWNER + # Allow to execute program that changes GID (e.g. required for package installation) + - SETGID + # Allow to execute program that changes UID (e.g. 
required for package installation) + - SETUID + +# -- Cilium agent update strategy +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + +# -- Enable bandwidth manager to optimize TCP and UDP workloads and allow +# for rate-limiting traffic from individual Pods with EDT (Earliest Departure +# Time) through the "kubernetes.io/egress-bandwidth" Pod annotation. +bandwidthManager: + # -- Enable bandwidth manager infrastructure (also prerequirement for BBR) + enabled: false + # -- Activate BBR TCP congestion control for Pods + bbr: false + +# -- Configure BGP +bgp: + # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside + # cilium-agent and cilium-operator + enabled: false + announce: + # -- Enable allocation and announcement of service LoadBalancer IPs + loadbalancerIP: false + # -- Enable announcement of node pod CIDR + podCIDR: false + +# -- This feature set enables virtual BGP routers to be created via +# CiliumBGPPeeringPolicy CRDs. +bgpControlPlane: + # -- Enables the BGP control plane. + enabled: false + +bpf: + # -- Configure the mount point for the BPF filesystem + root: /sys/fs/bpf + + # -- Enable BPF clock source probing for more efficient tick retrieval. + clockProbe: false + + # -- Enables pre-allocation of eBPF map values. This increases + # memory usage but can reduce latency. + preallocateMaps: false + + # -- Configure the maximum number of entries in the TCP connection tracking + # table. + # ctTcpMax: '524288' + + # -- Configure the maximum number of entries for the non-TCP connection + # tracking table. + # ctAnyMax: '262144' + + # -- Configure the maximum number of service entries in the + # load balancer maps. + lbMapMax: 65536 + + # -- Configure the maximum number of entries for the NAT table. + # natMax: 524288 + + # -- Configure the maximum number of entries for the neighbor table. + # neighMax: 524288 + + # -- Configure the maximum number of entries in endpoint policy map (per endpoint). + policyMapMax: 16384 + + # -- Configure auto-sizing for all BPF maps based on available memory. + # ref: https://docs.cilium.io/en/stable/concepts/ebpf/maps/#ebpf-maps + #mapDynamicSizeRatio: 0.0025 + + # -- Configure the level of aggregation for monitor notifications. + # Valid options are none, low, medium, maximum. + monitorAggregation: medium + + # -- Configure the typical time between monitor notifications for + # active connections. + monitorInterval: "5s" + + # -- Configure which TCP flags trigger notifications when seen for the + # first time in a connection. + monitorFlags: "all" + + # -- Allow cluster external access to ClusterIP services. + lbExternalClusterIP: false + + # -- Enable native IP masquerade support in eBPF + #masquerade: false + + # -- Deprecated in favor of bpf.hostLegacyRouting. To be removed in 1.13. + # Configure whether direct routing mode should route traffic via + # host stack (true) or directly and more efficiently out of BPF (false) if + # the kernel supports it. + #hostRouting: true + + # -- Configure whether direct routing mode should route traffic via + # host stack (true) or directly and more efficiently out of BPF (false) if + # the kernel supports it. The latter has the implication that it will also + # bypass netfilter in the host namespace. + #hostLegacyRouting: false + + # -- Configure the eBPF-based TPROXY to reduce reliance on iptables rules + # for implementing Layer 7 policy. + # tproxy: true + + # -- Configure the FIB lookup bypass optimization for nodeport reverse + # NAT handling. 
+ # lbBypassFIBLookup: true + + # -- Configure explicitly allowed VLAN id's for bpf logic bypass. + # [0] will allow all VLAN id's without any filtering. + # vlanBypass: [] + +# -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy" +# init container before launching cilium-agent. +# More context can be found in the commit message of below PR +# https://github.com/cilium/cilium/pull/20123 +waitForKubeProxy: false + +cni: + # -- Install the CNI configuration and binary files into the filesystem. + install: true + + # -- Configure chaining on top of other CNI plugins. Possible values: + # - none + # - aws-cni + # - flannel + # - generic-veth + # - portmap + chainingMode: none + + # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the + # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. + # This ensures no Pods can be scheduled using other CNI plugins during Cilium + # agent downtime. + exclusive: true + + # -- Configure the log file for CNI logging with retention policy of 7 days. + # Disable CNI file logging by setting this field to empty explicitly. + logFile: /var/run/cilium/cilium-cni.log + + # -- Skip writing of the CNI configuration. This can be used if + # writing of the CNI configuration is performed by external automation. + customConf: false + + # -- Configure the path to the CNI configuration directory on the host. + confPath: /etc/cni/net.d + + # -- Configure the path to the CNI binary directory on the host. + binPath: /opt/cni/bin + + # -- Specify the path to a CNI config to read from on agent start. + # This can be useful if you want to manage your CNI + # configuration outside of a Kubernetes environment. This parameter is + # mutually exclusive with the 'cni.configMap' parameter. + # readCniConf: /host/etc/cni/net.d/05-cilium.conf + + # -- When defined, configMap will mount the provided value as ConfigMap and + # interpret the cniConf variable as CNI configuration file and write it + # when the agent starts up + # configMap: cni-configuration + + # -- Configure the key in the CNI ConfigMap to read the contents of + # the CNI configuration from. + configMapKey: cni-config + + # -- Configure the path to where to mount the ConfigMap inside the agent pod. + confFileMountPath: /tmp/cni-configuration + + # -- Configure the path to where the CNI configuration directory is mounted + # inside the agent pod. + hostConfDirMountPath: /host/etc/cni/net.d + +# -- Configure how frequently garbage collection should occur for the datapath +# connection tracking table. +# conntrackGCInterval: "0s" + +# -- Configure container runtime specific integration. +containerRuntime: + # -- Enables specific integrations for container runtimes. + # Supported values: + # - containerd + # - crio + # - docker + # - none + # - auto (automatically detect the container runtime) + integration: auto + # -- Configure the path to the container runtime control socket. + # socketPath: /path/to/runtime.sock + +# crdWaitTimeout: "" + +# -- Tail call hooks for custom eBPF programs. +customCalls: + # -- Enable tail call hooks for custom eBPF programs. + enabled: false + +# -- Configure which datapath mode should be used for configuring container +# connectivity. Valid options are "veth" or "ipvlan". Deprecated, to be removed +# in v1.12. +datapathMode: veth + +daemon: + # -- Configure where Cilium runtime state should be stored. + runPath: "/var/run/cilium" + +# -- Specify which network interfaces can run the eBPF datapath. 
This means +# that a packet sent from a pod to a destination outside the cluster will be +# masqueraded (to an output device IPv4 address), if the output device runs the +# program. When not specified, probing will automatically detect devices. +# devices: "" + +# -- Enables experimental support for the detection of new and removed datapath +# devices. When devices change the eBPF datapath is reloaded and services updated. +# If "devices" is set then only those devices, or devices matching a wildcard will +# be considered. +enableRuntimeDeviceDetection: false + +# -- Chains to ignore when installing feeder rules. +# disableIptablesFeederRules: "" + +# -- Limit egress masquerading to interface selector. +# egressMasqueradeInterfaces: "" + +# -- Whether to enable CNP status updates. +enableCnpStatusUpdates: false + +# -- Configures the use of the KVStore to optimize Kubernetes event handling by +# mirroring it into the KVstore for reduced overhead in large clusters. +enableK8sEventHandover: false + +# -- Enable setting identity mark for local traffic. +# enableIdentityMark: true + +# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it. +# enableK8sEndpointSlice: true + +# -- Enable CiliumEndpointSlice feature. +enableCiliumEndpointSlice: false + +ingressController: + # -- Enable cilium ingress controller + # This will automatically set enable-envoy-config as well. + enabled: false + + # -- Enforce https for host having matching TLS host in Ingress. + # Incoming traffic to http listener will return 308 http error code with respective location in header. + enforceHttps: true + + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + secretsNamespace: + # -- Create secrets namespace for Ingress. + create: true + + # -- Name of Ingress secret namespace. + name: cilium-secrets + + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + +# -- Enables the fallback compatibility solution for when the xt_socket kernel +# module is missing and it is needed for the datapath L7 redirection to work +# properly. See documentation for details on when this can be disabled: +# https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. +enableXTSocketFallback: true + +encryption: + # -- Enable transparent network encryption. + enabled: false + + # -- Encryption method. Can be either ipsec or wireguard. + type: ipsec + + # -- Enable encryption for pure node to node traffic. + # This option is only effective when encryption.type is set to ipsec. + nodeEncryption: false + + ipsec: + # -- Name of the key file inside the Kubernetes secret configured via secretName. + keyFile: "" + + # -- Path to mount the secret inside the Cilium pod. + mountPath: "" + + # -- Name of the Kubernetes secret containing the encryption keys. + secretName: "" + + # -- The interface to use for encrypted traffic. + interface: "" + + wireguard: + # -- Enables the fallback to the user-space implementation. + userspaceFallback: false + + # -- Deprecated in favor of encryption.ipsec.keyFile. + # Name of the key file inside the Kubernetes secret configured via secretName. + # This option is only effective when encryption.type is set to ipsec. + keyFile: keys + + # -- Deprecated in favor of encryption.ipsec.mountPath. + # Path to mount the secret inside the Cilium pod. 
+ # This option is only effective when encryption.type is set to ipsec. + mountPath: /etc/ipsec + + # -- Deprecated in favor of encryption.ipsec.secretName. + # Name of the Kubernetes secret containing the encryption keys. + # This option is only effective when encryption.type is set to ipsec. + secretName: cilium-ipsec-keys + + # -- Deprecated in favor of encryption.ipsec.interface. + # The interface to use for encrypted traffic. + # This option is only effective when encryption.type is set to ipsec. + interface: "" + +endpointHealthChecking: + # -- Enable connectivity health checking between virtual endpoints. + enabled: true + +# -- Enable endpoint status. +# Status can be: policy, health, controllers, logs and / or state. For 2 or more options use a comma. +endpointStatus: + enabled: false + status: "" + +endpointRoutes: + # -- Enable use of per endpoint routes instead of routing via + # the cilium_host interface. + enabled: false + +eni: + # -- Enable Elastic Network Interface (ENI) integration. + enabled: false + # -- Update ENI Adapter limits from the EC2 API + updateEC2AdapterLimitViaAPI: false + # -- Release IPs not used from the ENI + awsReleaseExcessIPs: false + # -- Enable ENI prefix delegation + awsEnablePrefixDelegation: false + # -- EC2 API endpoint to use + ec2APIEndpoint: "" + # -- Tags to apply to the newly created ENIs + eniTags: {} + # -- If using IAM role for Service Accounts will not try to + # inject identity values from cilium-aws kubernetes secret. + # Adds annotation to service account if managed by Helm. + # See https://github.com/aws/amazon-eks-pod-identity-webhook + iamRole: "" + # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs + # Important note: This requires that each instance has an ENI with a matching subnet attached + # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, + # use the CNI configuration file settings (cni.customConf) instead. + subnetIDsFilter: "" + # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs + # Important note: This requires that each instance has an ENI with a matching subnet attached + # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, + # use the CNI configuration file settings (cni.customConf) instead. + subnetTagsFilter: "" + # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances + # are going to be used to create new ENIs + instanceTagsFilter: "" + +externalIPs: + # -- Enable ExternalIPs service support. + enabled: false + +# fragmentTracking enables IPv4 fragment tracking support in the datapath. +# fragmentTracking: true + +# -- Enable connectivity health checking. +healthChecking: true + +# -- TCP port for the agent health API. This is not the port for cilium-health. +healthPort: 9879 + +# -- Configure the host firewall. +hostFirewall: + # -- Enables the enforcement of host policies in the eBPF datapath. + enabled: false + +hostPort: + # -- Enable hostPort service support. + enabled: false + +# -- Configure socket LB +socketLB: + # -- Enable socket LB + enabled: false + + # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules. + # hostNamespaceOnly: false + +# -- Configure certificate generation for Hubble integration. 
+# If hubble.tls.auto.method=cronJob, these values are used
+# for the Kubernetes CronJob which will be scheduled regularly to
+# (re)generate any certificates not provided manually.
+certgen:
+  image:
+    override: ~
+    repository: "quay.io/cilium/certgen"
+    tag: "v0.1.8@sha256:4a456552a5f192992a6edcec2febb1c54870d665173a33dc7d876129b199ddbd"
+    pullPolicy: "IfNotPresent"
+  # -- Seconds after which the completed job pod will be deleted
+  ttlSecondsAfterFinished: 1800
+  # -- Labels to be added to hubble-certgen pods
+  podLabels: {}
+  # -- Node tolerations for pod assignment on nodes with taints
+  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  tolerations: []
+
+hubble:
+  # -- Enable Hubble (true by default).
+  enabled: true
+
+  # -- Buffer size of the channel Hubble uses to receive monitor events. If this
+  # value is not set, the queue size is set to the default monitor queue size.
+  # eventQueueSize: ""
+
+  # -- Number of recent flows for Hubble to cache. Defaults to 4095.
+  # Possible values are:
+  #   1, 3, 7, 15, 31, 63, 127, 255, 511, 1023,
+  #   2047, 4095, 8191, 16383, 32767, 65535
+  # eventBufferCapacity: "4095"
+
+  # -- Hubble metrics configuration.
+  # See https://docs.cilium.io/en/stable/operations/metrics/#hubble-metrics
+  # for more comprehensive documentation about Hubble metrics.
+  metrics:
+    # -- Configures the list of metrics to collect. If empty or null, metrics
+    # are disabled.
+    # You can also specify the list of metrics from the helm CLI:
+    #
+    #   --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}"
+    #
+    enabled:
+      - dns:query;ignoreAAAA
+      - drop
+      - tcp
+      - flow
+      - icmp
+      - http
+    # -- Configure the port the hubble metric server listens on.
+    port: 9965
+    # -- Annotations to be added to hubble-metrics service.
+    serviceAnnotations: {}
+    serviceMonitor:
+      # -- Create ServiceMonitor resources for Prometheus Operator.
+      # This requires the prometheus CRDs to be available.
+      # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+      enabled: false
+      # -- Labels to add to ServiceMonitor hubble
+      labels: {}
+      # -- Annotations to add to ServiceMonitor hubble
+      annotations: {}
+
+  # -- Unix domain socket path to listen to when Hubble is enabled.
+  socketPath: /var/run/cilium/hubble.sock
+
+  # -- An additional address for Hubble to listen to.
+  # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that
+  # Hubble is listening on port 4244.
+  listenAddress: ":4244"
+  peerService:
+    # -- Enable a K8s Service for the Peer service, so that it can be accessed
+    # by a non-local client
+    enabled: true
+    # -- Service Port for the Peer service.
+    # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+    # port 80 if not.
+    # servicePort: 80
+    # -- Target Port for the Peer service.
+    targetPort: 4244
+    # -- The cluster domain to use to query the Hubble Peer service. It should
+    # be the local cluster.
+    clusterDomain: cluster.local
+  # -- TLS configuration for Hubble
+  tls:
+    # -- Enable mutual TLS for listenAddress. Setting this value to false is
+    # highly discouraged as the Hubble API provides access to potentially
+    # sensitive network flow metadata and is exposed on the host network.
+    enabled: true
+    # -- Configure automatic TLS certificates generation.
+    auto:
+      # -- Auto-generate certificates.
+ # When set to true, automatically generate a CA and certificates to + # enable mTLS between Hubble server and Hubble Relay instances. If set to + # false, the certs for Hubble server need to be provided by setting + # appropriate values below. + enabled: true + # -- Set the method to auto-generate certificates. Supported values: + # - helm: This method uses Helm to generate all certificates. + # - cronJob: This method uses a Kubernetes CronJob the generate any + # certificates not provided by the user at installation + # time. + # - certmanager: This method use cert-manager to generate & rotate certificates. + method: helm + # -- Generated certificates validity duration in days. + certValidityDuration: 1095 + # -- Schedule for certificates regeneration (regardless of their expiration date). + # Only used if method is "cronJob". If nil, then no recurring job will be created. + # Instead, only the one-shot job is deployed to generate the certificates at + # installation time. + # + # Defaults to midnight of the first day of every fourth month. For syntax, see + # https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule + schedule: "0 0 1 */4 *" + + # [Example] + # certManagerIssuerRef: + # group: cert-manager.io + # kind: ClusterIssuer + # name: ca-issuer + # -- certmanager issuer used when hubble.tls.auto.method=certmanager. + # If not specified, a CA issuer will be created. + certManagerIssuerRef: {} + + # -- Deprecated in favor of tls.ca. To be removed in 1.13. + # base64 encoded PEM values for the Hubble CA certificate and private key. + ca: + # -- Deprecated in favor of tls.ca.cert. To be removed in 1.13. + cert: "" + # -- Deprecated in favor of tls.ca.key. To be removed in 1.13. + # The CA private key (optional). If it is provided, then it will be + # used by hubble.tls.auto.method=cronJob to generate all other certificates. + # Otherwise, a ephemeral CA is generated if hubble.tls.auto.enabled=true. + key: "" + # -- base64 encoded PEM values for the Hubble server certificate and private key + server: + cert: "" + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + + relay: + # -- Enable Hubble Relay (requires hubble.enabled=true) enabled: true + # -- Roll out Hubble Relay pods automatically when configmap is updated. + rollOutPods: false + + # -- Hubble-relay container image. + image: + override: ~ + repository: "quay.io/cilium/hubble-relay" + tag: "v1.12.1" + # hubble-relay-digest + digest: "" + useDigest: false + pullPolicy: "IfNotPresent" + + # -- Specifies the resources for the hubble-relay pods + resources: {} + + # -- Number of replicas run for the hubble-relay deployment. + replicas: 1 + + # -- Affinity for hubble-replay + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: + kubernetes.io/os: linux + + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + # + tolerations: [] + + # -- Additional hubble-relay environment variables. 
+ extraEnv: [] + + # -- Annotations to be added to hubble-relay pods + podAnnotations: {} + + # -- Labels to be added to hubble-relay pods + podLabels: {} + + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + + # -- The priority class to use for hubble-relay + priorityClassName: "" + + # -- Configure termination grace period for hubble relay Deployment. + terminationGracePeriodSeconds: 1 + + # -- hubble-relay update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + + # -- hubble-relay security context + securityContext: {} + + # -- hubble-relay service configuration. + service: + # --- The type of service used for Hubble Relay access, either ClusterIP or NodePort. + type: ClusterIP + # --- The port to use when the service type is set to NodePort. + nodePort: 31234 + + # -- Host to listen to. Specify an empty string to bind to all the interfaces. + listenHost: "" + + # -- Port to listen to. + listenPort: "4245" + + # -- TLS configuration for Hubble Relay + tls: + # -- base64 encoded PEM values for the hubble-relay client certificate and private key + # This keypair is presented to Hubble server instances for mTLS + # authentication and is required when hubble.tls.enabled is true. + # These values need to be set manually if hubble.tls.auto.enabled is false. + client: + cert: "" + key: "" + # -- base64 encoded PEM values for the hubble-relay server certificate and private key + server: + # When set to true, enable TLS on for Hubble Relay server + # (ie: for clients connecting to the Hubble Relay API). + enabled: false + # These values need to be set manually if hubble.tls.auto.enabled is false. + cert: "" + key: "" + # -- extra DNS names added to certificate when its auto gen + extraDnsNames: [] + # -- extra IP addresses added to certificate when its auto gen + extraIpAddresses: [] + + # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). + dialTimeout: ~ + + # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s"). + retryTimeout: ~ + + # -- Max number of flows that can be buffered for sorting before being sent to the + # client (per request) (e.g. 100). + sortBufferLenMax: ~ + + # -- When the per-request flows sort buffer is not full, a flow is drained every + # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s"). + sortBufferDrainTimeout: ~ + + # -- Port to use for the k8s service backed by hubble-relay pods. + # If not set, it is dynamically assigned to port 443 if TLS is enabled and to + # port 80 if not. + # servicePort: 80 + + # -- Enable prometheus metrics for hubble-relay on the configured port at + # /metrics + prometheus: + enabled: false + port: 9966 + serviceMonitor: + # -- Enable service monitors. 
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor hubble-relay + labels: {} + # -- Annotations to add to ServiceMonitor hubble-relay + annotations: {} + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + ui: + # -- Whether to enable the Hubble UI. enabled: true + + standalone: + # -- When true, it will allow installing the Hubble UI only, without checking dependencies. + # It is useful if a cluster already has cilium and Hubble relay installed and you just + # want Hubble UI to be deployed. + # When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui` + enabled: false + + tls: + # -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required + # to provide a volume for mounting the client certificates. + certsVolume: {} + # projected: + # defaultMode: 0400 + # sources: + # - secret: + # name: hubble-ui-client-certs + # items: + # - key: tls.crt + # path: client.crt + # - key: tls.key + # path: client.key + # - key: ca.crt + # path: hubble-relay-ca.crt + + # -- Roll out Hubble-ui pods automatically when configmap is updated. + rollOutPods: false + + tls: + # -- base64 encoded PEM values used to connect to hubble-relay + # This keypair is presented to Hubble Relay instances for mTLS + # authentication and is required when hubble.relay.tls.server.enabled is true. + # These values need to be set manually if hubble.tls.auto.enabled is false. + client: + cert: "" + key: "" + + backend: + # -- Hubble-ui backend image. + image: + override: ~ + repository: "quay.io/cilium/hubble-ui-backend" + tag: "v0.9.1@sha256:c4b86e0d7a38d52c6ea3d9d7b17809e5212efd97494e8bd37c8466ddd68d42d0" + pullPolicy: "IfNotPresent" + + # -- Additional hubble-ui backend environment variables. + extraEnv: [] + + # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + + frontend: + # -- Hubble-ui frontend image. + image: + override: ~ + repository: "quay.io/cilium/hubble-ui" + tag: "v0.9.1@sha256:baff611b975cb12307a163c0e547e648da211384eabdafd327707ff2ec31cc24" + pullPolicy: "IfNotPresent" + + # -- Additional hubble-ui frontend environment variables. + extraEnv: [] + + # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + + # -- The number of replicas of Hubble UI to deploy. + replicas: 1 + + # -- Annotations to be added to hubble-ui pods + podAnnotations: {} + + # -- Labels to be added to hubble-ui pods + podLabels: {} + + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + + # -- Affinity for hubble-ui + affinity: {} + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: + kubernetes.io/os: linux + + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + # + tolerations: [] + + # -- The priority class to use for hubble-ui + priorityClassName: "" + + # -- hubble-ui update strategy. + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + + # -- Security context to be added to Hubble UI pods + securityContext: + # -- Deprecated in favor of hubble.ui.securityContext. + # Whether to set the security context on the Hubble UI pods. + enabled: true + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + + # -- hubble-ui service configuration. + service: + # --- The type of service used for Hubble UI access, either ClusterIP or NodePort. + type: ClusterIP + # --- The port to use when the service type is set to NodePort. + nodePort: 31235 + + # -- hubble-ui ingress configuration. ingress: enabled: true className: ingress-internal-traefik @@ -19,23 +948,1035 @@ hubble: hosts: - cilium.dc - metrics: - enabled: - - dns:query;ignoreAAAA - - drop - - tcp - - flow - - icmp - - http - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9091" +# -- Method to use for identity allocation (`crd` or `kvstore`). +identityAllocationMode: "crd" -prometheus: +# -- Time to wait before using new identity on endpoint identity change. +# identityChangeGracePeriod: "5s" + +# -- GC interval for security identities. +# identityGCInterval: "15m0s" + +# -- Timeout after which identity expires on lack of heartbeat. +# identityHeartbeatTimeout: "30m0s" + + +# -- Configure whether to install iptables rules to allow for TPROXY +# (L7 proxy injection), iptables-based masquerading and compatibility +# with kube-proxy. +installIptablesRules: true + +# -- Install Iptables rules to skip netfilter connection tracking on all pod +# traffic. This option is only effective when Cilium is running in direct +# routing and full KPR mode. Moreover, this option cannot be enabled when Cilium +# is running in a managed Kubernetes environment or in a chained CNI setup. +installNoConntrackIptablesRules: false + +ipam: + # -- Configure IP Address Management mode. + # ref: https://docs.cilium.io/en/stable/concepts/networking/ipam/ + mode: "cluster-pool" + operator: + # -- Deprecated in favor of ipam.operator.clusterPoolIPv4PodCIDRList. + # IPv4 CIDR range to delegate to individual nodes for IPAM. + clusterPoolIPv4PodCIDR: "10.0.0.0/8" + # -- IPv4 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv4PodCIDRList: [] + # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv4MaskSize: 24 + # -- Deprecated in favor of ipam.operator.clusterPoolIPv6PodCIDRList. + # IPv6 CIDR range to delegate to individual nodes for IPAM. + clusterPoolIPv6PodCIDR: "fd00::/104" + # -- IPv6 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv6PodCIDRList: [] + # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. 
+ clusterPoolIPv6MaskSize: 120 + +# -- Configure the eBPF-based ip-masq-agent +ipMasqAgent: + enabled: false + +# iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. +# iptablesLockTimeout: "5s" + +ipv4: + # -- Enable IPv4 support. enabled: true +ipv6: + # -- Enable IPv6 support. + enabled: false + +# -- Configure Kubernetes specific configuration +k8s: {} + # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + # requireIPv4PodCIDR: false + + # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + # requireIPv6PodCIDR: false + +# -- Keep the deprecated selector labels when deploying Cilium DaemonSet. +keepDeprecatedLabels: false + +# -- Keep the deprecated probes when deploying Cilium DaemonSet +keepDeprecatedProbes: false + +startupProbe: + # -- failure threshold of startup probe. + # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + failureThreshold: 105 + # -- interval between checks of the startup probe + periodSeconds: 2 +livenessProbe: + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 +readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 + +# -- Configure the kube-proxy replacement in Cilium BPF datapath +# Valid options are "disabled", "partial", "strict". +# ref: https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free/ +kubeProxyReplacement: "strict" + +# -- healthz server bind address for the kube-proxy replacement. +# To enable set the value to '0.0.0.0:10256' for all ipv4 +# addresses and this '[::]:10256' for all ipv6 addresses. +# By default it is disabled. +kubeProxyReplacementHealthzBindAddr: "" + +l2NeighDiscovery: + # -- Enable L2 neighbor discovery in the agent + enabled: true + # -- Override the agent's default neighbor resolution refresh period. + refreshPeriod: "30s" + +# -- Enable Layer 7 network policy. +l7Proxy: true + +# -- Enable Local Redirect Policy. +localRedirectPolicy: false + +# To include or exclude matched resources from cilium identity evaluation +# labels: "" + +# logOptions allows you to define logging options. eg: +# logOptions: +# format: json + +# -- Enables periodic logging of system load +logSystemLoad: false + + +# -- Configure maglev consistent hashing +maglev: {} + # -- tableSize is the size (parameter M) for the backend table of one + # service entry + # tableSize: + + # -- hashSeed is the cluster-wide base64 encoded seed for the hashing + # hashSeed: + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +enableIPv4Masquerade: true + +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +enableIPv6Masquerade: true + +# -- Enables egress gateway to redirect and SNAT the traffic that leaves the +# cluster. +egressGateway: + enabled: false + # -- Install egress gateway IP rules and routes in order to properly steer + # egress gateway traffic to the correct ENI interface + installRoutes: false + +vtep: +# -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow +# Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. 
+ enabled: false + +# -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" + endpoint: "" +# -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" + cidr: "" +# -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0" + mask: "" +# -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y" + mac: "" + +# -- Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# ipv4NativeRoutingCIDR: + +# -- Allows to explicitly specify the IPv6 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# ipv6NativeRoutingCIDR: + +# -- cilium-monitor sidecar. +monitor: + # -- Enable the cilium-monitor sidecar. + enabled: false + +# -- Configure service load balancing +# loadBalancer: + # -- standalone enables the standalone L4LB which does not connect to + # kube-apiserver. + # standalone: false + + # -- algorithm is the name of the load balancing algorithm for backend + # selection e.g. random or maglev + # algorithm: random + + # -- mode is the operation mode of load balancing for remote backends + # e.g. snat, dsr, hybrid + # mode: snat + + # -- acceleration is the option to accelerate service handling via XDP + # e.g. native, disabled + # acceleration: disabled + + # -- dsrDispatch configures whether IP option or IPIP encapsulation is + # used to pass a service IP and port to remote backend + # dsrDispatch: opt + + # -- serviceTopology enables K8s Topology Aware Hints -based service + # endpoints filtering + # serviceTopology: false + +# -- Configure N-S k8s service loadbalancing +nodePort: + # -- Enable the Cilium NodePort service implementation. + enabled: false + + # -- Port range to use for NodePort services. + # range: "30000,32767" + + # -- Set to true to prevent applications binding to service ports. + bindProtection: true + + # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral + # ports is detected. + autoProtectPortRange: true + + # -- Enable healthcheck nodePort server for NodePort services + enableHealthCheck: true + +# policyAuditMode: false + +# -- The agent can be put into one of the three policy enforcement modes: +# default, always and never. 
+# ref: https://docs.cilium.io/en/stable/policy/intro/#policy-enforcement-modes +policyEnforcementMode: "default" + +pprof: + # -- Enable Go pprof debugging + enabled: false + +# -- Configure prometheus metrics on the configured port at /metrics +prometheus: + enabled: false + port: 9962 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-agent + labels: {} + # -- Annotations to add to ServiceMonitor cilium-agent + annotations: {} + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # -- Metrics that should be enabled or disabled from the default metric + # list. (+metric_foo to enable metric_foo , -metric_bar to disable + # metric_bar). + # ref: https://docs.cilium.io/en/stable/operations/metrics/#exported-metrics + metrics: ~ + +# -- Configure Istio proxy options. +proxy: + prometheus: + enabled: true + port: "9964" + # -- Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecarImageRegex: "cilium/istio_proxy" + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +remoteNodeIdentity: true + +# -- Enable resource quotas for priority classes used in the cluster. +resourceQuotas: + enabled: false + cilium: + hard: + # 5k nodes * 2 DaemonSets (Cilium and cilium node init) + pods: "10k" + operator: + hard: + # 15 "clusterwide" Cilium Operator pods for HA + pods: "15" + +# Need to document default +################## +#sessionAffinity: false + +# -- Do not run Cilium agent when running with clean mode. Useful to completely +# uninstall Cilium as it will stop Cilium from starting and create artifacts +# in the node. +sleepAfterInit: false + +# -- Configure BPF socket operations configuration +sockops: + # enabled enables installation of socket options acceleration. + enabled: false + +# -- Enable check of service source ranges (currently, only for LoadBalancer). +svcSourceRangeCheck: true + +# -- Synchronize Kubernetes nodes to kvstore and perform CNP GC. +synchronizeK8sNodes: true + +# -- Configure TLS configuration in the agent. +tls: + # -- This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies + # (namely the secrets referenced by terminatingTLS and originatingTLS). + # Possible values: + # - local + # - k8s + secretsBackend: local + + # -- Base64 encoded PEM values for the CA certificate and private key. + # This can be used as common CA to generate certificates used by hubble and clustermesh components + ca: + # -- Optional CA cert. If it is provided, it will be used by cilium to + # generate all other certificates. Otherwise, an ephemeral CA is generated. + cert: "" + + # -- Optional CA private key. If it is provided, it will be used by cilium to + # generate all other certificates. Otherwise, an ephemeral CA is generated. + key: "" + + # -- Generated certificates validity duration in days. This will be used for auto generated CA. + certValidityDuration: 1095 + +# -- Configure the encapsulation configuration for communication between nodes. +# Possible values: +# - disabled +# - vxlan (default) +# - geneve +tunnel: "vxlan" + +# -- Disable the usage of CiliumEndpoint CRD. 
+disableEndpointCRD: "false" + +wellKnownIdentities: + # -- Enable the use of well-known identities. + enabled: false + +etcd: + # -- Enable etcd mode for the agent. + enabled: false + + # -- cilium-etcd-operator image. + image: + override: ~ + repository: "quay.io/cilium/cilium-etcd-operator" + tag: "v2.0.7@sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc" + pullPolicy: "IfNotPresent" + + # -- The priority class to use for cilium-etcd-operator + priorityClassName: "" + + # -- Additional cilium-etcd-operator container arguments. + extraArgs: [] + + # -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Node labels for cilium-etcd-operator pod assignment + # ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: + kubernetes.io/os: linux + + # -- Annotations to be added to cilium-etcd-operator pods + podAnnotations: {} + + # -- Labels to be added to cilium-etcd-operator pods + podLabels: {} + + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + + # -- cilium-etcd-operator resource limits & requests + # ref: https://kubernetes.io/docs/user-guide/compute-resources/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + # -- Security context to be added to cilium-etcd-operator pods + securityContext: {} + # runAsUser: 0 + + # -- cilium-etcd-operator update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + + # -- If etcd is behind a k8s service set this option to true so that Cilium + # does the service translation automatically without requiring a DNS to be + # running. + k8sService: false + + # -- Cluster domain for cilium-etcd-operator. + clusterDomain: cluster.local + + # -- List of etcd endpoints (not needed when using managed=true). + endpoints: + - https://CHANGE-ME:2379 + + # -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if + # managed=true) + ssl: false + operator: + # -- Enable the cilium-operator component (required). + enabled: true + + # -- Roll out cilium-operator pods automatically when configmap is updated. + rollOutPods: false + + # -- cilium-operator image. + image: + override: ~ + repository: "quay.io/cilium/operator" + tag: "v1.12.1" + # operator-generic-digest + genericDigest: "" + # operator-azure-digest + azureDigest: "" + # operator-aws-digest + awsDigest: "" + # operator-alibabacloud-digest + alibabacloudDigest: "" + useDigest: false + pullPolicy: "IfNotPresent" + suffix: "" + + # -- Number of replicas to run for the cilium-operator deployment replicas: 1 -hostServices: - enabled: true + # -- The priority class to use for cilium-operator + priorityClassName: "" + + # -- DNS policy for Cilium operator pods. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: "" + + # -- cilium-operator update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + + # -- Affinity for cilium-operator + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + + # -- Node labels for cilium-operator pod assignment + # ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: + kubernetes.io/os: linux + + # -- Node tolerations for cilium-operator scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Additional cilium-operator container arguments. + extraArgs: [] + + # -- Additional cilium-operator environment variables. + extraEnv: [] + + # -- Additional cilium-operator hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional cilium-operator volumes. + extraVolumes: [] + + # -- Additional cilium-operator volumeMounts. + extraVolumeMounts: [] + + # -- Annotations to be added to cilium-operator pods + podAnnotations: {} + + # -- Labels to be added to cilium-operator pods + podLabels: {} + + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + + # -- cilium-operator resource limits & requests + # ref: https://kubernetes.io/docs/user-guide/compute-resources/ + resources: {} + # limits: + # cpu: 1000m + # memory: 1Gi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Security context to be added to cilium-operator pods + securityContext: {} + # runAsUser: 0 + + # -- Interval for endpoint garbage collection. + endpointGCInterval: "5m0s" + + # -- Interval for cilium node garbage collection. + nodeGCInterval: "5m0s" + + # -- Interval for identity garbage collection. + identityGCInterval: "15m0s" + + # -- Timeout for identity heartbeats. + identityHeartbeatTimeout: "30m0s" + + # -- Enable prometheus metrics for cilium-operator on the configured port at + # /metrics + prometheus: + enabled: false + port: 9963 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-operator + labels: {} + # -- Annotations to add to ServiceMonitor cilium-operator + annotations: {} + + # -- Skip CRDs creation for cilium-operator + skipCRDCreation: false + + # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium + # pod running. 
+ removeNodeTaints: true + + # -- Set Node condition NetworkUnavailable to 'false' with the reason + # 'CiliumIsUp' for nodes that have a healthy Cilium pod. + setNodeNetworkStatus: true + + unmanagedPodWatcher: + # -- Restart any pod that are not managed by Cilium. + restart: true + # -- Interval, in seconds, to check if there are any pods that are not + # managed by Cilium. + intervalSeconds: 15 + +nodeinit: + # -- Enable the node initialization DaemonSet + enabled: false + + # -- node-init image. + image: + override: ~ + repository: "quay.io/cilium/startup-script" + tag: "d69851597ea019af980891a4628fb36b7880ec26" + pullPolicy: "IfNotPresent" + + # -- The priority class to use for the nodeinit pod. + priorityClassName: "" + + # -- node-init update strategy + updateStrategy: + type: RollingUpdate + + # -- Additional nodeinit environment variables. + extraEnv: [] + + # -- Affinity for cilium-nodeinit + affinity: {} + + # -- Node labels for nodeinit pod assignment + # ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: + kubernetes.io/os: linux + + # -- Node tolerations for nodeinit scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Annotations to be added to node-init pods. + podAnnotations: {} + + # -- Labels to be added to node-init pods. + podLabels: {} + + # -- nodeinit resource limits & requests + # ref: https://kubernetes.io/docs/user-guide/compute-resources/ + resources: + requests: + cpu: 100m + memory: 100Mi + + # -- Security context to be added to nodeinit pods. + securityContext: + privileged: false + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + add: + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # Used for nsenter + - NET_ADMIN + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + + # -- bootstrapFile is the location of the file where the bootstrap timestamp is + # written by the node-init DaemonSet + bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time" + +preflight: + # -- Enable Cilium pre-flight resources (required for upgrade) + enabled: false + + # -- Cilium pre-flight image. + image: + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.12.1" + # cilium-digest + digest: "" + useDigest: false + pullPolicy: "IfNotPresent" + + # -- The priority class to use for the preflight pod. + priorityClassName: "" + + # -- preflight update strategy + updateStrategy: + type: RollingUpdate + + # -- Additional preflight environment variables. 
+ extraEnv: [] + + # -- Affinity for cilium-preflight + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + + # -- Node labels for preflight pod assignment + # ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: + kubernetes.io/os: linux + + # -- Node tolerations for preflight scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: + - key: node.kubernetes.io/not-ready + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node.cloudprovider.kubernetes.io/uninitialized + effect: NoSchedule + value: "true" + - key: CriticalAddonsOnly + operator: "Exists" + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Annotations to be added to preflight pods + podAnnotations: {} + + # -- Labels to be added to the preflight pod. + podLabels: {} + + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + + # -- preflight resource limits & requests + # ref: https://kubernetes.io/docs/user-guide/compute-resources/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + # -- Security context to be added to preflight pods + securityContext: {} + # runAsUser: 0 + + # -- Path to write the `--tofqdns-pre-cache` file to. + tofqdnsPreCache: "" + + # -- Configure termination grace period for preflight Deployment and DaemonSet. + terminationGracePeriodSeconds: 1 + + # -- By default we should always validate the installed CNPs before upgrading + # Cilium. This will make sure the user will have the policies deployed in the + # cluster with the right schema. + validateCNPs: true + +# -- Explicitly enable or disable priority class. +# .Capabilities.KubeVersion is unsettable in `helm template` calls, +# it depends on k8s libraries version that Helm was compiled against. +# This option allows to explicitly disable setting the priority class, which +# is useful for rendering charts for gke clusters in advance. +enableCriticalPriorityClass: true + +# disableEnvoyVersionCheck removes the check for Envoy, which can be useful +# on AArch64 as the images do not currently ship a version of Envoy. +#disableEnvoyVersionCheck: false + +clustermesh: + # -- Deploy clustermesh-apiserver for clustermesh + useAPIServer: false + + # -- Clustermesh explicit configuration. + config: + # -- Enable the Clustermesh explicit configuration. + enabled: false + # -- Default dns domain for the Clustermesh API servers + # This is used in the case cluster addresses are not provided + # and IPs are used. + domain: mesh.cilium.io + # -- List of clusters to be peered in the mesh. + clusters: [] + # clusters: + # # -- Name of the cluster + # - name: cluster1 + # # -- Address of the cluster, use this if you created DNS records for + # # the cluster Clustermesh API server. + # address: cluster1.mesh.cilium.io + # # -- Port of the cluster Clustermesh API server. 
+  apiserver:
+    # -- Clustermesh API server image.
+    image:
+      override: ~
+      repository: "quay.io/cilium/clustermesh-apiserver"
+      tag: "v1.12.1"
+      # clustermesh-apiserver-digest
+      digest: ""
+      useDigest: false
+      pullPolicy: "IfNotPresent"
+
+    etcd:
+      # -- Clustermesh API server etcd image.
+      image:
+        override: ~
+        repository: "quay.io/coreos/etcd"
+        tag: "v3.5.4@sha256:795d8660c48c439a7c3764c2330ed9222ab5db5bb524d8d0607cac76f7ba82a3"
+        pullPolicy: "IfNotPresent"
+
+    service:
+      # -- The type of service used for apiserver access.
+      type: NodePort
+      # -- Optional port to use as the node port for apiserver access.
+      nodePort: 32379
+      # -- Optional loadBalancer IP address to use with type LoadBalancer.
+      # loadBalancerIP:
+
+      # -- Annotations for the clustermesh-apiserver
+      # For GKE LoadBalancer, use the annotation cloud.google.com/load-balancer-type: "Internal"
+      # For EKS LoadBalancer, use the annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+      annotations: {}
+
+    # -- Number of replicas run for the clustermesh-apiserver deployment.
+    replicas: 1
+
+    # -- Additional clustermesh-apiserver environment variables.
+    extraEnv: []
+
+    # -- Annotations to be added to clustermesh-apiserver pods
+    podAnnotations: {}
+
+    # -- Labels to be added to clustermesh-apiserver pods
+    podLabels: {}
+
+    # PodDisruptionBudget settings
+    podDisruptionBudget:
+      # -- enable PodDisruptionBudget
+      # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+      enabled: false
+      # -- Minimum number/percentage of pods that should remain scheduled.
+      # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+      minAvailable: null
+      # -- Maximum number/percentage of pods that may be made unavailable
+      maxUnavailable: 1
+
+    # -- Resource requests and limits for the clustermesh-apiserver container
+    # of the clustermesh-apiserver deployment, for example:
+    resources: {}
+      # requests:
+      #   cpu: 100m
+      #   memory: 64Mi
+      # limits:
+      #   cpu: 1000m
+      #   memory: 1024M
+
+    # -- Affinity for clustermesh.apiserver
+    affinity:
+      podAntiAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+        - topologyKey: kubernetes.io/hostname
+          labelSelector:
+            matchLabels:
+              k8s-app: clustermesh-apiserver
+
+    # -- Node labels for pod assignment
+    # ref: https://kubernetes.io/docs/user-guide/node-selection/
+    nodeSelector:
+      kubernetes.io/os: linux
+
+    # -- Node tolerations for pod assignment on nodes with taints
+    # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+    tolerations: []
+
+    # -- clustermesh-apiserver update strategy
+    updateStrategy:
+      type: RollingUpdate
+      rollingUpdate:
+        maxUnavailable: 1
+
+    # -- The priority class to use for clustermesh-apiserver
+    priorityClassName: ""
+
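+    # [Example] A hedged sketch of exposing this apiserver through an internal
+    # LoadBalancer instead of the default NodePort, using the annotation noted
+    # in the service section above (GKE shown; adjust for your provider):
+    # service:
+    #   type: LoadBalancer
+    #   annotations:
+    #     cloud.google.com/load-balancer-type: "Internal"
+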
+    tls:
+      # -- Configure automatic TLS certificates generation.
+      # A Kubernetes CronJob is used to generate any
+      # certificates not provided by the user at installation
+      # time.
+      auto:
+        # -- When set to true, automatically generate a CA and certificates to
+        # enable mTLS between clustermesh-apiserver and external workload instances.
+        # If set to false, the certs must be provided by setting the appropriate values below.
+        enabled: true
+        # Sets the method to auto-generate certificates. Supported values:
+        # - helm: This method uses Helm to generate all certificates.
+        # - cronJob: This method uses a Kubernetes CronJob to generate any
+        #   certificates not provided by the user at installation
+        #   time.
+        # - certmanager: This method uses cert-manager to generate & rotate certificates.
+        method: helm
+        # -- Generated certificates validity duration in days.
+        certValidityDuration: 1095
+        # -- Schedule for certificates regeneration (regardless of their expiration date).
+        # Only used if method is "cronJob". If nil, then no recurring job will be created.
+        # Instead, only the one-shot job is deployed to generate the certificates at
+        # installation time.
+        #
+        # Due to the out-of-band distribution of client certs to external workloads the
+        # CA is (re)generated only if it is not provided as a helm value and the k8s
+        # secret is manually deleted.
+        #
+        # Defaults to none. Commented syntax gives midnight of the first day of every
+        # fourth month. For syntax, see
+        # https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule
+        # schedule: "0 0 1 */4 *"
+
+        # [Example]
+        # certManagerIssuerRef:
+        #   group: cert-manager.io
+        #   kind: ClusterIssuer
+        #   name: ca-issuer
+        # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager.
+        # If not specified, a CA issuer will be created.
+        certManagerIssuerRef: {}
+      # -- base64 encoded PEM values for the ExternalWorkload CA certificate and private key.
+      ca:
+        # -- Optional CA cert. If it is provided, it will be used by the 'cronJob' method to
+        # generate all other certificates. Otherwise, an ephemeral CA is generated.
+        cert: ""
+        # -- Optional CA private key. If it is provided, it will be used by the 'cronJob' method to
+        # generate all other certificates. Otherwise, an ephemeral CA is generated.
+        key: ""
+      # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key.
+      # Used if 'auto' is not enabled.
+      server:
+        cert: ""
+        key: ""
+        # -- Extra DNS names added to certificate when it's auto generated
+        extraDnsNames: []
+        # -- Extra IP addresses added to certificate when it's auto generated
+        extraIpAddresses: []
+      # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key.
+      # Used if 'auto' is not enabled.
+      admin:
+        cert: ""
+        key: ""
+      # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key.
+      # Used if 'auto' is not enabled.
+      client:
+        cert: ""
+        key: ""
+      # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key.
+      # Used if 'auto' is not enabled.
+      remote:
+        cert: ""
+        key: ""
+
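+# [Example] A hedged sketch (assumes cert-manager is already installed in the
+# cluster): letting cert-manager issue and rotate the clustermesh-apiserver
+# certificates instead of the default Helm-generated ones, reusing the issuer
+# reference shown in the [Example] block above:
+# clustermesh:
+#   apiserver:
+#     tls:
+#       auto:
+#         enabled: true
+#         method: certmanager
+#         certManagerIssuerRef:
+#           group: cert-manager.io
+#           kind: ClusterIssuer
+#           name: ca-issuer
+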
+# -- Configure external workloads support
+externalWorkloads:
+  # -- Enable support for external workloads, such as VMs (false by default).
+  enabled: false
+
+# -- Configure cgroup related configuration
+cgroup:
+  autoMount:
+    # -- Enable auto mount of cgroup2 filesystem.
+    # When `autoMount` is enabled, cgroup2 filesystem is mounted at
+    # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod.
+    # If users disable `autoMount`, it is expected that they have already mounted
+    # the cgroup2 filesystem at the specified `cgroup.hostRoot` path; the volume
+    # is then mounted inside the cilium agent pod at the same path.
+    enabled: true
+  # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
+  hostRoot: /run/cilium/cgroupv2
+
+# -- Configure whether to enable auto detect of terminating state for endpoints
+# in order to support graceful termination.
+enableK8sTerminatingEndpoint: true
+
+# -- Configure whether to unload DNS policy rules on graceful shutdown
+# dnsPolicyUnloadOnShutdown: false
+
+# -- Configure the key of the taint indicating that Cilium is not ready on the node.
+# When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint in its decisions, allowing the cluster to scale up.
+agentNotReadyTaintKey: "node.cilium.io/agent-not-ready"
+
+dnsProxy:
+  # -- DNS response code for rejecting DNS requests, available options are 'nameError' and 'refused'.
+  dnsRejectResponseCode: refused
+  # -- Allow the DNS proxy to compress responses to endpoints when they are larger
+  # than 512 bytes or the maximum size advertised via the EDNS0 option, if present.
+  enableDnsCompression: true
+  # -- Maximum number of IPs to maintain per FQDN name for each endpoint.
+  endpointMaxIpPerHostname: 50
+  # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
+  idleConnectionGracePeriod: 0s
+  # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
+  maxDeferredConnectionDeletes: 10000
+  # -- The minimum time, in seconds, to use DNS data for toFQDNs policies.
+  minTtl: 3600
+  # -- DNS cache data at this path is preloaded on agent startup.
+  preCache: ""
+  # -- Global port on which the in-agent DNS proxy should listen. Default 0 is an OS-assigned port.
+  proxyPort: 0
+  # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
+  proxyResponseMaxDelay: 100ms
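+
+# [Example] A hedged illustration of the agentNotReadyTaintKey note above: when
+# the Cluster Autoscaler manages this cluster, a key carrying the ignore-taint
+# prefix (suffix chosen here for illustration only) lets the autoscaler
+# disregard the not-ready taint while nodes are still starting up:
+# agentNotReadyTaintKey: "ignore-taint.cluster-autoscaler.kubernetes.io/cilium-agent-not-ready"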