# hive-apps/.archive/crowdsec/values/crowdsec.yaml

# yaml-language-server: $schema=https://raw.githubusercontent.com/crowdsecurity/helm-charts/main/charts/crowdsec/values.schema.json
# Default values for crowdsec-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- raw log format for container logs: json (docker) or cri (containerd)
container_runtime: containerd
image:
  # -- docker image repository name
  repository: crowdsecurity/crowdsec
  # -- pullPolicy
  pullPolicy: IfNotPresent
  # -- docker image tag
  tag: ""
# Here you can specify your own custom configuration to be loaded into the crowdsec agent or LAPI.
# Each config needs to be a multi-line string using '|' in YAML.
# For the agent, these configs are loaded: parsers, scenarios, postoverflows, simulation.yaml.
# For the LAPI, these configs are loaded: profiles.yaml, notifications, console.yaml.
config:
  # -- To better understand stages in parsers, you can take a look at https://docs.crowdsec.net/docs/next/parsers/intro/
  parsers:
    s00-raw: {}
    s01-parse: {}
      # example-parser.yaml: |
      #   filter: "evt.Line.Labels.type == 'myProgram'"
      #   onsuccess: next_stage
      #   ....
    s02-enrich: {}
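      # A hedged sketch of an enrichment-stage whitelist, modelled on the upstream
      # crowdsecurity/whitelists format; the file name, parser name and CIDR ranges
      # below are assumptions for illustration, not values from this deployment.
      # my-whitelists.yaml: |
      #   name: my/whitelists
      #   description: "Whitelist events from private ipv4 addresses"
      #   whitelist:
      #     reason: "private ipv4 ranges"
      #     cidr:
      #       - "10.0.0.0/8"
      #       - "192.168.0.0/16"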
  # -- to better understand how to write a scenario, you can take a look at https://docs.crowdsec.net/docs/next/scenarios/intro
  scenarios: {}
    # myScenario.yaml: |
    #   type: trigger
    #   name: myName/MyScenario
    #   description: "Detect bruteforce on myService"
    #   filter: "evt.Meta.log_type == 'auth_bf_log'"
    #   ...
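    # For comparison with the trigger example above, a hedged leaky-bucket sketch;
    # the name, filter, capacity and leakspeed are illustrative assumptions only.
    # myLeakyScenario.yaml: |
    #   type: leaky
    #   name: myName/myService-bf
    #   description: "Detect bruteforce on myService (leaky bucket)"
    #   filter: "evt.Meta.log_type == 'auth_bf_log'"
    #   groupby: "evt.Meta.source_ip"
    #   capacity: 5
    #   leakspeed: "10s"
    #   blackhole: 1m
    #   labels:
    #     remediation: true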
  # -- to better understand how to write a postoverflow, you can take a look at (https://docs.crowdsec.net/docs/next/whitelist/create/#whitelist-in-postoverflows)
  postoverflows:
    s00-enrich: {}
      # rdnsEnricher.yaml: |
      #   ...
    s01-whitelist: {}
      # myRdnsWhitelist.yaml: |
      #   ...
  # -- Simulation configuration (https://docs.crowdsec.net/docs/next/scenarios/simulation/)
  simulation.yaml: ""
  # |
  #   simulation: false
  #   exclusions:
  #     - crowdsecurity/ssh-bf
  console.yaml: ""
  # |
  #   share_manual_decisions: true
  #   share_tainted: true
  #   share_custom: true
  # -- Profiles configuration (https://docs.crowdsec.net/docs/next/profiles/format/#profile-configuration-example)
  profiles.yaml: ""
  # |
  #   name: default_ip_remediation
  #   debug: true
  #   filters:
  #     - Alert.Remediation == true && Alert.GetScope() == "Ip"
  #   ...
  # -- notifications configuration (https://docs.crowdsec.net/docs/next/notification_plugins/intro)
  notifications: {}
    # email.yaml: |
    #   type: email
    #   name: email_default
    #   # One of "trace", "debug", "info", "warn", "error", "off"
    #   log_level: info
    #   ...
    # slack.yaml: ""
    # http.yaml: ""
    # splunk.yaml: ""
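    # A hedged sketch of what http.yaml could contain, based on the http notification
    # plugin documented at https://docs.crowdsec.net/docs/next/notification_plugins/intro;
    # the url is a placeholder, and a profile must reference "http_default" for it to fire.
    # http.yaml: |
    #   type: http
    #   name: http_default
    #   log_level: info
    #   format: |
    #     {{ . | toJson }}
    #   url: https://webhook.example.com/crowdsec  # assumed endpoint
    #   method: POST
    #   headers:
    #     Content-Type: application/json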
tls:
  enabled: false
  caBundle: true
  insecureSkipVerify: false
  certManager:
    enabled: true
  bouncer:
    secret: "{{ .Release.Name }}-bouncer-tls"
    reflector:
      namespaces: []
  agent:
    tlsClientAuth: true
    secret: "{{ .Release.Name }}-agent-tls"
    reflector:
      namespaces: []
  lapi:
    secret: "{{ .Release.Name }}-lapi-tls"
# If you want to specify secrets that will be used for all your crowdsec agents,
# they can be provided as env variables.
secrets:
  # -- agent username (default is generated randomly)
  username: ""
  # -- agent password (default is generated randomly)
  password: ""
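  # A hedged example of pinning fixed agent credentials instead of the generated ones;
  # both values below are placeholders and should come from a proper secret store.
  # username: "crowdsec-agent"
  # password: "change-me"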
# lapi deploys a pod with the crowdsec LAPI and the dashboard as a Deployment
lapi:
  # -- environment variables from crowdsecurity/crowdsec docker image
  env:
    - name: DISABLE_ONLINE_API
      value: "true"
    # by default, disable the agent on this pod because it only needs to run the local API
    # - name: DISABLE_AGENT
    #   value: "true"
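    # A hedged sketch: the crowdsecurity/crowdsec image also accepts BOUNCER_KEY_<name>
    # variables to pre-register bouncers against the LAPI; the bouncer name and key below
    # are placeholders, not values used by this deployment.
    # - name: BOUNCER_KEY_traefik
    #   value: "<generated-bouncer-api-key>"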
  # -- Enable ingress lapi object
  ingress:
    enabled: false
    annotations:
      # we only want http to the backend so we need this annotation
      nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
    # labels: {}
    ingressClassName: "" # nginx
    host: "" # crowdsec-api.example.com
    # tls: {}
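    # A hedged sketch of enabling this ingress, mirroring the dashboard ingress below;
    # the host and secret name are assumptions.
    # enabled: true
    # ingressClassName: "ingress-internal"
    # host: "crowdsec-api.dc"
    # tls:
    #   - secretName: crowdsec-api-tls
    #     hosts:
    #       - crowdsec-api.dc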
  dashboard:
    # -- Enable Metabase Dashboard (by default disabled)
    enabled: true
    image:
      # -- docker image repository name
      repository: metabase/metabase
      # -- pullPolicy
      pullPolicy: IfNotPresent
      # -- docker image tag
      tag: "v0.41.5"
    # -- Metabase SQLite static DB containing Dashboards
    assetURL: https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase_sqlite.zip
    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
    ##
    resources: {}
    # limits:
    #   memory: 2Gi
    # requests:
    #   cpu: 150m
    #   memory: 1700Mi
    # -- Enable ingress object
    ingress:
      enabled: true
      annotations:
        cert-manager.io/cluster-issuer: vault-issuer
        # metabase only supports http so we need this annotation
        nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
      # labels: {}
      ingressClassName: "ingress-internal" # nginx
      host: "crowdsec.dc" # metabase.example.com
      tls:
        - secretName: crowdsec-tls
          hosts:
            - crowdsec.dc
  resources:
    limits:
      memory: 100Mi
    requests:
      cpu: 150m
      memory: 100Mi
  # -- Enable persistent volumes
  persistentVolume:
    # -- Persistent volume for data folder. Stores e.g. registered bouncer api keys
    data:
      enabled: true
      accessModes:
        - ReadWriteOnce
      storageClassName: ""
      existingClaim: ""
      size: 1Gi
    # -- Persistent volume for config folder. Stores e.g. online api credentials
    config:
      enabled: true
      accessModes:
        - ReadWriteOnce
      storageClassName: ""
      existingClaim: ""
      size: 100Mi
  service:
    type: ClusterIP
    labels: {}
    annotations: {}
    externalIPs: []
    loadBalancerIP: null
    loadBalancerClass: null
    externalTrafficPolicy: Cluster
  # -- nodeSelector for lapi
  nodeSelector: {}
  # -- tolerations for lapi
  tolerations: {}
  # -- Enable service monitoring (exposes "metrics" port "6060" for Prometheus)
  metrics:
    enabled: false
    # -- Creates a ServiceMonitor so Prometheus will monitor this service
    # -- Prometheus needs to be configured to watch on all namespaces for ServiceMonitors
    # -- See the documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
    # -- See also: https://github.com/prometheus-community/helm-charts/issues/106#issuecomment-700847774
    serviceMonitor:
      enabled: false
  strategy:
    type: RollingUpdate
# agent deploys a pod on every node as a DaemonSet to read the wanted pods' logs
agent:
  # -- To add custom acquisitions using available datasources (https://docs.crowdsec.net/docs/next/data_sources/intro)
  additionalAcquisition: []
  # - source: kinesis
  #   stream_name: my-stream
  #   labels:
  #     type: mytype
  # - source: syslog
  #   listen_addr: 127.0.0.1
  #   listen_port: 4242
  #   labels:
  #     type: syslog
  acquisition:
    # -- Specify each pod whose logs you want to process (namespace, podName and program)
    - namespace: "ingress-internal" #ingress-nginx
      # -- to select pod logs to process
      podName: "ingress-internal-*" #ingress-nginx-controller-*
      # -- program name related to the specific parser you will use (see https://hub.crowdsec.net/author/crowdsecurity/configurations/docker-logs)
      program: "nginx" #nginx
    - namespace: "ingress-external" #ingress-nginx
      podName: "ingress-external-*" #ingress-nginx-controller-*
      program: "nginx" #nginx
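    # A hedged, commented-out example of a third acquisition entry for a Traefik ingress;
    # the namespace and pod name pattern are assumptions, and the agent would also need
    # the crowdsecurity/traefik collection for the "traefik" program to be parsed.
    # - namespace: "traefik"
    #   podName: "traefik-*"
    #   program: "traefik"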
  resources:
    limits:
      memory: 100Mi
    requests:
      cpu: 150m
      memory: 100Mi
  # -- Enable persistent volumes
  persistentVolume:
    # -- Persistent volume for config folder. Stores local config (parsers, scenarios etc.)
    config:
      enabled: false
      accessModes:
        - ReadWriteOnce
      storageClassName: ""
      existingClaim: ""
      size: 100Mi
  # -- environment variables from crowdsecurity/crowdsec docker image
  env:
    # by default we configure the docker-logs parser to be able to parse docker logs in k8s
    # by default we disable the local API on the agent pod
    # - name: SCENARIOS
    #   value: "scenario/name otherScenario/name"
    # - name: PARSERS
    #   value: "parser/name otherParser/name"
    # - name: POSTOVERFLOWS
    #   value: "postoverflow/name otherPostoverflow/name"
    # - name: CONFIG_FILE
    #   value: "/etc/crowdsec/config.yaml"
    # - name: DSN
    #   value: "file:///var/log/toto.log"
    # - name: TYPE
    #   value: "Labels.type_for_time-machine_mode"
    # - name: TEST_MODE
    #   value: "false"
    # - name: TZ
    #   value: ""
    # - name: DISABLE_AGENT
    #   value: "false"
    - name: DISABLE_ONLINE_API
      value: "true"
    - name: COLLECTIONS
      value: "crowdsecurity/nginx"
    # - name: LEVEL_TRACE
    #   value: "false"
    # - name: LEVEL_DEBUG
    #   value: "false"
    # - name: LEVEL_INFO
    #   value: "false"
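    # A hedged note: COLLECTIONS accepts a space-separated list, so further hub collections
    # could be appended here, e.g. (collection names to be verified on https://hub.crowdsec.net):
    # - name: COLLECTIONS
    #   value: "crowdsecurity/nginx crowdsecurity/base-http-scenarios"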
  # -- nodeSelector for agent
  nodeSelector: {}
  # -- tolerations for agent
  tolerations: {}
  # -- Enable service monitoring (exposes "metrics" port "6060" for Prometheus)
  metrics:
    enabled: false
    # -- Creates a ServiceMonitor so Prometheus will monitor this service
    # -- Prometheus needs to be configured to watch on all namespaces for ServiceMonitors
    # -- See the documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
    # -- See also: https://github.com/prometheus-community/helm-charts/issues/106#issuecomment-700847774
    serviceMonitor:
      enabled: false
  service:
    type: ClusterIP
    labels: {}
    annotations: {}
    externalIPs: []
    loadBalancerIP: null
    loadBalancerClass: null
    externalTrafficPolicy: Cluster
  # -- wait-for-lapi init container
  wait_for_lapi:
    image:
      # -- docker image repository name
      repository: busybox
      # -- pullPolicy
      pullPolicy: IfNotPresent
      # -- docker image tag
      tag: "1.28"
#service: {}