K3S Initial Commit

nold 1 year ago
parent be478af8c5
commit cb7f18844a
  1. 6294
      crds/cert-manager.yaml
  2. 4
      manifests/heqet-apps.yaml
  3. 8
      templates/_helpers.tpl
  4. 16
      templates/acme-clusterissuer.yaml
  5. 25
      templates/dashboard/deployment.yaml
  6. 27
      templates/dashboard/ingress.yaml
  7. 19
      templates/dashboard/service.yaml
  8. 96
      templates/external-dns.yaml
  9. 2
      templates/heqet-apps.yaml
  10. 129
      values.d/cert-manager.yaml
  11. 518
      values.d/jaeger.yaml
  12. 200
      values.d/kubernetes-dashboard.yaml
  13. 12
      values.d/nginx-ingress.yaml
  14. 146
      values.d/rook.yaml
  15. 5
      values.d/vault.yaml
  16. 126
      values.yaml

File diff suppressed because it is too large Load Diff

@ -3,8 +3,6 @@ apiVersion: v1
kind: Namespace
metadata:
name: heqet
spec: {}
status: {}
---
apiVersion: argoproj.io/v1alpha1
kind: Application
@ -18,7 +16,7 @@ spec:
source:
path: .
repoURL: 'https://github.com/nold360/heqet'
targetRevision: HEAD
targetRevision: k3s
helm:
valueFiles:
- values.yaml

@ -13,15 +13,7 @@ ingress:
paths: []
{{- end }}
annotations:
kubernetes.io/ingress.class: {{ .ingressClass | default "nginx" }}
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: {{ .clusterIssuer | default "letsencrypt" }}
external-dns.alpha.kubernetes.io/hostname: {{ .vhost }}
nginx.ingress.kubernetes.io/ssl-redirect: "true"
tls:
- secretName: {{ .name }}-le-tls
hosts:
- {{ .vhost | quote }}
{{- end -}}
{{- end -}}

@ -1,16 +0,0 @@
{{ if .Values.generators.ingress.acme.enabled }}
apiVersion: cert-manager.io/v1alpha3
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
server: {{ .Values.generators.ingress.acme.server | default "https://acme-staging-v02.api.letsencrypt.org/directory" }}
email: {{ .Values.generators.ingress.acme.email | quote }}
privateKeySecretRef:
name: letsencrypt-clusterissuer
solvers:
- http01:
ingress:
class: nginx
{{- end }}

@ -1,25 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
app: hqt-dashboard
name: hqt-dashboard
namespace: heqet
spec:
replicas: 1
selector:
matchLabels:
app: hqt-dashboard
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: hqt-dashboard
spec:
containers:
- image: nold360/hqt
name: hqt
resources: {}
status: {}

@ -1,27 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: hqt-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
kubernetes.io/ingress.class: {{ .ingressClass | default "nginx" }}
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: {{ .clusterIssuer | default "letsencrypt" }}
external-dns.alpha.kubernetes.io/hostname: hqt.{{ $.Values.defaults.domain }}
nginx.ingress.kubernetes.io/ssl-redirect: "true"
labels:
app: hqt-dashboard
namespace: heqet
spec:
rules:
- host: hqt.{{ $.Values.defaults.domain }}
http:
paths:
- backend:
serviceName: hqt-dashboard
servicePort: 80
path: null
tls:
- hosts:
- hqt.{{ $.Values.defaults.domain }}
secretName: hqt-le-tls

@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: hqt-dashboard
name: hqt-dashboard
namespace: heqet
spec:
ports:
- name: "80"
port: 80
protocol: TCP
targetPort: 80
selector:
app: hqt-dashboard
type: ClusterIP
status:
loadBalancer: {}

@ -1,96 +0,0 @@
# External DNS Provider using OVH
# Service Annotation: external-dns.alpha.kubernetes.io/hostname: example.com
# See: https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/ovh.md
#
apiVersion: v1
kind: Namespace
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-1"
name: external-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: external-dns
annotations:
argocd.argoproj.io/sync-wave: "-1"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: external-dns
annotations:
argocd.argoproj.io/sync-wave: "-1"
rules:
- apiGroups: [""]
resources: ["services"]
verbs: ["get","watch","list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
annotations:
argocd.argoproj.io/sync-wave: "-1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: external-dns
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: external-dns
annotations:
argocd.argoproj.io/sync-wave: "0"
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
args:
- --source=ingress
- --domain-filter=lib42.me
- --provider=ovh
env:
- name: OVH_APPLICATION_KEY
valueFrom:
secretKeyRef:
name: external-dns-auth
key: OVH_APPLICATION_KEY
- name: OVH_APPLICATION_SECRET
valueFrom:
secretKeyRef:
name: external-dns-auth
key: OVH_APPLICATION_SECRET
- name: OVH_CONSUMER_KEY
valueFrom:
secretKeyRef:
name: external-dns-auth
key: OVH_CONSUMER_KEY

@ -33,7 +33,7 @@ spec:
source:
path: {{ .path | default "." | quote }}
repoURL: {{ .repoURL | default $.Values.defaults.repoURL | quote }}
targetRevision: {{ default "HEAD" .targetRevision | quote }}
targetRevision: {{ .targetRevision | default "HEAD" | quote }}
{{ if .chart }}chart: {{ .chart | quote }}{{ end }}
helm:
{{- if .parameters }}

@ -1,129 +0,0 @@
# Default values for cert-manager.
# Heqet: CRDs via crds/
installCRDs: false
# Heqet: More stuff
cainjector:
image:
tag: v0.15.1
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
# Optional priority class to be used for the cert-manager pods
priorityClassName: ""
replicaCount: 1
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
image:
repository: quay.io/jetstack/cert-manager-controller
tag: v0.15.1
pullPolicy: IfNotPresent
# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
# resources. By default, the same namespace as cert-manager is deployed within is
# used. This namespace will not be automatically created by the Helm chart.
clusterResourceNamespace: ""
leaderElection:
# Override the namespace used to store the ConfigMap for leader election
namespace: ""
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Optional additional arguments
extraArgs: []
# Use this flag to set a namespace that cert-manager will use to store
# supporting resources required for each ClusterIssuer (default is kube-system)
# - --cluster-resource-namespace=kube-system
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
# Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
podAnnotations: {}
podLabels: {}
# Optional DNS settings, useful if you have a public and private DNS zone for
# the same domain on Route 53. What follows is an example of ensuring
# cert-manager can access an ingress or DNS TXT records at all times.
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
# the cluster to work.
# podDnsPolicy: "None"
# podDnsConfig:
# nameservers:
# - "1.1.1.1"
# - "8.8.8.8"
nodeSelector: {}
ingressShim: {}
# defaultIssuerName: ""
# defaultIssuerKind: ""
# defaultACMEChallengeType: ""
# defaultACMEDNS01ChallengeProvider: ""
webhook:
enabled: true
image:
tag: v0.15.1
# Use these variables to configure the HTTP_PROXY environment variables
# http_proxy: "http://proxy:8080"
# https_proxy: "https://proxy:8080"
# no_proxy: 127.0.0.1,localhost
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []

@ -1,518 +0,0 @@
# Default values for jaeger.
# This is a YAML-formatted file.
# Jaeger values are grouped by component. Cassandra values override subchart values
provisionDataStore:
cassandra: true
elasticsearch: false
kafka: false
tag: 1.17.1
nameOverride: ""
fullnameOverride: ""
storage:
# allowed values (cassandra, elasticsearch)
type: cassandra
cassandra:
host: cassandra
port: 9042
tls:
enabled: false
secretName: cassandra-tls-secret
user: user
usePassword: true
password: password
keyspace: jaeger_v1_test
## Use existing secret (ignores previous password)
# existingSecret:
## Cassandra related env vars to be configured on the concerned components
env: {}
# CASSANDRA_SERVERS: cassandra
# CASSANDRA_PORT: 9042
# CASSANDRA_KEYSPACE: jaeger_v1_test
# CASSANDRA_TLS_ENABLED: false
## Cassandra related cmd line opts to be configured on the concerned components
cmdlineParams: {}
# cassandra.servers: cassandra
# cassandra.port: 9042
# cassandra.keyspace: jaeger_v1_test
# cassandra.tls.enabled: false
elasticsearch:
scheme: http
host: elasticsearch-master
port: 9200
user: elastic
usePassword: true
password: changeme
# indexPrefix: test
## Use existing secret (ignores previous password)
# existingSecret:
nodesWanOnly: false
env: {}
## ES related env vars to be configured on the concerned components
# ES_SERVER_URLS: http://elasticsearch-master:9200
# ES_USERNAME: elastic
# ES_INDEX_PREFIX: test
## ES related cmd line opts to be configured on the concerned components
cmdlineParams: {}
# es.server-urls: http://elasticsearch-master:9200
# es.username: elastic
# es.index-prefix: test
kafka:
brokers:
- kafka:9092
topic: jaeger_v1_test
authentication: none
# Begin: Override values on the Cassandra subchart to customize for Jaeger
cassandra:
persistence:
# To enable persistence, please see the documentation for the Cassandra chart
enabled: false
config:
cluster_name: jaeger
seed_size: 1
dc_name: dc1
rack_name: rack1
endpoint_snitch: GossipingPropertyFileSnitch
# End: Override values on the Cassandra subchart to customize for Jaeger
# Begin: Override values on the Kafka subchart to customize for Jaeger
kafka:
replicas: 1
configurationOverrides:
"auto.create.topics.enable": true
zookeeper:
replicaCount: 1
# End: Override values on the Kafka subchart to customize for Jaeger
# Begin: Default values for the various components of Jaeger
# This chart has been based on the Kubernetes integration found in the following repo:
# https://github.com/jaegertracing/jaeger-kubernetes/blob/master/production/jaeger-production-template.yml
#
# This is the jaeger-cassandra-schema Job which sets up the Cassandra schema for
# use by Jaeger
schema:
annotations: {}
image: jaegertracing/jaeger-cassandra-schema
pullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 256m
# memory: 128Mi
serviceAccount:
create: true
name:
podAnnotations: {}
podLabels: {}
## Deadline for cassandra schema creation job
activeDeadlineSeconds: 300
env:
MODE: prod
# TRACE_TTL: 172800
# DEPENDENCIES_TTL: 0
# For configurable values of the elasticsearch if provisioned, please see:
# https://github.com/elastic/helm-charts/tree/master/elasticsearch#configuration
elasticsearch: {}
ingester:
enabled: false
podSecurityContext: {}
securityContext: {}
annotations: {}
image: jaegertracing/jaeger-ingester
pullPolicy: IfNotPresent
dnsPolicy: ClusterFirst
cmdlineParams: {}
env: {}
replicaCount: 1
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 10
# targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
service:
annotations: {}
# List of IP ranges that are allowed to access the load balancer (if supported)
loadBalancerSourceRanges: []
type: ClusterIP
resources: {}
# limits:
# cpu: 1
# memory: 1Gi
# requests:
# cpu: 500m
# memory: 512Mi
serviceAccount:
create: true
name:
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
extraSecretMounts: []
extraConfigmapMounts: []
serviceMonitor:
enabled: false
additionalLabels: {}
agent:
podSecurityContext: {}
securityContext: {}
enabled: true
annotations: {}
image: jaegertracing/jaeger-agent
pullPolicy: IfNotPresent
cmdlineParams: {}
env: {}
daemonset:
useHostPort: false
service:
annotations: {}
# List of IP ranges that are allowed to access the load balancer (if supported)
loadBalancerSourceRanges: []
type: ClusterIP
# zipkinThriftPort :accept zipkin.thrift over compact thrift protocol
zipkinThriftPort: 5775
# compactPort: accept jaeger.thrift over compact thrift protocol
compactPort: 6831
# binaryPort: accept jaeger.thrift over binary thrift protocol
binaryPort: 6832
# samplingPort: (HTTP) serve configs, sampling strategies
samplingPort: 5778
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 256m
# memory: 128Mi
serviceAccount:
create: true
name:
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
extraSecretMounts: []
# - name: jaeger-tls
# mountPath: /tls
# subPath: ""
# secretName: jaeger-tls
# readOnly: true
extraConfigmapMounts: []
# - name: jaeger-config
# mountPath: /config
# subPath: ""
# configMap: jaeger-config
# readOnly: true
useHostNetwork: false
dnsPolicy: ClusterFirst
priorityClassName: ""
serviceMonitor:
enabled: false
additionalLabels: {}
collector:
podSecurityContext: {}
securityContext: {}
enabled: true
annotations: {}
image: jaegertracing/jaeger-collector
pullPolicy: IfNotPresent
dnsPolicy: ClusterFirst
cmdlineParams: {}
env: {}
replicaCount: 1
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 10
# targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
service:
annotations: {}
# List of IP ranges that are allowed to access the load balancer (if supported)
loadBalancerSourceRanges: []
type: ClusterIP
grpc:
port: 14250
# nodePort:
# tchannelPort: used by jaeger-agent to send spans in jaeger.thrift format
tchannel:
port: 14267
# nodePort:
# httpPort: can accept spans directly from clients in jaeger.thrift format
http:
port: 14268
# nodePort:
# can accept Zipkin spans in JSON or Thrift
zipkin: {}
# port: 9411
# nodePort:
resources: {}
# limits:
# cpu: 1
# memory: 1Gi
# requests:
# cpu: 500m
# memory: 512Mi
serviceAccount:
create: true
name:
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
extraSecretMounts: []
# - name: jaeger-tls
# mountPath: /tls
# subPath: ""
# secretName: jaeger-tls
# readOnly: true
extraConfigmapMounts: []
# - name: jaeger-config
# mountPath: /config
# subPath: ""
# configMap: jaeger-config
# readOnly: true
# samplingConfig: |-
# {
# "service_strategies": [
# {
# "service": "foo",
# "type": "probabilistic",
# "param": 0.8,
# "operation_strategies": [
# {
# "operation": "op1",
# "type": "probabilistic",
# "param": 0.2
# },
# {
# "operation": "op2",
# "type": "probabilistic",
# "param": 0.4
# }
# ]
# },
# {
# "service": "bar",
# "type": "ratelimiting",
# "param": 5
# }
# ],
# "default_strategy": {
# "type": "probabilistic",
# "param": 1
# }
# }
serviceMonitor:
enabled: false
additionalLabels: {}
query:
enabled: true
podSecurityContext: {}
securityContext: {}
agentSidecar:
enabled: true
annotations: {}
image: jaegertracing/jaeger-query
pullPolicy: IfNotPresent
dnsPolicy: ClusterFirst
cmdlineParams: {}
env: {}
replicaCount: 1
service:
annotations: {}
type: ClusterIP
# List of IP ranges that are allowed to access the load balancer (if supported)
loadBalancerSourceRanges: []
port: 80
# Specify a specific node port when type is NodePort
# nodePort: 32500
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/hostname: jaeger.lib42.me
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
hosts:
- jaeger.lib42.me
tls:
- hosts:
- jaeger.lib42.me
secretName: jaeger-le-tls
# Used to create an Ingress record.
# hosts:
# - chart-example.local
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 256m
# memory: 128Mi
serviceAccount:
create: true
name:
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
extraConfigmapMounts: []
# - name: jaeger-config
# mountPath: /config
# subPath: ""
# configMap: jaeger-config
# readOnly: true
serviceMonitor:
enabled: false
additionalLabels: {}
spark:
enabled: false
annotations: {}
image: jaegertracing/spark-dependencies
tag: latest
pullPolicy: Always
cmdlineParams: {}
schedule: "49 23 * * *"
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 256m
# memory: 128Mi
serviceAccount:
create: true
name:
nodeSelector: {}
tolerations: []
affinity: {}
extraSecretMounts: []
extraConfigmapMounts: []
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
esIndexCleaner:
enabled: false
securityContext:
runAsUser: 1000
podSecurityContext:
runAsUser: 1000
annotations: {}
image: jaegertracing/jaeger-es-index-cleaner
tag: latest
pullPolicy: Always
cmdlineParams: {}
schedule: "55 23 * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 256m
# memory: 128Mi
numberOfDays: 7
serviceAccount:
create: true
name:
nodeSelector: {}
tolerations: []
affinity: {}
extraSecretMounts: []
extraConfigmapMounts: []
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
# End: Default values for the various components of Jaeger
hotrod:
enabled: false
podSecurityContext: {}
securityContext: {}
replicaCount: 1
image:
repository: jaegertracing/example-hotrod
pullPolicy: Always
service:
annotations: {}
name: hotrod
type: ClusterIP
# List of IP ranges that are allowed to access the load balancer (if supported)
loadBalancerSourceRanges: []
port: 80
ingress:
enabled: false
# Used to create Ingress record (should be used with service.type: ClusterIP).
hosts:
- chart-example.local
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
serviceAccount:
create: true
name:
nodeSelector: {}
tolerations: []
affinity: {}
tracing:
host: null
port: 6831

@ -1,200 +0,0 @@
# Default values for kubernetes-dashboard
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
image:
repository: k8s.gcr.io/kubernetes-dashboard-amd64
tag: v1.10.1
pullPolicy: IfNotPresent
pullSecrets: []
replicaCount: 1
## Here annotations can be added to the kubernetes dashboard deployment
annotations: {}
## Here labels can be added to the kubernetes dashboard deployment
##
labels: {}
# kubernetes.io/name: "Kubernetes Dashboard"
## Enable possibility to skip login
enableSkipLogin: false
## Serve application over HTTP without TLS
# Heqet: We have Ingress
enableInsecureLogin: true
## Additional container arguments
##
# extraArgs:
# - --enable-skip-login
# - --enable-insecure-login
# - --system-banner="Welcome to Kubernetes"
## Additional container environment variables
##
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
# Annotations to be added to kubernetes dashboard pods
## Recommended value
# podAnnotations:
# seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
podAnnotations: {}
## SecurityContext for the kubernetes dashboard container
## Recommended values
dashboardContainerSecurityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
## The two values below can be set here or at podLevel (using variable .securityContext)
runAsUser: 1001
runAsGroup: 2001
#dashboardContainerSecurityContext: {}
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute"
## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# priorityClassName: ""
service:
type: ClusterIP
externalPort: 443
## This allows an override of the heapster service name
## Default: {{ .Chart.Name }}
##
# nameOverride:
# LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to
# set allowed inbound rules on the security group assigned to the master load balancer
# loadBalancerSourceRanges: []
## Kubernetes Dashboard Service annotations
##
## For GCE ingress, the following annotation is required:
## service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}' if enableInsecureLogin=false
## or
## service.alpha.kubernetes.io/app-protocols: '{"http":"HTTP"}' if enableInsecureLogin=true
annotations: {}
## Here labels can be added to the Kubernetes Dashboard service
##
labels: {}
# kubernetes.io/name: "Kubernetes Dashboard"
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
ingress:
## If true, Kubernetes Dashboard Ingress will be created.
##
enabled: true
## Kubernetes Dashboard Ingress annotations
##
## Add custom labels
# labels:
# key: value
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: 'true'
## If you plan to use TLS backend with enableInsecureLogin set to false
## (default), you need to uncomment the below.
## If you use ingress-nginx < 0.21.0
# nginx.ingress.kubernetes.io/secure-backends: "true"
## if you use ingress-nginx >= 0.21.0
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
## Kubernetes Dashboard Ingress paths
##
paths:
- /
# - /*
## Kubernetes Dashboard Ingress hostnames
## Must be provided if Ingress is enabled
##
# hosts:
# - kubernetes-dashboard.domain.com
## Kubernetes Dashboard Ingress TLS configuration
## Secrets must be manually created in the namespace
##
# tls:
# - secretName: kubernetes-dashboard-tls
# hosts:
# - kubernetes-dashboard.domain.com
rbac:
# Specifies whether RBAC resources should be created
create: true
# Specifies whether cluster-admin ClusterRole will be used for dashboard
# ServiceAccount (NOT RECOMMENDED).
clusterAdminRole: false
# Start in ReadOnly mode.
# Only dashboard-related Secrets and ConfigMaps will still be available for writing.
#
# Turn OFF clusterAdminRole to use clusterReadOnlyRole.
#
# The basic idea of the clusterReadOnlyRole comparing to the clusterAdminRole
# is not to hide all the secrets and sensitive data but more
# to avoid accidental changes in the cluster outside the standard CI/CD.
#
# Same as for clusterAdminRole, it is NOT RECOMMENDED to use this version in production.
# Instead you should review the role and remove all potentially sensitive parts such as
# access to persistentvolumes, pods/log etc.
clusterReadOnlyRole: false
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
livenessProbe:
# Number of seconds to wait before sending first probe
initialDelaySeconds: 30
# Number of seconds to wait for probe response
timeoutSeconds: 30
podDisruptionBudget:
# https://kubernetes.io/docs/tasks/run-application/configure-pdb/
enabled: false
minAvailable:
maxUnavailable:
## PodSecurityContext for pod level securityContext
##
# securityContext:
# runAsUser: 1001
# runAsGroup: 2001
securityContext: {}
networkPolicy: true

@ -1,12 +0,0 @@
controller:
admissionWebhooks:
enabled: false
patch:
enabled: true
publishService:
enabled: true
image:
repository: bitnami/nginx-ingress-controller
rbac:
create: true

@ -1,146 +0,0 @@
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
prefix: rook
repository: rook/ceph
tag: v1.2.0-42.gf01ce98
pullPolicy: IfNotPresent
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
nodeSelector:
# Constrain the rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd
# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
tolerations: []
# Whether rook watches its current namespace for CRDs or the entire cluster, defaults to false
currentNamespaceOnly: false
# Interval at which to get the ceph status and update the cluster custom resource status
cephStatusCheckInterval: "60s"
mon:
healthCheckInterval: "45s"
monOutTimeout: "600s"
## Annotations to be added to pod
annotations: {}
## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL
logLevel: INFO
## If true, create & use RBAC resources
##
rbacEnable: true
## If true, create & use PSP resources
##
pspEnable: true
## Settings for whether to disable the drivers or other daemons if they are not
## needed
csi:
enableRbdDriver: true
enableCephfsDriver: true
enableGrpcMetrics: true
enableSnapshotter: true
# Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
# The CSI provisioner would be best to start on the same nodes as other ceph daemons.
# provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# provisionerNodeAffinity: key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# pluginNodeAffinity: key1=value1,value2; key2=value3
#cephfsGrpcMetricsPort: 9091
#cephfsLivenessMetricsPort: 9081
#rbdGrpcMetricsPort: 9090
#rbdLivenessMetricsPort: 9080
#kubeletDirPath: /var/lib/kubelet
#cephcsi:
#image: quay.io/cephcsi/cephcsi:v1.2.2
#registrar:
#image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
#provisioner:
#image: quay.io/k8scsi/csi-provisioner:v1.4.0
#snapshotter:
#image: quay.io/k8scsi/csi-snapshotter:v1.2.2
#attacher:
#image: quay.io/k8scsi/csi-attacher:v1.2.0
enableFlexDriver: false
enableDiscoveryDaemon: true
## if true, run rook operator on the host network
# useOperatorHostNetwork: true
## Rook Agent configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
## flexVolumeDirPath: The path where the Rook agent discovers the flex volume plugins
## libModulesDirPath: The path where the Rook agent can find kernel modules
# agent:
# toleration: NoSchedule
# tolerationKey: key
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# mountSecurityMode: Any
## For information on FlexVolume path, please refer to https://rook.io/docs/rook/master/flexvolume.html
# flexVolumeDirPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
# libModulesDirPath: /lib/modules
# mounts: mount1=/host/path:/container/path,/host/path2:/container/path2
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
# discover:
# toleration: NoSchedule
# tolerationKey: key
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
# Disable it here if you have similar issues.
# For more details see https://github.com/rook/rook/issues/2417
enableSelinuxRelabeling: true
# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux,
# the pod must be running privileged in order to write to the hostPath volume, this must be set to true then.
hostpathRequiresPrivileged: false
# Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret

@ -1,5 +0,0 @@
server:
dev:
enabled: true
ui:
enabled: true

@ -1,25 +1,26 @@
# Install Custom Resource Definitions?
installCRDs: true
installCRDs: false
# Default values that are used for creating ArgoCD `Application` definitions
defaults:
project: "default"
repoURL: https://github.com/nold360/heqet
targetRevision: k3s
server: https://kubernetes.default.svc
automated:
prune: true
selfHeal: false
domain: lib42.me
domain: k3s
# Heqet Functions / Injectors:
generators:
# Nginx Ingress & Cert Manager TLS Injector
# Ingress & Cert Manager TLS Injector
ingress:
enabled: true
# ACME / Letsencrypt Injector
acme:
enabled: true
enabled: false
## Default: https://acme-staging-v02.api.letsencrypt.org/directory
## Change to 'https://acme-v02.api.letsencrypt.org/directory' for real/trusted LE Certs
@ -27,18 +28,6 @@ generators:
email: [email protected]
# # Vault Secret Injector
# # incl. ServiceAccount read-only ["ro"]
# vault:
# enabled: false
# createServiceAccount: true
# defaultPermissions: "ro"
# annotations:
commonAnnotations:
enabled: false # to code...
# App Definitions that will be managed in ArgoCD
apps:
# Heqet
@ -46,113 +35,48 @@ apps:
path: .
syncWave: "-1"
noValues: true
vhost: lib42.me
targetRevision: k3s
vhost: hqt.k3s
# SealedSecrets - Store encrypted secrets in git
- name: sealed-secrets
repoURL: https://github.com/nold360/charts
path: stable/sealed-secrets
repoURL: https://bitnami-labs.github.io/sealed-secrets/
chart: sealed-secrets
targetRevision: 1.15.0-r3
syncWave: "-1"
namespace: sealed-secrets
# ArgoCD - Continuous Deployment from Git
- name: argocd
repoURL: https://github.com/nold360/argo-helm
path: charts/argo-cd
vhost: argocd.lib42.me
repoURL: https://argoproj.github.io/argo-helm
chart: argocd
targetRevision: 3.0.0
vhost: argocd.k3s
ingress: server
vault: server
syncWave: "0"
# for pod annotations / vault/ ...
podSpecRoots:
- server
# Jaeger Tracing
- name: jaeger
repoURL: https://github.com/jaegertracing/helm-charts
path: charts/jaeger
targetRevision: HEAD
vhost: jaeger.lib42.me
# Nginx-Ingress for incoming HTTP/s traffic
- name: nginx-ingress
repoURL: https://github.com/kubernetes/ingress-nginx
path: charts/ingress-nginx
syncWave: "0"
# Cert Manager will handle TLS-Certs
- name: cert-manager
repoURL: https://github.com/jetstack/cert-manager
path: deploy/charts/cert-manager
syncWave: "0"
# Vault Secret Management [currently disabled]
- name: vault
disabled: true
repoURL: https://github.com/hashicorp/vault-helm
vhost: vault.lib42.me
syncWave: "0"
ingress: server
ingressHostsKeymap: true
ignoreDifferences: |
- group: apiextensions.k8s.io
kind: CustomResourceDefinition
jsonPointers:
- /metadata/annotations
- /metadata/labels
- /spec/validation
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
jsonPointers:
- /webhooks
- group: admissionregistration.k8s.io
kind: ValidatingWebhookConfiguration
jsonPointers:
- /webhooks
# Plain old Kubernetes Dashboard
- name: kubernetes-dashboard
disabled: false
disabled: true
namespace: kube-system
repoURL: https://github.com/nold360/charts
path: stable/kubernetes-dashboard
vhost: dashboard.lib42.me
vhost: dashboard.k3s
# Loki / Grafana / Promtail Stack for Logging & Metrics
- name: loki-stack
disabled: false
repoURL: https://github.com/grafana/loki
path: production/helm/loki-stack
vhost: grafana.lib42.me
repoURL: https://grafana.github.io/helm-charts
chart: loki-stack
targetRevision: 2.3.1
vhost: grafana.k3s
ingress: grafana
# Kubeless - Function-as-a-service / Serverless
- name: kubeless
disabled: false
repoURL: https://github.com/nold360/charts
path: incubator/kubeless
ingress: ui
vhost: kubeless.lib42.me
# Eclipse Che IDE
- name: che
repoURL: https://github.com/Nold360/charts-1
path: charts/che
ingress: che
vhost: che.lib42.me
# Polaris - Scan cluster for stuff
- name: polaris
repoURL: https://github.com/FairwindsOps/charts
path: stable/polaris
repoURL: https://charts.fairwinds.com/stable
chart: polaris
targetRevision: 3.1.1
ingress: dashboard
namespace: eclipse-che
vhost: polaris.lib42.me
# Ceph / S3 / Filestorage for on-prem
- name: rook
disabled: true
repoURL: https://charts.rook.io/master
targetRevision: v1.3.0-beta.0.384.g08167f9
chart: rook-ceph
namespace: polaris
vhost: polaris.k3s

Loading…
Cancel
Save