diff --git a/projects/argocd/project.yml b/projects/argocd/project.yml new file mode 100644 index 00000000..0860ad07 --- /dev/null +++ b/projects/argocd/project.yml @@ -0,0 +1,18 @@ +config: + description: ArgoCD - Continuous Deployment from Git + +apps: +- name: argocd + repoURL: https://argoproj.github.io/argo-helm + chart: argo-cd + targetRevision: 3.26.10 + syncWave: "0" + secrets: + - name: argocd-secret + keys: + - admin.password + - server.secretkey + - oidc.auth0.clientSecret + - name: ca-cert + keys: + - ca diff --git a/projects/argocd/values/argocd.yaml b/projects/argocd/values/argocd.yaml new file mode 100644 index 00000000..f698a32d --- /dev/null +++ b/projects/argocd/values/argocd.yaml @@ -0,0 +1,213 @@ +## ArgoCD configuration +## Ref: https://github.com/argoproj/argo-cd +## + +# Optional CRD installation for those without Helm hooks +installCRDs: true + +global: + image: + repository: quay.io/argoproj/argocd + tag: latest +# imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 999 + runAsGroup: 999 + fsGroup: 999 +## Controller +controller: + ## Labels to set container specific security contexts + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + + ## Server metrics controller configuration + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8082' + + clusterAdminAccess: + enabled: true + +## Dex +dex: + enabled: true + + ## Labels to set container specific security contexts + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + +## Redis +redis: + enabled: true + + ## Labels to set container specific security contexts + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + + ## Redis Pod specific security context + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + runAsNonRoot: true + +## Server +server: + extraArgs: + - --insecure + + ## Labels to set container specific security contexts + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + + ## Server metrics service configuration + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8083' + servicePort: 8083 + + ingress: + enabled: true + annotations: + cert-manager.io/cluster-issuer: vault-issuer + kubernetes.io/ingress.class: nginx + hosts: + - argocd.dc + paths: + - / + tls: + - secretName: argocd-tls + hosts: + - argocd.dc + https: false + # dedicated ingress for gRPC as documented at + # https://argoproj.github.io/argo-cd/operator-manual/ingress/ + + ## ArgoCD config + ## reference https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml + configEnabled: true + config: + # Argo CD's externally facing base URL (optional). 
Required when configuring SSO + url: https://argocd.dc + accounts.webhook: apiKey, login + + oidc.config: | + name: Authentik + issuer: https://auth.dc/application/o/argocd/ + clientID: 0c149045b7b87eb80e41fcdd3e788476472d7316 + clientSecret: $oidc.auth0.clientSecret + requestedScopes: ["openid", "profile", "email", "groups"] + + rbacConfig: + policy.csv: | + g, ArgoCDAdmins, role:admin + + # Mount public CA cert + volumeMounts: + - name: certificate + mountPath: /etc/ssl/certs/ca.crt + subPath: ca + + volumes: + - name: certificate + secret: + secretName: ca-cert + defaultMode: 420 + + additionalApplications: [] + + ## Projects + ## reference: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/ + additionalProjects: [] + + ## Enable Admin ClusterRole resources. + ## Enable if you would like to grant rights to ArgoCD to deploy to the local Kubernetes cluster. + clusterAdminAccess: + enabled: true + +## Repo Server +repoServer: + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + + ## Repo server metrics service configuration + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8084' + servicePort: 8084 + + volumes: + - name: var-files + emptyDir: {} + - name: plugins + emptyDir: {} + + volumeMounts: + - mountPath: /home/argocd/cmp-server/plugins + name: plugins + + initContainers: + - name: copy-cmp-server + image: quay.io/argoproj/argocd:latest + command: + - cp + - -n + - /usr/local/bin/argocd + - /var/run/argocd/argocd-cmp-server + volumeMounts: + - mountPath: /var/run/argocd + name: var-files + + extraContainers: + - name: cmp-heqet + command: [/var/run/argocd/argocd-cmp-server] + image: lib42/heqet-cli:latest + securityContext: + runAsNonRoot: true + runAsUser: 999 + volumeMounts: + - mountPath: /var/run/argocd + name: var-files + - mountPath: /home/argocd/cmp-server/plugins + name: plugins + - mountPath: /tmp + name: tmp-dir + + ## Repo server rbac rules + # rbac: + # - apiGroups: + # - argoproj.io + # resources: + # - applications + # verbs: + # - get + # - list + # - watch + +configs: + secret: + createSecret: false diff --git a/projects/backup/project.yml b/projects/backup/project.yml new file mode 100644 index 00000000..1a630955 --- /dev/null +++ b/projects/backup/project.yml @@ -0,0 +1,8 @@ +config: + description: BorgBackup SSH-Server + +apps: +- name: backup-lan + repoURL: https://github.com/lib42/charts.git + path: charts/borgserver + targetRevision: dev diff --git a/projects/backup/values/backup-lan.yaml b/projects/backup/values/backup-lan.yaml new file mode 100644 index 00000000..0173b4c8 --- /dev/null +++ b/projects/backup/values/backup-lan.yaml @@ -0,0 +1,10 @@ +clients: + - name: noldbook + type: ssh-rsa + key: AAAAB3NzaC1yc2EAAAADAQABAAABAQDJc7+boEpuSfjBM5y/qYfAnaGoYFP74yXuDmnlcY9glrRTGV2UVYFQV+fFl8pAT6aiqJUcbylBq+kQFvFHTI2JW7iux+JO+o/eEpMYqoNe5kIewYTHWaBL+6h7B90NIgE8ec1Ce7Oqm9+ttAa51Wu5K5zXXLWHds6nlqLG5llNiSZB4yxCJ/oyj5uQKmeAY+Hr4XjFsnisuaajSrvNaR7gshrme8A7wxn3qORe62ux33bPgEXjwUfPJZrHeeRWBMfnWoHBH1RybwC8FboNDes6gXgx3hJiQ+UfslmmFgpADWos216YX2FKXxDk19K/gXvejSuljO8fCBeQIdo/1xVh + - name: hive + type: ssh-rsa + key: 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDVq6KXD45FNZjWDRytd97YsrOdIP9JnbftO/pbSD8l86GZsxhZ9Gk/SVsGyZ8z3Mi0RNetFvelLoU/QW8lx5ETAUHJuEUj+EjWX6bLEUGWq0YAhcQRVnD70fe0ANbWp6njYL86B5X1EGgT7y55iTW6V/ssLkIKtgUeqV61GaBQoHaHtoaOAVT3yEMGZwhzj5lrdpgPfsQMlnlW/uzfmVMbMSNCmKdQCOnQ/yZ8N1EaReF99zqT8+Z841JmcSTXr1INwk8QAclBOwPqOe/7VWtQdZGoiToT2ro7dHMbzOucauTbw/8GvoxHZvr44PiWffDINWzQyPPtva3s1wOeVQOlmDFsL3LrL7kfUJs1hNz7GKITgpIhPYyhNp8CZ8Jk2zmpjZAHxZiPJkMe0VGRvnYVjUqeLsFwlj/yXsbsa9X+Yd4B15Op060c41FM54X+UiiCt/ZLUOch46Lq9kmF6MNtLSoMeOs+uBCKWaS4iH2r3v+ZuAHuj5MDDk+tPVSoyGQujADHspJ9q7xEhywsF0AFJy1+3ui72Uoo0AUrFzOYq4WveNCNg7CvN1yOC1RsckIQ3aCdmijF3vz/Ndbk7QQzNHtAmIt/0/PGJ0+lsxgE9EPIxOELEzK43XFFu97hXficiBftsjXNPyHRjRn19hCNif36yWpeK8JN1ZNmoMshqQ== + - name: noldface + type: ssh-rsa + key: AAAAB3NzaC1yc2EAAAADAQABAAACAQDPdrtT3OwSPN6t3udItlodb96ITOtBkwF+RG5rxKi1x1j2RHOeoOtkQ8oeh9zRqPnsgeFFbbRAJy1pHaS6wcpKlow0zq7+Xl61/AFgE9mD+yks7JQOP0Cf6N9kq3nYUC3XwBKtuLmwox9+PZX+lPDXVA6gN59Tb1BG/FJN2XbeEzMPUMiNr0B3LAk7sK9P+ipm2FoXi3oLEzS9QKi+1aDfx/K/EWcFLtlPeUYSxGHS18Vl1ad//Cw3XwKY4RGRnI6kfXN41xxTY1TyRRyTnZRoa7+4GudELrqrd6Y722/G+SOg4xhWL3Ns98E7xqC4VarNePEXX2Evom86aU9CkAGTvsMXcHWxDJatihtN2dFAngNfJfkAq7eaGVrrARgtiWw5gKREPaR96PIHSmA3d822nooScX2liDWNbXr0WpwW43MJXhaQKW2s0Cucd/iqxg4t7rP1uJrSaE9nCuXDE3vAbFXDjdWFyNrdOg8akyQ+z+bXJNyk1YQO3mMgahXqHFrVgZhtMgI5+WJYEapM0jjqviXav8eR9h3dkH2k4BrY3pfYsfMmNslwo/3Ot1os0k3k47mW9qQ4gWdAkMYg9RRkK760Bmd0YNjeVDvRgwD2Cvt3it4wW223CtqL1KxAoTK4iPzgpwu2x5KA0ojvexkzTG7J/i8EUYREgHCyQtVVIQ== diff --git a/projects/blocky/project.yml b/projects/blocky/project.yml new file mode 100644 index 00000000..2f751a88 --- /dev/null +++ b/projects/blocky/project.yml @@ -0,0 +1,7 @@ +config: + description: Blocky DNS Server +apps: +- name: blocky + repoURL: https://k8s-at-home.com/charts + chart: blocky + targetRevision: 9.0.3 diff --git a/projects/blocky/values/blocky.yaml b/projects/blocky/values/blocky.yaml new file mode 100644 index 00000000..53b61733 --- /dev/null +++ b/projects/blocky/values/blocky.yaml @@ -0,0 +1,196 @@ +env: + TZ: Europe/Amsterdam + +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "4000" + +image: + tag: v0.15 + +service: + main: + ports: + http: + port: 4000 + dns-tcp: + enabled: false + dns-udp: + enabled: true + type: LoadBalancer + externalTrafficPolicy: Local + ports: + dns-udp: + enabled: true + port: 53 + protocol: UDP + targetPort: 53 + +persistence: + logs: + enabled: true + mountPath: /logs + accessMode: ReadWriteOnce + size: 1Gi + storageClass: local-path + +prometheus: + serviceMonitor: + enabled: false + +# -- Full list of options https://github.com/0xERR0R/blocky/blob/master/docs/config.yml +config: | + upstream: + externalResolvers: + - 192.168.1.1 + + #customDNS: + # mapping: + # printer.lan: 192.168.178.3 + + conditional: + mapping: + lan: udp:192.168.1.1 + dc: udp:192.168.1.1 + + blocking: + blackLists: + ads: + - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt + - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/extra.txt + - https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/android-tracking.txt + - https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt + - https://raw.githubusercontent.com/notracking/hosts-blocklists/master/hostnames.txt + - https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts + - http://sysctl.org/cameleon/hosts + - https://adaway.org/hosts.txt + - https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt + - 
https://hostfiles.frogeye.fr/firstparty-trackers-hosts.txt + - https://phishing.army/download/phishing_army_blocklist_extended.txt + - https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt + - https://raw.githubusercontent.com/anudeepND/youtubeadsblacklist/master/domainlist.txt + - https://raw.githubusercontent.com/bigdargon/hostsVN/master/hosts + - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt + - https://raw.githubusercontent.com/DandelionSprout/adfilt/master/Alternate%20versions%20Anti-Malware%20List/AntiMalwareHosts.txt + - https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.2o7Net/hosts + - https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Risk/hosts + - https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Spam/hosts + - https://raw.githubusercontent.com/Kees1958/W3C_annual_most_used_survey_blocklist/master/TOP_EU_US_Ads_Trackers_HOST + - https://raw.githubusercontent.com/PolishFiltersTeam/KADhosts/master/KADhosts.txt + - https://raw.githubusercontent.com/Spam404/lists/master/main-blacklist.txt + - https://urlhaus.abuse.ch/downloads/hostfile/ + - https://zerodot1.gitlab.io/CoinBlockerLists/hosts_browser + + # All firebog lists: + - https://v.firebog.net/hosts/Cameleon.txt + - https://v.firebog.net/hosts/HostsFileOrg.txt + - https://v.firebog.net/hosts/JoeWein.txt + - https://v.firebog.net/hosts/Mahakala.txt + - https://v.firebog.net/hosts/JoeyLane.txt + - https://v.firebog.net/hosts/PeterLowe.txt + - https://v.firebog.net/hosts/PiwikSpam.txt + - https://v.firebog.net/hosts/ReddestDream.txt + - https://v.firebog.net/hosts/SBDead.txt + - https://v.firebog.net/hosts/SBKAD.txt + - https://v.firebog.net/hosts/SBSpam.txt + - https://v.firebog.net/hosts/SomeoneWC.txt + - https://v.firebog.net/hosts/Spam404.txt + - https://v.firebog.net/hosts/Vokins.txt + - https://v.firebog.net/hosts/Winhelp2002.txt + - https://v.firebog.net/hosts/AdAway.txt + - https://v.firebog.net/hosts/Disconnect-ads.txt + - https://v.firebog.net/hosts/Easylist.txt + - https://v.firebog.net/hosts/Easylist-Dutch.txt + - https://v.firebog.net/hosts/SBUnchecky.txt + - https://v.firebog.net/hosts/AdguardDNS.txt + - https://v.firebog.net/hosts/Prigent-Ads.txt + - https://v.firebog.net/hosts/Airelle-trc.txt + - https://v.firebog.net/hosts/Disconnect-trc.txt + - https://v.firebog.net/hosts/Disconnect-mal.txt + - https://v.firebog.net/hosts/Easyprivacy.txt + - https://v.firebog.net/hosts/SB2o7Net.txt + - https://v.firebog.net/hosts/APT1Rep.txt + - https://v.firebog.net/hosts/Airelle-hrsk.txt + - https://v.firebog.net/hosts/Openphish.txt + - https://v.firebog.net/hosts/SBRisk.txt + - https://v.firebog.net/hosts/Shalla-mal.txt + - https://v.firebog.net/hosts/Prigent-Malware.txt + ms: [] + untrusted: + - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/update.txt + - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/ms.txt + - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/fbook.txt + - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/google.txt + - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/nintendont.txt + - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/ps.txt + - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/xbox.txt + whiteLists: + ads: + - https://git.nold.in/nold/dns-whitelist/raw/branch/master/whitelists/common.txt + - 
https://git.nold.in/nold/dns-whitelist/raw/branch/master/whitelists/ms.txt + ms: + - https://git.nold.in/nold/dns-whitelist/raw/branch/master/whitelists/ms.txt + clientGroupsBlock: + default: + - ads + LAPTOP-G35N0AS1.lan: + - ads + - ms + # use client name (with wildcard support: * - sequence of any characters, [0-9] - range) + # or single ip address / client subnet as CIDR notation + #laptop*: + # - ads + #192.168.178.1/24: + # - special + + # which response will be sent, if query is blocked: + blockType: zeroIp + # optional: automatically list refresh period in minutes. Default: 4h. + # Negative value -> deactivate automatically refresh. + # 0 value -> use default + refreshPeriod: 0 + + # optional: configuration for caching of DNS responses + #caching: + # amount in minutes, how long a response must be cached (min value). + # If <=0, use response's TTL, if >0 use this value, if TTL is smaller + # Default: 0 + # minTime: 5 + # amount in minutes, how long a response must be cached (max value). + # If <0, do not cache responses + # If 0, use TTL + # If > 0, use this value, if TTL is greater + # Default: 0 + # maxTime: -1 + # if true, will preload DNS results for often used queries (names queried more than 5 times in a 2 hour time window) + # this improves the response time for often used queries, but significantly increases external traffic + # default: false + # prefetching: true + + # optional: configuration of client name resolution + clientLookup: + # optional: this DNS resolver will be used to perform reverse DNS lookup (typically local router) + upstream: udp:192.168.1.1 + # optional: custom mapping of client name to IP addresses. Useful if reverse DNS does not work properly or just to have custom client names. + #clients: + # laptop: + # - 192.168.178.29 + + prometheus: + enable: true + path: /metrics + + # optional: write query information (question, answer, client, duration etc) to daily csv file + queryLog: + # # directory (should be mounted as volume in docker) + dir: /logs + # # if true, write one file per client. Writes all queries to single file otherwise + # perClient: true + # # if > 0, deletes log files which are older than ... 
days + logRetentionDays: 1 + + port: 53 + httpPort: 4000 + bootstrapDns: udp:192.168.1.1 + logLevel: info + logFormat: text diff --git a/projects/core/project.yml b/projects/core/project.yml new file mode 100644 index 00000000..c31e0646 --- /dev/null +++ b/projects/core/project.yml @@ -0,0 +1,61 @@ +config: + description: Core Components for Kubernetes +apps: +- name: fast-storage + namespace: fast-storage + repoURL: https://github.com/rancher/local-path-provisioner + path: deploy/chart + syncWave: '0' + parameters: + - name: storageClass.name + value: fast + - name: nodePathMap[0].node + value: DEFAULT_PATH_FOR_NON_LISTED_NODES + - name: nodePathMap[0].paths[0] + value: /var/lib/rancher/k3s/storage + +- name: ssd-storage + namespace: ssd-storage + repoURL: https://github.com/rancher/local-path-provisioner + path: deploy/chart + syncWave: '0' + parameters: + - name: storageClass.name + value: ssd + - name: nodePathMap[0].node + value: DEFAULT_PATH_FOR_NON_LISTED_NODES + - name: nodePathMap[0].paths[0] + value: /data/kubernetes/ssd + +- name: metallb + repoURL: https://charts.bitnami.com/bitnami + chart: metallb + namespace: metallb + targetRevision: 2.5.6 + syncWave: '0' +- name: cert-manager + namespace: cert-manager + repoURL: https://charts.jetstack.io + chart: cert-manager + targetRevision: 1.5.3 + parameters: + - name: installCRDs + value: 'true' + secrets: + - name: cert-manager-vault-approle + keys: + - secretId + +- name: ingress-internal + namespace: ingress-internal + repoURL: https://kubernetes.github.io/ingress-nginx + chart: ingress-nginx + targetRevision: 4.0.8 + syncWave: '0' + +- name: ingress-external + namespace: ingress-external + repoURL: https://kubernetes.github.io/ingress-nginx + chart: ingress-nginx + targetRevision: 4.0.8 + syncWave: '0' diff --git a/projects/core/values/ingress-external.yaml b/projects/core/values/ingress-external.yaml new file mode 100644 index 00000000..748fef66 --- /dev/null +++ b/projects/core/values/ingress-external.yaml @@ -0,0 +1,27 @@ +controller: + name: external + ingressClassResource: + name: external + enabled: true + controllerValue: 'k8s.io/external' + extraArgs: + ingress-class: external + + kind: DaemonSet + updateStrategy: + # rollingUpdate: + # maxUnavailable: 1 + type: RollingUpdate + + service: + annotations: + metallb.universe.tf/address-pool: external + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "10254" + +podSecurityPolicy: + enabled: true diff --git a/projects/core/values/ingress-internal.yaml b/projects/core/values/ingress-internal.yaml new file mode 100644 index 00000000..5d8a6e8b --- /dev/null +++ b/projects/core/values/ingress-internal.yaml @@ -0,0 +1,25 @@ +controller: + name: controller-internal + electionID: ingress-controller-internal-leader + watchIngressWithoutClass: true + ingressClassResource: + name: nginx + enabled: true + default: true + + kind: Deployment + service: + annotations: + metallb.universe.tf/address-pool: internal + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "10254" + +defaultBackend: + enabled: true + +podSecurityPolicy: + enabled: true diff --git a/projects/core/values/metallb.yaml b/projects/core/values/metallb.yaml new file mode 100644 index 00000000..328e04df --- /dev/null +++ b/projects/core/values/metallb.yaml @@ -0,0 +1,29 @@ +configInline: + address-pools: + - name: default + protocol: layer2 + addresses: + - 192.168.1.13/32 + - 192.168.1.14/32 + - 192.168.1.15/32 + - 
192.168.1.16/32 + - 192.168.1.17/32 + - 192.168.1.18/32 + - 192.168.1.19/32 + - 192.168.1.20/32 + + - name: external + protocol: layer2 + addresses: + - 192.168.1.12/32 + + - name: internal + protocol: layer2 + addresses: + - 192.168.1.11/32 + +prometheus: + serviceMonitor: + enabled: true + prometheusRule: + enabled: true diff --git a/projects/downloader/project.yml b/projects/downloader/project.yml new file mode 100644 index 00000000..e1e7f6e8 --- /dev/null +++ b/projects/downloader/project.yml @@ -0,0 +1,32 @@ +config: + description: Tools for downloading Linux ISOs +apps: +- name: deluge + repoURL: https://k8s-at-home.com/charts/ + chart: deluge + targetRevision: 5.0.1 + secrets: + - name: openvpn + keys: + - VPN_AUTH + - vpnConfigfile +- name: rtorrent + repoURL: https://k8s-at-home.com/charts/ + chart: rtorrent-flood + targetRevision: 9.0.1 + secrets: + - name: openvpn + fromApp: deluge + keys: + - VPN_AUTH + - vpnConfigfile + +- name: youtubedl + repoURL: https://k8s-at-home.com/charts/ + chart: youtubedl-material + targetRevision: 4.0.1 + +- name: pyload + repoURL: https://k8s-at-home.com/charts/ + chart: pyload + targetRevision: 6.0.1 diff --git a/projects/downloader/values/deluge.yaml b/projects/downloader/values/deluge.yaml new file mode 100644 index 00000000..86cbb596 --- /dev/null +++ b/projects/downloader/values/deluge.yaml @@ -0,0 +1,60 @@ +ingress: + main: + enabled: true + annotations: + cert-manager.io/cluster-issuer: "vault-issuer" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-body-size: 50m + + hosts: + - host: torrent.dc + paths: + - path: / + pathType: Prefix + + tls: + - secretName: torrent.dc-tls + hosts: + - torrent.dc + +env: + PUID: 1000 + PGID: 1000 + +persistence: + config: + enabled: true + mountPath: /config + size: 10M + + # use hostpath instead + downloads: + enabled: true + type: hostPath + hostPath: /data/torrent + mountPath: /downloads + +## VPN +addons: + vpn: + enabled: true + + openvpn: + authSecret: openvpn + configFileSecret: openvpn + + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + + livenessProbe: + exec: + command: + - sh + - -c + - if [ $(curl -s https://ipinfo.io/country) == 'NL' ]; then exit 0; else exit $?; fi + initialDelaySeconds: 30 + periodSeconds: 60 + failureThreshold: 3 diff --git a/projects/downloader/values/pyload.yaml b/projects/downloader/values/pyload.yaml new file mode 100644 index 00000000..0744d945 --- /dev/null +++ b/projects/downloader/values/pyload.yaml @@ -0,0 +1,34 @@ +ingress: + main: + enabled: true + annotations: + cert-manager.io/cluster-issuer: "vault-issuer" + kubernetes.io/ingress.class: "nginx" + hosts: + - host: pyload.dc + paths: + - path: / + pathType: Prefix + tls: + - secretName: pyload.dc-tls + hosts: + - pyload.dc + +env: + PUID: 1420 + PGID: 2420 + +persistence: + config: + enabled: true + mountPath: /config + storageClass: local-path + accessMode: ReadWriteOnce + size: 1Gi + + # use hostpath instead + downloads: + enabled: true + type: hostPath + hostPath: /data/downloads + mountPath: /downloads diff --git a/projects/downloader/values/rtorrent.yaml b/projects/downloader/values/rtorrent.yaml new file mode 100644 index 00000000..0539c565 --- /dev/null +++ b/projects/downloader/values/rtorrent.yaml @@ -0,0 +1,128 @@ +env: + # -- Set the container timezone + TZ: UTC + # -- Folder where Flood stores its configuration + HOME: "/config" + # -- The host that Flood should listen for web connections on + FLOOD_OPTION_HOST: "0.0.0.0" + # -- The port that Flood 
should listen for web connections on + FLOOD_OPTION_PORT: "3000" + # -- ADVANCED: rTorrent daemon managed by Flood + FLOOD_OPTION_RTORRENT: "true" + # -- Allowed path for file operations + FLOOD_OPTION_ALLOWEDPATH: "/downloads" + +# -- Configures service settings for the chart. +# @default -- See values.yaml +service: + main: + ports: + http: + port: 3000 + bittorrent: + enabled: true + type: ClusterIP + ports: + bittorrent: + enabled: true + port: 6881 + protocol: TCP + targetPort: 6881 + +# -- Minimal configuration provided from https://github.com/jesec/rtorrent/blob/master/doc/rtorrent.rc +# @default -- string +config: | + session.use_lock.set = no + method.insert = cfg.basedir, private|const|string, (cat,(fs.homedir),"/.local/share/rtorrent/") + method.insert = cfg.download, private|const|string, (cat,"/downloads/","download/") + method.insert = cfg.logs, private|const|string, (cat,(cfg.download),"log/") + method.insert = cfg.logfile, private|const|string, (cat,(cfg.logs),"rtorrent-",(system.time),".log") + method.insert = cfg.session, private|const|string, (cat,(cfg.basedir),".session/") + method.insert = cfg.watch, private|const|string, (cat,(cfg.download),"watch/") + fs.mkdir.recursive = (cat,(cfg.basedir)) + fs.mkdir = (cat,(cfg.download)) + fs.mkdir = (cat,(cfg.logs)) + fs.mkdir = (cat,(cfg.session)) + fs.mkdir = (cat,(cfg.watch)) + fs.mkdir = (cat,(cfg.watch),"/load") + fs.mkdir = (cat,(cfg.watch),"/start") + schedule2 = watch_load, 11, 10, ((load.verbose, (cat, (cfg.watch), "load/*.torrent"))) + schedule2 = watch_start, 10, 10, ((load.start_verbose, (cat, (cfg.watch), "start/*.torrent"))) + dht.add_bootstrap = dht.transmissionbt.com:6881 + dht.add_bootstrap = dht.libtorrent.org:25401 + throttle.max_uploads.set = 20 + throttle.max_uploads.global.set = 50 + throttle.min_peers.normal.set = 20 + throttle.max_peers.normal.set = 60 + throttle.min_peers.seed.set = 30 + throttle.max_peers.seed.set = 80 + trackers.numwant.set = 80 + network.port_range.set = 61086-61086 + network.max_open_files.set = 600 + network.max_open_sockets.set = 300 + pieces.memory.max.set = 1800M + session.path.set = (cat, (cfg.session)) + directory.default.set = (cat, (cfg.download)) + log.execute = (cat, (cfg.logs), "execute.log") + encoding.add = utf8 + system.daemon.set = true + system.umask.set = 0002 + system.cwd.set = (directory.default) + network.http.max_open.set = 500 + network.http.dns_cache_timeout.set = 25 + network.scgi.open_local = (cat,(cfg.basedir),rtorrent.sock) + print = (cat, "Logging to ", (cfg.logfile)) + log.open_file = "log", (cfg.logfile) + log.add_output = "info", "log" + +ingress: + main: + enabled: true + annotations: + cert-manager.io/cluster-issuer: "vault-issuer" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/proxy-body-size: 50m + hosts: + - host: flood.dc + paths: + - path: / + pathType: Prefix + tls: + - secretName: flood.dc-tls + hosts: + - flood.dc + +persistence: + config: + enabled: true + mountPath: /config + size: 10M + + # use hostpath instead + downloads: + enabled: true + type: hostPath + hostPath: /data/torrent + mountPath: /downloads + +## VPN +addons: + vpn: + enabled: true + openvpn: + authSecret: openvpn + configFileSecret: openvpn + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + livenessProbe: + exec: + command: + - sh + - -c + - if [ $(curl -s https://ipinfo.io/country) == 'NL' ]; then exit 0; else exit $?; fi + initialDelaySeconds: 30 + periodSeconds: 60 + failureThreshold: 3 diff --git 
a/projects/downloader/values/youtubedl.yaml b/projects/downloader/values/youtubedl.yaml new file mode 100644 index 00000000..6d08ab60 --- /dev/null +++ b/projects/downloader/values/youtubedl.yaml @@ -0,0 +1,29 @@ +ingress: + main: + enabled: true + annotations: + cert-manager.io/cluster-issuer: "vault-issuer" + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-url: | + https://youtubedl.dc/akprox/auth/nginx + nginx.ingress.kubernetes.io/auth-signin: | + https://youtubedl.dc/akprox/start?rd=$escaped_request_uri + nginx.ingress.kubernetes.io/auth-response-headers: | + Set-Cookie,X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid + nginx.ingress.kubernetes.io/auth-snippet: | + proxy_set_header X-Forwarded-Host $http_host; + hosts: + - host: youtubedl.dc + paths: + - path: / + pathType: Prefix + tls: + - secretName: youtubedl.dc-tls + hosts: + - youtubedl.dc + +hostPathMounts: + - name: downloads + enabled: true + mountPath: /downloads + hostPath: /data/downloads diff --git a/projects/drone/project.yml b/projects/drone/project.yml new file mode 100644 index 00000000..30231977 --- /dev/null +++ b/projects/drone/project.yml @@ -0,0 +1,36 @@ +config: + description: Drone-CI + + networkPolicy: + groups: + - internet + rules: + - allow-runner + - allow-minio + +apps: + - name: drone + repoURL: https://github.com/nold360/drone-charts.git + path: charts/drone + targetRevision: master + secrets: + - name: drone-env + keys: + - DRONE_GITEA_SERVER + - DRONE_GITEA_CLIENT_ID + - DRONE_GITEA_CLIENT_SECRET + - DRONE_GITHUB_CLIENT_ID + - DRONE_GITHUB_CLIENT_SECRET + - DRONE_RPC_SECRET + + - name: drone-runner + namespace: drone-runner + repoURL: https://charts.drone.io + chart: drone-runner-kube + targetRevision: 0.1.5 + secrets: + - name: drone-env + fromApp: drone + keys: + - DRONE_RPC_SECRET + - DRONE_SECRET_PLUGIN_TOKEN diff --git a/projects/drone/values/drone-runner.yaml b/projects/drone/values/drone-runner.yaml new file mode 100644 index 00000000..64742b27 --- /dev/null +++ b/projects/drone/values/drone-runner.yaml @@ -0,0 +1,34 @@ +#podSecurityContext: +# fsGroup: 2000 + +#securityContext: + #capabilities: + # drop: + # - ALL + #readOnlyRootFilesystem: true + #runAsNonRoot: false + #runAsUser: 1000 + +resources: + limits: + cpu: 4000m + memory: 2048Mi + +rbac: + buildNamespaces: + - drone-runner + +extraSecretNamesForEnvFrom: + - drone-env + +env: + DRONE_RPC_HOST: drone.drone.svc.cluster.local + + DRONE_SECRET_PLUGIN_ENDPOINT: http://drone-secrets-drone-kubernetes-secrets.drone-runner.svc.cluster.local:3000 + DRONE_NAMESPACE_DEFAULT: drone-runner + DRONE_DEBUG: "true" + DRONE_TRACE: "true" + + HTTP_PROXY: http://proxy-squid.proxy.svc.cluster.local:80 + HTTPS_PROXY: http://proxy-squid.proxy.svc.cluster.local:80 + NO_PROXY: localhost,.cluster.local,drone,drone.drone.svc.cluster.local,10.0.0.0/8,10.42.0.1,10.43.0.1 diff --git a/projects/drone/values/drone-secrets.yaml b/projects/drone/values/drone-secrets.yaml new file mode 100644 index 00000000..06ceeb81 --- /dev/null +++ b/projects/drone/values/drone-secrets.yaml @@ -0,0 +1,21 @@ +podSecurityContext: + fsGroup: 2000 + +securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +extraSecretNamesForEnvFrom: + - drone-secrets-env + +rbac: + secretNamespace: drone-runner + restrictToSecrets: + - drone-secrets + +env: + KUBERNETES_NAMESPACE: drone-runner diff --git a/projects/drone/values/drone.yaml b/projects/drone/values/drone.yaml 
new file mode 100644 index 00000000..2d255d57 --- /dev/null +++ b/projects/drone/values/drone.yaml @@ -0,0 +1,147 @@ +image: +# repository: drone/drone + tag: 2.0.4 +# pullPolicy: IfNotPresent + +containerPort: 8000 + +securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "80" + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: true + annotations: + cert-manager.io/cluster-issuer: letsencrypt + kubernetes.io/ingress.class: external + kubernetes.io/tls-acme: "true" + hosts: + - host: drone.nold.in + paths: + - "/" + tls: + - secretName: drone-tls + hosts: + - drone.nold.in + +resources: + limits: + cpu: 100m + memory: 124Mi + +persistentVolume: + enabled: true + size: 8Gi + + storageClass: "" + +extraSecretNamesForEnvFrom: + - drone-env + +env: + DRONE_USER_FILTER: nold360 + DRONE_USER_CREATE: username:nold360,admin:true + DRONE_SERVER_PORT: ":8000" + + HTTP_PROXY: http://proxy-squid.proxy.svc.cluster.local + HTTPS_PROXY: http://proxy-squid.proxy.svc.cluster.local + NO_PROXY: localhost,.cluster.local + + DRONE_DATADOG_ENABLED: "false" + + ## REQUIRED: Set the user-visible Drone hostname, sans protocol. + ## Ref: https://docs.drone.io/installation/reference/drone-server-host/ + ## + DRONE_SERVER_HOST: "drone.nold.in" + + ## The protocol to pair with the value in DRONE_SERVER_HOST (http or https). + ## Ref: https://docs.drone.io/installation/reference/drone-server-proto/ + ## + DRONE_SERVER_PROTO: https + DRONE_WEBHOOK_ENDPOINT: "https://drone.nold.in/hook" + + DRONE_STARLARK_ENABLED: "true" + ## REQUIRED: Set the secret secret token that the Drone server and its Runners will use + ## to authenticate. This is commented out in order to leave you the ability to set the + ## key via a separately provisioned secret (see existingSecretName above). + ## Ref: https://docs.drone.io/installation/reference/drone-rpc-secret/ + ## + # DRONE_RPC_SECRET: + + ## If you'd like to use a DB other than SQLite (the default), set a driver + DSN here. + ## Ref: https://docs.drone.io/installation/storage/database/ + ## + # DRONE_DATABASE_DRIVER: + # DRONE_DATABASE_DATASOURCE: + + ## If you are going to store build secrets in the Drone database, it is suggested that + ## you set a database encryption secret. This must be set before any secrets are stored + ## in the database. + ## Ref: https://docs.drone.io/installation/storage/encryption/ + ## + # DRONE_DATABASE_SECRET: + + ## If you are using self-hosted GitHub or GitLab, you'll need to set this to true. + ## Ref: https://docs.drone.io/installation/reference/drone-git-always-auth/ + ## + # DRONE_GIT_ALWAYS_AUTH: false + + ## =================================================================================== + ## Provider Directives (select ONE) + ## ----------------------------------------------------------------------------------- + ## Select one provider (and only one). Refer to the corresponding documentation link + ## before filling the values in. Also note that you can use the 'secretMounts' value + ## if you'd rather not have secrets in Kubernetes Secret instead of a ConfigMap. + ## =================================================================================== + + ## GitHub-specific variables. See the provider docs here: + ## Ref: https://docs.drone.io/installation/providers/github/ + ## + # DRONE_GITHUB_CLIENT_ID: + # DRONE_GITHUB_CLIENT_SECRET: + + ## GitLab-specific variables. 
See the provider docs here: + ## Ref: https://docs.drone.io/installation/providers/gitlab/ + ## + # DRONE_GITLAB_CLIENT_ID: + # DRONE_GITLAB_CLIENT_SECRET: + # DRONE_GITLAB_SERVER: + + ## Bitbucket Cloud-specific variables. See the provider docs here: + ## Ref: https://docs.drone.io/installation/providers/bitbucket-cloud/ + ## + # DRONE_BITBUCKET_CLIENT_ID: + # DRONE_BITBUCKET_CLIENT_SECRET: + + ## Bitbucket-specific variables. See the provider docs here: + ## Ref: https://docs.drone.io/installation/providers/bitbucket-server/ + ## + # DRONE_GIT_USERNAME: + # DRONE_GIT_PASSWORD: + # DRONE_STASH_CONSUMER_KEY: + # DRONE_STASH_PRIVATE_KEY: + # DRONE_STASH_SERVER: + + ## Gitea-specific variables. See the provider docs here: + ## Ref: https://docs.drone.io/installation/providers/gitea/ + ## + # DRONE_GITEA_CLIENT_ID: + # DRONE_GITEA_CLIENT_SECRET: + # DRONE_GITEA_SERVER: + + ## Gogs-specific variables. See the provider docs here: + ## Ref: https://docs.drone.io/installation/providers/gogs/ + ## + # DRONE_GOGS_SERVER: diff --git a/projects/falco/project.yml b/projects/falco/project.yml new file mode 100644 index 00000000..ab0689d4 --- /dev/null +++ b/projects/falco/project.yml @@ -0,0 +1,8 @@ +config: + description: Falco Security +apps: +- name: falco + disabled: true + repoURL: https://falcosecurity.github.io/charts + chart: falco + targetRevision: 1.16.0 diff --git a/projects/falco/values/falco.yaml b/projects/falco/values/falco.yaml new file mode 100644 index 00000000..19ded07d --- /dev/null +++ b/projects/falco/values/falco.yaml @@ -0,0 +1,219 @@ +docker: + enabled: false + +podSecurityPolicy: + create: false + +containerd: + enabled: true + +#extraArgs: +# - --disable-cri-async + +falco: + timeFormatISO8601: true + grpc: + enabled: true + grpcOutput: + enabled: false + +falcosidekick: + enabled: true + replicaCount: 1 + podSecurityPolicy: + create: true + + webui: + enabled: true + retention: 200 + darkmode: true + podSecurityPolicy: + create: true + ingress: + enabled: true + annotations: + cert-manager.io/cluster-issuer: vault-issuer + hosts: + - host: falco.dc + paths: ["/ui", "/events", "/healthz", "/ws"] + tls: + - secretName: falcosidekick-tls + hosts: + - falco.dc + +customRules: + rule_exceptions.yaml: |- + - rule: Contact K8S API Server From Container + exceptions: + - name: proc_filenames + value: + - argocd-applicat + append: true + - rule: Write below root + exceptions: + - name: container + value: [ host ] + append: true + - rule: Read sensitive file untrusted + exceptions: + - name: container + value: [ host ] + append: true + - rule: Non sudo setuid + exceptions: + - name: container + value: [ host ] + append: true + nginx_rules.yaml: |- + - macro: nginx_consider_syscalls + condition: (evt.num < 0) + - macro: app_nginx + condition: container and container.image contains "nginx" + # Any outbound traffic raises a WARNING + - rule: Unauthorized process opened an outbound connection (nginx) + desc: A nginx process tried to open an outbound connection and is not whitelisted + condition: outbound and evt.rawres >= 0 and app_nginx + output: Non-whitelisted process opened an outbound connection (command=%proc.cmdline connection=%fd.name) + priority: WARNING + # Restricting listening ports to selected set + - list: nginx_allowed_inbound_ports_tcp + items: [80, 443, 8080, 8443] + - rule: Unexpected inbound tcp connection nginx + desc: Detect inbound traffic to nginx using tcp on a port outside of expected set + condition: inbound and evt.rawres >= 0 and not fd.sport in 
(nginx_allowed_inbound_ports_tcp) and app_nginx + output: Inbound network connection to nginx on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image) + priority: NOTICE + # Restricting spawned processes to selected set + - list: nginx_allowed_processes + items: ["nginx", "app-entrypoint.", "basename", "dirname", "grep", "nami", "node", "tini"] + - rule: Unexpected spawned process nginx + desc: Detect a process started in a nginx container outside of an expected set + condition: spawned_process and not proc.name in (nginx_allowed_processes) and app_nginx + output: Unexpected process spawned in nginx container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image) + priority: NOTICE + # Restricting files read or written to specific set + - list: nginx_allowed_file_prefixes_readwrite + items: ["/var/log/nginx", "/var/run"] + # Remember to add your nginx cache path + + - rule: Unexpected file access readwrite for nginx + desc: Detect an attempt to access a file readwrite other than below an expected list of directories + condition: (open_write) and not fd.name pmatch (nginx_allowed_file_prefixes_readwrite) and app_nginx + output: Unexpected file accessed readwrite for nginx (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image) + priority: NOTICE + # Restricting syscalls to selected set + - list: nginx_allowed_syscalls + items: [accept, bind, clone, connect, dup, listen, mkdir, open, recvfrom, recvmsg, sendto, setgid, setuid, socket, socketpair] + - rule: Unexpected syscall nginx + desc: Detect a syscall in a nginx container outside of an expected set + condition: nginx_consider_syscalls and not evt.type in ("", nginx_allowed_syscalls) and app_nginx + output: Unexpected syscall in nginx container (command=%proc.cmdline pid=%proc.pid user=%user.name syscall=%evt.type args=%evt.args %container.info image=%container.image) + priority: NOTICE + warn_evttypes: False + + php_fpm.yaml: |- + - macro: php_fpm_consider_syscalls + condition: (evt.num < 0) + + - macro: app_php_fpm + condition: container and container.image contains "fpm" + + # Considering any inbound network connection suspect + - rule: Unexpected inbound connection php_fpm + desc: Detect any inbound connection arriving at php_fpm + condition: inbound and evt.rawres >= 0 and app_php_fpm + output: Unexpected inbound connection arriving at php_fpm (command=%proc.cmdline pid=%proc.pid connection=%fd.name user=%user.name %container.info image=%container.image) + priority: NOTICE + + # Restricting listening ports to selected set + + - list: php_fpm_allowed_inbound_ports_tcp + items: [80, 443] + + - rule: Unexpected inbound tcp connection php_fpm + desc: Detect inbound traffic to php_fpm using tcp on a port outside of expected set + condition: inbound and evt.rawres >= 0 and not fd.sport in (php_fpm_allowed_inbound_ports_tcp) and app_php_fpm + output: Inbound network connection to php_fpm on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image) + priority: NOTICE + + # Restricting spawned processes to selected set + + - list: php_fpm_allowed_processes + items: ["/usr/bin/python2", "nginx", "nginx: master process /usr/sbin/nginx -g daemon off; error_log /dev/stderr info;", "nginx: worker process", "php-fpm", "php-fpm: pool www"] + + - rule: Unexpected spawned process php_fpm + desc: Detect a 
process started in a php_fpm container outside of an expected set + condition: spawned_process and not proc.name in (php_fpm_allowed_processes) and app_php_fpm + output: Unexpected process spawned in php_fpm container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image) + priority: NOTICE + + # Restricting files read or written to specific set + + - list: php_fpm_allowed_file_prefixes_readonly + items: ["/dev", "/var/www/errors"] + + - rule: Unexpected file access readonly for php_fpm + desc: Detect an attempt to access a file readonly other than below an expected list of directories + condition: (open_read and evt.is_open_write=false) and not fd.name pmatch (php_fpm_allowed_file_prefixes_readonly) and app_php_fpm + output: Unexpected file accessed readonly for php_fpm (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image) + priority: NOTICE + + - list: php_fpm_allowed_file_prefixes_readwrite + items: ["/dev", "/tmp", "/usr/local/var/log"] + + - rule: Unexpected file access readwrite for php_fpm + desc: Detect an attempt to access a file readwrite other than below an expected list of directories + condition: (open_write) and not fd.name pmatch (php_fpm_allowed_file_prefixes_readwrite) and app_php_fpm + output: Unexpected file accessed readwrite for php_fpm (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image) + priority: NOTICE + + postgres.yaml: |- + - macro: postgres_consider_syscalls + condition: (evt.num < 0) + + - macro: app_postgres + condition: container and container.image contains "postgres" + + - list: postgres_allowed_inbound_ports_tcp + items: [5432] + + - rule: Unexpected inbound tcp connection postgres + desc: Detect inbound traffic to postgres using tcp on a port outside of expected set + condition: inbound and evt.rawres >= 0 and not fd.sport in (postgres_allowed_inbound_ports_tcp) and app_postgres + output: Inbound network connection to postgres on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image) + priority: NOTICE + + # Restricting spawned processes to selected set + + - list: postgres_allowed_processes + items: ["/proc/self/exe", "pg_isready", "postgres", "psql", "postgres: autovacuum launcher process", "pg_ctl" , "postgres: checkpointer process ", "postgres: stats collector process ", "postgres: wal writer process ", "postgres: writer process ", "sh"] + + - rule: Unexpected spawned process postgres + desc: Detect a process started in a postgres container outside of an expected set + condition: spawned_process and not proc.name in (postgres_allowed_processes) and app_postgres + output: Unexpected process spawned in postgres container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image) + priority: NOTICE + + # Restricting files read or written to specific set + + - list: postgres_allowed_file_prefixes_readonly + items: ["/dev", "/etc", "/lib/x86_64-linux-gnu", "/usr/lib/locale", "/usr/lib/x86_64-linux-gnu", "/usr/share/locale", "/var/lib/postgresql/data", "/usr/share/zoneinfo", "/var/lib/postgresql", "/usr/lib/postgresql", "/usr/share/postgresql", "/var/run/postgresql"] + + - rule: Unexpected file access readonly for postgres + desc: Detect an attempt to access a file readonly other than below an expected list of directories + condition: (open_read and evt.is_open_write=false) and not fd.name pmatch 
(postgres_allowed_file_prefixes_readonly) and app_postgres + output: Unexpected file accessed readonly for postgres (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image) + priority: NOTICE + + - list: postgres_allowed_file_prefixes_readwrite + items: ["/var/lib/postgresql/data", "/var/run/postgresql"] + + - rule: Unexpected file access readwrite for postgres + desc: Detect an attempt to access a file readwrite other than below an expected list of directories + condition: (open_write) and not fd.name pmatch (postgres_allowed_file_prefixes_readwrite) and app_postgres + output: Unexpected file accessed readwrite for postgres (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image) + priority: NOTICE + +# For OpenShift +scc: + create: false diff --git a/projects/gitea/project.yml b/projects/gitea/project.yml new file mode 100644 index 00000000..6114e550 --- /dev/null +++ b/projects/gitea/project.yml @@ -0,0 +1,22 @@ +config: + description: Gitea public Git Server + networkPolicy: + groups: + - internet + rules: + - allow-ssh +apps: +- name: gitea + repoURL: https://dl.gitea.io/charts/ + chart: gitea + targetRevision: 4.1.1 + secrets: + - name: admin + keys: + - username + - password + - email + - name: postgres + keys: + - postgresql-password + - postgresql-postgres-password diff --git a/projects/gitea/values/gitea.yaml b/projects/gitea/values/gitea.yaml new file mode 100644 index 00000000..414b59d2 --- /dev/null +++ b/projects/gitea/values/gitea.yaml @@ -0,0 +1,137 @@ +# Gitea +image: + rootless: true + +statefulset: + env: + - name: HTTP_PROXY + value: http://proxy-squid.proxy.svc.cluster.local:80 + - name: HTTPS_PROXY + value: http://proxy-squid.proxy.svc.cluster.local:80 + - name: http_proxy + value: http://proxy-squid.proxy.svc.cluster.local:80 + - name: https_proxy + value: http://proxy-squid.proxy.svc.cluster.local:80 + +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL +# # https://gitea.com/gitea/helm-chart/issues/161 +# add: +# - SYS_CHROOT + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + +service: + http: + type: ClusterIP + port: 3000 + ssh: + type: LoadBalancer + port: 2222 + +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: external + kubernetes.io/tls-acme: "true" + cert-manager.io/cluster-issuer: letsencrypt + hosts: + - host: git.nold.in + paths: + - path: / + pathType: Prefix + tls: + - secretName: gitea-tls + hosts: + - git.nold.in + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +persistence: + enabled: true + size: 10Gi + #storageClass: slow + +gitea: + admin: + existingSecret: admin + + metrics: + enabled: false + serviceMonitor: + enabled: false + # additionalLabels: + # prometheus-release: prom1 + + oauth: + enabled: false + #name: + #provider: + #key: + #secret: + #autoDiscoverUrl: + #useCustomUrls: + #customAuthUrl: + #customTokenUrl: + #customProfileUrl: + #customEmailUrl: + + config: + APP_NAME: "Git with a lot of coffee" + ui: + DEFAULT_THEME: arc-green + repository: + DEFAULT_BRANCH: main + server: + LFS_START_SERVER: true + PROTOCOL: http + database: + HOST: gitea-postgresql.gitea.svc.cluster.local:5432 + service: + DISABLE_REGISTRATION: true + lfs: + STORAGE_TYPE: local + picture: + DISABLE_GRAVATAR: true + metrics: + ENABLED: false + api: + ENABLE_SWAGGER: false + oauth: + ENABLE: false + + database: + builtIn: + postgresql: + enabled: true + cache: + builtIn: + enabled: false + +postgresql: + global: + #storageClass: slow + postgresql: + existingSecret: postgres + persistence: + size: 10Gi + psp: + create: true + rbac: + create: true diff --git a/projects/grafana/project.yml b/projects/grafana/project.yml new file mode 100644 index 00000000..dda590cf --- /dev/null +++ b/projects/grafana/project.yml @@ -0,0 +1,21 @@ +config: + description: Grafana, Prometheus and friends +apps: +- name: prometheus + namespace: prometheus + repoURL: https://prometheus-community.github.io/helm-charts + chart: prometheus + targetRevision: 14.11.0 +- name: loki-stack + existingNamespace: prometheus + repoURL: https://grafana.github.io/helm-charts + chart: loki-stack + targetRevision: 2.4.1 + secrets: + - name: loki-stack-grafana + keys: + - admin-user + - admin-password + - name: grafana-env + keys: + - GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET diff --git a/projects/grafana/values/loki-stack.yaml b/projects/grafana/values/loki-stack.yaml new file mode 100755 index 00000000..7243eb3a --- /dev/null +++ b/projects/grafana/values/loki-stack.yaml @@ -0,0 +1,124 @@ +loki: + image: + tag: 2.3.0 + enabled: true + +promtail: + enabled: true + +fluent-bit: + enabled: true + +grafana: + enabled: true + + image: + tag: 8.1.2 + + admin: + existingSecret: "loki-stack-grafana" + + ingress: + enabled: true + annotations: + cert-manager.io/cluster-issuer: vault-issuer + kubernetes.io/ingress.class: nginx + hosts: + - grafana.dc + tls: + - secretName: grafana-tls + hosts: + - grafana.dc + + sidecar: + datasources: + enabled: true + dashboards: + enabled: true + label: grafana_dashboard + + persistence: + enabled: true + + plugins: + - grafana-piechart-panel + + dashboards: + default: + traefik: + gnetId: 11462 + revision: 1 + + # For OAUTH Secret Token + envFromSecret: grafana-env + + grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: false + log: + mode: console + grafana_net: + url: https://grafana.net + server: + root_url: https://grafana.dc + + auth.generic_oauth: + name: Authentik + enabled: true + allow_sign_up: true + client_id: 779461ddc18a79dba69cf2eef895a20d59b21d59 + #client_secret: IN ENV + scopes: "openid profile email" + #email_attribute_name: email:primary + role_attribute_path: "contains(groups[*], 'Grafana Admins') && 'Admin' || contains(groups[*], 'Grafana Editors') && 'Editor' || 'Viewer'" + auth_url: https://auth.dc/application/o/authorize/ + 
token_url: https://auth.dc/application/o/token/ + api_url: https://auth.dc/application/o/userinfo/ + # meh.. but for now... + tls_skip_verify_insecure: true + + +prometheus: + enabled: false + image: + tag: v2.26.0 + + extraScrapeConfigs: | + - job_name: 'openwrt' + scrape_interval: 10s + static_configs: + - targets: ['192.168.1.1:9100'] + - job_name: 'borg' + scrape_interval: 10s + static_configs: + - targets: ['192.168.1.111:9942'] + + podSecurityPolicy: + enabled: true + + server: + extraArgs: + #storage.local.retention: 720h + + nodeexporter: +# image: +# repository: quay.io/prometheus/node-exporter +# tag: v1.1.2 + + extraHostPathMounts: + - name: textfile-dir + mountPath: /srv/txt_collector + hostPath: /var/lib/node-exporter + readOnly: true + mountPropagation: HostToContainer + + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: false + runAsUser: 0 diff --git a/projects/grafana/values/prometheus.yaml b/projects/grafana/values/prometheus.yaml new file mode 100644 index 00000000..d9ee044e --- /dev/null +++ b/projects/grafana/values/prometheus.yaml @@ -0,0 +1,44 @@ +podSecurityPolicy: + enabled: true + +kubeStateMetrics: + enabled: false + +nodeExporter: + enabled: true + hostNetwork: true + hostPID: true + hostRootfs: true + + extraHostPathMounts: + - name: textfile-dir + mountPath: /srv/txt_collector + hostPath: /var/lib/node-exporter + readOnly: true + mountPropagation: HostToContainer + +server: + enabled: true + + persistentVolume: + enabled: true + +pushgateway: + enabled: true + +extraScrapeConfigs: | + - job_name: 'openwrt' + scrape_interval: 10s + static_configs: + - targets: ['192.168.1.1:9100'] + - job_name: 'borg' + scrape_interval: 120s + static_configs: + - targets: ['192.168.1.111:9942'] +# - job_name: 'octoprint' +# scrape_interval: 5s +# metrics_path: '/plugin/prometheus_exporter/metrics' +# params: +# apikey: ['__OCTOPRINT_APIKEY__'] +# static_configs: +# - targets: ['octoprint:80'] diff --git a/projects/heqet/project.yml b/projects/heqet/project.yml new file mode 100644 index 00000000..2f067237 --- /dev/null +++ b/projects/heqet/project.yml @@ -0,0 +1,11 @@ +config: + name: heqet2 + syncWave: -5 + +apps: + # Heqet + - name: heqet2 + path: charts/heqet + repoURL: https://github.com/nold360/heqet + targetRevision: f/v2 + syncWave: "-1" diff --git a/projects/heqet/values/argocd.yaml b/projects/heqet/values/argocd.yaml new file mode 100644 index 00000000..e848eb6f --- /dev/null +++ b/projects/heqet/values/argocd.yaml @@ -0,0 +1,1001 @@ +## ArgoCD configuration +## Ref: https://github.com/argoproj/argo-cd +## +nameOverride: argocd +fullnameOverride: "" + +# Optional CRD installation for those without Helm hooks +installCRDs: true + +global: + image: + repository: argoproj/argocd + tag: v2.0.0 + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 999 + runAsGroup: 999 + fsGroup: 999 + imagePullSecrets: [] + hostAliases: [] + # - ip: 10.20.30.40 + # hostnames: + # - git.myhostname + +## Controller +controller: + name: application-controller + + image: + repository: # argoproj/argocd + tag: # v1.7.11 + imagePullPolicy: # IfNotPresent + + # If changing the number of replicas you must pass the number as ARGOCD_CONTROLLER_REPLICAS as an environment variable + replicas: 1 + + # Deploy the application as a StatefulSet instead of a Deployment, this is required for HA capability. 
+ # This is a feature flag that will become the default in chart version 3.x + enableStatefulSet: false + + ## Argo controller commandline flags + args: + statusProcessors: "20" + operationProcessors: "10" + appResyncPeriod: "180" + selfHealTimeout: "5" + + ## Argo controller log format: text|json + logFormat: text + ## Argo controller log level + logLevel: info + + ## Additional command line arguments to pass to argocd-controller + ## + extraArgs: [] + + ## Environment variables to pass to argocd-controller + ## + env: + [] + # - name: "ARGOCD_CONTROLLER_REPLICAS" + # value: "" + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + ## Labels to be added to controller pods + ## + podLabels: {} + + ## Labels to set container specific security contexts + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + + ## Configures the controller port + containerPort: 8082 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + ## Additional volumeMounts to the controller main container. + volumeMounts: [] + + ## Additional volumes to the controller pod. + volumes: [] + + ## Controller service configuration + service: + annotations: {} + labels: {} + port: 8082 + portName: https-controller + + ## Node selectors and tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + nodeSelector: {} + tolerations: [] + affinity: {} + + priorityClassName: "" + + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + serviceAccount: + create: true + name: argocd-application-controller + ## Annotations applied to created service account + annotations: {} + ## Automount API credentials for the Service Account + automountServiceAccountToken: true + + ## Server metrics controller configuration + metrics: + enabled: false + service: + annotations: {} + labels: {} + servicePort: 8082 + serviceMonitor: + enabled: false + # selector: + # prometheus: kube-prometheus + # namespace: monitoring + # additionalLabels: {} + rules: + enabled: false + spec: [] + # - alert: ArgoAppMissing + # expr: | + # absent(argocd_app_info) + # for: 15m + # labels: + # severity: critical + # annotations: + # summary: "[ArgoCD] No reported applications" + # description: > + # ArgoCD has not reported any applications data for the past 15 minutes which + # means that it must be down or not functioning properly. This needs to be + # resolved for this cloud to continue to maintain state. + # - alert: ArgoAppNotSynced + # expr: | + # argocd_app_info{sync_status!="Synced"} == 1 + # for: 12h + # labels: + # severity: warning + # annotations: + # summary: "[{{`{{$labels.name}}`}}] Application not synchronized" + # description: > + # The application [{{`{{$labels.name}}`}} has not been synchronized for over + # 12 hours which means that the state of this cloud has drifted away from the + # state inside Git. + # selector: + # prometheus: kube-prometheus + # namespace: monitoring + # additionalLabels: {} + + ## Enable Admin ClusterRole resources. 
+ ## Enable if you would like to grant rights to ArgoCD to deploy to the local Kubernetes cluster. + clusterAdminAccess: + enabled: true + +## Dex +dex: + enabled: true + name: dex-server + + metrics: + enabled: false + service: + annotations: {} + labels: {} + serviceMonitor: + enabled: false + + image: + repository: quay.io/dexidp/dex + tag: v2.26.0 + imagePullPolicy: IfNotPresent + initImage: + repository: + tag: + imagePullPolicy: + + ## Environment variables to pass to the Dex server + ## + env: [] + + ## Annotations to be added to the Dex server pods + ## + podAnnotations: {} + + ## Labels to be added to the Dex server pods + ## + podLabels: {} + + serviceAccount: + create: true + name: argocd-dex-server + ## Annotations applied to created service account + annotations: {} + ## Automount API credentials for the Service Account + automountServiceAccountToken: true + + ## Additional volumeMounts to the controller main container. + volumeMounts: + - name: static-files + mountPath: /shared + + ## Additional volumes to the controller pod. + volumes: + - name: static-files + emptyDir: {} + + ## Dex deployment container ports + containerPortHttp: 5556 + servicePortHttp: 5556 + containerPortGrpc: 5557 + servicePortGrpc: 5557 + containerPortMetrics: 5558 + servicePortMetrics: 5558 + + ## Node selectors and tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + nodeSelector: {} + tolerations: [] + affinity: {} + + priorityClassName: "" + + ## Labels to set container specific security contexts + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + + resources: {} + # limits: + # cpu: 50m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + +## Redis +redis: + enabled: true + name: redis + + image: + repository: redis + tag: 5.0.10-alpine + imagePullPolicy: IfNotPresent + + containerPort: 6379 + servicePort: 6379 + + ## Environment variables to pass to the Redis server + ## + env: [] + + ## Annotations to be added to the Redis server pods + ## + podAnnotations: {} + + ## Labels to be added to the Redis server pods + ## + podLabels: {} + + ## Node selectors and tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + nodeSelector: {} + tolerations: [] + affinity: {} + + priorityClassName: "" + + ## Labels to set container specific security contexts + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + + ## Redis Pod specific security context + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + runAsNonRoot: true + + resources: {} + # limits: + # cpu: 200m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 64Mi + + volumeMounts: [] + volumes: [] + +# This key configures Redis-HA subchart and when enabled (redis-ha.enabled=true) +# the custom redis deployment is omitted +redis-ha: + enabled: false + # Check the redis-ha chart for more properties + exporter: + enabled: true + persistentVolume: + enabled: false + redis: + masterGroupName: argocd + config: + save: '""' + haproxy: + enabled: true + metrics: + enabled: true + image: + tag: 5.0.8-alpine + +## Server +server: + name: server + + replicas: 1 + + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + image: + repository: # argoproj/argocd + tag: # v1.7.11 + imagePullPolicy: # 
IfNotPresent + + ## Additional command line arguments to pass to argocd-server + ## + extraArgs: + - --insecure + + ## Environment variables to pass to argocd-server + ## + env: [] + + ## Specify postStart and preStop lifecycle hooks for your argo-cd-server container + ## + lifecycle: {} + + ## Argo server log format: text|json + logFormat: text + ## Argo server log level + logLevel: info + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + ## Labels to be added to controller pods + ## + podLabels: {} + + ## Configures the server port + containerPort: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + ## Additional volumeMounts to the server main container. + volumeMounts: [] + + ## Additional volumes to the controller pod. + volumes: [] + + ## Node selectors and tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + nodeSelector: {} + tolerations: [] + affinity: {} + + priorityClassName: "" + + ## Labels to set container specific security contexts + containerSecurityContext: + capabilities: + drop: + - all + readOnlyRootFilesystem: true + + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 50m + # memory: 64Mi + + ## Certificate configuration + certificate: + enabled: false + domain: argocd.example.com + issuer: {} + additionalHosts: [] + + ## Server service configuration + service: + annotations: {} + labels: {} + type: ClusterIP + ## For node port default ports + nodePortHttp: 30080 + nodePortHttps: 30443 + servicePortHttp: 80 + servicePortHttps: 443 + servicePortHttpName: http + servicePortHttpsName: https + namedTargetPort: true + loadBalancerIP: "" + loadBalancerSourceRanges: [] + externalIPs: [] + + ## Server metrics service configuration + metrics: + enabled: false + service: + annotations: {} + labels: {} + servicePort: 8083 + serviceMonitor: + enabled: false + # selector: + # prometheus: kube-prometheus + # namespace: monitoring + # additionalLabels: {} + + serviceAccount: + create: true + name: argocd-server + ## Annotations applied to created service account + annotations: {} + ## Automount API credentials for the Service Account + automountServiceAccountToken: true + + ingress: + enabled: true + annotations: {} + labels: {} + + ## Argo Ingress. + ## Hostnames must be provided if Ingress is enabled. + ## Secrets must be manually created in the namespace + ## + hosts: + - argocd.k3s + paths: + - / + extraPaths: + [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + tls: + [] + # - secretName: argocd-example-tls + # hosts: + # - argocd.example.com + https: false + # dedicated ingess for gRPC as documented at + # https://argoproj.github.io/argo-cd/operator-manual/ingress/ + ingressGrpc: + enabled: false + annotations: {} + labels: {} + + ## Argo Ingress. + ## Hostnames must be provided if Ingress is enabled. 
+ ## Secrets must be manually created in the namespace + ## + hosts: + [] + # - argocd.example.com + paths: + - / + extraPaths: + [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + tls: + [] + # - secretName: argocd-example-tls + # hosts: + # - argocd.example.com + https: false + + # Create a OpenShift Route with SSL passthrough for UI and CLI + # Consider setting 'hostname' e.g. https://argocd.apps-crc.testing/ using your Default Ingress Controller Domain + # Find your domain with: kubectl describe --namespace=openshift-ingress-operator ingresscontroller/default | grep Domain: + # If 'hostname' is an empty string "" OpenShift will create a hostname for you. + route: + enabled: false + hostname: "" + + ## ArgoCD config + ## reference https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml + configEnabled: true + config: + # Argo CD's externally facing base URL (optional). Required when configuring SSO + url: https://argocd.k3s + # Argo CD instance label key + application.instanceLabelKey: argocd.argoproj.io/instance + # repositories: | + # - url: git@github.com:group/repo.git + # sshPrivateKeySecret: + # name: secret-name + # key: sshPrivateKey + # - type: helm + # url: https://charts.helm.sh/stable + # name: stable + # - type: helm + # url: https://argoproj.github.io/argo-helm + # name: argo + # oidc.config: | + # name: AzureAD + # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0 + # clientID: CLIENT_ID + # clientSecret: $oidc.azuread.clientSecret + # requestedIDTokenClaims: + # groups: + # essential: true + # requestedScopes: + # - openid + # - profile + # - email + + ## Annotations to be added to ArgoCD ConfigMap + configAnnotations: {} + + ## ArgoCD rbac config + ## reference https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md + rbacConfig: + {} + # policy.csv is an file containing user-defined RBAC policies and role definitions (optional). + # Policy rules are in the form: + # p, subject, resource, action, object, effect + # Role definitions and bindings are in the form: + # g, subject, inherited-subject + # See https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md for additional information. + # policy.csv: | + # # Grant all members of the group 'my-org:team-alpha; the ability to sync apps in 'my-project' + # p, my-org:team-alpha, applications, sync, my-project/*, allow + # # Grant all members of 'my-org:team-beta' admins + # g, my-org:team-beta, role:admin + # policy.default is the name of the default role which Argo CD will falls back to, when + # authorizing API requests (optional). If omitted or empty, users may be still be able to login, + # but will see no apps, projects, etc... + # policy.default: role:readonly + # scopes controls which OIDC scopes to examine during rbac enforcement (in addition to `sub` scope). + # If omitted, defaults to: '[groups]'. The scope value can be a string, or a list of strings. + # scopes: '[cognito:groups, email]' + + ## Annotations to be added to ArgoCD rbac ConfigMap + rbacConfigAnnotations: {} + + # Boolean determining whether or not to create the configmap. If false, it is expected tthe configmap will be created + # by something else. ArgoCD will not work if there is no configMap created with the name above. + rbacConfigCreate: true + + ## Not well tested and not well supported on release v1.0.0. 
+ ## Applications + ## reference: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/ + additionalApplications: [] + # - name: guestbook + # namespace: argocd + # additionalLabels: {} + # additionalAnnotations: {} + # project: guestbook + # source: + # repoURL: https://github.com/argoproj/argocd-example-apps.git + # targetRevision: HEAD + # path: guestbook + # directory: + # recurse: true + # destination: + # server: https://kubernetes.default.svc + # namespace: guestbook + # syncPolicy: + # automated: + # prune: false + # selfHeal: false + + ## Projects + ## reference: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/ + additionalProjects: [] + # - name: guestbook + # namespace: argocd + # additionalLabels: {} + # additionalAnnotations: {} + # description: Example Project + # sourceRepos: + # - '*' + # destinations: + # - namespace: guestbook + # server: https://kubernetes.default.svc + # clusterResourceWhitelist: [] + # namespaceResourceBlacklist: + # - group: '' + # kind: ResourceQuota + # - group: '' + # kind: LimitRange + # - group: '' + # kind: NetworkPolicy + # orphanedResources: {} + # roles: [] + # namespaceResourceWhitelist: + # - group: 'apps' + # kind: Deployment + # - group: 'apps' + # kind: StatefulSet + # orphanedResources: {} + # roles: [] + # syncWindows: + # - kind: allow + # schedule: '10 1 * * *' + # duration: 1h + # applications: + # - '*-prod' + # manualSync: true + + ## Enable Admin ClusterRole resources. + ## Enable if you would like to grant rights to ArgoCD to deploy to the local Kubernetes cluster. + clusterAdminAccess: + enabled: true + + ## Enable BackendConfig custom resource for Google Kubernetes Engine + GKEbackendConfig: + enabled: false + spec: {} + # spec: + # iap: + # enabled: true + # oauthclientCredentials: + # secretName: argocd-secret + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. 
+ # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + +## Repo Server +repoServer: + name: repo-server + + replicas: 1 + + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + image: + repository: # argoproj/argocd + tag: # v1.7.11 + imagePullPolicy: # IfNotPresent + + ## Additional command line arguments to pass to argocd-repo-server + ## + extraArgs: [] + + ## Environment variables to pass to argocd-repo-server + ## + env: [] + + ## Argo repoServer log format: text|json + logFormat: text + ## Argo repoServer log level + logLevel: info + + ## Annotations to be added to repo server pods + ## + podAnnotations: {} + + ## Labels to be added to repo server pods + ## + podLabels: {} + + ## Configures the repo server port + containerPort: 8081 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + ## Additional volumeMounts to the repo server main container. + volumeMounts: [] + + ## Additional volumes to the repo server pod. + volumes: [] + + ## Node selectors and tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + nodeSelector: {} + tolerations: [] + affinity: {} + + priorityClassName: "" + + ## Labels to set container specific security contexts + containerSecurityContext: + {} + # capabilities: + # drop: + # - all + # readOnlyRootFilesystem: true + + resources: {} + # limits: + # cpu: 50m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 64Mi + + ## Repo server service configuration + service: + annotations: {} + labels: {} + port: 8081 + portName: https-repo-server + + ## Repo server metrics service configuration + metrics: + enabled: false + service: + annotations: {} + labels: {} + servicePort: 8084 + serviceMonitor: + enabled: false + # selector: + # prometheus: kube-prometheus + # namespace: monitoring + # additionalLabels: {} + + ## Repo server service account + ## If create is set to true, make sure to uncomment the name and update the rbac section below + serviceAccount: + create: false + # name: argocd-repo-server + ## Annotations applied to created service account + annotations: {} + ## Automount API credentials for the Service Account + automountServiceAccountToken: true + + ## Repo server rbac rules + # rbac: + # - apiGroups: + # - argoproj.io + # resources: + # - applications + # verbs: + # - get + # - list + # - watch + + ## Use init containers to configure custom tooling + ## https://argoproj.github.io/argo-cd/operator-manual/custom_tools/ + ## When using the volumes & volumeMounts section bellow, please comment out those above. 
+ # volumes: + # - name: custom-tools + # emptyDir: {} + # + # initContainers: + # - name: download-tools + # image: alpine:3.8 + # command: [sh, -c] + # args: + # - wget -qO- https://get.helm.sh/helm-v2.16.1-linux-amd64.tar.gz | tar -xvzf - && + # mv linux-amd64/helm /custom-tools/ + # volumeMounts: + # - mountPath: /custom-tools + # name: custom-tools + # volumeMounts: + # - mountPath: /usr/local/bin/helm + # name: custom-tools + # subPath: helm + +## Argo Configs +configs: + ## External Cluster Credentials + ## reference: + ## - https://argoproj.github.io/argo-cd/operator-manual/declarative-setup/#clusters + ## - https://argoproj.github.io/argo-cd/operator-manual/security/#external-cluster-credentials + clusterCredentials: [] + # - name: mycluster + # server: https://mycluster.com + # annotations: {} + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + # - name: mycluster2 + # server: https://mycluster2.com + # annotations: {} + # namespaces: namespace1,namespace2 + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + + knownHostsAnnotations: {} + knownHosts: + data: + ssh_known_hosts: | + bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== + github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== + gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= + gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf + gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 + ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + tlsCertsAnnotations: {} + tlsCerts: + {} + # data: + # argocd.example.com: | + # -----BEGIN CERTIFICATE----- + 
# MIIF1zCCA7+gAwIBAgIUQdTcSHY2Sxd3Tq/v1eIEZPCNbOowDQYJKoZIhvcNAQEL + # BQAwezELMAkGA1UEBhMCREUxFTATBgNVBAgMDExvd2VyIFNheG9ueTEQMA4GA1UE + # BwwHSGFub3ZlcjEVMBMGA1UECgwMVGVzdGluZyBDb3JwMRIwEAYDVQQLDAlUZXN0 + # c3VpdGUxGDAWBgNVBAMMD2Jhci5leGFtcGxlLmNvbTAeFw0xOTA3MDgxMzU2MTda + # Fw0yMDA3MDcxMzU2MTdaMHsxCzAJBgNVBAYTAkRFMRUwEwYDVQQIDAxMb3dlciBT + # YXhvbnkxEDAOBgNVBAcMB0hhbm92ZXIxFTATBgNVBAoMDFRlc3RpbmcgQ29ycDES + # MBAGA1UECwwJVGVzdHN1aXRlMRgwFgYDVQQDDA9iYXIuZXhhbXBsZS5jb20wggIi + # MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCv4mHMdVUcafmaSHVpUM0zZWp5 + # NFXfboxA4inuOkE8kZlbGSe7wiG9WqLirdr39Ts+WSAFA6oANvbzlu3JrEQ2CHPc + # CNQm6diPREFwcDPFCe/eMawbwkQAPVSHPts0UoRxnpZox5pn69ghncBR+jtvx+/u + # P6HdwW0qqTvfJnfAF1hBJ4oIk2AXiip5kkIznsAh9W6WRy6nTVCeetmIepDOGe0G + # ZJIRn/OfSz7NzKylfDCat2z3EAutyeT/5oXZoWOmGg/8T7pn/pR588GoYYKRQnp+ + # YilqCPFX+az09EqqK/iHXnkdZ/Z2fCuU+9M/Zhrnlwlygl3RuVBI6xhm/ZsXtL2E + # Gxa61lNy6pyx5+hSxHEFEJshXLtioRd702VdLKxEOuYSXKeJDs1x9o6cJ75S6hko + # Ml1L4zCU+xEsMcvb1iQ2n7PZdacqhkFRUVVVmJ56th8aYyX7KNX6M9CD+kMpNm6J + # kKC1li/Iy+RI138bAvaFplajMF551kt44dSvIoJIbTr1LigudzWPqk31QaZXV/4u + # kD1n4p/XMc9HYU/was/CmQBFqmIZedTLTtK7clkuFN6wbwzdo1wmUNgnySQuMacO + # gxhHxxzRWxd24uLyk9Px+9U3BfVPaRLiOPaPoC58lyVOykjSgfpgbus7JS69fCq7 + # bEH4Jatp/10zkco+UQIDAQABo1MwUTAdBgNVHQ4EFgQUjXH6PHi92y4C4hQpey86 + # r6+x1ewwHwYDVR0jBBgwFoAUjXH6PHi92y4C4hQpey86r6+x1ewwDwYDVR0TAQH/ + # BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAFE4SdKsX9UsLy+Z0xuHSxhTd0jfn + # Iih5mtzb8CDNO5oTw4z0aMeAvpsUvjJ/XjgxnkiRACXh7K9hsG2r+ageRWGevyvx + # CaRXFbherV1kTnZw4Y9/pgZTYVWs9jlqFOppz5sStkfjsDQ5lmPJGDii/StENAz2 + # XmtiPOgfG9Upb0GAJBCuKnrU9bIcT4L20gd2F4Y14ccyjlf8UiUi192IX6yM9OjT + # +TuXwZgqnTOq6piVgr+FTSa24qSvaXb5z/mJDLlk23npecTouLg83TNSn3R6fYQr + # d/Y9eXuUJ8U7/qTh2Ulz071AO9KzPOmleYPTx4Xty4xAtWi1QE5NHW9/Ajlv5OtO + # OnMNWIs7ssDJBsB7VFC8hcwf79jz7kC0xmQqDfw51Xhhk04kla+v+HZcFW2AO9so + # 6ZdVHHQnIbJa7yQJKZ+hK49IOoBR6JgdB5kymoplLLiuqZSYTcwSBZ72FYTm3iAr + # jzvt1hxpxVDmXvRnkhRrIRhK4QgJL0jRmirBjDY+PYYd7bdRIjN7WNZLFsgplnS8 + # 9w6CwG32pRlm0c8kkiQ7FXA6BYCqOsDI8f1VGQv331OpR2Ck+FTv+L7DAmg6l37W + # +LB9LGh4OAp68ImTjqf6ioGKG0RBSznwME+r4nXtT1S/qLR6ASWUS4ViWRhbRlNK + # XWyb96wrUlv+E8I= + # -----END CERTIFICATE----- + # Creates a secret with optional repository credentials + repositoryCredentials: + {} + # sample-ssh-key: | + # -----BEGIN RSA PRIVATE KEY----- + # MIICXAIBAAKBgQCcmiVJXGUvL8zqWmRRETbCKgFadtjJ9WDQpSwiZzMiktpYBo0N + # z0cThzGQfWqvdiJYEy72MrKCaSYssV3eHP5zTffk4VBDktNfdl1kgkOpqnh7tQO4 + # nBONRLzcK6KEbKUsmiTbW8Jb4UFYDhyyyveby7y3vYePmaRQIrlEenVfKwIDAQAB + # AoGAbbg+WZjnt9jYzHWKhZX29LDzg8ty9oT6URT4yB3gIOAdJMFqQHuyg8cb/e0x + # O0AcrfK623oHwgEj4vpeFwnfaBdtM5GfH9zaj6pnXV7VZc3oBHrBnHUgFT3NEYUe + # tt6rtatIguBH61Aj/pyij9sOfF0xDj0s1nwFTbdHtZR/31kCQQDIwcVTqhKkDNW6 + # cvdz+Wt3v9x1wNg+VhZhyA/pKILz3+qtn3GogLrQqhpVi+Y7tdvEv9FvgKaCjUp8 + # 6Lfp6dDFAkEAx7HpQbXFdrtcveOi9kosKRDX1PT4zdhB08jAXGlV8jr0jkrZazVM + # hV5rVCuu35Vh6x1fiyGwwiVsqhgWE+KPLwJAWrDemasM/LsnmjDxhJy6ZcBwsWlK + # xu5Q8h9UwLmiXtVayNBsofh1bGpLtzWZ7oN7ImidDkgJ8JQvgDoJS0xrGQJBALPJ + # FkMFnrjtqGqBVkc8shNqyZY90v6oM2OzupO4dht2PpUZCDPAMZtlTWXjSjabbCPc + # NxexBk1UmkdtFftjHxsCQGjG+nhRYH92MsmrbvZyFzgxg9SIOu6xel7D3Dq9l5Le + # XG+bpHPF4SiCpAxthP5WNa17zuvk+CDsMZgZNuhYNMo= + # -----END RSA PRIVATE KEY----- + secret: + createSecret: false + ## Annotations to be added to argocd-secret + ## + annotations: {} + + # Webhook Configs + githubSecret: "" + gitlabSecret: "" + bitbucketServerSecret: "" + bitbucketUUID: "" + gogsSecret: "" + + # Custom secrets. Useful for injecting SSO secrets into environment variables. 
+ # Ref: https://argoproj.github.io/argo-cd/operator-manual/sso/ + # Note that all values must be non-empty. + extra: + {} + # LDAP_PASSWORD: "mypassword" + + # Argo TLS Data. + argocdServerTlsConfig: + {} + # key: + # crt: | + # -----BEGIN CERTIFICATE----- + # + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # + # -----END CERTIFICATE----- + + # Argo expects the password in the secret to be bcrypt hashed. You can create this hash with + # `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'` + # argocdServerAdminPassword: + # Password modification time defaults to current time if not set + # argocdServerAdminPasswordMtime: "2006-01-02T15:04:05Z" + +openshift: + enabled: false diff --git a/projects/homeassistant/project.yaml b/projects/homeassistant/project.yaml new file mode 100644 index 00000000..ba66374e --- /dev/null +++ b/projects/homeassistant/project.yaml @@ -0,0 +1,14 @@ +config: + description: Home Automation + syncWave: 100 + repo: k8s-at-home +apps: +- name: homeassistant + chart: home-assistant + targetRevision: 11.0.5 + secrets: + - name: hass-postgres + keys: + - postgresql-username + - postgresql-password + - postgresql-postgres-password diff --git a/projects/homeassistant/values/homeassistant.yaml b/projects/homeassistant/values/homeassistant.yaml new file mode 100644 index 00000000..28e7587e --- /dev/null +++ b/projects/homeassistant/values/homeassistant.yaml @@ -0,0 +1,101 @@ +additionalContainers: + addon-homematic: + name: addon-homematic + image: homeassistant/i386-addon-homematic:latest + volumeMounts: + - name: data + mountPath: /data + - name: config + mountPath: /config + + homegear: + name: homegear + image: homegear/homegear:stable + volumeMounts: + - name: homegear-config + mountPath: /etc/homegear + - name: homegear-lib + mountPath: /var/lib/homegear + env: + - name: HOST_USER_ID + value: "1000" + - name: HOST_USER_GID + value: "1000" + ports: + - name: homegear + containerPort: 2001 + securityContext: + privileged: true + +env: + TZ: UTC + +influxdb: + architecture: standalone + authEnabled: false + database: home_assistant + enabled: true + persistence: + enabled: true + size: 8Gi + +ingress: + main: + annotations: + cert-manager.io/cluster-issuer: vault-issuer + enabled: true + hosts: + - host: hass.dc + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - hass.dc + secretName: hass-tls + +metrics: + enabled: false + prometheusRule: + enabled: false + labels: {} + rules: [] + serviceMonitor: + interval: 1m + labels: {} + scrapeTimeout: 30s + +persistence: + config: + enabled: true + usb: + enabled: true + hostPath: /dev/ttyUSB0 + type: hostPath + homegear-config: + enabled: true + storageClass: local-path + accessMode: ReadWriteOnce + size: 1Gi + homegear-lib: + enabled: true + storageClass: local-path + accessMode: ReadWriteOnce + size: 1Gi + data: + enabled: true + storageClass: local-path + accessMode: ReadWriteOnce + size: 1Gi + +postgresql: + enabled: true + existingSecret: hass-postgres + persistence: + enabled: true + size: 8Gi + postgresqlDatabase: homeassistant + postgresqlUsername: homeassistant + +securityContext: + privileged: false diff --git a/projects/homer/project.yml b/projects/homer/project.yml new file mode 100644 index 00000000..2005fd1b --- /dev/null +++ b/projects/homer/project.yml @@ -0,0 +1,7 @@ +config: + description: Homer Hive Dashboard +apps: +- name: homer + repoURL: https://k8s-at-home.com/charts/ + chart: homer + targetRevision: 6.0.1 diff --git a/projects/homer/values/homer.yml 
b/projects/homer/values/homer.yml new file mode 100644 index 00000000..ef53128a --- /dev/null +++ b/projects/homer/values/homer.yml @@ -0,0 +1,182 @@ +ingress: + main: + enabled: true + annotations: + cert-manager.io/cluster-issuer: vault-issuer + hosts: + - host: homer.dc + paths: + - path: / + pathType: Prefix + tls: + - secretName: homer-tls + hosts: + - homer.dc + +configmap: + # -- Store homer configuration as a ConfigMap + enabled: true + # -- Homer configuration. See [image documentation](https://github.com/bastienwirtz/homer/blob/main/docs/configuration.md) for more information. + # @default -- See values.yaml + config: | + --- + title: "Hive Dashboard" + subtitle: "Homer on the Hive" + logo: "logo.png" + + header: true + footer: false + columns: "6" + + connectivityCheck: true + + # Optional theme customization + theme: default + colors: + dark: + highlight-primary: "#013c3d" + highlight-secondary: "#057752" + highlight-hover: "#2a8769" + background: "#131313" + card-background: "#2b2b2b" + text: "#eaeaea" + text-header: "#ffffff" + text-title: "#fafafa" + text-subtitle: "#f5f5f5" + card-shadow: rgba(0, 0, 0, 0.4) + link-hover: "#ffdd57" + #message: + # Optional navbar + links: [] # Allows for navbar (dark mode, layout, and search) without any links + #links: + # - name: "Contribute" + # icon: "fab fa-github" + # url: "https://github.com/bastienwirtz/homer" + # target: "_blank" # optional html a tag target attribute + # - name: "Wiki" + # icon: "fas fa-book" + # url: "https://www.wikipedia.org/" + # this will link to a second homer page that will load config from additionnal-page.yml and keep default config values as in config.yml file + # see url field and assets/additionnal-page.yml.dist used in this example: + # - name: "another page!" + # icon: "fas fa-file-alt" + # url: "#additionnal-page" + # Services + # First level array represent a group. + # Leave only a "items" key if not using group (group name, icon & tagstyle are optional, section separation will not be displayed). 
+ services: + - name: "// Admin" + icon: "fas fa-tools" + items: + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/openwrt.png + name: OpenWRT + url: https://openwrt.lan + target: "_blank" + - logo: https://argocd.dc/assets/images/logo.png + name: ArgoCD + url: https://argocd.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/vault.png + name: Vault + url: https://vault.dc + target: "_blank" + - logo: https://grafana.dc/public/img/grafana_icon.svg + name: Grafana + url: https://grafana.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png + name: Falco + url: https://falco.dc/ui + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/minio.png + name: MinIO + url: https://minio.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/kibana.png + name: Kibana + url: https://kibana.dc + target: "_blank" + + - name: "// Coding" + icon: fas fa-code-branch + items: + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/drone.png + name: Drone.io + url: https://drone.nold.in + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/gitea.png + name: Gitea + url: https://git.nold.in + target: "_blank" + + - name: "// Arrrrrr" + icon: "fas fa-download" + items: + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/ombi.png + name: Ombi + url: https://ombi.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/radarr.png + name: Radarr + url: https://radarr.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/sonarr.png + name: Sonarr + url: https://sonarr.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/lidarr.png + name: Lidarr + url: https://lidarr.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/bazarr.png + name: Bazarr + url: https://bazarr.dc + target: "_blank" + - logo: https://jackett.dc/jacket_medium.png + name: Jackett + url: https://jackett.dc + target: "_blank" + + - name: "// Apps" + icon: "fas fa-cloud" + items: + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/octoprint.png + name: OctoPrint + url: https://octo.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/kodi.png + name: Kodi + url: http://libreelec.lan:8080 + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/home-assistant.png + name: HomeAssi + url: https://hass.dc + target: "_blank" + - logo: https://www.chia.net/android-chrome-384x384.png + name: Chia Farm + url: https://chia.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/navidrome.png + name: Music Stream + url: https://music.dc + target: "_blank" + + - name: "// Loader" + icon: "fas fa-download" + items: + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/deluge.png + name: Deluge + url: https://torrent.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/flood.png + name: Flood + url: https://flood.dc + target: "_blank" + - logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/pyload.png + name: pyLoad + url: https://pyload.dc + target: "_blank" + - logo: 
https://raw.githubusercontent.com/NX211/homer-icons/master/png/youtube.png + name: YouTube-dl + url: https://youtubedl.dc + target: "_blank" diff --git a/projects/jellyfin/project.yaml b/projects/jellyfin/project.yaml new file mode 100644 index 00000000..16189c4a --- /dev/null +++ b/projects/jellyfin/project.yaml @@ -0,0 +1,15 @@ +config: + description: Stream stuff + syncWave: 100 + repo: k8s-at-home + networkPolicy: + groups: + - internet + +apps: +- name: jellyfin + chart: jellyfin + targetRevision: 9.1.0 + include: + - noRoot + - tmpdirs diff --git a/projects/jellyfin/values/jellyfin.yaml b/projects/jellyfin/values/jellyfin.yaml new file mode 100644 index 00000000..96c86331 --- /dev/null +++ b/projects/jellyfin/values/jellyfin.yaml @@ -0,0 +1,40 @@ +ingress: + main: + enabled: true + annotations: + cert-manager.io/cluster-issuer: letsencrypt + kubernetes.io/ingress.class: external + kubernetes.io/tls-acme: "true" + hosts: + - host: stream.nold.in + paths: + - path: / + pathType: Prefix + tls: + - secretName: jellyfin-tls + hosts: + - stream.nold.in + +securityContext: + runAsUser: 568 + runAsGroup: 568 +# fsGroup: 568 + privileged: false + +persistence: + config: + enabled: true + storageClass: local-path + accessMode: ReadWriteOnce + size: 1Gi + cache: + enabled: true + storageClass: local-path + accessMode: ReadWriteOnce + size: 1Gi + media: + enabled: true + type: hostPath + mountPath: /media + hostPath: /data/media/stream + diff --git a/projects/navidrome/project.yaml b/projects/navidrome/project.yaml new file mode 100644 index 00000000..e6da14c4 --- /dev/null +++ b/projects/navidrome/project.yaml @@ -0,0 +1,9 @@ +config: + description: Stream Music + syncWave: 100 + repo: k8s-at-home + +apps: +- name: navidrome + chart: navidrome + targetRevision: 6.0.1 diff --git a/projects/navidrome/values/navidrome.yaml b/projects/navidrome/values/navidrome.yaml new file mode 100644 index 00000000..55afa82f --- /dev/null +++ b/projects/navidrome/values/navidrome.yaml @@ -0,0 +1,75 @@ +image: + # -- image repository + repository: deluan/navidrome + # -- image tag + #tag: 0.43.0 + # -- image pull policy + pullPolicy: IfNotPresent + +# -- environment variables. See [navidrome docs](https://www.navidrome.org/docs/usage/configuration-options/#environment-variables) for more details. +# @default -- See below +env: + # -- Set the container timezone + TZ: UTC + # -- Log level. Useful for troubleshooting. + ND_LOGLEVEL: info + # -- How long Navidrome will wait before closing web ui idle sessions + ND_SESSIONTIMEOUT: 24h + # -- Enables transcoding configuration in the UI + ND_ENABLETRANSCODINGCONFIG: "true" + # -- Folder where your music library is stored. + ND_MUSICFOLDER: /music + # Disable Scanning Scheduling + ND_SCANSCHEDULE: "0" + +podSecurityContext: + runAsUser: 1420 + runAsGroup: 2420 + fsGroup: 2420 + +securityContext: + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + +# -- Configures service settings for the chart. +# @default -- See values.yaml +service: + main: + ports: + http: + port: 4533 + +ingress: + main: + enabled: true + annotations: + cert-manager.io/cluster-issuer: vault-issuer + hosts: + - host: music.dc + paths: + - path: / + pathType: Prefix + tls: + - secretName: music-tls + hosts: + - music.dc + +# -- Configure persistence settings for the chart under this key. 
+# @default -- See values.yaml +persistence: + config: + enabled: true + mountPath: /data + storageClass: local-path + accessMode: ReadWriteOnce + size: 1Gi + music: + enabled: true + mountPath: /music + type: hostPath + hostPath: /data/media/music diff --git a/projects/nextcloud/project.yaml b/projects/nextcloud/project.yaml new file mode 100644 index 00000000..d5c4ffc5 --- /dev/null +++ b/projects/nextcloud/project.yaml @@ -0,0 +1,26 @@ +config: + description: Public Nextcloud + networkPolicy: + groups: + - internet +apps: +- name: nextcloud + repoURL: https://nextcloud.github.io/helm + chart: nextcloud + targetRevision: 2.9.0 + secrets: + - name: nextcloud-user + keys: + - username + - password + - smtp_username + - smtp_password + - name: nextcloud-postgres + keys: + - postgresql-username + - postgresql-password + - postgresql-postgres-password + - name: nextcloud-db + keys: + - db-username + - db-password diff --git a/projects/nextcloud/values/nextcloud.yaml b/projects/nextcloud/values/nextcloud.yaml new file mode 100644 index 00000000..8704be0e --- /dev/null +++ b/projects/nextcloud/values/nextcloud.yaml @@ -0,0 +1,143 @@ +image: + tag: 21-fpm + pullPolicy: Always + +nextcloud: + host: share.gnu.one + extraEnv: + - name: HTTP_PROXY + value: http://proxy-squid.proxy.svc.cluster.local:80 + - name: HTTPS_PROXY + value: http://proxy-squid.proxy.svc.cluster.local:80 + - name: NO_PROXY + value: .cluster.local + existingSecret: + enabled: true + secretName: nextcloud-user + usernameKey: username + passwordKey: password + smtpUsernameKey: smtp_username + smtpPasswordKey: smtp_password + configs: + proxy.config.php: |- + 'proxy-squid.proxy.svc.cluster.local:80', + 'trusted_proxies' => + array ( + 0 => 'proxy-squid.proxy.svc.cluster.local', + ), + 'proxyexclude' => ['.cluster.local'], + 'debug' => true, + 'loglevel' => 1, + ); + extraSecurityContext: + runAsUser: "33" + runAsGroup: "33" + runAsNonRoot: true + readOnlyRootFilesystem: true + phpConfigs: + memory_limit.conf: | + php_admin_value[memory_limit] = 512M + tuning.conf: | + pm = dynamic + pm.max_children = 64 + pm.start_servers = 12 + pm.min_spare_servers = 8 + pm.max_spare_servers = 24 + pm.max_requests = 1000 + +ingress: + enabled: true + annotations: + nginx.ingress.kubernetes.io/proxy-body-size: 4G + kubernetes.io/ingress.class: "external" + kubernetes.io/tls-acme: "true" + cert-manager.io/cluster-issuer: letsencrypt +# nginx.ingress.kubernetes.io/server-snippet: |- +# server_tokens off; +# proxy_hide_header X-Powered-By; +# +# rewrite ^/.well-known/webfinger /public.php?service=webfinger last; +# rewrite ^/.well-known/host-meta /public.php?service=host-meta last; +# rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json; +# location = /.well-known/carddav { +# return 301 $scheme://$host/remote.php/dav; +# } +# location = /.well-known/caldav { +# return 301 $scheme://$host/remote.php/dav; +# } +# location = /robots.txt { +# allow all; +# log_not_found off; +# access_log off; +# } +# location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ { +# deny all; +# } +# location ~ ^/(?:autotest|occ|issue|indie|db_|console) { +# deny all; +# } + tls: + - secretName: nextcloud-tls + hosts: + - share.gnu.one + +nginx: + enabled: true + +cronjob: + enabled: false + curlInsecure: true + +internalDatabase: + enabled: false + +externalDatabase: + enabled: true + type: postgresql + host: nextcloud-postgresql.nextcloud.svc.cluster.local + existingSecret: + enabled: true + secretName: nextcloud-postgres + passwordKey: 
postgresql-password + usernameKey: postgresql-username + +postgresql: + enabled: true + postgresqlDatabase: nextcloud + postgresqlUsername: nextcloud + existingSecret: nextcloud-postgres + persistence: + enabled: true + +redis: + enabled: false + architecture: standalone + auth: + existingSecret: nextcloud-redis + existingSecretPasswordKey: password + replica: + replicaCount: 1 + rbac: + create: false + podSecurityPolicy: + enabled: true + create: true + +persistence: + enabled: true + storageClass: local-path + size: 100Gi + persistence: + enabled: true + +rbac: + enabled: true + +readinessProbe: + initialDelaySeconds: 60 +livenessProbe: + initialDelaySeconds: 60 +startupProbe: + initialDelaySeconds: 60 diff --git a/projects/services/project.yml b/projects/services/project.yml new file mode 100644 index 00000000..1a3fa018 --- /dev/null +++ b/projects/services/project.yml @@ -0,0 +1,21 @@ +config: + description: Shared Network Services + +apps: +# Squid Internet Proxy +- name: proxy + namespace: proxy + repoURL: http://honestica.github.io/lifen-charts + chart: squid + targetRevision: 0.3.0 + +- name: minio + namespace: minio + repoURL: https://charts.bitnami.com/bitnami + chart: minio + targetRevision: 9.0.2 + secrets: + - name: minio-auth + keys: + - root-user + - root-password diff --git a/projects/services/values/minio.yaml b/projects/services/values/minio.yaml new file mode 100644 index 00000000..132ec422 --- /dev/null +++ b/projects/services/values/minio.yaml @@ -0,0 +1,16 @@ +defaultBuckets: "public, drone, temp" +auth: + existingSecret: minio-auth + +ingress: + enabled: true + hostname: minio.dc + tls: true + annotations: + cert-manager.io/cluster-issuer: vault-issuer + +networkPolicy: + enabled: true + +persistence: + enabled: true diff --git a/projects/services/values/proxy.yaml b/projects/services/values/proxy.yaml new file mode 100644 index 00000000..434d30de --- /dev/null +++ b/projects/services/values/proxy.yaml @@ -0,0 +1,102 @@ +# Default values for squid. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: honestica/squid + tag: 4-f9839050-1344-48d2-981a-b73e4541e193 + pullPolicy: IfNotPresent + # imagePullSecrets: + +service: + type: ClusterIP + # Specify IP to whitelist if needed + #loadBalancerSourceRanges: "" + # Specify external IP if needed + #loadBalancerIP: "" + port: 80 + # annotations: {} + +ingress: + enabled: true + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - proxy.dc + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +config: | + acl SSL_ports port 443 + acl Safe_ports port 80 # http + acl Safe_ports port 443 # https + acl CONNECT method CONNECT + + acl restricted_destination_subnetworks dst 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 + + # Recommended minimum Access Permission configuration: + # + # Deny requests to certain unsafe ports + http_access deny !Safe_ports + + # Only allow cachemgr access from localhost + http_access allow localhost manager + http_access deny manager + + http_access deny restricted_destination_subnetworks + + # Squid normally listens to port 3128 + http_port 3128 + + # Uncomment and adjust the following to add a disk cache directory. + #cache_dir ufs /var/cache/squid 100 16 256 + + # Leave coredumps in the first cache dir + coredump_dir /var/cache/squid + + # + # Add any of your own refresh_pattern entries above these. 
+ # + refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 + refresh_pattern . 0 20% 4320 + + # Do not display squid version + httpd_suppress_version_string on + + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +releaseAntiAffinity: true + +metrics: + enabled: false + serviceMonitor: false + exporter: + port: 9301 + resources: {} + image: + repository: boynux/squid-exporter + tag: v1.8 + pullPolicy: IfNotPresent diff --git a/projects/vault/project.yml b/projects/vault/project.yml new file mode 100644 index 00000000..1f526a8a --- /dev/null +++ b/projects/vault/project.yml @@ -0,0 +1,22 @@ +config: + description: Vault Secret Managemet + +apps: + - name: vault + repoURL: https://helm.releases.hashicorp.com + chart: vault + targetRevision: 0.15.0 + syncWave: "-3" + parameters: + - name: global.psp.enabled + value: "true" + - name: server.dev.enabled + value: "false" + + # Vault Secret Operator for automatic Secret injection + - name: vault-secrets-operator + namespace: vault-secrets-operator + repoURL: https://ricoberger.github.io/helm-charts + chart: vault-secrets-operator + targetRevision: 1.15.1 + syncWave: "-2" diff --git a/projects/vault/values/vault-secrets-operator.yaml b/projects/vault/values/vault-secrets-operator.yaml new file mode 100644 index 00000000..6d218bd0 --- /dev/null +++ b/projects/vault/values/vault-secrets-operator.yaml @@ -0,0 +1,17 @@ +vault: + address: "http://vault.vault.svc.cluster.local:8200" + authMethod: kubernetes + kubernetesRole: heqet-app + namespaces: "" + +crd: + create: false + +rbac: + create: true + createrole: true + namespaced: false + +serviceAccount: + create: true + name: vault-secrets-operator diff --git a/projects/vault/values/vault.yaml b/projects/vault/values/vault.yaml new file mode 100644 index 00000000..16d8ee76 --- /dev/null +++ b/projects/vault/values/vault.yaml @@ -0,0 +1,61 @@ +global: + enabled: true + tlsDisable: true + psp: + enable: true +injector: + enabled: false +server: + enabled: true + auditStorage: + accessMode: ReadWriteOnce + annotations: {} + enabled: false + mountPath: /vault/audit + size: 10Gi + storageClass: null + authDelegator: + enabled: true + dataStorage: + accessMode: ReadWriteOnce + annotations: {} + enabled: true + mountPath: /vault/data + size: 10Gi + storageClass: local-path + dev: + enabled: false + ha: + enabled: false + ingress: + annotations: + cert-manager.io/cluster-issuer: vault-issuer + kubernetes.io/ingress.class: nginx + enabled: true + extraPaths: [] + hosts: + - host: vault.dc + paths: [] + labels: {} + tls: + - hosts: + - vault.dc + secretName: vault-tls + networkPolicy: + egress: [] + enabled: true + standalone: + enabled: true + config: | + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + storage "file" { + path = "/vault/data" + } +ui: + enabled: true \ No newline at end of file diff --git a/resources/manifests/clusterissuer.yaml b/resources/manifests/clusterissuer.yaml new file mode 100644 index 00000000..18da793b --- /dev/null +++ b/resources/manifests/clusterissuer.yaml @@ 
-0,0 +1,21 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt + namespace: cert-manager +spec: + acme: + # You must replace this email address with your own. + # Let's Encrypt will use this to contact you about expiring + # certificates, and issues related to your account. + email: nold@gnu.one + #server: https://acme-staging-v02.api.letsencrypt.org/directory + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + # Secret resource that will be used to store the account's private key. + name: issuer-account-key + # Add a single challenge solver, HTTP01 using nginx + solvers: + - http01: + ingress: + class: external diff --git a/resources/manifests/vault_clusterissuer.yaml b/resources/manifests/vault_clusterissuer.yaml new file mode 100644 index 00000000..711f4254 --- /dev/null +++ b/resources/manifests/vault_clusterissuer.yaml @@ -0,0 +1,17 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: vault-issuer + namespace: cert-manager +spec: + vault: + path: pki_int/sign/dc + server: http://vault.vault.svc.cluster.local:8200 + caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaekNDQWsrZ0F3SUJBZ0lVVU5CTWNDZkRmbS9MeS9RUGdhWVUxdFdSc1Nrd0RRWUpLb1pJaHZjTkFRRUwKQlFBd016RVVNQklHQTFVRUNoTUxibTlzWkhSeWIyNXBZM014RFRBTEJnTlZCQXNUQkdocGRtVXhEREFLQmdOVgpCQU1UQTJzemN6QWVGdzB5TVRBME1qa3hPVEUwTVRWYUZ3MHlNVEExTXpFeE9URTBORFJhTURNeEZEQVNCZ05WCkJBb1RDMjV2YkdSMGNtOXVhV056TVEwd0N3WURWUVFMRXdSb2FYWmxNUXd3Q2dZRFZRUURFd05yTTNNd2dnRWkKTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDOTJDb2Z6cHdvNWZjNDNPNTJvaGhqdHAwZgplRXE0ZkRTQ24rQjZwYUVmcGtsZXZvM2J1akw3ZnhTcmt2VlhuRDgyc3FCSythWjRCM0pJNVBMSHpoeTBwcVFSCm9rSDdiSUxuYVBGSlljZGYzWnF4VDVvT1QvWC9IeUlDQkNBbFdoN2ZNZThJYitFbm5oUGpFdlVTWHJzWTk4T3IKQVhpVGN4TlF0OVl5WnIzQS93cnloM2lIZmYyR3NuTGVEekRhV0tyMm93N3pCckZvZXFJRkdzWXY4b1YzcFZIZQpPdTloWWQ1V2F2ZjVRSjBQcTAwUndKTHRuc2V0RUdQenlEUlJTRnhKbmZSK2JwY1VSZDFrWkl5ZFpTRENNVFc5CnhtSTJQOUxvT2cwMThWb0MrM3l5V01PVEh1dmRraWh2SDBQM3RhcnpFcnd0cm0vOUlkT3J4NVhPVUVKUEFnTUIKQUFHamN6QnhNQTRHQTFVZER3RUIvd1FFQXdJQkJqQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRVwpCQlMzOWxNTVFySDNoczBTQzk4bVFJODhEbmF6VXpBZkJnTlZIU01FR0RBV2dCUzM5bE1NUXJIM2hzMFNDOThtClFJODhEbmF6VXpBT0JnTlZIUkVFQnpBRmdnTnJNM013RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUV1VG04SzQKRndoaUFOc2dsSXhjTk10aHJYQ0p4aUhyMHJVRWFOclQ5czlSYUxRSEZlSkhhZUxiL2NJUXZncjdQbW93aURMYQpvRlVZUk1EamZKNzUwK1Jmc21oa2pmdzdJL1JMWktsVXdMY1l4dlN4MjViWFNZVkdOTW5UUi9wTmN6cWNXSk5NCmpWZEZFbDRlRjMzQmtCWCtzN1pTWGxVVUhhdFloTEszQ1EzRk5tSWRjRFliNFovVWY1bXk1eUs2cG5HT3hQbnkKTnkyY0xsYndzWVYrelA5UGR4Y3JqUTlpem1rYmMxMUVxS3ZmYnhmUmc4aGlMZkovSnFaM0hxMW91SUk0V3dvMQo3WWxxVEZsS2FXRnpKNVhOSmVPYWZyNnZpREszQ3NQMjJYTXhMekRQQmdrbEd5Qnp0cFdKbVd1ZEVVanNKVDJiCnJsKzlOdEw5TmgzQklrcz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + auth: + kubernetes: + role: vault-issuer + mountPath: /v1/auth/kubernetes + secretRef: + name: vault-issuer-token + key: token diff --git a/resources/manifests/vault_issuer_serviceaccount.yaml b/resources/manifests/vault_issuer_serviceaccount.yaml new file mode 100644 index 00000000..42b05829 --- /dev/null +++ b/resources/manifests/vault_issuer_serviceaccount.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vault-issuer + namespace: cert-manager +secrets: +- name: vault-issuer-token +--- +apiVersion: v1 +kind: Secret +metadata: + name: vault-issuer-token + namespace: cert-manager + annotations: + kubernetes.io/service-account.name: vault-issuer +type: kubernetes.io/service-account-token
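Both ClusterIssuers above are only referenced indirectly: the ingress definitions throughout this change carry a cert-manager.io/cluster-issuer annotation plus a tls secretName, and cert-manager's ingress-shim is expected to create the matching Certificate objects. For reference, a sketch of the explicit Certificate that would correspond to, for example, the homer ingress might look like this (the homer namespace is an assumption about how heqet namespaces its projects, not something stated in the patch):

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: homer-tls
  namespace: homer            # assumption: apps land in a namespace named after the project
spec:
  secretName: homer-tls       # matches ingress.tls[].secretName in values/homer.yml
  dnsNames:
    - homer.dc                # matches the ingress host
  issuerRef:
    name: vault-issuer        # the ClusterIssuer from resources/manifests/vault_clusterissuer.yaml
    kind: ClusterIssuer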
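Similarly, the secrets: blocks in the project.yml files (for example minio-auth with root-user and root-password) are presumably rendered into VaultSecret resources handled by the vault-secrets-operator deployed in projects/vault; note crd.create is false there, so the VaultSecret CRD must already exist in the cluster. A minimal sketch of such a resource, assuming the operator's v1alpha1 schema and an illustrative Vault KV layout:

apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
  name: minio-auth
  namespace: minio            # the minio app is deployed into the minio namespace per projects/services/project.yml
spec:
  path: kvv2/minio/minio-auth # illustrative path; depends on how the Vault KV store is actually laid out
  type: Opaque
  keys:                       # the keys listed under secrets: in project.yml
    - root-user
    - root-password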