App Migration

This commit is contained in:
nold 2021-11-28 11:16:31 +01:00
parent d5fb35a18b
commit cc9e40299a
47 changed files with 3608 additions and 0 deletions

View file

@ -0,0 +1,18 @@
# Heqet project definition: deploys ArgoCD itself via the upstream Helm chart.
config:
  # Fixed typo: "Continous" -> "Continuous"
  description: ArgoCD - Continuous Deployment from Git
apps:
  - name: argocd
    repoURL: https://argoproj.github.io/argo-helm
    chart: argo-cd
    targetRevision: 3.26.10
    # Quoted so the sync wave stays a string, not an int
    syncWave: "0"
    secrets:
      - name: argocd-secret
        keys:
          - admin.password
          - server.secretkey
          - oidc.auth0.clientSecret
      - name: ca-cert
        keys:
          - ca

View file

@ -0,0 +1,213 @@
## ArgoCD configuration
## Ref: https://github.com/argoproj/argo-cd
##
# Optional CRD installation for those without Helm hooks
installCRDs: true

global:
  image:
    repository: quay.io/argoproj/argocd
    # NOTE(review): "latest" is an unpinned tag; consider pinning to the
    # image version matching chart 3.26.10 for reproducible deployments.
    tag: latest
    # imagePullPolicy: IfNotPresent
  securityContext:
    runAsUser: 999
    runAsGroup: 999
    fsGroup: 999

## Controller
controller:
  ## Labels to set container specific security contexts
  containerSecurityContext:
    capabilities:
      drop:
        - all
    readOnlyRootFilesystem: true
  ## Server metrics controller configuration
  metrics:
    enabled: true
    service:
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '8082'
  clusterAdminAccess:
    enabled: true

## Dex
dex:
  enabled: true
  ## Labels to set container specific security contexts
  containerSecurityContext:
    capabilities:
      drop:
        - all
    readOnlyRootFilesystem: true

## Redis
redis:
  enabled: true
  ## Labels to set container specific security contexts
  containerSecurityContext:
    capabilities:
      drop:
        - all
    readOnlyRootFilesystem: true
  ## Redis Pod specific security context
  securityContext:
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
    runAsNonRoot: true

## Server
server:
  extraArgs:
    # TLS is terminated at the ingress; ArgoCD serves plain HTTP behind it
    - --insecure
  ## Labels to set container specific security contexts
  containerSecurityContext:
    capabilities:
      drop:
        - all
    readOnlyRootFilesystem: true
  ## Server metrics service configuration
  metrics:
    enabled: true
    service:
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '8083'
      servicePort: 8083
  ingress:
    enabled: true
    annotations:
      cert-manager.io/cluster-issuer: vault-issuer
      kubernetes.io/ingress.class: nginx
    hosts:
      - argocd.dc
    paths:
      - /
    tls:
      - secretName: argocd-tls
        hosts:
          - argocd.dc
    https: false
  # dedicated ingress for gRPC as documented at
  # https://argoproj.github.io/argo-cd/operator-manual/ingress/
  ## ArgoCD config
  ## reference https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml
  configEnabled: true
  config:
    # Argo CD's externally facing base URL (optional). Required when configuring SSO
    url: https://argocd.dc
    accounts.webhook: apiKey, login
    oidc.config: |
      name: Authentik
      issuer: https://auth.dc/application/o/argocd/
      clientID: 0c149045b7b87eb80e41fcdd3e788476472d7316
      clientSecret: $oidc.auth0.clientSecret
      requestedScopes: ["openid", "profile", "email", "groups"]
  rbacConfig:
    policy.csv: |
      g, ArgoCDAdmins, role:admin
  # Mount public CA cert
  volumeMounts:
    - name: certificate
      mountPath: /etc/ssl/certs/ca.crt
      subPath: ca
  volumes:
    - name: certificate
      secret:
        secretName: ca-cert
        defaultMode: 420
  additionalApplications: []
  ## Projects
  ## reference: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/
  additionalProjects: []
  ## Enable Admin ClusterRole resources.
  ## Enable if you would like to grant rights to ArgoCD to deploy to the local Kubernetes cluster.
  clusterAdminAccess:
    enabled: true

## Repo Server
repoServer:
  containerSecurityContext:
    capabilities:
      drop:
        - all
    readOnlyRootFilesystem: true
  ## Repo server metrics service configuration
  metrics:
    enabled: true
    service:
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '8084'
      servicePort: 8084
  volumes:
    - name: var-files
      emptyDir: {}
    - name: plugins
      emptyDir: {}
    # FIX: tmp-dir is mounted at /tmp by the cmp-heqet extra container below
    # but was never defined; a pod spec referencing an undefined volume is
    # rejected by the API server.
    - name: tmp-dir
      emptyDir: {}
  volumeMounts:
    - mountPath: /home/argocd/cmp-server/plugins
      name: plugins
  initContainers:
    # Copies the argocd binary so the sidecar can run argocd-cmp-server
    - name: copy-cmp-server
      image: quay.io/argoproj/argocd:latest
      command:
        - cp
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
      volumeMounts:
        - mountPath: /var/run/argocd
          name: var-files
  extraContainers:
    # Config-management-plugin sidecar rendering heqet manifests
    - name: cmp-heqet
      command: [/var/run/argocd/argocd-cmp-server]
      image: lib42/heqet-cli:latest
      securityContext:
        runAsNonRoot: true
        runAsUser: 999
      volumeMounts:
        - mountPath: /var/run/argocd
          name: var-files
        - mountPath: /home/argocd/cmp-server/plugins
          name: plugins
        - mountPath: /tmp
          name: tmp-dir
  ## Repo server rbac rules
  # rbac:
  #   - apiGroups:
  #       - argoproj.io
  #     resources:
  #       - applications
  #     verbs:
  #       - get
  #       - list
  #       - watch

configs:
  secret:
    # Secret is provisioned externally (see project.yml secrets)
    createSecret: false

View file

@ -0,0 +1,8 @@
# Heqet project definition: BorgBackup SSH server from the lib42 charts repo.
config:
  description: BorgBackup SSH-Server
apps:
  - name: backup-lan
    repoURL: https://github.com/lib42/charts.git
    path: charts/borgserver
    targetRevision: dev

View file

@ -0,0 +1,10 @@
# Authorized BorgBackup clients. Each entry becomes an SSH authorized key on
# the borgserver; these are public keys only, safe to keep in git.
clients:
  - name: noldbook
    type: ssh-rsa
    key: AAAAB3NzaC1yc2EAAAADAQABAAABAQDJc7+boEpuSfjBM5y/qYfAnaGoYFP74yXuDmnlcY9glrRTGV2UVYFQV+fFl8pAT6aiqJUcbylBq+kQFvFHTI2JW7iux+JO+o/eEpMYqoNe5kIewYTHWaBL+6h7B90NIgE8ec1Ce7Oqm9+ttAa51Wu5K5zXXLWHds6nlqLG5llNiSZB4yxCJ/oyj5uQKmeAY+Hr4XjFsnisuaajSrvNaR7gshrme8A7wxn3qORe62ux33bPgEXjwUfPJZrHeeRWBMfnWoHBH1RybwC8FboNDes6gXgx3hJiQ+UfslmmFgpADWos216YX2FKXxDk19K/gXvejSuljO8fCBeQIdo/1xVh
  - name: hive
    type: ssh-rsa
    key: AAAAB3NzaC1yc2EAAAADAQABAAACAQDVq6KXD45FNZjWDRytd97YsrOdIP9JnbftO/pbSD8l86GZsxhZ9Gk/SVsGyZ8z3Mi0RNetFvelLoU/QW8lx5ETAUHJuEUj+EjWX6bLEUGWq0YAhcQRVnD70fe0ANbWp6njYL86B5X1EGgT7y55iTW6V/ssLkIKtgUeqV61GaBQoHaHtoaOAVT3yEMGZwhzj5lrdpgPfsQMlnlW/uzfmVMbMSNCmKdQCOnQ/yZ8N1EaReF99zqT8+Z841JmcSTXr1INwk8QAclBOwPqOe/7VWtQdZGoiToT2ro7dHMbzOucauTbw/8GvoxHZvr44PiWffDINWzQyPPtva3s1wOeVQOlmDFsL3LrL7kfUJs1hNz7GKITgpIhPYyhNp8CZ8Jk2zmpjZAHxZiPJkMe0VGRvnYVjUqeLsFwlj/yXsbsa9X+Yd4B15Op060c41FM54X+UiiCt/ZLUOch46Lq9kmF6MNtLSoMeOs+uBCKWaS4iH2r3v+ZuAHuj5MDDk+tPVSoyGQujADHspJ9q7xEhywsF0AFJy1+3ui72Uoo0AUrFzOYq4WveNCNg7CvN1yOC1RsckIQ3aCdmijF3vz/Ndbk7QQzNHtAmIt/0/PGJ0+lsxgE9EPIxOELEzK43XFFu97hXficiBftsjXNPyHRjRn19hCNif36yWpeK8JN1ZNmoMshqQ==
  - name: noldface
    type: ssh-rsa
    key: AAAAB3NzaC1yc2EAAAADAQABAAACAQDPdrtT3OwSPN6t3udItlodb96ITOtBkwF+RG5rxKi1x1j2RHOeoOtkQ8oeh9zRqPnsgeFFbbRAJy1pHaS6wcpKlow0zq7+Xl61/AFgE9mD+yks7JQOP0Cf6N9kq3nYUC3XwBKtuLmwox9+PZX+lPDXVA6gN59Tb1BG/FJN2XbeEzMPUMiNr0B3LAk7sK9P+ipm2FoXi3oLEzS9QKi+1aDfx/K/EWcFLtlPeUYSxGHS18Vl1ad//Cw3XwKY4RGRnI6kfXN41xxTY1TyRRyTnZRoa7+4GudELrqrd6Y722/G+SOg4xhWL3Ns98E7xqC4VarNePEXX2Evom86aU9CkAGTvsMXcHWxDJatihtN2dFAngNfJfkAq7eaGVrrARgtiWw5gKREPaR96PIHSmA3d822nooScX2liDWNbXr0WpwW43MJXhaQKW2s0Cucd/iqxg4t7rP1uJrSaE9nCuXDE3vAbFXDjdWFyNrdOg8akyQ+z+bXJNyk1YQO3mMgahXqHFrVgZhtMgI5+WJYEapM0jjqviXav8eR9h3dkH2k4BrY3pfYsfMmNslwo/3Ot1os0k3k47mW9qQ4gWdAkMYg9RRkK760Bmd0YNjeVDvRgwD2Cvt3it4wW223CtqL1KxAoTK4iPzgpwu2x5KA0ojvexkzTG7J/i8EUYREgHCyQtVVIQ==

View file

@ -0,0 +1,7 @@
# Heqet project definition: blocky ad/tracker-blocking DNS server.
config:
  description: Blocky DNS Server
apps:
  - name: blocky
    repoURL: https://k8s-at-home.com/charts
    chart: blocky
    targetRevision: 9.0.3

View file

@ -0,0 +1,196 @@
env:
  TZ: Europe/Amsterdam
# Scrape annotations on the pod; the chart's ServiceMonitor is disabled below.
podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "4000"
image:
  tag: v0.15
service:
  main:
    ports:
      http:
        port: 4000
  dns-tcp:
    enabled: false
  # DNS over UDP exposed via MetalLB; Local policy preserves client IPs
  dns-udp:
    enabled: true
    type: LoadBalancer
    externalTrafficPolicy: Local
    ports:
      dns-udp:
        enabled: true
        port: 53
        protocol: UDP
        targetPort: 53
persistence:
  logs:
    enabled: true
    mountPath: /logs
    accessMode: ReadWriteOnce
    size: 1Gi
    storageClass: local-path
prometheus:
  serviceMonitor:
    enabled: false
# -- Full list of options https://github.com/0xERR0R/blocky/blob/master/docs/config.yml
config: |
  upstream:
    externalResolvers:
      - 192.168.1.1
  #customDNS:
  #  mapping:
  #    printer.lan: 192.168.178.3
  conditional:
    mapping:
      lan: udp:192.168.1.1
      dc: udp:192.168.1.1
  blocking:
    blackLists:
      ads:
        - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
        - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/extra.txt
        - https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/android-tracking.txt
        - https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
        - https://raw.githubusercontent.com/notracking/hosts-blocklists/master/hostnames.txt
        - https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
        - http://sysctl.org/cameleon/hosts
        - https://adaway.org/hosts.txt
        - https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt
        - https://hostfiles.frogeye.fr/firstparty-trackers-hosts.txt
        - https://phishing.army/download/phishing_army_blocklist_extended.txt
        - https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt
        - https://raw.githubusercontent.com/anudeepND/youtubeadsblacklist/master/domainlist.txt
        - https://raw.githubusercontent.com/bigdargon/hostsVN/master/hosts
        - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
        - https://raw.githubusercontent.com/DandelionSprout/adfilt/master/Alternate%20versions%20Anti-Malware%20List/AntiMalwareHosts.txt
        - https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.2o7Net/hosts
        - https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Risk/hosts
        - https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Spam/hosts
        - https://raw.githubusercontent.com/Kees1958/W3C_annual_most_used_survey_blocklist/master/TOP_EU_US_Ads_Trackers_HOST
        - https://raw.githubusercontent.com/PolishFiltersTeam/KADhosts/master/KADhosts.txt
        - https://raw.githubusercontent.com/Spam404/lists/master/main-blacklist.txt
        - https://urlhaus.abuse.ch/downloads/hostfile/
        - https://zerodot1.gitlab.io/CoinBlockerLists/hosts_browser
        # All firebog lists:
        - https://v.firebog.net/hosts/Cameleon.txt
        - https://v.firebog.net/hosts/HostsFileOrg.txt
        - https://v.firebog.net/hosts/JoeWein.txt
        - https://v.firebog.net/hosts/Mahakala.txt
        - https://v.firebog.net/hosts/JoeyLane.txt
        - https://v.firebog.net/hosts/PeterLowe.txt
        - https://v.firebog.net/hosts/PiwikSpam.txt
        - https://v.firebog.net/hosts/ReddestDream.txt
        - https://v.firebog.net/hosts/SBDead.txt
        - https://v.firebog.net/hosts/SBKAD.txt
        - https://v.firebog.net/hosts/SBSpam.txt
        - https://v.firebog.net/hosts/SomeoneWC.txt
        - https://v.firebog.net/hosts/Spam404.txt
        - https://v.firebog.net/hosts/Vokins.txt
        - https://v.firebog.net/hosts/Winhelp2002.txt
        - https://v.firebog.net/hosts/AdAway.txt
        - https://v.firebog.net/hosts/Disconnect-ads.txt
        - https://v.firebog.net/hosts/Easylist.txt
        - https://v.firebog.net/hosts/Easylist-Dutch.txt
        - https://v.firebog.net/hosts/SBUnchecky.txt
        - https://v.firebog.net/hosts/AdguardDNS.txt
        - https://v.firebog.net/hosts/Prigent-Ads.txt
        - https://v.firebog.net/hosts/Airelle-trc.txt
        - https://v.firebog.net/hosts/Disconnect-trc.txt
        - https://v.firebog.net/hosts/Disconnect-mal.txt
        - https://v.firebog.net/hosts/Easyprivacy.txt
        - https://v.firebog.net/hosts/SB2o7Net.txt
        - https://v.firebog.net/hosts/APT1Rep.txt
        - https://v.firebog.net/hosts/Airelle-hrsk.txt
        - https://v.firebog.net/hosts/Openphish.txt
        - https://v.firebog.net/hosts/SBRisk.txt
        - https://v.firebog.net/hosts/Shalla-mal.txt
        - https://v.firebog.net/hosts/Prigent-Malware.txt
      ms: []
      untrusted:
        - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/update.txt
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/ms.txt
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/fbook.txt
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/google.txt
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/nintendont.txt
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/ps.txt
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/blacklists/xbox.txt
    whiteLists:
      ads:
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/whitelists/common.txt
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/whitelists/ms.txt
      ms:
        - https://git.nold.in/nold/dns-whitelist/raw/branch/master/whitelists/ms.txt
    clientGroupsBlock:
      default:
        - ads
      LAPTOP-G35N0AS1.lan:
        - ads
        - ms
      # use client name (with wildcard support: * - sequence of any characters, [0-9] - range)
      # or single ip address / client subnet as CIDR notation
      #laptop*:
      #  - ads
      #192.168.178.1/24:
      #  - special
    # which response will be sent, if query is blocked:
    blockType: zeroIp
    # optional: automatically list refresh period in minutes. Default: 4h.
    # Negative value -> deactivate automatically refresh.
    # 0 value -> use default
    refreshPeriod: 0
  # optional: configuration for caching of DNS responses
  #caching:
  #  amount in minutes, how long a response must be cached (min value).
  #  If <=0, use response's TTL, if >0 use this value, if TTL is smaller
  #  Default: 0
  #  minTime: 5
  #  amount in minutes, how long a response must be cached (max value).
  #  If <0, do not cache responses
  #  If 0, use TTL
  #  If > 0, use this value, if TTL is greater
  #  Default: 0
  #  maxTime: -1
  #  if true, will preload DNS results for often used queries (names queried more than 5 times in a 2 hour time window)
  #  this improves the response time for often used queries, but significantly increases external traffic
  #  default: false
  #  prefetching: true
  # optional: configuration of client name resolution
  clientLookup:
    # optional: this DNS resolver will be used to perform reverse DNS lookup (typically local router)
    upstream: udp:192.168.1.1
    # optional: custom mapping of client name to IP addresses. Useful if reverse DNS does not work properly or just to have custom client names.
    #clients:
    #  laptop:
    #    - 192.168.178.29
  prometheus:
    enable: true
    path: /metrics
  # optional: write query information (question, answer, client, duration etc) to daily csv file
  queryLog:
    # directory (should be mounted as volume in docker)
    dir: /logs
    # if true, write one file per client. Writes all queries to single file otherwise
    # perClient: true
    # if > 0, deletes log files which are older than ... days
    logRetentionDays: 1
  port: 53
  httpPort: 4000
  bootstrapDns: udp:192.168.1.1
  logLevel: info
  logFormat: text

61
projects/core/project.yml Normal file
View file

@ -0,0 +1,61 @@
# Heqet project definition: cluster core components (storage, LB, certs, ingress).
config:
  description: Core Components for Kubernetes
apps:
  - name: fast-storage
    namespace: fast-storage
    repoURL: https://github.com/rancher/local-path-provisioner
    path: deploy/chart
    syncWave: '0'
    parameters:
      - name: storageClass.name
        value: fast
      - name: nodePathMap[0].node
        value: DEFAULT_PATH_FOR_NON_LISTED_NODES
      - name: nodePathMap[0].paths[0]
        value: /var/lib/rancher/k3s/storage
  - name: ssd-storage
    namespace: ssd-storage
    repoURL: https://github.com/rancher/local-path-provisioner
    path: deploy/chart
    syncWave: '0'
    parameters:
      - name: storageClass.name
        value: ssd
      - name: nodePathMap[0].node
        value: DEFAULT_PATH_FOR_NON_LISTED_NODES
      - name: nodePathMap[0].paths[0]
        value: /data/kubernetes/ssd
  - name: metallb
    repoURL: https://charts.bitnami.com/bitnami
    chart: metallb
    namespace: metallb
    targetRevision: 2.5.6
    syncWave: '0'
  - name: cert-manager
    namespace: cert-manager
    repoURL: https://charts.jetstack.io
    chart: cert-manager
    targetRevision: 1.5.3
    parameters:
      - name: installCRDs
        value: 'true'
    secrets:
      - name: cert-manager-vault-approle
        keys:
          - secretId
  - name: ingress-internal
    namespace: ingress-internal
    repoURL: https://kubernetes.github.io/ingress-nginx
    chart: ingress-nginx
    targetRevision: 4.0.8
    syncWave: '0'
  - name: ingress-external
    namespace: ingress-external
    repoURL: https://kubernetes.github.io/ingress-nginx
    chart: ingress-nginx
    targetRevision: 4.0.8
    syncWave: '0'

View file

@ -0,0 +1,27 @@
# ingress-nginx values: externally-reachable controller ("external" class),
# one pod per node (DaemonSet) behind the MetalLB "external" pool.
controller:
  name: external
  ingressClassResource:
    name: external
    enabled: true
    controllerValue: 'k8s.io/external'
  extraArgs:
    ingress-class: external
  kind: DaemonSet
  updateStrategy:
    # rollingUpdate:
    #   maxUnavailable: 1
    type: RollingUpdate
  service:
    annotations:
      metallb.universe.tf/address-pool: external
  metrics:
    enabled: true
    service:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "10254"
podSecurityPolicy:
  enabled: true

View file

@ -0,0 +1,25 @@
# ingress-nginx values: default in-cluster controller ("nginx" class)
# behind the MetalLB "internal" pool.
controller:
  name: controller-internal
  electionID: ingress-controller-internal-leader
  # Also pick up Ingress objects without an explicit class
  watchIngressWithoutClass: true
  ingressClassResource:
    name: nginx
    enabled: true
    default: true
  kind: Deployment
  service:
    annotations:
      metallb.universe.tf/address-pool: internal
  metrics:
    enabled: true
    service:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "10254"
defaultBackend:
  enabled: true
podSecurityPolicy:
  enabled: true

View file

@ -0,0 +1,29 @@
# MetalLB layer-2 address pools: default pool for general services plus
# dedicated single-address pools for the external and internal ingresses.
configInline:
  address-pools:
    - name: default
      protocol: layer2
      addresses:
        - 192.168.1.13/32
        - 192.168.1.14/32
        - 192.168.1.15/32
        - 192.168.1.16/32
        - 192.168.1.17/32
        - 192.168.1.18/32
        - 192.168.1.19/32
        - 192.168.1.20/32
    - name: external
      protocol: layer2
      addresses:
        - 192.168.1.12/32
    - name: internal
      protocol: layer2
      addresses:
        - 192.168.1.11/32
prometheus:
  serviceMonitor:
    enabled: true
  prometheusRule:
    enabled: true

View file

@ -0,0 +1,32 @@
# Heqet project definition: download tooling (torrent clients, youtube-dl, pyload).
config:
  description: Tools for downloading linux isos
apps:
  - name: deluge
    repoURL: https://k8s-at-home.com/charts/
    chart: deluge
    targetRevision: 5.0.1
    secrets:
      - name: openvpn
        keys:
          - VPN_AUTH
          - vpnConfigfile
  - name: rtorrent
    repoURL: https://k8s-at-home.com/charts/
    chart: rtorrent-flood
    targetRevision: 9.0.1
    secrets:
      # Reuses deluge's openvpn secret values
      - name: openvpn
        fromApp: deluge
        keys:
          - VPN_AUTH
          - vpnConfigfile
  - name: youtubedl
    repoURL: https://k8s-at-home.com/charts/
    chart: youtubedl-material
    targetRevision: 4.0.1
  - name: pyload
    repoURL: https://k8s-at-home.com/charts/
    chart: pyload
    targetRevision: 6.0.1

View file

@ -0,0 +1,60 @@
ingress:
  main:
    enabled: true
    annotations:
      cert-manager.io/cluster-issuer: "vault-issuer"
      kubernetes.io/ingress.class: "nginx"
      nginx.ingress.kubernetes.io/proxy-body-size: 50m
    hosts:
      - host: torrent.dc
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: torrent.dc-tls
        hosts:
          - torrent.dc
env:
  PUID: 1000
  # NOTE(review): linuxserver-style images take PGID for the group id;
  # "GUID" may be a typo — confirm against the image's env variables.
  GUID: 1000
persistence:
  config:
    enabled: true
    mountPath: /config
    size: 10M
  # use hostpath instead
  downloads:
    enabled: true
    type: hostPath
    hostPath: /data/torrent
    mountPath: /downloads
## VPN
addons:
  vpn:
    enabled: true
    openvpn:
      authSecret: openvpn
      configFileSecret: openvpn
    securityContext:
      capabilities:
        add:
          - NET_ADMIN
          - SYS_MODULE
    livenessProbe:
      exec:
        command:
          - sh
          - -c
          # FIX: POSIX sh's test uses '=' (not '=='); the substitution must be
          # quoted so an empty curl result doesn't break the test; and the
          # else branch now returns a definite failure code instead of $?
          # (which was the exit status of the '[' command itself).
          - if [ "$(curl -s https://ipinfo.io/country)" = 'NL' ]; then exit 0; else exit 1; fi
      initialDelaySeconds: 30
      periodSeconds: 60
      failureThreshold: 3

View file

@ -0,0 +1,34 @@
ingress:
  main:
    enabled: true
    annotations:
      cert-manager.io/cluster-issuer: "vault-issuer"
      kubernetes.io/ingress.class: "nginx"
    hosts:
      - host: pyload.dc
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: pyload.dc-tls
        hosts:
          - pyload.dc
env:
  PUID: 1420
  # NOTE(review): linuxserver-style images take PGID for the group id;
  # "GUID" may be a typo — confirm against the image's env variables.
  GUID: 2420
# FIX: key was misspelled "persistance"; the chart only reads "persistence",
# so the whole storage configuration below was silently ignored
# (cf. the sibling deluge/rtorrent values files, which spell it correctly).
persistence:
  config:
    enabled: true
    mountPath: /config
    storageClass: local-path
    accessMode: ReadWriteOnce
    size: 1Gi
  # use hostpath instead
  downloads:
    enabled: true
    type: hostPath
    hostPath: /data/downloads
    mountPath: /downloads

View file

@ -0,0 +1,128 @@
env:
  # -- Set the container timezone
  TZ: UTC
  # -- Folder where Flood stores its configuration
  HOME: "/config"
  # -- The host that Flood should listen for web connections on
  FLOOD_OPTION_HOST: "0.0.0.0"
  # -- The port that Flood should listen for web connections on
  FLOOD_OPTION_PORT: "3000"
  # -- ADVANCED: rTorrent daemon managed by Flood
  FLOOD_OPTION_RTORRENT: "true"
  # -- Allowed path for file operations
  FLOOD_OPTION_ALLOWEDPATH: "/downloads"
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
  main:
    ports:
      http:
        port: 3000
  bittorrent:
    enabled: true
    type: ClusterIP
    ports:
      bittorrent:
        enabled: true
        port: 6881
        protocol: TCP
        targetPort: 6881
# -- Minimal configuration provided from https://github.com/jesec/rtorrent/blob/master/doc/rtorrent.rc
# @default -- string
config: |
  session.use_lock.set = no
  method.insert = cfg.basedir, private|const|string, (cat,(fs.homedir),"/.local/share/rtorrent/")
  method.insert = cfg.download, private|const|string, (cat,"/downloads/","download/")
  method.insert = cfg.logs, private|const|string, (cat,(cfg.download),"log/")
  method.insert = cfg.logfile, private|const|string, (cat,(cfg.logs),"rtorrent-",(system.time),".log")
  method.insert = cfg.session, private|const|string, (cat,(cfg.basedir),".session/")
  method.insert = cfg.watch, private|const|string, (cat,(cfg.download),"watch/")
  fs.mkdir.recursive = (cat,(cfg.basedir))
  fs.mkdir = (cat,(cfg.download))
  fs.mkdir = (cat,(cfg.logs))
  fs.mkdir = (cat,(cfg.session))
  fs.mkdir = (cat,(cfg.watch))
  fs.mkdir = (cat,(cfg.watch),"/load")
  fs.mkdir = (cat,(cfg.watch),"/start")
  schedule2 = watch_load, 11, 10, ((load.verbose, (cat, (cfg.watch), "load/*.torrent")))
  schedule2 = watch_start, 10, 10, ((load.start_verbose, (cat, (cfg.watch), "start/*.torrent")))
  dht.add_bootstrap = dht.transmissionbt.com:6881
  dht.add_bootstrap = dht.libtorrent.org:25401
  throttle.max_uploads.set = 20
  throttle.max_uploads.global.set = 50
  throttle.min_peers.normal.set = 20
  throttle.max_peers.normal.set = 60
  throttle.min_peers.seed.set = 30
  throttle.max_peers.seed.set = 80
  trackers.numwant.set = 80
  network.port_range.set = 61086-61086
  network.max_open_files.set = 600
  network.max_open_sockets.set = 300
  pieces.memory.max.set = 1800M
  session.path.set = (cat, (cfg.session))
  directory.default.set = (cat, (cfg.download))
  log.execute = (cat, (cfg.logs), "execute.log")
  encoding.add = utf8
  system.daemon.set = true
  system.umask.set = 0002
  system.cwd.set = (directory.default)
  network.http.max_open.set = 500
  network.http.dns_cache_timeout.set = 25
  network.scgi.open_local = (cat,(cfg.basedir),rtorrent.sock)
  print = (cat, "Logging to ", (cfg.logfile))
  log.open_file = "log", (cfg.logfile)
  log.add_output = "info", "log"
ingress:
  main:
    enabled: true
    annotations:
      cert-manager.io/cluster-issuer: "vault-issuer"
      kubernetes.io/ingress.class: "nginx"
      nginx.ingress.kubernetes.io/proxy-body-size: 50m
    hosts:
      - host: flood.dc
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: flood.dc-tls
        hosts:
          - flood.dc
persistence:
  config:
    enabled: true
    mountPath: /config
    size: 10M
  # use hostpath instead
  downloads:
    enabled: true
    type: hostPath
    hostPath: /data/torrent
    mountPath: /downloads
## VPN
addons:
  vpn:
    enabled: true
    openvpn:
      authSecret: openvpn
      configFileSecret: openvpn
    securityContext:
      capabilities:
        add:
          - NET_ADMIN
          - SYS_MODULE
    livenessProbe:
      exec:
        command:
          - sh
          - -c
          # FIX: POSIX sh's test uses '=' (not '=='); the substitution must be
          # quoted so an empty curl result doesn't break the test; and the
          # else branch now returns a definite failure code instead of $?
          # (which was the exit status of the '[' command itself).
          - if [ "$(curl -s https://ipinfo.io/country)" = 'NL' ]; then exit 0; else exit 1; fi
      initialDelaySeconds: 30
      periodSeconds: 60
      failureThreshold: 3

View file

@ -0,0 +1,29 @@
ingress:
  main:
    enabled: true
    annotations:
      cert-manager.io/cluster-issuer: "vault-issuer"
      kubernetes.io/ingress.class: "nginx"
      # FIX: these values were literal block scalars ('|'), which embed a
      # trailing newline into the annotation value; nginx-ingress expects
      # plain single-line values for the auth URL/signin/headers annotations.
      nginx.ingress.kubernetes.io/auth-url: "https://youtubedl.dc/akprox/auth/nginx"
      nginx.ingress.kubernetes.io/auth-signin: "https://youtubedl.dc/akprox/start?rd=$escaped_request_uri"
      nginx.ingress.kubernetes.io/auth-response-headers: "Set-Cookie,X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid"
      # Config snippet: a trailing newline is harmless here, keep block style.
      nginx.ingress.kubernetes.io/auth-snippet: |
        proxy_set_header X-Forwarded-Host $http_host;
    hosts:
      - host: youtubedl.dc
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: youtubedl.dc-tls
        hosts:
          - youtubedl.dc
hostPathMounts:
  - name: downloads
    enabled: true
    mountPath: /downloads
    hostPath: /data/downloads

View file

@ -0,0 +1,36 @@
# Heqet project definition: Drone CI server plus its Kubernetes runner.
config:
  description: Drone-CI
  networkPolicy:
    groups:
      - internet
    rules:
      - allow-runner
      - allow-minio
apps:
  - name: drone
    repoURL: https://github.com/nold360/drone-charts.git
    path: charts/drone
    targetRevision: master
    secrets:
      - name: drone-env
        keys:
          - DRONE_GITEA_SERVER
          - DRONE_GITEA_CLIENT_ID
          - DRONE_GITEA_CLIENT_SECRET
          - DRONE_GITHUB_CLIENT_ID
          - DRONE_GITHUB_CLIENT_SECRET
          - DRONE_RPC_SECRET
  - name: drone-runner
    namespace: drone-runner
    repoURL: https://charts.drone.io
    chart: drone-runner-kube
    targetRevision: 0.1.5
    secrets:
      # Shares the server's RPC secret values
      - name: drone-env
        fromApp: drone
        keys:
          - DRONE_RPC_SECRET
          - DRONE_SECRET_PLUGIN_TOKEN

View file

@ -0,0 +1,34 @@
#podSecurityContext:
#  fsGroup: 2000
#securityContext:
#  capabilities:
#    drop:
#      - ALL
#  readOnlyRootFilesystem: true
#  runAsNonRoot: false
#  runAsUser: 1000
resources:
  limits:
    cpu: 4000m
    memory: 2048Mi
rbac:
  buildNamespaces:
    - drone-runner
extraSecretNamesForEnvFrom:
  - drone-env
env:
  DRONE_RPC_HOST: drone.drone.svc.cluster.local
  DRONE_SECRET_PLUGIN_ENDPOINT: http://drone-secrets-drone-kubernetes-secrets.drone-runner.svc.cluster.local:3000
  DRONE_NAMESPACE_DEFAULT: drone-runner
  DRONE_DEBUG: "true"
  DRONE_TRACE: "true"
  HTTP_PROXY: http://proxy-squid.proxy.svc.cluster.local:80
  HTTPS_PROXY: http://proxy-squid.proxy.svc.cluster.local:80
  NO_PROXY: localhost,.cluster.local,drone,drone.drone.svc.cluster.local,10.0.0.0/8,10.42.0.1,10.43.0.1

View file

@ -0,0 +1,21 @@
podSecurityContext:
  fsGroup: 2000
securityContext:
  capabilities:
    drop:
      - ALL
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
extraSecretNamesForEnvFrom:
  - drone-secrets-env
rbac:
  secretNamespace: drone-runner
  restrictToSecrets:
    - drone-secrets
env:
  KUBERNETES_NAMESPACE: drone-runner

View file

@ -0,0 +1,147 @@
image:
  # repository: drone/drone
  tag: 2.0.4
  # pullPolicy: IfNotPresent
containerPort: 8000
securityContext:
  capabilities:
    drop:
      - ALL
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "80"
service:
  type: ClusterIP
  port: 80
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    kubernetes.io/ingress.class: external
    kubernetes.io/tls-acme: "true"
  hosts:
    - host: drone.nold.in
      paths:
        - "/"
  tls:
    - secretName: drone-tls
      hosts:
        - drone.nold.in
resources:
  limits:
    cpu: 100m
    memory: 124Mi
persistentVolume:
  enabled: true
  size: 8Gi
  storageClass: ""
extraSecretNamesForEnvFrom:
  - drone-env
env:
  DRONE_USER_FILTER: nold360
  DRONE_USER_CREATE: username:nold360,admin:true
  DRONE_SERVER_PORT: ":8000"
  # NOTE(review): no port given here, while the runner values address the
  # same squid proxy with an explicit :80 — confirm the intended port.
  HTTP_PROXY: http://proxy-squid.proxy.svc.cluster.local
  HTTPS_PROXY: http://proxy-squid.proxy.svc.cluster.local
  NO_PROXY: localhost,.cluster.local
  DRONE_DATADOG_ENABLED: "false"
  ## REQUIRED: Set the user-visible Drone hostname, sans protocol.
  ## Ref: https://docs.drone.io/installation/reference/drone-server-host/
  ##
  DRONE_SERVER_HOST: "drone.nold.in"
  ## The protocol to pair with the value in DRONE_SERVER_HOST (http or https).
  ## Ref: https://docs.drone.io/installation/reference/drone-server-proto/
  ##
  DRONE_SERVER_PROTO: https
  DRONE_WEBHOOK_ENDPOINT: "https://drone.nold.in/hook"
  DRONE_STARLARK_ENABLED: "true"
  ## REQUIRED: Set the secret token that the Drone server and its Runners will use
  ## to authenticate. This is commented out in order to leave you the ability to set the
  ## key via a separately provisioned secret (see existingSecretName above).
  ## Ref: https://docs.drone.io/installation/reference/drone-rpc-secret/
  ##
  # DRONE_RPC_SECRET:
  ## If you'd like to use a DB other than SQLite (the default), set a driver + DSN here.
  ## Ref: https://docs.drone.io/installation/storage/database/
  ##
  # DRONE_DATABASE_DRIVER:
  # DRONE_DATABASE_DATASOURCE:
  ## If you are going to store build secrets in the Drone database, it is suggested that
  ## you set a database encryption secret. This must be set before any secrets are stored
  ## in the database.
  ## Ref: https://docs.drone.io/installation/storage/encryption/
  ##
  # DRONE_DATABASE_SECRET:
  ## If you are using self-hosted GitHub or GitLab, you'll need to set this to true.
  ## Ref: https://docs.drone.io/installation/reference/drone-git-always-auth/
  ##
  # DRONE_GIT_ALWAYS_AUTH: false
  ## ===================================================================================
  ## Provider Directives (select ONE)
  ## -----------------------------------------------------------------------------------
  ## Select one provider (and only one). Refer to the corresponding documentation link
  ## before filling the values in. Also note that you can use the 'secretMounts' value
  ## if you'd rather not have secrets in Kubernetes Secret instead of a ConfigMap.
  ## ===================================================================================
  ## GitHub-specific variables. See the provider docs here:
  ## Ref: https://docs.drone.io/installation/providers/github/
  ##
  # DRONE_GITHUB_CLIENT_ID:
  # DRONE_GITHUB_CLIENT_SECRET:
  ## GitLab-specific variables. See the provider docs here:
  ## Ref: https://docs.drone.io/installation/providers/gitlab/
  ##
  # DRONE_GITLAB_CLIENT_ID:
  # DRONE_GITLAB_CLIENT_SECRET:
  # DRONE_GITLAB_SERVER:
  ## Bitbucket Cloud-specific variables. See the provider docs here:
  ## Ref: https://docs.drone.io/installation/providers/bitbucket-cloud/
  ##
  # DRONE_BITBUCKET_CLIENT_ID:
  # DRONE_BITBUCKET_CLIENT_SECRET:
  ## Bitbucket-specific variables. See the provider docs here:
  ## Ref: https://docs.drone.io/installation/providers/bitbucket-server/
  ##
  # DRONE_GIT_USERNAME:
  # DRONE_GIT_PASSWORD:
  # DRONE_STASH_CONSUMER_KEY:
  # DRONE_STASH_PRIVATE_KEY:
  # DRONE_STASH_SERVER:
  ## Gitea-specific variables. See the provider docs here:
  ## Ref: https://docs.drone.io/installation/providers/gitea/
  ##
  # DRONE_GITEA_CLIENT_ID:
  # DRONE_GITEA_CLIENT_SECRET:
  # DRONE_GITEA_SERVER:
  ## Gogs-specific variables. See the provider docs here:
  ## Ref: https://docs.drone.io/installation/providers/gogs/
  ##
  # DRONE_GOGS_SERVER:

View file

@ -0,0 +1,8 @@
# Heqet project definition: Falco runtime security (currently disabled).
config:
  description: Falco Security
apps:
  - name: falco
    disabled: true
    repoURL: https://falcosecurity.github.io/charts
    chart: falco
    targetRevision: 1.16.0

View file

@ -0,0 +1,219 @@
docker:
enabled: false
podSecurityPolicy:
create: false
containerd:
enabled: true
#extraArgs:
# - --disable-cri-async
falco:
timeFormatISO8601: true
grpc:
enabled: true
grpcOutput:
enabled: false
falcosidekick:
enabled: true
replicaCount: 1
podSecurityPolicy:
create: true
webui:
enabled: true
retention: 200
darkmode: true
podSecurityPolicy:
create: true
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: vault-issuer
hosts:
- host: falco.dc
paths: ["/ui", "/events", "/healthz", "/ws"]
tls:
- secretName: falcosidekick-tls
hosts:
- falco.dc
customRules:
rule_exceptions.yaml: |-
- rule: Contact K8S API Server From Container
exceptions:
- name: proc_filenames
value:
- argocd-applicat
append: true
- rule: Write below root
exceptions:
- name: container
value: [ host ]
append: true
- rule: Read sensitive file untrusted
exceptions:
- name: container
value: [ host ]
append: true
- rule: Non sudo setuid
exceptions:
- name: container
value: [ host ]
append: true
nginx_rules.yaml: |-
- macro: nginx_consider_syscalls
condition: (evt.num < 0)
- macro: app_nginx
condition: container and container.image contains "nginx"
# Any outbound traffic raises a WARNING
- rule: Unauthorized process opened an outbound connection (nginx)
desc: A nginx process tried to open an outbound connection and is not whitelisted
condition: outbound and evt.rawres >= 0 and app_nginx
output: Non-whitelisted process opened an outbound connection (command=%proc.cmdline connection=%fd.name)
priority: WARNING
# Restricting listening ports to selected set
- list: nginx_allowed_inbound_ports_tcp
items: [80, 443, 8080, 8443]
- rule: Unexpected inbound tcp connection nginx
desc: Detect inbound traffic to nginx using tcp on a port outside of expected set
condition: inbound and evt.rawres >= 0 and not fd.sport in (nginx_allowed_inbound_ports_tcp) and app_nginx
output: Inbound network connection to nginx on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting spawned processes to selected set
- list: nginx_allowed_processes
items: ["nginx", "app-entrypoint.", "basename", "dirname", "grep", "nami", "node", "tini"]
- rule: Unexpected spawned process nginx
desc: Detect a process started in a nginx container outside of an expected set
condition: spawned_process and not proc.name in (nginx_allowed_processes) and app_nginx
output: Unexpected process spawned in nginx container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting files read or written to specific set
- list: nginx_allowed_file_prefixes_readwrite
items: ["/var/log/nginx", "/var/run"]
# Remember to add your nginx cache path
- rule: Unexpected file access readwrite for nginx
desc: Detect an attempt to access a file readwrite other than below an expected list of directories
condition: (open_write) and not fd.name pmatch (nginx_allowed_file_prefixes_readwrite) and app_nginx
output: Unexpected file accessed readwrite for nginx (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image)
priority: NOTICE
# Restricting syscalls to selected set
- list: nginx_allowed_syscalls
items: [accept, bind, clone, connect, dup, listen, mkdir, open, recvfrom, recvmsg, sendto, setgid, setuid, socket, socketpair]
- rule: Unexpected syscall nginx
desc: Detect a syscall in a nginx container outside of an expected set
condition: nginx_consider_syscalls and not evt.type in ("<unknown>", nginx_allowed_syscalls) and app_nginx
output: Unexpected syscall in nginx container (command=%proc.cmdline pid=%proc.pid user=%user.name syscall=%evt.type args=%evt.args %container.info image=%container.image)
priority: NOTICE
warn_evttypes: False
php_fpm.yaml: |-
- macro: php_fpm_consider_syscalls
condition: (evt.num < 0)
- macro: app_php_fpm
condition: container and container.image contains "fpm"
# Considering any inbound network connection suspect
- rule: Unexpected inbound connection php_fpm
desc: Detect any inbound connection arriving at php_fpm
condition: inbound and evt.rawres >= 0 and app_php_fpm
output: Unexpected inbound connection arriving at php_fpm (command=%proc.cmdline pid=%proc.pid connection=%fd.name user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting listening ports to selected set
- list: php_fpm_allowed_inbound_ports_tcp
items: [80, 443]
- rule: Unexpected inbound tcp connection php_fpm
desc: Detect inbound traffic to php_fpm using tcp on a port outside of expected set
condition: inbound and evt.rawres >= 0 and not fd.sport in (php_fpm_allowed_inbound_ports_tcp) and app_php_fpm
output: Inbound network connection to php_fpm on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting spawned processes to selected set
- list: php_fpm_allowed_processes
items: ["/usr/bin/python2", "nginx", "nginx: master process /usr/sbin/nginx -g daemon off; error_log /dev/stderr info;", "nginx: worker process", "php-fpm", "php-fpm: pool www"]
- rule: Unexpected spawned process php_fpm
desc: Detect a process started in a php_fpm container outside of an expected set
condition: spawned_process and not proc.name in (php_fpm_allowed_processes) and app_php_fpm
output: Unexpected process spawned in php_fpm container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting files read or written to specific set
- list: php_fpm_allowed_file_prefixes_readonly
items: ["/dev", "/var/www/errors"]
- rule: Unexpected file access readonly for php_fpm
desc: Detect an attempt to access a file readonly other than below an expected list of directories
condition: (open_read and evt.is_open_write=false) and not fd.name pmatch (php_fpm_allowed_file_prefixes_readonly) and app_php_fpm
output: Unexpected file accessed readonly for php_fpm (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image)
priority: NOTICE
- list: php_fpm_allowed_file_prefixes_readwrite
items: ["/dev", "/tmp", "/usr/local/var/log"]
- rule: Unexpected file access readwrite for php_fpm
desc: Detect an attempt to access a file readwrite other than below an expected list of directories
condition: (open_write) and not fd.name pmatch (php_fpm_allowed_file_prefixes_readwrite) and app_php_fpm
output: Unexpected file accessed readwrite for php_fpm (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image)
priority: NOTICE
postgres.yaml: |-
- macro: postgres_consider_syscalls
condition: (evt.num < 0)
- macro: app_postgres
condition: container and container.image contains "postgres"
- list: postgres_allowed_inbound_ports_tcp
items: [5432]
- rule: Unexpected inbound tcp connection postgres
desc: Detect inbound traffic to postgres using tcp on a port outside of expected set
condition: inbound and evt.rawres >= 0 and not fd.sport in (postgres_allowed_inbound_ports_tcp) and app_postgres
output: Inbound network connection to postgres on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting spawned processes to selected set
- list: postgres_allowed_processes
items: ["/proc/self/exe", "pg_isready", "postgres", "psql", "postgres: autovacuum launcher process", "pg_ctl" , "postgres: checkpointer process ", "postgres: stats collector process ", "postgres: wal writer process ", "postgres: writer process ", "sh"]
- rule: Unexpected spawned process postgres
desc: Detect a process started in a postgres container outside of an expected set
condition: spawned_process and not proc.name in (postgres_allowed_processes) and app_postgres
output: Unexpected process spawned in postgres container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting files read or written to specific set
- list: postgres_allowed_file_prefixes_readonly
items: ["/dev", "/etc", "/lib/x86_64-linux-gnu", "/usr/lib/locale", "/usr/lib/x86_64-linux-gnu", "/usr/share/locale", "/var/lib/postgresql/data", "/usr/share/zoneinfo", "/var/lib/postgresql", "/usr/lib/postgresql", "/usr/share/postgresql", "/var/run/postgresql"]
- rule: Unexpected file access readonly for postgres
desc: Detect an attempt to access a file readonly other than below an expected list of directories
condition: (open_read and evt.is_open_write=false) and not fd.name pmatch (postgres_allowed_file_prefixes_readonly) and app_postgres
output: Unexpected file accessed readonly for postgres (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image)
priority: NOTICE
- list: postgres_allowed_file_prefixes_readwrite
items: ["/var/lib/postgresql/data", "/var/run/postgresql"]
- rule: Unexpected file access readwrite for postgres
desc: Detect an attempt to access a file readwrite other than below an expected list of directories
condition: (open_write) and not fd.name pmatch (postgres_allowed_file_prefixes_readwrite) and app_postgres
output: Unexpected file accessed readwrite for postgres (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image)
priority: NOTICE
# For OpenShift
scc:
create: false

View file

@ -0,0 +1,22 @@
config:
description: Gitea public Git Server
networkPolicy:
groups:
- internet
rules:
- allow-ssh
apps:
- name: gitea
repoURL: https://dl.gitea.io/charts/
chart: gitea
targetRevision: 4.1.1
secrets:
- name: admin
keys:
- username
- password
- email
- name: postgres
keys:
- postgresql-password
- postgresql-postgres-password

View file

@ -0,0 +1,137 @@
# Gitea
image:
rootless: true
statefulset:
env:
- name: HTTP_PROXY
value: http://proxy-squid.proxy.svc.cluster.local:80
- name: HTTPS_PROXY
value: http://proxy-squid.proxy.svc.cluster.local:80
- name: http_proxy
value: http://proxy-squid.proxy.svc.cluster.local:80
- name: https_proxy
value: http://proxy-squid.proxy.svc.cluster.local:80
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# # https://gitea.com/gitea/helm-chart/issues/161
# add:
# - SYS_CHROOT
privileged: false
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
service:
http:
type: ClusterIP
port: 3000
ssh:
type: LoadBalancer
port: 2222
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: external
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: letsencrypt
hosts:
- host: git.nold.in
paths:
- path: /
pathType: Prefix
tls:
- secretName: gitea-tls
hosts:
- git.nold.in
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
persistence:
enabled: true
size: 10Gi
#storageClass: slow
gitea:
admin:
existingSecret: admin
metrics:
enabled: false
serviceMonitor:
enabled: false
# additionalLabels:
# prometheus-release: prom1
oauth:
enabled: false
#name:
#provider:
#key:
#secret:
#autoDiscoverUrl:
#useCustomUrls:
#customAuthUrl:
#customTokenUrl:
#customProfileUrl:
#customEmailUrl:
config:
APP_NAME: "Git with a lot of coffee"
ui:
DEFAULT_THEME: arc-green
repository:
DEFAULT_BRANCH: main
server:
LFS_START_SERVER: true
PROTOCOL: http
database:
HOST: gitea-postgresql.gitea.svc.cluster.local:5432
service:
DISABLE_REGISTRATION: true
lfs:
STORAGE_TYPE: local
picture:
DISABLE_GRAVATAR: true
metrics:
ENABLED: false
api:
ENABLE_SWAGGER: false
oauth:
ENABLE: false
database:
builtIn:
postgresql:
enabled: true
cache:
builtIn:
enabled: false
postgresql:
global:
#storageClass: slow
postgresql:
existingSecret: postgres
persistence:
size: 10Gi
psp:
create: true
rbac:
create: true

View file

@ -0,0 +1,21 @@
config:
description: Grafana, Prometheus and friends
apps:
- name: prometheus
namespace: prometheus
repoURL: https://prometheus-community.github.io/helm-charts
chart: prometheus
targetRevision: 14.11.0
- name: loki-stack
existingNamespace: prometheus
repoURL: https://grafana.github.io/helm-charts
chart: loki-stack
targetRevision: 2.4.1
secrets:
- name: loki-stack-grafana
keys:
- admin-user
- admin-password
- name: grafana-env
keys:
- GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET

View file

@ -0,0 +1,124 @@
loki:
image:
tag: 2.3.0
enabled: true
promtail:
enabled: true
fluent-bit:
enabled: true
grafana:
enabled: true
image:
tag: 8.1.2
admin:
existingSecret: "loki-stack-grafana"
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: vault-issuer
kubernetes.io/ingress.class: nginx
hosts:
- grafana.dc
tls:
- secretName: grafana-tls
hosts:
- grafana.dc
sidecar:
datasources:
enabled: true
dashboards:
enabled: true
label: grafana_dashboard
persistence:
enabled: true
plugins:
- grafana-piechart-panel
dashboards:
default:
traefik:
gnetId: 11462
revision: 1
# For OAUTH Secret Token
envFromSecret: grafana-env
grafana.ini:
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: false
log:
mode: console
grafana_net:
url: https://grafana.net
server:
root_url: https://grafana.dc
auth.generic_oauth:
name: Authentik
enabled: true
allow_sign_up: true
client_id: 779461ddc18a79dba69cf2eef895a20d59b21d59
#client_secret: IN ENV
scopes: "openid profile email"
#email_attribute_name: email:primary
role_attribute_path: "contains(groups[*], 'Grafana Admins') && 'Admin' || contains(groups[*], 'Grafana Editors') && 'Editor' || 'Viewer'"
auth_url: https://auth.dc/application/o/authorize/
token_url: https://auth.dc/application/o/token/
api_url: https://auth.dc/application/o/userinfo/
# meh.. but for now...
tls_skip_verify_insecure: true
prometheus:
enabled: false
image:
tag: v2.26.0
extraScrapeConfigs: |
- job_name: 'openwrt'
scrape_interval: 10s
static_configs:
- targets: ['192.168.1.1:9100']
- job_name: 'borg'
scrape_interval: 10s
static_configs:
- targets: ['192.168.1.111:9942']
podSecurityPolicy:
enabled: true
server:
extraArgs:
#storage.local.retention: 720h
  # NOTE(review): prometheus chart key is camelCase 'nodeExporter' (see the
  # standalone prometheus values file in this repo); lowercase was ignored.
  nodeExporter:
# image:
# repository: quay.io/prometheus/node-exporter
# tag: v1.1.2
extraHostPathMounts:
- name: textfile-dir
mountPath: /srv/txt_collector
hostPath: /var/lib/node-exporter
readOnly: true
mountPropagation: HostToContainer
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: false
runAsUser: 0

View file

@ -0,0 +1,44 @@
podSecurityPolicy:
enabled: true
kubeStateMetrics:
enabled: false
nodeExporter:
enabled: true
hostNetwork: true
hostPID: true
hostRootfs: true
extraHostPathMounts:
- name: textfile-dir
mountPath: /srv/txt_collector
hostPath: /var/lib/node-exporter
readOnly: true
mountPropagation: HostToContainer
server:
enabled: true
persistentVolume:
enabled: true
pushgateway:
enabled: true
extraScrapeConfigs: |
- job_name: 'openwrt'
scrape_interval: 10s
static_configs:
- targets: ['192.168.1.1:9100']
- job_name: 'borg'
scrape_interval: 120s
static_configs:
- targets: ['192.168.1.111:9942']
# - job_name: 'octoprint'
# scrape_interval: 5s
# metrics_path: '/plugin/prometheus_exporter/metrics'
# params:
# apikey: ['__OCTOPRINT_APIKEY__']
# static_configs:
# - targets: ['octoprint:80']

View file

@ -0,0 +1,11 @@
config:
name: heqet2
syncWave: -5
apps:
# Heqet
- name: heqet2
path: charts/heqet
repoURL: https://github.com/nold360/heqet
targetRevision: f/v2
syncWave: "-1"

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,14 @@
config:
description: Home Automation
syncWave: 100
repo: k8s-at-home
apps:
- name: homeassistant
chart: home-assistant
targetRevision: 11.0.5
secrets:
- name: hass-postgres
keys:
- postgresql-username
- postgresql-password
- postgresql-postgres-password

View file

@ -0,0 +1,101 @@
additionalContainers:
addon-homematic:
name: addon-homematic
image: homeassistant/i386-addon-homematic:latest
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /config
homegear:
name: homegear
image: homegear/homegear:stable
volumeMounts:
- name: homegear-config
mountPath: /etc/homegear
- name: homegear-lib
mountPath: /var/lib/homegear
env:
- name: HOST_USER_ID
value: "1000"
- name: HOST_USER_GID
value: "1000"
ports:
- name: homegear
containerPort: 2001
securityContext:
privileged: true
env:
TZ: UTC
influxdb:
architecture: standalone
authEnabled: false
database: home_assistant
enabled: true
persistence:
enabled: true
size: 8Gi
ingress:
main:
annotations:
cert-manager.io/cluster-issuer: vault-issuer
enabled: true
hosts:
- host: hass.dc
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- hass.dc
secretName: hass-tls
metrics:
enabled: false
prometheusRule:
enabled: false
labels: {}
rules: []
serviceMonitor:
interval: 1m
labels: {}
scrapeTimeout: 30s
persistence:
config:
enabled: true
usb:
enabled: true
hostPath: /dev/ttyUSB0
type: hostPath
homegear-config:
enabled: true
storageClass: local-path
accessMode: ReadWriteOnce
size: 1Gi
homegear-lib:
enabled: true
storageClass: local-path
accessMode: ReadWriteOnce
size: 1Gi
data:
enabled: true
storageClass: local-path
accessMode: ReadWriteOnce
size: 1Gi
postgresql:
enabled: true
existingSecret: hass-postgres
persistence:
enabled: true
size: 8Gi
postgresqlDatabase: homeassistant
postgresqlUsername: homeassistant
securityContext:
privileged: false

View file

@ -0,0 +1,7 @@
config:
description: Homer Hive Dashboard
apps:
- name: homer
repoURL: https://k8s-at-home.com/charts/
chart: homer
targetRevision: 6.0.1

View file

@ -0,0 +1,182 @@
ingress:
main:
enabled: true
annotations:
cert-manager.io/cluster-issuer: vault-issuer
hosts:
- host: homer.dc
paths:
- path: /
pathType: Prefix
tls:
- secretName: homer-tls
hosts:
- homer.dc
configmap:
# -- Store homer configuration as a ConfigMap
enabled: true
# -- Homer configuration. See [image documentation](https://github.com/bastienwirtz/homer/blob/main/docs/configuration.md) for more information.
# @default -- See values.yaml
config: |
---
title: "Hive Dashboard"
subtitle: "Homer on the Hive"
logo: "logo.png"
header: true
footer: false
columns: "6"
connectivityCheck: true
# Optional theme customization
theme: default
colors:
dark:
highlight-primary: "#013c3d"
highlight-secondary: "#057752"
highlight-hover: "#2a8769"
background: "#131313"
card-background: "#2b2b2b"
text: "#eaeaea"
text-header: "#ffffff"
text-title: "#fafafa"
text-subtitle: "#f5f5f5"
card-shadow: rgba(0, 0, 0, 0.4)
link-hover: "#ffdd57"
#message:
# Optional navbar
links: [] # Allows for navbar (dark mode, layout, and search) without any links
#links:
# - name: "Contribute"
# icon: "fab fa-github"
# url: "https://github.com/bastienwirtz/homer"
# target: "_blank" # optional html a tag target attribute
# - name: "Wiki"
# icon: "fas fa-book"
# url: "https://www.wikipedia.org/"
# this will link to a second homer page that will load config from additionnal-page.yml and keep default config values as in config.yml file
# see url field and assets/additionnal-page.yml.dist used in this example:
# - name: "another page!"
# icon: "fas fa-file-alt"
# url: "#additionnal-page"
# Services
# First level array represent a group.
# Leave only a "items" key if not using group (group name, icon & tagstyle are optional, section separation will not be displayed).
services:
- name: "// Admin"
icon: "fas fa-tools"
items:
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/openwrt.png
name: OpenWRT
url: https://openwrt.lan
target: "_blank"
- logo: https://argocd.dc/assets/images/logo.png
name: ArgoCD
url: https://argocd.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/vault.png
name: Vault
url: https://vault.dc
target: "_blank"
- logo: https://grafana.dc/public/img/grafana_icon.svg
name: Grafana
url: https://grafana.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png
name: Falco
url: https://falco.dc/ui
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/minio.png
name: MinIO
url: https://minio.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/kibana.png
name: Kibana
url: https://kibana.dc
target: "_blank"
- name: "// Coding"
icon: fas fa-code-branch
items:
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/drone.png
name: Drone.io
url: https://drone.nold.in
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/gitea.png
name: Gitea
url: https://git.nold.in
target: "_blank"
- name: "// Arrrrrr"
icon: "fas fa-download"
items:
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/ombi.png
name: Ombi
url: https://ombi.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/radarr.png
name: Radarr
url: https://radarr.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/sonarr.png
name: Sonarr
url: https://sonarr.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/lidarr.png
name: Lidarr
url: https://lidarr.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/bazarr.png
name: Bazarr
url: https://bazarr.dc
target: "_blank"
- logo: https://jackett.dc/jacket_medium.png
name: Jackett
url: https://jackett.dc
target: "_blank"
- name: "// Apps"
icon: "fas fa-cloud"
items:
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/octoprint.png
name: OctoPrint
url: https://octo.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/kodi.png
name: Kodi
url: http://libreelec.lan:8080
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/home-assistant.png
name: HomeAssi
url: https://hass.dc
target: "_blank"
- logo: https://www.chia.net/android-chrome-384x384.png
name: Chia Farm
url: https://chia.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/navidrome.png
name: Music Stream
url: https://music.dc
target: "_blank"
- name: "// Loader"
icon: "fas fa-download"
items:
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/deluge.png
name: Deluge
url: https://torrent.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/flood.png
name: Flood
url: https://flood.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/pyload.png
name: pyLoad
url: https://pyload.dc
target: "_blank"
- logo: https://raw.githubusercontent.com/NX211/homer-icons/master/png/youtube.png
name: YouTube-dl
url: https://youtubedl.dc
target: "_blank"

View file

@ -0,0 +1,15 @@
config:
description: Stream stuff
syncWave: 100
repo: k8s-at-home
networkPolicy:
groups:
- internet
apps:
- name: jellyfin
chart: jellyfin
targetRevision: 9.1.0
include:
- noRoot
- tmpdirs

View file

@ -0,0 +1,40 @@
ingress:
main:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: external
kubernetes.io/tls-acme: "true"
hosts:
- host: stream.nold.in
paths:
- path: /
pathType: Prefix
tls:
- secretName: jellyfin-tls
hosts:
- stream.nold.in
securityContext:
runAsUser: 568
runAsGroup: 568
# fsGroup: 568
privileged: false
persistence:
config:
enabled: true
storageClass: local-path
accessMode: ReadWriteOnce
size: 1Gi
cache:
enabled: true
storageClass: local-path
accessMode: ReadWriteOnce
size: 1Gi
media:
enabled: true
type: hostPath
mountPath: /media
hostPath: /data/media/stream

View file

@ -0,0 +1,9 @@
config:
description: Stream Music
syncWave: 100
repo: k8s-at-home
apps:
- name: navidrome
chart: navidrome
targetRevision: 6.0.1

View file

@ -0,0 +1,75 @@
image:
# -- image repository
repository: deluan/navidrome
# -- image tag
#tag: 0.43.0
# -- image pull policy
pullPolicy: IfNotPresent
# -- environment variables. See [navidrome docs](https://www.navidrome.org/docs/usage/configuration-options/#environment-variables) for more details.
# @default -- See below
env:
# -- Set the container timezone
TZ: UTC
# -- Log level. Useful for troubleshooting.
ND_LOGLEVEL: info
# -- How long Navidrome will wait before closing web ui idle sessions
ND_SESSIONTIMEOUT: 24h
# -- Enables transcoding configuration in the UI
ND_ENABLETRANSCODINGCONFIG: "true"
# -- Folder where your music library is stored.
ND_MUSICFOLDER: /music
# Disable Scanning Scheduling
ND_SCANSCHEDULE: "0"
podSecurityContext:
runAsUser: 1420
runAsGroup: 2420
fsGroup: 2420
securityContext:
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 4533
ingress:
main:
enabled: true
annotations:
cert-manager.io/cluster-issuer: vault-issuer
hosts:
- host: music.dc
paths:
- path: /
pathType: Prefix
tls:
- secretName: music-tls
hosts:
- music.dc
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
config:
enabled: true
mountPath: /data
storageClass: local-path
accessMode: ReadWriteOnce
size: 1Gi
music:
enabled: true
mountPath: /music
type: hostPath
hostPath: /data/media/music

View file

@ -0,0 +1,26 @@
config:
description: Public Nextcloud
networkPolicy:
groups:
- internet
apps:
- name: nextcloud
repoURL: https://nextcloud.github.io/helm
chart: nextcloud
targetRevision: 2.9.0
secrets:
- name: nextcloud-user
keys:
- username
- password
- smtp_username
- smtp_password
- name: nextcloud-postgres
keys:
- postgresql-username
- postgresql-password
- postgresql-postgres-password
- name: nextcloud-db
keys:
- db-username
- db-password

View file

@ -0,0 +1,143 @@
image:
tag: 21-fpm
pullPolicy: Always
nextcloud:
host: share.gnu.one
extraEnv:
- name: HTTP_PROXY
value: http://proxy-squid.proxy.svc.cluster.local:80
- name: HTTPS_PROXY
value: http://proxy-squid.proxy.svc.cluster.local:80
- name: NO_PROXY
value: .cluster.local
existingSecret:
enabled: true
secretName: nextcloud-user
usernameKey: username
passwordKey: password
smtpUsernameKey: smtp_username
smtpPasswordKey: smtp_password
configs:
proxy.config.php: |-
<?php
$CONFIG = array (
'proxy' => 'proxy-squid.proxy.svc.cluster.local:80',
'trusted_proxies' =>
array (
0 => 'proxy-squid.proxy.svc.cluster.local',
),
'proxyexclude' => ['.cluster.local'],
'debug' => true,
'loglevel' => 1,
);
extraSecurityContext:
runAsUser: "33"
runAsGroup: "33"
runAsNonRoot: true
readOnlyRootFilesystem: true
phpConfigs:
memory_limit.conf: |
php_admin_value[memory_limit] = 512M
tuning.conf: |
pm = dynamic
pm.max_children = 64
pm.start_servers = 12
pm.min_spare_servers = 8
pm.max_spare_servers = 24
pm.max_requests = 1000
ingress:
enabled: true
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: 4G
kubernetes.io/ingress.class: "external"
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: letsencrypt
# nginx.ingress.kubernetes.io/server-snippet: |-
# server_tokens off;
# proxy_hide_header X-Powered-By;
#
# rewrite ^/.well-known/webfinger /public.php?service=webfinger last;
# rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
# rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json;
# location = /.well-known/carddav {
# return 301 $scheme://$host/remote.php/dav;
# }
# location = /.well-known/caldav {
# return 301 $scheme://$host/remote.php/dav;
# }
# location = /robots.txt {
# allow all;
# log_not_found off;
# access_log off;
# }
# location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ {
# deny all;
# }
# location ~ ^/(?:autotest|occ|issue|indie|db_|console) {
# deny all;
# }
tls:
- secretName: nextcloud-tls
hosts:
- share.gnu.one
nginx:
enabled: true
cronjob:
enabled: false
curlInsecure: true
internalDatabase:
enabled: false
externalDatabase:
enabled: true
type: postgresql
host: nextcloud-postgresql.nextcloud.svc.cluster.local
existingSecret:
enabled: true
secretName: nextcloud-postgres
passwordKey: postgresql-password
usernameKey: postgresql-username
postgresql:
enabled: true
postgresqlDatabase: nextcloud
postgresqlUsername: nextcloud
existingSecret: nextcloud-postgres
persistence:
enabled: true
redis:
enabled: false
architecture: standalone
auth:
existingSecret: nextcloud-redis
existingSecretPasswordKey: password
replica:
replicaCount: 1
rbac:
create: false
podSecurityPolicy:
enabled: true
create: true
persistence:
enabled: true
storageClass: local-path
size: 100Gi
persistence:
enabled: true
rbac:
enabled: true
readinessProbe:
initialDelaySeconds: 60
livenessProbe:
initialDelaySeconds: 60
startupProbe:
initialDelaySeconds: 60

View file

@ -0,0 +1,21 @@
config:
description: Shared Network Services
apps:
# Squid Internet Proxy
- name: proxy
namespace: proxy
    # github.io pages are served over TLS; fetch the chart index via HTTPS
    repoURL: https://honestica.github.io/lifen-charts
chart: squid
targetRevision: 0.3.0
- name: minio
namespace: minio
repoURL: https://charts.bitnami.com/bitnami
chart: minio
targetRevision: 9.0.2
secrets:
- name: minio-auth
keys:
- root-user
- root-password

View file

@ -0,0 +1,16 @@
defaultBuckets: "public, drone, temp"
auth:
existingSecret: minio-auth
ingress:
enabled: true
hostname: minio.dc
tls: true
annotations:
cert-manager.io/cluster-issuer: vault-issuer
networkPolicy:
enabled: true
persistence:
enabled: true

View file

@ -0,0 +1,102 @@
# Default values for squid.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: honestica/squid
tag: 4-f9839050-1344-48d2-981a-b73e4541e193
pullPolicy: IfNotPresent
# imagePullSecrets:
service:
type: ClusterIP
# Specify IP to whitelist if needed
#loadBalancerSourceRanges: ""
# Specify external IP if needed
#loadBalancerIP: ""
port: 80
# annotations: {}
ingress:
enabled: true
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- proxy.dc
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
config: |
acl SSL_ports port 443
acl Safe_ports port 80 # http
acl Safe_ports port 443 # https
acl CONNECT method CONNECT
acl restricted_destination_subnetworks dst 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
# Recommended minimum Access Permission configuration:
#
# Deny requests to certain unsafe ports
http_access deny !Safe_ports
# Only allow cachemgr access from localhost
http_access allow localhost manager
http_access deny manager
http_access deny restricted_destination_subnetworks
# Squid normally listens to port 3128
http_port 3128
# Uncomment and adjust the following to add a disk cache directory.
#cache_dir ufs /var/cache/squid 100 16 256
# Leave coredumps in the first cache dir
coredump_dir /var/cache/squid
#
# Add any of your own refresh_pattern entries above these.
#
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern . 0 20% 4320
# Do not display squid version
httpd_suppress_version_string on
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
releaseAntiAffinity: true
metrics:
enabled: false
serviceMonitor: false
exporter:
port: 9301
resources: {}
image:
repository: boynux/squid-exporter
tag: v1.8
pullPolicy: IfNotPresent

View file

@ -0,0 +1,22 @@
# heqet app definition: deploys Vault plus the vault-secrets-operator
# via ArgoCD. Negative syncWaves install these before dependent apps.
config:
  description: Vault Secret Management
apps:
  - name: vault
    repoURL: https://helm.releases.hashicorp.com
    chart: vault
    targetRevision: 0.15.0
    # Installed early (wave -3): other apps depend on Vault for secrets.
    syncWave: "-3"
    parameters:
      # The chart key is global.psp.enable, not global.psp.enabled —
      # the latter would be silently ignored by Helm (matches the
      # "psp: enable:" key used in the vault values file).
      - name: global.psp.enable
        value: "true"
      # Dev mode would run Vault in-memory and unsealed; keep it off.
      - name: server.dev.enabled
        value: "false"
  # Vault Secret Operator for automatic Secret injection
  - name: vault-secrets-operator
    namespace: vault-secrets-operator
    repoURL: https://ricoberger.github.io/helm-charts
    chart: vault-secrets-operator
    targetRevision: 1.15.1
    syncWave: "-2"

View file

@ -0,0 +1,17 @@
# Helm values for ricoberger/vault-secrets-operator: how the operator
# reaches Vault and which RBAC/ServiceAccount objects the chart creates.
vault:
  # In-cluster Vault API endpoint; plain HTTP because Vault runs with
  # TLS disabled (terminated at the ingress).
  address: "http://vault.vault.svc.cluster.local:8200"
  # Authenticate via Vault's Kubernetes auth backend.
  authMethod: kubernetes
  # Vault role the operator's ServiceAccount token is bound to.
  kubernetesRole: heqet-app
  # NOTE(review): empty string presumably means "watch all namespaces" —
  # confirm against the chart's documentation.
  namespaces: ""
# CRDs are not installed by this chart; assumed to be applied elsewhere.
crd:
  create: false
rbac:
  create: true
  createrole: true
  # Cluster-wide (non-namespaced) RBAC for the operator.
  namespaced: false
serviceAccount:
  create: true
  name: vault-secrets-operator

View file

@ -0,0 +1,61 @@
# Helm values for the HashiCorp Vault chart: single standalone server
# with file storage, no injector, TLS terminated at the ingress.
global:
  enabled: true
  # Vault itself listens in plaintext; this matches "tls_disable = 1"
  # in the standalone listener config further down.
  tlsDisable: true
  psp:
    enable: true
# The agent sidecar injector is not used; secrets are delivered by the
# vault-secrets-operator instead.
injector:
  enabled: false
server:
  enabled: true
  # Audit log volume (currently disabled).
  auditStorage:
    accessMode: ReadWriteOnce
    annotations: {}
    enabled: false
    mountPath: /vault/audit
    size: 10Gi
    storageClass: null
  # ClusterRoleBinding so Vault can validate ServiceAccount tokens
  # (required by the Kubernetes auth backend).
  authDelegator:
    enabled: true
  # Persistent volume backing the "file" storage stanza below.
  dataStorage:
    accessMode: ReadWriteOnce
    annotations: {}
    enabled: true
    mountPath: /vault/data
    size: 10Gi
    storageClass: local-path
  dev:
    enabled: false
  ha:
    enabled: false
  # HTTPS entry point at vault.dc; the certificate is issued by the
  # vault-issuer ClusterIssuer via cert-manager.
  ingress:
    annotations:
      cert-manager.io/cluster-issuer: vault-issuer
      kubernetes.io/ingress.class: nginx
    enabled: true
    extraPaths: []
    hosts:
      - host: vault.dc
        paths: []
    labels: {}
    tls:
      - hosts:
          - vault.dc
        secretName: vault-tls
  networkPolicy:
    egress: []
    enabled: true
  # Single-node (non-HA) server with raw HCL config as a block scalar.
  standalone:
    enabled: true
    config: |
      ui = true
      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }
      storage "file" {
        path = "/vault/data"
      }
ui:
  enabled: true

View file

@ -0,0 +1,21 @@
# cert-manager ClusterIssuer for Let's Encrypt production via the ACME
# HTTP01 challenge on the "external" ingress class.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  # ClusterIssuer is cluster-scoped: the metadata.namespace the original
  # manifest set here is ignored by the API server, so it was removed.
  name: letsencrypt
spec:
  acme:
    # You must replace this email address with your own.
    # Let's Encrypt will use this to contact you about expiring
    # certificates, and issues related to your account.
    email: nold@gnu.one
    # Production endpoint; switch to the staging URL below while testing
    # to avoid Let's Encrypt rate limits.
    #server: https://acme-staging-v02.api.letsencrypt.org/directory
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      # Secret resource that will be used to store the account's private key.
      name: issuer-account-key
    # Add a single challenge solver, HTTP01 using nginx
    solvers:
      - http01:
          ingress:
            class: external

View file

@ -0,0 +1,17 @@
# cert-manager ClusterIssuer that signs certificates through Vault's
# intermediate PKI engine (role "dc" under pki_int).
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  # ClusterIssuer is cluster-scoped: the metadata.namespace the original
  # manifest set here is ignored by the API server, so it was removed.
  name: vault-issuer
spec:
  vault:
    path: pki_int/sign/dc
    server: http://vault.vault.svc.cluster.local:8200
    # NOTE(review): caBundle only applies to TLS connections; with a
    # plain-http server URL it is unused — confirm if TLS is planned.
    caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaekNDQWsrZ0F3SUJBZ0lVVU5CTWNDZkRmbS9MeS9RUGdhWVUxdFdSc1Nrd0RRWUpLb1pJaHZjTkFRRUwKQlFBd016RVVNQklHQTFVRUNoTUxibTlzWkhSeWIyNXBZM014RFRBTEJnTlZCQXNUQkdocGRtVXhEREFLQmdOVgpCQU1UQTJzemN6QWVGdzB5TVRBME1qa3hPVEUwTVRWYUZ3MHlNVEExTXpFeE9URTBORFJhTURNeEZEQVNCZ05WCkJBb1RDMjV2YkdSMGNtOXVhV056TVEwd0N3WURWUVFMRXdSb2FYWmxNUXd3Q2dZRFZRUURFd05yTTNNd2dnRWkKTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDOTJDb2Z6cHdvNWZjNDNPNTJvaGhqdHAwZgplRXE0ZkRTQ24rQjZwYUVmcGtsZXZvM2J1akw3ZnhTcmt2VlhuRDgyc3FCSythWjRCM0pJNVBMSHpoeTBwcVFSCm9rSDdiSUxuYVBGSlljZGYzWnF4VDVvT1QvWC9IeUlDQkNBbFdoN2ZNZThJYitFbm5oUGpFdlVTWHJzWTk4T3IKQVhpVGN4TlF0OVl5WnIzQS93cnloM2lIZmYyR3NuTGVEekRhV0tyMm93N3pCckZvZXFJRkdzWXY4b1YzcFZIZQpPdTloWWQ1V2F2ZjVRSjBQcTAwUndKTHRuc2V0RUdQenlEUlJTRnhKbmZSK2JwY1VSZDFrWkl5ZFpTRENNVFc5CnhtSTJQOUxvT2cwMThWb0MrM3l5V01PVEh1dmRraWh2SDBQM3RhcnpFcnd0cm0vOUlkT3J4NVhPVUVKUEFnTUIKQUFHamN6QnhNQTRHQTFVZER3RUIvd1FFQXdJQkJqQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRVwpCQlMzOWxNTVFySDNoczBTQzk4bVFJODhEbmF6VXpBZkJnTlZIU01FR0RBV2dCUzM5bE1NUXJIM2hzMFNDOThtClFJODhEbmF6VXpBT0JnTlZIUkVFQnpBRmdnTnJNM013RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUV1VG04SzQKRndoaUFOc2dsSXhjTk10aHJYQ0p4aUhyMHJVRWFOclQ5czlSYUxRSEZlSkhhZUxiL2NJUXZncjdQbW93aURMYQpvRlVZUk1EamZKNzUwK1Jmc21oa2pmdzdJL1JMWktsVXdMY1l4dlN4MjViWFNZVkdOTW5UUi9wTmN6cWNXSk5NCmpWZEZFbDRlRjMzQmtCWCtzN1pTWGxVVUhhdFloTEszQ1EzRk5tSWRjRFliNFovVWY1bXk1eUs2cG5HT3hQbnkKTnkyY0xsYndzWVYrelA5UGR4Y3JqUTlpem1rYmMxMUVxS3ZmYnhmUmc4aGlMZkovSnFaM0hxMW91SUk0V3dvMQo3WWxxVEZsS2FXRnpKNVhOSmVPYWZyNnZpREszQ3NQMjJYTXhMekRQQmdrbEd5Qnp0cFdKbVd1ZEVVanNKVDJiCnJsKzlOdEw5TmgzQklrcz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    auth:
      kubernetes:
        role: vault-issuer
        # Default mount path of the Kubernetes auth backend.
        mountPath: /v1/auth/kubernetes
        secretRef:
          # ServiceAccount token that cert-manager presents to Vault.
          name: vault-issuer-token
          key: token

View file

@ -0,0 +1,17 @@
---
# ServiceAccount whose token cert-manager uses to authenticate against
# Vault's Kubernetes auth backend (referenced by the vault-issuer
# ClusterIssuer's secretRef).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vault-issuer
  namespace: cert-manager
secrets:
  - name: vault-issuer-token
---
# Long-lived token Secret for the ServiceAccount above. The
# kubernetes.io/service-account-token type makes the token controller
# populate the "token" key that the issuer consumes.
apiVersion: v1
kind: Secret
metadata:
  name: vault-issuer-token
  namespace: cert-manager
  annotations:
    kubernetes.io/service-account.name: vault-issuer
type: kubernetes.io/service-account-token