Compare commits

..

1 commit

Author         SHA1         Message                                                  Date
renovate[bot]  c6eba03faf   chore(deps): update dependency fluxcd/flux2 to v2.3.0   2024-05-13 17:05:36 +00:00
58 changed files with 2735 additions and 1822 deletions

View file

@@ -5,4 +5,4 @@ apps:
- name: crowdsec
repoURL: https://crowdsecurity.github.io/helm-charts
chart: crowdsec
targetRevision: 0.11.0
targetRevision: 0.10.0

View file

@@ -5,4 +5,4 @@ apps:
- name: core
repoURL: https://neuvector.github.io/neuvector-helm/
chart: core
targetRevision: 2.7.7
targetRevision: 2.7.6

File diff suppressed because it is too large

View file

@@ -1,6 +1,6 @@
image:
repository: adguard/adguardhome
tag: v0.107.52
tag: v0.107.48
env:
TZ: Europe/Amsterdam

View file

@@ -8,11 +8,7 @@ apps:
# targetRevision: 3.1.0
repoURL: https://github.com/nold360/localai-charts.git
path: charts/local-ai
targetRevision: feat/envsecret
secrets:
- name: localai
keys:
- hf-token
targetRevision: feat/runtimeclass
- name: anythingllm
repo: bjw-s
@@ -24,13 +20,31 @@ apps:
# chart: app-template
# targetRevision: 2.4.0
# - name: big-agi
# repo: bjw-s
# chart: app-template
# targetRevision: 2.4.0
- name: big-agi
repo: bjw-s
chart: app-template
targetRevision: 2.4.0
- name: browserless
namespace: browserless
repo: bjw-s
chart: app-template
targetRevision: 2.4.0
- name: qdrant
repoURL: https://qdrant.to/helm
chart: qdrant
targetRevision: 0.9.0
- name: bibot
namespace: bibot
repo: bjw-s
chart: app-template
targetRevision: 2.4.0
secrets:
- name: bibot
keys:
- discord-token
- ombi-api-key
- prompt-template
- instruct-template

View file

@@ -1,56 +1,26 @@
controllers:
# main agent
main:
containers:
main:
image:
repository: reg.dc/bi
repository: reg.dc/bibot
tag: latest
pullPolicy: Always
env:
BIBOT_CONFIG: /config/bibot.yml
BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
PHOENIX_COLLECTOR_ENDPOINT: http://phoenix.phoenix.svc.cluster.local:6006
PHOENIX_PROJECT_NAME: bi
command: ["python3"]
args: ["/app/bi/agents/main/app.py", "worker", "-l", "info"]
controller:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
env:
BIBOT_CONFIG: /config/bibot.yml
BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
command: ["python3"]
args: ["/app/bi/controller.py", "worker", "-l", "info"]
discord:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
command: ["python3"]
args: ["/app/bi/connectors/discord/app.py", "worker", "-l", "info"]
env:
BIBOT_KAFKA__BROKER: bi-cluster-kafka-bootstrap:9092
OPENAI_API_KEY: fake
BIBOT_DISCORD__TOKEN:
BROWSERLESS_URL: http://browserless.browserless.svc.cluster.local:3000
OMBI_API_URL: http://ombi.ombi.svc.cluster.local:3579/api/v1
OMBI_API_TOKEN:
valueFrom:
secretKeyRef:
name: bibot
key: ombi-api-key
DISCORD_TOKEN:
valueFrom:
secretKeyRef:
name: bibot
key: discord-token
## Prod:
BIBOT_DISCORD__CHANNELS: "1216440541064200192"
# Dev:
# BIBOT_DISCORD_CHANNELS: "1217418069693960223"
probes:
liveness:
enabled: false
@@ -59,6 +29,35 @@ controllers:
startup:
enabled: false
configMaps:
config:
enabled: true
data:
bibot.yml: |-
prompt_template: /secret/prompt-template
instruct_template: /secret/instruct-template
context:
- Your name is Bi
- You are a helpful, funny & sarcastic chatbot and a little bit of a smartass
- Your main task is to use your extensive internal knowledge and dialogue skills to understand and address user queries
- Employ your tools ONLY upon direct request or when necessary to obtain additional information
discord:
channels: [ '1216440541064200192' ]
tools:
ombi:
url: http://ombi.ombi.svc.cluster.local:3579/api/v1
browserless:
url: http://browserless.browserless.svc.cluster.local:3000
openai:
url: http://localai-local-ai.ai.svc.cluster.local/v1
model: hermes-2-Pro-Llama-3-8B-Q5_K_M
temperature: 0
persistence:
secret:
name: bibot
@@ -70,15 +69,11 @@ persistence:
enabled: true
type: configMap
data:
size: 10Gi
type: persistentVolumeClaim
accessMode: ReadWriteOnce
service:
main:
controller: main
ports:
http:
port: 3001
type: ClusterIP
# service:
# main:
# controller: main
# ports:
# http:
# port: 8000
# type: ClusterIP

View file

@@ -4,7 +4,7 @@ controllers:
main:
image:
repository: flowiseai/flowise
tag: 1.8.4
tag: 1.7.2
command:
- flowise
- start

View file

@@ -3,8 +3,7 @@ replicaCount: 1
deployment:
image:
repository: quay.io/go-skynet/local-ai
#tag: latest-aio-gpu-nvidia-cuda-12
tag: v2.21.0-cublas-cuda12-ffmpeg
tag: v2.15.0-cublas-cuda12-ffmpeg
pullPolicy: Always
runtimeClassName: nvidia
@@ -57,14 +56,6 @@ deployment:
# UPLOAD_LIMIT
# HUGGINGFACEHUB_API_TOKEN=Token here
# Inject Secrets into Environment:
secretEnv:
- name: HF_TOKEN
valueFrom:
secretKeyRef:
name: localai
key: hf-token
modelsPath: "/models"
download_model:

View file

@@ -1,7 +1,7 @@
image:
repository: docker.io/qdrant/qdrant
pullPolicy: IfNotPresent
tag: "v1.10.1"
tag: "v1.9.2"
useUnprivilegedImage: true
env:

View file

@@ -8,7 +8,7 @@ installCRDs: true
global:
image:
repository: quay.io/argoproj/argocd
tag: v2.11.5
tag: v2.11.0
# imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 999
@@ -176,7 +176,7 @@ repoServer:
initContainers:
- name: copy-cmp-server
image: quay.io/argoproj/argocd:v2.11.5
image: quay.io/argoproj/argocd:v2.11.0
command:
- cp
- -n

View file

@@ -61,3 +61,17 @@ apps:
- noRoot
- tmpdirs
- ingress-internal
- name: unpackerr
chart: unpackerr
targetRevision: 5.1.0
include:
- noRoot
- tmpdirs
- ingress-internal
secrets:
- name: unpackerr-config
keys:
- UN_LIDARR_0_API_KEY
- UN_RADARR_0_API_KEY
- UN_SONARR_0_API_KEY

View file

@@ -1,6 +1,6 @@
image:
repository: ghcr.io/onedr0p/bazarr
tag: 1.4.3
tag: 1.4.2
ingress:
main:

View file

@@ -1,6 +1,6 @@
image:
repository: ghcr.io/onedr0p/lidarr
tag: 2.4.3.4248
tag: 2.2.5.4141
ingress:
main:

View file

@@ -9,7 +9,7 @@ image:
# -- image repository
repository: ghcr.io/onedr0p/prowlarr-develop
# @default -- chart.appVersion
tag: "1.21"
tag: "1.17"
# -- image pull policy
pullPolicy: IfNotPresent

View file

@@ -1,6 +1,6 @@
image:
repository: ghcr.io/onedr0p/radarr
tag: 5.8.3.8933
tag: 5.4.6.8723
env:
UMASK: "002"

View file

@@ -1,6 +1,6 @@
image:
repository: ghcr.io/onedr0p/sonarr
tag: 4.0.8.1874
tag: 4.0.4.1491
securityContext:
privileged: true

View file

@@ -1,139 +0,0 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: bi-cluster
namespace: bi
spec:
kafka:
version: 3.7.0
replicas: 1
listeners:
- name: plain
port: 9092
type: internal
tls: false
- name: tls
port: 9093
type: internal
tls: true
config:
offsets.topic.replication.factor: 1
transaction.state.log.replication.factor: 1
transaction.state.log.min.isr: 1
default.replication.factor: 1
min.insync.replicas: 1
inter.broker.protocol.version: "3.7"
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
size: 100Gi
deleteClaim: false
zookeeper:
replicas: 1
storage:
type: persistent-claim
size: 100Gi
deleteClaim: false
entityOperator:
topicOperator: {}
userOperator: {}
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: bi-input
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: bi-output
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: agent-registry
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 24
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: agent-main-input
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: discord-input
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: agent-researcher-input
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: agent-researcher-config
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824

View file

@@ -1,22 +0,0 @@
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: bi-mongo
namespace: bi
spec:
members: 1
type: ReplicaSet
version: "6.0.5"
security:
authentication:
modes: ["SCRAM"]
users:
- name: bi
db: bi
passwordSecretRef:
name: bi-mongo-password
roles:
- name: dbOwner
db: bi
scramCredentialsSecretName: bi

View file

@@ -1,33 +0,0 @@
config:
description: Bi Agent Framework
apps:
- name: bi
namespace: bi
repo: bjw-s
chart: app-template
targetRevision: 3.2.1
secrets:
- name: bibot
keys:
- discord-token
- ombi-api-key
- prompt-template
- instruct-template
- OPENWEATHERMAP_API_KEY
ignoreDiff:
- group: apps
kind: Deployment
jsonPointers:
- /spec/replicas
# - name: mongo-express
# repoURL: https://cowboysysop.github.io/charts/
# chart: mongo-express
# targetRevision: 6.5.2
# secrets:
# - name: mongo-express
# keys:
# - mongodb-admin-password
# - site-cookie-secret
# - site-session-secret

View file

@@ -1,182 +0,0 @@
controllers:
# main agent
main:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
env:
BIBOT_CONFIG: /config/bibot.yml
BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
BIBOT_MONGODB__URI: mongodb://bi-mongo-svc:27017/
BIBOT_MONGODB__USER:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: username
BIBOT_MONGODB__PASSWORD:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: password
PHOENIX_COLLECTOR_ENDPOINT: http://phoenix.phoenix.svc.cluster.local:6006
PHOENIX_PROJECT_NAME: bi
command: ["python3"]
args: ["/app/bi/agents/main/app.py", "worker", "-l", "info"]
controller:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
env:
BIBOT_CONFIG: /config/bibot.yml
BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
BIBOT_MONGODB__URI: mongodb://bi-mongo-svc:27017/
BIBOT_MONGODB__USER:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: username
BIBOT_MONGODB__PASSWORD:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: password
command: ["python3"]
args: ["/app/bi/controller.py", "worker", "-l", "info"]
discord:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
command: ["python3"]
args: ["/app/bi/connectors/discord/app.py", "worker", "-l", "info"]
env:
BIBOT_KAFKA__BROKER: bi-cluster-kafka-bootstrap:9092
BIBOT_MONGODB__URI: mongodb://bi-mongo-svc:27017/
BIBOT_MONGODB__USER:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: username
BIBOT_MONGODB__PASSWORD:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: password
OPENAI_API_KEY: fake
BIBOT_DISCORD__TOKEN:
valueFrom:
secretKeyRef:
name: bibot
key: discord-token
## Prod:
BIBOT_DISCORD__CHANNELS: "1216440541064200192"
# Dev:
# BIBOT_DISCORD_CHANNELS: "1217418069693960223"
probes:
liveness:
enabled: false
readiness:
enabled: false
startup:
enabled: false
researcher:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
env:
BIBOT_CONFIG: /config/bibot.yml
BIBOT_OPENAI__TEMPERATURE: "0.0"
BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
BIBOT_MONGODB__URI: mongodb://bi-mongo-svc:27017/
BIBOT_MONGODB__USER:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: username
BIBOT_MONGODB__PASSWORD:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: password
OPENWEATHERMAP_API_KEY:
valueFrom:
secretKeyRef:
name: bibot
key: OPENWEATHERMAP_API_KEY
PHOENIX_COLLECTOR_ENDPOINT: http://phoenix.phoenix.svc.cluster.local:6006
PHOENIX_PROJECT_NAME: bi
command: ["python3"]
args: ["/app/bi/agents/researcher/app.py", "worker", "-l", "info"]
mongoui:
containers:
main:
image:
repository: ugleiton/mongo-gui
tag: latest
pullPolicy: Always
env:
MONGO_URL:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: connectionString.standardSrv
persistence:
secret:
name: bibot
enabled: true
type: secret
config:
name: bibot-config
enabled: true
type: configMap
data:
size: 10Gi
type: persistentVolumeClaim
accessMode: ReadWriteOnce
service:
main:
controller: mongoui
ports:
http:
port: 4321
type: ClusterIP
ingress:
main:
annotations:
cert-manager.io/cluster-issuer: vault-issuer
enabled: true
hosts:
- host: mongo.dc
paths:
- path: /
service:
# name: main
identifier: main
port: 4321
tls:
- hosts:
- mongo.dc
secretName: mongo-tls

View file

@@ -1,50 +0,0 @@
ingress:
enabled: true
ingressClassName: "ingress-internal"
pathType: ImplementationSpecific
annotations:
cert-manager.io/cluster-issuer: vault-issuer
hosts:
- host: mongo.dc
paths:
- /
tls:
- secretName: mongo-express-tls
hosts:
- mongo.dc
## @param mongodbServer MongoDB host name or IP address
mongodbServer: bi-mongo-svc.bi.svc.cluster.local
## @param mongodbPort MongoDB port
mongodbPort: 27017
## @param mongodbEnableAdmin Enable administrator access
mongodbEnableAdmin: true
## @param mongodbAdminUsername Administrator username
mongodbAdminUsername: admin
## @param mongodbAdminPassword Administrator password
# mongodbAdminPassword: ""
## @param siteBaseUrl Set the express baseUrl to ease mounting at a subdirectory
siteBaseUrl: /
## @param basicAuthUsername Mongo Express web login name
basicAuthUsername: ""
## @param basicAuthPassword Mongo Express web login password
basicAuthPassword: ""
## @param existingSecret Name of existing Secret to use
existingSecret: "mongo-express"
## @param existingSecretKeyMongodbAdminPassword Key in existing Secret that contains administrator password
# existingSecretKeyMongodbAdminPassword: bi-mongo-admin-admin
## @param existingSecretKeyMongodbAuthPassword Key in existing Secret that contains database password
# existingSecretKeyMongodbAuthPassword: bi-mongo-admin-admin

View file

@@ -1,73 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: cloudflare-ddns-gnu
namespace: core
spec:
schedule: "*/15 * * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
template:
spec:
containers:
- name: cloudflare-ddns
image: mirioeggmann/cloudflare-ddns:v0.5.1
envFrom:
- secretRef:
name: cloudflare-ddns-gnu
restartPolicy: OnFailure
---
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
annotations:
name: cloudflare-ddns-gnu
namespace: core
spec:
keys:
- API_TOKEN
- NAME
- RECORD_ID
- ZONE_ID
- PROXIED
path: heqet/core/cloudflare-ddns-gnu
type: Opaque
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: cloudflare-ddns-nold
namespace: core
spec:
schedule: "*/15 * * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
template:
spec:
containers:
- name: cloudflare-ddns
image: mirioeggmann/cloudflare-ddns:v0.5.1
envFrom:
- secretRef:
name: cloudflare-ddns-nold
restartPolicy: OnFailure
---
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
annotations:
name: cloudflare-ddns-nold
namespace: core
spec:
keys:
- API_TOKEN
- NAME
- RECORD_ID
- ZONE_ID
- PROXIED
path: heqet/core/cloudflare-ddns-nold
type: Opaque

View file

@@ -42,40 +42,29 @@ apps:
namespace: ingress-internal
repoURL: https://kubernetes.github.io/ingress-nginx
chart: ingress-nginx
targetRevision: 4.11.1
targetRevision: 4.10.1
syncWave: '0'
- name: cilium
existingNamespace: kube-system
repoURL: https://helm.cilium.io
chart: cilium
targetRevision: 1.15.8
targetRevision: 1.15.4
- name: external-dns
repoURL: https://kubernetes-sigs.github.io/external-dns
chart: external-dns
targetRevision: 1.14.5
targetRevision: 1.14.4
secrets:
- name: cloudflare-api
keys:
- CF_API_TOKEN
- name: external-dns-adguard
repoURL: https://kubernetes-sigs.github.io/external-dns
chart: external-dns
targetRevision: 1.14.5
secrets:
- name: adguard-config
keys:
- ADGUARD_URL
- ADGUARD_USER
- ADGUARD_PASSWORD
- name: cert-manager
namespace: cert-manager
repoURL: https://charts.jetstack.io
chart: cert-manager
targetRevision: v1.15.1
targetRevision: v1.14.5
secrets:
- name: cert-manager-vault-approle
keys:

View file

@@ -23,13 +23,7 @@ global:
# Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
logLevel: 2
leaderElection:
namespace: "cert-manager"
crds:
enabled: true
installCRDs: true
replicaCount: 1
strategy:

View file

@@ -1,104 +0,0 @@
# Default values for external-dns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: registry.k8s.io/external-dns/external-dns
tag: "v0.14.2"
pullPolicy: IfNotPresent
shareProcessNamespace: false
podSecurityContext:
fsGroup: 65534
securityContext:
runAsNonRoot: true
runAsUser: 65534
readOnlyRootFilesystem: true
capabilities:
drop: ["ALL"]
# Defaults to `ClusterFirst`.
# Valid values are: `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`.
dnsPolicy:
serviceMonitor:
enabled: false
additionalLabels: {}
interval: 1m
scrapeTimeout: 10s
resources:
limits:
memory: 50Mi
cpu: 200m
requests:
memory: 50Mi
cpu: 10m
logLevel: info
logFormat: text
interval: 1m
triggerLoopOnEvent: false
sources:
- ingress
# - service
policy: upsert-only
registry: txt
txtOwnerId: ""
txtPrefix: ""
txtSuffix: ""
domainFilters:
- dc
#extraArgs:
deploymentStrategy:
type: Recreate
provider:
name: webhook
webhook:
image:
repository: ghcr.io/muhlba91/external-dns-provider-adguard
tag: latest
livenessProbe:
httpGet:
path: /healthz
port: 8888
initialDelaySeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /healthz
port: 8888
initialDelaySeconds: 10
timeoutSeconds: 5
env:
- name: LOG_LEVEL
value: debug
- name: ADGUARD_URL
valueFrom:
secretKeyRef:
name: adguard-config
key: ADGUARD_URL
- name: ADGUARD_USER
valueFrom:
secretKeyRef:
name: adguard-config
key: ADGUARD_USER
- name: ADGUARD_PASSWORD
valueFrom:
secretKeyRef:
name: adguard-config
key: ADGUARD_PASSWORD
- name: SERVER_HOST
value: "0.0.0.0"
- name: DRY_RUN
value: "false"

View file

@@ -3,8 +3,8 @@
# Declare variables to be passed into your templates.
image:
repository: registry.k8s.io/external-dns/external-dns
tag: "v0.14.2"
repository: k8s.gcr.io/external-dns/external-dns
tag: "v0.13.4"
pullPolicy: IfNotPresent
shareProcessNamespace: false

View file

@@ -6,7 +6,7 @@ apps:
- name: crossplane
repoURL: https://charts.crossplane.io/stable
chart: crossplane
targetRevision: 1.16.0
targetRevision: 1.15.2
secrets:
- name: terraform
keys:

View file

@@ -1,7 +1,7 @@
---
image:
repository: ghcr.io/onedr0p/qbittorrent
tag: 4.6.5
tag: 4.6.4
ingress:
main:

View file

@@ -5,17 +5,26 @@ apps:
- name: kube-prometheus-stack
repoURL: https://prometheus-community.github.io/helm-charts
chart: kube-prometheus-stack
targetRevision: 60.5.0
targetRevision: 55.11.0
secrets:
- name: grafana
keys:
- admin-password
- admin-user
helm:
skipCrds: true
- name: kube-prometheus-crds
repoURL: https://github.com/prometheus-community/helm-charts.git
path: charts/kube-prometheus-stack/crds/
targetRevision: kube-prometheus-stack-46.8.0
directory:
recurse: true
syncPolicy:
syncOptions:
- ServerSideApply=true
- Replace=true
# - name: loki-stack
# repoURL: https://grafana.github.io/helm-charts
# chart: loki-stack
# targetRevision: 2.10.2
- name: loki-stack
repoURL: https://grafana.github.io/helm-charts
chart: loki-stack
targetRevision: 2.10.2

File diff suppressed because it is too large

View file

@@ -2,7 +2,7 @@ loki:
enabled: true
image:
repository: grafana/loki
tag: 3.1.0
tag: 3.0.0
promtail:
enabled: true

View file

@@ -8,7 +8,7 @@ nodeExporter:
enabled: true
image:
repository: quay.io/prometheus/node-exporter
tag: v1.8.2
tag: v1.8.0
hostNetwork: true
hostPID: true
@@ -24,7 +24,7 @@ server:
enabled: true
image:
repository: quay.io/prometheus/prometheus
tag: v2.53.1
tag: v2.52.0
strategy:
type: Recreate

View file

@@ -4,7 +4,7 @@ controllers:
main:
image:
repository: homeassistant/home-assistant
tag: "2024.7"
tag: "2024.5"
env:
TZ: Europe/Berlin

View file

@@ -1,6 +1,6 @@
image:
repository: influxdb
tag: 2.7.8-alpine
tag: 2.7.6-alpine
pullPolicy: IfNotPresent
## If specified, use these secrets to access the images
# pullSecrets:

View file

@@ -172,7 +172,7 @@ sidecar:
# -- The image repository to pull from
repository: kiwigrid/k8s-sidecar
# -- The image tag to pull, default: `1.23.1`
tag: 1.27.5
tag: 1.26.2
# -- The image pull policy, default: `IfNotPresent`
pullPolicy: IfNotPresent
# -- The extra volume mounts for the sidecar

View file

@@ -33,7 +33,7 @@ configMaps:
format=json
logfile=/dev/stdout
donotprobe=/dev/ttyACM0
shell=/usr/bin/mosquitto_pub -h 192.168.1.20 -t wmbusmeters/"$METER_ID" -m "$METER_JSON"
shell=/usr/bin/mosquitto_pub -h mqtt.lan -t wmbusmeters/"$METER_ID" -m "$METER_JSON"
ignoreduplicates=false
meters:

View file

@@ -1,6 +1,6 @@
image:
repository: b4bz/homer
tag: v24.05.1
tag: v24.04.1
initContainers:
clone-assets:

View file

@@ -13,5 +13,5 @@ apps:
- name: ingress-external
repoURL: https://kubernetes.github.io/ingress-nginx
chart: ingress-nginx
targetRevision: 4.11.1
targetRevision: 4.10.1
syncWave: '0'

View file

@@ -1,6 +1,6 @@
image:
repository: koenkk/zigbee2mqtt
tag: 1.39.0
tag: 1.37.1
service:
main:

View file

@@ -5,16 +5,18 @@ config:
- internet
rules:
- allow-minio
- allow-localai
labels:
environment: external
apps:
- name: nextcloud
repoURL: https://nextcloud.github.io/helm
chart: nextcloud
targetRevision: 5.2.0
#repoURL: https://nextcloud.github.io/helm
#chart: nextcloud
#targetRevision: 3.1.0
repoURL: https://github.com/Nold360/nextcloud-helm
targetRevision: f/multifix
path: charts/nextcloud
secrets:
- name: nextcloud-user
keys:

View file

@@ -1,5 +1,5 @@
image:
tag: 29-fpm
tag: 28-fpm
pullPolicy: Always
nextcloud:
@@ -80,31 +80,6 @@ ingress:
external-dns.alpha.kubernetes.io/target: gnu.one
external-dns.alpha.kubernetes.io/cloudflare-proxied: "false"
nginx.ingress.kubernetes.io/server-snippet: |-
server_tokens off;
proxy_hide_header X-Powered-By;
rewrite ^/.well-known/webfinger /index.php/.well-known/webfinger last;
rewrite ^/.well-known/nodeinfo /index.php/.well-known/nodeinfo last;
rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json;
location = /.well-known/carddav {
return 301 $scheme://$host/remote.php/dav;
}
location = /.well-known/caldav {
return 301 $scheme://$host/remote.php/dav;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ {
deny all;
}
location ~ ^/(?:autotest|occ|issue|indie|db_|console) {
deny all;
}
tls:
- secretName: nextcloud-tls
hosts:
@@ -112,7 +87,6 @@ ingress:
nginx:
enabled: true
containerPort: 8080
cronjob:
enabled: false

View file

@@ -9,7 +9,7 @@ image:
# -- image repository
repository: octoprint/octoprint
# -- image tag
tag: 1.10.2-minimal
tag: 1.10.0-minimal
# -- image pull policy
pullPolicy: IfNotPresent

View file

@@ -15,7 +15,7 @@ ingress:
- paperless.dc
image:
repository: ghcr.io/paperless-ngx/paperless-ngx
tag: 2.11.1
tag: 2.8.3
pullPolicy: IfNotPresent
# -- See the following files for additional environment variables:

View file

@@ -1,85 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: searxng-config
namespace: searxng
data:
settings.yml: |
---
use_default_settings:
engines:
remove:
- soundcloud
server:
limiter: false
image_proxy: false
search:
default_lang: en
formats:
- html
- json
# autocomplete: google
general:
instance_name: HiveSearch
ui:
static_use_hash: true
default_theme: simple
theme_args:
simple_style: dark
infinite_scroll: true
results_on_new_tab: true
enabled_plugins:
- Basic Calculator
- Hash plugin
- Hostname replace
- Open Access DOI rewrite
- Self Informations
- Tracker URL remover
- Unit converter plugin
hostname_replace:
(www\.)?reddit\.com$: redlib.rostvik.site
limiter.toml: |
[real_ip]
# Number of values to trust for X-Forwarded-For.
x_for = 1
# The prefix defines the number of leading bits in an address that are compared
# to determine whether or not an address is part of a (client) network.
ipv4_prefix = 32
ipv6_prefix = 48
[botdetection.ip_limit]
# To get unlimited access in a local network, by default link-lokal addresses
# (networks) are not monitored by the ip_limit
filter_link_local = true
# activate link_token method in the ip_limit method
link_token = false
[botdetection.ip_lists]
# In the limiter, the ip_lists method has priority over all other methods -> if
# an IP is in the pass_ip list, it has unrestricted access and it is also not
# checked if e.g. the "user agent" suggests a bot (e.g. curl).
block_ip = [
]
pass_ip = [
'10.0.0.0/24', # IPv4 private network
]
# Activate passlist of (hardcoded) IPs from the SearXNG organization,
# e.g. `check.searx.space`.
pass_searxng_org = false

View file

@@ -1,13 +0,0 @@
config:
description: Local Meta Search
apps:
- name: searxng
repo: bjw-s
chart: app-template
targetRevision: 3.2.1
secrets:
- name: searxng
keys:
- SEARXNG_SECRET

View file

@@ -1,97 +0,0 @@
controllers:
app:
replicas: 1
strategy: RollingUpdate
containers:
app:
image:
repository: searxng/searxng
tag: 2024.5.16-2f2d93b29
env:
BASE_URL: https://search.dc
AUTOCOMPLETE: "false"
INSTANCE_NAME: "HiveSearch"
envFrom:
- secretRef:
name: searxng
# probes:
# liveness:
# enabled: true
# custom: true
# spec:
# httpGet:
# path: /stats
# port: 8080
# initialDelaySeconds: 0
# periodSeconds: 10
# timeoutSeconds: 1
# failureThreshold: 3
# readiness:
# enabled: true
# custom: true
# spec:
# httpGet:
# path: /stats
# port: 8080
# initialDelaySeconds: 0
# periodSeconds: 10
# timeoutSeconds: 1
# failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
capabilities:
drop:
- ALL
add:
- CHOWN
- SETGID
- SETUID
- DAC_OVERRIDE
resources:
requests:
cpu: 10m
limits:
memory: 3Gi
service:
app:
controller: app
ports:
http:
port: 8080
persistence:
config:
type: configMap
name: searxng-config
advancedMounts:
app:
app:
- path: /etc/searxng/settings.yml
subPath: settings.yml
readOnly: true
- path: /etc/searxng/limiter.toml
subPath: limiter.toml
readOnly: true
ingress:
app:
# className: ingress-internal
annotations:
cert-manager.io/cluster-issuer: vault-issuer
hosts:
- host: search.dc
paths:
- path: /
service:
identifier: app
port: http
tls:
- hosts:
- search.dc
secretName: searxng-tls

View file

@@ -22,22 +22,10 @@ apps:
namespace: cnpg-system
repoURL: https://cloudnative-pg.github.io/charts
chart: cloudnative-pg
targetRevision: 0.21.5
targetRevision: 0.21.2
- name: redis-operator
repoURL: https://ot-container-kit.github.io/helm-charts
namespace: redis-operator
chart: redis-operator
targetRevision: 0.16.4
- name: kafka-operator
repoURL: https://strimzi.io/charts
namespace: kafka-operator
chart: strimzi-kafka-operator
targetRevision: 0.41.0
- name: mongodb-operator
repoURL: https://mongodb.github.io/helm-charts
namespace: mongodb-operator
chart: community-operator
targetRevision: 0.10.0
targetRevision: 0.15.9

View file

@@ -1 +0,0 @@
watchAnyNamespace: true

View file

@@ -1,19 +0,0 @@
operator:
watchNamespace: "*"
resources:
limits:
cpu: 1100m
memory: 1Gi
requests:
cpu: 100m
memory: 100Mi
replicas: 1
podSecurityContext:
runAsNonRoot: true
runAsUser: 2000
securityContext: {}
community-operator-crds:
enabled: true

View file

@@ -4,37 +4,67 @@
# Name of the image repository to pull the container image from.
image:
repository: ghcr.io/ot-container-kit/redis-operator/redis-operator
repository: quay.io/spotahome/redis-operator
pullPolicy: IfNotPresent
#tag: v1.2.4
tag: v1.2.4
imageCredentials:
create: false
registry: url.private.registry
username: someone
password: somepassword
email: someone@example.com
# Use exists secrets in namespace
existsSecrets:
- registrysecret
updateStrategy:
type: RollingUpdate
replicas: 1
# A name in place of the chart name for `app:` labels.
nameOverride: ""
# A name to substitute for the full names of resources.
fullnameOverride: ""
serviceAccount:
# Enable service account creation.
create: true
# Annotations to be added to the service account.
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: ""
service:
type: ClusterIP
port: 9710
container:
port: 9710
# Container [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container).
# See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1) for details.
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
# Container resource [requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
# See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources) for details.
# @default -- No requests or limits.
resources:
requests:
cpu: 10m
memory: 16Mi
limits:
cpu: 500m
memory: 500Mi
certificate:
secretName: redis-operator-tls
certmanager:
enabled: false
cpu: 100m
memory: 128Mi
### Monitoring
###############
monitoring:
# Enable Prometheus PodMonitor to monitor the operator.
enabled: false

View file

@@ -5,7 +5,7 @@ apps:
- name: vault
repoURL: https://helm.releases.hashicorp.com
chart: vault
targetRevision: 0.28.1
targetRevision: 0.28.0
syncWave: '-3'
ignoreDiff:
- group: "*"
@@ -18,5 +18,5 @@ apps:
namespace: vault-secrets-operator
repoURL: https://ricoberger.github.io/helm-charts
chart: vault-secrets-operator
targetRevision: 2.5.10
targetRevision: 2.5.7
syncWave: '-2'

View file

@@ -5,13 +5,13 @@ global:
enable: false
injector:
enabled: false
enabled: true
server:
enabled: true
image:
repository: "hashicorp/vault"
tag: "1.17.2"
tag: "1.16.2"
auditStorage:
accessMode: ReadWriteOnce
annotations: {}

View file

@@ -6,16 +6,14 @@ config:
networkPolicy:
groups:
- internet
rules:
- allow-agent
labels:
environment: external
apps:
- name: woodpecker-server
path: charts/woodpecker/charts/server
- name: woodpecker
path: charts/woodpecker
secrets:
- name: github-oauth
keys:
@@ -24,15 +22,3 @@ apps:
- name: woodpecker-secret
keys:
- WOODPECKER_AGENT_SECRET
- name: woodpecker-agent
path: charts/woodpecker/charts/agent
namespace: woodpecker-agent
networkPolicy:
rules:
- allow-agent
secrets:
- name: woodpecker-secret
fromApp: woodpecker-server
keys:
- WOODPECKER_AGENT_SECRET

View file

@@ -1,56 +0,0 @@
# -- The number of replicas for the deployment
replicaCount: 2
image:
registry: docker.io
repository: woodpeckerci/woodpecker-agent
pullPolicy: Always
tag: 'next'
env:
# -- Add the environment variables for the agent component
WOODPECKER_SERVER: 'woodpecker-server.woodpecker.svc.cluster.local:9000'
WOODPECKER_BACKEND: kubernetes
WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker-agent
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: 'ssd'
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
WOODPECKER_BACKEND_K8S_STORAGE_RWX: false
WOODPECKER_BACKEND_K8S_POD_LABELS: ''
WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: ''
WOODPECKER_CONNECT_RETRY_COUNT: '1'
# -- Add extra secret that is contains environment variables
extraSecretNamesForEnvFrom:
- woodpecker-secret
persistence:
enabled: true
size: 1Gi
storageClass: 'ssd'
accessModes:
- ReadWriteOnce
# -- Add pod security context
podSecurityContext:
runAsUser: 1000
runAsGroup: 2000
fsGroup: 2000
# -- Add security context
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 2000
# -- Specifies the resources for the agent component
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 10m
memory: 10Mi

View file

@@ -1,84 +0,0 @@
statefulSet:
replicaCount: 1
updateStrategy:
type: RollingUpdate
image:
registry: docker.io
repository: woodpeckerci/woodpecker-server
pullPolicy: Always
tag: 'next'
# -- Add environment variables for the server component
env:
WOODPECKER_OPEN: "false"
WOODPECKER_ADMIN: "Nold360"
WOODPECKER_HOST: https://ci.nold.in
WOODPECKER_GITHUB: "true"
#WOODPECKER_REPO_OWNERS: "nold360"
HTTP_PROXY: http://proxy-squid.proxy.svc.cluster.local:3128
HTTPS_PROXY: http://proxy-squid.proxy.svc.cluster.local:3128
http_proxy: http://proxy-squid.proxy.svc.cluster.local:3128
https_proxy: http://proxy-squid.proxy.svc.cluster.local:3128
NO_PROXY: localhost,.cluster.local,10.43.0.1
no_proxy: localhost,.cluster.local,10.43.0.1
# -- Add extra environment variables from the secrets list
extraSecretNamesForEnvFrom:
- woodpecker-secret
- github-oauth
# -- Create a generic secret to store things in, e.g. env values
secrets:
- name: woodpecker-store
persistentVolume:
enabled: true
size: 10Gi
mountPath: '/var/lib/woodpecker'
storageClass: ''
podSecurityContext:
fsGroup: 2000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
ingress:
enabled: true
ingressClassName: ingress-external
labels:
environment: external
annotations:
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/hostname: ci.nold.in
external-dns.alpha.kubernetes.io/target: nold.in
hosts:
- host: ci.nold.in
paths:
- path: /
backend:
serviceName: server
servicePort: 80
tls:
- secretName: ci-nold-in-tls
hosts:
- ci.nold.in
# -- Specifies the ressources for the server component
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi

View file

@@ -0,0 +1,148 @@
agent:
# -- Enable the agent component
enabled: true
# -- The number of replicas for the deployment
replicaCount: 2
image:
registry: docker.io
repository: woodpeckerci/woodpecker-agent
pullPolicy: Always
tag: 'next'
env:
# -- Add the environment variables for the agent component
WOODPECKER_SERVER: 'woodpecker-server:9000'
WOODPECKER_BACKEND: kubernetes
WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: 'ssd'
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
WOODPECKER_BACKEND_K8S_STORAGE_RWX: false
WOODPECKER_BACKEND_K8S_POD_LABELS: ''
WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: ''
WOODPECKER_CONNECT_RETRY_COUNT: '1'
# -- Add extra secret that is contains environment variables
extraSecretNamesForEnvFrom:
- woodpecker-secret
persistence:
enabled: true
size: 1Gi
storageClass: 'ssd'
accessModes:
- ReadWriteOnce
# -- Add pod security context
podSecurityContext:
runAsUser: 1000
runAsGroup: 2000
fsGroup: 2000
# -- Add security context
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 2000
# -- Specifies the resources for the agent component
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 10m
memory: 10Mi
server:
enabled: true
statefulSet:
replicaCount: 1
updateStrategy:
type: RollingUpdate
image:
registry: docker.io
repository: woodpeckerci/woodpecker-server
pullPolicy: Always
tag: 'next'
# -- Add environment variables for the server component
env:
WOODPECKER_OPEN: "false"
WOODPECKER_ADMIN: "Nold360"
WOODPECKER_HOST: https://ci.nold.in
WOODPECKER_GITHUB: "true"
#WOODPECKER_REPO_OWNERS: "nold360"
HTTP_PROXY: http://proxy-squid.proxy.svc.cluster.local:3128
HTTPS_PROXY: http://proxy-squid.proxy.svc.cluster.local:3128
http_proxy: http://proxy-squid.proxy.svc.cluster.local:3128
https_proxy: http://proxy-squid.proxy.svc.cluster.local:3128
NO_PROXY: localhost,.cluster.local,10.43.0.1
no_proxy: localhost,.cluster.local,10.43.0.1
# -- Add extra environment variables from the secrets list
extraSecretNamesForEnvFrom:
- woodpecker-secret
- github-oauth
# -- Create a generic secret to store things in, e.g. env values
secrets:
- name: woodpecker-store
persistentVolume:
enabled: true
size: 10Gi
mountPath: '/var/lib/woodpecker'
storageClass: ''
podSecurityContext:
fsGroup: 2000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
ingress:
enabled: true
ingressClassName: ingress-external
labels:
environment: external
annotations:
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/hostname: ci.nold.in
external-dns.alpha.kubernetes.io/target: nold.in
hosts:
- host: ci.nold.in
paths:
- path: /
backend:
serviceName: server
servicePort: 80
tls:
- secretName: ci-nold-in-tls
hosts:
- ci.nold.in
# -- Specifies the ressources for the server component
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi

View file

@@ -5,7 +5,7 @@ apps:
- name: argo-workflows
repoURL: https://argoproj.github.io/argo-helm
chart: argo-workflows
targetRevision: 0.41.11
targetRevision: 0.41.4
# secrets:
# - name: argocd-secret
# keys:

View file

@@ -141,22 +141,3 @@ networkPolicy:
- namespaceSelector:
matchLabels:
app.heqet.gnu.one/project: argocd
# Allow access to internet proxy
allow-localai:
podSelector: {}
policyTypes:
- Egress
egress:
- ports:
- port: 80
protocol: TCP
- port: 8080
protocol: TCP
to:
- podSelector:
matchLabels:
app.kubernetes.io/name: local-ai
- namespaceSelector:
matchLabels:
app.heqet.gnu.one/project: ai