Compare commits

..

1 commit

Author: renovate[bot]
SHA1: c6eba03faf
Message: chore(deps): update dependency fluxcd/flux2 to v2.3.0
Date: 2024-05-13 17:05:36 +00:00
58 changed files with 2735 additions and 1822 deletions

View file

@@ -5,4 +5,4 @@ apps:
 - name: crowdsec
 repoURL: https://crowdsecurity.github.io/helm-charts
 chart: crowdsec
-targetRevision: 0.11.0
+targetRevision: 0.10.0

View file

@@ -5,4 +5,4 @@ apps:
 - name: core
 repoURL: https://neuvector.github.io/neuvector-helm/
 chart: core
-targetRevision: 2.7.7
+targetRevision: 2.7.6

File diff suppressed because it is too large

View file

@@ -1,6 +1,6 @@
 image:
 repository: adguard/adguardhome
-tag: v0.107.52
+tag: v0.107.48
 env:
 TZ: Europe/Amsterdam

View file

@@ -8,11 +8,7 @@ apps:
 # targetRevision: 3.1.0
 repoURL: https://github.com/nold360/localai-charts.git
 path: charts/local-ai
-targetRevision: feat/envsecret
-secrets:
-- name: localai
-keys:
-- hf-token
+targetRevision: feat/runtimeclass
 - name: anythingllm
 repo: bjw-s
@@ -24,13 +20,31 @@ apps:
 # chart: app-template
 # targetRevision: 2.4.0
-# - name: big-agi
-# repo: bjw-s
-# chart: app-template
-# targetRevision: 2.4.0
+- name: big-agi
+repo: bjw-s
+chart: app-template
+targetRevision: 2.4.0
 - name: browserless
 namespace: browserless
 repo: bjw-s
 chart: app-template
 targetRevision: 2.4.0
+- name: qdrant
+repoURL: https://qdrant.to/helm
+chart: qdrant
+targetRevision: 0.9.0
+- name: bibot
+namespace: bibot
+repo: bjw-s
+chart: app-template
+targetRevision: 2.4.0
+secrets:
+- name: bibot
+keys:
+- discord-token
+- ombi-api-key
+- prompt-template
+- instruct-template

View file

@@ -1,56 +1,26 @@
 controllers:
-# main agent
 main:
 containers:
 main:
 image:
-repository: reg.dc/bi
+repository: reg.dc/bibot
 tag: latest
 pullPolicy: Always
 env:
 BIBOT_CONFIG: /config/bibot.yml
-BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
-PHOENIX_COLLECTOR_ENDPOINT: http://phoenix.phoenix.svc.cluster.local:6006
-PHOENIX_PROJECT_NAME: bi
-command: ["python3"]
-args: ["/app/bi/agents/main/app.py", "worker", "-l", "info"]
-controller:
-containers:
-main:
-image:
-repository: reg.dc/bi
-tag: latest
-pullPolicy: Always
-env:
-BIBOT_CONFIG: /config/bibot.yml
-BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
-command: ["python3"]
-args: ["/app/bi/controller.py", "worker", "-l", "info"]
-discord:
-containers:
-main:
-image:
-repository: reg.dc/bi
-tag: latest
-pullPolicy: Always
-command: ["python3"]
-args: ["/app/bi/connectors/discord/app.py", "worker", "-l", "info"]
-env:
-BIBOT_KAFKA__BROKER: bi-cluster-kafka-bootstrap:9092
-OPENAI_API_KEY: fake
-BIBOT_DISCORD__TOKEN:
+BROWSERLESS_URL: http://browserless.browserless.svc.cluster.local:3000
+OMBI_API_URL: http://ombi.ombi.svc.cluster.local:3579/api/v1
+OMBI_API_TOKEN:
+valueFrom:
+secretKeyRef:
+name: bibot
+key: ombi-api-key
+DISCORD_TOKEN:
 valueFrom:
 secretKeyRef:
 name: bibot
 key: discord-token
-## Prod:
-BIBOT_DISCORD__CHANNELS: "1216440541064200192"
-# Dev:
-# BIBOT_DISCORD_CHANNELS: "1217418069693960223"
 probes:
 liveness:
 enabled: false
@@ -59,6 +29,35 @@ controllers:
 startup:
 enabled: false
+configMaps:
+config:
+enabled: true
+data:
+bibot.yml: |-
+prompt_template: /secret/prompt-template
+instruct_template: /secret/instruct-template
+context:
+- Your name is Bi
+- You are a helpful, funny & sarcastic chatbot and a little bit of a smartass
+- Your main task is to use your extensive internal knowledge and dialogue skills to understand and address user queries
+- Employ your tools ONLY upon direct request or when necessary to obtain additional information
+discord:
+channels: [ '1216440541064200192' ]
+tools:
+ombi:
+url: http://ombi.ombi.svc.cluster.local:3579/api/v1
+browserless:
+url: http://browserless.browserless.svc.cluster.local:3000
+openai:
+url: http://localai-local-ai.ai.svc.cluster.local/v1
+model: hermes-2-Pro-Llama-3-8B-Q5_K_M
+temperature: 0
 persistence:
 secret:
 name: bibot
@@ -70,15 +69,11 @@ persistence:
 enabled: true
 type: configMap
-data:
-size: 10Gi
-type: persistentVolumeClaim
-accessMode: ReadWriteOnce
-# service:
-# main:
-# controller: main
-# ports:
-# http:
-# port: 8000
-# type: ClusterIP
+service:
+main:
+controller: main
+ports:
+http:
+port: 3001
+type: ClusterIP

View file

@@ -4,7 +4,7 @@ controllers:
 main:
 image:
 repository: flowiseai/flowise
-tag: 1.8.4
+tag: 1.7.2
 command:
 - flowise
 - start

View file

@@ -3,8 +3,7 @@ replicaCount: 1
 deployment:
 image:
 repository: quay.io/go-skynet/local-ai
-#tag: latest-aio-gpu-nvidia-cuda-12
-tag: v2.21.0-cublas-cuda12-ffmpeg
+tag: v2.15.0-cublas-cuda12-ffmpeg
 pullPolicy: Always
 runtimeClassName: nvidia
@@ -57,14 +56,6 @@ deployment:
 # UPLOAD_LIMIT
 # HUGGINGFACEHUB_API_TOKEN=Token here
-# Inject Secrets into Environment:
-secretEnv:
-- name: HF_TOKEN
-valueFrom:
-secretKeyRef:
-name: localai
-key: hf-token
 modelsPath: "/models"
 download_model:

View file

@@ -1,7 +1,7 @@
 image:
 repository: docker.io/qdrant/qdrant
 pullPolicy: IfNotPresent
-tag: "v1.10.1"
+tag: "v1.9.2"
 useUnprivilegedImage: true
 env:

View file

@@ -8,7 +8,7 @@ installCRDs: true
 global:
 image:
 repository: quay.io/argoproj/argocd
-tag: v2.11.5
+tag: v2.11.0
 # imagePullPolicy: IfNotPresent
 securityContext:
 runAsUser: 999
@@ -176,7 +176,7 @@ repoServer:
 initContainers:
 - name: copy-cmp-server
-image: quay.io/argoproj/argocd:v2.11.5
+image: quay.io/argoproj/argocd:v2.11.0
 command:
 - cp
 - -n

View file

@@ -61,3 +61,17 @@ apps:
 - noRoot
 - tmpdirs
 - ingress-internal
+- name: unpackerr
+chart: unpackerr
+targetRevision: 5.1.0
+include:
+- noRoot
+- tmpdirs
+- ingress-internal
+secrets:
+- name: unpackerr-config
+keys:
+- UN_LIDARR_0_API_KEY
+- UN_RADARR_0_API_KEY
+- UN_SONARR_0_API_KEY

View file

@@ -1,6 +1,6 @@
 image:
 repository: ghcr.io/onedr0p/bazarr
-tag: 1.4.3
+tag: 1.4.2
 ingress:
 main:

View file

@@ -1,6 +1,6 @@
 image:
 repository: ghcr.io/onedr0p/lidarr
-tag: 2.4.3.4248
+tag: 2.2.5.4141
 ingress:
 main:

View file

@@ -9,7 +9,7 @@ image:
 # -- image repository
 repository: ghcr.io/onedr0p/prowlarr-develop
 # @default -- chart.appVersion
-tag: "1.21"
+tag: "1.17"
 # -- image pull policy
 pullPolicy: IfNotPresent

View file

@@ -1,6 +1,6 @@
 image:
 repository: ghcr.io/onedr0p/radarr
-tag: 5.8.3.8933
+tag: 5.4.6.8723
 env:
 UMASK: "002"

View file

@@ -1,6 +1,6 @@
 image:
 repository: ghcr.io/onedr0p/sonarr
-tag: 4.0.8.1874
+tag: 4.0.4.1491
 securityContext:
 privileged: true

View file

@@ -1,139 +0,0 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: bi-cluster
namespace: bi
spec:
kafka:
version: 3.7.0
replicas: 1
listeners:
- name: plain
port: 9092
type: internal
tls: false
- name: tls
port: 9093
type: internal
tls: true
config:
offsets.topic.replication.factor: 1
transaction.state.log.replication.factor: 1
transaction.state.log.min.isr: 1
default.replication.factor: 1
min.insync.replicas: 1
inter.broker.protocol.version: "3.7"
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
size: 100Gi
deleteClaim: false
zookeeper:
replicas: 1
storage:
type: persistent-claim
size: 100Gi
deleteClaim: false
entityOperator:
topicOperator: {}
userOperator: {}
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: bi-input
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: bi-output
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: agent-registry
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 24
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: agent-main-input
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: discord-input
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: agent-researcher-input
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: agent-researcher-config
namespace: bi
labels:
strimzi.io/cluster: bi-cluster
spec:
partitions: 1
replicas: 1
config:
retention.ms: 7200000
segment.bytes: 1073741824

View file

@@ -1,22 +0,0 @@
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: bi-mongo
namespace: bi
spec:
members: 1
type: ReplicaSet
version: "6.0.5"
security:
authentication:
modes: ["SCRAM"]
users:
- name: bi
db: bi
passwordSecretRef:
name: bi-mongo-password
roles:
- name: dbOwner
db: bi
scramCredentialsSecretName: bi

View file

@@ -1,33 +0,0 @@
config:
description: Bi Agent Framework
apps:
- name: bi
namespace: bi
repo: bjw-s
chart: app-template
targetRevision: 3.2.1
secrets:
- name: bibot
keys:
- discord-token
- ombi-api-key
- prompt-template
- instruct-template
- OPENWEATHERMAP_API_KEY
ignoreDiff:
- group: apps
kind: Deployment
jsonPointers:
- /spec/replicas
# - name: mongo-express
# repoURL: https://cowboysysop.github.io/charts/
# chart: mongo-express
# targetRevision: 6.5.2
# secrets:
# - name: mongo-express
# keys:
# - mongodb-admin-password
# - site-cookie-secret
# - site-session-secret

View file

@@ -1,182 +0,0 @@
controllers:
# main agent
main:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
env:
BIBOT_CONFIG: /config/bibot.yml
BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
BIBOT_MONGODB__URI: mongodb://bi-mongo-svc:27017/
BIBOT_MONGODB__USER:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: username
BIBOT_MONGODB__PASSWORD:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: password
PHOENIX_COLLECTOR_ENDPOINT: http://phoenix.phoenix.svc.cluster.local:6006
PHOENIX_PROJECT_NAME: bi
command: ["python3"]
args: ["/app/bi/agents/main/app.py", "worker", "-l", "info"]
controller:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
env:
BIBOT_CONFIG: /config/bibot.yml
BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
BIBOT_MONGODB__URI: mongodb://bi-mongo-svc:27017/
BIBOT_MONGODB__USER:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: username
BIBOT_MONGODB__PASSWORD:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: password
command: ["python3"]
args: ["/app/bi/controller.py", "worker", "-l", "info"]
discord:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
command: ["python3"]
args: ["/app/bi/connectors/discord/app.py", "worker", "-l", "info"]
env:
BIBOT_KAFKA__BROKER: bi-cluster-kafka-bootstrap:9092
BIBOT_MONGODB__URI: mongodb://bi-mongo-svc:27017/
BIBOT_MONGODB__USER:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: username
BIBOT_MONGODB__PASSWORD:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: password
OPENAI_API_KEY: fake
BIBOT_DISCORD__TOKEN:
valueFrom:
secretKeyRef:
name: bibot
key: discord-token
## Prod:
BIBOT_DISCORD__CHANNELS: "1216440541064200192"
# Dev:
# BIBOT_DISCORD_CHANNELS: "1217418069693960223"
probes:
liveness:
enabled: false
readiness:
enabled: false
startup:
enabled: false
researcher:
containers:
main:
image:
repository: reg.dc/bi
tag: latest
pullPolicy: Always
env:
BIBOT_CONFIG: /config/bibot.yml
BIBOT_OPENAI__TEMPERATURE: "0.0"
BIBOT_KAFKA__BROKER: kafka://bi-cluster-kafka-bootstrap:9092
BIBOT_MONGODB__URI: mongodb://bi-mongo-svc:27017/
BIBOT_MONGODB__USER:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: username
BIBOT_MONGODB__PASSWORD:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: password
OPENWEATHERMAP_API_KEY:
valueFrom:
secretKeyRef:
name: bibot
key: OPENWEATHERMAP_API_KEY
PHOENIX_COLLECTOR_ENDPOINT: http://phoenix.phoenix.svc.cluster.local:6006
PHOENIX_PROJECT_NAME: bi
command: ["python3"]
args: ["/app/bi/agents/researcher/app.py", "worker", "-l", "info"]
mongoui:
containers:
main:
image:
repository: ugleiton/mongo-gui
tag: latest
pullPolicy: Always
env:
MONGO_URL:
valueFrom:
secretKeyRef:
name: bi-mongo-bi-bi
key: connectionString.standardSrv
persistence:
secret:
name: bibot
enabled: true
type: secret
config:
name: bibot-config
enabled: true
type: configMap
data:
size: 10Gi
type: persistentVolumeClaim
accessMode: ReadWriteOnce
service:
main:
controller: mongoui
ports:
http:
port: 4321
type: ClusterIP
ingress:
main:
annotations:
cert-manager.io/cluster-issuer: vault-issuer
enabled: true
hosts:
- host: mongo.dc
paths:
- path: /
service:
# name: main
identifier: main
port: 4321
tls:
- hosts:
- mongo.dc
secretName: mongo-tls

View file

@@ -1,50 +0,0 @@
ingress:
enabled: true
ingressClassName: "ingress-internal"
pathType: ImplementationSpecific
annotations:
cert-manager.io/cluster-issuer: vault-issuer
hosts:
- host: mongo.dc
paths:
- /
tls:
- secretName: mongo-express-tls
hosts:
- mongo.dc
## @param mongodbServer MongoDB host name or IP address
mongodbServer: bi-mongo-svc.bi.svc.cluster.local
## @param mongodbPort MongoDB port
mongodbPort: 27017
## @param mongodbEnableAdmin Enable administrator access
mongodbEnableAdmin: true
## @param mongodbAdminUsername Administrator username
mongodbAdminUsername: admin
## @param mongodbAdminPassword Administrator password
# mongodbAdminPassword: ""
## @param siteBaseUrl Set the express baseUrl to ease mounting at a subdirectory
siteBaseUrl: /
## @param basicAuthUsername Mongo Express web login name
basicAuthUsername: ""
## @param basicAuthPassword Mongo Express web login password
basicAuthPassword: ""
## @param existingSecret Name of existing Secret to use
existingSecret: "mongo-express"
## @param existingSecretKeyMongodbAdminPassword Key in existing Secret that contains administrator password
# existingSecretKeyMongodbAdminPassword: bi-mongo-admin-admin
## @param existingSecretKeyMongodbAuthPassword Key in existing Secret that contains database password
# existingSecretKeyMongodbAuthPassword: bi-mongo-admin-admin

View file

@@ -1,73 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: cloudflare-ddns-gnu
namespace: core
spec:
schedule: "*/15 * * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
template:
spec:
containers:
- name: cloudflare-ddns
image: mirioeggmann/cloudflare-ddns:v0.5.1
envFrom:
- secretRef:
name: cloudflare-ddns-gnu
restartPolicy: OnFailure
---
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
annotations:
name: cloudflare-ddns-gnu
namespace: core
spec:
keys:
- API_TOKEN
- NAME
- RECORD_ID
- ZONE_ID
- PROXIED
path: heqet/core/cloudflare-ddns-gnu
type: Opaque
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: cloudflare-ddns-nold
namespace: core
spec:
schedule: "*/15 * * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
template:
spec:
containers:
- name: cloudflare-ddns
image: mirioeggmann/cloudflare-ddns:v0.5.1
envFrom:
- secretRef:
name: cloudflare-ddns-nold
restartPolicy: OnFailure
---
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
annotations:
name: cloudflare-ddns-nold
namespace: core
spec:
keys:
- API_TOKEN
- NAME
- RECORD_ID
- ZONE_ID
- PROXIED
path: heqet/core/cloudflare-ddns-nold
type: Opaque

View file

@@ -42,40 +42,29 @@ apps:
 namespace: ingress-internal
 repoURL: https://kubernetes.github.io/ingress-nginx
 chart: ingress-nginx
-targetRevision: 4.11.1
+targetRevision: 4.10.1
 syncWave: '0'
 - name: cilium
 existingNamespace: kube-system
 repoURL: https://helm.cilium.io
 chart: cilium
-targetRevision: 1.15.8
+targetRevision: 1.15.4
 - name: external-dns
 repoURL: https://kubernetes-sigs.github.io/external-dns
 chart: external-dns
-targetRevision: 1.14.5
+targetRevision: 1.14.4
 secrets:
 - name: cloudflare-api
 keys:
 - CF_API_TOKEN
-- name: external-dns-adguard
-repoURL: https://kubernetes-sigs.github.io/external-dns
-chart: external-dns
-targetRevision: 1.14.5
-secrets:
-- name: adguard-config
-keys:
-- ADGUARD_URL
-- ADGUARD_USER
-- ADGUARD_PASSWORD
 - name: cert-manager
 namespace: cert-manager
 repoURL: https://charts.jetstack.io
 chart: cert-manager
-targetRevision: v1.15.1
+targetRevision: v1.14.5
 secrets:
 - name: cert-manager-vault-approle
 keys:

View file

@@ -23,13 +23,7 @@ global:
 # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
 logLevel: 2
-leaderElection:
-namespace: "cert-manager"
-crds:
-enabled: true
+installCRDs: true
 replicaCount: 1
 strategy:

View file

@@ -1,104 +0,0 @@
# Default values for external-dns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: registry.k8s.io/external-dns/external-dns
tag: "v0.14.2"
pullPolicy: IfNotPresent
shareProcessNamespace: false
podSecurityContext:
fsGroup: 65534
securityContext:
runAsNonRoot: true
runAsUser: 65534
readOnlyRootFilesystem: true
capabilities:
drop: ["ALL"]
# Defaults to `ClusterFirst`.
# Valid values are: `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`.
dnsPolicy:
serviceMonitor:
enabled: false
additionalLabels: {}
interval: 1m
scrapeTimeout: 10s
resources:
limits:
memory: 50Mi
cpu: 200m
requests:
memory: 50Mi
cpu: 10m
logLevel: info
logFormat: text
interval: 1m
triggerLoopOnEvent: false
sources:
- ingress
# - service
policy: upsert-only
registry: txt
txtOwnerId: ""
txtPrefix: ""
txtSuffix: ""
domainFilters:
- dc
#extraArgs:
deploymentStrategy:
type: Recreate
provider:
name: webhook
webhook:
image:
repository: ghcr.io/muhlba91/external-dns-provider-adguard
tag: latest
livenessProbe:
httpGet:
path: /healthz
port: 8888
initialDelaySeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /healthz
port: 8888
initialDelaySeconds: 10
timeoutSeconds: 5
env:
- name: LOG_LEVEL
value: debug
- name: ADGUARD_URL
valueFrom:
secretKeyRef:
name: adguard-config
key: ADGUARD_URL
- name: ADGUARD_USER
valueFrom:
secretKeyRef:
name: adguard-config
key: ADGUARD_USER
- name: ADGUARD_PASSWORD
valueFrom:
secretKeyRef:
name: adguard-config
key: ADGUARD_PASSWORD
- name: SERVER_HOST
value: "0.0.0.0"
- name: DRY_RUN
value: "false"

View file

@@ -3,8 +3,8 @@
 # Declare variables to be passed into your templates.
 image:
-repository: registry.k8s.io/external-dns/external-dns
-tag: "v0.14.2"
+repository: k8s.gcr.io/external-dns/external-dns
+tag: "v0.13.4"
 pullPolicy: IfNotPresent
 shareProcessNamespace: false

View file

@@ -6,7 +6,7 @@ apps:
 - name: crossplane
 repoURL: https://charts.crossplane.io/stable
 chart: crossplane
-targetRevision: 1.16.0
+targetRevision: 1.15.2
 secrets:
 - name: terraform
 keys:

View file

@@ -1,7 +1,7 @@
 ---
 image:
 repository: ghcr.io/onedr0p/qbittorrent
-tag: 4.6.5
+tag: 4.6.4
 ingress:
 main:

View file

@@ -5,17 +5,26 @@ apps:
 - name: kube-prometheus-stack
 repoURL: https://prometheus-community.github.io/helm-charts
 chart: kube-prometheus-stack
-targetRevision: 60.5.0
+targetRevision: 55.11.0
 secrets:
 - name: grafana
 keys:
 - admin-password
 - admin-user
+helm:
+skipCrds: true
+- name: kube-prometheus-crds
+repoURL: https://github.com/prometheus-community/helm-charts.git
+path: charts/kube-prometheus-stack/crds/
+targetRevision: kube-prometheus-stack-46.8.0
+directory:
+recurse: true
 syncPolicy:
 syncOptions:
-- ServerSideApply=true
-# - name: loki-stack
-# repoURL: https://grafana.github.io/helm-charts
-# chart: loki-stack
-# targetRevision: 2.10.2
+- Replace=true
+- name: loki-stack
+repoURL: https://grafana.github.io/helm-charts
+chart: loki-stack
+targetRevision: 2.10.2

File diff suppressed because it is too large

View file

@@ -2,7 +2,7 @@ loki:
 enabled: true
 image:
 repository: grafana/loki
-tag: 3.1.0
+tag: 3.0.0
 promtail:
 enabled: true

View file

@@ -8,7 +8,7 @@ nodeExporter:
 enabled: true
 image:
 repository: quay.io/prometheus/node-exporter
-tag: v1.8.2
+tag: v1.8.0
 hostNetwork: true
 hostPID: true
@@ -24,7 +24,7 @@ server:
 enabled: true
 image:
 repository: quay.io/prometheus/prometheus
-tag: v2.53.1
+tag: v2.52.0
 strategy:
 type: Recreate

View file

@@ -4,7 +4,7 @@ controllers:
 main:
 image:
 repository: homeassistant/home-assistant
-tag: "2024.7"
+tag: "2024.5"
 env:
 TZ: Europe/Berlin

View file

@@ -1,6 +1,6 @@
 image:
 repository: influxdb
-tag: 2.7.8-alpine
+tag: 2.7.6-alpine
 pullPolicy: IfNotPresent
 ## If specified, use these secrets to access the images
 # pullSecrets:

View file

@@ -172,7 +172,7 @@ sidecar:
 # -- The image repository to pull from
 repository: kiwigrid/k8s-sidecar
 # -- The image tag to pull, default: `1.23.1`
-tag: 1.27.5
+tag: 1.26.2
 # -- The image pull policy, default: `IfNotPresent`
 pullPolicy: IfNotPresent
 # -- The extra volume mounts for the sidecar

View file

@@ -33,7 +33,7 @@ configMaps:
 format=json
 logfile=/dev/stdout
 donotprobe=/dev/ttyACM0
-shell=/usr/bin/mosquitto_pub -h 192.168.1.20 -t wmbusmeters/"$METER_ID" -m "$METER_JSON"
+shell=/usr/bin/mosquitto_pub -h mqtt.lan -t wmbusmeters/"$METER_ID" -m "$METER_JSON"
 ignoreduplicates=false
 meters:

View file

@@ -1,6 +1,6 @@
 image:
 repository: b4bz/homer
-tag: v24.05.1
+tag: v24.04.1
 initContainers:
 clone-assets:

View file

@@ -13,5 +13,5 @@ apps:
 - name: ingress-external
 repoURL: https://kubernetes.github.io/ingress-nginx
 chart: ingress-nginx
-targetRevision: 4.11.1
+targetRevision: 4.10.1
 syncWave: '0'

View file

@@ -1,6 +1,6 @@
 image:
 repository: koenkk/zigbee2mqtt
-tag: 1.39.0
+tag: 1.37.1
 service:
 main:

View file

@@ -5,16 +5,18 @@ config:
 - internet
 rules:
 - allow-minio
-- allow-localai
 labels:
 environment: external
 apps:
 - name: nextcloud
-repoURL: https://nextcloud.github.io/helm
-chart: nextcloud
-targetRevision: 5.2.0
+#repoURL: https://nextcloud.github.io/helm
+#chart: nextcloud
+#targetRevision: 3.1.0
+repoURL: https://github.com/Nold360/nextcloud-helm
+targetRevision: f/multifix
+path: charts/nextcloud
 secrets:
 - name: nextcloud-user
 keys:

View file

@@ -1,5 +1,5 @@
 image:
-tag: 29-fpm
+tag: 28-fpm
 pullPolicy: Always
 nextcloud:
@@ -80,31 +80,6 @@ ingress:
 external-dns.alpha.kubernetes.io/target: gnu.one
 external-dns.alpha.kubernetes.io/cloudflare-proxied: "false"
-nginx.ingress.kubernetes.io/server-snippet: |-
-server_tokens off;
-proxy_hide_header X-Powered-By;
-rewrite ^/.well-known/webfinger /index.php/.well-known/webfinger last;
-rewrite ^/.well-known/nodeinfo /index.php/.well-known/nodeinfo last;
-rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
-rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json;
-location = /.well-known/carddav {
-return 301 $scheme://$host/remote.php/dav;
-}
-location = /.well-known/caldav {
-return 301 $scheme://$host/remote.php/dav;
-}
-location = /robots.txt {
-allow all;
-log_not_found off;
-access_log off;
-}
-location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ {
-deny all;
-}
-location ~ ^/(?:autotest|occ|issue|indie|db_|console) {
-deny all;
-}
 tls:
 - secretName: nextcloud-tls
 hosts:
@@ -112,7 +87,6 @@ ingress:
 nginx:
 enabled: true
-containerPort: 8080
 cronjob:
 enabled: false

View file

@@ -9,7 +9,7 @@ image:
 # -- image repository
 repository: octoprint/octoprint
 # -- image tag
-tag: 1.10.2-minimal
+tag: 1.10.0-minimal
 # -- image pull policy
 pullPolicy: IfNotPresent

View file

@@ -15,7 +15,7 @@ ingress:
 - paperless.dc
 image:
 repository: ghcr.io/paperless-ngx/paperless-ngx
-tag: 2.11.1
+tag: 2.8.3
 pullPolicy: IfNotPresent
 # -- See the following files for additional environment variables:

View file

@@ -1,85 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: searxng-config
namespace: searxng
data:
settings.yml: |
---
use_default_settings:
engines:
remove:
- soundcloud
server:
limiter: false
image_proxy: false
search:
default_lang: en
formats:
- html
- json
# autocomplete: google
general:
instance_name: HiveSearch
ui:
static_use_hash: true
default_theme: simple
theme_args:
simple_style: dark
infinite_scroll: true
results_on_new_tab: true
enabled_plugins:
- Basic Calculator
- Hash plugin
- Hostname replace
- Open Access DOI rewrite
- Self Informations
- Tracker URL remover
- Unit converter plugin
hostname_replace:
(www\.)?reddit\.com$: redlib.rostvik.site
limiter.toml: |
[real_ip]
# Number of values to trust for X-Forwarded-For.
x_for = 1
# The prefix defines the number of leading bits in an address that are compared
# to determine whether or not an address is part of a (client) network.
ipv4_prefix = 32
ipv6_prefix = 48
[botdetection.ip_limit]
# To get unlimited access in a local network, by default link-lokal addresses
# (networks) are not monitored by the ip_limit
filter_link_local = true
# activate link_token method in the ip_limit method
link_token = false
[botdetection.ip_lists]
# In the limiter, the ip_lists method has priority over all other methods -> if
# an IP is in the pass_ip list, it has unrestricted access and it is also not
# checked if e.g. the "user agent" suggests a bot (e.g. curl).
block_ip = [
]
pass_ip = [
'10.0.0.0/24', # IPv4 private network
]
# Activate passlist of (hardcoded) IPs from the SearXNG organization,
# e.g. `check.searx.space`.
pass_searxng_org = false

View file

@@ -1,13 +0,0 @@
config:
description: Local Meta Search
apps:
- name: searxng
repo: bjw-s
chart: app-template
targetRevision: 3.2.1
secrets:
- name: searxng
keys:
- SEARXNG_SECRET

View file

@@ -1,97 +0,0 @@
controllers:
app:
replicas: 1
strategy: RollingUpdate
containers:
app:
image:
repository: searxng/searxng
tag: 2024.5.16-2f2d93b29
env:
BASE_URL: https://search.dc
AUTOCOMPLETE: "false"
INSTANCE_NAME: "HiveSearch"
envFrom:
- secretRef:
name: searxng
# probes:
# liveness:
# enabled: true
# custom: true
# spec:
# httpGet:
# path: /stats
# port: 8080
# initialDelaySeconds: 0
# periodSeconds: 10
# timeoutSeconds: 1
# failureThreshold: 3
# readiness:
# enabled: true
# custom: true
# spec:
# httpGet:
# path: /stats
# port: 8080
# initialDelaySeconds: 0
# periodSeconds: 10
# timeoutSeconds: 1
# failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
capabilities:
drop:
- ALL
add:
- CHOWN
- SETGID
- SETUID
- DAC_OVERRIDE
resources:
requests:
cpu: 10m
limits:
memory: 3Gi
service:
app:
controller: app
ports:
http:
port: 8080
persistence:
config:
type: configMap
name: searxng-config
advancedMounts:
app:
app:
- path: /etc/searxng/settings.yml
subPath: settings.yml
readOnly: true
- path: /etc/searxng/limiter.toml
subPath: limiter.toml
readOnly: true
ingress:
app:
# className: ingress-internal
annotations:
cert-manager.io/cluster-issuer: vault-issuer
hosts:
- host: search.dc
paths:
- path: /
service:
identifier: app
port: http
tls:
- hosts:
- search.dc
secretName: searxng-tls

View file

@@ -22,22 +22,10 @@ apps:
 namespace: cnpg-system
 repoURL: https://cloudnative-pg.github.io/charts
 chart: cloudnative-pg
-targetRevision: 0.21.5
+targetRevision: 0.21.2
 - name: redis-operator
 repoURL: https://ot-container-kit.github.io/helm-charts
 namespace: redis-operator
 chart: redis-operator
-targetRevision: 0.16.4
+targetRevision: 0.15.9
-- name: kafka-operator
-repoURL: https://strimzi.io/charts
-namespace: kafka-operator
-chart: strimzi-kafka-operator
-targetRevision: 0.41.0
-- name: mongodb-operator
-repoURL: https://mongodb.github.io/helm-charts
-namespace: mongodb-operator
-chart: community-operator
-targetRevision: 0.10.0

View file

@@ -1 +0,0 @@
watchAnyNamespace: true

View file

@@ -1,19 +0,0 @@
operator:
watchNamespace: "*"
resources:
limits:
cpu: 1100m
memory: 1Gi
requests:
cpu: 100m
memory: 100Mi
replicas: 1
podSecurityContext:
runAsNonRoot: true
runAsUser: 2000
securityContext: {}
community-operator-crds:
enabled: true

View file

@@ -4,37 +4,67 @@
 # Name of the image repository to pull the container image from.
 image:
-repository: ghcr.io/ot-container-kit/redis-operator/redis-operator
+repository: quay.io/spotahome/redis-operator
 pullPolicy: IfNotPresent
-#tag: v1.2.4
+tag: v1.2.4
 imageCredentials:
 create: false
+registry: url.private.registry
+username: someone
+password: somepassword
+email: someone@example.com
+# Use exists secrets in namespace
+existsSecrets:
+- registrysecret
 updateStrategy:
 type: RollingUpdate
 replicas: 1
+# A name in place of the chart name for `app:` labels.
+nameOverride: ""
+# A name to substitute for the full names of resources.
+fullnameOverride: ""
+serviceAccount:
+# Enable service account creation.
+create: true
+# Annotations to be added to the service account.
+annotations: {}
+# The name of the service account to use.
+# If not set and create is true, a name is generated using the fullname template.
+name: ""
+service:
+type: ClusterIP
+port: 9710
+container:
+port: 9710
+# Container [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container).
+# See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1) for details.
 securityContext:
 readOnlyRootFilesystem: true
 runAsNonRoot: true
 runAsUser: 1000
+# Container resource [requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+# See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources) for details.
+# @default -- No requests or limits.
 resources:
 requests:
 cpu: 10m
 memory: 16Mi
 limits:
-cpu: 500m
-memory: 500Mi
-certificate:
-secretName: redis-operator-tls
-certmanager:
-enabled: false
+cpu: 100m
+memory: 128Mi
+### Monitoring
+###############
 monitoring:
 # Enable Prometheus PodMonitor to monitor the operator.
 enabled: false

View file

@@ -5,7 +5,7 @@ apps:
 - name: vault
 repoURL: https://helm.releases.hashicorp.com
 chart: vault
-targetRevision: 0.28.1
+targetRevision: 0.28.0
 syncWave: '-3'
 ignoreDiff:
 - group: "*"
@@ -18,5 +18,5 @@ apps:
 namespace: vault-secrets-operator
 repoURL: https://ricoberger.github.io/helm-charts
 chart: vault-secrets-operator
-targetRevision: 2.5.10
+targetRevision: 2.5.7
 syncWave: '-2'

View file

@@ -5,13 +5,13 @@ global:
 enable: false
 injector:
-enabled: false
+enabled: true
 server:
 enabled: true
 image:
 repository: "hashicorp/vault"
-tag: "1.17.2"
+tag: "1.16.2"
 auditStorage:
 accessMode: ReadWriteOnce
 annotations: {}

View file

@@ -6,16 +6,14 @@ config:
 networkPolicy:
 groups:
 - internet
-rules:
-- allow-agent
 labels:
 environment: external
 apps:
-- name: woodpecker-server
-path: charts/woodpecker/charts/server
+- name: woodpecker
+path: charts/woodpecker
 secrets:
 - name: github-oauth
 keys:
@@ -24,15 +22,3 @@ apps:
 - name: woodpecker-secret
 keys:
 - WOODPECKER_AGENT_SECRET
-- name: woodpecker-agent
-path: charts/woodpecker/charts/agent
-namespace: woodpecker-agent
-networkPolicy:
-rules:
-- allow-agent
-secrets:
-- name: woodpecker-secret
-fromApp: woodpecker-server
-keys:
-- WOODPECKER_AGENT_SECRET

View file

@@ -1,56 +0,0 @@
# -- The number of replicas for the deployment
replicaCount: 2
image:
registry: docker.io
repository: woodpeckerci/woodpecker-agent
pullPolicy: Always
tag: 'next'
env:
# -- Add the environment variables for the agent component
WOODPECKER_SERVER: 'woodpecker-server.woodpecker.svc.cluster.local:9000'
WOODPECKER_BACKEND: kubernetes
WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker-agent
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: 'ssd'
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
WOODPECKER_BACKEND_K8S_STORAGE_RWX: false
WOODPECKER_BACKEND_K8S_POD_LABELS: ''
WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: ''
WOODPECKER_CONNECT_RETRY_COUNT: '1'
# -- Add extra secret that is contains environment variables
extraSecretNamesForEnvFrom:
- woodpecker-secret
persistence:
enabled: true
size: 1Gi
storageClass: 'ssd'
accessModes:
- ReadWriteOnce
# -- Add pod security context
podSecurityContext:
runAsUser: 1000
runAsGroup: 2000
fsGroup: 2000
# -- Add security context
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 2000
# -- Specifies the resources for the agent component
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 10m
memory: 10Mi

View file

@@ -1,84 +0,0 @@
statefulSet:
replicaCount: 1
updateStrategy:
type: RollingUpdate
image:
registry: docker.io
repository: woodpeckerci/woodpecker-server
pullPolicy: Always
tag: 'next'
# -- Add environment variables for the server component
env:
WOODPECKER_OPEN: "false"
WOODPECKER_ADMIN: "Nold360"
WOODPECKER_HOST: https://ci.nold.in
WOODPECKER_GITHUB: "true"
#WOODPECKER_REPO_OWNERS: "nold360"
HTTP_PROXY: http://proxy-squid.proxy.svc.cluster.local:3128
HTTPS_PROXY: http://proxy-squid.proxy.svc.cluster.local:3128
http_proxy: http://proxy-squid.proxy.svc.cluster.local:3128
https_proxy: http://proxy-squid.proxy.svc.cluster.local:3128
NO_PROXY: localhost,.cluster.local,10.43.0.1
no_proxy: localhost,.cluster.local,10.43.0.1
# -- Add extra environment variables from the secrets list
extraSecretNamesForEnvFrom:
- woodpecker-secret
- github-oauth
# -- Create a generic secret to store things in, e.g. env values
secrets:
- name: woodpecker-store
persistentVolume:
enabled: true
size: 10Gi
mountPath: '/var/lib/woodpecker'
storageClass: ''
podSecurityContext:
fsGroup: 2000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
ingress:
enabled: true
ingressClassName: ingress-external
labels:
environment: external
annotations:
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/hostname: ci.nold.in
external-dns.alpha.kubernetes.io/target: nold.in
hosts:
- host: ci.nold.in
paths:
- path: /
backend:
serviceName: server
servicePort: 80
tls:
- secretName: ci-nold-in-tls
hosts:
- ci.nold.in
# -- Specifies the ressources for the server component
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi

View file

@@ -0,0 +1,148 @@
agent:
# -- Enable the agent component
enabled: true
# -- The number of replicas for the deployment
replicaCount: 2
image:
registry: docker.io
repository: woodpeckerci/woodpecker-agent
pullPolicy: Always
tag: 'next'
env:
# -- Add the environment variables for the agent component
WOODPECKER_SERVER: 'woodpecker-server:9000'
WOODPECKER_BACKEND: kubernetes
WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: 'ssd'
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
WOODPECKER_BACKEND_K8S_STORAGE_RWX: false
WOODPECKER_BACKEND_K8S_POD_LABELS: ''
WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: ''
WOODPECKER_CONNECT_RETRY_COUNT: '1'
# -- Add extra secret that is contains environment variables
extraSecretNamesForEnvFrom:
- woodpecker-secret
persistence:
enabled: true
size: 1Gi
storageClass: 'ssd'
accessModes:
- ReadWriteOnce
# -- Add pod security context
podSecurityContext:
runAsUser: 1000
runAsGroup: 2000
fsGroup: 2000
# -- Add security context
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 2000
# -- Specifies the resources for the agent component
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 10m
memory: 10Mi
server:
enabled: true
statefulSet:
replicaCount: 1
updateStrategy:
type: RollingUpdate
image:
registry: docker.io
repository: woodpeckerci/woodpecker-server
pullPolicy: Always
tag: 'next'
# -- Add environment variables for the server component
env:
WOODPECKER_OPEN: "false"
WOODPECKER_ADMIN: "Nold360"
WOODPECKER_HOST: https://ci.nold.in
WOODPECKER_GITHUB: "true"
#WOODPECKER_REPO_OWNERS: "nold360"
HTTP_PROXY: http://proxy-squid.proxy.svc.cluster.local:3128
HTTPS_PROXY: http://proxy-squid.proxy.svc.cluster.local:3128
http_proxy: http://proxy-squid.proxy.svc.cluster.local:3128
https_proxy: http://proxy-squid.proxy.svc.cluster.local:3128
NO_PROXY: localhost,.cluster.local,10.43.0.1
no_proxy: localhost,.cluster.local,10.43.0.1
# -- Add extra environment variables from the secrets list
extraSecretNamesForEnvFrom:
- woodpecker-secret
- github-oauth
# -- Create a generic secret to store things in, e.g. env values
secrets:
- name: woodpecker-store
persistentVolume:
enabled: true
size: 10Gi
mountPath: '/var/lib/woodpecker'
storageClass: ''
podSecurityContext:
fsGroup: 2000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
ingress:
enabled: true
ingressClassName: ingress-external
labels:
environment: external
annotations:
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: letsencrypt
external-dns.alpha.kubernetes.io/hostname: ci.nold.in
external-dns.alpha.kubernetes.io/target: nold.in
hosts:
- host: ci.nold.in
paths:
- path: /
backend:
serviceName: server
servicePort: 80
tls:
- secretName: ci-nold-in-tls
hosts:
- ci.nold.in
# -- Specifies the ressources for the server component
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi

View file

@@ -5,7 +5,7 @@ apps:
 - name: argo-workflows
 repoURL: https://argoproj.github.io/argo-helm
 chart: argo-workflows
-targetRevision: 0.41.11
+targetRevision: 0.41.4
 # secrets:
 # - name: argocd-secret
 # keys:

View file

@@ -141,22 +141,3 @@ networkPolicy:
 - namespaceSelector:
 matchLabels:
 app.heqet.gnu.one/project: argocd
-# Allow access to internet proxy
-allow-localai:
-podSelector: {}
-policyTypes:
-- Egress
-egress:
-- ports:
-- port: 80
-protocol: TCP
-- port: 8080
-protocol: TCP
-to:
-- podSelector:
-matchLabels:
-app.kubernetes.io/name: local-ai
-- namespaceSelector:
-matchLabels:
-app.heqet.gnu.one/project: ai