add: localai stuff

nold 2023-12-25 21:46:48 +01:00
parent b3506b38c1
commit c13a096805
5 changed files with 210 additions and 130 deletions

projects/ai/project.yaml

@@ -0,0 +1,18 @@
config:
  description: LocalAI & LLM Stuff

apps:
  - name: localai
    repoURL: https://go-skynet.github.io/helm-charts
    chart: local-ai
    targetRevision: 2.1.2
  - name: anythingllm
    repo: bjw-s
    chart: app-template
    targetRevision: 2.4.0
  - name: flowise
    repo: bjw-s
    chart: app-template
    targetRevision: 2.4.0
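
The repoURL / chart / targetRevision fields match the source block of an Argo CD Application, so each entry here presumably expands into one Application through whatever app-of-apps generator consumes project.yaml. A minimal sketch of what the localai entry could render to, assuming Argo CD; the metadata, project, namespace and destination are assumptions, and the bjw-s repo alias would still need to be resolved to its chart repository URL:

# Hypothetical Argo CD Application for the localai entry above.
# Project, namespace and destination are assumptions, not taken from this repo.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: localai
spec:
  project: ai
  source:
    repoURL: https://go-skynet.github.io/helm-charts
    chart: local-ai
    targetRevision: 2.1.2
  destination:
    server: https://kubernetes.default.svc
    namespace: ai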


@@ -0,0 +1,47 @@
controllers:
  main:
    containers:
      main:
        image:
          repository: mintplexlabs/anythingllm
          tag: master
        env:
          STORAGE_DIR: /data

ingress:
  main:
    annotations:
      cert-manager.io/cluster-issuer: vault-issuer
    enabled: true
    hosts:
      - host: chat.dc
        paths:
          - path: /
            service:
              name: main
              port: http
    tls:
      - hosts:
          - chat.dc
        secretName: anythingllm-tls

persistence:
  data:
    accessMode: ReadWriteOnce
    enabled: true
    mountPath: /data
    readOnly: false
    size: 10Gi
    type: persistentVolumeClaim

securityContext:
  privileged: false

service:
  main:
    ports:
      http:
        enabled: true
        port: 3001
    type: ClusterIP
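
No resource requests or limits are set for the AnythingLLM container. If sizing is added later, a minimal sketch in the same values layout would be the following; the numbers are placeholders, and it assumes the bjw-s app-template chart passes a per-container resources block through to the pod spec unchanged:

controllers:
  main:
    containers:
      main:
        # Placeholder sizing; adjust to AnythingLLM's actual footprint.
        resources:
          requests:
            cpu: 100m
            memory: 512Mi
          limits:
            memory: 2Gi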


@@ -0,0 +1,77 @@
controllers:
  main:
    containers:
      main:
        image:
          repository: flowiseai/flowise
          tag: 1.4.7
        command:
          - flowise
          - start
        env:
          FLOWISE_USERNAME: nold
          PORT: "3000"
          DATABASE_PATH: /data/.flowise
          APIKEY_PATH: /data/.flowise
          SECRETKEY_PATH: /data/.flowise
          LOG_PATH: /data/.flowise/logs
          # NUMBER_OF_PROXIES= 1
          # DATABASE_TYPE=postgres
          # DATABASE_PORT=""
          # DATABASE_HOST=""
          # DATABASE_NAME="flowise"
          # DATABASE_USER=""
          # DATABASE_PASSWORD=""
          # FLOWISE_USERNAME=user
          # FLOWISE_PASSWORD=1234
          # FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
          # DEBUG=true
          # LOG_LEVEL=debug (error | warn | info | verbose | debug)
          # TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
          # TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
          # LANGCHAIN_TRACING_V2=true
          # LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
          # LANGCHAIN_API_KEY=your_api_key
          # LANGCHAIN_PROJECT=your_project

ingress:
  main:
    annotations:
      cert-manager.io/cluster-issuer: vault-issuer
    enabled: true
    hosts:
      - host: flowise.dc
        paths:
          - path: /
            service:
              name: main
              port: http
    tls:
      - hosts:
          - flowise.dc
        secretName: flowise-tls

persistence:
  data:
    accessMode: ReadWriteOnce
    enabled: true
    mountPath: /data
    readOnly: false
    size: 10Gi
    type: persistentVolumeClaim

securityContext:
  privileged: false

service:
  main:
    ports:
      http:
        enabled: true
        port: 3000
    type: ClusterIP
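
The commented DATABASE_* lines show how Flowise could be pointed at Postgres instead of the SQLite files under /data/.flowise. Filled in under the same env block it would look roughly like this; host, database name and credentials are placeholders, and the password would more sensibly come from a Secret than from plain values:

controllers:
  main:
    containers:
      main:
        env:
          # Placeholder Postgres connection; none of these values are real.
          DATABASE_TYPE: postgres
          DATABASE_HOST: postgres.example.svc
          DATABASE_PORT: "5432"
          DATABASE_NAME: flowise
          DATABASE_USER: flowise
          DATABASE_PASSWORD: changeme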


@@ -0,0 +1,68 @@
replicaCount: 1

deployment:
  image: quay.io/go-skynet/local-ai:master
  env:
    THREADS: 16
    CONTEXT_SIZE: 512
    DEBUG: "true"
  modelsPath: "/models"
  download_model:
    # To use cloud provided (eg AWS) image, provide it like: 1234356789.dkr.ecr.us-REGION-X.amazonaws.com/busybox
    image: busybox
  prompt_templates:
    # To use cloud provided (eg AWS) image, provide it like: 1234356789.dkr.ecr.us-REGION-X.amazonaws.com/busybox
    image: busybox
  pullPolicy: Always
  imagePullSecrets: []
    # - name: secret-names

# Prompt templates to include
# Note: the keys of this map will be the names of the prompt template files
promptTemplates:
  {}
  # ggml-gpt4all-j.tmpl: |
  #   The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
  #   ### Prompt:
  #   {{.Input}}
  #   ### Response:

# Models to download at runtime
models:
  # Whether to force download models even if they already exist
  forceDownload: false
  # The list of URLs to download models from
  # Note: the name of the file will be the name of the loaded model
  list:
    - url: "https://gpt4all.io/models/ggml-gpt4all-j.bin"
      # basicAuth: base64EncodedCredentials

  persistence:
    pvc:
      enabled: true
      size: 6Gi
      accessModes:
        - ReadWriteOnce

service:
  type: ClusterIP
  port: 80

ingress:
  enabled: true
  className: "ingress-internal"
  annotations:
    cert-manager.io/cluster-issuer: vault-issuer
  hosts:
    - host: ai.dc
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls:
    - secretName: localai-tls
      hosts:
        - ai.dc

image:
  pullPolicy: IfNotPresent
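
promptTemplates is left empty here; the chart's commented sample above shows the intended shape. Enabled, it would look like this (the template text is the chart's own example, and the key becomes the template file name):

promptTemplates:
  ggml-gpt4all-j.tmpl: |
    The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
    ### Prompt:
    {{.Input}}
    ### Response: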


@@ -1,130 +0,0 @@
image:
  ## @param image.registry Image registry
  registry: quay.io
  ## @param image.repository Image repository
  repository: go-skynet/local-ai
  ## @param image.tag Image tag
  tag: v2.2.0-ffmpeg
  ## @param image.digest Image digest
  digest: ""
  ## @param image.pullPolicy Image pull policy
  pullPolicy: IfNotPresent

podSecurityContext:
  fsGroup: 2000

## @param securityContext Container security context
securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

ingress:
  ## @param ingress.enabled Enable ingress controller resource
  enabled: true
  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress
  ingressClassName: "ingress-internal"
  ## @param ingress.pathType Ingress path type
  pathType: ImplementationSpecific
  ## @param ingress.annotations Ingress annotations
  annotations:
    cert-manager.io/cluster-issuer: vault-issuer
    traefik.ingress.kubernetes.io/router.tls: 'true'
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  ## @param ingress.hosts[0].host Hostname to your LocalAI installation
  ## @param ingress.hosts[0].paths Paths within the url structure
  hosts:
    - host: ai.dc
      paths:
        - /
  ## @param ingress.tls TLS configuration
  tls:
    - secretName: local-ai-tls
      hosts:
        - ai.dc

## @param resources CPU/Memory resource requests/limits
resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

## @param extraArgs Additional container arguments
extraArgs: {}
  # name: ""

## @param extraEnvVars Additional container environment variables
extraEnvVars: []
  # - name: MY-NAME
  #   value: "MY-VALUE"

## @param extraEnvVarsCM Name of existing ConfigMap containing additional container environment variables
extraEnvVarsCM:
## @param extraEnvVarsSecret Name of existing Secret containing additional container environment variables
extraEnvVarsSecret:

init:
  ## @param init.securityContext Init security context
  securityContext:
    capabilities:
      drop:
        - ALL
    # readOnlyRootFilesystem: true
    runAsNonRoot: true
    runAsUser: 1000
  ## @param init.resources Init CPU/Memory resource requests/limits
  resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi

persistence:
  ## @param persistence.enabled Enable persistence using PVC
  enabled: true
  accessMode: ReadWriteOnce
  size: 1Gi
  storageClass: ssd

config:
  ## @param config.galleries Model galleries
  galleries:
    - name: model-gallery
      url: github:go-skynet/model-gallery/index.yaml
  ## @param config.preloadModels Models to preload (configure liveness probe initial delay according to model download time)
  preloadModels:
    - id: model-gallery@text-embedding-ada-002
    - url: github:go-skynet/model-gallery/gpt4all-j.yaml
      name: gpt-3.5-turbo
    - id: model-gallery@stablediffusion
    - id: model-gallery@whisper-1
    - id: model-gallery@voice-en-us-kathleen-low

# FIXME: due to timeout during download
readinessProbe:
  enabled: false
livenessProbe:
  enabled: false