## Expand the name of the chart
nameOverride: ""

## Default fully qualified app name
fullnameOverride: ""

## Enable or disable Milvus Cluster mode
cluster:
  enabled: false

global:
  imageRegistry: ""

image:
  all:
    repository: milvusdb/milvus
    tag: v2.6.5
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName

# Global node selector
# If set, this applies to all Milvus components.
# Individual components can override it with their own node selector.
nodeSelector: {}
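
# Example (illustrative): pin every component to Linux amd64 nodes using the
# well-known Kubernetes node labels; adjust the labels to your cluster.
# nodeSelector:
#   kubernetes.io/os: linux
#   kubernetes.io/arch: amd64
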
# Global tolerations
# If set, these apply to all Milvus components.
# Individual components can override them with their own tolerations.
tolerations: []

# Global affinity
# If set, this applies to all Milvus components.
# Individual components can override it with their own affinity.
affinity: {}

# Global securityContext
# If set, this applies to all Milvus components.
# Individual components can override it with their own securityContext.
containerSecurityContext: {}
securityContext: {}
  # runAsUser: 1000
  # runAsGroup: 1000
  # fsGroup: 1000
  # runAsNonRoot: true

# Global topologySpreadConstraints
# If set, this applies to all Milvus components.
# Individual components can override it with their own topologySpreadConstraints.
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
topologySpreadConstraints: []
# - maxSkew: 1
#   topologyKey: topology.kubernetes.io/zone
#   whenUnsatisfiable: DoNotSchedule
#   labelSelector:
#     matchLabels:
#       app.kubernetes.io/name: milvus

# Global labels and annotations
# If set, these apply to all Milvus components.
labels: {}
annotations: {}

# Global volumes & volumeMounts
# If set, these apply to all Milvus components.
# ref: https://kubernetes.io/docs/concepts/storage/volumes/
volumes: []
volumeMounts: []

# Streaming node
# Default config for 2.6.0 or later.
streaming:
  enabled: true

##
## If set, the whole config will be mounted from this custom ConfigMap.
## The ConfigMap should have a key named `milvus.yaml` which contains the full Milvus config.
## extraConfigFiles will be ignored if this variable is set.
##
customConfigMap: ""
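
# Example (illustrative): the ConfigMap name "my-milvus-config" below is a
# placeholder. Create it from a full milvus.yaml under the key milvus.yaml,
# then reference it here:
#   kubectl create configmap my-milvus-config --from-file=milvus.yaml
# customConfigMap: "my-milvus-config"
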
# Extra configs for milvus.yaml
# If set, this config will be merged into milvus.yaml.
# Please follow the config structure of milvus.yaml
# at https://github.com/milvus-io/milvus/blob/master/configs/milvus.yaml
# Note: this config takes top priority and overrides the config
# in the image and in the helm chart.
extraConfigFiles:
  user.yaml: |+
    # For example, enable the REST HTTP server for the Milvus proxy:
    # proxy:
    #   http:
    #     enabled: true
    #     maxUserNum: 100
    #     maxRoleNum: 10
    ## Enable tlsMode and set the TLS cert and key:
    # tls:
    #   serverPemPath: /etc/milvus/certs/tls.crt
    #   serverKeyPath: /etc/milvus/certs/tls.key
    # common:
    #   security:
    #     tlsMode: 1

## Expose the Milvus service to be accessed from outside the cluster (LoadBalancer service)
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
  type: ClusterIP
  port: 19530
  portName: milvus
  nodePort: ""
  annotations: {}
  labels: {}

  ## List of IP addresses at which the Milvus service is available
  ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  ##
  externalIPs: []
  # - externalIp1

  # loadBalancerSourceRanges is a list of allowed CIDR values, which are combined with ServicePort to
  # set the allowed inbound rules on the security group assigned to the master load balancer
  loadBalancerSourceRanges:
    - 0.0.0.0/0
  # Optionally assign a known public LB IP
  # loadBalancerIP: 1.2.3.4
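
# Example (illustrative): expose Milvus on a fixed NodePort instead of a
# plain ClusterIP; the port value is a placeholder in the default 30000-32767 range.
# service:
#   type: NodePort
#   nodePort: "30530"
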
ingress:
  enabled: false
  ingressClassName: ""
  annotations:
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/backend-protocol: GRPC
    nginx.ingress.kubernetes.io/listen-ports-ssl: '[19530]'
    nginx.ingress.kubernetes.io/proxy-body-size: 4m
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
  labels: {}
  rules:
    - host: "milvus-example.local"
      path: "/"
      pathType: "Prefix"
    # - host: "milvus-example2.local"
    #   path: "/otherpath"
    #   pathType: "Prefix"
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - milvus-example.local

serviceAccount:
  create: false
  name:
  annotations:
  labels:
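
# Example (illustrative): create a dedicated ServiceAccount and bind it to an
# AWS IAM role via IRSA; the name and role ARN below are placeholders.
# serviceAccount:
#   create: true
#   name: milvus-sa
#   annotations:
#     eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/milvus-s3-access
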
metrics:
  enabled: true

  serviceMonitor:
    # Set this to `true` to create a ServiceMonitor for the Prometheus operator
    enabled: false
    interval: "30s"
    scrapeTimeout: "10s"
    # Additional labels so that the ServiceMonitor is discovered by Prometheus
    additionalLabels: {}
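
# Example (illustrative): label the ServiceMonitor so it is picked up by a
# Prometheus instance selecting `release: kube-prometheus-stack` (a common
# default; match the label to your Prometheus configuration).
# metrics:
#   serviceMonitor:
#     enabled: true
#     additionalLabels:
#       release: kube-prometheus-stack
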
livenessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 30
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

readinessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 10
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

log:
  level: "info"
  file:
    maxSize: 300    # MB
    maxAge: 10      # days
    maxBackups: 20
  format: "text"    # text/json

  persistence:
    mountPath: "/milvus/logs"
    ## If true, create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: true
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Logs Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ## set, choosing the default provisioner.
      ## The ReadWriteMany access mode is required for Milvus cluster mode.
      ##
      storageClass:
      accessModes: ReadWriteMany
      size: 10Gi
      subPath: ""
      storagePath:
      storageNode:

## Heaptrack traces all memory allocations and annotates these events with stack traces.
## See more: https://github.com/KDE/heaptrack
## Enabling heaptrack in production is not recommended.
heaptrack:
  image:
    repository: milvusdb/heaptrack
    tag: v0.1.0
    pullPolicy: IfNotPresent

standalone:
  replicas: 1  # Run standalone mode with replication disabled
  resources: {}
  # Set local storage size in resources
  # resources:
  #   limits:
  #     ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling

  ## Default message queue for Milvus standalone
  ## Supported values: rocksmq, natsmq, pulsar, kafka, and woodpecker
  messageQueue: woodpecker
  persistence:
    mountPath: "/var/lib/milvus"
    ## If true, Milvus will create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: true
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ## set, choosing the default provisioner.
      ##
      storageClass:
      accessModes: ReadWriteOnce
      size: 50Gi
      subPath: ""
      storagePath:
      storageNode:
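
# Example (illustrative): switch the standalone message queue from the default
# woodpecker to rocksmq, one of the supported values listed above.
# standalone:
#   messageQueue: rocksmq
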
proxy:
  enabled: true
  # You can set replicas to -1 to remove the replicas field, in case you want to use HPA
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  http:
    enabled: true  # Whether to enable the HTTP REST server
    debugMode:
      enabled: false
  # Mount a TLS secret into the proxy pod
  tls:
    enabled: false
## When enabling proxy.tls, uncomment all items below and populate the key and crt values.
#    enabled: true
#    secretName: milvus-tls
## Expecting base64-encoded values here, e.g. $(cat tls.crt | base64 -w 0) and $(cat tls.key | base64 -w 0)
#    key: LS0tLS1CRUdJTiBQU--REDUCT
#    crt: LS0tLS1CRUdJTiBDR--REDUCT
#  volumes:
#  - secret:
#      secretName: milvus-tls
#    name: milvus-tls
#  volumeMounts:
#  - mountPath: /etc/milvus/certs/
#    name: milvus-tls
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}
  hpa:
    enabled: false
    minReplicas: 1
    maxReplicas: 5
    cpuUtilization: 40
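
# Example (illustrative): instead of inlining base64 key/crt values, the
# `milvus-tls` secret referenced by the commented proxy.tls block above can
# be created from local certificate files:
#   kubectl create secret generic milvus-tls --from-file=tls.crt --from-file=tls.key
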
rootCoordinator:
  enabled: false
  # You can set replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Root Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for the root coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}

  service:
    port: 53100
    annotations: {}
    labels: {}
    clusterIP: ""

queryCoordinator:
  enabled: false
  # You can set replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Query Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for the query coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}

  service:
    port: 19531
    annotations: {}
    labels: {}
    clusterIP: ""

queryNode:
  enabled: true
  # You can set replicas to -1 to remove the replicas field, in case you want to use HPA
  replicas: 1
  resources: {}
  # Set local storage size in resources
  # resources:
  #   limits:
  #     ephemeral-storage: 100Gi
  # Set this when using a runtime other than the default, e.g. nvidia
  runtimeClassName: ""
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true  # Enable the query node to load and search on-disk indexes
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}
  hpa:
    enabled: false
    minReplicas: 1
    maxReplicas: 5
    cpuUtilization: 40
    memoryUtilization: 60

indexCoordinator:
  enabled: false
  # You can set replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Index Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for the index coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}

  service:
    port: 31000
    annotations: {}
    labels: {}
    clusterIP: ""

indexNode:
  enabled: false
  # You can set replicas to -1 to remove the replicas field, in case you want to use HPA
  replicas: 1
  resources: {}
  # Set local storage size in resources
  # limits:
  #   ephemeral-storage: 100Gi
  # Set this when using a runtime other than the default, e.g. nvidia
  runtimeClassName: ""
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  disk:
    enabled: true  # Enable the index node to build disk vector indexes
    size:
      enabled: false  # Enable local storage size limit
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}
  hpa:
    enabled: false
    minReplicas: 1
    maxReplicas: 5
    cpuUtilization: 40

dataCoordinator:
  enabled: false
  # You can set replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Data Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for the data coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}

  service:
    port: 13333
    annotations: {}
    labels: {}
    clusterIP: ""

dataNode:
  enabled: true
  # You can set replicas to -1 to remove the replicas field, in case you want to use HPA
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}
  hpa:
    enabled: false
    minReplicas: 1
    maxReplicas: 5
    cpuUtilization: 40

## mixCoordinator contains all coordinators.
## To use mixCoordinator, enable it and disable all the other coordinators.
mixCoordinator:
  enabled: true
  # You can set replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Mix Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  topologySpreadConstraints: []  # Component-specific topologySpreadConstraints
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for the mix coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}
  annotations: {}

  service:
    annotations: {}
    labels: {}
    clusterIP: ""

streamingNode:
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  securityContext: {}
  containerSecurityContext: {}
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  strategy: {}

attu:
  enabled: false
  name: attu
  image:
    repository: zilliz/attu
    tag: v2.5.3
    pullPolicy: IfNotPresent
  service:
    annotations: {}
    labels: {}
    type: ClusterIP
    port: 3000
    # loadBalancerIP: ""
  resources: {}
  securityContext: {}
  containerSecurityContext: {}
  podLabels: {}
  annotations: {}
  ingress:
    enabled: false
    ingressClassName: ""
    annotations: {}
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    labels: {}
    hosts:
      - milvus-attu.local
    tls: []
    # - secretName: chart-attu-tls
    #   hosts:
    #     - milvus-attu.local

  route:
    enabled: false
    host: ""
    annotations: {}
    labels: {}
    tls:
      termination: edge  # edge, passthrough, or reencrypt
      insecureEdgeTerminationPolicy: Redirect  # None, Redirect, or Allow
      # certificate: ""
      # key: ""
      # caCertificate: ""
      # destinationCACertificate: ""

## Configuration values for the minio dependency
## ref: https://github.com/zilliztech/milvus-helm/blob/master/charts/minio/README.md
##

minio:
  enabled: true
  name: minio
  mode: standalone
  image:
    tag: "RELEASE.2024-12-18T13-15-44Z"
    pullPolicy: IfNotPresent
  accessKey: minioadmin
  secretKey: minioadmin
  existingSecret: ""
  bucketName: "milvus-bucket"
  rootPath: file
  useIAM: false
  iamEndpoint: ""
  region: ""
  useVirtualHost: false
  podDisruptionBudget:
    enabled: false
  resources:
    requests:
      memory: 2Gi

  service:
    type: ClusterIP
    port: 9000

  persistence:
    enabled: true
    existingClaim: ""
    storageClass:
    accessMode: ReadWriteOnce
    size: 500Gi
    storagePath:
    storageNode:

  livenessProbe:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 5

  readinessProbe:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 5

  startupProbe:
    enabled: true
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 60

## Configuration values for the etcd dependency
## ref: https://artifacthub.io/packages/helm/bitnami/etcd
##

etcd:
  enabled: true
  name: etcd
  workload: statefulSet
  replicaCount: 1
  pdb:
    create: false
  image:
    repository: "milvusdb/etcd"
    tag: "3.5.18-r1"
    pullPolicy: IfNotPresent

  service:
    type: ClusterIP
    port: 2379
    peerPort: 2380

  auth:
    rbac:
      enabled: false
      create: false
    token:
      enabled: false

  persistence:
    enabled: true
    storageClass:
    accessMode: ReadWriteOnce
    size: 10Gi
    storagePath:
    storageNode:

  ## Change the default timeout periods to mitigate zombie probe processes
  livenessProbe:
    enabled: true
    timeoutSeconds: 10

  readinessProbe:
    enabled: true
    periodSeconds: 20
    timeoutSeconds: 10

  ## Enable auto compaction
  ## compact every 1000 revisions
  ##
  autoCompactionMode: revision
  autoCompactionRetention: "1000"

  ## Increase the default quota to 4G
  ##
  extraEnvVars:
    - name: ETCD_QUOTA_BACKEND_BYTES
      value: "4294967296"
    - name: ETCD_HEARTBEAT_INTERVAL
      value: "500"
    - name: ETCD_ELECTION_TIMEOUT
      value: "2500"

## Configuration values for the pulsar dependency
## ref: https://github.com/apache/pulsar-helm-chart
##

# default values for pulsar v3
pulsarv3:
  name: "pulsarv3"  # note: nameOverride should be kept the same as the name field
  nameOverride: "pulsarv3"
  enabled: false
  persistence: true
  extra: {}
  monitoring: {}
  volumes:
    persistence: true
    local_storage: false

  components:
    # zookeeper
    zookeeper: true
    # bookkeeper
    bookkeeper: true
    # bookkeeper - autorecovery
    autorecovery: true
    # broker
    broker: true
    # functions
    functions: false
    # proxy
    proxy: true
    # toolset
    toolset: false
    # pulsar manager
    pulsar_manager: false

  images:
    zookeeper:
      repository: apachepulsar/pulsar
      tag: 3.0.7
      pullPolicy: IfNotPresent
    bookie:
      repository: apachepulsar/pulsar
      tag: 3.0.7
      pullPolicy: IfNotPresent
    autorecovery:
      repository: apachepulsar/pulsar
      tag: 3.0.7
      pullPolicy: IfNotPresent
    broker:
      repository: apachepulsar/pulsar
      tag: 3.0.7
      pullPolicy: IfNotPresent
    proxy:
      repository: apachepulsar/pulsar
      tag: 3.0.7
      pullPolicy: IfNotPresent

  zookeeper:
    pdb:
      usePolicy: false
    affinity:
      anti_affinity: false
    component: zookeeper
    replicaCount: 3
    updateStrategy:
      type: RollingUpdate
    podManagementPolicy: Parallel
    podMonitor:
      enabled: false
    resources:
      requests:
        memory: 256Mi
        cpu: 0.2
    volumes:
      # use a persistent volume or emptyDir
      persistence: true
      data:
        name: data
        size: 20Gi
        local_storage: false
    configData:
      PULSAR_MEM: >
        -Xms256m -Xmx256m
      PULSAR_GC: >
        -XX:+UseG1GC
        -XX:MaxGCPauseMillis=10
        -Dcom.sun.management.jmxremote
        -Djute.maxbuffer=10485760
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:+DisableExplicitGC
        -XX:+ExitOnOutOfMemoryError
        -XX:+PerfDisableSharedMem

  bookkeeper:
    pdb:
      usePolicy: false
    affinity:
      anti_affinity: false
    component: bookie
    replicaCount: 3
    updateStrategy:
      type: RollingUpdate
    podManagementPolicy: Parallel
    podMonitor:
      enabled: false
    resources:
      requests:
        memory: 2048Mi
        cpu: 0.5
    volumes:
      # use a persistent volume or emptyDir
      persistence: true
      journal:
        name: journal
        size: 100Gi
        local_storage: false
      ledgers:
        name: ledgers
        size: 200Gi
        local_storage: false
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -XX:+UseG1GC
        -XX:MaxGCPauseMillis=10
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=4
        -XX:ConcGCThreads=4
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
        -XX:+PerfDisableSharedMem
      nettyMaxFrameSizeBytes: "104867840"

  autorecovery:
    affinity:
      anti_affinity: false
    component: recovery
    replicaCount: 1
    resources:
      requests:
        memory: 128Mi
        cpu: 0.1
    podMonitor:
      enabled: false
    configData:
      BOOKIE_MEM: >
        -Xms128m -Xmx128m
      PULSAR_PREFIX_useV2WireProtocol: "true"

  pulsar_metadata:
    component: pulsar-init
    image:
      repository: apachepulsar/pulsar
      tag: 3.0.7
      pullPolicy: IfNotPresent

  broker:
    pdb:
      usePolicy: false
    affinity:
      anti_affinity: false
    component: broker
    replicaCount: 2
    autoscaling:
      enabled: false
    podMonitor:
      enabled: false
    resources:
      requests:
        memory: 2048Mi
        cpu: 0.5
    configData:
      PULSAR_MEM: >
        -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -XX:+UseG1GC
        -XX:MaxGCPauseMillis=10
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=4
        -XX:ConcGCThreads=4
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
        -XX:+PerfDisableSharedMem
      managedLedgerDefaultEnsembleSize: "2"
      managedLedgerDefaultWriteQuorum: "2"
      managedLedgerDefaultAckQuorum: "2"
      maxMessageSize: "104857600"
      defaultRetentionTimeInMinutes: "10080"
      defaultRetentionSizeInMB: "-1"
      backlogQuotaDefaultLimitGB: "8"
      ttlDurationDefaultInSeconds: "259200"
      subscriptionExpirationTimeMinutes: "3"
      backlogQuotaDefaultRetentionPolicy: producer_request_hold

  proxy:
    pdb:
      usePolicy: false
    affinity:
      anti_affinity: false
    component: proxy
    replicaCount: 2
    autoscaling:
      enabled: false
    podMonitor:
      enabled: false
    resources:
      requests:
        memory: 1024Mi
        cpu: 0.5
    configData:
      PULSAR_MEM: >
        -Xms512m -Xmx512m -XX:MaxDirectMemorySize=2048m
      PULSAR_GC: >
        -XX:+UseG1GC
        -XX:MaxGCPauseMillis=10
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=4
        -XX:ConcGCThreads=4
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
        -XX:+PerfDisableSharedMem
      httpNumThreads: "8"
    ports:
      http: 80
      https: 443
      pulsar: 6650
      pulsarssl: 6651
      containerPorts:
        http: 8080
        https: 8443
    service:
      annotations: {}
      type: ClusterIP

  kube-prometheus-stack:
    crds:
      enabled: false
    enabled: false
    prometheus:
      enabled: false
    grafana:
      enabled: false

# default values for pulsar v2
pulsar:
  enabled: false
  name: pulsar

  fullnameOverride: ""
  persistence: true

  maxMessageSize: "5242880"  # 5 * 1024 * 1024 bytes, maximum size of each message in pulsar

  rbac:
    enabled: false
    psp: false
    limit_to_namespace: true

  affinity:
    anti_affinity: false

  ## enableAntiAffinity: no

  components:
    zookeeper: true
    bookkeeper: true
    # bookkeeper - autorecovery
    autorecovery: true
    broker: true
    functions: false
    proxy: true
    toolset: false
    pulsar_manager: false

  monitoring:
    prometheus: false
    grafana: false
    node_exporter: false
    alert_manager: false

  images:
    broker:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.9.5
    autorecovery:
      repository: apachepulsar/pulsar
      tag: 2.9.5
      pullPolicy: IfNotPresent
    zookeeper:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.9.5
    bookie:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.9.5
    proxy:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.9.5
    pulsar_manager:
      repository: apachepulsar/pulsar-manager
      pullPolicy: IfNotPresent
      tag: v0.1.0

  zookeeper:
    resources:
      requests:
        memory: 1024Mi
        cpu: 0.3
    configData:
      PULSAR_MEM: >
        -Xms1024m
        -Xmx1024m
      PULSAR_GC: >
        -Dcom.sun.management.jmxremote
        -Djute.maxbuffer=10485760
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:+DisableExplicitGC
        -XX:+PerfDisableSharedMem
        -Dzookeeper.forceSync=no
    pdb:
      usePolicy: false

  bookkeeper:
    replicaCount: 3
    volumes:
      journal:
        name: journal
        size: 100Gi
      ledgers:
        name: ledgers
        size: 200Gi
    resources:
      requests:
        memory: 2048Mi
        cpu: 1
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+UseG1GC -XX:MaxGCPauseMillis=10
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=32
        -XX:ConcGCThreads=32
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
        -XX:+PerfDisableSharedMem
        -XX:+PrintGCDetails
      nettyMaxFrameSizeBytes: "104867840"
    pdb:
      usePolicy: false

  broker:
    component: broker
    podMonitor:
      enabled: false
    replicaCount: 1
    resources:
      requests:
        memory: 4096Mi
        cpu: 1.5
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=32
        -XX:ConcGCThreads=32
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
      maxMessageSize: "104857600"
      defaultRetentionTimeInMinutes: "10080"
      defaultRetentionSizeInMB: "-1"
      backlogQuotaDefaultLimitGB: "8"
      ttlDurationDefaultInSeconds: "259200"
      subscriptionExpirationTimeMinutes: "3"
      backlogQuotaDefaultRetentionPolicy: producer_request_hold
    pdb:
      usePolicy: false

  autorecovery:
    resources:
      requests:
        memory: 512Mi
        cpu: 1

  proxy:
    replicaCount: 1
    podMonitor:
      enabled: false
    resources:
      requests:
        memory: 2048Mi
        cpu: 1
    service:
      type: ClusterIP
    ports:
      pulsar: 6650
      http: 8080
    configData:
      PULSAR_MEM: >
        -Xms2048m -Xmx2048m
      PULSAR_GC: >
        -XX:MaxDirectMemorySize=2048m
      httpNumThreads: "100"
    pdb:
      usePolicy: false

  pulsar_manager:
    service:
      type: ClusterIP

  pulsar_metadata:
    component: pulsar-init
    image:
      # the image used for running the `pulsar-cluster-initialize` job
      repository: apachepulsar/pulsar
      tag: 2.9.5

## Configuration values for the kafka dependency
## ref: https://artifacthub.io/packages/helm/bitnami/kafka
##

kafka:
  enabled: false
  name: kafka
  replicaCount: 3
  image:
    repository: bitnamilegacy/kafka
    tag: 3.1.0
  ## Increase the termination grace period for graceful Kafka shutdown
  terminationGracePeriodSeconds: "90"
  pdb:
    create: false

  ## Enable the startup probe to prevent pod restarts during recovery
  startupProbe:
    enabled: true

  ## Kafka Java heap size
  heapOpts: "-Xmx4096m -Xms4096m"
  maxMessageBytes: _10485760
  defaultReplicationFactor: 3
  offsetsTopicReplicationFactor: 3
  ## Only enable time-based log retention
  logRetentionHours: 168
  logRetentionBytes: _-1
  extraEnvVars:
    - name: KAFKA_CFG_MAX_PARTITION_FETCH_BYTES
      value: "5242880"
    - name: KAFKA_CFG_MAX_REQUEST_SIZE
      value: "5242880"
    - name: KAFKA_CFG_REPLICA_FETCH_MAX_BYTES
      value: "10485760"
    - name: KAFKA_CFG_FETCH_MESSAGE_MAX_BYTES
      value: "5242880"
    - name: KAFKA_CFG_LOG_ROLL_HOURS
      value: "24"

  persistence:
    enabled: true
    storageClass:
    accessMode: ReadWriteOnce
    size: 300Gi

  metrics:
    ## Prometheus Kafka exporter: exposes metrics complementary to the JMX exporter
    kafka:
      enabled: false
      image:
        repository: bitnamilegacy/kafka-exporter-archived
        tag: 1.4.2

    ## Prometheus JMX exporter: exposes the majority of Kafka's metrics
    jmx:
      enabled: false
      image:
        repository: bitnamilegacy/jmx-exporter
        tag: 0.16.1

    ## To enable the ServiceMonitor, you must enable the Kafka exporter,
    ## the JMX exporter, or both.
    serviceMonitor:
      enabled: false

  service:
    type: ClusterIP
    ports:
      client: 9092

  zookeeper:
    enabled: true
    replicaCount: 3
    image:
      repository: bitnamilegacy/zookeeper
      tag: 3.7.0

###################################
# Woodpecker
# - these configs are used to enable Woodpecker message queue
###################################
woodpecker:
  enabled: true

###################################
# External S3
# - these configs are only used when `externalS3.enabled` is true
###################################
externalS3:
  enabled: false
  host: ""
  port: ""
  accessKey: ""
  secretKey: ""
  useSSL: false
  bucketName: ""
  rootPath: ""
  useIAM: false
  cloudProvider: "aws"
  iamEndpoint: ""
  region: ""
  useVirtualHost: false
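
# Example (illustrative): point Milvus at AWS S3 with IAM-based auth; the
# endpoint, bucket, and region below are placeholders. Remember to disable
# the bundled minio (`minio.enabled: false`) when using external S3.
# externalS3:
#   enabled: true
#   host: "s3.us-west-2.amazonaws.com"
#   port: "443"
#   useSSL: true
#   bucketName: "my-milvus-bucket"
#   useIAM: true
#   cloudProvider: "aws"
#   region: "us-west-2"
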
###################################
# GCS Gateway
# - these configs are only used when `minio.gcsgateway.enabled` is true
###################################
externalGcs:
  bucketName: ""

###################################
# External etcd
# - these configs are only used when `externalEtcd.enabled` is true
###################################
externalEtcd:
  enabled: false
  ## the endpoints of the external etcd
  ##
  endpoints:
    - localhost:2379
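
# Example (illustrative): a three-member external etcd cluster; hostnames are
# placeholders. Remember to disable the bundled etcd (`etcd.enabled: false`).
# externalEtcd:
#   enabled: true
#   endpoints:
#     - etcd-0.etcd-headless.default.svc.cluster.local:2379
#     - etcd-1.etcd-headless.default.svc.cluster.local:2379
#     - etcd-2.etcd-headless.default.svc.cluster.local:2379
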
###################################
# External pulsar
# - these configs are only used when `externalPulsar.enabled` is true
###################################
externalPulsar:
  enabled: false
  host: localhost
  port: 6650
  maxMessageSize: "5242880"  # 5 * 1024 * 1024 bytes, maximum size of each message in pulsar
  tenant: public
  namespace: default
  authPlugin: ""
  authParams: ""

###################################
# External kafka
# - these configs are only used when `externalKafka.enabled` is true
# - note that the following are just examples; confirm the values of
#   brokerList and sasl.mechanisms against your actual external Kafka
#   configuration. E.g., if you use AWS MSK, the configuration should
#   look something like this:
#   externalKafka:
#     enabled: true
#     brokerList: "xxxx:9096"
#     securityProtocol: SASL_SSL
#     sasl:
#       mechanisms: SCRAM-SHA-512
#       password: "xxx"
#       username: "xxx"
###################################
externalKafka:
  enabled: false
  brokerList: localhost:9092
  securityProtocol: SASL_SSL
  sasl:
    mechanisms: PLAIN
    username: ""
    password: ""

###################################
# Text Embeddings Inference (TEI)
# - these configs are used to deploy the TEI service
###################################
tei:
  enabled: false
  name: text-embeddings-inference
  image:
    repository: ghcr.nju.edu.cn/huggingface/text-embeddings-inference
    tag: cpu-1.6
    pullPolicy: IfNotPresent
  service:
    type: ClusterIP
    port: 8080
    annotations: {}
    labels: {}
  resources:
    requests:
      cpu: "4"
      memory: "8Gi"
    limits:
      cpu: "8"
      memory: "16Gi"
  persistence:
    enabled: true
    mountPath: "/data"
    annotations: {}
    persistentVolumeClaim:
      existingClaim: ""
      storageClass:
      accessModes: ReadWriteOnce
      size: 50Gi
      subPath: ""
  # TEI model configuration
  modelId: "BAAI/bge-large-en-v1.5"
  # Additional TEI configuration
  extraArgs: []
  nodeSelector: {}
  affinity: {}
  tolerations: []
  topologySpreadConstraints: []
  extraEnv: []
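
# Example (illustrative): serve a different embedding model and pass extra TEI
# flags; the model id and the --max-batch-tokens value below are placeholders.
# tei:
#   enabled: true
#   modelId: "BAAI/bge-m3"
#   extraArgs:
#     - "--max-batch-tokens=16384"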