refactor: reorganize Helm chart structure and update service configurations

hhhhsc committed on 2025-10-23 16:57:12 +08:00
commit 17e6cea1d9, parent c998de2e9d
61 changed files with 3156 additions and 984 deletions

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,29 @@
apiVersion: v2
name: backend
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.0.1"
dependencies:
- name: database
repository: file://../database
version: 0.0.1

View File

@@ -0,0 +1,75 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "backend.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "backend.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "backend.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{/*
Common labels
*/}}
{{- define "backend.labels" -}}
helm.sh/chart: {{ include "backend.chart" . }}
{{ include "backend.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "backend.selectorLabels" -}}
app.kubernetes.io/name: {{ include "backend.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "backend.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "backend.fullname" .) .Values.serviceAccount.name -}}
{{- else }}
{{- default "default" .Values.serviceAccount.name -}}
{{- end }}
{{- end }}
{{/*
Name of image
*/}}
{{- define "backend.image" -}}
{{- $name := default .Values.image.repository .Values.global.image.backend.name }}
{{- $tag := default .Values.image.tag .Values.global.image.backend.tag }}
{{- if .Values.global.image.repository }}
{{- .Values.global.image.repository | trimSuffix "/" }}/{{ $name }}:{{ $tag }}
{{- else }}
{{- $name }}:{{ $tag }}
{{- end }}
{{- end }}
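For orientation, a minimal sketch of how the "backend.image" helper resolves the final image reference when an umbrella chart supplies the global block; the registry value below is purely illustrative and not part of this commit:

global:
  image:
    repository: "registry.example.com/datamate"   # hypothetical registry prefix
    pullPolicy: IfNotPresent
    backend:
      name: ""   # empty, so the subchart's image.repository ("datamate-backend") is used
      tag: ""    # empty, so the subchart's image.tag ("latest") is used

With these values the helper renders "registry.example.com/datamate/datamate-backend:latest"; if global.image.repository is left empty, it falls back to plain "datamate-backend:latest".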

View File

@@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "backend.fullname" . }}
labels:
{{- include "backend.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "backend.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "backend.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "backend.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ include "backend.image" . }}"
imagePullPolicy: {{ default .Values.global.image.pullPolicy .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
{{- with .Values.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.env }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "backend.fullname" . }}
labels:
{{- include "backend.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: {{ .Values.service.port }}
protocol: TCP
name: {{ .Chart.Name }}
selector:
{{- include "backend.selectorLabels" . | nindent 4 }}
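With the default values later in this commit (fullnameOverride "datamate-backend", ClusterIP, port 8080) this template should render roughly the following Service; the release name "datamate" used for the instance label is only an assumption, and labels are omitted for brevity:

apiVersion: v1
kind: Service
metadata:
  name: datamate-backend
spec:
  type: ClusterIP
  ports:
    - port: 8080
      targetPort: 8080
      protocol: TCP
      name: backend
  selector:
    app.kubernetes.io/name: datamate-backend
    app.kubernetes.io/instance: datamate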

View File

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "backend.serviceAccountName" . }}
labels:
{{- include "backend.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@@ -0,0 +1,114 @@
# Default values for datamate.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This sets the replica count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: "datamate-backend"
# This sets the pull policy for images.
pullPolicy: "IfNotPresent"
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
# This is for the secrets used to pull an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: "datamate-backend"
fullnameOverride: "datamate-backend"
env:
- name: namespace
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SPRING_CONFIG_LOCATION
value: file:/opt/backend/application.yml
# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes Annotations on a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels on a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This is for setting up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
# This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 8080
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This is to set up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
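A minimal sketch of an install-time override file for this subchart; the resource figures and the Spring Boot actuator health path are assumptions for illustration, not chart defaults:

replicaCount: 2
resources:
  requests:
    cpu: 250m
    memory: 512Mi
  limits:
    cpu: "1"
    memory: 1Gi
livenessProbe:
  httpGet:
    path: /actuator/health   # hypothetical health endpoint; adjust to the backend's real one
    port: http
readinessProbe:
  httpGet:
    path: /actuator/health
    port: http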

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: database
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.0.1"

View File

@@ -0,0 +1,75 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "database.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "database.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "database.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{/*
Common labels
*/}}
{{- define "database.labels" -}}
helm.sh/chart: {{ include "database.chart" . }}
{{ include "database.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "database.selectorLabels" -}}
app.kubernetes.io/name: {{ include "database.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "database.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "database.fullname" .) .Values.serviceAccount.name -}}
{{- else }}
{{- default "default" .Values.serviceAccount.name -}}
{{- end }}
{{- end }}
{{/*
Name of image
*/}}
{{- define "database.image" -}}
{{- $name := default .Values.image.repository .Values.global.image.database.name }}
{{- $tag := default .Values.image.tag .Values.global.image.database.tag }}
{{- if .Values.global.image.repository }}
{{- .Values.global.image.repository | trimSuffix "/" }}/{{ $name }}:{{ $tag }}
{{- else }}
{{- $name }}:{{ $tag }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: datamate-mysql-utf8-config
data:
utf8.cnf: |
[mysqld]
# Set the server default character set to utf8mb4 (recommended; supports full UTF-8, including emoji)
character-set-server = utf8mb4
# Set the default collation
collation-server = utf8mb4_unicode_ci
# Alternatively, use utf8_general_ci (slightly better performance, but a looser collation)
default-time-zone = 'Asia/Shanghai'
log_error=/var/log/datamate/database/error.log
[client]
# Default character set for client connections
default-character-set = utf8mb4
[mysql]
# Default character set for the mysql command-line client
default-character-set = utf8mb4
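The chart's database Deployment only renders volumes and volumeMounts supplied through values, so wiring this ConfigMap in would presumably look something like the following values snippet; this is a sketch, and the conf.d mount path follows MySQL convention rather than anything mandated by the chart:

volumeMounts:
  - name: mysql-utf8-config
    mountPath: /etc/mysql/conf.d/utf8.cnf
    subPath: utf8.cnf
    readOnly: true
volumes:
  - name: mysql-utf8-config
    configMap:
      name: datamate-mysql-utf8-config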

View File

@@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "database.fullname" . }}
labels:
{{- include "database.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "database.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "database.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "database.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ include "database.image" . }}"
imagePullPolicy: {{ default .Values.global.image.pullPolicy .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
{{- with .Values.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.env }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "database.fullname" . }}
labels:
{{- include "database.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: {{ .Values.service.port }}
protocol: TCP
name: {{ .Chart.Name }}
selector:
{{- include "database.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "database.serviceAccountName" . }}
labels:
{{- include "database.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@@ -0,0 +1,110 @@
# Default values for datamate.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This sets the replica count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: "mysql"
# This sets the pull policy for images.
pullPolicy: "IfNotPresent"
# Overrides the image tag whose default is the chart appVersion.
tag: "8"
# This is for the secrets used to pull an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: "datamate-database"
fullnameOverride: "datamate-database"
env:
- name: MYSQL_ROOT_PASSWORD
value: "Huawei@123"
# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes Annotations on a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels on a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This is for setting up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
# This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 3306
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This is to set up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
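Since the backend loads its Spring configuration from /opt/backend/application.yml, its datasource would presumably point at the ClusterIP Service defined by this chart, roughly as below; the database name and JDBC parameters are assumptions, and the password shown is simply the chart default above, not a recommendation:

spring:
  datasource:
    url: jdbc:mysql://datamate-database:3306/datamate?serverTimezone=Asia/Shanghai
    username: root
    password: "Huawei@123"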

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,29 @@
apiVersion: v2
name: frontend
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.0.1"
dependencies:
- name: backend
repository: file://../backend
version: 0.0.1

View File

@@ -0,0 +1,75 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "frontend.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "frontend.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "frontend.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{/*
Common labels
*/}}
{{- define "frontend.labels" -}}
helm.sh/chart: {{ include "frontend.chart" . }}
{{ include "frontend.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "frontend.selectorLabels" -}}
app.kubernetes.io/name: {{ include "frontend.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "frontend.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "frontend.fullname" .) .Values.serviceAccount.name -}}
{{- else }}
{{- default "default" .Values.serviceAccount.name -}}
{{- end }}
{{- end }}
{{/*
Name of image
*/}}
{{- define "frontend.image" -}}
{{- $name := default .Values.image.repository .Values.global.image.frontend.name }}
{{- $tag := default .Values.image.tag .Values.global.image.frontend.tag }}
{{- if .Values.global.image.repository }}
{{- .Values.global.image.repository | trimSuffix "/" }}/{{ $name }}:{{ $tag }}
{{- else }}
{{- $name }}:{{ $tag }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "frontend.fullname" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "frontend.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "frontend.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "frontend.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ include "frontend.image" . }}"
imagePullPolicy: {{ default .Values.global.image.pullPolicy .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
{{- with .Values.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.env }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "frontend.fullname" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: {{ .Values.service.port }}
protocol: TCP
name: {{ .Chart.Name }}
{{- if eq .Values.service.type "NodePort" }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "frontend.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "frontend.serviceAccountName" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@@ -0,0 +1,109 @@
# Default values for datamate.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This sets the replica count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: "datamate-frontend"
# This sets the pull policy for images.
pullPolicy: "IfNotPresent"
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
# This is for the secrets used to pull an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: "datamate-frontend"
fullnameOverride: "datamate-frontend"
env: []
# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes Annotations on a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels on a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This is for setting up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: NodePort
# This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 80
nodePort: 30000
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This is to set up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: kuberay-operator
description: A Helm chart for deploying the Kuberay operator on Kubernetes.
version: 1.4.2
type: application
keywords:
- ray
- ray operator
- distributed computing
- data processing
- machine learning
- deep learning
- hyperparameter tuning
- reinforcement learning
- model serving
home: https://github.com/ray-project/kuberay
icon: https://github.com/ray-project/ray/raw/master/doc/source/images/ray_header_logo.png

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,322 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kuberay-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Allow the component label to be overridden, otherwise provide a default value.
*/}}
{{- define "kuberay-operator.component" -}}
{{- default .Chart.Name .Values.componentOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kuberay-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kuberay-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "kuberay-operator.labels" -}}
app.kubernetes.io/name: {{ include "kuberay-operator.name" . }}
helm.sh/chart: {{ include "kuberay-operator.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- /* Create the name of the deployment to use. */ -}}
{{- define "kuberay-operator.deployment.name" -}}
{{- include "kuberay-operator.fullname" . }}
{{- end -}}
{{/*
FeatureGates
*/}}
{{- define "kuberay.featureGates" -}}
{{- $features := "" }}
{{- range .Values.featureGates }}
{{- $str := printf "%s=%t," .name .enabled }}
{{- $features = print $features $str }}
{{- end }}
{{- with .Values.featureGates }}
--feature-gates={{ $features | trimSuffix "," }}
{{- end }}
{{- end }}
{{- /* Create the name of the service to use. */ -}}
{{- define "kuberay-operator.service.name" -}}
{{- include "kuberay-operator.fullname" . }}
{{- end -}}
{{- /* Create the name of the service account to use. */ -}}
{{- define "kuberay-operator.serviceAccount.name" -}}
{{- if .Values.serviceAccount.create -}}
{{- default (include "kuberay-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- /* Create the name of the cluster role to use. */ -}}
{{- define "kuberay-operator.clusterRole.name" -}}
{{- include "kuberay-operator.fullname" . -}}
{{- end -}}
{{- /* Create the name of the cluster role binding to use. */ -}}
{{- define "kuberay-operator.clusterRoleBinding.name" -}}
{{- include "kuberay-operator.fullname" . -}}
{{- end -}}
{{- /* Create the name of the role to use. */ -}}
{{- define "kuberay-operator.role.name" -}}
{{- include "kuberay-operator.fullname" . -}}
{{- end -}}
{{- /* Create the name of the role binding to use. */ -}}
{{- define "kuberay-operator.roleBinding.name" -}}
{{- include "kuberay-operator.fullname" . -}}
{{- end -}}
{{- /* Create the name of the leader election role to use. */ -}}
{{- define "kuberay-operator.leaderElectionRole.name" -}}
{{- include "kuberay-operator.fullname" . -}}-leader-election
{{- end -}}
{{- /* Create the name of the leader election role binding to use. */ -}}
{{- define "kuberay-operator.leaderElectionRoleBinding.name" -}}
{{- include "kuberay-operator.fullname" . -}}-leader-election
{{- end -}}
{{/*
Create a template to ensure consistency for Role and ClusterRole.
*/}}
{{- define "role.consistentRules" -}}
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
- pods/status
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/proxy
- services/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- services/proxy
verbs:
- create
- get
- patch
- update
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- update
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ray.io
resources:
- rayclusters
- rayjobs
- rayservices
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ray.io
resources:
- rayclusters/finalizers
- rayjobs/finalizers
- rayservices/finalizers
verbs:
- update
- apiGroups:
- ray.io
resources:
- rayclusters/status
- rayjobs/status
- rayservices/status
verbs:
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
{{- if or .batchSchedulerEnabled (eq .batchSchedulerName "volcano") }}
- apiGroups:
- scheduling.volcano.sh
resources:
- podgroups
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
{{- end -}}
{{- if or .batchSchedulerEnabled (eq .batchSchedulerName "scheduler-plugins") }}
- apiGroups:
- scheduling.x-k8s.io
resources:
- podgroups
verbs:
- create
- get
- list
- watch
{{- end -}}
{{- end -}}
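A sketch of how the scheduler-specific rules above get switched on through values.yaml (the standard option; note that the legacy batchScheduler.enabled flag would satisfy both conditional blocks at once):

batchScheduler:
  name: volcano   # adds the scheduling.volcano.sh podgroups rules and read-only CRD access

# or
# batchScheduler:
#   name: scheduler-plugins   # adds the scheduling.x-k8s.io podgroups rules instead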

View File

@@ -0,0 +1,150 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kuberay-operator.deployment.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: {{ include "kuberay-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "kuberay-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: {{ include "kuberay-operator.component" . }}
{{- with .Values.labels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "kuberay-operator.serviceAccount.name" . }}
{{- if and (.Values.logging.baseDir) (.Values.logging.fileName) }}
volumes:
- name: kuberay-logs
{{- if .Values.logging.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.logging.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
{{- with .Values.image.pullPolicy }}
imagePullPolicy: {{ . }}
{{- end }}
{{- if and (.Values.logging.baseDir) (.Values.logging.fileName) }}
volumeMounts:
- name: kuberay-logs
mountPath: "{{ .Values.logging.baseDir }}"
{{- end }}
command:
- {{ .Values.operatorCommand }}
args:
{{- $argList := list -}}
{{- $argList = append $argList (include "kuberay.featureGates" . | trim) -}}
{{- if .Values.batchScheduler -}}
{{- if .Values.batchScheduler.enabled -}}
{{- $argList = append $argList "--enable-batch-scheduler" -}}
{{- end -}}
{{- if .Values.batchScheduler.name -}}
{{- $argList = append $argList (printf "--batch-scheduler=%s" .Values.batchScheduler.name) -}}
{{- end -}}
{{- end -}}
{{- $watchNamespace := "" -}}
{{- if and .Values.singleNamespaceInstall (not .Values.watchNamespace) -}}
{{- $watchNamespace = .Release.Namespace -}}
{{- else if .Values.watchNamespace -}}
{{- $watchNamespace = join "," .Values.watchNamespace -}}
{{- end -}}
{{- if $watchNamespace -}}
{{- $argList = append $argList "--watch-namespace" -}}
{{- $argList = append $argList $watchNamespace -}}
{{- end -}}
{{- if and (.Values.logging.baseDir) (.Values.logging.fileName) -}}
{{- $argList = append $argList "--log-file-path" -}}
{{- $argList = append $argList (printf "%s/%s" .Values.logging.baseDir .Values.logging.fileName) -}}
{{- end -}}
{{- if .Values.logging.stdoutEncoder -}}
{{- $argList = append $argList "--log-stdout-encoder" -}}
{{- $argList = append $argList .Values.logging.stdoutEncoder -}}
{{- end -}}
{{- if .Values.logging.fileEncoder -}}
{{- $argList = append $argList "--log-file-encoder" -}}
{{- $argList = append $argList .Values.logging.fileEncoder -}}
{{- end -}}
{{- if hasKey .Values "useKubernetesProxy" -}}
{{- $argList = append $argList (printf "--use-kubernetes-proxy=%t" .Values.useKubernetesProxy) -}}
{{- end -}}
{{- if hasKey .Values "leaderElectionEnabled" -}}
{{- $argList = append $argList (printf "--enable-leader-election=%t" .Values.leaderElectionEnabled) -}}
{{- end -}}
{{- if and (hasKey .Values "metrics") (hasKey .Values.metrics "enabled") }}
{{- $argList = append $argList (printf "--enable-metrics=%t" .Values.metrics.enabled) -}}
{{- end -}}
{{- (printf "\n") -}}
{{- $argList | toYaml | indent 12 }}
ports:
- name: http
containerPort: 8080
protocol: TCP
{{- with .Values.env }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
livenessProbe:
httpGet:
path: /metrics
port: http
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
readinessProbe:
httpGet:
path: /metrics
port: http
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
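For reference, with the default values.yaml later in this commit (feature gates set, leader election on, metrics off, singleNamespaceInstall true with no watchNamespace), the argument-building block above should yield roughly the following container args; "datamate" here only stands in for whatever namespace the release is installed into:

args:
  - --feature-gates=RayClusterStatusConditions=true,RayJobDeletionPolicy=false
  - --watch-namespace
  - datamate
  - --log-stdout-encoder
  - json
  - --log-file-encoder
  - json
  - --enable-leader-election=true
  - --enable-metrics=false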

View File

@@ -0,0 +1,37 @@
{{- if .Values.rbacEnable }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "kuberay-operator.leaderElectionRole.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- update
{{- end }}

View File

@@ -0,0 +1,17 @@
{{- if .Values.rbacEnable -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "kuberay-operator.leaderElectionRoleBinding.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "kuberay-operator.leaderElectionRole.name" . }}
subjects:
- kind: ServiceAccount
name: {{ include "kuberay-operator.serviceAccount.name" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -0,0 +1,13 @@
{{- if and .Values.rbacEnable .Values.singleNamespaceInstall .Values.crNamespacedRbacEnable }}
{{- $watchNamespaces := default (list .Release.Namespace) .Values.watchNamespace }}
{{- range $namespace := $watchNamespaces }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "kuberay-operator.fullname" $ }}
namespace: {{ $namespace }}
labels: {{ include "kuberay-operator.labels" $ | nindent 4 }}
{{ include "role.consistentRules" (dict "batchSchedulerEnabled" $.Values.batchScheduler.enabled "batchSchedulerName" $.Values.batchScheduler.name) }}
{{- end }}
{{- end }}
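As the range loop above shows, listing multiple namespaces in values.yaml produces one namespaced Role (and, via the companion template below, one RoleBinding) per entry; a sketch with hypothetical namespace names:

rbacEnable: true
singleNamespaceInstall: true
crNamespacedRbacEnable: true
watchNamespace:
  - datamate
  - ray-workloads   # hypothetical second namespace to watch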

View File

@@ -0,0 +1,20 @@
{{- if and .Values.rbacEnable .Values.singleNamespaceInstall .Values.crNamespacedRbacEnable }}
{{- $watchNamespaces := default (list .Release.Namespace) .Values.watchNamespace }}
{{- range $namespace := $watchNamespaces }}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "kuberay-operator.fullname" $ }}
namespace: {{ $namespace }}
labels: {{ include "kuberay-operator.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "kuberay-operator.fullname" $ }}
subjects:
- kind: ServiceAccount
name: {{ include "kuberay-operator.serviceAccount.name" $ }}
namespace: {{ $.Release.Namespace }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- /* ClusterRole for end users to view and edit RayJob. */ -}}
{{- if and .Values.rbacEnable (not .Values.singleNamespaceInstall) }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rayjob-editor-role
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ray.io
resources:
- rayjobs
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ray.io
resources:
- rayjobs/status
verbs:
- get
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- /* ClusterRole for end users to view RayJob. */ -}}
{{- if and .Values.rbacEnable (not .Values.singleNamespaceInstall) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rayjob-viewer-role
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ray.io
resources:
- rayjobs
verbs:
- get
- list
- watch
- apiGroups:
- ray.io
resources:
- rayjobs/status
verbs:
- get
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- /* ClusterRole for end users to view and edit RayService. */ -}}
{{- if and .Values.rbacEnable (not .Values.singleNamespaceInstall) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rayservice-editor-role
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ray.io
resources:
- rayservices
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ray.io
resources:
- rayservices/status
verbs:
- get
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- /* ClusterRole for end users to view RayService. */ -}}
{{- if and .Values.rbacEnable (not .Values.singleNamespaceInstall) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rayservice-viewer-role
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ray.io
resources:
- rayservices
verbs:
- get
- list
- watch
- apiGroups:
- ray.io
resources:
- rayservices/status
verbs:
- get
{{- end }}

View File

@@ -0,0 +1,9 @@
{{- if and .Values.rbacEnable (not .Values.singleNamespaceInstall) }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "kuberay-operator.clusterRole.name" . }}
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
{{ include "role.consistentRules" (dict "batchSchedulerEnabled" .Values.batchScheduler.enabled "batchSchedulerName" .Values.batchScheduler.name) }}
{{- end }}

View File

@@ -0,0 +1,16 @@
{{- if and .Values.rbacEnable (not .Values.singleNamespaceInstall) }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "kuberay-operator.clusterRoleBinding.name" . }}
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "kuberay-operator.serviceAccount.name" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "kuberay-operator.clusterRole.name" . }}
{{- end }}

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "kuberay-operator.service.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "kuberay-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -0,0 +1,9 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kuberay-operator.serviceAccount.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kuberay-operator.labels" . | nindent 4 }}
{{- end -}}

View File

@@ -0,0 +1,23 @@
{{- if .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "kuberay-operator.fullname" . }}
namespace: {{ .Values.metrics.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- with .Values.metrics.serviceMonitor.selector }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- path: /metrics
targetPort: http
interval: {{ .Values.metrics.serviceMonitor.interval }}
honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "kuberay-operator.name" . }}
{{- end }}
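This ServiceMonitor stays disabled by default; a sketch of the values needed to turn it on for a typical kube-prometheus-stack install, where the release label and monitoring namespace are assumptions about that install rather than chart defaults:

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    interval: 30s
    honorLabels: true
    selector:
      release: prometheus   # assumed label your Prometheus uses to select ServiceMonitors
    namespace: monitoring   # assumed namespace where ServiceMonitors are collected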

View File

@@ -0,0 +1,221 @@
# Default values for kuberay-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- String to partially override release name.
nameOverride: datamate-kuberay-operator
# -- String to fully override release name.
fullnameOverride: datamate-kuberay-operator
# -- String to override component name.
componentOverride: datamate-kuberay-operator
image:
# -- Image repository.
repository: quay.io/kuberay/operator
# -- Image tag.
tag: v1.4.2
# -- Image pull policy.
pullPolicy: IfNotPresent
# -- Extra labels.
labels: {}
# -- Extra annotations.
annotations: {}
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: datamate-kuberay-operator
logging:
# -- Log encoder to use for stdout (one of `json` or `console`).
stdoutEncoder: json
# -- Log encoder to use for file logging (one of `json` or `console`).
fileEncoder: json
# -- Directory for kuberay-operator log file.
baseDir: ""
# -- File name for kuberay-operator log file.
fileName: ""
# -- EmptyDir volume size limit for kuberay-operator log file.
sizeLimit: ""
# Enable customized Kubernetes scheduler integration. If enabled, Ray workloads will be scheduled
# by the customized scheduler.
# * "enabled" is the legacy option and will be deprecated soon.
# * "name" is the standard option, expecting a scheduler name, supported values are
# "default", "volcano", "yunikorn", and "scheduler-plugins".
#
# Note: "enabled" and "name" should not be set at the same time. If both are set, an error will be thrown.
#
# Examples:
# 1. Use volcano (deprecated)
# batchScheduler:
# enabled: true
#
# 2. Use volcano
# batchScheduler:
# name: volcano
#
# 3. Use yunikorn
# batchScheduler:
# name: yunikorn
#
# 4. Use PodGroup
# batchScheduler:
# name: scheduler-plugins
#
batchScheduler:
# Deprecated. This option will be removed in the future.
# Note: kept for backwards compatibility. When set to true, it enables Volcano scheduler integration.
enabled: false
# Set the customized scheduler name. Supported values are "volcano" or "yunikorn"; do not set
# "batchScheduler.enabled=true" at the same time, as it will override this option.
name: ""
featureGates:
- name: RayClusterStatusConditions
enabled: true
- name: RayJobDeletionPolicy
enabled: false
# Configurations for KubeRay operator metrics.
metrics:
# -- Whether KubeRay operator should emit control plane metrics.
enabled: false
serviceMonitor:
# -- Enable a prometheus ServiceMonitor
enabled: false
# -- Prometheus ServiceMonitor interval
interval: 30s
# -- When true, honorLabels preserves the metric’s labels when they collide with the target’s labels.
honorLabels: true
# -- Prometheus ServiceMonitor selector
selector: {}
# release: prometheus
# -- Prometheus ServiceMonitor namespace
namespace: "" # "monitoring"
# -- Path to the operator binary
operatorCommand: /manager
# If useKubernetesProxy is set to true, the KubeRay operator will be configured with the --use-kubernetes-proxy flag.
# Use this option to configure kuberay-operator to communicate with Ray head pods by proxying through the Kubernetes API Server.
# useKubernetesProxy: true
# -- If leaderElectionEnabled is set to true, the KubeRay operator will use leader election for high availability.
leaderElectionEnabled: true
# -- If rbacEnable is set to false, no RBAC resources will be created, including the Role for leader election, the Role for Pods and Services, and so on.
rbacEnable: true
# -- When crNamespacedRbacEnable is set to true, the KubeRay operator will create a Role for RayCluster preparation (e.g., Pods, Services)
# and a corresponding RoleBinding for each namespace listed in the "watchNamespace" parameter. Please note that even if crNamespacedRbacEnable
# is set to false, the Role and RoleBinding for leader election will still be created.
#
# Note:
# (1) This variable is only effective when rbacEnable and singleNamespaceInstall are both set to true.
# (2) In most cases, it should be set to true, unless you are using a Kubernetes cluster managed by GitOps tools such as ArgoCD.
crNamespacedRbacEnable: true
# -- When singleNamespaceInstall is true:
# - Install namespaced RBAC resources such as Role and RoleBinding instead of cluster-scoped ones like ClusterRole and ClusterRoleBinding so that
# the chart can be installed by users with permissions restricted to a single namespace.
# (Please note that this excludes the CRDs, which can only be installed at the cluster scope.)
# - If "watchNamespace" is not set, the KubeRay operator will, by default, only listen
# to resource events within its own namespace.
singleNamespaceInstall: true
# The KubeRay operator will watch the custom resources in the namespaces listed in the "watchNamespace" parameter.
# watchNamespace:
# - n1
# - n2
# -- Environment variables.
env:
# If not set or set to true, kuberay auto injects an init container waiting for ray GCS.
# If false, you will need to inject your own init container to ensure ray GCS is up before the ray workers start.
# Warning: we highly recommend setting this to true and letting kuberay handle it for you.
# - name: ENABLE_INIT_CONTAINER_INJECTION
# value: "true"
# If set to true, kuberay creates a normal ClusterIP service for a Ray Head instead of a Headless service. Default to false.
- name: ENABLE_RAY_HEAD_CLUSTER_IP_SERVICE
value: "true"
# If not set or set to "", kuberay will pick up the default k8s cluster domain `cluster.local`
# Otherwise, kuberay will use your custom domain
# - name: CLUSTER_DOMAIN
# value: ""
# If not set or set to false, when running on OpenShift with Ingress creation enabled, kuberay will create an OpenShift Route.
# Otherwise, regardless of the cluster type, with Ingress creation enabled kuberay will create an Ingress.
# - name: USE_INGRESS_ON_OPENSHIFT
# value: "true"
# Unconditionally requeue after the number of seconds specified in the
# environment variable RAYCLUSTER_DEFAULT_REQUEUE_SECONDS_ENV. If the
# environment variable is not set, requeue after the default value (300).
# - name: RAYCLUSTER_DEFAULT_REQUEUE_SECONDS_ENV
# value: 300
# If not set or set to "true", KubeRay will clean up the Redis storage namespace when a GCS FT-enabled RayCluster is deleted.
# - name: ENABLE_GCS_FT_REDIS_CLEANUP
# value: "true"
# For LLM serving, some users might not have sufficient GPU resources to run two RayClusters simultaneously.
# Therefore, KubeRay offers ENABLE_ZERO_DOWNTIME as a feature flag for zero-downtime upgrades.
# - name: ENABLE_ZERO_DOWNTIME
# value: "true"
# This environment variable for the KubeRay operator is used to determine whether to enable
# the injection of readiness and liveness probes into Ray head and worker containers.
# Enabling this feature contributes to the robustness of Ray clusters.
# - name: ENABLE_PROBES_INJECTION
# value: "true"
# If set to true, the RayJob CR itself will be deleted if shutdownAfterJobFinishes is set to true. Note that all resources created by the RayJob CR will be deleted, including the K8s Job. Otherwise, only the RayCluster CR will be deleted. Default is false.
# - name: DELETE_RAYJOB_CR_AFTER_JOB_FINISHES
# value: "false"
# -- Resource requests and limits for containers.
resources:
limits:
cpu: 100m
# Anecdotally, managing 500 Ray pods requires roughly 500MB memory.
# Monitor memory usage and adjust as needed.
memory: 512Mi
# requests:
# cpu: 100m
# memory: 512Mi
# @Ignore -- Pod liveness probe configuration.
livenessProbe:
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
# @Ignore -- Pod readiness probe configuration.
readinessProbe:
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
# -- Set up `securityContext` to improve Pod security.
podSecurityContext: {}
# @ignore -- Set up `securityContext` to improve container security.
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
service:
# -- Service type.
type: ClusterIP
# -- Service port.
port: 8080
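# Note: the KubeRay operator typically serves its metrics endpoint on port 8080 (the
# project default), so this Service is commonly used to scrape operator metrics; if you
# change the operator's metrics address, update this port to match.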

View File

@@ -0,0 +1,5 @@
apiVersion: v1
description: A Helm chart for Kubernetes
name: ray-cluster
version: 1.4.2
icon: https://github.com/ray-project/ray/raw/master/doc/source/images/ray_header_logo.png

View File

@@ -0,0 +1,55 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "ray-cluster.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ray-cluster.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "ray-cluster.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "ray-cluster.labels" -}}
helm.sh/chart: {{ include "ray-cluster.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "ray-cluster.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "ray-cluster.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,407 @@
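{{/*
RayCluster resource. This template renders the head group, the default worker group
(unless .Values.worker.disabled is set), and one extra worker group per entry in
.Values.additionalWorkerGroups.
*/}}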
apiVersion: ray.io/v1
kind: RayCluster
metadata:
name: {{ include "ray-cluster.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "ray-cluster.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.head.rayVersion }}
rayVersion: {{ . }}
{{- end }}
{{- with .Values.head.enableInTreeAutoscaling }}
enableInTreeAutoscaling: {{ . }}
{{- end }}
{{- with .Values.head.autoscalerOptions }}
autoscalerOptions:
{{- toYaml . | nindent 4 }}
{{- end }}
headGroupSpec:
{{- with .Values.head.headService }}
headService:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.service.type }}
serviceType: {{ . }}
{{- end }}
{{- if or .Values.head.rayStartParams .Values.head.initArgs }}
rayStartParams:
{{- range $key, $val := .Values.head.rayStartParams }}
{{ $key }}: {{ $val | quote }}
{{- end }}
{{- /* initArgs is a deprecated alias for rayStartParams. */}}
{{- range $key, $val := .Values.head.initArgs }}
{{ $key }}: {{ $val | quote }}
{{- end }}
{{- else }}
rayStartParams: {}
{{- end }}
template:
metadata:
labels:
{{- include "ray-cluster.labels" . | nindent 10 }}
{{- with .Values.head.labels }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.annotations }}
annotations:
{{- toYaml . | nindent 10 }}
{{- end }}
spec:
{{- with .Values.head.initContainers }}
initContainers:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: ray-head
{{- if .Values.head.image }}
image: {{ .Values.head.image.repository }}:{{ .Values.head.image.tag }}
imagePullPolicy: {{ .Values.head.image.pullPolicy }}
{{- else }}
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- end }}
{{- with .Values.head.command }}
command:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.args }}
args:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with concat .Values.common.containerEnv .Values.head.containerEnv }}
env:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.envFrom }}
envFrom:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.ports }}
ports:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.head.lifecycle }}
lifecycle:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.head.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.head.sidecarContainers }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.volumes }}
volumes:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.affinity }}
affinity:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.tolerations }}
tolerations:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.head.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with .Values.head.priority }}
priority: {{ . }}
{{- end }}
{{- with .Values.head.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.head.restartPolicy }}
restartPolicy: {{ . }}
{{- end }}
{{- with .Values.head.serviceAccountName }}
serviceAccountName: {{ . }}
{{- end }}
{{- with .Values.head.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 10 }}
{{- end }}
workerGroupSpecs:
{{- if not .Values.worker.disabled }}
- groupName: {{ .Values.worker.groupName }}
replicas: {{ .Values.worker.replicas }}
minReplicas: {{ .Values.worker.minReplicas | default 0 }}
maxReplicas: {{ .Values.worker.maxReplicas | default 2147483647 }}
numOfHosts: {{ .Values.worker.numOfHosts | default 1 }}
{{- if or .Values.worker.rayStartParams .Values.worker.initArgs }}
rayStartParams:
{{- range $key, $val := .Values.worker.rayStartParams }}
{{ $key }}: {{ $val | quote }}
{{- end }}
{{- /* initArgs is a deprecated alias for rayStartParams. */}}
{{- range $key, $val := .Values.worker.initArgs }}
{{ $key }}: {{ $val | quote }}
{{- end }}
{{- else }}
rayStartParams: {}
{{- end }}
template:
metadata:
labels:
{{- include "ray-cluster.labels" . | nindent 10 }}
{{- with .Values.worker.labels }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.annotations }}
annotations:
{{- toYaml . | nindent 10 }}
{{- end }}
spec:
{{- with .Values.worker.initContainers }}
initContainers:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: ray-worker
{{- if .Values.worker.image }}
image: {{ .Values.worker.image.repository }}:{{ .Values.worker.image.tag }}
imagePullPolicy: {{ .Values.worker.image.pullPolicy }}
{{- else }}
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- end }}
{{- with .Values.worker.command }}
command:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.args }}
args:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with concat .Values.common.containerEnv .Values.worker.containerEnv }}
env:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.envFrom }}
envFrom:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.ports }}
ports:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker.lifecycle }}
lifecycle:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker.sidecarContainers }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.affinity }}
affinity:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.worker.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with .Values.worker.priority }}
priority: {{ . }}
{{- end }}
{{- with .Values.worker.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.restartPolicy }}
restartPolicy: {{ . }}
{{- end }}
{{- with .Values.worker.serviceAccountName }}
serviceAccountName: {{ . }}
{{- end }}
{{- with .Values.worker.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- range $groupName, $values := .Values.additionalWorkerGroups }}
{{- if not $values.disabled }}
- groupName: {{ $groupName }}
replicas: {{ $values.replicas }}
minReplicas: {{ $values.minReplicas | default 0 }}
maxReplicas: {{ $values.maxReplicas | default 2147483647 }}
numOfHosts: {{ $values.numOfHosts | default 1 }}
{{- if or $values.rayStartParams $values.initArgs }}
rayStartParams:
{{- range $key, $val := $values.rayStartParams }}
{{ $key }}: {{ $val | quote }}
{{- end }}
{{- /* initArgs is a deprecated alias for rayStartParams. */}}
{{- range $key, $val := $values.initArgs }}
{{ $key }}: {{ $val | quote }}
{{- end }}
{{- else }}
rayStartParams: {}
{{- end }}
template:
metadata:
labels:
{{- include "ray-cluster.labels" $ | nindent 10 }}
{{- with $values.labels }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.annotations }}
annotations:
{{- toYaml . | nindent 10 }}
{{- end }}
spec:
{{- with $values.initContainers }}
initContainers:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: ray-worker
{{- if $values.image }}
image: {{ $values.image.repository }}:{{ $values.image.tag }}
imagePullPolicy: {{ $values.image.pullPolicy }}
{{- else }}
image: {{ $.Values.image.repository }}:{{ $.Values.image.tag }}
imagePullPolicy: {{ $.Values.image.pullPolicy }}
{{- end }}
{{- with $values.command }}
command:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.args }}
args:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with concat $.Values.common.containerEnv ($values.containerEnv | default list) }}
env:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.envFrom }}
envFrom:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.ports }}
ports:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.lifecycle }}
lifecycle:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with $values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with $values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with $values.sidecarContainers }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $.Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.affinity }}
affinity:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with $values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $values.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with $values.priority }}
priority: {{ . }}
{{- end }}
{{- with $values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $values.restartPolicy }}
restartPolicy: {{ . }}
{{- end }}
{{- with $values.serviceAccountName }}
serviceAccountName: {{ . }}
{{- end }}
{{- with $values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,15 @@
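# Exposes the operator_runtime.py sidecar that runs alongside the Ray head
# (see head.sidecarContainers in values.yaml). KubeRay labels head pods with
# ray.io/node-type=head, so this selector routes port 8081 traffic to that sidecar.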
apiVersion: v1
kind: Service
metadata:
name: datamate-runtime
labels:
ray.io/node-type: head
spec:
type: ClusterIP
ports:
- port: 8081
targetPort: 8081
protocol: TCP
selector:
ray.io/node-type: head

View File

@@ -0,0 +1,396 @@
# Default values for ray-cluster.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# The KubeRay community welcomes PRs to expose additional configuration
# in this Helm chart.
image:
repository: datamate-runtime
tag: latest
pullPolicy: IfNotPresent
nameOverride: "kuberay"
fullnameOverride: "datamate-raycluster"
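# fullnameOverride is passed through the ray-cluster.fullname helper (truncated to 63
# characters), so the RayCluster created by this chart will be named datamate-raycluster.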
imagePullSecrets: []
# - name: an-existing-secret
# Values under `common` are shared between the head and worker.
common:
# containerEnv specifies environment variables for the Ray head and worker containers.
# Follows standard K8s container env schema.
containerEnv: []
# - name: BLAH
# value: VAL
head:
# rayVersion determines the autoscaler's image version.
# It should match the Ray version in the image of the containers.
# rayVersion: 2.46.0
# If enableInTreeAutoscaling is true, the autoscaler sidecar will be added to the Ray head pod.
# Ray autoscaler integration is supported only for Ray versions >= 1.11.0
# Ray autoscaler integration is Beta with KubeRay >= 0.3.0 and Ray >= 2.0.0.
# enableInTreeAutoscaling: true
# autoscalerOptions is an OPTIONAL field specifying configuration overrides for the Ray autoscaler.
# The example configuration shown below represents the DEFAULT values.
# autoscalerOptions:
# upscalingMode: Default
# idleTimeoutSeconds is the number of seconds to wait before scaling down a worker pod which is not using Ray resources.
# idleTimeoutSeconds: 60
# imagePullPolicy optionally overrides the autoscaler container's default image pull policy (IfNotPresent).
# imagePullPolicy: IfNotPresent
# Optionally specify the autoscaler container's securityContext.
# securityContext: {}
# env: []
# envFrom: []
# resources specifies optional resource request and limit overrides for the autoscaler container.
# For large Ray clusters, we recommend monitoring container resource usage to determine if overriding the defaults is required.
# resources:
# limits:
# cpu: "500m"
# memory: "512Mi"
# requests:
# cpu: "500m"
# memory: "512Mi"
initContainers: []
labels: {}
# Note: From KubeRay v0.6.0, users need to create the ServiceAccount by themselves if they specify the `serviceAccountName`
# in the headGroupSpec. See https://github.com/ray-project/kuberay/pull/1128 for more details.
serviceAccountName: ""
restartPolicy: ""
rayStartParams:
object-store-memory: '78643200'
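# Other `ray start` options can be added here in the same way; the values below are
# illustrative only, not part of the original configuration:
#   dashboard-host: '0.0.0.0'   # listen on all interfaces
#   num-cpus: '0'               # keep Ray tasks off the head node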
# containerEnv specifies environment variables for the Ray container.
# Follows standard K8s container env schema.
containerEnv:
- name: RAY_DEDUP_LOGS
value: "0"
- name: RAY_TQDM_PATCH_PRINT
value: "0"
- name: MYSQL_HOST
value: "datamate-database"
- name: MYSQL_PORT
value: "3306"
- name: MYSQL_USER
value: "root"
- name: MYSQL_PASSWORD
value: "Huawei@123"
- name: MYSQL_DATABASE
value: "datamate"
# - name: EXAMPLE_ENV
# value: "1"
envFrom: []
# - secretRef:
# name: my-env-secret
# ports optionally allows specifying ports for the Ray container.
# ports: []
# resource requests and limits for the Ray head container.
# Modify as needed for your application.
# Note that the resources in this example are much too small for production;
# we don't recommend allocating less than 8G memory for a Ray pod in production.
# Ray pods should be sized to take up entire K8s nodes when possible.
# Always set CPU and memory limits for Ray pods.
# It is usually best to set requests equal to limits.
# See https://docs.ray.io/en/latest/cluster/kubernetes/user-guides/config.html#resources
# for further guidance.
resources:
limits:
cpu: "2"
# To avoid out-of-memory issues, never allocate less than 2G memory for the Ray head.
memory: "4G"
requests:
cpu: "1"
memory: "2G"
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod security context.
podSecurityContext: {}
# Ray container security context.
securityContext: {}
# Optional: The following volumes/volumeMounts configurations are optional but recommended because
# Ray writes logs to /tmp/ray/session_latest/logs instead of stdout/stderr.
volumes:
- name: log-volume
hostPath:
path: /opt/datamate/data/log
type: DirectoryOrCreate
- name: dataset-volume
hostPath:
path: /opt/datamate/data/dataset
type: DirectoryOrCreate
- name: flow-volume
hostPath:
path: /opt/datamate/data/flow
type: DirectoryOrCreate
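# If you prefer PersistentVolumeClaims over hostPath (for example on multi-node
# clusters), a volume entry could instead look like the following; the claim name
# is hypothetical:
# - name: log-volume
#   persistentVolumeClaim:
#     claimName: datamate-log-pvc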
volumeMounts:
- mountPath: /tmp/ray
name: log-volume
subPath: ray/head
- mountPath: /dataset
name: dataset-volume
- mountPath: /flow
name: flow-volume
# sidecarContainers specifies additional containers to attach to the Ray pod.
# Follows standard K8s container spec.
sidecarContainers:
- name: runtime
image: datamate-runtime
imagePullPolicy: IfNotPresent
command:
- python
- /opt/runtime/datamate/operator_runtime.py
- --port
- "8081"
env:
- name: MYSQL_HOST
value: "datamate-database"
- name: MYSQL_PORT
value: "3306"
- name: MYSQL_USER
value: "root"
- name: MYSQL_PASSWORD
value: "Huawei@123"
- name: MYSQL_DATABASE
value: "datamate"
ports:
- containerPort: 8081
volumeMounts:
- mountPath: /tmp/ray
name: log-volume
subPath: ray/head
- mountPath: /var/log/datamate
name: log-volume
- mountPath: /dataset
name: dataset-volume
- mountPath: /flow
name: flow-volume
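# The datamate-runtime Service in this chart's templates selects ray.io/node-type=head
# and targets port 8081, which routes traffic to this runtime sidecar.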
# See docs/guidance/pod-command.md for more details about how to specify
# the container command for the head Pod.
command: []
args: []
# Optional, for the user to provide any additional fields to the service.
# See https://pkg.go.dev/k8s.io/Kubernetes/pkg/api/v1#Service
headService: {}
# metadata:
# annotations:
# prometheus.io/scrape: "true"
# Custom pod DNS configuration
# See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
# dnsConfig:
# nameservers:
# - 8.8.8.8
# searches:
# - example.local
# options:
# - name: ndots
# value: "2"
# - name: edns0
topologySpreadConstraints: []
worker:
# If you want to disable the default worker group,
# uncomment the line below
# disabled: true
groupName: workergroup
replicas: 1
minReplicas: 1
maxReplicas: 3
labels: {}
serviceAccountName: ""
restartPolicy: ""
rayStartParams: {}
initContainers: []
# containerEnv specifies environment variables for the Ray container.
# Follows standard K8s container env schema.
containerEnv:
- name: RAY_DEDUP_LOGS
value: "0"
- name: RAY_TQDM_PATCH_PRINT
value: "0"
- name: MYSQL_HOST
value: "datamate-database"
- name: MYSQL_PORT
value: "3306"
- name: MYSQL_USER
value: "root"
- name: MYSQL_PASSWORD
value: "Huawei@123"
- name: MYSQL_DATABASE
value: "datamate"
# - name: EXAMPLE_ENV
# value: "1"
envFrom: []
# - secretRef:
# name: my-env-secret
# ports optionally allows specifying ports for the Ray container.
# ports: []
# resource requests and limits for the Ray worker container.
# Modify as needed for your application.
# Note that the resources in this example are much too small for production;
# we don't recommend allocating less than 8G memory for a Ray pod in production.
# Ray pods should be sized to take up entire K8s nodes when possible.
# Always set CPU and memory limits for Ray pods.
# It is usually best to set requests equal to limits.
# See https://docs.ray.io/en/latest/cluster/kubernetes/user-guides/config.html#resources
# for further guidance.
resources:
limits:
cpu: "4"
memory: "8G"
requests:
cpu: "1"
memory: "1G"
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod security context.
podSecurityContext: {}
# Ray container security context.
securityContext: {}
# Optional: The following volumes/volumeMounts configurations are optional but recommended because
# Ray writes logs to /tmp/ray/session_latest/logs instead of stdout/stderr.
volumes:
- name: log-volume
hostPath:
path: /opt/datamate/data/log
type: DirectoryOrCreate
- name: dataset-volume
hostPath:
path: /opt/datamate/data/dataset
type: DirectoryOrCreate
- name: flow-volume
hostPath:
path: /opt/datamate/data/flow
type: DirectoryOrCreate
volumeMounts:
- mountPath: /tmp/ray
name: log-volume
subPath: ray/worker
- mountPath: /dataset
name: dataset-volume
- mountPath: /flow
name: flow-volume
# sidecarContainers specifies additional containers to attach to the Ray pod.
# Follows standard K8s container spec.
sidecarContainers: []
# See docs/guidance/pod-command.md for more details about how to specify
# the container command for the worker Pod.
command: []
args: []
topologySpreadConstraints: []
# Custom pod DNS configuration
# See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
# dnsConfig:
# nameservers:
# - 8.8.8.8
# searches:
# - example.local
# options:
# - name: ndots
# value: "2"
# - name: edns0
# The map's key is used as the groupName.
# For example, the key `smallGroup` in the map below
# will be used as the groupName.
additionalWorkerGroups:
smallGroup:
# Disabled by default
disabled: true
replicas: 0
minReplicas: 0
maxReplicas: 3
labels: {}
serviceAccountName: ""
restartPolicy: ""
rayStartParams: {}
# containerEnv specifies environment variables for the Ray container.
# Follows standard K8s container env schema.
containerEnv: []
# - name: EXAMPLE_ENV
# value: "1"
envFrom: []
# - secretRef:
# name: my-env-secret
# ports optionally allows specifying ports for the Ray container.
# ports: []
# resource requests and limits for the Ray worker container.
# Modify as needed for your application.
# Note that the resources in this example are much too small for production;
# we don't recommend allocating less than 8G memory for a Ray pod in production.
# Ray pods should be sized to take up entire K8s nodes when possible.
# Always set CPU and memory limits for Ray pods.
# It is usually best to set requests equal to limits.
# See https://docs.ray.io/en/latest/cluster/kubernetes/user-guides/config.html#resources
# for further guidance.
resources:
limits:
cpu: 1
memory: "1G"
requests:
cpu: 1
memory: "1G"
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod security context.
podSecurityContext: {}
# Ray container security context.
securityContext: {}
# Optional: The following volumes/volumeMounts configurations are optional but recommended because
# Ray writes logs to /tmp/ray/session_latest/logs instead of stdout/stderr.
volumes:
- name: log-volume
hostPath:
path: /opt/datamate/data/log
type: DirectoryOrCreate
- name: dataset-volume
hostPath:
path: /opt/datamate/data/dataset
type: DirectoryOrCreate
- name: flow-volume
hostPath:
path: /opt/datamate/data/flow
type: DirectoryOrCreate
volumeMounts:
- mountPath: /tmp/ray
name: log-volume
subPath: ray
- mountPath: /dataset
name: dataset-volume
- mountPath: /flow
name: flow-volume
sidecarContainers: []
# See docs/guidance/pod-command.md for more details about how to specify
# the container command for the worker Pod.
command: []
args: []
# Topology Spread Constraints for worker pods
# See: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
topologySpreadConstraints: []
# Custom pod DNS configuration
# See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
# dnsConfig:
# nameservers:
# - 8.8.8.8
# searches:
# - example.local
# options:
# - name: ndots
# value: "2"
# - name: edns0
# Configuration for Head's Kubernetes Service
service:
# This is optional, and the default is ClusterIP.
type: NodePort
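# Note: only `type` is consumed by the RayCluster template (it becomes
# spec.headGroupSpec.serviceType); set any other head Service fields via head.headService.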