# Default values for kuberay-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# -- String to partially override release name.
nameOverride: datamate-kuberay-operator

# -- String to fully override release name.
fullnameOverride: datamate-kuberay-operator

# -- String to override component name.
componentOverride: datamate-kuberay-operator

image:
  # -- Image repository.
  repository: quay.io/kuberay/operator

  # -- Image tag.
  tag: v1.4.2

  # -- Image pull policy.
  pullPolicy: IfNotPresent

# -- Extra labels.
labels: {}

# -- Extra annotations.
annotations: {}

serviceAccount:
  # -- Specifies whether a service account should be created.
  create: true
  # -- The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template.
  name: datamate-kuberay-operator

logging:
  # -- Log encoder to use for stdout (one of `json` or `console`).
  stdoutEncoder: json
  # -- Log encoder to use for file logging (one of `json` or `console`).
  fileEncoder: json
  # -- Directory for kuberay-operator log file.
  baseDir: ""
  # -- File name for kuberay-operator log file.
  fileName: ""
  # -- EmptyDir volume size limit for kuberay-operator log file.
  sizeLimit: ""
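
# For example, to also write JSON logs to a file on an emptyDir volume, one
# might set (a sketch; the directory, file name, and size below are
# illustrative values, not chart defaults):
# logging:
#   baseDir: /var/log/kuberay
#   fileName: kuberay-operator.log
#   sizeLimit: 100Mi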

# Enable customized Kubernetes scheduler integration. If enabled, Ray workloads will be scheduled
# by the customized scheduler.
#  * "enabled" is the legacy option and will be deprecated soon.
#  * "name" is the standard option, expecting a scheduler name, supported values are
#    "default", "volcano", "yunikorn", and "scheduler-plugins".
#
# Note: "enabled" and "name" should not be set at the same time. If both are set, an error will be thrown.
#
# Examples:
#  1. Use volcano (deprecated)
#     batchScheduler:
#       enabled: true
#
#  2. Use volcano
#     batchScheduler:
#       name: volcano
#
#  3. Use yunikorn
#     batchScheduler:
#       name: yunikorn
#
#  4. Use PodGroup
#     batchScheduler:
#       name: scheduler-plugins
#
batchScheduler:
  # Deprecated. This option will be removed in the future.
  # Kept for backwards compatibility: when set to true, it enables Volcano scheduler integration.
  enabled: false
  # Set the customized scheduler name; supported values are "volcano" or "yunikorn". Do not set
  # "batchScheduler.enabled=true" at the same time, as it will override this option.
  name: ""

featureGates:
  - name: RayClusterStatusConditions
    enabled: true
  - name: RayJobDeletionPolicy
    enabled: false
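
# A sketch of how these entries are consumed, assuming the chart's default
# templating (not verified against a specific chart version): each entry is
# rendered into the operator's --feature-gates argument, e.g.
#   --feature-gates=RayClusterStatusConditions=true,RayJobDeletionPolicy=false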

# Configurations for KubeRay operator metrics.
metrics:
  # -- Whether KubeRay operator should emit control plane metrics.
  enabled: false
  serviceMonitor:
    # -- Enable a prometheus ServiceMonitor
    enabled: false
    # -- Prometheus ServiceMonitor interval
    interval: 30s
    # -- When true, honorLabels preserves the metric's labels when they collide with the target's labels.
    honorLabels: true
    # -- Prometheus ServiceMonitor selector
    selector: {}
    # release: prometheus
    # -- Prometheus ServiceMonitor namespace
    namespace: "" # "monitoring"
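
# For example, to scrape operator metrics with a kube-prometheus-stack
# installation (a sketch; the "prometheus" release label and "monitoring"
# namespace are assumptions taken from the hints above, not defaults):
# metrics:
#   enabled: true
#   serviceMonitor:
#     enabled: true
#     selector:
#       release: prometheus
#     namespace: monitoring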

# -- Path to the operator binary
operatorCommand: /manager

# If useKubernetesProxy is set to true, the KubeRay operator will be configured with the --use-kubernetes-proxy flag.
# Use this option to configure the kuberay-operator to communicate with Ray head pods by proxying through the Kubernetes API Server.
# useKubernetesProxy: true

# -- If leaderElectionEnabled is set to true, the KubeRay operator will use leader election for high availability.
leaderElectionEnabled: true

# -- If rbacEnable is set to false, no RBAC resources will be created, including the Role for leader election, the Role for Pods and Services, and so on.
rbacEnable: true

# -- When crNamespacedRbacEnable is set to true, the KubeRay operator will create a Role for RayCluster preparation (e.g., Pods, Services)
# and a corresponding RoleBinding for each namespace listed in the "watchNamespace" parameter. Please note that even if crNamespacedRbacEnable
# is set to false, the Role and RoleBinding for leader election will still be created.
#
# Note:
# (1) This variable is only effective when rbacEnable and singleNamespaceInstall are both set to true.
# (2) In most cases, it should be set to true, unless you are using a Kubernetes cluster managed by GitOps tools such as ArgoCD.
crNamespacedRbacEnable: true

# -- When singleNamespaceInstall is true:
# - Install namespaced RBAC resources such as Role and RoleBinding instead of cluster-scoped ones like ClusterRole and ClusterRoleBinding so that
#   the chart can be installed by users with permissions restricted to a single namespace.
#   (Please note that this excludes the CRDs, which can only be installed at the cluster scope.)
# - If "watchNamespace" is not set, the KubeRay operator will, by default, only listen
#   to resource events within its own namespace.
singleNamespaceInstall: true

# The KubeRay operator will watch the custom resources in the namespaces listed in the "watchNamespace" parameter.
# watchNamespace:
#   - n1
#   - n2
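
# For example, a single-namespace install that also watches one extra
# namespace might combine the two options above (a sketch; "ray-apps" is a
# hypothetical namespace name):
# singleNamespaceInstall: true
# watchNamespace:
#   - ray-apps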

# -- Environment variables.
env:
  # If not set or set to true, kuberay auto injects an init container waiting for ray GCS.
  # If false, you will need to inject your own init container to ensure ray GCS is up before the ray workers start.
  # Warning: we highly recommend setting this to true and letting kuberay handle it for you.
  # - name: ENABLE_INIT_CONTAINER_INJECTION
  #   value: "true"
  # If set to true, kuberay creates a normal ClusterIP service for a Ray head instead of a headless service. Defaults to false.
  - name: ENABLE_RAY_HEAD_CLUSTER_IP_SERVICE
    value: "true"
  # If not set or set to "", kuberay will pick up the default k8s cluster domain `cluster.local`.
  # Otherwise, kuberay will use your custom domain.
  # - name: CLUSTER_DOMAIN
  #   value: ""
  # If not set or set to false, when running on OpenShift with Ingress creation enabled, kuberay will create an OpenShift Route.
  # Otherwise, regardless of the type of cluster, when Ingress creation is enabled, kuberay will create an Ingress.
  # - name: USE_INGRESS_ON_OPENSHIFT
  #   value: "true"
  # Unconditionally requeue after the number of seconds specified in the
  # environment variable RAYCLUSTER_DEFAULT_REQUEUE_SECONDS_ENV. If the
  # environment variable is not set, requeue after the default value (300).
  # - name: RAYCLUSTER_DEFAULT_REQUEUE_SECONDS_ENV
  #   value: 300
  # If not set or set to "true", KubeRay will clean up the Redis storage namespace when a GCS FT-enabled RayCluster is deleted.
  # - name: ENABLE_GCS_FT_REDIS_CLEANUP
  #   value: "true"
  # For LLM serving, some users might not have sufficient GPU resources to run two RayClusters simultaneously.
  # Therefore, KubeRay offers ENABLE_ZERO_DOWNTIME as a feature flag for zero-downtime upgrades.
  # - name: ENABLE_ZERO_DOWNTIME
  #   value: "true"
  # This environment variable for the KubeRay operator is used to determine whether to enable
  # the injection of readiness and liveness probes into Ray head and worker containers.
  # Enabling this feature contributes to the robustness of Ray clusters.
  # - name: ENABLE_PROBES_INJECTION
  #   value: "true"
  # If set to true, the RayJob CR itself will be deleted if shutdownAfterJobFinishes is set to true.
  # Note that all resources created by the RayJob CR will be deleted, including the K8s Job.
  # Otherwise, only the RayCluster CR will be deleted. Defaults to false.
  # - name: DELETE_RAYJOB_CR_AFTER_JOB_FINISHES
  #   value: "false"

# -- Resource requests and limits for containers.
resources:
  limits:
    cpu: 100m
    # Anecdotally, managing 500 Ray pods requires roughly 500MB of memory.
    # Monitor memory usage and adjust as needed.
    memory: 512Mi
  # requests:
  #   cpu: 100m
  #   memory: 512Mi

# @ignore -- Pod liveness probe configuration.
livenessProbe:
  initialDelaySeconds: 10
  periodSeconds: 5
  failureThreshold: 5

# @ignore -- Pod readiness probe configuration.
readinessProbe:
  initialDelaySeconds: 10
  periodSeconds: 5
  failureThreshold: 5

# -- Set up `securityContext` to improve Pod security.
podSecurityContext: {}

# @ignore -- Set up `securityContext` to improve container security.
securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop:
      - ALL
  runAsNonRoot: true
  seccompProfile:
    type: RuntimeDefault
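
# For example, to run the whole Pod with an explicit non-root user and fsGroup
# (a sketch; the numeric IDs are illustrative, not chart defaults):
# podSecurityContext:
#   runAsUser: 1000
#   fsGroup: 2000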

service:
  # -- Service type.
  type: ClusterIP
  # -- Service port.
  port: 8080