apiVersion: mcad.ibm.com/v1beta1
kind: AppWrapper
metadata:
  name: raycluster-complete-1
  namespace: default
spec:
  resources:
    GenericItems:
    - replicas: 1
      custompodresources: # Optional section that specifies resource requirements
                          # for non-standard k8s resources; it follows the same format
                          # as standard k8s resource specifications.
      - replicas: 2 # Because AppWrappers are generic, they must define the resultant pods needed
                    # to fulfill a request, since the request values cannot be reliably extracted
                    # from the generictemplate below.
        requests:
          cpu: 8
          memory: 512Mi
        limits:
          cpu: 10
          memory: 1G
      generictemplate:
        # The resource requests and limits in this config are too small for production!
        # For examples with more realistic resource configuration, see
        # ray-cluster.complete.large.yaml and
        # ray-cluster.autoscaler.large.yaml.
        apiVersion: ray.io/v1alpha1
        kind: RayCluster
        metadata:
          labels:
            controller-tools.k8s.io: "1.0"
          # A unique identifier for the head node and workers of this cluster.
          name: raycluster-complete-1
        spec:
          rayVersion: '2.5.0'
          # Ray head pod configuration
          headGroupSpec:
            # Kubernetes Service Type. This is an optional field, and the default value is ClusterIP.
            # Refer to https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types.
            serviceType: ClusterIP
            # The `rayStartParams` are used to configure the `ray start` command.
            # See https://github.com/ray-project/kuberay/blob/master/docs/guidance/rayStartParams.md for the default settings of `rayStartParams` in KubeRay.
            # See https://docs.ray.io/en/latest/cluster/cli.html#ray-start for all available options in `rayStartParams`.
            rayStartParams:
              dashboard-host: '0.0.0.0'
            # Pod template
            template:
              metadata:
                # Custom labels. NOTE: To avoid conflicts with the KubeRay operator, do not define custom labels that start with `raycluster`.
                # Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
                labels: {}
              spec:
                containers:
                - name: ray-head
                  image: rayproject/ray:2.5.0
                  ports:
                  - containerPort: 6379
                    name: gcs
                  - containerPort: 8265
                    name: dashboard
                  - containerPort: 10001
                    name: client
                  lifecycle:
                    preStop:
                      exec:
                        command: ["/bin/sh","-c","ray stop"]
                  volumeMounts:
                  - mountPath: /tmp/ray
                    name: ray-logs
                  # The resource requests and limits in this config are too small for production!
                  # For an example with more realistic resource configuration, see
                  # ray-cluster.autoscaler.large.yaml.
                  # It is better to use a few large Ray pods than many small ones.
                  # For production, it is ideal to size each Ray pod to take up the
                  # entire Kubernetes node on which it is scheduled.
                  resources:
                    limits:
                      cpu: "1"
                      memory: "2G"
                    requests:
                      # For production use cases, we recommend specifying integer CPU requests and limits.
                      # We also recommend setting requests equal to limits for both CPU and memory.
                      # For this example, we use a 500m CPU request to accommodate resource-constrained local
                      # Kubernetes testing environments such as KinD and minikube.
                      cpu: "500m"
                      memory: "2G"
                volumes:
                - name: ray-logs
                  emptyDir: {}
          workerGroupSpecs:
          # The pod replicas in this group are typed as workers.
          - replicas: 1
            minReplicas: 1
            maxReplicas: 10
            # Logical group name; this one is called small-group, but the name can also be functional.
            groupName: small-group
            # If worker pods need to be added, we can increment the replicas.
            # If worker pods need to be removed, we decrement the replicas and populate the workersToDelete list.
            # The operator will remove pods from the list until the desired number of replicas is satisfied.
            # If the difference between the current replica count and the desired replicas is greater than the
            # number of entries in workersToDelete, random worker pods will be deleted.
            #scaleStrategy:
            #  workersToDelete:
            #  - raycluster-complete-worker-small-group-bdtwh
            #  - raycluster-complete-worker-small-group-hv457
            #  - raycluster-complete-worker-small-group-k8tj7
            # The `rayStartParams` are used to configure the `ray start` command.
            # See https://github.com/ray-project/kuberay/blob/master/docs/guidance/rayStartParams.md for the default settings of `rayStartParams` in KubeRay.
            # See https://docs.ray.io/en/latest/cluster/cli.html#ray-start for all available options in `rayStartParams`.
            rayStartParams: {}
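            # A minimal sketch of what non-empty worker `rayStartParams` could look like;
            # the flags and values below are illustrative assumptions, not KubeRay defaults:
            #   rayStartParams:
            #     num-cpus: '1'
            #     metrics-export-port: '8080'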
            # Pod template
            template:
              spec:
                containers:
                - name: ray-worker
                  image: rayproject/ray:2.5.0
                  lifecycle:
                    preStop:
                      exec:
                        command: ["/bin/sh","-c","ray stop"]
                  # Optional volume mounts.
                  # Refer to https://kubernetes.io/docs/concepts/storage/volumes/
                  volumeMounts:
                  - mountPath: /tmp/ray
                    name: ray-logs
                  # The resource requests and limits in this config are too small for production!
                  # For an example with more realistic resource configuration, see
                  # ray-cluster.autoscaler.large.yaml.
                  # It is better to use a few large Ray pods than many small ones.
                  # For production, it is ideal to size each Ray pod to take up the
                  # entire Kubernetes node on which it is scheduled.
                  resources:
                    limits:
                      cpu: "1"
                      memory: "1G"
                    requests:
                      # For production use cases, we recommend specifying integer CPU requests and limits.
                      # We also recommend setting requests equal to limits for both CPU and memory.
                      # For this example, we use a 500m CPU request to accommodate resource-constrained local
                      # Kubernetes testing environments such as KinD and minikube.
                      cpu: "500m"
                      # For production use cases, we recommend allocating at least 8 GB of memory for each Ray container.
                      memory: "1G"
                # Use volumes.
                # Refer to https://kubernetes.io/docs/concepts/storage/volumes/
                volumes:
                - name: ray-logs
                  emptyDir: {}
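# A minimal usage sketch, assuming the MCAD controller and the KubeRay operator are both
# installed in the cluster and this manifest is saved as aw-raycluster.yaml (hypothetical
# file name):
#   kubectl apply -f aw-raycluster.yaml        # queue the AppWrapper with MCAD
#   kubectl get appwrappers -n default         # check dispatch status
#   kubectl get rayclusters,pods -n default    # verify the RayCluster and its pods come up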