@@ -7,18 +7,31 @@
   resources:
     GenericItems:
     - replicas: 1
-      custompodresources: # Optional section that specifies resource requirements
-      # for non-standard k8s resources, follows same format as
-      # that of standard k8s resources.
-      - replicas: 2 # because AppWrappers are generic they must define the resultant pods that will be needed
-      # to fulfill a request as the request values cannot be reliably extracted from the
-      # generictemplate below
+      custompodresources:
+      # Optional section that specifies resource requirements
+      # for non-standard k8s resources; follows the same format as
+      # that of standard k8s resources.
+      # Each item in the custompodresources stanza should include the resources consumed by the target Item.
+      # In this example, the 2 items correspond to 1 Ray head pod and 1 Ray worker pod.
+      - replicas: 1
+        limits:
+          cpu: 2
+          memory: 8G
+          nvidia.com/gpu: 0
         requests:
-          cpu: 8
-          memory: 512Mi
+          cpu: 2
+          memory: 8G
+          nvidia.com/gpu: 0
+      # The replica count should match the number of worker pods.
+      - replicas: 1
         limits:
-          cpu: 10
-          memory: 1G
+          cpu: 8
+          memory: 8G
+          nvidia.com/gpu: 0
+        requests:
+          cpu: 8
+          memory: 8G
+          nvidia.com/gpu: 0
       generictemplate:
         # The resource requests and limits in this config are too small for production!
         # For examples with more realistic resource configuration, see
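
For reference, here is the complete custompodresources stanza as it reads after this hunk; this is a sketch that restates the diff with reconstructed indentation, not part of the patch itself:

      custompodresources:
      # First item: the 1 Ray head pod.
      - replicas: 1
        limits:
          cpu: 2
          memory: 8G
          nvidia.com/gpu: 0
        requests:
          cpu: 2
          memory: 8G
          nvidia.com/gpu: 0
      # Second item: the Ray worker pods; replicas matches the worker count.
      - replicas: 1
        limits:
          cpu: 8
          memory: 8G
          nvidia.com/gpu: 0
        requests:
          cpu: 8
          memory: 8G
          nvidia.com/gpu: 0
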
@@ -75,15 +88,13 @@ spec:
       # entire Kubernetes node on which it is scheduled.
       resources:
         limits:
-          cpu: "1"
-          memory: "2G"
+          cpu: "2"
+          memory: "8G"
         requests:
           # For production use-cases, we recommend specifying integer CPU requests and limits.
           # We also recommend setting requests equal to limits for both CPU and memory.
-          # For this example, we use a 500m CPU request to accommodate resource-constrained local
-          # Kubernetes testing environments such as KinD and minikube.
-          cpu: "500m"
-          memory: "2G"
+          cpu: "2"
+          memory: "8G"
       volumes:
       - name: ray-logs
         emptyDir: {}
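
This hunk raises the head container's requests and limits to match the first custompodresources item above, so what the AppWrapper controller accounts for is exactly what the head pod asks the scheduler for. As a sketch, the resulting resources block reads as follows (enclosing RayCluster fields omitted):

resources:
  limits:
    cpu: "2"
    memory: "8G"
  requests:
    cpu: "2"
    memory: "8G"
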
@@ -131,20 +142,16 @@ spec:
       # entire Kubernetes node on which it is scheduled.
       resources:
         limits:
-          cpu: "1"
-          memory: "1G"
+          cpu: "8"
+          memory: "8G"
           # For production use-cases, we recommend specifying integer CPU requests and limits.
           # We also recommend setting requests equal to limits for both CPU and memory.
-          # For this example, we use a 500m CPU request to accommodate resource-constrained local
-          # Kubernetes testing environments such as KinD and minikube.
         requests:
           # For production use-cases, we recommend specifying integer CPU requests and limits.
           # We also recommend setting requests equal to limits for both CPU and memory.
-          # For this example, we use a 500m CPU request to accommodate resource-constrained local
-          # Kubernetes testing environments such as KinD and minikube.
-          cpu: "500m"
+          cpu: "8"
           # For production use-cases, we recommend allocating at least 8Gb memory for each Ray container.
-          memory: "1G"
+          memory: "8G"
       # use volumes
       # Refer to https://kubernetes.io/docs/concepts/storage/volumes/
       volumes:
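
The matching worker-side block after this hunk, again as a sketch with enclosing fields omitted:

resources:
  limits:
    cpu: "8"
    memory: "8G"
  requests:
    cpu: "8"
    memory: "8G"

Setting requests equal to limits, as the comments recommend, also qualifies the pods for the Guaranteed QoS class (provided every container in the pod does the same). The totals line up with the custompodresources items: one head (2 CPU, 8G) plus one worker (8 CPU, 8G) comes to 10 CPUs and 16G of memory for the whole cluster.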
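
For orientation, all three hunks live inside a single AppWrapper object shaped roughly as below. The apiVersion and metadata are illustrative assumptions, not taken from the diff, and [...] marks elided content:

apiVersion: workload.codeflare.dev/v1beta1    # assumed; depends on the installed MCAD/CodeFlare version
kind: AppWrapper
metadata:
  name: raycluster-example                    # hypothetical name
spec:
  resources:
    GenericItems:
    - replicas: 1
      custompodresources: [...]               # the stanza from the first hunk
      generictemplate: [...]                  # a RayCluster whose head and worker
                                              # resources are edited in hunks 2 and 3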