
Commit 96ab8fa

tedhtchang authored and openshift-merge-robot committed

update appwrapper yamls

1 parent 23bb205, commit 96ab8fa

File tree

2 files changed: +62, -48 lines changed


Diff for: doc/usage/examples/kuberay/config/aw-raycluster-1.yaml (+31, -24)
@@ -7,18 +7,31 @@ spec:
   resources:
     GenericItems:
     - replicas: 1
-      custompodresources: # Optional section that specifies resource requirements
-      # for non-standard k8s resources, follows same format as
-      # that of standard k8s resources.
-      - replicas: 2 # because AppWrappers are generic they must define the resultant pods that will be needed
-        # to fulfill a request as the request values cannot be reliably extracted from the
-        # generictemplate below
+      custompodresources:
+      # Optional section that specifies resource requirements
+      # for non-standard k8s resources, follows same format as
+      # that of standard k8s resources.
+      # Each item in the custompodresources stanza should include resources consumed by target Item.
+      # In this example, the 2 items correspond to 1 Ray head pod and 1 Ray worker pod
+      - replicas: 1
+        limits:
+          cpu: 2
+          memory: 8G
+          nvidia.com/gpu: 0
         requests:
-          cpu: 8
-          memory: 512Mi
+          cpu: 2
+          memory: 8G
+          nvidia.com/gpu: 0
+      # The replica should match the number of worker pods
+      - replicas: 1
         limits:
-          cpu: 10
-          memory: 1G
+          cpu: 8
+          memory: 8G
+          nvidia.com/gpu: 0
+        requests:
+          cpu: 8
+          memory: 8G
+          nvidia.com/gpu: 0
       generictemplate:
         # The resource requests and limits in this config are too small for production!
         # For examples with more realistic resource configuration, see
@@ -75,15 +88,13 @@ spec:
           # entire Kubernetes node on which it is scheduled.
           resources:
             limits:
-              cpu: "1"
-              memory: "2G"
+              cpu: "2"
+              memory: "8G"
             requests:
               # For production use-cases, we recommend specifying integer CPU reqests and limits.
               # We also recommend setting requests equal to limits for both CPU and memory.
-              # For this example, we use a 500m CPU request to accomodate resource-constrained local
-              # Kubernetes testing environments such as KinD and minikube.
-              cpu: "500m"
-              memory: "2G"
+              cpu: "2"
+              memory: "8G"
           volumes:
           - name: ray-logs
             emptyDir: {}
@@ -131,20 +142,16 @@ spec:
             # entire Kubernetes node on which it is scheduled.
             resources:
               limits:
-                cpu: "1"
-                memory: "1G"
+                cpu: "8"
+                memory: "8G"
               # For production use-cases, we recommend specifying integer CPU reqests and limits.
               # We also recommend setting requests equal to limits for both CPU and memory.
-              # For this example, we use a 500m CPU request to accomodate resource-constrained local
-              # Kubernetes testing environments such as KinD and minikube.
               requests:
                 # For production use-cases, we recommend specifying integer CPU reqests and limits.
                 # We also recommend setting requests equal to limits for both CPU and memory.
-                # For this example, we use a 500m CPU request to accomodate resource-constrained local
-                # Kubernetes testing environments such as KinD and minikube.
-                cpu: "500m"
+                cpu: "8"
                 # For production use-cases, we recommend allocating at least 8Gb memory for each Ray container.
-                memory: "1G"
+                memory: "8G"
             # use volumes
             # Refer to https://kubernetes.io/docs/concepts/storage/volumes/
             volumes:
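Because the AppWrapper controller (MCAD) treats the generictemplate as opaque, the custompodresources items are how it learns what the wrapped pods will consume: one item per distinct pod shape, with replicas, requests, and limits mirroring the pod spec. A condensed sketch of the stanza this commit produces (generictemplate abridged; flow-style mappings are used here only for brevity):

    spec:
      resources:
        GenericItems:
        - replicas: 1
          custompodresources:
          - replicas: 1                                        # the single Ray head pod
            limits: {cpu: 2, memory: 8G, nvidia.com/gpu: 0}
            requests: {cpu: 2, memory: 8G, nvidia.com/gpu: 0}
          - replicas: 1                                        # the single Ray worker pod
            limits: {cpu: 8, memory: 8G, nvidia.com/gpu: 0}
            requests: {cpu: 8, memory: 8G, nvidia.com/gpu: 0}
          generictemplate:
            # RayCluster whose head and worker containers request
            # the same cpu/memory values as the items above (abridged)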

Diff for: doc/usage/examples/kuberay/config/aw-raycluster.yaml (+31, -24)
@@ -7,18 +7,31 @@ spec:
   resources:
     GenericItems:
     - replicas: 1
-      custompodresources: # Optional section that specifies resource requirements
-      # for non-standard k8s resources, follows same format as
-      # that of standard k8s resources.
-      - replicas: 2 # because AppWrappers are generic they must define the resultant pods that will be needed
-        # to fulfill a request as the request values cannot be reliably extracted from the
-        # generictemplate below
+      custompodresources:
+      # Optional section that specifies resource requirements
+      # for non-standard k8s resources, follows same format as
+      # that of standard k8s resources.
+      # Each item in the custompodresources stanza should include resources consumed by target Item.
+      # In this example, the 2 items correspond to 1 Ray head pod and 1 Ray worker pod
+      - replicas: 1
+        limits:
+          cpu: 2
+          memory: 8G
+          nvidia.com/gpu: 0
         requests:
-          cpu: 8
-          memory: 512Mi
+          cpu: 2
+          memory: 8G
+          nvidia.com/gpu: 0
+      # The replica should match the number of worker pods
+      - replicas: 1
         limits:
-          cpu: 10
-          memory: 1G
+          cpu: 8
+          memory: 8G
+          nvidia.com/gpu: 0
+        requests:
+          cpu: 8
+          memory: 8G
+          nvidia.com/gpu: 0
       generictemplate:
         # The resource requests and limits in this config are too small for production!
         # For examples with more realistic resource configuration, see
@@ -75,15 +88,13 @@ spec:
           # entire Kubernetes node on which it is scheduled.
           resources:
             limits:
-              cpu: "1"
-              memory: "2G"
+              cpu: "2"
+              memory: "8G"
             requests:
               # For production use-cases, we recommend specifying integer CPU reqests and limits.
               # We also recommend setting requests equal to limits for both CPU and memory.
-              # For this example, we use a 500m CPU request to accomodate resource-constrained local
-              # Kubernetes testing environments such as KinD and minikube.
-              cpu: "500m"
-              memory: "2G"
+              cpu: "2"
+              memory: "8G"
           volumes:
           - name: ray-logs
             emptyDir: {}
@@ -131,20 +142,16 @@ spec:
             # entire Kubernetes node on which it is scheduled.
             resources:
               limits:
-                cpu: "1"
-                memory: "1G"
+                cpu: "8"
+                memory: "8G"
               # For production use-cases, we recommend specifying integer CPU reqests and limits.
              # We also recommend setting requests equal to limits for both CPU and memory.
-              # For this example, we use a 500m CPU request to accomodate resource-constrained local
-              # Kubernetes testing environments such as KinD and minikube.
              requests:
                # For production use-cases, we recommend specifying integer CPU reqests and limits.
                # We also recommend setting requests equal to limits for both CPU and memory.
-               # For this example, we use a 500m CPU request to accomodate resource-constrained local
-               # Kubernetes testing environments such as KinD and minikube.
-               cpu: "500m"
+               cpu: "8"
                # For production use-cases, we recommend allocating at least 8Gb memory for each Ray container.
-               memory: "1G"
+               memory: "8G"
            # use volumes
            # Refer to https://kubernetes.io/docs/concepts/storage/volumes/
            volumes:
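The comment "The replica should match the number of worker pods" ties the second custompodresources item to the RayCluster's worker group. A hypothetical fragment of the wrapped RayCluster following the standard KubeRay schema is shown below; the groupName and container name are illustrative, not taken from this commit:

    workerGroupSpecs:
    - groupName: small-group        # illustrative name, not from this commit
      replicas: 1                   # the second custompodresources item must match this count
      minReplicas: 1
      maxReplicas: 1
      template:
        spec:
          containers:
          - name: ray-worker        # illustrative container name
            resources:
              limits:
                cpu: "8"
                memory: "8G"
              requests:
                cpu: "8"
                memory: "8G"

If the worker group were scaled up, the custompodresources replicas would need to be raised in step, since MCAD reserves capacity from this stanza rather than from the template itself.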
