{
  "name": "keep-it-simple",
  "creationTime": 1660657756574,
  "lastModifiedTime": 1660658307175,
  "lastUsedTime": 1660742079201,
  "choices": {
    "madwizard/apriori/use-gpu": "don't use gpus",
    "madwizard/apriori/arch": "x64",
    "madwizard/apriori/platform": "darwin",
    "madwizard/apriori/mac-installer": "Homebrew",
    "madwizard/apriori/in-terminal": "HTML",
    "Start a new Run####Connect Dashboard to an existing Run####Boot up a Cloud Computer####Shut down a Cloud Computer": "Start a new Run",
    "Run with CodeFlare Model Architecture####Bring Your Own Code####Demos": "Bring Your Own Code",
    "BERT####MLFlow Demo####Tensorboard Demo": "MLFlow Demo",
    "Location of your working directory": "{\"Location of your working directory\":\"tests/kind/inputs/qiskit-runtime-env-with-conda\"}",
    "Provide custom base image, if any": "{\"Provide custom base image, if any\":\"rayproject/ray:1.13.1-py37\"}",
    "AWS####IBM####My data is not stored in S3": "My data is not stored in S3",
    "Run Locally####Run on a Kubernetes Cluster": "Run on a Kubernetes Cluster",
    "My Cluster is Running Locally####My Cluster is Running on Kubernetes": "My Cluster is Running on Kubernetes",
    "expand((kubectl config get-contexts -o name | grep -E . >& /dev/null && kubectl config get-contexts -o name) || (kubectl version | grep Server >& /dev/null && echo \"${KUBE_CONTEXT_FOR_TEST-In-cluster}\" || exit 1), Kubernetes contexts)": "kind-codeflare-test",
    "expand([ -z ${KUBE_CONTEXT} ] && exit 1 || X=$([ -n \"$KUBE_NS_FOR_TEST\" ] && echo $KUBE_NS_FOR_TEST || kubectl ${KUBE_CONTEXT_ARG} get ns -o name || oc ${KUBE_CONTEXT_ARG} get projects -o name); echo \"$X\" | sed -E 's#(namespace|project.project.openshift.io)/##' | grep -Ev 'openshift|kube-', Kubernetes namespaces)####Create a namespace": "default",
    "Number of CPUs####Number of GPUs####Minimum Workers####Maximum Workers####Worker Memory####Head Memory": "{\"Number of CPUs\":\"500m\",\"Number of GPUs\":\"0\",\"Minimum Workers\":\"1\",\"Maximum Workers\":\"1\",\"Worker Memory\":\"1.5Gi\",\"Head Memory\":\"1.5Gi\"}",
    "Keep It Simple####Use the Ray Autoscaler####Use the Multi-user Enhanced Kubernetes Scheduler": "Keep It Simple",
    "Run with CodeFlare Model Architecture####Bring Your Own Code####Demos": "Demos",
    "Training Tasks####Fine Tuning Tasks": "Training Tasks",
    "Getting Started Demo####MLFlow Demo####Tensorboard Demo": "Getting Started Demo"
  }
}
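This profile is a plain JSON document: a profile name, a few timestamps, and a "choices" map from a madwizard question key (multiple-choice keys join their options with "####") to the answer that was recorded. As a minimal sketch of how one might inspect such a file outside of madwizard, the Python below loads the JSON and prints each recorded answer; the local file name "keep-it-simple.json" is only an assumption for illustration, not a path madwizard itself defines.

#!/usr/bin/env python3
# Sketch: load a madwizard profile JSON and list its recorded choices.
import json
from pathlib import Path

# Hypothetical local copy of the profile shown above; adjust to your setup.
PROFILE_PATH = Path("keep-it-simple.json")

def main() -> None:
    profile = json.loads(PROFILE_PATH.read_text())
    print(f"Profile: {profile['name']}")
    for key, answer in profile["choices"].items():
        # Multiple-choice keys join their options with '####'; show only the
        # first option so the long keys stay readable.
        label = key.split("####")[0]
        print(f"  {label!r} -> {answer!r}")

if __name__ == "__main__":
    main()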