
Commit

Merge branch 'master' into hpguo/train_structured_log
hongpeng-guo committed Oct 7, 2024
2 parents 427b861 + f568674 commit 58bbf5d
Showing 410 changed files with 7,176 additions and 11,420 deletions.
2 changes: 1 addition & 1 deletion README.rst
@@ -137,4 +137,4 @@ Getting Involved
.. _`StackOverflow`: https://stackoverflow.com/questions/tagged/ray
.. _`Meetup Group`: https://www.meetup.com/Bay-Area-Ray-Meetup/
.. _`Twitter`: https://twitter.com/raydistributed
.. _`Slack`: https://forms.gle/9TSdDYUgxYs8SA9e8
.. _`Slack`: https://www.ray.io/join-slack?utm_source=github&utm_medium=ray_readme&utm_campaign=getting_involved
6 changes: 3 additions & 3 deletions ci/env/install-dependencies.sh
@@ -244,9 +244,9 @@ install_pip_packages() {

# For DAG visualization
requirements_packages+=("pydot")
requirements_packages+=("pytesseract")
requirements_packages+=("spacy>=3")
requirements_packages+=("spacy_langdetect")
requirements_packages+=("pytesseract==0.3.13")
requirements_packages+=("spacy==3.7.5")
requirements_packages+=("spacy_langdetect==0.1.2")
fi

# Additional RLlib test dependencies.
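The three documentation-dependency pins above can be sanity-checked locally before relying on CI; the snippet below is a sketch (the package names and versions come from this diff, the virtualenv path is illustrative):

```sh
# Install the exact pinned versions and confirm they import cleanly.
python -m venv /tmp/doc-deps && source /tmp/doc-deps/bin/activate
pip install "pytesseract==0.3.13" "spacy==3.7.5" "spacy_langdetect==0.1.2"
python -c "import pytesseract, spacy, spacy_langdetect; print(spacy.__version__)"
```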
17 changes: 13 additions & 4 deletions ci/ray_ci/docker_container.py
@@ -20,8 +20,8 @@
]
GPU_PLATFORM = "cu12.1.1-cudnn8"

PYTHON_VERSIONS_RAY = ["3.9", "3.10", "3.11"]
PYTHON_VERSIONS_RAY_ML = ["3.9", "3.10"]
PYTHON_VERSIONS_RAY = ["3.9", "3.10", "3.11", "3.12"]
PYTHON_VERSIONS_RAY_ML = ["3.9", "3.10", "3.11"]
ARCHITECTURES_RAY = ["x86_64", "aarch64"]
ARCHITECTURES_RAY_ML = ["x86_64"]

@@ -46,6 +46,15 @@ def __init__(
upload: bool = False,
) -> None:
assert "RAYCI_CHECKOUT_DIR" in os.environ, "RAYCI_CHECKOUT_DIR not set"

assert python_version in PYTHON_VERSIONS_RAY
assert platform in PLATFORMS_RAY
assert architecture in ARCHITECTURES_RAY
if image_type == RayType.RAY_ML:
assert python_version in PYTHON_VERSIONS_RAY_ML
assert platform in PLATFORMS_RAY_ML
assert architecture in ARCHITECTURES_RAY_ML

rayci_checkout_dir = os.environ["RAYCI_CHECKOUT_DIR"]
self.python_version = python_version
self.platform = platform
@@ -122,13 +131,13 @@ def _get_image_tags(self, external: bool = False) -> List[str]:
versions = self._get_image_version_tags(external)

platforms = [self.get_platform_tag()]
if self.platform == "cpu" and self.image_type == "ray":
if self.platform == "cpu" and self.image_type == RayType.RAY:
# no tag is alias to cpu for ray image
platforms.append("")
elif self.platform == GPU_PLATFORM:
# gpu is alias to cu118 for ray image
platforms.append("-gpu")
if self.image_type == "ray-ml":
if self.image_type == RayType.RAY_ML:
# no tag is alias to gpu for ray-ml image
platforms.append("")

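Once images for the new Python 3.12 entry are published, a quick smoke test might look like the sketch below. The tag is an assumption pieced together from the `py<version>`/platform naming used in the tests further down; the actual published tags may differ.

```sh
# Assumed tag format -- adjust to whatever tags the build actually publishes.
docker pull rayproject/ray:nightly-py312-cpu
docker run --rm rayproject/ray:nightly-py312-cpu python -c "import sys, ray; print(sys.version, ray.__version__)"
```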
5 changes: 3 additions & 2 deletions ci/ray_ci/test_ray_docker_container.py
@@ -7,6 +7,7 @@

from ci.ray_ci.builder_container import DEFAULT_PYTHON_VERSION
from ci.ray_ci.container import _DOCKER_ECR_REPO
from ci.ray_ci.docker_container import GPU_PLATFORM
from ci.ray_ci.ray_docker_container import RayDockerContainer
from ci.ray_ci.test_base import RayCITestBase
from ci.ray_ci.utils import RAY_VERSION
@@ -203,8 +204,8 @@ def test_canonical_tag(self) -> None:
container = RayDockerContainer(v, "cpu", "ray", "aarch64")
assert container._get_canonical_tag() == f"{sha}-{pv}-cpu-aarch64"

container = RayDockerContainer(v, "cu11.8.0-cudnn8", "ray-ml")
assert container._get_canonical_tag() == f"{sha}-{pv}-cu118"
container = RayDockerContainer(v, GPU_PLATFORM, "ray-ml")
assert container._get_canonical_tag() == f"{sha}-{pv}-cu121"

with mock.patch.dict(os.environ, {"BUILDKITE_BRANCH": "releases/1.0.0"}):
container = RayDockerContainer(v, "cpu", "ray")
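To exercise the updated tag expectations locally, something like the following should work from a Ray checkout with the `ci/ray_ci` Python dependencies installed (a sketch; CI itself runs these tests under its own harness):

```sh
python -m pytest ci/ray_ci/test_ray_docker_container.py -k canonical_tag
```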
1 change: 1 addition & 0 deletions doc/BUILD
@@ -467,6 +467,7 @@ doctest(
"source/rllib/rllib-sample-collection.rst",
],
),
data = ["//rllib:cartpole-v1_large"],
tags = ["team:rllib"],
)

4 changes: 0 additions & 4 deletions doc/source/_includes/rllib/new_api_stack.rst
@@ -4,8 +4,4 @@
The Ray Team plans to transition algorithms, example scripts, and documentation to the new code base
thereby incrementally replacing the "old API stack" (e.g., ModelV2, Policy, RolloutWorker) throughout the subsequent minor releases leading up to Ray 3.0.

Note, however, that so far only PPO (single- and multi-agent) and SAC (single-agent only)
support the "new API stack" and continue to run by default with the old APIs.
You can continue to use the existing custom (old stack) classes.

:doc:`See here </rllib/rllib-new-api-stack>` for more details on how to use the new API stack.
2 changes: 1 addition & 1 deletion doc/source/_templates/navbar-anyscale.html
@@ -1,4 +1,4 @@
<a id="try-anyscale-href" href="https://console.anyscale.com">
<a id="try-anyscale-href" href="https://console.anyscale.com/?utm_source=ray_docs&utm_medium=docs&utm_campaign=navbar">
<div id="try-anyscale-text">
<span>Try Ray on Anyscale</span>
</div>
(file path truncated)
@@ -40,10 +40,10 @@ kubectl apply -f https://raw.githubusercontent.com/GoogleCloudPlatform/container
# (Method 2) "gcloud container clusters get-credentials <your-cluster-name> --region <your-region> --project <your-project>"
# (Method 3) "kubectl config use-context ..."

# Install both CRDs and KubeRay operator v1.0.0.
# Install both CRDs and KubeRay operator.
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm repo update
helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0
helm install kuberay-operator kuberay/kuberay-operator --version 1.2.2

# Create a Ray cluster
kubectl apply -f https://raw.githubusercontent.com/ray-project/ray/master/doc/source/cluster/kubernetes/configs/ray-cluster.gpu.yaml
@@ -116,7 +116,7 @@ It is optional.
# Create the KubeRay operator
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm repo update
helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0
helm install kuberay-operator kuberay/kuberay-operator --version 1.2.2

# Create a Ray cluster
kubectl apply -f https://raw.githubusercontent.com/ray-project/ray/master/doc/source/cluster/kubernetes/configs/ray-cluster.gpu.yaml
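# Optional sanity check (a sketch; assumes the default namespace):
helm list
kubectl get pods          # the kuberay-operator pod should be Running
kubectl get rayclusters   # the GPU cluster created above should appear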
(file path truncated)
@@ -19,7 +19,7 @@ Note that the YAML file in this example uses `serveConfigV2`, which is supported

```sh
# Download `ray-service.mobilenet.yaml`
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-service.mobilenet.yaml
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-service.mobilenet.yaml

# Create a RayService
kubectl apply -f ray-service.mobilenet.yaml
(file path truncated)
@@ -37,12 +37,12 @@ The KubeRay operator Pod must be on the CPU node if you have set up the taint fo

## Step 2: Submit the RayJob

Create the RayJob custom resource with [ray-job.batch-inference.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-job.batch-inference.yaml).
Create the RayJob custom resource with [ray-job.batch-inference.yaml](https://github.com/ray-project/kuberay/blob/v1.2.2/ray-operator/config/samples/ray-job.batch-inference.yaml).

Download the file with `curl`:

```bash
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-job.batch-inference.yaml
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-job.batch-inference.yaml
```

Note that the `RayJob` spec contains a spec for the `RayCluster`. This tutorial uses a single-node cluster with 4 GPUs. For production use cases, use a multi-node cluster where the head node doesn't have GPUs, so that Ray can automatically schedule GPU workloads on worker nodes, which won't interfere with critical Ray processes on the head node.
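After downloading the manifest, submission and a status check are the usual `kubectl` calls; the sketch below assumes you kept the sample file name:

```sh
kubectl apply -f ray-job.batch-inference.yaml
kubectl get rayjobs -w   # watch until the RayJob reports the underlying job as finished
```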
(file path truncated)
@@ -18,7 +18,7 @@ Please note that the YAML file in this example uses `serveConfigV2`, which is su

```sh
# Step 3.1: Download `ray-service.stable-diffusion.yaml`
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-service.stable-diffusion.yaml
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-service.stable-diffusion.yaml

# Step 3.2: Create a RayService
kubectl apply -f ray-service.stable-diffusion.yaml
(file path truncated)
@@ -17,7 +17,7 @@ Please note that the YAML file in this example uses `serveConfigV2`, which is su

```sh
# Step 3.1: Download `ray-service.text-summarizer.yaml`
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-service.text-summarizer.yaml
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-service.text-summarizer.yaml

# Step 3.2: Create a RayService
kubectl apply -f ray-service.text-summarizer.yaml
(file path truncated)
@@ -26,8 +26,8 @@ Deploy the KubeRay operator with the [Helm chart repository](https://github.com/
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm repo update

# Install both CRDs and KubeRay operator v1.1.1.
helm install kuberay-operator kuberay/kuberay-operator --version 1.1.1
# Install both CRDs and KubeRay operator v1.2.2.
helm install kuberay-operator kuberay/kuberay-operator --version 1.2.2

# Confirm that the operator is running in the namespace `default`.
kubectl get pods
@@ -46,14 +46,14 @@ Once the KubeRay operator is running, we are ready to deploy a RayCluster. To do
:::{tab-item} ARM64 (Apple Silicon)
```sh
# Deploy a sample RayCluster CR from the KubeRay Helm chart repo:
helm install raycluster kuberay/ray-cluster --version 1.1.1 --set 'image.tag=2.9.0-aarch64'
helm install raycluster kuberay/ray-cluster --version 1.2.2 --set 'image.tag=2.9.0-aarch64'
```
:::

:::{tab-item} x86-64 (Intel/Linux)
```sh
# Deploy a sample RayCluster CR from the KubeRay Helm chart repo:
helm install raycluster kuberay/ray-cluster --version 1.1.1
helm install raycluster kuberay/ray-cluster --version 1.2.2
```
:::

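Whichever tab you used, a quick way to confirm the chart produced a healthy cluster is sketched below; the `raycluster-kuberay` name is the default release naming also used elsewhere on this page.

```sh
kubectl get rayclusters
kubectl get pods -l ray.io/cluster=raycluster-kuberay
```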
(file path truncated)
@@ -6,7 +6,7 @@

* KubeRay v0.6.0 or higher
* KubeRay v0.6.0 or v1.0.0: Ray 1.10 or higher.
* KubeRay v1.1.1 is highly recommended: Ray 2.8.0 or higher. This document is mainly for KubeRay v1.1.1.
* KubeRay v1.1.1 or newer is highly recommended: Ray 2.8.0 or higher.

## What's a RayJob?

@@ -78,7 +78,7 @@ Follow the [RayCluster Quickstart](kuberay-operator-deploy) to install the lates
## Step 3: Install a RayJob

```sh
kubectl apply -f https://raw.githubusercontent.com/ray-project/kuberay/v1.1.1/ray-operator/config/samples/ray-job.sample.yaml
kubectl apply -f https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-job.sample.yaml
```

## Step 4: Verify the Kubernetes cluster status
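A typical verification pass is sketched below; it relies only on the CRDs installed with the operator.

```sh
kubectl get rayjob
kubectl get raycluster
kubectl get pods
```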
@@ -158,13 +158,13 @@ The Python script `sample_code.py` used by `entrypoint` is a simple Ray script t
## Step 6: Delete the RayJob

```sh
kubectl delete -f https://raw.githubusercontent.com/ray-project/kuberay/v1.1.1/ray-operator/config/samples/ray-job.sample.yaml
kubectl delete -f https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-job.sample.yaml
```

## Step 7: Create a RayJob with `shutdownAfterJobFinishes` set to true

```sh
kubectl apply -f https://raw.githubusercontent.com/ray-project/kuberay/v1.1.1/ray-operator/config/samples/ray-job.shutdown.yaml
kubectl apply -f https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-job.shutdown.yaml
```

The `ray-job.shutdown.yaml` defines a RayJob custom resource with `shutdownAfterJobFinishes: true` and `ttlSecondsAfterFinished: 10`.
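In other words, once the submitted job finishes, the operator waits `ttlSecondsAfterFinished` (10 seconds here) and then deletes the RayCluster it created. A quick way to observe this is sketched below:

```sh
kubectl get rayjob       # wait until the job is reported as finished
kubectl get raycluster   # roughly 10 seconds later, the associated cluster should be gone
```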
@@ -192,7 +192,7 @@ kubectl get raycluster

```sh
# Step 10.1: Delete the RayJob
kubectl delete -f https://raw.githubusercontent.com/ray-project/kuberay/v1.1.1/ray-operator/config/samples/ray-job.shutdown.yaml
kubectl delete -f https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-job.shutdown.yaml

# Step 10.2: Delete the KubeRay operator
helm uninstall kuberay-operator
(file path truncated)
@@ -40,7 +40,7 @@ Please note that the YAML file in this example uses `serveConfigV2` to specify a
## Step 3: Install a RayService

```sh
kubectl apply -f https://raw.githubusercontent.com/ray-project/kuberay/v1.1.1/ray-operator/config/samples/ray-service.sample.yaml
kubectl apply -f https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-service.sample.yaml
```

## Step 4: Verify the Kubernetes cluster status
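A minimal health check for the newly created RayService is sketched below; the `rayservice-sample` name matches the sample manifest used above.

```sh
kubectl get rayservice
kubectl describe rayservice rayservice-sample | tail -n 20
```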
@@ -110,7 +110,7 @@ curl -X POST -H 'Content-Type: application/json' rayservice-sample-serve-svc:800

```sh
# Delete the RayService.
kubectl delete -f https://raw.githubusercontent.com/ray-project/kuberay/v1.1.1/ray-operator/config/samples/ray-service.sample.yaml
kubectl delete -f https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-service.sample.yaml

# Uninstall the KubeRay operator.
helm uninstall kuberay-operator
4 changes: 2 additions & 2 deletions doc/source/cluster/kubernetes/index.md
@@ -17,10 +17,10 @@ references

In this section we cover how to execute your distributed Ray programs on a Kubernetes cluster.

Using the [KubeRay Operator](https://github.com/ray-project/kuberay) is the
Using the [KubeRay operator](https://github.com/ray-project/kuberay) is the
recommended way to do so. The operator provides a Kubernetes-native way to manage Ray clusters.
Each Ray cluster consists of a head node pod and a collection of worker node pods. Optional
autoscaling support allows the KubeRay Operator to size your Ray clusters according to the
autoscaling support allows the KubeRay operator to size your Ray clusters according to the
requirements of your Ray workload, adding and removing Ray pods as needed. KubeRay supports
heterogeneous compute nodes (including GPUs) as well as running multiple Ray clusters with
different Ray versions in the same Kubernetes cluster.
12 changes: 6 additions & 6 deletions doc/source/cluster/kubernetes/k8s-ecosystem/ingress.md
@@ -32,10 +32,10 @@ Three examples show how to use ingress to access your Ray cluster:
# Step 1: Install KubeRay operator and CRD
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm repo update
helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0
helm install kuberay-operator kuberay/kuberay-operator --version 1.2.2

# Step 2: Install a RayCluster
helm install raycluster kuberay/ray-cluster --version 1.0.0
helm install raycluster kuberay/ray-cluster --version 1.2.2

# Step 3: Edit the `ray-operator/config/samples/ray-cluster-alb-ingress.yaml`
#
@@ -122,10 +122,10 @@ Now run the following commands:
# Step 1: Install KubeRay operator and CRD
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm repo update
helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0
helm install kuberay-operator kuberay/kuberay-operator --version 1.2.2

# Step 2: Install a RayCluster
helm install raycluster kuberay/ray-cluster --version 1.0.0
helm install raycluster kuberay/ray-cluster --version 1.2.2

# Step 3: Edit ray-cluster-gclb-ingress.yaml to replace the service name with the name of the head service from the RayCluster. (Output of `kubectl get svc`)

@@ -185,12 +185,12 @@ kubectl wait --namespace ingress-nginx \
# Step 3: Install KubeRay operator and CRD
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm repo update
helm install kuberay-operator kuberay/kuberay-operator --version 1.1.1
helm install kuberay-operator kuberay/kuberay-operator --version 1.2.2

# Step 4: Install RayCluster and create an ingress separately.
# More information about this setting change is documented in https://github.com/ray-project/kuberay/pull/699
# and `ray-operator/config/samples/ray-cluster.separate-ingress.yaml`
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.1.1/ray-operator/config/samples/ray-cluster.separate-ingress.yaml
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-cluster.separate-ingress.yaml
kubectl apply -f ray-operator/config/samples/ray-cluster.separate-ingress.yaml

# Step 5: Check the ingress created in Step 4.
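# For example (a sketch; use the ingress name created by the YAML applied above):
kubectl get ingress
kubectl describe ingress <ingress-name>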
2 changes: 1 addition & 1 deletion doc/source/cluster/kubernetes/k8s-ecosystem/istio.md
@@ -66,7 +66,7 @@ In this mode, you _must_ disable the KubeRay init container injection by setting

```bash
# Set ENABLE_INIT_CONTAINER_INJECTION=false on the KubeRay operator.
helm upgrade kuberay-operator kuberay/kuberay-operator --version 1.1.1 \
helm upgrade kuberay-operator kuberay/kuberay-operator --version 1.2.2 \
--set env\[0\].name=ENABLE_INIT_CONTAINER_INJECTION \
--set-string env\[0\].value=false
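# Hypothetical follow-up check that the flag landed on the operator Deployment:
kubectl get deployment kuberay-operator \
  -o jsonpath='{.spec.template.spec.containers[0].env}'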

2 changes: 1 addition & 1 deletion doc/source/cluster/kubernetes/k8s-ecosystem/kubeflow.md
@@ -42,7 +42,7 @@ kustomize version --short
```sh
# Create a RayCluster CR, and the KubeRay operator will reconcile a Ray cluster
# with 1 head Pod and 1 worker Pod.
helm install raycluster kuberay/ray-cluster --version 1.0.0 --set image.tag=2.2.0-py38-cpu
helm install raycluster kuberay/ray-cluster --version 1.2.2 --set image.tag=2.2.0-py38-cpu

# Check RayCluster
kubectl get pod -l ray.io/cluster=raycluster-kuberay
(file path truncated)
@@ -87,7 +87,7 @@ kubectl get service
* `# HELP`: Describe the meaning of this metric.
* `# TYPE`: See [this document](https://prometheus.io/docs/concepts/metric_types/) for more details.

* Three required environment variables are defined in [ray-cluster.embed-grafana.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.embed-grafana.yaml). See [Configuring and Managing Ray Dashboard](https://docs.ray.io/en/latest/cluster/configure-manage-dashboard.html) for more details about these environment variables.
* Three required environment variables are defined in [ray-cluster.embed-grafana.yaml](https://github.com/ray-project/kuberay/blob/v1.2.2/ray-operator/config/samples/ray-cluster.embed-grafana.yaml). See [Configuring and Managing Ray Dashboard](https://docs.ray.io/en/latest/cluster/configure-manage-dashboard.html) for more details about these environment variables.
```yaml
env:
- name: RAY_GRAFANA_IFRAME_HOST
2 changes: 1 addition & 1 deletion doc/source/cluster/kubernetes/k8s-ecosystem/pyspy.md
@@ -33,7 +33,7 @@ Follow [this document](kuberay-operator-deploy) to install the latest stable Kub

```bash
# Download `ray-cluster.py-spy.yaml`
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.py-spy.yaml
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-cluster.py-spy.yaml

# Create a RayCluster
kubectl apply -f ray-cluster.py-spy.yaml
6 changes: 3 additions & 3 deletions doc/source/cluster/kubernetes/k8s-ecosystem/volcano.md
@@ -35,7 +35,7 @@ batchScheduler:
* Pass the `--set batchScheduler.enabled=true` flag when running on the command line:
```shell
# Install the Helm chart with --enable-batch-scheduler flag set to true
helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0 --set batchScheduler.enabled=true
helm install kuberay-operator kuberay/kuberay-operator --version 1.2.2 --set batchScheduler.enabled=true
```

### Step 4: Install a RayCluster with the Volcano scheduler
@@ -45,7 +45,7 @@ The RayCluster custom resource must include the `ray.io/scheduler-name: volcano`
```shell
# Path: kuberay/ray-operator/config/samples
# Includes label `ray.io/scheduler-name: volcano` in the metadata.labels
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.volcano-scheduler.yaml
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-cluster.volcano-scheduler.yaml
kubectl apply -f ray-cluster.volcano-scheduler.yaml

# Check the RayCluster
@@ -113,7 +113,7 @@ Next, create a RayCluster with a head node (1 CPU + 2Gi of RAM) and two workers
```shell
# Path: kuberay/ray-operator/config/samples
# Includes the `ray.io/scheduler-name: volcano` and `volcano.sh/queue-name: kuberay-test-queue` labels in the metadata.labels
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.volcano-scheduler-queue.yaml
curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.2.2/ray-operator/config/samples/ray-cluster.volcano-scheduler-queue.yaml
kubectl apply -f ray-cluster.volcano-scheduler-queue.yaml
```
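Before its pods start, Volcano admits the cluster through the `kuberay-test-queue` queue named in the labels above; a quick way to watch that happen is sketched below (assuming Volcano is installed, as this guide requires):

```sh
kubectl get queue kuberay-test-queue   # the Volcano queue referenced by the label
kubectl get podgroups                  # Volcano creates a PodGroup per RayCluster
kubectl get pods                       # pods stay Pending until the queue admits them
```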

(Remaining changed files not shown.)
