From e8a33329aaf49e289ba3d754f5e3401f3637434d Mon Sep 17 00:00:00 2001
From: Jacob Tomlinson
Date: Wed, 9 Oct 2024 14:38:32 +0100
Subject: [PATCH 1/6] Fix code typo for Coiled (#456)

---
 source/platforms/coiled.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/source/platforms/coiled.md b/source/platforms/coiled.md
index b7584f02..88a3c954 100644
--- a/source/platforms/coiled.md
+++ b/source/platforms/coiled.md
@@ -14,7 +14,7 @@ To get started you need to install Coiled and login.
 ```console
 $ conda install -c conda-forge coiled
-$ coiled setup
+$ coiled login
 ```
 For more information see the [Coiled Getting Started documentation](https://docs.coiled.io/user_guide/getting_started.html).
@@ -82,7 +82,7 @@ We can also connect a Dask client to see that information for the workers too.
 ```python
 from dask.distributed import Client
-client = Client
+client = Client(cluster)
 client
 ```

From 9a6f627efa67dcefcc949735d756c994d9a900e9 Mon Sep 17 00:00:00 2001
From: Jacob Tomlinson
Date: Wed, 9 Oct 2024 17:45:42 +0100
Subject: [PATCH 2/6] Fix EKS instructions for 24.10 testing (#458)

---
 source/cloud/aws/eks.md | 32 +++++++++++---------------------
 1 file changed, 11 insertions(+), 21 deletions(-)

diff --git a/source/cloud/aws/eks.md b/source/cloud/aws/eks.md
index 66648161..4a28cea4 100644
--- a/source/cloud/aws/eks.md
+++ b/source/cloud/aws/eks.md
@@ -24,7 +24,7 @@ Now we can launch a GPU enabled EKS cluster. First launch an EKS cluster with `e
 ```console
 $ eksctl create cluster rapids \
-  --version 1.24 \
+  --version 1.29 \
   --nodes 3 \
   --node-type=p3.8xlarge \
   --timeout=40m \
   --ssh-access \
   --ssh-public-key \ # Be sure to set your public key ID here
   --region us-east-1 \
   --zones=us-east-1c,us-east-1b,us-east-1d \
-  --auto-kubeconfig \
-  --install-nvidia-plugin=false
+  --auto-kubeconfig
 ```
 With this command, you’ve launched an EKS cluster called `rapids`. You’ve specified that it should use nodes of type `p3.8xlarge`. We also specified that we don't want to install the NVIDIA drivers as we will do that with the NVIDIA operator.
 $ aws eks --region us-east-1 update-kubeconfig --name rapids
 ```
 ## Install drivers
-Next, [install the NVIDIA drivers](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/getting-started.html) onto each node.
+As we selected a GPU node type, EKS will automatically install drivers for us. We can verify this by listing the NVIDIA driver plugin Pods.
 ```console
-$ helm install --repo https://helm.ngc.nvidia.com/nvidia --wait --generate-name -n gpu-operator --create-namespace gpu-operator
-NAME: gpu-operator-1670843572
-NAMESPACE: gpu-operator
-STATUS: deployed
-REVISION: 1
-TEST SUITE: None
+$ kubectl get po -n kube-system -l name=nvidia-device-plugin-ds
+NAME                                   READY   STATUS    RESTARTS   AGE
+nvidia-device-plugin-daemonset-kv7t5   1/1     Running   0          52m
+nvidia-device-plugin-daemonset-rhmvx   1/1     Running   0          52m
+nvidia-device-plugin-daemonset-thjhc   1/1     Running   0          52m
 ```
-Verify that the NVIDIA drivers are successfully installed.
-
-```console
-$ kubectl get po -A --watch | grep nvidia
-kube-system   nvidia-driver-installer-6zwcn    1/1   Running   0   8m47s
-kube-system   nvidia-driver-installer-8zmmn    1/1   Running   0   8m47s
-kube-system   nvidia-driver-installer-mjkb8    1/1   Running   0   8m47s
-kube-system   nvidia-gpu-device-plugin-5ffkm   1/1   Running   0   13m
-kube-system   nvidia-gpu-device-plugin-d599s   1/1   Running   0   13m
-kube-system   nvidia-gpu-device-plugin-jrgjh   1/1   Running   0   13m
+```{note}
+By default this plugin will install the latest version of the NVIDIA drivers on every Node. If you need more control over your driver installation we recommend that when creating your cluster you set `eksctl create cluster --install-nvidia-plugin=false ...` and then install drivers yourself using the [NVIDIA GPU Operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/getting-started.html).
 ```
-After your drivers are installed, you are ready to test your cluster.
+After you have confirmed your drivers are installed, you are ready to test your cluster.
 ```{include} ../../_includes/check-gpu-pod-works.md

From eb5cba1d0a64d7b5677308a31b6ba70c88dfdffb Mon Sep 17 00:00:00 2001
From: Ray Douglass
Date: Thu, 10 Oct 2024 15:16:51 -0400
Subject: [PATCH 3/6] Release v24.10.00

---
 source/conf.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/source/conf.py b/source/conf.py
index 929e29a1..3094edd7 100644
--- a/source/conf.py
+++ b/source/conf.py
@@ -21,25 +21,25 @@ author = "NVIDIA"
 # Single modifiable version for all of the docs - easier for future updates
-stable_version = "24.08"
-nightly_version = "24.10"
+stable_version = "24.10"
+nightly_version = "24.12"
 versions = {
     "stable": {
         "rapids_version": stable_version,
         "rapids_api_docs_version": "stable",
-        "rapids_container": f"nvcr.io/nvidia/rapidsai/base:{stable_version}-cuda12.5-py3.11",
-        "rapids_notebooks_container": f"nvcr.io/nvidia/rapidsai/notebooks:{stable_version}-cuda12.5-py3.11",
+        "rapids_container": f"nvcr.io/nvidia/rapidsai/base:{stable_version}-cuda12.5-py3.12",
+        "rapids_notebooks_container": f"nvcr.io/nvidia/rapidsai/notebooks:{stable_version}-cuda12.5-py3.12",
         "rapids_conda_channels": "-c rapidsai -c conda-forge -c nvidia",
-        "rapids_conda_packages": f"rapids={stable_version} python=3.11 cuda-version=12.5",
+        "rapids_conda_packages": f"rapids={stable_version} python=3.12 cuda-version=12.5",
     },
     "nightly": {
         "rapids_version": f"{nightly_version}-nightly",
         "rapids_api_docs_version": "nightly",
-        "rapids_container": f"rapidsai/base:{nightly_version + 'a'}-cuda12.5-py3.11",
-        "rapids_notebooks_container": f"rapidsai/notebooks:{nightly_version + 'a'}-cuda12.5-py3.11",
+        "rapids_container": f"rapidsai/base:{nightly_version + 'a'}-cuda12.5-py3.12",
+        "rapids_notebooks_container": f"rapidsai/notebooks:{nightly_version + 'a'}-cuda12.5-py3.12",
         "rapids_conda_channels": "-c rapidsai-nightly -c conda-forge -c nvidia",
-        "rapids_conda_packages": f"rapids={nightly_version} python=3.11 cuda-version=12.5",
+        "rapids_conda_packages": f"rapids={nightly_version} python=3.12 cuda-version=12.5",
     },
 }
 rapids_version = (

From fe84b04b7de3c96542d290a060c94b3f6bed815b Mon Sep 17 00:00:00 2001
From: Jacob Tomlinson
Date: Fri, 11 Oct 2024 13:29:37 +0100
Subject: [PATCH 4/6] Add upper bound pin temporarily to sphinx to avoid breakages (#470)

---
 conda/environments/deployment_docs.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/conda/environments/deployment_docs.yml b/conda/environments/deployment_docs.yml
index 46029fcd..183340b5 100644
--- a/conda/environments/deployment_docs.yml
+++ b/conda/environments/deployment_docs.yml
@@ -10,7 +10,8 @@ dependencies:
   - pydata-sphinx-theme>=0.15.4
   - python=3.12
   - pre-commit>=3.8.0
-  - sphinx>=8.0.2
+  # Upper bound pin on sphinx can be removed once https://github.com/mgaitan/sphinxcontrib-mermaid/issues/160 is resolved
+  - sphinx>=8.0.2,<8.1
   - sphinx-autobuild>=2024.9.19
   - sphinx-copybutton>=0.5.2
   - sphinx-design>=0.6.1

From 1e83a9e33ac33dcd07de0199d6a688d3cefe914d Mon Sep 17 00:00:00 2001
From: Ray Douglass
Date: Fri, 11 Oct 2024 09:08:32 -0400
Subject: [PATCH 5/6] Release v24.10.01

From 7ed98fbe06f5b91e54ea1e926530468dc695d2e9 Mon Sep 17 00:00:00 2001
From: Naty Clementi
Date: Fri, 11 Oct 2024 11:15:01 -0400
Subject: [PATCH 6/6] Fix broken links and some typos (#466)

---
 source/cloud/azure/azure-vm.md                    | 2 +-
 source/examples/rapids-azureml-hpo/notebook.ipynb | 4 ++--
 source/examples/rapids-optuna-hpo/notebook.ipynb  | 4 ++--
 source/platforms/kubeflow.md                      | 2 +-
 source/tools/kubernetes/dask-operator.md          | 6 +++---
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/source/cloud/azure/azure-vm.md b/source/cloud/azure/azure-vm.md
index 0b2ef953..ffcd6d04 100644
--- a/source/cloud/azure/azure-vm.md
+++ b/source/cloud/azure/azure-vm.md
@@ -128,7 +128,7 @@ Next, we can SSH into our VM to install RAPIDS. SSH instructions can be found by
 ### Useful Links
-- [Using NGC with Azure](https://docs.nvidia.com/ngc/ngc-azure-setup-guide/index.html)
+- [Using NGC with Azure](https://docs.nvidia.com/ngc/ngc-deploy-public-cloud/ngc-azure/index.html)
 ```{relatedexamples}

diff --git a/source/examples/rapids-azureml-hpo/notebook.ipynb b/source/examples/rapids-azureml-hpo/notebook.ipynb
index d4bee24c..fd553e13 100644
--- a/source/examples/rapids-azureml-hpo/notebook.ipynb
+++ b/source/examples/rapids-azureml-hpo/notebook.ipynb
@@ -72,7 +72,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Initialize`MLClient`[class](https://learn.microsoft.com/en-us/python/api/azure-ai-ml/azure.ai.ml.mlclient?view=azure-python) to handle the workspace you created in the prerequisites step. \n",
+    "Initialize `MLClient` [class](https://learn.microsoft.com/en-us/python/api/azure-ai-ml/azure.ai.ml.mlclient?view=azure-python) to handle the workspace you created in the prerequisites step. \n",
     "\n",
     "You can manually provide the workspace details or call `MLClient.from_config(credential, path)`\n",
     "to create a workspace object from the details stored in `config.json`"
    ]
   },
@@ -303,7 +303,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We'll be using a custom RAPIDS docker image to [setup the environment]((https://learn.microsoft.com/en-us/azure/machine-learning/how-to-manage-environments-v2?tabs=python#create-an-environment-from-a-docker-image). This is available in `rapidsai/rapidsai` repo on [DockerHub](https://hub.docker.com/r/rapidsai/rapidsai/).\n",
+    "We'll be using a custom RAPIDS docker image to [setup the environment](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-manage-environments-v2?tabs=python#create-an-environment-from-a-docker-image). This is available in `rapidsai/rapidsai` repo on [DockerHub](https://hub.docker.com/r/rapidsai/rapidsai/).\n",
     "\n",
     "Make sure you have the correct path to the docker build context as `os.getcwd()`,"
    ]
   },

diff --git a/source/examples/rapids-optuna-hpo/notebook.ipynb b/source/examples/rapids-optuna-hpo/notebook.ipynb
index 127d08ce..3f16ccf3 100644
--- a/source/examples/rapids-optuna-hpo/notebook.ipynb
+++ b/source/examples/rapids-optuna-hpo/notebook.ipynb
@@ -277,7 +277,7 @@
     " \n",
     "Optuna uses [studies](https://optuna.readthedocs.io/en/stable/reference/study.html) and [trials](https://optuna.readthedocs.io/en/stable/reference/trial.html) to keep track of the HPO experiments. Put simply, a trial is a single call of the objective function while a set of trials make up a study. We will pick the best observed trial from a study to get the best parameters that were used in that run.\n",
     "\n",
-    "Here, `DaskStorage` class is used to set up a storage shared by all workers in the cluster. Learn more about what storages can be used [here](https://optuna.readthedocs.io/en/stable/tutorial/distributed.html)\n",
+    "Here, `DaskStorage` class is used to set up a storage shared by all workers in the cluster. Learn more about what storages can be used [here](https://optuna.readthedocs.io/en/stable/reference/storages.html)\n",
     "\n",
     "`optuna.create_study` is used to set up the study. As you can see, it specifies the study name, sampler to be used, the direction of the study, and the storage.\n",
     "With just a few lines of code, we have set up a distributed HPO experiment."
@@ -347,7 +347,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Conluding Remarks\n",
+    "## Concluding Remarks\n",
     " \n",
     "This notebook shows how RAPIDS and Optuna can be used along with dask to run multi-GPU HPO jobs, and can be used as a starting point for anyone wanting to get started with the framework. We have seen how by just adding a few lines of code we were able to integrate the libraries for a muli-GPU HPO runs. This can also be scaled to multiple nodes.\n",
     " \n",

diff --git a/source/platforms/kubeflow.md b/source/platforms/kubeflow.md
index a9f84065..5c3372ce 100644
--- a/source/platforms/kubeflow.md
+++ b/source/platforms/kubeflow.md
@@ -83,7 +83,7 @@ To use Dask, we need to create a scheduler and some workers that will perform ou
 ### Installing the Dask Kubernetes operator
-To install the operator we need to create any custom resources and the operator itself, please [refer to the documentation](https://kubernetes.dask.org/en/latest/operator_installation.html) to find up-to-date installation instructions. From the terminal run the following command.
+To install the operator we need to create any custom resources and the operator itself, please [refer to the documentation](https://kubernetes.dask.org/en/latest/installing.html) to find up-to-date installation instructions. From the terminal run the following command.
 ```console
 $ helm install --repo https://helm.dask.org --create-namespace -n dask-operator --generate-name dask-kubernetes-operator

diff --git a/source/tools/kubernetes/dask-operator.md b/source/tools/kubernetes/dask-operator.md
index 179a54cf..8997041e 100644
--- a/source/tools/kubernetes/dask-operator.md
+++ b/source/tools/kubernetes/dask-operator.md
@@ -1,7 +1,7 @@
 # Dask Operator
 Many libraries in RAPIDS can leverage Dask to scale out computation onto multiple GPUs and multiple nodes.
-[Dask has an operator for Kubernetes](https://kubernetes.dask.org/en/latest/operator.html) which allows you to launch Dask clusters as native Kubernetes resources.
+[Dask has an operator for Kubernetes](https://kubernetes.dask.org/en/latest/) which allows you to launch Dask clusters as native Kubernetes resources.
 With the operator and associated Custom Resource Definitions (CRDs) you can create `DaskCluster`, `DaskWorkerGroup` and `DaskJob` resources that describe your Dask components and the operator will
@@ -45,7 +45,7 @@ graph TD
 Your Kubernetes cluster must have GPU nodes and have [up to date NVIDIA drivers installed](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/getting-started.html).
-To install the Dask operator follow the [instructions in the Dask documentation](https://kubernetes.dask.org/en/latest/operator_installation.html).
+To install the Dask operator follow the [instructions in the Dask documentation](https://kubernetes.dask.org/en/latest/installing.html).
 ## Configuring a RAPIDS `DaskCluster`
@@ -226,7 +226,7 @@ spec:
 ```
 For the scheduler pod we are also setting the `rapidsai/base` container image, mainly to ensure our Dask versions match between
-the scheduler and workers. We also disable Jupyter and ensure that the `dask-scheduler` command is configured.
+the scheduler and workers. We ensure that the `dask-scheduler` command is configured.
 Then we configure both the Dask communication port on `8786` and the Dask dashboard on `8787` and add some probes so that
 Kubernetes can monitor the health of the scheduler.
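The `DaskCluster` resources discussed in the final patch can also be created from Python rather than YAML. The sketch below is a minimal, illustrative example, not part of the patches above: it assumes the Dask operator is installed in the cluster and the `dask-kubernetes` package is available locally; the cluster name, worker count and image tag are placeholders, and `KubeCluster` keyword arguments may differ between `dask-kubernetes` releases.

```python
# Minimal sketch (assumptions noted above): launch a RAPIDS-capable Dask
# cluster through the Dask operator and connect a client to it.
from dask.distributed import Client
from dask_kubernetes.operator import KubeCluster

cluster = KubeCluster(
    name="rapids-dask",  # hypothetical cluster name
    image="nvcr.io/nvidia/rapidsai/base:24.10-cuda12.5-py3.12",  # RAPIDS image so Dask versions match
    n_workers=2,  # illustrative worker count
    resources={"limits": {"nvidia.com/gpu": "1"}},  # one GPU per worker pod
)

client = Client(cluster)  # same Client(cluster) pattern as the Coiled fix in patch 1
print(client.dashboard_link)  # dashboard served by the scheduler on port 8787

# ... run RAPIDS/Dask work here ...

client.close()
cluster.close()
```

The scheduler created this way listens on port `8786` for Dask communication and `8787` for the dashboard, matching the ports configured in the `DaskCluster` spec described in the patch.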