From 86684cdc67f3119ce2893790a0f0232ae4573c5a Mon Sep 17 00:00:00 2001
From: Adeel Hassan
Date: Thu, 4 Jan 2024 11:45:19 -0500
Subject: [PATCH] add some documentation about distributed training

---
 docs/setup/gpu.rst                                 | 49 ++++++++++++++-----
 .../rastervision/pipeline/rv_config.py             |  8 +--
 2 files changed, 42 insertions(+), 15 deletions(-)

diff --git a/docs/setup/gpu.rst b/docs/setup/gpu.rst
index 50e9247fe..96e5f3363 100644
--- a/docs/setup/gpu.rst
+++ b/docs/setup/gpu.rst
@@ -3,7 +3,7 @@
 Using GPUs
 ==========
 
-To run Raster Vision on a realistic dataset in a reasonable amount of time, it is necessary to use a machine with a GPU. Note that Raster Vision will use a GPU if it detects that one is available.
+To run Raster Vision on a realistic dataset in a reasonable amount of time, it is necessary to use a machine with one or more GPUs. Note that Raster Vision will automatically use all available GPUs.
 
 If you don't own a machine with a GPU, it is possible to rent one by the minute using a cloud provider such as AWS. See :doc:`aws`.
 
@@ -18,14 +18,7 @@ One way to check this is to make sure PyTorch can see the GPU(s). To do this, op
 
     import torch
     torch.cuda.is_available()
-    torch.cuda.get_device_name(0)
-
-This should print out something like:
-
-.. code-block:: console
-
-    True
-    Tesla K80
+    torch.cuda.device_count()
 
 If you have `nvidia-smi `_ installed, you can also use this command to inspect GPU utilization while the training job is running:
 
@@ -40,10 +33,44 @@ If you would like to run Raster Vision in a Docker container with GPUs, you'll n
 
 First, you'll need to install the `nvidia-docker `_ runtime on your system. Follow their `Quickstart `_ and installation instructions. Make sure that your GPU is supported by NVIDIA Docker - if not you might need to find another way to have your Docker container communicate with the GPU. If you figure out how to support more GPUs, please let us know so we can add the steps to this documentation!
 
-When running your Docker container, be sure to include the ``--runtime=nvidia`` option, e.g.
+When running your Docker container, be sure to include the ``--gpus=all`` option, e.g.
 
 .. code-block:: console
 
-   > docker run --runtime=nvidia --rm -it quay.io/azavea/raster-vision:pytorch-{{ version }} /bin/bash
+   > docker run --gpus=all --rm -it quay.io/azavea/raster-vision:pytorch-{{ version }} /bin/bash
 
 or use the ``--gpu`` option with the ``docker/run`` script.
+
+.. _distributed:
+
+Using multiple GPUs (distributed training)
+------------------------------------------
+
+Raster Vision supports distributed training (multi-node and multi-GPU) via `PyTorch DDP `_.
+
+It can be used in the following ways:
+
+- Run Raster Vision normally on a multi-GPU machine. Raster Vision will automatically detect the multiple GPUs and use distributed training when ``Learner.train()`` is called.
+- Run Raster Vision using the `torchrun CLI command `_. For example, to run on a single machine with 4 GPUs (a multi-node sketch is included at the end of this section):
+
+  .. code-block:: console
+
+     torchrun --standalone --nnodes=1 --nproc-per-node=4 --no-python \
+         rastervision run local rastervision_pytorch_backend/rastervision/pytorch_backend/examples/tiny_spacenet.py
+
+Other considerations
+~~~~~~~~~~~~~~~~~~~~
+
+- Config variables that may be :ref:`set via environment or RV config ` (also documented `here `_); see the combined example after this list:
+
+  - ``RASTERVISION_USE_DDP``: ``YES`` by default. Set to ``NO`` to disable distributed training.
+  - ``RASTERVISION_DDP_BACKEND``: ``nccl`` by default. This is the recommended backend for CUDA GPUs.
+  - ``RASTERVISION_DDP_START_METHOD``: One of ``spawn``, ``fork``, or ``forkserver``. Passed to :func:`torch.multiprocessing.start_processes`. Default: ``spawn``.
+
+    - ``spawn`` is what the PyTorch documentation recommends (in fact, it does not even mention the alternatives), but it has the disadvantage that it requires everything to be picklable, which rasterio dataset objects are not. The same is true of ``forkserver``, which needs to spawn a server process; ``fork``, however, does not have this limitation.
+    - If the start method is not ``fork``, we avoid building the datasets in the base process and instead delay it until the worker processes have been created.
+    - If the start method is ``fork`` or ``forkserver``, the CUDA runtime must not be initialized before the fork happens; otherwise, a ``RuntimeError: Cannot re-initialize CUDA in forked subprocess.`` error will be raised. We avoid this by not calling any ``torch.cuda`` functions or creating tensors on the GPU before the fork.
+
+- To avoid having to re-download files for each process when building datasets, it is recommended to :meth:`manually specify a temporary directory <.RVConfig.set_tmp_dir_root>`; otherwise, each process will use a separate, randomly generated temporary directory. When a single temp directory is set, Raster Vision avoids IO conflicts by building the datasets in the master process (rank 0) first and only afterwards in the other processes, so that they can reuse the already downloaded files. (The example below sets ``TMPDIR`` for this purpose.)
+- A similar problem occurs when downloading external models/losses, but in this case the build-on-master-first strategy does not work: the model needs to be created by the same line of code in each process. Therefore, the files are downloaded separately for each process; this is done by modifying ``TORCH_HOME`` to ``$TORCH_HOME/``. Only the master process copies the downloaded files to the training directory.
+- Raster Vision will use all available GPUs by default. To override this, set the ``WORLD_SIZE`` environment variable (see the example below).
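+
+As an illustration of the variables above, here is one hypothetical way to combine them when launching a run on a multi-GPU machine. The temp directory path and the values shown are placeholders, not recommendations:
+
+.. code-block:: console
+
+   # Shared temp directory so that worker processes reuse downloaded files.
+   export TMPDIR=/opt/data/tmp
+   # Use only 2 of the available GPUs and fork the worker processes.
+   export WORLD_SIZE=2
+   export RASTERVISION_DDP_START_METHOD=fork
+   rastervision run local rastervision_pytorch_backend/rastervision/pytorch_backend/examples/tiny_spacenet.py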
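+
+For multi-node training, the ``torchrun`` command shown earlier can be extended with rendezvous options. The following is only a rough sketch for two nodes with 4 GPUs each, to be run on every participating node; the rendezvous endpoint is a placeholder and must point to a host reachable from all nodes (see the torchrun documentation for the full set of options):
+
+.. code-block:: console
+
+   torchrun --nnodes=2 --nproc-per-node=4 \
+       --rdzv-backend=c10d --rdzv-endpoint=<host>:29400 --no-python \
+       rastervision run local rastervision_pytorch_backend/rastervision/pytorch_backend/examples/tiny_spacenet.py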
diff --git a/rastervision_pipeline/rastervision/pipeline/rv_config.py b/rastervision_pipeline/rastervision/pipeline/rv_config.py
index a5cd50667..14ba56680 100644
--- a/rastervision_pipeline/rastervision/pipeline/rv_config.py
+++ b/rastervision_pipeline/rastervision/pipeline/rv_config.py
@@ -80,10 +80,10 @@ def set_tmp_dir_root(self, tmp_dir_root: Optional[str] = None):
 
         To set the value, the following rules are used in decreasing priority:
 
-        1) the tmp_dir_root argument if it is not None
-        2) an environment variable (TMPDIR, TEMP, or TMP)
-        3) a default temporary directory which is
-        4) a directory returned by tempfile.TemporaryDirectory()
+        1) the ``tmp_dir_root`` argument if it is not ``None``
+        2) an environment variable (``TMPDIR``, ``TEMP``, or ``TMP``)
+        3) a default temporary directory which is a directory returned by
+           :class:`tempfile.TemporaryDirectory`
         """
         # Check the various possibilities in order of priority.
         env_arr = [