diff --git a/README.md b/README.md index 41daaf121..6bfac65e2 100644 --- a/README.md +++ b/README.md @@ -74,6 +74,7 @@ sudo apt install libboost-all-dev ``` git clone https://github.com/HorizonRobotics/alf cd alf +pip install pybind11 pip install -e . --extra-index-url https://download.pytorch.org/whl/cu118 ``` diff --git a/alf/utils/tensorrt_utils.py b/alf/utils/tensorrt_utils.py index d2fb21e76..7c04d5533 100644 --- a/alf/utils/tensorrt_utils.py +++ b/alf/utils/tensorrt_utils.py @@ -41,12 +41,9 @@ # How to install dependencies (in a virtual env) on the deployment machine: # ```bash -# pip install onnx +# pip install "onnx>=1.16.2" protobuf==3.20.2 # -# pip install tensorrt -# After this, tensorrt .so files will be installed in your virtual env: .../site-packages/tensorrt_lib/ -# You need to copy all .so files under it to /usr/local/cuda/targets/x86_64-linux/lib/ -# Or alternatively, you can install it system-wide from https://developer.nvidia.com/tensorrt-getting-started +# pip install "tensorrt>=10.0" # For cuda 11.x, # pip install onnxruntime-gpu @@ -61,6 +58,11 @@ # ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'] # The order of elements represents the default priority order of Execution Providers from highest to lowest. 
+# NOTE: If onnxruntime complains about not finding libnvinfer.so or other tensorrt libs, +# they can be found in your virtual env: .../site-packages/tensorrt_lib/ +# You need to copy all .so files under it to /usr/local/cuda/targets/x86_64-linux/lib/ +# Or alternatively, you can install it system-wide from https://developer.nvidia.com/tensorrt-getting-started + def is_onnxruntime_available(): return backend is not None diff --git a/setup.py b/setup.py index bc4724572..daabbe0ad 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ 'pathos==0.2.4', 'pillow>=8', # 'procgen>=0.10.4', - 'protobuf==3.20.1', + 'protobuf==3.20.2', 'psutil==5.9.8', 'pybullet==2.5.0', 'pyglet==1.3.2', # higher version breaks classic control rendering