diff --git a/README.md b/README.md index 0fa6eef..38e583d 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,8 @@ At the moment, we provide support for: ## Example of usage with ORB_SLAM2 -the jupyter notebook **example_usage** shows how to use ORB_SLAM2 with a sequence of the KITTI dataset +- the jupyter notebook **example_usage** shows how to use ORB_SLAM2 with a sequence of the KITTI dataset +- the jupyter notebook **trajectory_example** draws the camera trajectory and the point cloud ## Change the settings @@ -41,7 +42,7 @@ We provided the vocabulary file and the configuration file for ORB_SLAM2/3 for t ### Docker -We provide a container on dockerhub, in which all all the dipendences and the repository are already installed. +We provide a container on dockerhub, in which all the dependencies and the repository are already installed. ``` docker pull giordanolaminetti/slampy:tag diff --git a/docs/requirements.txt b/docs/requirements.txt index ca42ff0..c226198 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -7,6 +7,7 @@ pytest==6.1.1 pytest-black==0.3.12 tqdm==4.50.2 opencv-contrib-python==3.4.11.45 +sphinx-argparse==0.2.5 matplotlib Pillow numpy diff --git a/docs/source/algorithm.rst b/docs/source/algorithm.rst new file mode 100644 index 0000000..fa6a78d --- /dev/null +++ b/docs/source/algorithm.rst @@ -0,0 +1,143 @@ +======================= +add your own algorithm +======================= +.. 
_addmethod: + +you can add you own algorithm to Slampy by adding a new ``your_method_name.py`` file with the specs found below, to the ``slam_method`` folder + +the file must contain a class named ``Slam`` with the method shown below :: + + import sys + sys.path.append("..") + from slampy import Sensor + from slampy import State + + class Slam: + def __init__(self, params, sensor_type): + + def process_image_mono(self, image, tframe): + + def process_image_stereo(self, image_left, image_right, tframe): + + def process_image_imu_mono(self, image, tframe, imu): + + def process_image_imu_stereo(self, image_left, image_right, tframe, imu): + + def process_image_rgbd(self, image, tframe): + + def get_pose_to_target(self): + + def get_abs_cloud(self): + + def get_camera_matrix(self): + + def get_state(self): + + def reset(self): + + def shutdown(self): + +---------------------- +Slam class references +---------------------- + +In this section we describe all the methods with the parameters that you need to write in order to add your algorithm to Slampy. + +You can add your own accessor method, but the method described below must be present with this signature and return the exact value as described + +.. py:class:: Slam + + .. py:method:: __init__(self, params, sensor_type) + + Initialize your algorithm with the params from the settings.yaml and the sensor type + + :param dict params: the setting.yaml params conver to Python dictionary + :param Pyslam.Sensor sensor_type: the type of the sensor as istance of Sensor Class + + + **there are one process_image method for each of the sensor type in Sensor Class. If your algorithm hasn't one of this methods you can add an exception every time that one calls that sensor processing method** + + .. 
py:method:: process_image_mono(self, image , tframe) + + pass the image to the slam system and compute the tracking + + :param numpy.ndarray image: an RGB image + :param float tframe: the timestamp in which the frame is caputred + :raises Exception: if the Sensor is different from MONOCULAR + + + .. py:method:: process_image_stereo(self, image_left,image_right , tframe) + + pass the images to the slam system and compute the tracking + + :param numpy.ndarray image_left: an RGB image + :param numpy.ndarray image_right: an RGB image + :param float tframe: the timestamp in which the frame is caputred + :raises Exception: if the Sensor is different from STEREO + + + .. py:method:: process_image_imu_mono(self, image , tframe, imu) + + pass the image to the slam system and compute the tracking + + :param numpy.ndarray image_left: an RGB image + :param numpy.ndarray image_right: an RGB image + :param float tframe: the timestamp in which the frame is caputred + :param numpy.ndarray imu: the imu data imu data stored in an float array in the form of [ AccX ,AccY ,AccZ, GyroX, vGyroY, vGyroZ, Timestamp] + :raises Exception: if the Sensor is different from MONOCULAR_IMU + + .. py:method:: process_image_imu_stereo(self, image_left, image_right , tframe, imu) + + pass the images to the slam system and compute the tracking + + :param numpy.ndarray image_left: an RGB image + :param numpy.ndarray image_right: an RGB image + :param float tframe: the timestamp in which the frame is caputred + :param numpy.ndarray imu: the imu data imu data stored in an float array in the form of [ AccX ,AccY ,AccZ, GyroX, vGyroY, vGyroZ, Timestamp] + :raises Exception: if the Sensor is different from STEREO_IMU + + .. 
py:method:: process_image_rgbd(self, image , tframe) + + pass the image to the slam system and compute the tracking + + :param numpy.ndarray image: an RGBD image + :param float tframe: the timestamp in which the frame is caputred + :raises Exception: if the Sensor is different from RGBD + + + .. py:method:: get_pose_to_target(self) + + return the pose between the references frame (usually the first frame) and the current one T. + + :return: the pose computed from the references frame to the actual ones, None if the tracking is failed + :rtype: a 4x4 numpy array + + .. py:method:: get_abs_cloud(self) + + return the point cloud at the current frame stored in absolute coordinates (coordinates from the references frame) + + :return: an array with the 3D coordinate of the point, None if the traking is failed + :rtype: a nx3 numpy array + + .. py:method:: get_camera_matrix(self) + + return the intrinsec parameter of cameras + + :return: an array with the intrinsec parameter of cameras + :rtype: a 4x4 numpy array + + .. py:method:: get_state(self) + + return the current state of the tracking as istance of State Class + + :return: the state of the tracking + :rtype: Slampy.State + + .. py:method:: reset(self) + + reset/initialize the slam tracking + + .. py:method:: shutdown(self) + + shutdown the slam algorithm + \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 083962b..c8d0510 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -37,6 +37,7 @@ "nbsphinx", "sphinxcontrib.bibtex", "sphinx_rtd_theme", + "sphinxarg.ext", ] napoleon_google_docstring = True diff --git a/docs/source/config.rst b/docs/source/config.rst new file mode 100644 index 0000000..1b85eac --- /dev/null +++ b/docs/source/config.rst @@ -0,0 +1,31 @@ +======================= +settings.yaml +======================= +.. _settings: + +it is the configuration file and it is used to change the current algorithm and the other parameters of the algorithm. 
+ +---------------------------- +change the current algorithm +---------------------------- + +To change the algorithm settings you can modify the **settings.yaml** file in the line :: + + SLAM.alg:'insert here your .py file that contains the wrapper class' + +--------------------------------- +ORB_SLAM2 and ORB_SLAM3 settings +--------------------------------- + +the other params for the ORB_SLAM2/3 algorithm are : + +- **SLAM.vocab_path**: "Path to the ORB_SLAM2/3 vocabulary file" +- **SLAM.settings_path**: "Path to the ORB_SLAM2/3 .yaml settings file" + +--------------------------------- +add your own settings +--------------------------------- + +if you add your method with :ref:`this ` you can add in **settings.yaml** the params needed for your application execution in the form :: + + params_name : 'params_values' diff --git a/docs/source/docker.rst b/docs/source/docker.rst new file mode 100644 index 0000000..2ba0bd4 --- /dev/null +++ b/docs/source/docker.rst @@ -0,0 +1,71 @@ +======================= +Docker Container +======================= + +.. _docker_container: + + +We provide a container on dockerhub, where all the dependencies and the repository are already installed. + +------------------------- +Download docker container +------------------------- +On docker hub there are 4 containers with different tagnames : + +- ``base`` the base container in which only the dependencies are installed +- ``orbslam2`` a container with only the ORB_SLAM2 library installed +- ``orbslam3`` a container with only the ORB_SLAM3 library installed +- ``latest`` a container with all the libraries installed + +for downloading the container you can type :: + + docker pull giordanolaminetti/slampy:tagname + +---------------------- +Build docker container +---------------------- +you can also build your own container, with the shell in the root path, typing this command :: + + docker build . 
-t slampy [--build-arg arg=value]* +you can change the image name by replacing ``slampy`` with the desired image name, the ``build-arg`` values can be: + +- ``base_container`` (String) you can change the base container, by default it is ``ubuntu:18.04``, it can also be used to install multiple algorithms +- ``build_dependences`` (Bool) build and install the dependency packages (pangolin, Eigen, Opencv, Python3.8, ...) default ``true`` +- ``build_orbslam2`` (Bool) build and install ORB_SLAM2 and the Python wrapper default ``true`` +- ``build_orbslam3`` (Bool) build and install ORB_SLAM3 and the Python wrapper default ``true`` + +For example if you want to build only the ORB_SLAM2 package from the base you can type :: + + docker build . -t slampy:orbslam2 --build-arg build_dependences=false --build-arg build_orbslam3=false --build-arg base_container=giordanolaminetti/slampy:base + +For example if you want to install only ORB_SLAM2 and ORB_SLAM3 you can type :: + + docker build . -t slampy:orbslam2+3 --build-arg build_dependences=false --build-arg build_orbslam2=false --build-arg base_container=giordanolaminetti/slampy:orbslam2 + +---------------- +Run docker image +---------------- + +When the image is ready, you can create a new container running: :: + + NAME="orb" + DATAPATH="/PATH/TO/KITTI/DATE/DRIVE_SYNC_FOLDER/" + IMAGE_NAME="giordanolaminetti/slampy" + TAG="latest" + sudo docker run -it --rm \ + --name $NAME \ + --mount type=bind,source="$(pwd)",target=/slampy/slampy \ + -v $DATAPATH:"/slampy/slampy/Dataset":ro \ + -p 8888:8888 \ + $IMAGE_NAME:$TAG /bin/bash + + +Doing so, the created container contains both the code and the Dataset (in read-only mode to prevent wrong behaviours) + +You can have a test with the `jupyter` example running: :: + + jupyter notebook --ip 0.0.0.0 + +after you close the connection the container will be removed; if you want to preserve it then remove the ``--rm`` option diff --git a/docs/source/index.rst b/docs/source/index.rst index 
db4a24f..cf62978 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -4,8 +4,11 @@ SlamPy .. toctree:: :maxdepth: 2 - :caption: Get Started introduction + installation modules + docker + config + algorithm .. bibliography:: references.bib diff --git a/docs/source/installation.rst b/docs/source/installation.rst new file mode 100644 index 0000000..d88cc47 --- /dev/null +++ b/docs/source/installation.rst @@ -0,0 +1,36 @@ +======================= +Installation +======================= +.. _installation: + +this project requires the installation of at least one of the supported algorithms; each installation is described in its own section. +We also provide a Docker container on DockerHub, but you can also build your own container; for all the instructions see :ref:`Docker container `. + +Install ORB_SLAM2 +---------------------- + +for ORB_SLAM2 you need to install a modified version of the original that can be found at `ORB_SLAM2 `_ , + +after you have correctly installed the C++ version you need to install the python wrapper from here `ORB_SLAM2-PythonBindings `_ + + +Install ORB_SLAM3 +---------------------- + +similarly to ORB_SLAM2, for ORB_SLAM3 we use a modified version of ORB_SLAM3 that can be found at `ORB_SLAM3 `_ + +after you have correctly installed the C++ version you need to install the python wrapper from here `ORB_SLAM2-PythonBindings `_ + +Install Slampy +--------------------- +you need to install poetry; the instructions can be found at this `link `_ + +after the installation of the dependencies you can download the repo :: + + git clone https://github.com/GiordanoLaminetti/SlamPy.git + +enter in the root folder and type :: + + poetry shell + +this creates a python venv in which all the requirements are installed \ No newline at end of file diff --git a/docs/source/introduction.rst b/docs/source/introduction.rst index 76e2bf8..694c293 100644 --- a/docs/source/introduction.rst +++ b/docs/source/introduction.rst @@ -2,4 +2,35 @@ Introduction 
============================================ -Run SLAM systems +This project is a python wrapper for SLAM algorithms. +At the moment, we provide support for: + +- ORB_SLAM2 +- ORB_SLAM3 + +The installation rules can be found :ref:`here ` . + +----------------- +Example of usage +----------------- + +two jupyter notebooks can be found in the project: + +- **example_usage** shows how to use ORB_SLAM2 with a sequence of the KITTI dataset + +- **trajectory_example** which provides a video sequence that draws the camera trajectory and the point cloud + +there is also a **run.py** script which, given a video sequence, saves the trajectory as a .txt file; for the possible arguments see the :ref:`Run.py references ` + +To change the algorithm and its parameter settings see :ref:`parameters file ` + +------- +Credits +------- + +Our project has been developed starting from other repositories, in particular: + +- Python bindings to ORB Slam are based on the `repo `_ +- ORB Slam 2 has been cloned from `the ORB_SLAM2 original repository `_ +- ORB Slam 3 has been cloned from `the ORB_SLAM3 original repository `_ + diff --git a/docs/source/run.rst b/docs/source/run.rst index 4319bca..65578c9 100644 --- a/docs/source/run.rst +++ b/docs/source/run.rst @@ -1,7 +1,12 @@ run module ========== +.. _run_module: .. automodule:: run - :members: :undoc-members: :show-inheritance: + + .. 
argparse:: + :module: run + :func: parser + :prog: run \ No newline at end of file diff --git a/run.py b/run.py index 1542023..edcb94a 100644 --- a/run.py +++ b/run.py @@ -92,99 +92,101 @@ def run(args): eval_tool.eval(args) +parser = argparse.ArgumentParser( + description="Run the SLAM system and save, at each frame, the current depth and pose" +) + +parser.add_argument( + "--dataset", + type=str, + default="/media/Datasets/KITTI_VO/dataset/sequences/10", + help="path to dataset", +) +parser.add_argument( + "--settings", + type=str, + default="./settings_kitty.yaml", + help="which configuration?", +) +parser.add_argument( + "--dest", + type=str, + default="./results_kitty_vo_10", + help="where do we save artefacts?", +) +parser.add_argument( + "--pose_id", + type=int, + default=-1, + help="between which frames do you want compute the pose? If pose_id==-1, get the pose between 0->T; \ + if pose_id >0, compute the pose between T-pose_id->T \ + For instance, if pose_id=2 then compute the pose between T-2->T", +) +parser.add_argument( + "--is_evalute_depth", + default=True, + action="store_true", + help="If set, will evalute the orb depth with the gt files ", +) + +parser.add_argument( + "--is_evalute_pose", + default=True, + action="store_true", + help="If set, will evalute the orb pose with the gt files", +) + +parser.add_argument( + "--is_bash", + # default=True, + action="store_true", + help="If set, means use bash shell to evaluate", +) + +parser.add_argument( + "--data_type", + type=str, + help="which dataset type", + default="KITTI_VO", + choices=["TUM", "KITTI_VO", "KITTI"], +) + +parser.add_argument( + "--gt_depth", + type=str, + help="the gt depth files of the dataset", + default="/media/Datasets/KITTI_VO_SGM/10/depth", +) +# /media/Datasets/TUM/freiburg3_convert/depth + +parser.add_argument( + "--gt_pose_dir", + type=str, + help="each frame's gt pose file, saved as previous to current, and filename as current.npy", + 
default="/media/Datasets/KITTI_VO_SGM/10/npy_pose", +) + +parser.add_argument( + "--gt_pose_txt", + type=str, + help="this is the gt pose file provided by kitty or tum.", + default="/media/Datasets/KITTI_VO/dataset/poses/10.txt", +) + +parser.add_argument( + "--align", + type=str, + choices=["scale", "scale_7dof", "7dof", "6dof"], + default="7dof", + help="alignment type", +) + +parser.add_argument( + "--named", type=str, help="the names for saving pose", default="kitty_vo_10" +) + + if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Run the SLAM system and save, at each frame, the current depth and pose" - ) - - parser.add_argument( - "--dataset", - type=str, - default="/media/Datasets/KITTI_VO/dataset/sequences/10", - help="path to dataset", - ) - parser.add_argument( - "--settings", - type=str, - default="./settings_kitty.yaml", - help="which configuration?", - ) - parser.add_argument( - "--dest", - type=str, - default="./results_kitty_vo_10", - help="where do we save artefacts?", - ) - parser.add_argument( - "--pose_id", - type=int, - default=-1, - help="between which frames do you want compute the pose? 
If pose_id==-1, get the pose between 0->T; \ - if pose_id >0, compute the pose between T-pose_id->T \ - For instance, if pose_id=2 then compute the pose between T-2->T", - ) - parser.add_argument( - "--is_evalute_depth", - default=True, - action="store_true", - help="If set, will evalute the orb depth with the gt files ", - ) - - parser.add_argument( - "--is_evalute_pose", - default=True, - action="store_true", - help="If set, will evalute the orb pose with the gt files", - ) - - parser.add_argument( - "--is_bash", - # default=True, - action="store_true", - help="If set, means use bash shell to evaluate", - ) - - parser.add_argument( - "--data_type", - type=str, - help="which dataset type", - default="KITTI_VO", - choices=["TUM", "KITTI_VO", "KITTI"], - ) - - parser.add_argument( - "--gt_depth", - type=str, - help="the gt depth files of the dataset", - default="/media/Datasets/KITTI_VO_SGM/10/depth", - ) - # /media/Datasets/TUM/freiburg3_convert/depth - - parser.add_argument( - "--gt_pose_dir", - type=str, - help="each frame's gt pose file, saved as previous to current, and filename as current.npy", - default="/media/Datasets/KITTI_VO_SGM/10/npy_pose", - ) - - parser.add_argument( - "--gt_pose_txt", - type=str, - help="this is the gt pose file provided by kitty or tum.", - default="/media/Datasets/KITTI_VO/dataset/poses/10.txt", - ) - - parser.add_argument( - "--align", - type=str, - choices=["scale", "scale_7dof", "7dof", "6dof"], - default="7dof", - help="alignment type", - ) - - parser.add_argument( - "--named", type=str, help="the names for saving pose", default="kitty_vo_10" - ) args = parser.parse_args() run(args)