Commit

Changes to be committed:
	new file:   eval_orb_tum_vi.sh
	new file:   run_MONO_IMU.py
	new file:   settings_TUM_VI.yaml
	modified:   slam_method/OrbSlam3.py
	new file:   slam_method/Settings/TUM_512_cameraIMU.yaml
	modified:   utils.py
Gabriel committed Feb 2, 2021
1 parent 3d0a33a commit cf6a3ac
Showing 6 changed files with 413 additions and 6 deletions.
25 changes: 25 additions & 0 deletions eval_orb_tum_vi.sh
@@ -0,0 +1,25 @@

#!/bin/bash
data="Dataset/TUM_VI/dataset-corridor4_512_16"
settings="./settings_TUM_VI.yaml"
save_path="./results_tum_vi_corridor4"
poseid=1
type='TUM_VI'
gt_depth_path=
gt_pose_dir="Dataset/TUM_VI/dataset-corridor4_512_16/dso/gt_imu.csv"
gt_pose_txt=
names_to_plot="TUM_VI_corridor4"


python3 run_MONO_IMU.py --dataset $data \
    --settings $settings \
    --dest $save_path \
    --pose_id $poseid \
    --data_type $type \
    --gt_pose_dir $gt_pose_dir \
    --named $names_to_plot \
    --is_bash


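For reference, the TUM-VI sequences ship in the EuRoC-style layout; a sketch of the directory structure the script above assumes (exact file names may vary between sequences):

    Dataset/TUM_VI/dataset-corridor4_512_16/
    ├── dso/
    │   └── gt_imu.csv        # ground-truth trajectory passed via --gt_pose_dir
    └── mav0/
        ├── cam0/
        │   ├── data/         # 512x512 grayscale frames named by timestamp
        │   └── data.csv
        └── imu0/
            └── data.csv      # accelerometer and gyroscope samples at 200 Hz
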
226 changes: 226 additions & 0 deletions run_MONO_IMU.py
@@ -0,0 +1,226 @@
import os
import cv2
import numpy as np
import slampy
import argparse
import yaml
from tqdm import tqdm
from utils import *
from kitti_odometry import KittiEvalOdom


def run(args):
    """Run the SLAM system on each frame of the dataset and save depths and poses.

    Args:
        args: command line arguments
    """
    setting_file = args.settings
    if not os.path.exists(setting_file):
        raise ValueError(f"Cannot find setting file at {setting_file}")
    if args.pose_id < -1:
        raise ValueError("Pose index must be >= -1")

    with open(args.settings) as fs:
        settings_yaml = yaml.safe_load(fs)
    print("\nAlgorithm " + settings_yaml["SLAM.alg"] + " has been set\n")

    print("Dataset selected: " + os.path.basename(args.dataset) + "\n")

    app = slampy.System(setting_file, slampy.Sensor.MONOCULAR_IMU)

    print("\n")

    if args.data_type == "TUM_VI":
        image_filenames, timestamps = load_images_TUM_VI(args.dataset)
    elif args.data_type == "OTHERS":
        image_filenames, timestamps = load_images_OTHERS(args.dataset)
    else:
        raise ValueError(f"Unsupported data type {args.data_type} for MONOCULAR_IMU")

    num_images = len(image_filenames)

    # NOTE: IMU loading is currently implemented only for TUM_VI
    if args.data_type == "TUM_VI":
        acc_data, gyro_data, IMUtimestamps = load_IMU_datas_TUM_VI(args.dataset)

    dest_depth = os.path.join(args.dest, "depth")
    dest_pose = os.path.join(args.dest, "pose")

    create_dir(dest_depth)
    create_dir(dest_pose)

    states = []
    errors = []

    # Find the first useful IMU measurement, assuming the IMU starts
    # recording well before the camera.
    firstIMU = 0
    while IMUtimestamps[firstIMU] <= timestamps[0]:
        firstIMU += 1
    firstIMU -= 1

    # Array of valid IMU measurements; one measurement is 7 floats:
    # [acc x, acc y, acc z, gyro x, gyro y, gyro z, timestamp]
    imu = []
    with tqdm(total=num_images) as pbar:
        for idx, image_name in enumerate(image_filenames):
            # TODO: it is the image loader's duty to provide correct images
            image = cv2.imread(image_name)
            if image is None:
                raise ValueError(f"failed to load image {image_name}")

            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            imu.clear()  # clear the IMU measurements of the last frame

            if idx > 0:
                # Select only the IMU measurements that occur at or before
                # the timestamp of the current frame.
                while IMUtimestamps[firstIMU] <= timestamps[idx]:
                    imu_valid_meas = acc_data[firstIMU] + gyro_data[firstIMU]
                    imu_valid_meas.append(IMUtimestamps[firstIMU])
                    imu.append(imu_valid_meas)
                    firstIMU += 1

            state = app.process_image_imu_mono(image, timestamps[idx], np.array(imu))

            # NOTE: we build a default invalid depth for the case of system failure
            if state == slampy.State.OK:
                depth = app.get_depth()
                pose_past_frame_to_current = app.get_pose_to_target(
                    precedent_frame=args.pose_id
                )
                name = os.path.splitext(os.path.basename(image_name))[0]

                depth_path = os.path.join(dest_depth, name)
                save_depth(depth_path, depth)

                pose_path = os.path.join(dest_pose, name)
                save_pose(pose_path, pose_past_frame_to_current)

                curr_pose = app.get_pose_to_target(-1)
                if curr_pose is not None:
                    save_pose_txt(args, name, curr_pose)

                if args.is_evaluate_depth:
                    gt_file_path = os.path.join(args.gt_depth, "{}.png".format(name))
                    err = get_error(args, name, depth, gt_file_path)
                    errors.append(err)

            states.append(state)
            pbar.update(1)

    if args.is_evaluate_depth:
        mean_errors = np.array(errors).mean(0)
        save_results = os.path.join(args.dest, "results.txt")
        save_depth_err_results(save_results, "mean values", mean_errors)

    # NOTE: final dump of the log.txt file
    with open(os.path.join(args.dest, "log.txt"), "w") as f:
        for i, state in enumerate(states):
            f.write(f"{i}: {state}\n")

    if args.is_evaluate_pose:
        print("Begin to evaluate the predicted pose")
        evaluate_pose(args)
        eval_tool = KittiEvalOdom()
        eval_tool.eval(args)


parser = argparse.ArgumentParser(
    description="Run the SLAM system and save, at each frame, the current depth and pose"
)

parser.add_argument(
    "--dataset",
    type=str,
    default="/media/Datasets/KITTI_VO/dataset/sequences/10",
    help="path to dataset",
)
parser.add_argument(
    "--settings",
    type=str,
    default="./settings_kitty.yaml",
    help="which configuration?",
)
parser.add_argument(
    "--dest",
    type=str,
    default="./results_kitty_vo_10",
    help="where do we save artefacts?",
)
parser.add_argument(
    "--pose_id",
    type=int,
    default=-1,
    help="between which frames do you want to compute the pose? If pose_id==-1, get the pose between 0->T; \
        if pose_id>0, compute the pose between T-pose_id->T. \
        For instance, if pose_id=2, compute the pose between T-2->T",
)
parser.add_argument(
    "--is_evaluate_depth",
    default=False,
    action="store_true",
    help="If set, evaluate the ORB depth against the gt files",
)

parser.add_argument(
    "--is_evaluate_pose",
    default=False,
    action="store_true",
    help="If set, evaluate the ORB pose against the gt files",
)

parser.add_argument(
    "--is_bash",
    # default=True,
    action="store_true",
    help="If set, use the bash shell to evaluate",
)

parser.add_argument(
    "--data_type",
    type=str,
    help="which dataset type",
    default="KITTI_VO",
    choices=["TUM", "KITTI_VO", "KITTI", "OTHERS", "TUM_VI"],
)

parser.add_argument(
    "--gt_depth",
    type=str,
    help="the gt depth files of the dataset",
    default="/media/Datasets/KITTI_VO_SGM/10/depth",
)

parser.add_argument(
    "--gt_pose_dir",
    type=str,
    help="each frame's gt pose file, saved as previous to current, with filename current.npy",
    default="/media/Datasets/KITTI_VO_SGM/10/npy_pose",
)

parser.add_argument(
    "--gt_pose_txt",
    type=str,
    help="the gt pose file provided by KITTI or TUM",
    default="/media/Datasets/KITTI_VO/dataset/poses/10.txt",
)

parser.add_argument(
    "--align",
    type=str,
    choices=["scale", "scale_7dof", "7dof", "6dof"],
    default="7dof",
    help="alignment type",
)

parser.add_argument(
    "--named", type=str, help="the name used when saving poses", default="kitty_vo_10"
)


if __name__ == "__main__":
    args = parser.parse_args()
    run(args)
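The per-frame IMU gathering in the loop above can be summarized as a standalone helper. A minimal sketch (the function name imu_window is hypothetical; the inputs mirror what load_IMU_datas_TUM_VI returns, i.e., lists of 3-float accelerometer and gyroscope samples plus their timestamps; the script does the same selection incrementally rather than with a full scan):

    import numpy as np

    def imu_window(acc_data, gyro_data, imu_ts, t_prev, t_curr):
        """Pack rows [acc x, acc y, acc z, gyro x, gyro y, gyro z, t] for t in (t_prev, t_curr]."""
        rows = [
            acc_data[i] + gyro_data[i] + [imu_ts[i]]
            for i in range(len(imu_ts))
            if t_prev < imu_ts[i] <= t_curr
        ]
        return np.array(rows)  # shape (N, 7), as passed to process_image_imu_mono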
28 changes: 28 additions & 0 deletions settings_TUM_VI.yaml
@@ -0,0 +1,28 @@
SLAM.alg: "OrbSlam3"
#--------------------------------------------------------------------------------------------
# SLAM Method: set this to the name of the .py file in slam_method/ in which the SLAM class is defined
#--------------------------------------------------------------------------------------------

#--------------------------------------------------------------------------------------------
# SLAM Params: add your own to run the algorithm
#--------------------------------------------------------------------------------------------
SLAM.vocab_path: "slam_method/Settings/ORBvoc.bin"
SLAM.settings_path: "slam_method/Settings/TUM_512_cameraIMU.yaml"

#--------------------------------------------------------------------------------------------
# Drawer Params: the extra params of trajectory_drawer class
# Params Guide https://slampy.readthedocs.io/en/latest/config.html
#--------------------------------------------------------------------------------------------

Drawer.eye.x: -18.0
Drawer.eye.y: -13.0
Drawer.eye.z: -55.0
Drawer.center.x: -17.0
Drawer.center.y: -8.0
Drawer.scale_grade.x: 1.0
Drawer.scale_grade.y: 1.0
Drawer.scale_grade.z: 10.0
Drawer.aspectratio.x: 50
Drawer.aspectratio.y: 50
Drawer.aspectratio.z: 100
Drawer.point_size: 2
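The SLAM.alg key names the module in slam_method/ that provides the wrapper class (OrbSlam3.py here). A minimal sketch of how such a key could be resolved, assuming importlib-based loading (slampy's actual mechanism may differ):

    import importlib
    import yaml

    with open("settings_TUM_VI.yaml") as fs:
        cfg = yaml.safe_load(fs)

    # resolve "OrbSlam3" to the module slam_method/OrbSlam3.py
    module = importlib.import_module("slam_method." + cfg["SLAM.alg"])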
5 changes: 2 additions & 3 deletions slam_method/OrbSlam3.py
@@ -68,15 +68,14 @@ def process_image_stereo(self, image_left, image_right, tframe):

     def process_image_imu_mono(self, image, tframe, imu):
         if self.sensor_type == Sensor.MONOCULAR_IMU:
-            self.slam.process_image_imu_mono(image, tframe, imu, "0")
+            self.slam.process_image_imu_mono(image, tframe, "0", imu)
         else:
             raise Exception("The sensor type is not MONOCULAR_IMU")

     def process_image_imu_stereo(self, image_left, image_right, tframe, imu):
         if self.sensor_type == Sensor.STEREO_IMU:
-            self.slam.process_image_imu_stereo(
-                image_left, image_right, tframe, imu, "0"
-            )
+            self.slam.process_image_imu_stereo(
+                image_left, image_right, tframe, "0", imu)
         else:
             raise Exception("The sensor type is not STEREO_IMU")

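The change above swaps the last two arguments of the bindings call so that the sequence name ("0") precedes the IMU array. For reference, a hedged usage sketch, given app = slampy.System(...) and an RGB image as in run_MONO_IMU.py (all values illustrative):

    import numpy as np

    # one row per IMU sample: [acc x, acc y, acc z, gyro x, gyro y, gyro z, timestamp]
    imu = np.array([
        [0.02, -9.81, 0.11, 0.001, -0.002, 0.0005, 1520531124.530],
        [0.03, -9.80, 0.10, 0.001, -0.001, 0.0004, 1520531124.535],
    ])
    state = app.process_image_imu_mono(image, 1520531124.550, imu)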
91 changes: 91 additions & 0 deletions slam_method/Settings/TUM_512_cameraIMU.yaml
@@ -0,0 +1,91 @@
%YAML:1.0

#--------------------------------------------------------------------------------------------
# Camera Parameters. Adjust them!
#--------------------------------------------------------------------------------------------
Camera.type: "KannalaBrandt8"
# Camera calibration and distortion parameters (OpenCV)
Camera.fx: 190.978477 # 190.97847715128717
Camera.fy: 190.973307 # 190.9733070521226
Camera.cx: 254.931706 # 254.93170605935475
Camera.cy: 256.897442 # 256.8974428996504

# Equidistant distortion 0.0034823894022493434, 0.0007150348452162257, -0.0020532361418706202, 0.00020293673591811182
#Camera.bFishEye: 1
Camera.k1: 0.003482389402 # 0.0034823894022493434
Camera.k2: 0.000715034845 # 0.0007150348452162257
Camera.k3: -0.002053236141 # -0.0020532361418706202
Camera.k4: 0.000202936736 # 0.00020293673591811182

# Camera resolution
Camera.width: 512
Camera.height: 512

# Camera frames per second
Camera.fps: 20.0

# Color order of the images (0: BGR, 1: RGB. It is ignored if images are grayscale)
Camera.RGB: 1

# Transformation from body-frame (imu) to camera
Tbc: !!opencv-matrix
rows: 4
cols: 4
dt: f
data: [-0.9995250378696743, 0.0075019185074052044, -0.02989013031643309, 0.045574835649698026,
0.029615343885863205, -0.03439736061393144, -0.998969345370175, -0.071161801837997044,
-0.008522328211654736, -0.9993800792498829, 0.03415885127385616, -0.044681254117144367,
0.0, 0.0, 0.0, 1.0]
# Tbc: !!opencv-matrix # from vins mono calibration file
# rows: 4
# cols: 4
# dt: f
# data: [-0.9995250378696743, 0.0075842033363785165, -0.030214670573904204, 0.044511917113940799,
# 0.029940114644659861, -0.034023430206013172, -0.99897246995704592, -0.073197096234105752,
# -0.0086044170750674241, -0.99939225835343004, 0.033779845322755464, -0.047972907300764499,
# 0.0, 0.0, 0.0, 1.0]


# IMU noise (Use those from VINS-mono)
IMU.NoiseGyro: 0.00016 # rad/s^0.5
IMU.NoiseAcc: 0.0028 # m/s^1.5
IMU.GyroWalk: 0.000022 # rad/s^1.5
IMU.AccWalk: 0.00086 # m/s^2.5
IMU.Frequency: 200


#--------------------------------------------------------------------------------------------
# ORB Parameters
#--------------------------------------------------------------------------------------------

# ORB Extractor: Number of features per image
ORBextractor.nFeatures: 1500 # Tested with 1250

# ORB Extractor: Scale factor between levels in the scale pyramid
ORBextractor.scaleFactor: 1.2

# ORB Extractor: Number of levels in the scale pyramid
ORBextractor.nLevels: 8

# ORB Extractor: Fast threshold
# The image is divided into a grid. In each cell, FAST corners are extracted imposing a minimum response.
# First we impose iniThFAST. If no corners are detected, we impose the lower value minThFAST.
# You can lower these values if your images have low contrast
# ORBextractor.iniThFAST: 20
# ORBextractor.minThFAST: 7
ORBextractor.iniThFAST: 20 # 20
ORBextractor.minThFAST: 7 # 7

#--------------------------------------------------------------------------------------------
# Viewer Parameters
#--------------------------------------------------------------------------------------------
Viewer.KeyFrameSize: 0.05
Viewer.KeyFrameLineWidth: 1
Viewer.GraphLineWidth: 0.9
Viewer.PointSize: 2
Viewer.CameraSize: 0.08
Viewer.CameraLineWidth: 3
Viewer.ViewpointX: 0
Viewer.ViewpointY: -0.7
Viewer.ViewpointZ: -3.5 # -1.8
Viewer.ViewpointF: 500
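Tbc above is a 4x4 rigid transform between the camera and IMU frames. A small numpy sketch for applying it and for computing its inverse (illustrative only; which direction Tbc maps depends on the convention in use, so the inverse is shown as well):

    import numpy as np

    Tbc = np.array([
        [-0.99952504,  0.00750192, -0.02989013,  0.04557484],
        [ 0.02961534, -0.03439736, -0.99896935, -0.07116180],
        [-0.00852233, -0.99938008,  0.03415885, -0.04468125],
        [ 0.0,         0.0,         0.0,         1.0       ],
    ])

    # inverse of a rigid transform [R t; 0 1] is [R^T  -R^T t; 0 1]
    R, t = Tbc[:3, :3], Tbc[:3, 3]
    Tcb = np.eye(4)
    Tcb[:3, :3] = R.T
    Tcb[:3, 3] = -R.T @ t

    p = np.array([0.0, 0.0, 1.0, 1.0])  # homogeneous point 1 m along z
    print(Tbc @ p, Tcb @ p)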