
Commit

GiordanoLaminetti committed Apr 18, 2021
2 parents d4e22f6 + 5049f4a commit 4b306fd
Showing 10 changed files with 697 additions and 22 deletions.
25 changes: 25 additions & 0 deletions eval_orb_tum_vi.sh
@@ -0,0 +1,25 @@

#!/bin/bash
data="Dataset/TUM_VI/dataset-corridor4_512_16"
settings="./settings_TUM_VI.yaml"
save_path="./results_tum_vi_corrido4"
poseid=1
type='TUM_VI'
gt_depth_path=
gt_pose_dir="/Dataset/TUM_VI/dataset-corridor4_512_16/dso/gt_imu.csv"
gt_pose_txt=
names_to_plot="TUM_VI_corridor4"


python3 run_MONO_IMU.py --dataset $data \
--settings $settings \
--dest $save_path \
--pose_id $poseid \
--data_type $type \
--gt_pose_dir $gt_pose_dir \
--named $names_to_plot \
--is_bash
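
For reference, the same evaluation run can be driven from Python instead of the shell; below is a minimal sketch using only the standard library, mirroring the flag values in the script above (the dataset and ground-truth paths are the script's placeholders and are assumptions about the local setup):

import subprocess

# Hypothetical driver equivalent to eval_orb_tum_vi.sh; the paths mirror the
# script's placeholders and must exist on the local machine.
cmd = [
    "python3", "run_MONO_IMU.py",
    "--dataset", "Dataset/TUM_VI/dataset-corridor4_512_16",
    "--settings", "./settings_TUM_VI.yaml",
    "--dest", "./results_tum_vi_corrido4",
    "--pose_id", "1",
    "--data_type", "TUM_VI",
    "--gt_pose_dir", "/Dataset/TUM_VI/dataset-corridor4_512_16/dso/gt_imu.csv",
    "--named", "TUM_VI_corridor4",
    "--is_bash",
]
subprocess.run(cmd, check=True)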


50 changes: 33 additions & 17 deletions run.py
@@ -3,6 +3,7 @@
import numpy as np
import slampy
import argparse
import yaml
from tqdm import tqdm
from utils import *
from kitti_odometry import KittiEvalOdom
@@ -19,15 +20,27 @@ def run(args):
raise ValueError(f"Cannot find setting file at {setting_file}")
if args.pose_id < -1:
raise ValueError(f"Pose index must be -1 or >0")


with open(args.settings) as fs:
settings_yaml = yaml.safe_load(fs)
print("\nAlgorithm " + settings_yaml["SLAM.alg"] + " has been set\n")

print("Dataset selected: " + os.path.basename(args.dataset) + "\n")

app = slampy.System(setting_file, slampy.Sensor.MONOCULAR)

print("\n")

# TODO: use a generic loader, not only the KITTI one

if args.data_type == "TUM":
image_filenames, timestamps = load_images_TUM(args.dataset, "rgb.txt")
elif args.data_type == "KITTI_VO":
image_filenames, timestamps = load_images_KITTI_VO(args.dataset)
elif args.data_type == "OTHERS":
image_filenames, timestamps = load_images_OTHERS(args.dataset)

num_images = len(image_filenames)

dest_depth = os.path.join(args.dest, "depth")
@@ -48,44 +61,47 @@ def run(args):
raise ValueError(f"failed to load image {image_name}")

image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

state = app.process_image_mono(image, timestamps[idx])

# NOTE: we build a default invalid depth in the case of system failure
if state == slampy.State.OK:
depth = app.get_depth()
depth = app.get_depth()
pose_past_frame_to_current = app.get_pose_to_target(
precedent_frame=args.pose_id
)
name = os.path.splitext(os.path.basename(image_name))[0]
depth_path = os.path.join(dest_depth, name)
save_depth(depth_path, depth)
name = os.path.splitext(os.path.basename(image_name))[0]

depth_path = os.path.join(dest_depth, name)
save_depth(depth_path, depth)

pose_path = os.path.join(dest_pose, name)
save_pose(pose_path, pose_past_frame_to_current)

curr_pose = app.get_pose_to_target(-1)
curr_pose = app.get_pose_to_target(-1)
if curr_pose is not None:
save_pose_txt(args, name, curr_pose)

if args.is_evalute_depth:
if args.is_evaluate_depth:
gt_file_path = os.path.join(args.gt_depth, "{}.png".format(name))
err = get_error(args, name, depth, gt_file_path)
errors.append(err)

states.append(state)
pbar.update(1)

mean_errors = np.array(errors).mean(0)
save_results = os.path.join(args.dest, "results.txt")
save_depth_err_results(save_results, "mean values", mean_errors)

if args.is_evaluate_depth:
mean_errors = np.array(errors).mean(0)
save_results = os.path.join(args.dest, "results.txt")
save_depth_err_results(save_results, "mean values", mean_errors)


# NOTE: final dump of log.txt file
with open(os.path.join(args.dest, "log.txt"), "w") as f:
for i, state in enumerate(states):
f.write(f"{i}: {state}\n")

if args.is_evalute_pose:
if args.is_evaluate_pose:
print("Begin to evaluate predicted pose")
evaluate_pose(args)
eval_tool = KittiEvalOdom()
@@ -123,15 +139,15 @@ def run(args):
For instance, if pose_id=2 then compute the pose between T-2->T",
)
parser.add_argument(
"--is_evalute_depth",
default=True,
"--is_evaluate_depth",
default=False,
action="store_true",
help="If set, will evalute the orb depth with the gt files ",
)

parser.add_argument(
"--is_evalute_pose",
default=True,
"--is_evaluate_pose",
default=False,
action="store_true",
help="If set, will evalute the orb pose with the gt files",
)
@@ -148,7 +164,7 @@ def run(args):
type=str,
help="which dataset type",
default="KITTI_VO",
choices=["TUM", "KITTI_VO", "KITTI"],
choices=["TUM", "KITTI_VO", "KITTI","OTHERS"],
)

parser.add_argument(
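Note on the argument changes in run.py above: besides correcting the is_evalute_* spelling, both evaluation switches now default to False, so depth and pose evaluation run only when the flags are passed explicitly. A plausible reason, inferred here rather than stated in the commit, is that argparse's store_true action can never turn a True default off from the command line. A standalone illustration, not repository code:

import argparse

# With action="store_true", only a False default makes the flag meaningful:
# omitting it yields False, passing it yields True.
parser = argparse.ArgumentParser()
parser.add_argument("--is_evaluate_depth", default=False, action="store_true")

print(parser.parse_args([]).is_evaluate_depth)                       # False
print(parser.parse_args(["--is_evaluate_depth"]).is_evaluate_depth)  # True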
229 changes: 229 additions & 0 deletions run_MONO_IMU.py
@@ -0,0 +1,229 @@
import os
import cv2
import numpy as np
import slampy
import argparse
import yaml
from tqdm import tqdm
from utils import *
from kitti_odometry import KittiEvalOdom


def run(args):
"""Run SLAM system for each frame in the dataset and save depths and poses.
Args:
args: command line arguments
"""
setting_file = args.settings
if not os.path.exists(setting_file):
raise ValueError(f"Cannot find setting file at {setting_file}")
if args.pose_id < -1:
raise ValueError(f"Pose index must be -1 or >0")


with open(args.settings) as fs:
settings_yaml = yaml.safe_load(fs)
print("\nAlgorithm " + settings_yaml["SLAM.alg"] + " has been set\n")

print("Dataset selected: " + os.path.basename(args.dataset) + "\n")

app = slampy.System(setting_file, slampy.Sensor.MONOCULAR_IMU)

print("\n")

if args.data_type == "TUM_VI":
image_filenames, timestamps = load_images_TUM_VI(args.dataset)
elif args.data_type == "EUROC":
image_filenames, timestamps = load_images_EuRoC(args.dataset)
elif args.data_type == "OTHERS":
image_filenames, timestamps = load_images_OTHERS(args.dataset)

num_images = len(image_filenames)

if args.data_type == "TUM_VI" or args.data_type =="EUROC":
acc_data, gyro_data, IMUtimestamps = load_IMU_datas_TUM_VI(args.dataset)

dest_depth = os.path.join(args.dest, "depth")
dest_pose = os.path.join(args.dest, "pose")

create_dir(dest_depth)
create_dir(dest_pose)

states = []
errors = []


# find the first useful IMU sample, assuming the IMU starts recording well before the camera
firstIMU=0
while(IMUtimestamps[firstIMU]<=timestamps[0]):
firstIMU += 1
firstIMU -= 1

imu = [] # array of valid IMU measurements: one measurement is 7 floats [acc x, acc y, acc z, gyro x, gyro y, gyro z, timestamp]

with tqdm(total=num_images) as pbar:
for idx, image_name in enumerate(image_filenames):
# TODO: it is the image loader's duty to provide correct images
image = cv2.imread(image_name)
if image is None:
raise ValueError(f"failed to load image {image_name}")

image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

imu.clear() # clear IMU measurements from the last frame



if idx > 0: # select only those IMU measurements that occur before the current frame's timestamp
while(IMUtimestamps[firstIMU]<=timestamps[idx]):
imu_valid_meas = (acc_data[firstIMU] + gyro_data[firstIMU])
imu_valid_meas.append(IMUtimestamps[firstIMU])
imu.append(imu_valid_meas)
firstIMU += 1

state = app.process_image_imu_mono(image, timestamps[idx], np.array(imu))

# NOTE: we build a default invalid depth in the case of system failure
if state == slampy.State.OK:
depth = app.get_depth()
pose_past_frame_to_current = app.get_pose_to_target(
precedent_frame=args.pose_id
)
name = os.path.splitext(os.path.basename(image_name))[0]

depth_path = os.path.join(dest_depth, name)
save_depth(depth_path, depth)

pose_path = os.path.join(dest_pose, name)
save_pose(pose_path, pose_past_frame_to_current)

curr_pose = app.get_pose_to_target(-1)
if curr_pose is not None:
save_pose_txt(args, name, curr_pose)

if args.is_evaluate_depth:
gt_file_path = os.path.join(args.gt_depth, "{}.png".format(name))
err = get_error(args, name, depth, gt_file_path)
errors.append(err)

states.append(state)
pbar.update(1)

if args.is_evaluate_depth:
mean_errors = np.array(errors).mean(0)
save_results = os.path.join(args.dest, "results.txt")
save_depth_err_results(save_results, "mean values", mean_errors)


# NOTE: final dump of log.txt file
with open(os.path.join(args.dest, "log.txt"), "w") as f:
for i, state in enumerate(states):
f.write(f"{i}: {state}\n")

if args.is_evaluate_pose:
print("Begin to evaluate predicted pose")
evaluate_pose(args)
eval_tool = KittiEvalOdom()
eval_tool.eval(args)

app.shutdown()


parser = argparse.ArgumentParser(
description="Run the SLAM system and save, at each frame, the current depth and pose"
)

parser.add_argument(
"--dataset",
type=str,
default="/media/Datasets/KITTI_VO/dataset/sequences/10",
help="path to dataset",
)
parser.add_argument(
"--settings",
type=str,
default="./settings_kitty.yaml",
help="which configuration?",
)
parser.add_argument(
"--dest",
type=str,
default="./results_kitty_vo_10",
help="where do we save artefacts?",
)
parser.add_argument(
"--pose_id",
type=int,
default=-1,
help="between which frames do you want compute the pose? If pose_id==-1, get the pose between 0->T; \
if pose_id >0, compute the pose between T-pose_id->T \
For instance, if pose_id=2 then compute the pose between T-2->T",
)
parser.add_argument(
"--is_evaluate_depth",
default=False,
action="store_true",
help="If set, will evalute the orb depth with the gt files ",
)

parser.add_argument(
"--is_evaluate_pose",
default=False,
action="store_true",
help="If set, will evalute the orb pose with the gt files",
)

parser.add_argument(
"--is_bash",
# default=True,
action="store_true",
help="If set, means use bash shell to evaluate",
)

parser.add_argument(
"--data_type",
type=str,
help="which dataset type",
default="KITTI_VO",
choices=["TUM", "KITTI_VO", "KITTI","OTHERS","TUM_VI","EUROC"],
)

parser.add_argument(
"--gt_depth",
type=str,
help="the gt depth files of the dataset",
default="/media/Datasets/KITTI_VO_SGM/10/depth",
)

parser.add_argument(
"--gt_pose_dir",
type=str,
help="each frame's gt pose file, saved as previous to current, and filename as current.npy",
default="/media/Datasets/KITTI_VO_SGM/10/npy_pose",
)

parser.add_argument(
"--gt_pose_txt",
type=str,
help="this is the gt pose file provided by kitty or tum.",
default="/media/Datasets/KITTI_VO/dataset/poses/10.txt",
)

parser.add_argument(
"--align",
type=str,
choices=["scale", "scale_7dof", "7dof", "6dof"],
default="7dof",
help="alignment type",
)

parser.add_argument(
"--named", type=str, help="the names for saving pose", default="kitty_vo_10"
)


if __name__ == "__main__":

args = parser.parse_args()
run(args)
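
The IMU handling in run_MONO_IMU.py above first skips samples recorded before the first camera frame and then, for each later frame, packs every sample with a timestamp up to that frame into rows of [acc x, acc y, acc z, gyro x, gyro y, gyro z, timestamp] before calling process_image_imu_mono. The same per-frame step in isolation, as a minimal sketch with hypothetical plain-Python inputs rather than the repository's loaders:

import numpy as np

def imu_window(acc_data, gyro_data, imu_timestamps, frame_ts, start_idx):
    """Collect IMU rows [ax, ay, az, gx, gy, gz, t] with t <= frame_ts,
    starting at start_idx; return the rows and the next start index."""
    rows = []
    i = start_idx
    while i < len(imu_timestamps) and imu_timestamps[i] <= frame_ts:
        rows.append(list(acc_data[i]) + list(gyro_data[i]) + [imu_timestamps[i]])
        i += 1
    return np.array(rows), i

# Tiny usage example with made-up numbers: two IMU samples precede the frame.
acc = [[0.0, 0.0, 9.8], [0.1, 0.0, 9.8], [0.0, 0.1, 9.8]]
gyro = [[0.0, 0.0, 0.0], [0.01, 0.0, 0.0], [0.0, 0.01, 0.0]]
ts = [0.005, 0.010, 0.015]
window, next_idx = imu_window(acc, gyro, ts, frame_ts=0.012, start_idx=0)
print(window.shape, next_idx)  # (2, 7) 2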
