Skip to content

Commit

Permalink
Update aruco trajectory visualizer node
Browse files Browse the repository at this point in the history
  • Loading branch information
TheNoobInventor committed Apr 29, 2024
1 parent 82758f1 commit 0fde086
Show file tree
Hide file tree
Showing 9 changed files with 322 additions and 3 deletions.
20 changes: 20 additions & 0 deletions lidarbot_aruco/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,3 +39,23 @@ Running the previous script opens a window with the generated ArUco tag displaye

To close the window, press the **q** key on the keyboard on the opened window.

## Camera calibration

The Logitech webcam C270 HD is used in this project and needs to be calibrated.

First install the [ROS usb camera driver](https://index.ros.org/r/usb_cam/#humble) package:

```bash
sudo apt install ros-humble-usb-cam
```

Camera calibration was done following the steps outlined in this [guide](https://navigation.ros.org/tutorials/docs/camera_calibration.html).

TODO: Add a note about the 'dt' field in the calibration file and reference Addison's camera calibration guide.

Execute the command below to run the usb-cam driver node:

```bash
ros2 run usb_cam usb_cam_node_exe --ros-args --params-file ~/dev_ws/src/lidarbot_aruco/config/params_1.yaml
```

4 changes: 4 additions & 0 deletions lidarbot_aruco/config/chessboard_calibration.yaml
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,16 +1,20 @@
%YAML:1.0
---
image_width: 640
image_height: 480
camera_name: narrow_stereo
camera_matrix:
rows: 3
cols: 3
dt: d
data: [842.57825, 0. , 304.89032,
0. , 841.64125, 224.5223 ,
0. , 0. , 1. ]
distortion_model: plumb_bob
distortion_coefficients:
rows: 1
cols: 5
dt: d
data: [0.002363, 0.562980, -0.004719, -0.008675, 0.000000]
rectification_matrix:
rows: 3
Expand Down
23 changes: 23 additions & 0 deletions lidarbot_aruco/config/params_1.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
/**:
ros__parameters:
video_device: "/dev/video0"
framerate: 30.0
io_method: "mmap"
frame_id: "camera"
pixel_format: "mjpeg2rgb" # see usb_cam/supported_formats for list of supported formats
av_device_format: "YUV422P"
image_width: 640
image_height: 480
camera_name: "test_camera"
camera_info_url: "package://usb_cam/config/camera_info.yaml"
brightness: -1
contrast: -1
saturation: -1
sharpness: -1
gain: -1
auto_white_balance: true
white_balance: 4000
autoexposure: true
exposure: 100
autofocus: false
focus: -1
24 changes: 24 additions & 0 deletions lidarbot_aruco/config/params_2.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
/**:
ros__parameters:
video_device: "/dev/video2"
framerate: 15.0
io_method: "mmap"
frame_id: "camera2"
pixel_format: "mjpeg2rgb"
av_device_format: "YUV422P"
image_width: 640
image_height: 480
camera_name: "test_camera2"
# reusing the same camera intrinsics only for demo purposes; they should really be unique to camera2
camera_info_url: "package://usb_cam/config/camera_info.yaml"
brightness: -1
contrast: -1
saturation: -1
sharpness: -1
gain: -1
auto_white_balance: true
white_balance: 4000
autoexposure: true
exposure: 100
autofocus: false
focus: -1
247 changes: 247 additions & 0 deletions lidarbot_aruco/lidarbot_aruco/aruco_trajectory_visualizer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,247 @@
#!/usr/bin/env python3

# This node is used to visualize the trajectory of lidarbot by tracking the ArUco marker
# on it

# Adapted from: https://automaticaddison.com/estimate-aruco-marker-pose-using-opencv-gazebo-and-ros-2/

# Import Python libraries
import cv2
import numpy as np
import argparse
from scipy.spatial.transform import Rotation as R

# Import ROS2 libraries
import rclpy
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data # Uses Best Effort reliability for camera
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
from geometry_msgs.msg import TransformStamped # Handles TransformStamped message
from sensor_msgs.msg import Image # Image is the message type
from tf2_ros import TransformBroadcaster

# Construct argument parser and parse the arguments.
# parse_known_args() (rather than parse_args()) tolerates the extra
# arguments that `ros2 run ... --ros-args ...` appends to argv; plain
# parse_args() would abort the node with an "unrecognized arguments" error.
ap = argparse.ArgumentParser()
ap.add_argument(
    "-t", "--type", type=str, default="DICT_4X4_50", help="type of ArUco tag to detect"
)

args = vars(ap.parse_known_args()[0])

# The different ArUco dictionaries built into the OpenCV library.
# Keys are the names accepted by the CLI "--type" argument and by the
# "aruco_dictionary_name" ROS parameter; values are OpenCV's
# predefined-dictionary enum constants (cv2.aruco.DICT_*).
ARUCO_DICT = {
"DICT_4X4_50": cv2.aruco.DICT_4X4_50,
"DICT_4X4_100": cv2.aruco.DICT_4X4_100,
"DICT_4X4_250": cv2.aruco.DICT_4X4_250,
"DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
"DICT_5X5_50": cv2.aruco.DICT_5X5_50,
"DICT_5X5_100": cv2.aruco.DICT_5X5_100,
"DICT_5X5_250": cv2.aruco.DICT_5X5_250,
"DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
"DICT_6X6_50": cv2.aruco.DICT_6X6_50,
"DICT_6X6_100": cv2.aruco.DICT_6X6_100,
"DICT_6X6_250": cv2.aruco.DICT_6X6_250,
"DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
"DICT_7X7_50": cv2.aruco.DICT_7X7_50,
"DICT_7X7_100": cv2.aruco.DICT_7X7_100,
"DICT_7X7_250": cv2.aruco.DICT_7X7_250,
"DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
"DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
}


class ArucoNode(Node):
    """Tracks the ArUco marker mounted on lidarbot and broadcasts its pose.

    Subscribes to a camera image topic, detects ArUco markers in each frame,
    estimates each detected marker's pose relative to the camera lens frame
    and broadcasts it as a TF transform (parent: "webcam_frame", child: the
    configured marker frame name). The annotated frame is displayed in an
    OpenCV window for visual feedback.
    """

    def __init__(self):
        super().__init__("aruco_node")

        # Declare parameters
        self.declare_parameter("aruco_dictionary_name", "DICT_4X4_50")
        self.declare_parameter("aruco_marker_side_length", 0.063)
        self.declare_parameter(
            "camera_calibration_parameters_filename",
            "/home/noobinventor/lidarbot_ws/src/lidarbot_aruco/config/chessboard_calibration.yaml",
        )
        self.declare_parameter("image_topic", "/image_raw")
        self.declare_parameter("aruco_marker_name", "aruco_marker_frame")

        # Read parameters
        aruco_dictionary_name = (
            self.get_parameter("aruco_dictionary_name")
            .get_parameter_value()
            .string_value
        )
        self.aruco_marker_side_length = (
            self.get_parameter("aruco_marker_side_length")
            .get_parameter_value()
            .double_value
        )
        self.camera_calibration_parameters_filename = (
            self.get_parameter("camera_calibration_parameters_filename")
            .get_parameter_value()
            .string_value
        )
        image_topic = (
            self.get_parameter("image_topic").get_parameter_value().string_value
        )
        self.aruco_marker_name = (
            self.get_parameter("aruco_marker_name").get_parameter_value().string_value
        )

        # Check that we have a valid ArUco dictionary name. Fall back to the
        # default instead of continuing into a guaranteed KeyError below.
        # (Log the parameter we actually checked, not the CLI argument.)
        if ARUCO_DICT.get(aruco_dictionary_name, None) is None:
            self.get_logger().info(
                "[INFO] ArUCo tag of '{}' is not supported".format(
                    aruco_dictionary_name
                )
            )
            aruco_dictionary_name = "DICT_4X4_50"

        # Load the camera parameters from the saved calibration file
        cv_file = cv2.FileStorage(
            self.camera_calibration_parameters_filename, cv2.FILE_STORAGE_READ
        )
        self.camera_matrix = cv_file.getNode("camera_matrix").mat()
        self.dist_coeffs = cv_file.getNode("distortion_coefficients").mat()
        cv_file.release()

        # Load the ArUco dictionary selected via the ROS parameter.
        # BUGFIX: the original used the module-level CLI argument
        # (args["type"]) here, silently ignoring the validated
        # "aruco_dictionary_name" parameter.
        self.get_logger().info(
            "[INFO] detecting '{}' markers...".format(aruco_dictionary_name)
        )
        self.arucoDict = cv2.aruco.getPredefinedDictionary(
            ARUCO_DICT[aruco_dictionary_name]
        )
        self.arucoParams = cv2.aruco.DetectorParameters()

        # Create the subscriber. Sensor-data QoS (best effort) matches the
        # QoS typically used by camera drivers.
        self.subscription = self.create_subscription(
            Image,
            image_topic,
            self.listener_callback,
            qos_profile=qos_profile_sensor_data,
        )

        # Initialize the transform broadcaster
        self.tfbroadcaster = TransformBroadcaster(self)

        # Used to convert between ROS and OpenCV images
        self.bridge = CvBridge()

    def listener_callback(self, data):
        """Detect ArUco markers in an incoming frame and broadcast poses.

        Args:
            data: sensor_msgs/Image message from the camera topic.
        """
        # Debug level: logging at info level for every frame floods the log
        self.get_logger().debug("Receiving video frame")

        # Convert ROS Image message to OpenCV image
        current_frame = self.bridge.imgmsg_to_cv2(data, desired_encoding="bgr8")

        # Detect ArUco markers in the video frame
        (corners, marker_ids, rejected) = cv2.aruco.detectMarkers(
            current_frame,
            self.arucoDict,
            parameters=self.arucoParams,
        )

        # Check that at least one ArUco marker was detected
        if marker_ids is not None:
            # Draw a square around detected markers in the video frame
            cv2.aruco.drawDetectedMarkers(current_frame, corners, marker_ids)

            # Get the rotation and translation vectors
            rvecs, tvecs, obj_points = cv2.aruco.estimatePoseSingleMarkers(
                corners,
                self.aruco_marker_side_length,
                self.camera_matrix,
                self.dist_coeffs,
            )

            # The pose of the marker is with respect to the camera lens frame.
            # Imagine you are looking through the camera viewfinder,
            # the camera lens frame's:
            #   x-axis points to the right
            #   y-axis points straight down towards your toes
            #   z-axis points straight ahead away from your eye, out of the camera
            for i, marker_id in enumerate(marker_ids):
                # Create the coordinate transform
                t = TransformStamped()
                t.header.stamp = self.get_clock().now().to_msg()
                t.header.frame_id = "webcam_frame"
                t.child_frame_id = self.aruco_marker_name

                # Store the translation (i.e. position) information
                t.transform.translation.x = tvecs[i][0][0]
                t.transform.translation.y = tvecs[i][0][1]
                t.transform.translation.z = tvecs[i][0][2]

                # Convert the OpenCV rotation vector to a quaternion
                rotation_matrix = cv2.Rodrigues(np.array(rvecs[i][0]))[0]
                r = R.from_matrix(rotation_matrix)
                quat = r.as_quat()

                # Quaternion format: (x, y, z, w)
                t.transform.rotation.x = quat[0]
                t.transform.rotation.y = quat[1]
                t.transform.rotation.z = quat[2]
                t.transform.rotation.w = quat[3]

                # Send the transform
                self.tfbroadcaster.sendTransform(t)

                # Draw the axes on the marker
                cv2.drawFrameAxes(
                    current_frame,
                    self.camera_matrix,
                    self.dist_coeffs,
                    rvecs[i],
                    tvecs[i],
                    0.05,
                )

        # Display image for testing
        cv2.imshow("camera", current_frame)
        cv2.waitKey(1)


def main(args=None):
    """Entry point: spin the ArUco trajectory visualizer node.

    Args:
        args: command-line arguments forwarded to rclpy.init (default None).
    """
    # Initialize the rclpy library
    rclpy.init(args=args)

    # Create the node
    aruco_node = ArucoNode()

    try:
        # Spin the node so the callback function is called
        rclpy.spin(aruco_node)
    except KeyboardInterrupt:
        # Ctrl-C raises KeyboardInterrupt out of spin(); without this
        # handler the cleanup below never runs.
        pass
    finally:
        # Destroy the node explicitly and shut down the ROS client
        # library even if spin() exits abnormally
        aruco_node.destroy_node()
        rclpy.shutdown()


if __name__ == "__main__":
    main()
1 change: 1 addition & 0 deletions lidarbot_aruco/lidarbot_aruco/detect_aruco_marker.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@

import argparse
import cv2
import sys

# Construct argument parser and parse the arguments
ap = argparse.ArgumentParser()
Expand Down
4 changes: 2 additions & 2 deletions lidarbot_aruco/lidarbot_aruco/generate_aruco_marker.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,8 @@ def main():
args["type"], args["id"]
)
)
tag = np.zeros((250, 250, 1), dtype="uint8")
cv2.aruco.generateImageMarker(arucoDict, args["id"], 250, tag, 1)
tag = np.zeros((180, 180, 1), dtype="uint8")
cv2.aruco.generateImageMarker(arucoDict, args["id"], 180, tag, 1)

# Write the generated ArUCo tag to disk and then display it to our
# screen
Expand Down
2 changes: 1 addition & 1 deletion lidarbot_aruco/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
tests_require=["pytest"],
entry_points={
"console_scripts": [
"aruco_trajectory_visualizer_node = lidarbot_aruco.aruco_trajectory_visualizer.main"
"aruco_trajectory_visualizer_node = lidarbot_aruco.aruco_trajectory_visualizer:main"
],
},
)
Binary file added lidarbot_aruco/tags/DICT_4X4_50_id1.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.

0 comments on commit 0fde086

Please sign in to comment.