diff --git a/.gitignore b/.gitignore
index 796c68107..262810ef5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -154,3 +154,7 @@ dmypy.json
# docker files
/docker/.vscode-server
/docker/.zsh_history
+
+# colcon build artifacts
+/install
+/log
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..c416832dc
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "src/pointgrey_camera_driver"]
+ path = src/pointgrey_camera_driver
+ url = https://github.com/ros-drivers/pointgrey_camera_driver
diff --git a/rktl_autonomy/CMakeLists.txt b/rktl_autonomy/CMakeLists.txt
deleted file mode 100644
index f53b3d4bf..000000000
--- a/rktl_autonomy/CMakeLists.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(rktl_autonomy)
-
-# Find catkin macros
-find_package(catkin REQUIRED)
-
-# install python module(s)
-catkin_python_setup()
-
-# generates cmake config files and set variables for installation
-catkin_package()
-
-# Tests
-if(CATKIN_ENABLE_TESTING)
- find_package(roslaunch REQUIRED)
- roslaunch_add_file_check(launch/cartpole)
- roslaunch_add_file_check(launch/rocket_league)
- find_package(rostest REQUIRED)
- add_rostest(test/test_step.test)
-endif()
diff --git a/rktl_autonomy/README.md b/rktl_autonomy/README.md
deleted file mode 100644
index 51c4f2b67..000000000
--- a/rktl_autonomy/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# rktl_autonomy
-
-This package provides all code required to train a neural network to drive the
-car and interface it into the existing control structure. It also provides
-equivalent interfaces for two other environments (snake game and cartpole) for
-verification purposes.
diff --git a/rktl_autonomy/YAMLEditor/.gitignore b/rktl_autonomy/YAMLEditor/.gitignore
deleted file mode 100644
index c4194cfa6..000000000
--- a/rktl_autonomy/YAMLEditor/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-# Java
-bin/
-.classpath
-.settings
-
-# macOS metadata file
-.DS_Store
diff --git a/rktl_autonomy/YAMLEditor/.project b/rktl_autonomy/YAMLEditor/.project
deleted file mode 100644
index 94ee56c1e..000000000
--- a/rktl_autonomy/YAMLEditor/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
- YAMLEditor
-
-
-
-
-
- org.eclipse.jdt.core.javabuilder
-
-
-
-
-
- org.eclipse.jdt.core.javanature
-
-
diff --git a/rktl_autonomy/YAMLEditor/README.md b/rktl_autonomy/YAMLEditor/README.md
deleted file mode 100644
index c97d0314c..000000000
--- a/rktl_autonomy/YAMLEditor/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# YAMLEditor
-
-## Unpack into a table, edit, and repack two YAML arrays
-
-Intended for use with reward values
-
-Built jar in `jars/`
diff --git a/rktl_autonomy/YAMLEditor/jars/YAMLEditor.jar b/rktl_autonomy/YAMLEditor/jars/YAMLEditor.jar
deleted file mode 100644
index bb3ed6c7d..000000000
Binary files a/rktl_autonomy/YAMLEditor/jars/YAMLEditor.jar and /dev/null differ
diff --git a/rktl_autonomy/YAMLEditor/src/com/purduearc/yamleditor/Main.java b/rktl_autonomy/YAMLEditor/src/com/purduearc/yamleditor/Main.java
deleted file mode 100644
index 67809ae9a..000000000
--- a/rktl_autonomy/YAMLEditor/src/com/purduearc/yamleditor/Main.java
+++ /dev/null
@@ -1,177 +0,0 @@
-package com.purduearc.yamleditor;
-
-import java.awt.Rectangle;
-import java.awt.event.ActionEvent;
-import java.awt.event.ActionListener;
-import java.util.Arrays;
-import java.util.regex.Pattern;
-
-import javax.swing.JButton;
-import javax.swing.JFrame;
-import javax.swing.JLabel;
-import javax.swing.JScrollPane;
-import javax.swing.JTable;
-import javax.swing.JTextField;
-import javax.swing.event.TableModelEvent;
-import javax.swing.event.TableModelListener;
-import javax.swing.table.DefaultTableModel;
-/**
- * Unpack into a table, edit, and repack two YAML arrays
- *
- * @author Campbell McClendon
- *
- * @version 1.0
- */
-public class Main {
- private static final Rectangle scrollPaneBounds = new Rectangle(2, 44, 496, 426);
- private static final String[] titles = new String[] {"ID", "WIN", "LOSE"};
- private static String[][] data = null;
- public static void main(String[] args) {
- JFrame frame = new JFrame("YAML Editor");
- frame.setResizable(false);
- frame.setSize(500, 500);
-
- JTextField winInputField = new JTextField("[1, 2, 3, 4, 5]", 50);
- winInputField.setBounds(2, 2, 100, 20);
-
- JTextField loseInputField = new JTextField("[6, 7, 8, 9, 10]");
- loseInputField.setBounds(104, 2, 100, 20);
-
- JLabel inputLabel = new JLabel("Input");
- inputLabel.setBounds(106, 24, 100, 20);
-
- data = new String[][] {{"0", "1", "2"}, {"1", "3", "4"}};
- DefaultTableModel tableModel = new DefaultTableModel(data, titles);
- JTable table = new JTable(tableModel);
- table.setFillsViewportHeight(true);
- tableModel.addTableModelListener(new TableModelListener() {
- @Override
- public void tableChanged(TableModelEvent e) {
- if (table.isEditing()) {
- data[table.getSelectedRow()][table.getSelectedColumn()] = (String) table.getValueAt(table.getSelectedRow(), table.getSelectedColumn());
- }
- }
- });
-
- JLabel errorLabel = new JLabel("");
- errorLabel.setBounds(225, 2, 100, 20);
-
- JScrollPane scrollPane = new JScrollPane(table);
- scrollPane.setBounds(scrollPaneBounds);
-
- JTextField winOutputField = new JTextField(50);
- winOutputField.setBounds(296, 2, 100, 20);
- winOutputField.setEditable(false);
-
- JTextField loseOutputField = new JTextField(50);
- loseOutputField.setBounds(398, 2, 100, 20);
- loseOutputField.setEditable(false);
-
- JLabel outputLabel = new JLabel("Output");
- outputLabel.setBounds(298, 24, 100, 20);
-
- JButton unpackButton = new JButton("Unpack Arrays");
- unpackButton.setBounds(2, 24, 100, 20);
- unpackButton.addActionListener(new ActionListener() {
- @Override
- public void actionPerformed(ActionEvent e) {
- try {
- String col1Text = clip(winInputField.getText().replaceAll("\\s+",""));
- String[] col1ByID = col1Text.split(Pattern.quote(","));
- if (col1ByID.length < 2) {
- System.err.println("Malformed win input data");
- errorLabel.setText("ERROR");
- return;
- }
- String col2Text = clip(loseInputField.getText().replaceAll("\\s+",""));
- String[] col2ByID = col2Text.split(Pattern.quote(","));
- if (col2ByID.length < 2) {
- System.err.println("Malformed lose input data");
- errorLabel.setText("ERROR");
- return;
- }
- if (col1ByID.length != col2ByID.length) {
- System.err.println("Win and lose arrays must be of same length");
- errorLabel.setText("ERROR");
- return;
- }
- String[][] newData = new String[col1ByID.length][2];
- for (int i = 0; i < col1ByID.length; i++) {
- newData[i][0] = col1ByID[i];
- newData[i][1] = col2ByID[i];
- }
- String[][] dataWithID = new String[newData.length][newData[0].length + 1];
- for (int id = 0; id < dataWithID.length; id++) {
- dataWithID[id][0] = "" + id;
- for (int i = 1; i < dataWithID[id].length; i++) {
- dataWithID[id][i] = newData[id][i - 1];
- }
- }
- data = dataWithID;
- tableModel.setDataVector(dataWithID, titles);
- errorLabel.setText("");
- } catch (Exception exception) {
- errorLabel.setText("ERROR");
- exception.printStackTrace();
- }
- }
- });
-
- JButton repackButton = new JButton("Repack Arrays");
- repackButton.setBounds(398, 24, 100, 20);
- repackButton.addActionListener(new ActionListener() {
- @Override
- public void actionPerformed(ActionEvent e) {
- try {
- String[][] dataWithoutID = new String[data.length][];
- for (int x = 0; x < data.length; x++) {
- String[] line = new String[data[x].length - 1];
- for (int y = 1; y < data[x].length; y++) {
- line[y - 1] = data[x][y];
- }
- dataWithoutID[x] = line;
- }
- String[] winValues = new String[dataWithoutID.length];
- String[] loseValues = new String[dataWithoutID.length];
- for (int i = 0; i < dataWithoutID.length; i++) {
- winValues[i] = dataWithoutID[i][0];
- loseValues[i] = dataWithoutID[i][1];
- }
- winOutputField.setText(Arrays.deepToString(winValues).replaceAll("\\s+",""));
- loseOutputField.setText(Arrays.deepToString(loseValues).replaceAll("\\s+",""));
- } catch (Exception exception) {
- errorLabel.setText("ERROR");
- exception.printStackTrace();
- }
- }
- });
-
- frame.add(winInputField);
- frame.add(loseInputField);
- frame.add(inputLabel);
- frame.add(unpackButton);
- frame.add(errorLabel);
- frame.add(winOutputField);
- frame.add(loseOutputField);
- frame.add(outputLabel);
- frame.add(repackButton);
- frame.add(scrollPane);
-
- frame.setLayout(null);
- frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
- frame.setVisible(true);
- }
- /**
- * Clips the first and last characters off of a string
- *
- * @param str input string
- * @return str without first and last characters
- */
- private static String clip(String str) {
- if (str.charAt(0) == (char) '[') {
- if (str.charAt(str.length() - 1) == (char) ']') return str.substring(1, str.length() - 1);
- else return str.substring(1, str.length());
- } else if (str.charAt(str.length() - 1) == (char) ']') return str.substring(0, str.length() - 1);
- else return str;
- }
-}
diff --git a/rktl_autonomy/config/rocket_league.yaml b/rktl_autonomy/config/rocket_league.yaml
deleted file mode 100644
index 7f6cb85f9..000000000
--- a/rktl_autonomy/config/rocket_league.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-reward:
- # reward to be given each frame
- # constant: 0.0
- # # reward as a function of squared distance from the ball
- # ball_dist_sq: -0.5
- # # reward as a function of squared distance from the ball to the goal
- # goal_dist_sq: -1.0
- # reward given when car changes velocity direction
- direction_change: 0
- # reward given when the episode ends with the car scoring the proper goal
- win: [100, 200, 300]
- # reward given when the episode ends with the car scoring on the wrong goal
- loss: [100, 100, 100]
- # reward given each frame when the car is in reverse
- # reverse: -25
- # # reward given each frame when the car is near the walls
- # walls:
- # # actual reward value
- # value: -50
- # # threshold distance (meters)
- # threshold: 0.25
-
-# duration when the episode will be terminated. Unit is seconds (sim time)
-max_episode_time: 15
-
-log:
- base_dir: "~/catkin_ws/data/rocket_league/"
- # frequency to save progress plot. Unit is episodes
- plot_freq: 50
- # variables to display in progress plot
- basic:
- - "duration"
- - "goals"
- advanced:
- - "net_reward"
diff --git a/rktl_autonomy/config/snake.yaml b/rktl_autonomy/config/snake.yaml
deleted file mode 100644
index ca4aaee7f..000000000
--- a/rktl_autonomy/config/snake.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-reward:
- # reward to be given when the snake dies. This should be zero or negative
- death: 0.0
- # reward to be given for each goal achieved. This should be positive
- goal: 50.0
- # reward to be given based off squared distance to goal.
- # - normalized for field length
- # - normalized for each second of gameplay
- # - should be zero or negative (to promote being close to goal)
- distance: 0.0
- # reward to be given each frame
- # - normalized for each second of gameplay
- # - could be negative to promote getting goals quickly
- # - could be positive to offset distance penalty
- constant: 0.0
-
-max_episode_time: 30.0
-
-control:
- max_linear_velocity: 3.0
- max_angular_velocity: 3.0
-
-log:
- base_dir: "~/catkin_ws/data/snake/"
- # frequency to save progress plot. Unit is episodes
- plot_freq: 25
- # variables to include in progress plot
- basic:
- - duration
- advanced:
- - score
- - net_reward
\ No newline at end of file
diff --git a/rktl_autonomy/launch/README.md b/rktl_autonomy/launch/README.md
deleted file mode 100644
index 802c1e97e..000000000
--- a/rktl_autonomy/launch/README.md
+++ /dev/null
@@ -1,157 +0,0 @@
-# Launch Files
-
-These are [.launch files](https://wiki.ros.org/roslaunch/XML) that can be run
-using [roslaunch](https://wiki.ros.org/roslaunch):
-
-```shell
-roslaunch rktl_autonomy
-```
-
-```{contents} Launch Files in the package
-:depth: 2
-:backlinks: top
-:local: true
-```
-
----
-
-## cartpole_train.launch
-
-This launch file starts the training a model for the cartpole environment. Wraps
-the [`cartpole_env`](../nodes/README.md#cartpole-env) node. For more info,
-refer to its documentation.
-
-### Launch Arguments
-
-- `agent_name` (default: `rocket_league_agent`): Name of the agent.
-- `render` (default: `false`): If true, a window for the current simulation will
- be shown.
-- `rate` (value: `30.0`): Rate at which the simulation runs, in steps per second.
-
-### Nodes
-
-- `cartpole_env`: [`cartpole_env`](../nodes/README.md#cartpole-env) node in
- the [`rktl_autonomy`](../README.md) package.
-
----
-
-## rocket_league_agent.launch
-
-This launch file starts the agent for the rocket league environment. Wraps
-the [`rocket_league_agent`](../nodes/README.md#rocket-league-agent) node. For
-more info, refer to its documentation.
-
-### Launch Arguments
-
-- `plot_log` (default: `false`): If true, a plot will be generated.
-- `weights_dir` (default: `'~/catkin_ws/data/rocket_league/'`): Folder
- containing the weights of pre-trained models.
-- `weights_name` (default: `'model'`): Which of the models to load, from the
- directory defined in `weights_dir`.
-
-### Nodes
-
-- `rocket_league_agent`:
- [`rocket_league_agent`](../nodes/README.md#rocket-league-agent) node from
- the [`rktl_autonomy`](../README.md) package.
-- `plotter`: [`plotter`](../nodes/README.md#plotter) node from the
- [`rktl_autonomy`](../README.md) package. Only run if the `plot_log` argument
- is set to `true`.
-
----
-
-## rocket_league_train.launch
-
-This launch file starts the training a model for the rocket league environment.
-Wraps the [`rocket_league_agent`](../nodes/README.md#rocket-league-agent) node
-and runs the simulator.
-
-Loads a training environment for and starts training a model.
-
-### Launch Arguments
-
-- `plot_log` (default: `true`): If true, a plot will be generated.
-- `agent_name` (default: `rocket_league_agent`): Name of the agent.
-- `render` (default: `false`): If true, a window for the current simulation will
- be shown.
-- `sim_mode` (default: `'ideal'`): Simulation mode for training.
-- `rate` (value: `10.0`): Rate at which the simulation runs, in steps per
- second. Cannot be overridden.
-- `agent_type` (value: `'none'`): Disables the loading of an existing agent.
- Cannot be overridden.
-
-### Nodes
-
-- `plotter`: [`plotter`](../nodes/README.md#plotter) node from the
- [`rktl_autonomy`](../README.md) package. Only run if the `plot_log` argument
- is set to `true`.
-
-### Included Launch Files
-
-- `rktl_perception camera.launch` (Included twice, once with `camera_name` set
- to `cam2` and once with `camera_name` set to `cam3`)
-- `rktl_launch rocket_league_sim.launch`: Loads Parameters from
- `config/rocket_league.yaml` in the [`rktl_autonomy`](../README.md) package.
- Also sets `~log` to `$(arg agent_name)/log`.
-
----
-
-## snake_eval.launch
-
-This launch file starts the agent for the snake environment. Wraps the
-[`snake_agent`](../nodes/README.md#snake-agent) node. For more info, refer to
-its documentation.
-
-### Launch Arguments
-
-- `plot_log` (default: `false`): If true, a plot will be generated.
-- `render` (default: `true`): If true, a window for the current simulation will
- be shown.
-- `weights` (default: `'~/catkin_ws/data/snake/weights`'): Folder
- containing the weights of pre-trained models.
-- `rate` (value: `10.0`): Rate at which the simulation runs, in steps per
- second. Cannot be overridden.
-- `snake_size` (value: `7`): Initial number of segments for the snake. Cannot be
- overridden.
-- `arena_size` (value: `10`): Size of the arena the snake operates in. Cannot be
- overridden.
-
-### Nodes
-
-- `snake_env`: `snake_node` node from the `snakesim` package.
-- `snake_agent`: [`snake_agent`](../nodes/README.md#snake-agent) node from the
- [`rktl_autonomy`](../README.md) package.
-- `plotter`: [`plotter`](../nodes/README.md#plotter) node from the
- [`rktl_autonomy`](../README.md) package. Only run if the `plot_log` argument
- is set to `true`.
-
----
-
-## snake_train.launch
-
-This launch file starts the training a model for the snake environment. Wraps
-the [`snake_agent`](../nodes/README.md#snake-agent) node. For more info,
-refer to its documentation.
-
-
-### Launch Arguments
-
-- `plot_log` (default: `false`): If true, a plot will be generated.
-- `agent_name` (default: `'snake_agent'`): Name of the agent.
-- `render` (default: `false`): If true, a window for the current simulation will
- be shown.
-- `rate` (value: `10.0`): Rate at which the simulation runs, in steps per
- second. Cannot be overridden.
-- `snake_size` (value: `7`): Initial number of segments for the snake. Cannot be
- overridden.
-- `arena_size` (value: `10`): Size of the arena the snake operates in. Cannot be
- overridden.
-
-### Nodes
-
-- `snake_env`: `snake_node` node from the `snakesim` package.
-- `plotter`: [`plotter`](../nodes/README.md#plotter) node from the
- [`rktl_autonomy`](../README.md) package. Only run if the `plot_log` argument
- is set to `true`.
-
----
diff --git a/rktl_autonomy/launch/cartpole/cartpole_train.launch b/rktl_autonomy/launch/cartpole/cartpole_train.launch
deleted file mode 100644
index 0c251f65e..000000000
--- a/rktl_autonomy/launch/cartpole/cartpole_train.launch
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/rktl_autonomy/launch/rocket_league/rocket_league_agent.launch b/rktl_autonomy/launch/rocket_league/rocket_league_agent.launch
deleted file mode 100644
index 2d7327415..000000000
--- a/rktl_autonomy/launch/rocket_league/rocket_league_agent.launch
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/rktl_autonomy/launch/rocket_league/rocket_league_train.launch b/rktl_autonomy/launch/rocket_league/rocket_league_train.launch
deleted file mode 100644
index cf7a34195..000000000
--- a/rktl_autonomy/launch/rocket_league/rocket_league_train.launch
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/rktl_autonomy/launch/snake/snake_eval.launch b/rktl_autonomy/launch/snake/snake_eval.launch
deleted file mode 100644
index 091e2f415..000000000
--- a/rktl_autonomy/launch/snake/snake_eval.launch
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/rktl_autonomy/launch/snake/snake_train.launch b/rktl_autonomy/launch/snake/snake_train.launch
deleted file mode 100644
index a80095c3a..000000000
--- a/rktl_autonomy/launch/snake/snake_train.launch
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/rktl_autonomy/nodes/README.md b/rktl_autonomy/nodes/README.md
deleted file mode 100644
index 02b1a911a..000000000
--- a/rktl_autonomy/nodes/README.md
+++ /dev/null
@@ -1,168 +0,0 @@
-# Nodes
-
-These are the [ROS Nodes](http://wiki.ros.org/Nodes) provided by this package.
-Most likely, you will use a launch file to run these. You can also run them
-by using `rosrun`:
-
-```shell
-rosrun rktl_autonomy
-```
-
-```{contents} ROS Nodes in the package
-:depth: 2
-:backlinks: top
-:local: true
-```
-
----
-
-## cartpole_env
-
-This node creates a connection between the CartPole-v0 environment from OpenAI's
-Gym library and ROS for testing and simulation purposes. It subscribes to the
-`cartpole/action` topic, which receives a message containing an action (an
-integer) that is taken by the agent. It also publishes messages to the
-`cartpole/observation`, `cartpole/reward`, and `cartpole/done` topics, which
-contain the current observation (a Float32MultiArray), the current reward (a
-Float32), and a Boolean indicating whether the episode is done or not,
-respectively.
-
-### Subscribed Topics
-
-- `cartpole/action` ([std_msgs/Int32](/std_msgs/html/msg/Int32.html#http://)):
- The action that should be performed.
-
-### Published Topics
-
-- `cartpole/observation` ([std_msgs/Float32MultiArray](/std_msgs/html/msg/Float32MultiArray.html#http://)):
- Observation of the system at any given point.
-- `cartpole/reward` ([std_msgs/Float32](/std_msgs/html/msg/Float32.html#http://)):
- Reward for training.
-- `cartpole/done` ([std_msgs/Bool](/std_msgs/html/msg/Bool.html#http://)):
- Boolean representing the whether or not the simulation is complete.
-
----
-
-## plotter
-
-This node plots the progress of a machine learning algorithm. The progress is
-subscribed to via the `diagnostic_msgs/DiagnosticStatus` message. The node uses
-the `mpl_toolkits` library to plot the progress of the training, which is then
-saved as a PNG image file.
-
-### Subscribed Topics
-
-- `~log` ([diagnostic_msgs/DiagnosticStatus](/diagnostic_msgs/html/msg/DiagnosticStatus.html#http://)):
- Log for the training algorithm.
-
-### Parameters
-
-- `~log/base_dir` (string): The base directory where the training log files are
- stored.
-- `~log/plot_freq` (int, default: `25`): The frequency of plotting, i.e., how
- often the node updates the plot with new data.
-- `~log/basic` (list, default: `['duration']`): A list of basic variables to
- plot. These variables have only one line on the plot.
-- `~log/advanced` (list, default: `['net_reward']`): A list of advanced
- variables to plot. These variables have three lines on the plot: the
- average, the minimum, and the maximum.
-- `~frequency` (float, default: `1/30`): Frequency of the main simulation loop.
-
----
-
-## rocket_league_agent
-
-This node runs the agent trained for the Rocket League project. It uses the
-`RocketLeagueInterface` class in `rocket_league_interface.py`.
-
-### Subscribed Topics
-
-- `cars/car0/odom` ([nav_msgs/Odometry](/nav_msgs/html/msg/Odometry.html#http://)):
- Odometry data of the car's position and velocity.
-- `ball/odom` ([nav_msgs/Odometry](/nav_msgs/html/msg/Odometry.html#http://)):
- Odometry data of the car's position and velocity.
-- `match_status` ([rktl_msgs/MatchStatus](/rktl_msgs/html/msg/MatchStatus.html#http://)):
- Match status data containing the current score of the game.
-
-### Published Topics
-
-- `/cars/car0/command` ([rktl_msgs/ControlCommand](/rktl_msgs/html/msg/ControlCommand.html#http://)):
- ontrol commands for the car.
-
-### Parameters
-
-- `~weights` (string): The location of weights for where the model was saved.
-- `/cars/throttle/max_speed` (float): Maximum velocity for the car, in meters
- per second.
-- `/cars/steering/max_throw` (float): Maximum steering angle for the car, in
- meters per second.
-- `~action_space/velocity/min` (float): Optional. Action space override for
- velocity for the car, in meters per second.
-- `~action_space/velocity/max` (float): Optional. Action space override for
- velocity for the car, in meters per second.
-- `~action_space/curvature/min` (float): Optional. Action space override for
- steering angle for the car, in meters per second.
-- `~action_space/curvature/max` (float): Optional. Action space override for
- steering angle for the car, in meters per second.
-- `/cars/length` (float): Length of the car, in meters.
-- `/field/width` (float): Width of the field, in meters.
-- `/field/length` (float): Length of the field, in meters.
-- `~observation/goal_depth` (float, default, `0.075`): Depth of the goal, in
- meters.
-- `~observation/velocity/max_abs` (float, default, `3.0`): Max absolute velocity
- for observation. (default is 3.0)
-- `~observation/angular_velocity/max_abs` (float, default: `2π`): Max absolute
- angular velocity for observation. (default is 2*pi)
-- `~max_episode_time` (float, default: `30.0`): Maximum time in seconds for an
- episode.
-- `~reward/constant` (float, default: `0.0`): Constant reward for each step.
-- `~reward/ball_dist_sq` (float, default: `0.0`): Reward for getting closer to
- the ball.
-- `~reward/goal_dist_sq` (float, default: `0.0`): Reward for getting closer to
- the goal.
-- `~reward/direction_change` (float, default: `0.0`): Reward for changing the
- direction.
-- `~reward/win` (float, default: `100.0`): Reward for winning.
-- `~reward/loss` (float, default: `0.0`): Reward for losing.
-- `~reward/reverse`: (float, default: `0.0`)Reward for reversing.
-- `~reward/walls/value` (float, default: `0.0`): Reward for hitting walls.
-- `~reward/walls/threshold` (float, default: `0.0`): Distance from the wall at
- which reward is given.
-
----
-
-## snake_agent
-
-This node runs the agent trained for the snake environment. It uses the
-`SnakeInterface` class in `snake_interface.py`.
-
-### Subscribed Topics
-
-- `snake/pose` ([geometry_msgs/PoseArray](/geometry_msgs/html/msg/PoseArray.html#http://)):
- Pose of the snake parts.
-- `snake/goal` ([geometry_msgs/PointStamped](/geometry_msgs/html/msg/PointStamped.html#http://)):
- Location of the apple.
-- `snake/score` ([std_msgs/Int32](/std_msgs/html/msg/Int32.html#http://)):
- Current Score.
-- `snake/active` ([std_msgs/Bool](/std_msgs/html/msg/Bool.html#http://)):
- Whether or not the snake is active.
-
-### Published Topics
-
-- `snake/cmd_vel` ([geometry_msgs/Twist](/geometry_msgs/html/msg/Twist.html#http://)):
- Command for the snake to follow.
-
-### Parameters
-
-- `~num_segments` (int, default: 7): Initial number of segments of the snake.
-- `~field_size` (float, default: 10): Size of the field.
-- `~control/max_angular_velocity` (float, default: 3.0): Max angular velocity.
-- `~control/max_linear_velocity` (float, default: 3.0): Max velocity.
-- `~reward/death` (float, default: 0.0): Penalty for death.
-- `~reward/goal` (float, default: 50.0): Reward for eating an apple.
-- `~reward/distance` (float, default: 0.0): Reward for distance traveled.
-- `~reward/constant` (float, default: 0.0): Constant reward.
-- `~max_episode_time` (float, default: 30.0): Maximum time in seconds for an
- episode.
-
----
diff --git a/rktl_autonomy/nodes/cartpole_env b/rktl_autonomy/nodes/cartpole_env
deleted file mode 100755
index cd3e18057..000000000
--- a/rktl_autonomy/nodes/cartpole_env
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python3
-"""CartPole-v0 environment with ROS bindings for testing.
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-# ROS
-import rospy
-from std_msgs.msg import Int32, Float32, Float32MultiArray, Bool
-from std_srvs.srv import Empty, EmptyResponse
-
-import gym
-from threading import Lock
-
-class CartPole(object):
- def __init__(self):
- rospy.init_node('cart_pole_node')
- self.RENDER = rospy.get_param('~render', False)
-
- self.env = gym.make('CartPole-v0')
- self.observation = self.env.reset()
- self.reward=0
- self.done=0
- self.action=None
- self.lock = Lock()
-
- # Publishers
- self.obs_pub = rospy.Publisher('cartpole/observation', Float32MultiArray, queue_size=1)
- self.reward_pub = rospy.Publisher('cartpole/reward', Float32, queue_size=1)
- self.done_pub = rospy.Publisher('cartpole/done', Bool, queue_size=1)
-
- # Subscribers
- rospy.Subscriber('cartpole/action', Int32, self.cartpole_action)
-
- # Services
- rospy.Service('cartpole/reset', Empty, self.cartpole_reset)
-
- # Timers
- rospy.Timer(rospy.Duration(rospy.get_param('~frequency', 1/30)), self.cartpole_step)
- rospy.spin()
-
- def cartpole_step(self,event=None):
- """Step time once."""
- with self.lock:
- if self.action is not None and self.done == False:
- self.observation, self.reward, self.done, __ = self.env.step(self.action)
-
- self.obs_pub.publish(None, self.observation)
- self.reward_pub.publish(self.reward)
- self.done_pub.publish(self.done)
-
- if self.RENDER:
- self.env.render()
-
- def cartpole_action(self, action_msg):
- """Callback for actions."""
- self.action = action_msg.data
-
- def cartpole_reset(self, __):
- """Reset the environment."""
- with self.lock:
- self.observation=self.env.reset()
- self.reward=0
- self.done=0
- self.action=None
- return EmptyResponse()
-
-if __name__ == "__main__":
- CartPole()
diff --git a/rktl_autonomy/nodes/rocket_league_agent b/rktl_autonomy/nodes/rocket_league_agent
deleted file mode 100755
index 1192bb9a3..000000000
--- a/rktl_autonomy/nodes/rocket_league_agent
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python3
-"""Real-time evaluation of the agent trained for the Rocket League project.
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-from rktl_autonomy import RocketLeagueInterface
-from stable_baselines3 import PPO
-from os.path import expanduser
-import rospy
-
-# create interface (and init ROS)
-env = RocketLeagueInterface(eval=True)
-
-# load the model
-weights = expanduser(rospy.get_param('~weights'))
-model = PPO.load(weights)
-
-# evaluate in real-time
-obs = env.reset()
-while True:
- action, __ = model.predict(obs)
- try:
- obs, __, __, __ = env.step(action)
- except rospy.ROSInterruptException:
- exit()
diff --git a/rktl_autonomy/nodes/snake_agent b/rktl_autonomy/nodes/snake_agent
deleted file mode 100755
index f760d1b8a..000000000
--- a/rktl_autonomy/nodes/snake_agent
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python3
-"""Real-time evaluation of the agent trained on the snake environment.
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-from rktl_autonomy import SnakeInterface
-from stable_baselines3 import PPO
-from os.path import expanduser
-import rospy
-
-# create interface (and init ROS)
-env = SnakeInterface(eval=True)
-
-# load the model
-weights = expanduser(rospy.get_param('~weights'))
-model = PPO.load(weights)
-
-# evaluate in real-time
-obs = env.reset()
-while True:
- action, __ = model.predict(obs)
- try:
- obs, __, __, __ = env.step(action)
- except rospy.ROSInterruptException:
- exit()
diff --git a/rktl_autonomy/package.xml b/rktl_autonomy/package.xml
deleted file mode 100644
index c023f6e6c..000000000
--- a/rktl_autonomy/package.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
- rktl_autonomy
- 0.0.0
- Deep reinforcement learning for ARC Rocket League project
- Autonomous Robotics Club of Purdue
- BSD 3 Clause
- http://www.purduearc.com
- James Baxter
- Meenakshi McNamara
- Haddy Alchaer
- Reuben Georgi
-
- catkin
- rosunit
- rospy
- geometry_msgs
- std_msgs
- std_srvs
- snakesim
- rktl_msgs
- rktl_launch
-
\ No newline at end of file
diff --git a/rktl_autonomy/scripts/train_cartpole.py b/rktl_autonomy/scripts/train_cartpole.py
deleted file mode 100755
index 37b491168..000000000
--- a/rktl_autonomy/scripts/train_cartpole.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python3
-"""Training script for the CartPoleInterface for testing.
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-from rktl_autonomy import CartpoleInterface
-from stable_baselines3 import PPO
-import gym, time
-
-def show_progress(model, episodes=5):
- env = gym.make("CartPole-v0")
- obs = env.reset()
- episodes = 0
- while episodes < 5:
- action, __ = model.predict(obs, deterministic=True)
- obs, __, done, __ = env.step(action)
- env.render()
- time.sleep(0.01)
- if done:
- obs = env.reset()
- episodes += 1
- env.close()
-
-env = CartpoleInterface()
-model = PPO("MlpPolicy", env, verbose=1)
-
-print("showing untrained performance")
-show_progress(model)
-
-print("training on 10k steps")
-model.learn(total_timesteps=10000)
-
-print("showing trained performance")
-show_progress(model)
diff --git a/rktl_autonomy/scripts/train_snake.py b/rktl_autonomy/scripts/train_snake.py
deleted file mode 100755
index db7ad527f..000000000
--- a/rktl_autonomy/scripts/train_snake.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python3
-"""Training script for the Snake game in ARC tutorials.
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-from rktl_autonomy import SnakeInterface
-from stable_baselines3 import PPO
-from stable_baselines3.common.vec_env import SubprocVecEnv
-from stable_baselines3.common.env_util import make_vec_env
-from stable_baselines3.common.logger import configure
-from stable_baselines3.common.callbacks import CheckpointCallback
-from os.path import expanduser
-import uuid
-
-if __name__ == '__main__': # this is required due to forking processes
- run_id = str(uuid.uuid4()) # ALL running environments must share this
-
- env = make_vec_env(SnakeInterface, env_kwargs={'run_id':run_id},
- n_envs=16, vec_env_cls=SubprocVecEnv)
- model = PPO("MlpPolicy", env)
-
- # log training progress as CSV
- log_dir = expanduser(f'~/catkin_ws/data/snake/{run_id}')
- logger = configure(log_dir, ["stdout", "csv", "log"])
- model.set_logger(logger)
-
- # log model weights
- freq = 300 # (time steps in a SINGLE environment)
- callback = CheckpointCallback(save_freq=freq, save_path=log_dir)
-
- # run training
- steps = 50000 # 50k (timesteps accross ALL environments)
- print(f"training on {steps} steps")
- model.learn(total_timesteps=steps, callback=callback)
-
- # save final weights
- print("done training")
- model.save(log_dir + "/final_weights")
- env.close() # this must be done to clean up other processes
diff --git a/rktl_autonomy/setup.py b/rktl_autonomy/setup.py
deleted file mode 100644
index d036274eb..000000000
--- a/rktl_autonomy/setup.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python3
-
-from distutils.core import setup
-from catkin_pkg.python_setup import generate_distutils_setup
-
-setup_args = generate_distutils_setup(
- packages=['rktl_autonomy'],
- package_dir={'': 'src'}
-)
-
-setup(**setup_args)
\ No newline at end of file
diff --git a/rktl_autonomy/src/rktl_autonomy/cartpole_direct_interface.py b/rktl_autonomy/src/rktl_autonomy/cartpole_direct_interface.py
deleted file mode 100644
index 2025273b8..000000000
--- a/rktl_autonomy/src/rktl_autonomy/cartpole_direct_interface.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-Cartpole is a very simple environment included within OpenAI Gym. There is not
-much of a reason to use it, except for verifying the functionality of this
-library. Two different interfaces are provided, `CartpoleInterface` and
-`CartpoleDirectInterface`. The former uses the `ROSInterface` class and the
-latter directly owns the Gym environment. To verify the `ROSInterface` worked,
-it was confirmed that they both behave the same way.
-
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-# package
-from rktl_autonomy import ROSInterface
-
-# ROS
-import rospy
-
-# System
-import gym
-
-class CartpoleDirectInterface(ROSInterface):
- """ROS interface for the cartpole game."""
- def __init__(self, eval=False, launch_file=['rktl_autonomy', 'cartpole_train.launch'], launch_args=[], run_id=None):
- super().__init__(node_name='cartpole_agent', eval=eval, launch_file=launch_file, launch_args=launch_args, run_id=run_id)
-
- self._RENDER = rospy.get_param('~render', False)
-
- self._env = gym.make('CartPole-v0')
- self._state = None
-
- @property
- def action_space(self):
- """The Space object corresponding to valid actions."""
- return self._env.action_space
-
- @property
- def observation_space(self):
- """The Space object corresponding to valid observations."""
- return self._env.observation_space
-
- def _reset_env(self):
- """Reset environment for a new training episode."""
- self._state = (self._env.reset(), 0, False, {})
-
- def _reset_self(self):
- """Reset internally for a new episode."""
- # Does not apply for direct interface
- pass
-
- def _has_state(self):
- """Determine if the new state is ready."""
- return self._state is not None
-
- def _clear_state(self):
- """Clear state variables / flags in preparation for new ones."""
- self._state = None
-
- def _get_state(self):
- """Get state tuple (observation, reward, done, info)."""
- assert self._has_state()
- return self._state
-
- def _publish_action(self, action):
- """Publish an action to the ROS network."""
- assert action >= 0 and action < self.action_space.n
- if self._RENDER:
- self._env.render()
- self._state = self._env.step(action)
diff --git a/rktl_autonomy/src/rktl_autonomy/cartpole_interface.py b/rktl_autonomy/src/rktl_autonomy/cartpole_interface.py
deleted file mode 100755
index cf3bdc5e5..000000000
--- a/rktl_autonomy/src/rktl_autonomy/cartpole_interface.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""
-Cartpole is a very simple environment included within OpenAI Gym. There is not
-much of a reason to use it, except for verifying the functionality of this
-library. Two different interfaces are provided, `CartpoleInterface` and
-`CartpoleDirectInterface`. The former uses the `ROSInterface` class and the
-latter directly owns the Gym environment. To verify the `ROSInterface` worked,
-it was confirmed that they both behave the same way.
-
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-# package
-from rktl_autonomy import ROSInterface
-from gym.spaces import Discrete, Box
-
-# ROS
-import rospy
-from std_msgs.msg import Int32, Float32, Float32MultiArray, Bool
-from std_srvs.srv import Empty
-
-# System
-import numpy as np
-from enum import IntEnum, unique, auto
-from math import pi
-
-@unique
-class CartpoleActions(IntEnum):
- """Possible actions for deep learner."""
- LEFT = 0
- RIGHT = auto()
- SIZE = auto()
-
-class CartpoleInterface(ROSInterface):
- """ROS interface for the cartpole game."""
- def __init__(self, eval=False, launch_file=['rktl_autonomy', 'cartpole_train.launch'], launch_args=[], run_id=None):
- super().__init__(node_name='cartpole_agent', eval=eval, launch_file=launch_file, launch_args=launch_args, run_id=run_id)
-
- # Publishers
- self._action_pub = rospy.Publisher('cartpole/action', Int32, queue_size=1)
- self._reset_srv = rospy.ServiceProxy('cartpole/reset', Empty)
-
- # State variables
- self._obs = None
- self._reward = None
- self._done = None
-
- # Subscribers
- rospy.Subscriber('cartpole/observation', Float32MultiArray, self._obs_cb)
- rospy.Subscriber('cartpole/done', Bool, self._done_cb)
- rospy.Subscriber('cartpole/reward', Float32, self._reward_cb)
-
- @property
- def action_space(self):
- """The Space object corresponding to valid actions."""
- return Discrete(CartpoleActions.SIZE)
-
- @property
- def observation_space(self):
- """The Space object corresponding to valid observations."""
- return Box(low=-pi, high=pi, shape=(4,), dtype=np.float32)
-
- def _reset_env(self):
- """Reset environment for a new training episode."""
- self._reset_srv.call()
-
- def _reset_self(self):
- """Reset internally for a new episode."""
- self._clear_state()
-
- def _has_state(self):
- """Determine if the new state is ready."""
- return (
- self._obs is not None and
- self._reward is not None and
- self._done is not None)
-
- def _clear_state(self):
- """Clear state variables / flags in preparation for new ones."""
- self._obs = None
- self._reward = None
- self._done = None
-
- def _get_state(self):
- """Get state tuple (observation, reward, done, info)."""
- assert self._has_state()
- return (self._obs, self._reward, self._done, {})
-
- def _publish_action(self, action):
- """Publish an action to the ROS network."""
- assert action >= 0 and action < self.action_space.n
- self._action_pub.publish(action)
-
- def _obs_cb(self, obs_msg):
- """Callback for observation of game."""
- assert len(obs_msg.data) == self.observation_space.shape[0]
- self._obs = np.asarray(obs_msg.data, dtype=np.float32)
- with self._cond:
- self._cond.notify_all()
-
- def _reward_cb(self, reward_msg):
- """Callback for reward of game."""
- self._reward = reward_msg.data
- with self._cond:
- self._cond.notify_all()
-
- def _done_cb(self, done_msg):
- """Callback for if episode is done."""
- self._done = done_msg.data
- with self._cond:
- self._cond.notify_all()
diff --git a/rktl_autonomy/src/rktl_autonomy/env_counter.py b/rktl_autonomy/src/rktl_autonomy/env_counter.py
deleted file mode 100644
index c77dba7d7..000000000
--- a/rktl_autonomy/src/rktl_autonomy/env_counter.py
+++ /dev/null
@@ -1,9 +0,0 @@
-class EnvCounter:
- def __init__(self):
- self.counter = 0
-
- def count_env(self):
- self.counter += 1
-
- def get_current_counter(self):
- return self.counter
diff --git a/rktl_autonomy/src/rktl_autonomy/snake_interface.py b/rktl_autonomy/src/rktl_autonomy/snake_interface.py
deleted file mode 100755
index 255f35a30..000000000
--- a/rktl_autonomy/src/rktl_autonomy/snake_interface.py
+++ /dev/null
@@ -1,198 +0,0 @@
-"""
-This provides an interface to the snake game created in the
-[ARC Tutorials](https://github.com/purdue-arc/arc_tutorials/tree/snake_dqn).
-This was largely to gain experience in training reinforcement learning agents
-and there isn't much need for it anymore. It may be useful for testing new types
-of agents or onboarding new members.
-
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-# package
-from rktl_autonomy import ROSInterface
-from gym.spaces import Discrete, Box
-
-# ROS
-import rospy
-from geometry_msgs.msg import Twist, PoseArray, PointStamped
-from std_msgs.msg import Int32, Bool
-from std_srvs.srv import Empty
-
-# System
-import numpy as np
-from tf.transformations import euler_from_quaternion
-from enum import IntEnum, unique, auto
-from math import pi
-
-@unique
-class SnakeActions(IntEnum):
- """Possible actions for deep learner."""
- FORWARD = 0
- LEFT = auto()
- RIGHT = auto()
- SIZE = auto()
-
-class SnakeInterface(ROSInterface):
- """ROS interface for the snake game."""
- def __init__(self, eval=False, launch_file=['rktl_autonomy', 'snake_train.launch'], launch_args=[], run_id=None):
- super().__init__(node_name='snake_agent', eval=eval, launch_file=launch_file, launch_args=launch_args, run_id=run_id)
-
- # Constants
- self._NUM_SEGMENTS = rospy.get_param('~num_segments', 7)
- self._FIELD_SIZE = rospy.get_param('~field_size', 10)
- self._ANGULAR_VELOCITY = rospy.get_param('~control/max_angular_velocity', 3.0)
- self._LINEAR_VELOCITY = rospy.get_param('~control/max_linear_velocity', 3.0)
- self._DEATH_REWARD = rospy.get_param('~reward/death', 0.0)
- self._GOAL_REWARD = rospy.get_param('~reward/goal', 50.0)
- self._DISTANCE_REWARD = rospy.get_param('~reward/distance', 0.0)
- self._CONSTANT_REWARD = rospy.get_param('~reward/constant', 0.0)
- self._MAX_TIME = rospy.get_param('~max_episode_time', 30.0)
-
- # Publishers
- self._action_pub = rospy.Publisher('snake/cmd_vel', Twist, queue_size=1)
- self._reset_srv = rospy.ServiceProxy('snake/reset', Empty)
-
- # State variables
- self._pose = None
- self._goal = None
- self._score = None
- self._alive = None
- self._prev_time = None
- self._start_time = None
-
- # Subscribers
- rospy.Subscriber('snake/pose', PoseArray, self._pose_cb)
- rospy.Subscriber('snake/goal', PointStamped, self._goal_cb)
- rospy.Subscriber('snake/score', Int32, self._score_cb)
- rospy.Subscriber('snake/active', Bool, self._alive_cb)
-
- @property
- def action_space(self):
- """The Space object corresponding to valid actions."""
- return Discrete(SnakeActions.SIZE)
-
- @property
- def observation_space(self):
- """The Space object corresponding to valid observations."""
- locations = 2*(1+self._NUM_SEGMENTS)
- return Box(
- low=np.array([-pi] + locations*[0], dtype=np.float32),
- high=np.array([pi] + locations*[self._FIELD_SIZE], dtype=np.float32))
-
- def _reset_env(self):
- """Reset environment for a new training episode."""
- self._reset_srv.call()
-
- def _reset_self(self):
- """Reset internally for a new episode."""
- self._clear_state()
- self._prev_time = None
- self._start_time = None
-
- def _has_state(self):
- """Determine if the new state is ready."""
- return (
- self._pose is not None and
- self._goal is not None and
- self._score is not None and
- self._alive is not None)
-
- def _clear_state(self):
- """Clear state variables / flags in preparation for new ones."""
- self._pose = None
- self._goal = None
- self._score = None
- self._alive = None
-
- def _get_state(self):
- """Get state tuple (observation, reward, done, info)."""
- assert self._has_state()
-
- # combine pose / goal for observation
- pose = np.asarray(self._pose, dtype=np.float32)
- goal = np.asarray(self._goal, dtype=np.float32)
- observation = np.concatenate((pose, goal))
-
- # Determine reward and if done
- reward = 0
- done = False
-
- time = rospy.Time.now()
- if self._prev_time is not None:
- reward += (time - self._prev_time).to_sec() * self._CONSTANT_REWARD
- dist_sq = np.sum(np.square(pose[1:3] - goal))
- norm_dist = dist_sq / (self._FIELD_SIZE ** 2)
- reward += (time - self._prev_time).to_sec() * self._DISTANCE_REWARD * norm_dist
- self._prev_time = time
-
- if self._prev_score is not None:
- reward += self._GOAL_REWARD * (self._score - self._prev_score)
- self._prev_score = self._score
-
- if not self._alive:
- reward += self._DEATH_REWARD
- done = True
-
- if self._start_time is None:
- self._start_time = time
-
- if (time - self._start_time).to_sec() >= self._MAX_TIME:
- done = True
-
- # info dict
- info = {"score" : self._score}
-
- return (observation, reward, done, info)
-
- def _publish_action(self, action):
- """Publish an action to the ROS network."""
- assert action >= 0 and action < self.action_space.n
-
- action_msg = Twist()
- action_msg.linear.x = self._LINEAR_VELOCITY
- if action == SnakeActions.LEFT:
- action_msg.angular.z = self._ANGULAR_VELOCITY
- if action == SnakeActions.RIGHT:
- action_msg.angular.z = -self._ANGULAR_VELOCITY
-
- self._action_pub.publish(action_msg)
-
- def _pose_cb(self, pose_msg):
- """Callback for poses of each segment of snake."""
- assert len(pose_msg.poses) == self._NUM_SEGMENTS
-
- yaw, __, __ = euler_from_quaternion((
- pose_msg.poses[0].orientation.x,
- pose_msg.poses[0].orientation.y,
- pose_msg.poses[0].orientation.z,
- pose_msg.poses[0].orientation.w))
-
- self._pose = tuple(
- [yaw] +
- [func(pose_msg.poses[i]) for i in range(self._NUM_SEGMENTS)
- for func in (
- lambda pose: pose.position.x,
- lambda pose: pose.position.y)])
- with self._cond:
- self._cond.notify_all()
-
- def _goal_cb(self, goal_msg):
- """Callback for location of goal."""
- self._goal = (goal_msg.point.x, goal_msg.point.y)
- with self._cond:
- self._cond.notify_all()
-
- def _score_cb(self, score_msg):
- """Callback for score of game."""
- self._score = score_msg.data
- with self._cond:
- self._cond.notify_all()
-
- def _alive_cb(self, alive_msg):
- """Callback for active state of snake."""
- self._alive = alive_msg.data
- with self._cond:
- self._cond.notify_all()
diff --git a/rktl_control/CMakeLists.txt b/rktl_control/CMakeLists.txt
deleted file mode 100644
index 1068e412b..000000000
--- a/rktl_control/CMakeLists.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(rktl_control)
-
-# Find catkin macros
-find_package(catkin REQUIRED)
-
-# generates cmake config files and set variables for installation
-catkin_package()
-
-# Tests
-if(CATKIN_ENABLE_TESTING)
- find_package(roslaunch REQUIRED)
- roslaunch_add_file_check(launch)
- find_package(rostest REQUIRED)
- add_rostest(test/test_sync.test)
- add_rostest(test/test_mean_filter.test)
- add_rostest(test/test_particle_filter.test)
-endif()
diff --git a/rktl_control/README.md b/rktl_control/README.md
deleted file mode 100644
index 85d73b3d3..000000000
--- a/rktl_control/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# rktl_control
-
-This package performs several jobs that allow it to move physical cars according
-to commands from either the planning or autonomy code.
-
-Specifically it has three main components:
-
-- It filters the raw perception data using either a moving average filter or a
- particle filter, while also producing estimated odometry (which includes
- velocity in addition to the position given by the raw perception data)
-- It has a closed loop controller which reduces the error between the desired
- and estimated motion of the car by producing control signals.
-- It has a hardware interface, which sends those control signals to the physical
- cars.
-
-It also contains several MATLAB scripts that are useful in tuning and validating
-the Python code used for this project.
diff --git a/rktl_control/config/controller.yaml b/rktl_control/config/controller.yaml
deleted file mode 100644
index 7e1330a65..000000000
--- a/rktl_control/config/controller.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# controller type: 'lead_lag', 'pid', or 'none'
-controller_type: 'none'
-# parameters for all controllers
-limits:
- throttle:
- min: -1.5
- max: 1.5
-# parameters for none (open loop)
-open_loop:
- publish_early: true
-# parameters for lead_lag (stable with 0.2 sec delay)
-# lead:
-# gain: 2.2696
-# alpha: -0.6146
-# beta: -0.7130
-# lag:
-# alpha: -0.9608
-# beta: -0.9951
-# alternate lead_lag (with predictive control)
-lead:
- gain: 3.9229
- alpha: -0.2584
- beta: -0.9231
-lag:
- alpha: -0.573
- beta: -0.9786
-# parameters for pid
-pid:
- kp: 1.0
- ki: 0.0
- kd: 0.0
- anti_windup: 0.0
- deadband: 0.0
\ No newline at end of file
diff --git a/rktl_control/config/hardware_interface.yaml b/rktl_control/config/hardware_interface.yaml
deleted file mode 100644
index 7a58fafe0..000000000
--- a/rktl_control/config/hardware_interface.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# hardware configuration
-port: /dev/ttyACM0
-
-# servo adjustments (uS)
-car0:
- throttle_throw: 90
- steering_left: 950
- steering_center: 1450
- steering_right: 1850
-car1:
- throttle_throw: 90
- steering_left: 950
- steering_center: 1450
- steering_right: 1850
-car2:
- throttle_throw: 90
- steering_left: 950
- steering_center: 1450
- steering_right: 1850
-car3:
- throttle_throw: 90
- steering_left: 950
- steering_center: 1450
- steering_right: 1850
-car4:
- throttle_throw: 90
- steering_left: 950
- steering_center: 1450
- steering_right: 1850
-car5:
- throttle_throw: 90
- steering_left: 950
- steering_center: 1450
- steering_right: 1850
\ No newline at end of file
diff --git a/rktl_control/config/mean_odom_filter.yaml b/rktl_control/config/mean_odom_filter.yaml
deleted file mode 100644
index 8d1016727..000000000
--- a/rktl_control/config/mean_odom_filter.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-delay:
- compensate: false
- duration: 0.15
-buffer_size:
- position: 3
- velocity: 3
\ No newline at end of file
diff --git a/rktl_control/config/particle_odom_filter.yaml b/rktl_control/config/particle_odom_filter.yaml
deleted file mode 100644
index bd4f6e3d5..000000000
--- a/rktl_control/config/particle_odom_filter.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-rate: 10.0
-supersampling: 2
-publish_particles: true
-num_particles: 100
-resample_proportion: 0.05
-boundary_check: true
-delay:
- compensate: true
- duration: 0.1
-allowable_latency: 1.25
-open_loop_limit: 10
-measurement_error:
- location: 0.1
- orientation: 0.2
-generator_noise:
- location: 0.5
- orientation: 0.75
- velocity: 0.1
- steering_angle: 0.05
-efforts:
- enable: true
- buffer_size: 15
- throttle:
- noise: 0.2
- max: 1.0
- min: -1.0
- steering:
- noise: 0.2
- max: 1.0
- min: -1.0
\ No newline at end of file
diff --git a/rktl_control/config/pose_synchronizer.yaml b/rktl_control/config/pose_synchronizer.yaml
deleted file mode 100644
index d4e8c7f52..000000000
--- a/rktl_control/config/pose_synchronizer.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-# rate of the whole perception system (Hz)
-rate: 10.0
-
-# delay in seconds. Lower is better, but AprilTags needs to keep up
-delay: 0.1
-
-# should debug info on latency be published?
-publish_latency: true
-
-# buffer size for each topic.
-# 12 potential detections per update (4 x 30 hz cameras)
-# delay of ~ 2 updates
-buffer_size: 30
-
-# topics to be synchronized
-topics:
- - cars/car0/pose
- - ball/pose
-
-# if Z in the message corresponds to a confidence weight
-use_weights:
- - false
- - true
\ No newline at end of file
diff --git a/rktl_control/firmware/hardware_interface/CarLink.cpp b/rktl_control/firmware/hardware_interface/CarLink.cpp
deleted file mode 100644
index 9d699133b..000000000
--- a/rktl_control/firmware/hardware_interface/CarLink.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-/*********************************************************************
-Class implementation for a single car's radio link.
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-*********************************************************************/
-
-#include "CarLink.hpp"
-
-CarLink::CarLink(ros::NodeHandle *const nh, const int car_num, const int pin) :
- nh{nh},
- car_num{car_num},
- topic{String("car") + String(car_num) + String("/effort")},
- throttle_throw{0},
- steering_center{1500},
- steering_left{1500},
- steering_right{1500},
- enabled{false},
- ppm_out{RISING},
- effort_sub{topic.c_str(), &CarLink::effort_cb, this} {
- ppm_out.write(THROTTLE_CHANNEL, THROTTLE_ZERO);
- ppm_out.write(STEERING_CHANNEL, steering_center);
- ppm_out.begin(pin);
- this->nh->subscribe(effort_sub);
-}
-
-CarLink::~CarLink() {
- disable();
-}
-
-void CarLink::enable() {
- enabled = true;
-}
-
-void CarLink::disable() {
- ppm_out.write(THROTTLE_CHANNEL, THROTTLE_ZERO);
- enabled = false;
-}
-
-bool CarLink::update_params() {
- if (nh->connected()
- && nh->getParam((String("~car") + String(car_num) + String("/throttle_throw")).c_str(), &throttle_throw)
- && nh->getParam((String("~car") + String(car_num) + String("/steering_center")).c_str(), &steering_center)
- && nh->getParam((String("~car") + String(car_num) + String("/steering_left")).c_str(), &steering_left)
- && nh->getParam((String("~car") + String(car_num) + String("/steering_right")).c_str(), &steering_right)) {
- return true;
- } else {
- throttle_throw = 0;
- steering_center = 1500;
- steering_left = 1500;
- steering_right = 1500;
- return false;
- }
-}
-
-void CarLink::effort_cb(const rktl_msgs::ControlEffort& effort) {
- // throttle
- if (enabled) {
- ppm_out.write(THROTTLE_CHANNEL, THROTTLE_ZERO + throttle_throw * effort.throttle);
- } else {
- ppm_out.write(THROTTLE_CHANNEL, THROTTLE_ZERO);
- }
-
- // steering
- if (effort.steering > 0) {
- ppm_out.write(STEERING_CHANNEL, steering_center + (steering_left - steering_center) * effort.steering);
- } else if (effort.steering < 0) {
- ppm_out.write(STEERING_CHANNEL, steering_center + (steering_center - steering_right) * effort.steering);
- } else {
- ppm_out.write(STEERING_CHANNEL, steering_center);
- }
-}
diff --git a/rktl_control/firmware/hardware_interface/CarLink.hpp b/rktl_control/firmware/hardware_interface/CarLink.hpp
deleted file mode 100644
index 691dd4a6b..000000000
--- a/rktl_control/firmware/hardware_interface/CarLink.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/*********************************************************************
-Class definition for a single car's radio link.
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-*********************************************************************/
-
-#pragma once
-
-#include
-
-#include
-#include
-
-class CarLink {
- public:
- CarLink(ros::NodeHandle *const nh, const int car_num, const int pin);
- ~CarLink();
-
- void enable();
- void disable();
- bool update_params();
-
- private:
- const int THROTTLE_ZERO = 1500;
- const int THROTTLE_CHANNEL = 1;
- const int STEERING_CHANNEL = 2;
-
- ros::NodeHandle *const nh;
- const int car_num;
- const String topic;
-
- int throttle_throw;
- int steering_center;
- int steering_left;
- int steering_right;
- bool enabled;
-
- PulsePositionOutput ppm_out;
- ros::Subscriber effort_sub;
-
- void effort_cb(const rktl_msgs::ControlEffort& effort);
-};
diff --git a/rktl_control/nodes/topic_delay b/rktl_control/nodes/topic_delay
deleted file mode 100755
index 67e97cd16..000000000
--- a/rktl_control/nodes/topic_delay
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-"""
-Delay an arbitrary ROS topic without modifying message
-
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-import rospy
-import roslib.message
-from collections import deque
-from time import sleep
-import sys
-
-
-class Delay(object):
- """Class to delay arbitrary messages."""
-
- def __init__(self):
- # get args
- assert len(sys.argv) >= 5
- input_name = sys.argv[1]
- output_name = sys.argv[2]
- topic_type = sys.argv[3]
- delay = float(sys.argv[4])
-
- # get class of type
- msg_class = roslib.message.get_message_class(topic_type)
- assert msg_class is not None
-
- # create node
- rospy.init_node('delay', anonymous=True)
- pub = rospy.Publisher(output_name, msg_class, queue_size=1)
- rospy.Subscriber(input_name, msg_class, self.msg_cb)
- self.buffer = deque()
-
- while not rospy.is_shutdown():
- if len(self.buffer) > 0:
- time, msg = self.buffer.popleft()
- wait = delay - (rospy.Time.now() - time).to_sec()
- if wait > 0:
- sleep(wait)
- pub.publish(msg)
- else:
- sleep(delay/2)
-
- def msg_cb(self, msg):
- """Callback for enqueuing new message"""
- self.buffer.append((rospy.Time.now(), msg))
-
-
-if __name__ == "__main__":
- Delay()
diff --git a/rktl_control/package.xml b/rktl_control/package.xml
deleted file mode 100644
index 0d704140c..000000000
--- a/rktl_control/package.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
- rktl_control
- 0.0.0
- Velocity controller for the ARC Rocket League project.
- Autonomous Robotics Club of Purdue
- BSD 3 Clause
- http://www.purduearc.com
- James Baxter
- Justin Lee
- Darren Lie
- Robert Ketler
- AJ Ernandes
-
- catkin
- rosunit
- rospy
- std_msgs
- nav_msgs
- geometry_msgs
- rosserial_arduino
- joy
- topic_tools
-
\ No newline at end of file
diff --git a/rktl_control/scripts/analyze_step.m b/rktl_control/scripts/analyze_step.m
deleted file mode 100644
index 7c23f0fc3..000000000
--- a/rktl_control/scripts/analyze_step.m
+++ /dev/null
@@ -1,51 +0,0 @@
-% Analyze step responses to perform a system ID
-
-% Collect bag files of effort and odom
-% Run the following bash command in the folder containing responses:
-% for bag in *.bag; do
-% rostopic echo -b $bag -p effort/throttle > "$bag"_effort.csv
-% rostopic echo -b $bag -p odom/twist/twist/linear/x > "$bag"_velocity.csv
-% rostopic echo -b $bag -p pose_sync/pose/pose/position > "$bag"_position.csv
-% done
-
-close all
-clear
-
-% Location to get bag files from
-bagdir = "../../../../data/bags/step_responses";
-
-files = dir(bagdir);
-for i = 1:numel(files)
- file = files(i);
- if ~endsWith(file.name, '.bag')
- continue
- end
-
- % read in data
- effort = readtable(strcat(file.folder, '/', file.name, '_effort.csv'));
- velocity = readtable(strcat(file.folder, '/', file.name, '_velocity.csv'));
- position = readtable(strcat(file.folder, '/', file.name, '_position.csv'));
-
- % convert time from ns since epoch to seconds since start of bag
- t0 = min(effort.x_time(1), velocity.x_time(1));
- effort.x_time = (effort.x_time - t0) * 10^-9;
- velocity.x_time = (velocity.x_time - t0) * 10^-9;
- position.x_time = (position.x_time - t0) * 10^-9;
-
- % manually calculate velocity from position to compare to filter output
- xy = [position.field_x position.field_y];
- delta_xy = [0 0; xy(2:end, :) - xy(1:end-1, :)];
- dt = position.x_time(2) - position.x_time(1);
- v_raw = sqrt(sum(delta_xy.^2 / dt, 2));
- v_smooth = smoothdata(v_raw);
-
- figure
- plot(effort.x_time, effort.field), hold
- plot(velocity.x_time, velocity.field)
- plot(position.x_time, v_raw, ':')
- plot(position.x_time, v_smooth, ':')
- xlabel("Time, seconds")
- ylabel("Effort | Velocity, m/s")
- legend("effor", "filter velocity", "raw velocity", "smoothed velocity")
- title(file.name, 'Interpreter', 'none')
-end
diff --git a/rktl_control/scripts/bicycle_model.m b/rktl_control/scripts/bicycle_model.m
deleted file mode 100644
index 18b923650..000000000
--- a/rktl_control/scripts/bicycle_model.m
+++ /dev/null
@@ -1,25 +0,0 @@
-% Use MATLAB sybolic eq toolbox to solve for the equation used to
-% linearize the bicycle model
-
-close all
-clear
-
-syms DELTA_T CAR_LENGTH % constants
-syms x y theta v_rear psi a_rear % input state
-
-% intermediate calculations
-beta = atan(tan(psi)/2);
-v_body = v_rear/cos(beta);
-curvature = 2*sin(beta)/CAR_LENGTH;
-
-% next state calculations
-next_x = x + v_body*cos(theta+beta)*DELTA_T;
-next_y = y + v_body*sin(theta+beta)*DELTA_T;
-next_theta = theta + curvature*v_body*DELTA_T;
-next_v_rear = v_rear + a_rear*DELTA_T;
-
-% compute jacobian
-state = [x; y; theta; v_rear; psi; a_rear];
-next_state = [next_x; next_y; next_theta; next_v_rear; psi; a_rear]
-F = jacobian(next_state, state)
-
diff --git a/rktl_control/scripts/compare_filters.m b/rktl_control/scripts/compare_filters.m
deleted file mode 100644
index ca61fc990..000000000
--- a/rktl_control/scripts/compare_filters.m
+++ /dev/null
@@ -1,312 +0,0 @@
-% Experiment with different types of filters (ekf, ukf, particle)
-clear
-close all
-
-%% Constants
-% timing
-global DELTA_T
-DURATION = 15; % duration of data, sec
-DELTA_T = 0.1; % step size of filter, sec
-TRUTH_DELTA_T = 0.001; % step size of ground truth simulation, sec
-
-STEPS = DURATION/DELTA_T;
-TRUTH_STEPS = DURATION/TRUTH_DELTA_T;
-
-% uncertainties
-ORIGIN_LOC_STD_DEV = 0.1; % uncertainty of initial location, m
-ORIGIN_DIR_STD_DEV = deg2rad(10); % uncertainty of initial heading, rad
-
-global MEAS_LOC_STD_DEV MEAS_DIR_STD_DEV
-MEAS_LOC_STD_DEV = 0.05; % uncertainty of location measurement, m
-MEAS_DIR_STD_DEV = deg2rad(5); % uncertainty of heading measurment, rad
-
-PROC_PSI_STD_DEV = deg2rad(1.5); % process noise for steering angle, rad
-PROC_ACL_STD_DEV = 0.2; % process noise for rear wheel acceleration, m/s2
-NUM_PARTICLES = 1000; % particles in filter
-
-% physical properties of car
-global CAR_LENGTH MAX_SPEED THROTTLE_TAU STEERING_THROW STEERING_RATE
-CAR_LENGTH = .01; % wheel to wheel dist, m
-MAX_SPEED = 1; % max speed, m/s
-THROTTLE_TAU = 0.25; % time constant for changing speed, sec
-STEERING_THROW = deg2rad(15); % center-side steering throw, rad
-STEERING_RATE = deg2rad(30); % ROC steering, rad per sec
-
-%% Generate randomized control vectors
-% generate an initial control (throttle; steering)
-% throttle uniform between 0.5 and 1.0
-% steering uniform between -1.0 and 1.0
-control = rand(2, STEPS).*[0.5; 2] + [0.5; -1];
-
-%% Generate resulting ground truth data
-% pre-allocate matrix to hold output
-% each column is full state (x, y, theta, v_rear, psi)
-% new column for each time step
-ground_truth = zeros(5, TRUTH_STEPS);
-
-% generate initial state
-% random position, zero velocity, centered steering
-state = randn(5, 1).*[ORIGIN_LOC_STD_DEV; ORIGIN_LOC_STD_DEV; ORIGIN_DIR_STD_DEV; 0; 0];
-
-j = 1;
-for i = 1:TRUTH_STEPS
- % add to raw_data
- ground_truth(:,i) = state;
-
- % step
- state = simulate(state, control(:,j), TRUTH_DELTA_T);
-
- % update index to control vector
- if (mod(i, TRUTH_STEPS/STEPS) == 0)
- j = j + 1;
- end
-end
-
-%% Calculate resulting measurement data
-% each column is measurement (x, y, theta)
-% new column for each time step
-measurements = ground_truth(1:3, 1:(TRUTH_STEPS/STEPS):end);
-
-% add artificial noise
-measurements = measurements + randn(size(measurements)).*[MEAS_LOC_STD_DEV; MEAS_LOC_STD_DEV; MEAS_DIR_STD_DEV];
-
-%% Create inputs to filters
-% initial guesses
-state = zeros(6, 1);
-cov = diag([ORIGIN_LOC_STD_DEV, ORIGIN_LOC_STD_DEV, ORIGIN_DIR_STD_DEV, 0.01, deg2rad(1), 0.01]).^2;
-
-% measurement noise
-R = diag([MEAS_LOC_STD_DEV; MEAS_LOC_STD_DEV; MEAS_DIR_STD_DEV]);
-
-% process noise
-Q0 = diag([0, 0, 0, 0, PROC_PSI_STD_DEV, PROC_ACL_STD_DEV]).^2;
-F = state_jacobian_func(zeros(6,1));
-Q = F*Q0*F';
-
-%% Create filters
-% EKF (additive)
-ekf = extendedKalmanFilter(@state_func, @meas_func, state);
-ekf.StateTransitionJacobianFcn = @state_jacobian_func;
-ekf.MeasurementJacobianFcn = @meas_jacobian_func;
-ekf.StateCovariance = cov;
-ekf.MeasurementNoise = R;
-ekf.ProcessNoise = Q;
-
-% UKF (additive)
-ukf = unscentedKalmanFilter(@state_func, @meas_func, state);
-ukf.StateCovariance = cov;
-ukf.MeasurementNoise = R;
-ukf.ProcessNoise = Q;
-
-% Particle filter
-pf = particleFilter(@particle_state_func, @particle_meas_func);
-initialize(pf, NUM_PARTICLES, zeros(5,1), cov(1:5,1:5));
-
-%% Pre-allocate output
-ekf_state = zeros(6, STEPS);
-ukf_state = zeros(6, STEPS);
-pf_state = zeros(5, STEPS);
-
-%% Run filters
-for i = 1:DURATION/DELTA_T
- %EKF
- [ekf_state(:,i), ~] = correct(ekf, measurements(:,i));
- F = state_jacobian_func(ekf_state(:,i));
- ekf.ProcessNoise = F*Q0*F';
- predict(ekf);
-
- % UKF
- [ukf_state(:,i), ~] = correct(ukf, measurements(:,i));
- F = state_jacobian_func(ukf_state(:,i));
- ukf.ProcessNoise = F*Q0*F';
- predict(ukf);
-
- % Particle
- pf_state(:,i) = correct(pf, measurements(:,i));
- predict(pf);
-end
-
-%% Plotting
-% time vectors
-truth_time = 0:TRUTH_DELTA_T:(DURATION-TRUTH_DELTA_T);
-time = 0:DELTA_T:(DURATION-DELTA_T);
-
-% convert to odom
-truth_odom = state_to_odom(ground_truth);
-ekf_odom = state_to_odom(ekf_state);
-ukf_odom = state_to_odom(ukf_state);
-pf_odom = state_to_odom(pf_state);
-
-% XY plot
-figure
-plot(truth_odom(1,:), truth_odom(2,:))
-hold, grid on
-plot(ekf_odom(1,:), ekf_odom(2,:), '--')
-plot(ukf_odom(1,:), ukf_odom(2,:), '--')
-plot(pf_odom(1,:), pf_odom(2,:), '--')
-plot(measurements(1,:), measurements(2,:), 'x:')
-plot(truth_odom(1,1), truth_odom(2,1), '*b')
-legend(["Ground Truth", "EKF", "UKF", "PF", "Measurements"], 'Location', 'Best')
-title("XY tracking")
-xlabel("x, m")
-ylabel("y, m")
-
-% Odom tracking
-figure
-sgtitle("External state tracking")
-for i = 1:6
- subplot(2,3,i)
- plot(truth_time, truth_odom(i, :))
- hold, grid on
- plot(time, ekf_odom(i,:), '--')
- plot(time, ukf_odom(i,:), '--')
- plot(time, pf_odom(i,:), '--')
- if (i <= 3)
- plot(time, measurements(i,:), 'x:')
- legend(["Ground Truth", "EKF", "UKF", "PF", "Measurements"], 'Location', 'Best')
- else
- legend(["Ground Truth", "EKF", "UKF", "PF"], 'Location', 'Best')
- end
- xlabel("Time, sec")
- switch i
- case 1; title("X tracking"); ylabel("x, m")
- case 2; title("Y tracking"); ylabel("y, m")
- case 3; title("Heading tracking"); ylabel("\theta, rad")
- case 4; title("Body forward velocity tracking"); ylabel("x', m/s")
- case 5; title("Body lateral velocity tracking"); ylabel("y', m/s")
- case 6; title("Angular velocity tracking"); ylabel("\omega, rad/sec")
- end
-end
-
-%% Helper functions
-% transition functions (additive noise) for EKF, UFK
-function next_state = state_func(state)
- % using bicycle model, extrapolate future state
- global CAR_LENGTH DELTA_T
-
- % unpack input
- x = state(1);
- y = state(2);
- theta = state(3);
- v_rear = state(4);
- psi = state(5);
- a_rear = state(6);
-
- % equations were derived in bicyle_model.m
- % calculate predicted next state
- next_state = [
- x + DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- y + DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- theta + (DELTA_T*v_rear*tan(psi))/CAR_LENGTH;
- v_rear + DELTA_T*a_rear;
- psi;
- a_rear
- ];
-end
-
-function measurement = meas_func(state)
- % basically just the H matrix as a function
- measurement = state(1:3);
-end
-
-% jacobian functions for EKF
-function jacobian = state_jacobian_func(state)
- global CAR_LENGTH DELTA_T
-
- % unpack input
- x = state(1);
- y = state(2);
- theta = state(3);
- v_rear = state(4);
- psi = state(5);
- a_rear = state(6);
-
- % equations were derived in bicyle_model.m
- jacobian = [
- 1, 0, -DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2), DELTA_T*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2), (DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*tan(psi)*(tan(psi)^2 + 1))/(4*(tan(psi)^2/4 + 1)^(1/2)) - (DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/2 + 1/2))/(tan(psi)^2/4 + 1)^(1/2), 0;
- 0, 1, DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2), DELTA_T*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2), (DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/2 + 1/2))/(tan(psi)^2/4 + 1)^(1/2) + (DELTA_T*v_rear*tan(psi)*sin(theta + atan(tan(psi)/2))*(tan(psi)^2 + 1))/(4*(tan(psi)^2/4 + 1)^(1/2)), 0;
- 0, 0, 1, (DELTA_T*tan(psi))/CAR_LENGTH, (DELTA_T*v_rear*(tan(psi)^2 + 1))/CAR_LENGTH, 0;
- 0, 0, 0, 1, 0, DELTA_T;
- 0, 0, 0, 0, 1, 0;
- 0, 0, 0, 0, 0, 1
- ];
-end
-
-function jacobian = meas_jacobian_func(~)
- jacobian = [eye(3), zeros(3)];
-end
-
-% Functions for particle filter
-function particles = particle_state_func(particles)
- for i = 1:size(particles, 2)
- % create random control noise
- control = rand(2,1).*[0.5; 2] + [0.5; -1];
- % propagate
- global DELTA_T
- particles(:,i) = simulate(particles(:,i), control, DELTA_T);
- end
- % add a little more modeling noise
-% particles = particles + diag([.001, .001, deg2rad(1), .01, deg2rad(5)])*randn(size(particles));
-end
-
-function probability = particle_meas_func(particles, measurement)
- pred_meas = particles(1:3,:);
- error = pred_meas - ones(size(pred_meas)).*measurement;
-
- global MEAS_DIR_STD_DEV MEAS_LOC_STD_DEV
- sigma = [MEAS_DIR_STD_DEV; MEAS_DIR_STD_DEV; MEAS_LOC_STD_DEV];
- base = (sigma*sqrt(2*pi)).^2;
- probs = base .* exp(-.5*(error./sigma).^2);
-
- probability = probs(1,:).*probs(2,:).*probs(3,:);
-end
-
-function next_state = simulate(state, control, DELTA_T)
- % simple bicycle model with control
- global CAR_LENGTH MAX_SPEED THROTTLE_TAU STEERING_THROW STEERING_RATE
-
- % unpack input
- x = state(1);
- y = state(2);
- theta = state(3);
- v_rear = state(4);
- psi = state(5);
-
- v_rear_ref = MAX_SPEED*control(1);
- psi_ref = STEERING_THROW*control(2);
-
- % update rear wheel velocity using 1st order model
- v_rear = (v_rear-v_rear_ref)*exp(-DELTA_T/THROTTLE_TAU) + v_rear_ref;
-
- % update steering angle using massless acceleration to a fixed rate
- if abs(psi_ref - psi) < STEERING_RATE*DELTA_T
- psi = psi_ref;
- elseif psi_ref > psi
- psi = psi + STEERING_RATE*DELTA_T;
- else
- psi = psi - STEERING_RATE*DELTA_T;
- end
-
- % using bicycle model, extrapolate future state
- next_state = [
- x + DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- y + DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- theta + (DELTA_T*v_rear*tan(psi))/CAR_LENGTH;
- v_rear;
- psi
- ];
-end
-
-function odom = state_to_odom(state)
- global CAR_LENGTH
-
- beta = atan(tan(state(5,:))/2);
- v_body = state(4,:)./cos(beta);
- curvature = 2*sin(beta)/CAR_LENGTH;
- odom = [
- state(1:3,:);
- v_body.*cos(beta);
- v_body.*sin(beta);
- v_body.*curvature
- ];
-end
\ No newline at end of file
diff --git a/rktl_control/scripts/controller_design.m b/rktl_control/scripts/controller_design.m
deleted file mode 100644
index a05bb87e5..000000000
--- a/rktl_control/scripts/controller_design.m
+++ /dev/null
@@ -1,70 +0,0 @@
-% Use sys ID to build a controller
-close all
-clear
-
-% Plant constants
-T = 0.4174; % Time constant, sec
-K = 0.7756; % Static gain, m/s
-
-t_di = 0.15; % input delay, sec
-t_do = 0.05; % input delay, sec
-
-% System contants
-ts = 0.1; % Sample time, sec
-
-% Build plant
-G = tf(K, [T 1], 'InputDelay', t_di, 'OutputDelay', t_do);
-G_zoh = c2d(G, ts, 'zoh');
-
-% various plots
-% figure, step(G_zoh)
-% figure, rlocus(G_zoh)
-% figure, bode(G_zoh), grid
-
-% frequency domain, lead lag controller design
-% Inputs
-sse = 0.05; % steady state error. Percent error wrt velocity input
-PM = 60; % desired phase margin. Degrees. Higher increases stability. 30 is suggested minimum
-w0 = 4.0; % desired gain crossover frequency. Rad/s. Higher increases speed
-
-% Calculate required controller gain
-Kc = (1/sse - 1)/K;
-
-% Find existing phase and gain at w0
-[m, p] = bode(G, w0);
-phi_0 = p + 180; % deg
-M0 = 20*log10(m * Kc); % dB
-
-% Lead controller
-phi_m = PM - phi_0 + 5; % can tweak the 5 degree fudge factor if desired
-gamma = (1+sind(phi_m)) / (1-sind(phi_m));
-wz_lead = w0 / sqrt(gamma);
-wp_lead = w0 * sqrt(gamma);
-G_lead = gamma * tf([1 wz_lead], [1 wp_lead]);
-
-% Lag controller
-M_lead = 10*log10(gamma);
-M_lag = -(M0 + M_lead);
-Gamma = 10^(M_lag/-20);
-wz_lag = w0 / 10;
-wp_lag = wz_lag / Gamma;
-G_lag = 1/Gamma * tf([1 wz_lag], [1 wp_lag]);
-
-% Full lead-lag controller
-C = Kc * G_lead * G_lag;
-% figure, margin(C*G)
-
-% simulate
-figure, step(feedback(C*G, 1)), grid, hold
-
-% discretize
-Cd_rect = c2d(C, ts, 'zoh');
-step(feedback(Cd_rect*G_zoh, 1))
-
-Cd_trap = c2d(C, ts, 'tustin');
-step(feedback(Cd_trap*G_zoh, 1))
-
-legend(["Analog Controller", "Rectangular Integration", "Trapezoidal Integration"])
-
-% output
-zpk(Cd_trap)
diff --git a/rktl_control/scripts/ekf_demo.m b/rktl_control/scripts/ekf_demo.m
deleted file mode 100644
index cf30f2574..000000000
--- a/rktl_control/scripts/ekf_demo.m
+++ /dev/null
@@ -1,262 +0,0 @@
-% Experiment with extended Kalman filter for car's bicycle model
-clear
-close all
-
-%% Constants
-% timing
-DURATION = 30; % duration of data, sec
-FILTER_DELTA_T = 0.1; % step size of filter, sec
-TRUTH_DELTA_T = 0.01; % step size of ground truth simulation, sec
-% ^ THESE NEED TO BE INTEGER MULTIPLES
-
-% uncertainties
-ORIGIN_LOC_STD_DEV = 0.1; % uncertainty of initial location, m
-ORIGIN_DIR_STD_DEV = deg2rad(10); % uncertainty of initial heading, rad
-
-MEAS_LOC_STD_DEV = 0.05; % uncertainty of location measurement, m
-MEAS_DIR_STD_DEV = deg2rad(5); % uncertainty of heading measurment, rad
-
-PROC_PSI_STD_DEV = deg2rad(0.75); % process noise for steering angle, rad
-PROC_ACL_STD_DEV = 0.1; % process noise for rear wheel acceleration, m/s2
-
-% physical properties of car
-global CAR_LENGTH MAX_SPEED THROTTLE_TAU STEERING_THROW STEERING_RATE
-CAR_LENGTH = .01; % wheel to wheel dist, m
-MAX_SPEED = 1; % max speed, m/s
-THROTTLE_TAU = 0.25; % time constant for changing speed, sec
-STEERING_THROW = deg2rad(15); % center-side steering throw, rad
-STEERING_RATE = deg2rad(30); % ROC steering, rad per sec
-
-%% Generate randomized ground truth data
-% pre-allocate matrix to hold output
-% each column is full state (x, y, theta, v_rear, psi)
-% new column for each time step
-ground_truth = zeros(5, DURATION/TRUTH_DELTA_T);
-
-% generate initial state
-% random position, zero velocity, centered steering
-state = randn(5, 1).*[ORIGIN_LOC_STD_DEV; ORIGIN_LOC_STD_DEV; ORIGIN_DIR_STD_DEV; 0; 0];
-
-% generate an initial control (throttle; steering)
-% throttle uniform between 0.5 and 1.0
-% steering uniform between -1.0 and 1.0
-control = rand(2, 1).*[0.5; 2] + [0.5; -1];
-
-for i = 1:size(ground_truth,2)
- % add to raw_data
- ground_truth(:,i) = state;
-
- % update random control with same frequency as filter
- if (mod(i, FILTER_DELTA_T/TRUTH_DELTA_T) == 0)
- control = rand(2, 1).*[0.5; 2] + [0.5; -1];
- end
-
- % step
- state = simulate(state, control, TRUTH_DELTA_T);
-end
-
-%% Calculate resulting measurement data
-% each column is measurement (x, y, theta)
-% new column for each time step
-measurements = ground_truth(1:3, 1:(FILTER_DELTA_T/TRUTH_DELTA_T):end);
-
-% add artificial noise
-measurements = measurements + randn(size(measurements)).*[MEAS_LOC_STD_DEV; MEAS_LOC_STD_DEV; MEAS_DIR_STD_DEV];
-
-%% Paramters for EKF
-% process noise
-% assume steering and rear wheel velocity are independent
-% projecting v_rear, psi noise onto model requires linearization done at each time step
-Q0 = eye(6).*[0; 0; 0; 0; PROC_PSI_STD_DEV; PROC_ACL_STD_DEV].^2;
-
-% observation matrix (can only see position, not internal state)
-H = [eye(3), zeros(3, 3)];
-
-% measurement uncertainty
-% assume independence between x, y, theta
-R = eye(3).*[MEAS_LOC_STD_DEV; MEAS_LOC_STD_DEV; MEAS_DIR_STD_DEV].^2;
-
-%% Initial guess
-state = zeros(6, 1);
-
-% covariance
-% assume independence between all
-% pick something small for v_rear, psi, a_rear
-cov = eye(6).*[ORIGIN_LOC_STD_DEV; ORIGIN_LOC_STD_DEV; ORIGIN_DIR_STD_DEV; 0.01; deg2rad(1); 0.01].^2;
-
-%% Process via Kalman filter
-% pre-allocate matrices to hold output
-estimates = zeros(6, size(measurements,2));
-
-% each page corresponds to time step
-covariances = zeros(6, 6, size(measurements,2));
-
-for i = 1:size(measurements,2)
- % Add to outputs
- estimates(:,i) = state;
- covariances(:,:,i) = cov;
-
- % Extrapolate
- [next_state, F] = extrapolate(state, FILTER_DELTA_T);
- next_cov = F*cov*F'+ F*Q0*F';
-
- % Kalman gain
- gain = next_cov*H'/(H*next_cov*H'+R);
-
- % Update
- state = next_state + gain*(measurements(:,i) - H*next_state);
- cov = (eye(6)-gain*H)*next_cov*(eye(6)-gain*H)' + gain*R*gain';
-end
-
-%% Calculate odometry output
-beta = atan(tan(ground_truth(5,:))/2);
-v_body = ground_truth(4,:)/cos(beta);
-curvature = 2*sin(beta)/CAR_LENGTH;
-odom_true = [
- ground_truth(1:3,:);
- v_body*cos(beta);
- v_body*sin(beta);
- v_body*curvature
-];
-truth_time = 0:TRUTH_DELTA_T:(DURATION-TRUTH_DELTA_T);
-
-beta = atan(tan(estimates(5,:))/2);
-v_body = estimates(4,:)/cos(beta);
-curvature = 2*sin(beta)/CAR_LENGTH;
-odom_est = [
- estimates(1:3,:);
- v_body*cos(beta);
- v_body*sin(beta);
- v_body*curvature
-];
-filter_time = 0:FILTER_DELTA_T:(DURATION-FILTER_DELTA_T);
-
-%% Graphical output
-figure
-plot(ground_truth(1,:), ground_truth(2,:))
-hold, grid on
-plot(estimates(1,:), estimates(2,:), '--')
-plot(measurements(1,:), measurements(2,:), '.')
-plot(ground_truth(1,1), ground_truth(2,1), '*b')
-legend(["Ground Truth", "Estimates", "Measurements"], 'Location', 'Best')
-title("XY tracking")
-xlabel("x, m")
-ylabel("y, m")
-
-figure
-sgtitle("Internal state tracking")
-for i = 1:5
- subplot(2,3,i)
- plot(truth_time, ground_truth(i, :))
- hold, grid on
- plot(filter_time, estimates(i,:), '--')
- if (i <= 3)
- plot(filter_time, measurements(i,:), '.')
- legend(["Ground Truth", "Estimates", "Measurements"], 'Location', 'Best')
- else
- legend(["Ground Truth", "Estimates"], 'Location', 'Best')
- end
- xlabel("Time, sec")
- switch i
- case 1; title("X tracking"); ylabel("x, m")
- case 2; title("Y tracking"); ylabel("y, m")
- case 3; title("Heading tracking"); ylabel("\theta, rad")
- case 4; title("Rear wheel velocity tracking"); ylabel("v_{rear}, m/s")
- case 5; title("Steering angle tracking"); ylabel("\psi, rad")
- end
-end
-
-figure
-sgtitle("External state tracking")
-for i = 1:6
- subplot(2,3,i)
- plot(truth_time, odom_true(i, :))
- hold, grid on
- plot(filter_time, odom_est(i,:), '--')
- if (i <= 3)
- plot(filter_time, measurements(i,:), '.')
- legend(["Ground Truth", "Estimates", "Measurements"], 'Location', 'Best')
- else
- legend(["Ground Truth", "Estimates"], 'Location', 'Best')
- end
- xlabel("Time, sec")
- switch i
- case 1; title("X tracking"); ylabel("x, m")
- case 2; title("Y tracking"); ylabel("y, m")
- case 3; title("Heading tracking"); ylabel("\theta, rad")
- case 4; title("Body forward velocity tracking"); ylabel("x', m/s")
- case 5; title("Body lateral velocity tracking"); ylabel("y', m/s")
- case 6; title("Angular velocity tracking"); ylabel("\omega, rad/sec")
- end
-end
-
-%% Helper functions
-function [next_state, F] = extrapolate(state, DELTA_T)
- % using bicycle model, extrapolate future state
- global CAR_LENGTH
-
- % unpack input
- x = state(1);
- y = state(2);
- theta = state(3);
- v_rear = state(4);
- psi = state(5);
- a_rear = state(6);
-
- % equations were derived in bicyle_model.m
- % calculate predicted next state
- next_state = [
- x + DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- y + DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- theta + (DELTA_T*v_rear*tan(psi))/CAR_LENGTH;
- v_rear + DELTA_T*a_rear;
- psi;
- a_rear
- ];
-
- % calculate jacobian (linearization of this function about this point)
- F = [
- 1, 0, -DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2), DELTA_T*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2), (DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*tan(psi)*(tan(psi)^2 + 1))/(4*(tan(psi)^2/4 + 1)^(1/2)) - (DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/2 + 1/2))/(tan(psi)^2/4 + 1)^(1/2), 0;
- 0, 1, DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2), DELTA_T*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2), (DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/2 + 1/2))/(tan(psi)^2/4 + 1)^(1/2) + (DELTA_T*v_rear*tan(psi)*sin(theta + atan(tan(psi)/2))*(tan(psi)^2 + 1))/(4*(tan(psi)^2/4 + 1)^(1/2)), 0;
- 0, 0, 1, (DELTA_T*tan(psi))/CAR_LENGTH, (DELTA_T*v_rear*(tan(psi)^2 + 1))/CAR_LENGTH, 0;
- 0, 0, 0, 1, 0, DELTA_T;
- 0, 0, 0, 0, 1, 0;
- 0, 0, 0, 0, 0, 1
- ];
-end
-
-function next_state = simulate(state, control, DELTA_T)
- % simple bicycle model with control
- global CAR_LENGTH MAX_SPEED THROTTLE_TAU STEERING_THROW STEERING_RATE
-
- % unpack input
- x = state(1);
- y = state(2);
- theta = state(3);
- v_rear = state(4);
- psi = state(5);
-
- v_rear_ref = MAX_SPEED*control(1);
- psi_ref = STEERING_THROW*control(2);
-
- % update rear wheel velocity using 1st order model
- v_rear = (v_rear-v_rear_ref)*exp(-DELTA_T/THROTTLE_TAU) + v_rear_ref;
-
- % update steering angle using massless acceleration to a fixed rate
- if abs(psi_ref - psi) < STEERING_RATE*DELTA_T
- psi = psi_ref;
- elseif psi_ref > psi
- psi = psi + STEERING_RATE*DELTA_T;
- else
- psi = psi - STEERING_RATE*DELTA_T;
- end
-
- % using bicycle model, extrapolate future state
- next_state = [
- x + DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- y + DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- theta + (DELTA_T*v_rear*tan(psi))/CAR_LENGTH;
- v_rear;
- psi
- ];
-end
\ No newline at end of file
diff --git a/rktl_control/scripts/gen_bicycle_unittest.m b/rktl_control/scripts/gen_bicycle_unittest.m
deleted file mode 100644
index 9c8947efa..000000000
--- a/rktl_control/scripts/gen_bicycle_unittest.m
+++ /dev/null
@@ -1,56 +0,0 @@
-% Generate several bicycle model paths to verify accuracy of Python implementations
-
-% physical properties of car
-global CAR_LENGTH MAX_SPEED THROTTLE_TAU STEERING_THROW STEERING_RATE
-CAR_LENGTH = .01; % wheel to wheel dist, m
-MAX_SPEED = 1; % max speed, m/s
-THROTTLE_TAU = 0.25; % time constant for changing speed, sec
-STEERING_THROW = deg2rad(15); % center-side steering throw, rad
-STEERING_RATE = deg2rad(30); % ROC steering, rad per sec
-
-N_PARTICLES = 5;
-DELTA_T = 0.1;
-
-control = rand(N_PARTICLES, 2)*2 + -1
-states = randn(N_PARTICLES, 5)
-next_states = states;
-for i = 1:N_PARTICLES
- next_states(i,:) = simulate(states(i,:), control(i,:), DELTA_T);
-end
-next_states
-
-function next_state = simulate(state, control, DELTA_T)
- % simple bicycle model with control
- global CAR_LENGTH MAX_SPEED THROTTLE_TAU STEERING_THROW STEERING_RATE
-
- % unpack input
- x = state(1);
- y = state(2);
- theta = state(3);
- v_rear = state(4);
- psi = state(5);
-
- v_rear_ref = MAX_SPEED*control(1);
- psi_ref = STEERING_THROW*control(2);
-
- % update rear wheel velocity using 1st order model
- v_rear = (v_rear-v_rear_ref)*exp(-DELTA_T/THROTTLE_TAU) + v_rear_ref;
-
- % update steering angle using massless acceleration to a fixed rate
- if abs(psi_ref - psi) < STEERING_RATE*DELTA_T
- psi = psi_ref;
- elseif psi_ref > psi
- psi = psi + STEERING_RATE*DELTA_T;
- else
- psi = psi - STEERING_RATE*DELTA_T;
- end
-
- % using bicycle model, extrapolate future state
- next_state = [
- x + DELTA_T*v_rear*cos(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- y + DELTA_T*v_rear*sin(theta + atan(tan(psi)/2))*(tan(psi)^2/4 + 1)^(1/2);
- theta + (DELTA_T*v_rear*tan(psi))/CAR_LENGTH;
- v_rear;
- psi
- ];
-end
\ No newline at end of file
diff --git a/rktl_game/CMakeLists.txt b/rktl_game/CMakeLists.txt
deleted file mode 100644
index a3bcc1085..000000000
--- a/rktl_game/CMakeLists.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(rktl_game)
-
-## Compile as C++11, supported in ROS Kinetic and newer
-# add_compile_options(-std=c++11)
-
-## Find catkin macros and libraries
-## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
-## is used, also find other catkin packages
-find_package(catkin REQUIRED COMPONENTS
- roscpp
- rospy
- std_msgs
-)
-
-###################################
-## catkin specific configuration ##
-###################################
-## The catkin_package macro generates cmake config files for your package
-## Declare things to be passed to dependent projects
-## INCLUDE_DIRS: uncomment this if your package contains header files
-## LIBRARIES: libraries you create in this project that dependent projects also need
-## CATKIN_DEPENDS: catkin_packages dependent projects also need
-## DEPENDS: system dependencies of this project that dependent projects also need
-catkin_package(
-# INCLUDE_DIRS include
-# LIBRARIES rktl_game
-# CATKIN_DEPENDS roscpp rospy std_msgs
-# DEPENDS system_lib
-)
-
-###########
-## Build ##
-###########
-
-## Specify additional locations of header files
-## Your package locations should be listed before other locations
-include_directories(
-# include
- ${catkin_INCLUDE_DIRS}
-)
\ No newline at end of file
diff --git a/rktl_launch/CMakeLists.txt b/rktl_launch/CMakeLists.txt
deleted file mode 100644
index 9a2aefa20..000000000
--- a/rktl_launch/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(rktl_launch)
-
-# find catkin so we can use our macros
-find_package(catkin REQUIRED)
-
-# generate variables for installation
-catkin_package()
-
-# Tests
-if(CATKIN_ENABLE_TESTING)
- find_package(roslaunch REQUIRED)
- roslaunch_add_file_check(launch)
-endif()
diff --git a/rktl_launch/config/global_params.yaml b/rktl_launch/config/global_params.yaml
deleted file mode 100644
index 2053ccd0c..000000000
--- a/rktl_launch/config/global_params.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# ALL UNITS IN SI
-
-# field dimensions
-field:
- width: 3
- # max: 3.5
- # min: 2.5
- length: 4.25
- # max: 5
- # min: 3.5
- wall_thickness: 0.25
- goal:
- width: 1
-
-# car dimensions and physical constants
-ball:
- radius: 0.0508 # (2 in)
-
-# car dimensions and physical constants
-cars:
- length: 0.12 # front to rear wheel center to center, meters
- # min: 0.10
- # max: 0.30
- steering:
- max_throw: 0.1826 # center to side, radians
- # min: 0.16
- # max: 0.20
- rate: 0.9128 # rad/s
- throttle:
- max_speed: 2.3 # m/s
- tau: 0.2 # time constant, seconds
diff --git a/rktl_msgs/CMakeLists.txt b/rktl_msgs/CMakeLists.txt
deleted file mode 100644
index 2a27b853b..000000000
--- a/rktl_msgs/CMakeLists.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(rktl_msgs)
-
-# find catkin so we can use our macros
-find_package(catkin REQUIRED COMPONENTS
- std_msgs
- geometry_msgs
- message_generation
-)
-
-################################################
-## Declare ROS messages, services and actions ##
-################################################
-## Generate messages in the 'msg' folder
-add_message_files(
- FILES
- BezierPath.msg
- BezierPathList.msg
- ControlEffort.msg
- ControlCommand.msg
- MatchStatus.msg
- Path.msg
- Waypoint.msg
- Score.msg
-)
-
-## Generate services in the 'srv' folder
-# add_service_files(
-# FILES
-# Service1.srv
-# Service2.srv
-# )
-
-## Generate actions in the 'action' folder
-# add_action_files(
-# FILES
-# Action1.action
-# Action2.action
-# )
-
-## Generate added messages and services with any dependencies listed here
-generate_messages(
- DEPENDENCIES
- std_msgs
- geometry_msgs
- rktl_msgs
-)
-
-###################################
-## catkin specific configuration ##
-###################################
-## The catkin_package macro generates cmake config files for your package
-## Declare things to be passed to dependent projects
-## INCLUDE_DIRS: uncomment this if your package contains header files
-## LIBRARIES: libraries you create in this project that dependent projects also need
-## CATKIN_DEPENDS: catkin_packages dependent projects also need
-## DEPENDS: system dependencies of this project that dependent projects also need
-catkin_package(
-# INCLUDE_DIRS include
-# LIBRARIES rktl_msgs
- CATKIN_DEPENDS message_runtime
-# DEPENDS system_lib
-)
diff --git a/rktl_msgs/package.xml b/rktl_msgs/package.xml
deleted file mode 100644
index c9f30d2d0..000000000
--- a/rktl_msgs/package.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
- rktl_msgs
- 0.0.0
- Custom messages for ARC Rocket League Project
- Autonomous Robotics Club of Purdue
- BSD 3 Clause
- https://www.purduearc.com
- Arianna McNamara
- James Baxter
-
- catkin
- message_generation
- message_runtime
- geometry_msgs
- std_msgs
-
diff --git a/rktl_perception/CMakeLists.txt b/rktl_perception/CMakeLists.txt
deleted file mode 100644
index d5ae76ee4..000000000
--- a/rktl_perception/CMakeLists.txt
+++ /dev/null
@@ -1,129 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(rktl_perception)
-
-## Compile as C++11
-add_compile_options(-std=c++11)
-
-## Find catkin macros and libraries
-find_package(catkin REQUIRED COMPONENTS
- roscpp
- pointgrey_camera_driver
- image_proc
- tf2
- tf2_ros
- image_transport
- cv_bridge
- apriltag_ros
- geometry_msgs
- image_geometry
-)
-
-## System dependencies
-find_package(OpenCV REQUIRED
- core
- highgui
-)
-find_package(Eigen3 REQUIRED)
-
-catkin_package(
- INCLUDE_DIRS include
-# LIBRARIES rktl_perception
- CATKIN_DEPENDS
- roscpp
- pointgrey_camera_driver
- image_proc
- tf2
- tf2_ros
- image_transport
- cv_bridge
- apriltag_ros
- DEPENDS OpenCV
-)
-
-# Add include directories
-include_directories(
- include
- ${catkin_INCLUDE_DIRS}
- ${OpenCV_INCLUDE_DIRS}
- ${EIGEN3_INCLUDE_DIR}
-)
-
-# Build localizer node
-add_executable(${PROJECT_NAME}_localizer_node
- src/localizer_node.cpp
- src/localizer.cpp
-)
-set_target_properties(${PROJECT_NAME}_localizer_node PROPERTIES
- OUTPUT_NAME localizer
- PREFIX ""
-)
-add_dependencies(${PROJECT_NAME}_localizer_node
- ${${PROJECT_NAME}_EXPORTED_TARGETS}
- ${catkin_EXPORTED_TARGETS}
-)
-target_link_libraries(${PROJECT_NAME}_localizer_node
- ${catkin_LIBRARIES}
- Eigen3::Eigen
-)
-
-# Build ball_detection node
-add_executable(${PROJECT_NAME}_ball_detection_node
- src/ball_detection.cpp
- src/ball_detection_node.cpp
-)
-set_target_properties(${PROJECT_NAME}_ball_detection_node PROPERTIES
- OUTPUT_NAME ball_detection
- PREFIX ""
-)
-add_dependencies(${PROJECT_NAME}_ball_detection_node
- ${${PROJECT_NAME}_EXPORTED_TARGETS}
- ${catkin_EXPORTED_TARGETS}
- ${OpenCV_EXPORTED_TARGETS}
-)
-target_link_libraries(${PROJECT_NAME}_ball_detection_node
- ${catkin_LIBRARIES}
- ${OpenCV_LIBRARIES}
-)
-
-# Build ball_color node
-add_executable(${PROJECT_NAME}_ball_color_node
- src/ball_color.cpp
-)
-set_target_properties(${PROJECT_NAME}_ball_color_node PROPERTIES
- OUTPUT_NAME ball_color
- PREFIX ""
-)
-add_dependencies(${PROJECT_NAME}_ball_color_node
- ${${PROJECT_NAME}_EXPORTED_TARGETS}
- ${catkin_EXPORTED_TARGETS}
- ${OpenCV_EXPORTED_TARGETS}
-)
-target_link_libraries(${PROJECT_NAME}_ball_color_node
- ${catkin_LIBRARIES}
- ${OpenCV_LIBRARIES}
-)
-
-# vis node
-add_executable(${PROJECT_NAME}_focus_vis_node
- src/focus_vis.cpp
- src/focus_vis_node.cpp
-)
-set_target_properties(${PROJECT_NAME}_focus_vis_node PROPERTIES
- OUTPUT_NAME focus_vis
- PREFIX ""
-)
-add_dependencies(${PROJECT_NAME}_focus_vis_node
- ${${PROJECT_NAME}_EXPORTED_TARGETS}
- ${catkin_EXPORTED_TARGETS}
- ${OpenCV_EXPORTED_TARGETS}
-)
-target_link_libraries(${PROJECT_NAME}_focus_vis_node
- ${catkin_LIBRARIES}
- ${OpenCV_LIBRARIES}
-)
-
-# Tests
-if(CATKIN_ENABLE_TESTING)
- find_package(roslaunch REQUIRED)
- roslaunch_add_file_check(launch)
-endif()
\ No newline at end of file
diff --git a/rktl_perception/README.md b/rktl_perception/README.md
deleted file mode 100644
index 5bf1500cb..000000000
--- a/rktl_perception/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# rktl_perception
-
-This packages contains all nodes, launch files, and configuration files
-associated with the perception stack.
diff --git a/rktl_perception/config/apriltag_settings.yaml b/rktl_perception/config/apriltag_settings.yaml
deleted file mode 100644
index d971e57ec..000000000
--- a/rktl_perception/config/apriltag_settings.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-tag_family: 'tag25h9'
-tag_border: 1
-tag_threads: 4
-tag_decimate: 1.0
-tag_blur: 0.0
-tag_refine_edges: 1
-tag_refine_decode: 0
-tag_refine_pose: 0
-tag_debug: 0
-publish_tf: false
\ No newline at end of file
diff --git a/rktl_perception/config/ball_settings.yaml b/rktl_perception/config/ball_settings.yaml
deleted file mode 100644
index ef3cb0d9b..000000000
--- a/rktl_perception/config/ball_settings.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-publishThresh: true
-min_hue: 50
-max_hue: 100
-min_sat: 70
-max_sat: 180
-min_vib: 40
-max_vib: 100
-min_size: 250
-erode: 2
-dilate: 8
\ No newline at end of file
diff --git a/rktl_perception/config/cam0/calibration.yaml b/rktl_perception/config/cam0/calibration.yaml
deleted file mode 100644
index 5b0dbcc46..000000000
--- a/rktl_perception/config/cam0/calibration.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 1288
-image_height: 964
-camera_name: 15356310
-camera_matrix:
- rows: 3
- cols: 3
- data: [610.4617133035794, 0, 648.8400790764666, 0, 612.8636563372359, 509.10981446149, 0, 0, 1]
-distortion_model: plumb_bob
-distortion_coefficients:
- rows: 1
- cols: 5
- data: [-0.2432499228354474, 0.04650574966262223, -0.001403592205737197, -0.001846787629070493, 0]
-rectification_matrix:
- rows: 3
- cols: 3
- data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
-projection_matrix:
- rows: 3
- cols: 4
- data: [437.3668518066406, 0, 642.4606144639438, 0, 0, 497.3519287109375, 519.6716765197198, 0, 0, 0, 1, 0]
\ No newline at end of file
diff --git a/rktl_perception/config/cam0/pointgrey.yaml b/rktl_perception/config/cam0/pointgrey.yaml
deleted file mode 100644
index 8e515d209..000000000
--- a/rktl_perception/config/cam0/pointgrey.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-serial: 15356310
-frame_rate: 30
-auto_shutter: false
-shutter_speed: 0.001
-auto_gain: false
-gain: 25.0
diff --git a/rktl_perception/config/cam1/calibration.yaml b/rktl_perception/config/cam1/calibration.yaml
deleted file mode 100644
index bb30a9a9a..000000000
--- a/rktl_perception/config/cam1/calibration.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 1288
-image_height: 964
-camera_name: 15374749
-camera_matrix:
- rows: 3
- cols: 3
- data: [614.57136917199, 0, 655.3078549706212, 0, 615.1667182505952, 527.5952225219263, 0, 0, 1]
-distortion_model: plumb_bob
-distortion_coefficients:
- rows: 1
- cols: 5
- data: [-0.2139026302415871, 0.03446647780631319, 0.0003785624850231047, 0.001450676024114855, 0]
-rectification_matrix:
- rows: 3
- cols: 3
- data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
-projection_matrix:
- rows: 3
- cols: 4
- data: [429.591796875, 0, 663.7421479902696, 0, 0, 510.9947814941406, 548.7979183761527, 0, 0, 0, 1, 0]
\ No newline at end of file
diff --git a/rktl_perception/config/cam1/pointgrey.yaml b/rktl_perception/config/cam1/pointgrey.yaml
deleted file mode 100644
index 7b13e1a1d..000000000
--- a/rktl_perception/config/cam1/pointgrey.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-serial: 15374749
-frame_rate: 30
-auto_shutter: false
-shutter_speed: 0.001
-auto_gain: false
-gain: 25.0
diff --git a/rktl_perception/config/cam2/calibration.yaml b/rktl_perception/config/cam2/calibration.yaml
deleted file mode 100644
index 561353f25..000000000
--- a/rktl_perception/config/cam2/calibration.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 1288
-image_height: 964
-camera_name: 15374754
-camera_matrix:
- rows: 3
- cols: 3
- data: [1126.08574591452, 0, 616.7375520125502, 0, 1116.255414571801, 525.7495338331287, 0, 0, 1]
-distortion_model: plumb_bob
-distortion_coefficients:
- rows: 1
- cols: 5
- data: [-0.7942828618736387, 0.5381554120245144, 0.00207013009474492, 0.01022713459887638, 0]
-rectification_matrix:
- rows: 3
- cols: 3
- data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
-projection_matrix:
- rows: 3
- cols: 4
- data: [827.6439208984375, 0, 634.490142294926, 0, 0, 908.0331420898438, 549.0852317258759, 0, 0, 0, 1, 0]
\ No newline at end of file
diff --git a/rktl_perception/config/cam2/pointgrey.yaml b/rktl_perception/config/cam2/pointgrey.yaml
deleted file mode 100644
index f923d79ef..000000000
--- a/rktl_perception/config/cam2/pointgrey.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-serial: 15374754
-frame_rate: 30
-auto_shutter: false
-shutter_speed: 0.001
-auto_gain: false
-gain: 25.0
diff --git a/rktl_perception/config/cam3/calibration.yaml b/rktl_perception/config/cam3/calibration.yaml
deleted file mode 100644
index 6bcbd4561..000000000
--- a/rktl_perception/config/cam3/calibration.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 1288
-image_height: 964
-camera_name: 15374755
-camera_matrix:
- rows: 3
- cols: 3
- data: [691.8297171319369, 0, 616.8642266971589, 0, 697.0148612025321, 517.5329039891342, 0, 0, 1]
-distortion_model: plumb_bob
-distortion_coefficients:
- rows: 1
- cols: 5
- data: [-0.2678012034146836, 0.05194194294770087, 0.002831174538127116, 0.001537062305525107, 0]
-rectification_matrix:
- rows: 3
- cols: 3
- data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
-projection_matrix:
- rows: 3
- cols: 4
- data: [478.2955932617188, 0, 611.6567255038535, 0, 0, 581.3926391601562, 538.9608447667997, 0, 0, 0, 1, 0]
\ No newline at end of file
diff --git a/rktl_perception/config/cam3/pointgrey.yaml b/rktl_perception/config/cam3/pointgrey.yaml
deleted file mode 100644
index ad098a9ac..000000000
--- a/rktl_perception/config/cam3/pointgrey.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-serial: 15374755
-frame_rate: 30
-auto_shutter: false
-shutter_speed: 0.001
-auto_gain: false
-gain: 25.0
diff --git a/rktl_perception/config/tags.yaml b/rktl_perception/config/tags.yaml
deleted file mode 100644
index 65097ca38..000000000
--- a/rktl_perception/config/tags.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-standalone_tags:
- [
- { id: 10, size: 0.07 },
- { id: 11, size: 0.07 },
- { id: 12, size: 0.07 },
- { id: 13, size: 0.07 },
- ]
-tag_bundles:
- [
- {
- name: 'field_origin',
- layout:
- [
- {id: 0, size: 0.2, x: -1.8081, y: 1.1742, z: -0.0308, qw: 0.9997, qx: 0.0099, qy: 0.0046, qz: 0.0197},
- {id: 1, size: 0.2, x: 0.0701, y: 1.3045, z: -0.0291, qw: 0.9998, qx: -0.0116, qy: -0.0007, qz: 0.0161},
- {id: 2, size: 0.2, x: 1.7377, y: 1.2590, z: -0.0244, qw: 0.9998, qx: 0.0085, qy: -0.0038, qz: 0.0152},
- {id: 3, size: 0.2, x: -1.1869, y: 0.0000, z: 0.0000, qw: 1.0000, qx: 0.0000, qy: 0.0000, qz: 0.0000},
- {id: 4, size: 0.2, x: 1.1869, y: 0.0138, z: -0.0036, qw: 0.9998, qx: 0.0047, qy: -0.0161, qz: 0.0089},
- {id: 5, size: 0.2, x: -1.7244, y: -1.2670, z: 0.0111, qw: 0.9998, qx: 0.0008, qy: -0.0024, qz: 0.0208},
- {id: 6, size: 0.2, x: 0.1233, y: -1.2090, z: 0.0012, qw: 0.9998, qx: 0.0023, qy: 0.0177, qz: 0.0000},
- {id: 7, size: 0.2, x: 1.8482, y: -1.0850, z: 0.0030, qw: 0.9999, qx: -0.0131, qy: -0.0009, qz: -0.0015}
- ]
- }
- ]
\ No newline at end of file
diff --git a/rktl_perception/launch/README.md b/rktl_perception/launch/README.md
deleted file mode 100644
index 9f3f3f8ac..000000000
--- a/rktl_perception/launch/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Launch Files
-
-These are [.launch files](https://wiki.ros.org/roslaunch/XML) that can be run
-using [roslaunch](https://wiki.ros.org/roslaunch):
-
-```shell
-roslaunch rktl_perception
-```
-
-```{contents} Launch Files in this package
-:depth: 2
-:backlinks: top
-:local: true
-```
-
----
-
-## cal.launch
-
-Utility launch file that launches an interface for calibrating all cameras. It
-is configured for a 9 squares by 7 squares chessboard pattern, where each square
-is 26cm is length.
-
----
-
-## camera.launch
-
-Launch file containing everything that needs to be run for 1 camera. The
-`camera_name` argument can be changed to easily change the camera used.
-
-### Launch Arguments
-
-- `load_manager` (default: true): If true, a nodelet manager is started. If a
- nodelet manager is already running, set this to false to avoid any errors.
-- `manager_name` (default: `camera_manager`): The name of the nodelet manager to
- use for nodes that require it.
-- `manager_threads` (default: `4`): Number of threads to use in the nodelet
- manager, if started.
-- `camera_name` (default: `cam0`): Name of the camera to launch the stack for.
-
-### Nodes
-
-- `cams/{camera_name}/{manager_name}`: [Nodelet](https://wiki.ros.org/nodelet)
- manager. Only run if the `load_manager` argument is set to true.
-- `cams/{camera_name}/{camera_name}`: [Nodelet](https://wiki.ros.org/nodelet) of type
- [pointgrey_camera_driver/PointGreyCameraNodelet](https://wiki.ros.org/pointgrey_camera_driver).
- Puts camera feed onto the ROS network. Loads parameters (including camera
- serial number) from `rktl_perception/config/{camera_name}/pointgrey.yaml`
- and calibration from `rktl_perception/config/{camera_name}/calibration.yaml`.
-- `cams/{camera_name}/apriltag`: `apriltag_ros_continuous_node` node from the
- [apriltag_ros](https://wiki.ros.org/apriltag_ros) package. Looks for
- AprilTags in the camera feed and outputs the result to be used by other
- nodes. Loads parameters from `rktl_perception/config/apriltag_settings.yaml`
- and `rktl_perception/config/tags.yaml`.
-- `localizer`: [`localizer`](../nodes/README.md#localizer) node from the
- `rktl_perception` package.
-- `pose_to_tf`: [`pose_to_tf`](../nodes/README.md#pose_to_tf) node from the
- `rktl_perception` package.
-- `ball_detection`: [`ball_detection`](../nodes/README.md#ball_detection) node
- from the `rktl_perception` package. Loads parameters from
- `rktl_perception/config/ball_settings.yaml`.
-
-### Included Launch Files
-
-- `image_proc image_proc.launch`: Basic processing stack (image rectification
- for the provided camera).
-
----
-
-## color_picker.launch
-
-Utility launch file that opens a color picker used for fine tuning the parameters used for ball detection.
-
----
-
-## field_view.launch
-
-Utility launch file used for displaying the camera positions and feeds in RViz.
-
----
-
-## focus_assist.launch
-
-Utility launch file used launching the topics needed for focusing camera lenses.
-
----
diff --git a/rktl_perception/launch/cal.launch b/rktl_perception/launch/cal.launch
deleted file mode 100644
index 207d93c28..000000000
--- a/rktl_perception/launch/cal.launch
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
diff --git a/rktl_perception/launch/camera.launch b/rktl_perception/launch/camera.launch
deleted file mode 100644
index 63dff35d5..000000000
--- a/rktl_perception/launch/camera.launch
+++ /dev/null
@@ -1,57 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- '0,1,2,3,4,5,6,7': /origin/pose
- '10': /cars/car0/pose
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/rktl_perception/launch/color_picker.launch b/rktl_perception/launch/color_picker.launch
deleted file mode 100644
index a29fdb9d4..000000000
--- a/rktl_perception/launch/color_picker.launch
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
diff --git a/rktl_perception/launch/field_view.launch b/rktl_perception/launch/field_view.launch
deleted file mode 100644
index 99e562212..000000000
--- a/rktl_perception/launch/field_view.launch
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/rktl_perception/launch/focus_assist.launch b/rktl_perception/launch/focus_assist.launch
deleted file mode 100644
index af0d3bcc1..000000000
--- a/rktl_perception/launch/focus_assist.launch
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
diff --git a/rktl_perception/nodes/README.md b/rktl_perception/nodes/README.md
deleted file mode 100644
index eb86e76fe..000000000
--- a/rktl_perception/nodes/README.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# ROS Nodes
-
-These are the [ROS Nodes](http://wiki.ros.org/Nodes) provided by this package.
-Most likely, you will use a launch file to run these. You can also run them
-by using `rosrun`:
-
-```
-rosrun rktl_perception
-```
-
-```{contents} ROS Nodes in this package
-:depth: 2
-:backlinks: top
-:local: true
-```
-
----
-
-## ball_detection
-
-C++ Node. Uses color thresholding to find a vector pointing from the camera in
-the direction of the ball.
-
-### Subscribed Topics
-
-- `image_rect_color` ([image_transport Camera](https://wiki.ros.org/image_transport)):
- Full-color camera feed.
-
-### Published Topics
-
-- `ball_vec` ([geometry_msgs/Vector3Stamped](/geometry_msgs/html/msg/Vector3Stamped.html#http://)):
- The $x$ and $y$ components of the vector comprise the the unit vector which
- passes from the camera center to the detected ball. The $z$ component is
- the largest contour area detected (the size of the ball as it appears to
- the camera).
-- `threshold_img` ([image_transport Camera](https://wiki.ros.org/image_transport)):
- The black-and-white output of the thresholding. Only published to if the
- `publishThresh` parameter is set to true.
-
-### Parameters
-
-- `publishThresh` (bool, default: false): Whether or not to publish the image
- after thresholding is applied.
-- `minHue` (int, default: `60`): The lower value of the hue to threshold on a
- 0 - 255 scale.
-- `minSat` (int, default: `135`): The lower value of the saturation to
- threshold on a 0 - 255 scale.
-- `minVib` (int, default: `50`): The lower value of the vibrance to threshold
- on a 0 - 255 scale.
-- `maxHue` (int, default: `150`): The upper value of the hue to threshold on a
- 0 - 255 scale.
-- `maxSat` (int, default: `225`): The upper value of the saturation to
- threshold on a 0 - 255 scale.
-- `maxVib` (int, default: `255`): The upper value of the vibrance to threshold
- on a 0 - 255 scale.
-
----
-
-## focus_vis
-
-C++ Node. Publishes a version of `image_color_rect` that has had Canny Edge
-Detection applied. Useful when using a
-[siemens star](https://en.wikipedia.org/wiki/Siemens_star) to get the focus of
-the camera just right.
-
-### Subscribed Topics
-
-- `image_rect_color` ([image_transport Camera](https://wiki.ros.org/image_transport)):
- Full-color camera feed.
-
-### Published Topics
-
-- `edge_dectection` ([image_transport Camera](https://wiki.ros.org/image_transport)):
- The black-and-white output of the edge detection.
-
----
-
-## localizer
-
-C++ Node. Calculates the location of all objects in the cameras' view.
-Republishes AprilTag and ball detections (received in "camera space") into a
-common reference frame for all cameras (referred to as "world space"). The
-measurements used to put the camera into world space are "averaged out" over a
-series of measurements to reduce noise.
-
-Multiple topics are published to, given by the `pub_topics` parameter. This
-parameter is interpreted as key/value pairs, where the key is an
-[AprilTag](https://wiki.ros.org/apriltag_ros) ID or the IDs of an AprilTag
-bundle and the value is the name of the topic that should be published to. In
-the case of a bundle, the tag IDs are given in ascending order, delimited by
-commas (e.g. `"0,1,2,3"`).
-
-For the inclined, "camera space" is defined as the space whose origin is defined
-as the center of the camera, where the $x$-axis points to the right of the
-image, the $y$-axis points towards the top of the image, and the camera looks
-down the positive $z$-axis (this is the same convention that OpenCV uses).
-"World space" is defined as the space whose origin is the center of the field,
-where the $x$-axis points towards the "right-hand" goals, the $y$-axis points
-towards the "top" sideline, and the $z$-axis points directly up.
-
-### Subscribed Topics
-
-- `tag_detections` ([apriltag_ros/AprilTagDetectionArray](/apriltag_ros/html/msg/AprilTagDetectionArray.html#http://)):
- Output of an AprilTag detection node. The name of this topic can be easily
- adjusted by modifying the `dectection_topic` parameter.
-- `ball_vector` ([geometry_msgs/Vector3Stamped](/geometry_msgs/html/msg/Vector3Stamped.html#http://)):
- Output of the [`ball_detection`](#ball-detection) node. The name of this
- topic can be easily adjusted by modifying the `ball_sub_topic` parameter.
-
-### Published Topics
-
-- `{pub_topic}` ([geometry_msgs/PoseWithCovarianceStamped](/geometry_msgs/html/msg/PoseWithCovarianceStamped.html#http://)):
- The position/orientation of the camera in world space.
-- `{pub_topics.values}` ([geometry_msgs/PoseWithCovarianceStamped](/geometry_msgs/html/msg/PoseWithCovarianceStamped.html#http://)):
- The position/orientation of the given objects in world space. `pub_topics`
- is a dictionary (`key`/`value` pairs) where each detections of AprilTag
- with ID `key` is published on the topic named `value`.
-
-### Parameters
-
-- `dectection_topic` (string, default: `"tag_detections"`): Name of topic
- subscribed to, published to by an AprilTag detection node.
-- `origin_id` (string, default: `"0"`): ID of the AprilTag or AprilTag bundle
- that defines the origin of world space, i.e. the center of the field.
-- `pub_topic` (string) Topic name to publish camera pose to.
-- `pub_topics` (dictionary): `key`/`value` pairs where each detections of
- AprilTag with ID `key` is published on the topic named `value`.
-- `ball_sub_topic` (string, default: `"ball_vector"`): Name of topic
- subscribed to, published to by a [`ball_detection`](#ball-detection) node.
-- `ball_pub_topic` (string, default: `"ball_pos"`): Name of topic
- published containing the position of the ball in world space.
-- `ball_radius` (float, default: `0.05`): Radius of the ball, in meters.
-- `buffer_size` (int, default: `10`): Number of elements used in the "averaging"
- of the camera pose.
-- `queue_size` (int, default: `100`): Size of the the queue for the publishers.
-
----
-
-## pose_to_tf
-
-Publishes an output of the [localizer](#localizer) node onto the [tf tree](https://wiki.ros.org/tf2).
-
-### Subscribed Topics
-
-- `pose` ([geometry_msgs/PoseWithCovarianceStamped](/geometry_msgs/html/msg/PoseWithCovarianceStamped.html#http://)):
- Data to republish onto the TF tree.
-
-### Parameters
-
-- `~cam_frame_id` (string, default: `"cam0"`): Frame ID of the camera.
-
----
-
-## projector
-
-Create a depth map to project camera data onto ground plane.
-
-### Subscribed Topics
-
-- `projector` ([sensor_msgs/CameraInfo](/sensor_msgs/html/msg/CameraInfo.html#http://)):
- Camera Info, containing the projection matrix used to re-project the image
- onto the plane.
-
-### Published Topics
-
-- `depth_rect` ([sensor_msgs/Image](/sensor_msgs/html/msg/Image.html#http://)):
- Calculated depth of each pixel in the camera.
-
-### Parameters
-
-- `~ground_height` (float, default: `0.0`): Height of the ground, in meters.
-- `~update_period` (float, default: `1`): Rate at which to re-compute depth map,
- in updates per second.
-
----
diff --git a/rktl_perception/package.xml b/rktl_perception/package.xml
deleted file mode 100644
index f19c535c9..000000000
--- a/rktl_perception/package.xml
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
- rktl_perception
- 0.0.0
- Perception for ARC Rocket League project
- Autonomous Robotics Club of Purdue
- BSD 3 Clause
- http://www.purduearc.com
- James Baxter
- Daniel Gerblick
- AJ Ernandes
-
- catkin
- roscpp
- pointgrey_camera_driver
- image_proc
- apriltag_ros
- tf2
- tf2_ros
- image_transport
- cv_bridge
- nodelet
- camera_calibration
- rviz
-
diff --git a/rktl_perception/src/ball_detection_node.cpp b/rktl_perception/src/ball_detection_node.cpp
deleted file mode 100644
index 79fa76099..000000000
--- a/rktl_perception/src/ball_detection_node.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Node to detect the ball's position on the field.
- * License:
- * BSD 3-Clause License
- * Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- * All rights reserved.
- */
-
-#include
-
-int main(int argc, char* argv[]) {
- ros::init(argc, argv, "ball_detection");
-
- ros::NodeHandle nh;
- ros::NodeHandle pnh("~");
- rktl_perception::BallDetection ballDetection(nh, pnh);
-
- ros::spin();
- return 0;
-}
diff --git a/rktl_planner/CMakeLists.txt b/rktl_planner/CMakeLists.txt
deleted file mode 100644
index ce97bf1be..000000000
--- a/rktl_planner/CMakeLists.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(rktl_planner)
-
-# find catkin so we can use our macros
-find_package(catkin REQUIRED COMPONENTS
- rospy
- rktl_msgs
- std_srvs
- std_msgs
- geometry_msgs
- message_generation
-)
-
-## Uncomment this if the package has a setup.py. This macro ensures
-## modules and global scripts declared therein get installed
-## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
-catkin_python_setup()
-
-# msg and srv
-add_service_files(
- FILES
- CreateBezierPath.srv
-)
-
-generate_messages(
- DEPENDENCIES
- rktl_msgs
- std_srvs
- std_msgs
- geometry_msgs
-)
-
-# generate variables for installation
-catkin_package()
diff --git a/rktl_planner/config/path_follower.yaml b/rktl_planner/config/path_follower.yaml
deleted file mode 100644
index fb97b2333..000000000
--- a/rktl_planner/config/path_follower.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-lookahead_dist: 0.25
-lookahead_gain: 0.1
-lookahead_pnts: -1
diff --git a/rktl_planner/config/path_planner.yaml b/rktl_planner/config/path_planner.yaml
deleted file mode 100644
index a07c92493..000000000
--- a/rktl_planner/config/path_planner.yaml
+++ /dev/null
@@ -1 +0,0 @@
-planner_type: 'complex'
\ No newline at end of file
diff --git a/rktl_planner/config/patrol_planner.yaml b/rktl_planner/config/patrol_planner.yaml
deleted file mode 100644
index 10917f6df..000000000
--- a/rktl_planner/config/patrol_planner.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-# tuned params for no prediction at 0.15 delay
-wall_avoidance:
- reverse_time: 0.125
- distance_margin: 0.375
- heading_margin: 1.57
-
-patrol_wall_dist: 0.75
-
-curvature_gain:
- kp: 2.0
- kd: 0.05
- falloff: 0.25
-
-scoring:
- car_lookahead_dist: 0.75
- goal_depth_target: 0.05
-
-# X coordinate to switch to defense
-defensive_line: -0.5
-
-# Y coordinate to switch to reverse
-reverse_line: 0.0
-
-speed: 1.75
-attempt_timeout: 4.0
diff --git a/rktl_planner/setup.py b/rktl_planner/setup.py
deleted file mode 100755
index a4c08b610..000000000
--- a/rktl_planner/setup.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python3
-
-from distutils.core import setup
-from catkin_pkg.python_setup import generate_distutils_setup
-
-setup_args = generate_distutils_setup(
- packages=['rktl_planner'],
- package_dir={'': 'src'}
-)
-
-setup(**setup_args)
diff --git a/rktl_planner/src/rktl_planner/__init__.py b/rktl_planner/src/rktl_planner/__init__.py
deleted file mode 100755
index 9114b66b0..000000000
--- a/rktl_planner/src/rktl_planner/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""Helper modules for the local planner package.
-
-License:
- BSD 3-Clause License
- Copyright (c) 2023, Autonomous Robotics Club of Purdue (Purdue ARC)
- All rights reserved.
-"""
-
-from rktl_planner.bezier_curve import BezierCurve
-from rktl_planner.bezier_path import BezierPath
-
-__all__ = ['BezierCurve', 'BezierPath',]
\ No newline at end of file
diff --git a/rktl_planner/srv/CreateBezierPath.srv b/rktl_planner/srv/CreateBezierPath.srv
deleted file mode 100644
index 468e112fe..000000000
--- a/rktl_planner/srv/CreateBezierPath.srv
+++ /dev/null
@@ -1,8 +0,0 @@
-float64 velocity
-geometry_msgs/Pose[] target_poses
-std_msgs/Duration[] target_durations
-std_msgs/Duration bezier_segment_duration
-std_msgs/Duration linear_segment_duration
----
-rktl_msgs/BezierPath[] bezier_paths
-rktl_msgs/Path linear_path
diff --git a/rktl_sim/CMakeLists.txt b/rktl_sim/CMakeLists.txt
deleted file mode 100644
index 460ecbc03..000000000
--- a/rktl_sim/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(rktl_sim)
-
-# find catkin so we can use our macros
-find_package(catkin REQUIRED)
-
-# install python module(s)
-catkin_python_setup()
-
-# generate variables for installation
-catkin_package()
-
-# tests
-if(CATKIN_ENABLE_TESTING)
- find_package(roslaunch REQUIRED)
- roslaunch_add_file_check(launch)
- find_package(rostest REQUIRED)
- add_rostest(test/test_car.test)
-endif()
diff --git a/rktl_sim/config/simulation.yaml b/rktl_sim/config/simulation.yaml
deleted file mode 100644
index 60be00b58..000000000
--- a/rktl_sim/config/simulation.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
-rate: 10
-spawn_height: 0.06
-
-car:
- # init_pose:
- # pos: [0.5, 0.0, 0.06]
- sensor_noise:
- pos: [0.01, 0.01, 0.00]
- orient: [0.0, 0.0, 0.09]
- dropout: 0.15
-
-ball:
- init_speed: 0.0
- # init_pose:
- # pos: [0.0, 0.5, 0.06]
- # orient: [0.0, 0.0, 0.0]
- sensor_noise:
- pos: [0.01, 0.01, 0.0]
- orient: [0.0, 0.0, 0.0]
- dropout: 0.1
-
-#
-# Simulation settings
-#
-
-rate: 10
-spawn_height: 0.06
-
-# Directly export to setPhysicsEngineParameters()
-engine:
- fixedTimeStep: 0.001
- restitutionVelocityThreshold: 0.0001
-
-# Directly exported to changeDynamics()
-dynamics:
- ball:
- mass: 0.2
- lateralFriction: 0.4
- restitution: 0.7
- rollingFriction: 0.0001
- spinningFriction: 0.001
- contactDamping: 0.0
- contactStiffness: 0.0
- collisionMargin: 0.0
- car:
- mass: 0.0
- lateralFriction: 0.0
- restitution: 0.0
- rollingFriction: 0.0
- spinningFriction: 0.0
- contactDamping: 0.0
- contactStiffness: 0.0
- collisionMargin: 0.0
- walls:
- mass: 0.0
- lateralFriction: 0.0
- restitution: 0.0
- rollingFriction: 0.0
- spinningFriction: 0.0
- contactDamping: 0.0
- contactStiffness: 0.0
- collisionMargin: 0.0
- floor:
- mass: 0.0
- lateralFriction: 0.0
- restitution: 0.0
- rollingFriction: 0.0
- spinningFriction: 0.0
- contactDamping: 0.0
- contactStiffness: 0.0
- collisionMargin: 0.0
-
-#
-# Object instances
-#
-
-sensor_noise:
- car:
- pos: [0.01, 0.01, 0.00]
- orient: [0.0, 0.0, 0.09]
- dropout: 0.0
- ball:
- pos: [0.01, 0.01, 0.0]
- orient: [0.0, 0.0, 0.0]
- dropout: 0.0
-
-cars:
- - name: "car0"
- # init_pose:
- # pos: [0.5, 0.0, 0.06]
-
- # - name: "car1"
- # # init_pose:
- # # pos: [0.5, 0.0, 0.06]
- # sensor_noise:
- # pos: [0.01, 0.01, 0.00]
- # orient: [0.0, 0.0, 0.09]
-
-ball:
- init_speed: 0.0
- # init_pose:
- # pos: [0.0, 0.5, 0.06]
- # orient: [0.0, 0.0, 0.0]
\ No newline at end of file
diff --git a/rktl_sim/config/visualization.yaml b/rktl_sim/config/visualization.yaml
deleted file mode 100644
index d88d24526..000000000
--- a/rktl_sim/config/visualization.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-rate: 10
-cars:
- body_width: 0.0762 # (3 in)
- body_length: 0.2032 # (8 in)
diff --git a/rktl_sim/setup.py b/rktl_sim/setup.py
deleted file mode 100644
index cfc71ded9..000000000
--- a/rktl_sim/setup.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python3
-
-from distutils.core import setup
-from catkin_pkg.python_setup import generate_distutils_setup
-
-setup_args = generate_distutils_setup(
- packages=['simulator', 'visualizer'],
- package_dir={'': 'src'}
-)
-
-setup(**setup_args)
diff --git a/src/image_pipeline/.github/workflows/basic-build-ci.yaml b/src/image_pipeline/.github/workflows/basic-build-ci.yaml
new file mode 100644
index 000000000..25448b3fe
--- /dev/null
+++ b/src/image_pipeline/.github/workflows/basic-build-ci.yaml
@@ -0,0 +1,39 @@
+# NOTE(review): removed stray ROS 2 parameter-file residue ("/**:" /
+# "ros__parameters:") that is not valid GitHub Actions workflow syntax.
+name: Basic Build Workflow
+
+on:
+ push:
+ branches: [rolling]
+ pull_request:
+ types: [opened, reopened, synchronize]
+
+jobs:
+ build-rolling:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ container:
+ image: osrf/ros2:testing
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v2
+ - name: Create Workspace
+ run: |
+ mkdir src_tmp
+ mv `find -maxdepth 1 -not -name . -not -name src_tmp` src_tmp/
+ mv src_tmp/ src/
+ - name: Install Prerequisites
+ run: |
+ bash -c 'source /opt/ros/$ROS_DISTRO/setup.bash; \
+ apt-get update && apt-get upgrade -y && rosdep update; \
+ rosdep install --from-paths src --ignore-src -y'
+ - name: Build Workspace
+ run: |
+ bash -c 'source /opt/ros/$ROS_DISTRO/setup.bash; \
+ colcon build'
+ - name: Run Tests
+ run: |
+ bash -c 'source /opt/ros/$ROS_DISTRO/setup.bash; \
+ colcon test; \
+ colcon test-result --verbose'
diff --git a/src/image_pipeline/.gitignore b/src/image_pipeline/.gitignore
new file mode 100644
index 000000000..72723e50a
--- /dev/null
+++ b/src/image_pipeline/.gitignore
@@ -0,0 +1 @@
+*pyc
diff --git a/src/image_pipeline/CONTRIBUTING.md b/src/image_pipeline/CONTRIBUTING.md
new file mode 100644
index 000000000..5e190bdca
--- /dev/null
+++ b/src/image_pipeline/CONTRIBUTING.md
@@ -0,0 +1,3 @@
+Any contribution that you make to this repository will
+be under the BSD license 2.0, as dictated by that
+[license](https://opensource.org/licenses/BSD-3-Clause).
diff --git a/src/image_pipeline/LICENSE b/src/image_pipeline/LICENSE
new file mode 100644
index 000000000..09078518e
--- /dev/null
+++ b/src/image_pipeline/LICENSE
@@ -0,0 +1,239 @@
+Portions of the code are licensed under the BSD License 2.0:
+
+All rights reserved.
+
+Software License Agreement (BSD License 2.0)
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+
+Portions of the code are licensed under the Apache License 2.0:
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/src/image_pipeline/README.md b/src/image_pipeline/README.md
new file mode 100644
index 000000000..0835e5af0
--- /dev/null
+++ b/src/image_pipeline/README.md
@@ -0,0 +1,10 @@
+image_pipeline
+==============
+
+[![](https://github.com/ros-perception/image_pipeline/workflows/Basic%20Build%20Workflow/badge.svg?branch=ros2)](https://github.com/ros-perception/image_pipeline/actions)
+
+This package fills the gap between getting raw images from a camera driver and higher-level vision processing.
+
+For more information on this metapackage and underlying packages, please see [the ROS wiki entry](http://wiki.ros.org/image_pipeline).
+
+For examples, see the [image_pipeline tutorials entry](http://wiki.ros.org/image_pipeline/Tutorials) on the ROS Wiki.
diff --git a/src/image_pipeline/camera_calibration/CHANGELOG.rst b/src/image_pipeline/camera_calibration/CHANGELOG.rst
new file mode 100644
index 000000000..4feb0f95d
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/CHANGELOG.rst
@@ -0,0 +1,248 @@
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Changelog for package camera_calibration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+4.0.0 (2022-12-24)
+------------------
+* [backport iron] ROS 2: Added more aruco dicts, fixed aruco linerror bug (`#873 `_) (`#890 `_)
+ backport `#873 `_
+* [backport iron] ROS 2: Fixing thrown Exception in camerachecker.py (`#871 `_) (`#888 `_)
+ backport `#871 `_
+* add myself as a maintainer (`#846 `_)
+* fix threading shutdown
+* use correct synchronous service call
+* use remap rules instead of parameters for services
+* remove duplicated definition of on_model_change
+* fix service check
+* remove commented code
+* Fix QoS incompatibility camera_calibration ROS2
+* perform calibration in another thread
+* Contributors: Alejandro Hernández Cordero, Christian Rauch, Kenji Brameld, Michael Ferguson, Michal Wojcik
+
+3.0.1 (2022-12-04)
+------------------
+* add python3-opencv to camera calibration dependency
+* port changes from `#755 `_ to rolling branch
+* Contributors: Kenji Brameld
+
+3.0.0 (2022-04-29)
+------------------
+* Some small fixes noticed while reviewing.
+* fix premature camera model change in camera_calibration
+* Fix shebang lines for noetic python3
+* Update fisheye distortion model definition
+* Fix calibration yaml formatting (`#580 `_) (`#585 `_)
+* updated linear_error function to handle partial board views (`#561 `_)
+* Fix missing detected checkerboard points (`#558 `_)
+* ChArUco board, Noetic (`#549 `_)
+* fix `#503 `_: (`#545 `_)
+* Minimal Noetic (`#530 `_)
+* Apply `#509 `_ and `#526 `_ to Noetic Branch (`#528 `_)
+* Add Fisheye calibration tool (`#440 `_)
+* camera_calibration: Improve YAML formatting, make config dumping methods static (`#438 `_)
+* camera_calibration: Fix all-zero distortion coeffs returned for a rational_polynomial model (`#433 `_)
+* Make sure 'calibrate' button works even if not receiving images anymore
+* Add a comment
+* Replace deque with a modified Queue, add --queue-size param
+* Remove print statement
+* Cosmetic changes
+* Add max-chessboard-speed option to allow more accurate calibration of rolling shutter cameras.
+* revert back
+* added missing imports
+* update pytest.ini
+* fixes to pass tests
+* rebase change
+* implemented fisheye mono and stereo calibration based on the melodic branch
+* trimmed whitespace at line endings
+* Update camera_calibration setup.cfg to use underscores (`#688 `_)
+* Add maintainer (`#667 `_)
+* Fixed crash when rosargs are given (`#597 `_)
+* Contributors: Chris Lalancette, David Torres Ocaña, DavidTorresOcana, Gabor Soros, Jacob Perron, John Stechschulte, Joshua Whitley, Martin Valgur, Matthijs den Toom, Michael Carroll, Patrick Musau, Photon, Spiros Evangelatos, Victor Dubois, jaiveersinghNV, soeroesg
+
+2.2.1 (2020-08-27)
+------------------
+* remove email blasts from steve macenski (`#596 `_)
+* Add pytest.ini to fix warning (`#584 `_)
+ Fixes the following warning:
+ Warning: The 'junit_family' default value will change to 'xunit2' in pytest 6.0.
+ Add 'junit_family=xunit1' to your pytest.ini file to keep the current format in future versions of pytest and silence this warning.
+* [Foxy] Use ament_auto Macros (`#573 `_)
+* Contributors: Jacob Perron, Joshua Whitley, Steve Macenski
+
+2.2.0 (2020-07-27)
+------------------
+* Removed basestring (no longer exists in new python 3 version). (`#554 `_)
+ Fixes `#551 `_
+* Initial ROS2 commit.
+* Contributors: Michael Carroll, PfeifferMicha
+
+1.12.23 (2018-05-10)
+--------------------
+* camera_checker: Ensure cols + rows are in correct order (`#319 `_)
+ Without this commit, specifying a smaller column than row size lead to
+ huge reported errors:
+ ```
+ $ rosrun camera_calibration cameracheck.py --size 6x7 --square 0.0495
+ Linearity RMS Error: 13.545 Pixels Reprojection RMS Error: 22.766 Pixels
+ $ rosrun camera_calibration cameracheck.py --size 7x6 --square 0.0495
+ Linearity RMS Error: 0.092 Pixels Reprojection RMS Error: 0.083 Pixels
+ ```
+ This commit switches columns and rows around if necessary.
+* Contributors: Martin Günther
+
+1.12.22 (2017-12-08)
+--------------------
+* Changed flags CV_LOAD_IMAGE_COLOR by IMREAD_COLOR to adapt to Opencv3. (`#252 `_)
+* Fixed stereo calibration problem with chessboard with the same number of rows and cols by rotating the corners to same direction.
+* Contributors: jbosch
+
+1.12.21 (2017-11-05)
+--------------------
+* re-add the calibration nodes but now using the Python modules.
+ Fixes `#298 `_
+* Move nodes to Python module.
+* Contributors: Vincent Rabaud
+
+1.12.20 (2017-04-30)
+--------------------
+* properly save bytes buffer as such
+ This is useful for Python 3 and fixes `#256 `_.
+* Get tests slightly looser.
+ OpenCV 3.2 gives slightly different results apparently.
+* Use floor division where necessary. (`#247 `_)
+* Fix and Improve Camera Calibration Checker Node (`#254 `_)
+ * Fix according to calibrator.py API
+ * Add approximate to cameracheck
+* Force first corner off chessboard to be uppler left.
+ Fixes `#140 `_
+* fix doc jobs
+ This is a proper fix for `#233 `_
+* During stereo calibration check that the number of corners detected in the left and right images are the same. This fixes `ros-perception/image_pipeline#225 `_
+* Contributors: Leonard Gerard, Martin Peris, Vincent Rabaud, hgaiser
+
+1.12.19 (2016-07-24)
+--------------------
+* Fix array check in camerachecky.py
+ This closes `#205 `_
+* Contributors: Vincent Rabaud
+
+1.12.18 (2016-07-12)
+--------------------
+
+1.12.17 (2016-07-11)
+--------------------
+* fix typo np -> numpy
+* fix failing tests
+* Contributors: Shingo Kitagawa, Vincent Rabaud
+
+1.12.16 (2016-03-19)
+--------------------
+* clean OpenCV dependency in package.xml
+* Contributors: Vincent Rabaud
+
+1.12.15 (2016-01-17)
+--------------------
+* better 16 handling in mkgray
+ This re-uses `#150 `_ and therefore closes `#150 `_
+* fix OpenCV2 compatibility
+* fix tests with OpenCV3
+* [Calibrator]: add yaml file with calibration data in output
+* Contributors: Vincent Rabaud, sambrose
+
+1.12.14 (2015-07-22)
+--------------------
+* remove camera_hammer and install Python nodes properly
+ camera_hammer was just a test for camera info, nothing to do with
+ calibration. Plus the test was basic.
+* Correct three errors that prevented the node to work properly.
+* Contributors: Filippo Basso, Vincent Rabaud
+
+1.12.13 (2015-04-06)
+--------------------
+* replace Queue by deque of fixed size for simplicity
+ That is a potential fix for `#112 `_
+* Contributors: Vincent Rabaud
+
+1.12.12 (2014-12-31)
+--------------------
+* try to improve `#112 `_
+* Contributors: Vincent Rabaud
+
+1.12.11 (2014-10-26)
+--------------------
+
+1.12.10 (2014-09-28)
+--------------------
+* Update calibrator.py
+ bugfix: stereo calibrator crashed after the signature of the method for the computation of the epipolar error changed but the function call was not updated
+* Contributors: Volker Grabe
+
+1.12.9 (2014-09-21)
+-------------------
+* fix bad Python
+* only analyze the latest image
+ fixes `#97 `_
+* flips width and height during resize to give correct aspect ratio
+* Contributors: Russell Toris, Vincent Rabaud
+
+1.12.8 (2014-08-19)
+-------------------
+* install scripts in the local bin (they are now rosrun-able again)
+ fixes `#93 `_
+* fix default Constructor for OpenCV flags
+ this does not change anything in practice as the flag is set by the node.
+ It just fixes the test.
+* Contributors: Vincent Rabaud
+
+1.12.6 (2014-07-27)
+-------------------
+* make sure the GUI is started in its processing thread and fix a typo
+ This fully fixes `#85 `_
+* fix bad call to save an image
+* have display be in its own thread
+ that could be a fix for `#85 `_
+* fix bad usage of Numpy
+ fixes `#89 `_
+* fix asymmetric circle calibration
+ fixes `#35 `_
+* add more tests
+* improve unittests to include all patterns
+* install Python scripts properly
+ and fixes `#86 `_
+* fix typo that leads to segfault
+ fixes `#84 `_
+* also print self.report() on calibrate ... allows to use the params without having to commit them (e.g. for extrensic calibration between to cameras not used as stereo pair)
+* fixes `#76 `_
+ Move Python approximate time synchronizer to ros_comm
+* remove all trace of cv in Python (use cv2)
+* remove deprecated file (as mentioned in its help)
+* fixes `#25 `_
+ This is just removing deprecated options that were around since diamondback
+* fixes `#74 `_
+ calibrator.py is now using the cv2 only API when using cv_bridge.
+ The API got changed too but it seems to only be used internally.
+* Contributors: Vincent Rabaud, ahb
+
+1.12.5 (2014-05-11)
+-------------------
+* Fix `#68 `_: StringIO issues in calibrator.py
+* fix architecture independent
+* Contributors: Miquel Massot, Vincent Rabaud
+
+1.12.4 (2014-04-28)
+-------------------
+
+1.12.3 (2014-04-12)
+-------------------
+* camera_calibration: Fix Python import order
+* Contributors: Scott K Logan
+
+1.12.2 (2014-04-08)
+-------------------
+* Fixes a typo on stereo camera info service calls
+ Script works after correcting the call names.
+* Contributors: JoonasMelin
+
+1.11.4 (2013-11-23 13:10:55 +0100)
+----------------------------------
+- add visualization during calibration and several calibration flags (#48)
diff --git a/src/image_pipeline/camera_calibration/button.jpg b/src/image_pipeline/camera_calibration/button.jpg
new file mode 100644
index 000000000..494a6f6fc
Binary files /dev/null and b/src/image_pipeline/camera_calibration/button.jpg differ
diff --git a/src/image_pipeline/camera_calibration/doc/conf.py b/src/image_pipeline/camera_calibration/doc/conf.py
new file mode 100644
index 000000000..82ad7fe0b
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/doc/conf.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+#
+# camera_calibration documentation build configuration file, created by
+# sphinx-quickstart on Mon Jun 1 14:21:53 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'camera_calibration'
+copyright = '2009, Willow Garage, Inc.'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.1'
+# The full version, including alpha/beta/rc tags.
+release = '0.1.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+#html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'camera_calibrationdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'camera_calibration.tex', 'stereo\\_utils Documentation',
+ 'James Bowman', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+ 'http://docs.python.org/': None,
+ 'http://docs.scipy.org/doc/numpy' : None,
+ 'http://www.ros.org/doc/api/tf/html/python/' : None
+ }
diff --git a/src/image_pipeline/camera_calibration/doc/index.rst b/src/image_pipeline/camera_calibration/doc/index.rst
new file mode 100644
index 000000000..741e4e5b7
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/doc/index.rst
@@ -0,0 +1,18 @@
+camera_calibration
+==================
+
+The camera_calibration package contains a user-friendly calibration tool,
+cameracalibrator. This tool uses the following Python classes, which
+conveniently hide some of the complexities of using OpenCV's calibration
+process and chessboard detection, and the details of constructing a ROS
+CameraInfo message. These classes are documented here for people who
+need to extend or make a new calibration tool.
+
+For details on the camera model and camera calibration process, see
+http://docs.opencv.org/master/d9/d0c/group__calib3d.html
+
+.. autoclass:: camera_calibration.calibrator.MonoCalibrator
+ :members:
+
+.. autoclass:: camera_calibration.calibrator.StereoCalibrator
+ :members:
diff --git a/src/image_pipeline/camera_calibration/mainpage.dox b/src/image_pipeline/camera_calibration/mainpage.dox
new file mode 100644
index 000000000..dbcfe690e
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/mainpage.dox
@@ -0,0 +1,59 @@
+/**
+\mainpage
+\htmlinclude manifest.html
+
+\b The camera_calibration package contains tools for calibrating monocular and stereo cameras.
+
+\section codeapi Code API
+
+camera_calibration does not have a code API.
+
+\section rosapi ROS API
+
+List of nodes:
+- \b calibrationnode
+
+
+
+
+
+\subsection node_name calibrationnode
+
+calibrationnode subscribes to ROS raw image topics, and presents a
+calibration window. It can run in both monocular and stereo modes.
+The calibration window shows the current images from the cameras,
+highlighting the checkerboard. When the user presses the "CALIBRATE"
+button, the node computes the camera calibration parameters. When the
+user clicks "UPLOAD", the node uploads these new calibration parameters
+to the camera driver using a service call.
+
+\subsubsection Usage
+\verbatim
+$ node_type1 [standard ROS args]
+\endverbatim
+
+\par Example
+
+\verbatim
+$ rosrun camera_calibration cal.py right:=/my_stereo/right/image_raw left:=/my_stereo/left/image_raw left_camera:=/my_stereo/left right_camera:=/my_stereo/right
+\endverbatim
+
+
+\subsubsection topics ROS topics
+
+Subscribes to:
+- \b "left": [sensor_msgs/Image] left raw image topic, for stereo cameras
+- \b "right": [sensor_msgs/Image] right raw image topic, for stereo cameras
+- \b "image": [sensor_msgs/Image] raw image topic, for monocular cameras
+
+Makes service calls to:
+
+\subsubsection services ROS services
+- \b "foo_service": [std_srvs/FooType] description of foo_service
+- \b "camera/set_camera_info": [sensor_msgs/SetCameraInfo] node of the camera for monocular
+- \b "left_camera/set_camera_info": [sensor_msgs/SetCameraInfo] node of the left stereo camera
+- \b "right_camera/set_camera_info": [sensor_msgs/SetCameraInfo] node of the right stereo camera
+
+
+
+*/
diff --git a/src/image_pipeline/camera_calibration/package.xml b/src/image_pipeline/camera_calibration/package.xml
new file mode 100644
index 000000000..e9335bf53
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/package.xml
@@ -0,0 +1,40 @@
+
+
+
+ camera_calibration
+ 4.0.0
+
+ camera_calibration allows easy calibration of monocular or stereo
+ cameras using a checkerboard calibration target.
+
+
+ Vincent Rabaud
+ Joshua Whitley
+ Jacob Perron
+ Michael Ferguson
+
+ BSD
+ https://index.ros.org/p/camera_calibration/github-ros-perception-image_pipeline/
+ https://github.com/ros-perception/image_pipeline/issues
+ https://github.com/ros-perception/image_pipeline
+ James Bowman
+ Patrick Mihelich
+
+ cv_bridge
+ image_geometry
+ message_filters
+ python3-opencv
+ rclpy
+ std_srvs
+ sensor_msgs
+
+ ament_copyright
+ ament_flake8
+ ament_pep257
+ python3-pytest
+ python3-requests
+
+
+ ament_python
+
+
diff --git a/src/image_pipeline/camera_calibration/pytest.ini b/src/image_pipeline/camera_calibration/pytest.ini
new file mode 100644
index 000000000..fe55d2ed6
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+junit_family=xunit2
diff --git a/src/image_pipeline/camera_calibration/resource/camera_calibration b/src/image_pipeline/camera_calibration/resource/camera_calibration
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/image_pipeline/camera_calibration/rosdoc.yaml b/src/image_pipeline/camera_calibration/rosdoc.yaml
new file mode 100644
index 000000000..9dee22792
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/rosdoc.yaml
@@ -0,0 +1,6 @@
+/**:
+ ros__parameters:
+ - builder: sphinx
+ name: Python API
+ output_dir: python
+ sphinx_root_dir: doc
diff --git a/src/image_pipeline/camera_calibration/scripts/tarfile_calibration.py b/src/image_pipeline/camera_calibration/scripts/tarfile_calibration.py
new file mode 100755
index 000000000..059f2d303
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/scripts/tarfile_calibration.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+#
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import numpy
+
+import cv2
+import cv_bridge
+import tarfile
+
+from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, CalibrationException, ChessboardInfo
+
+import rclpy
+import sensor_msgs.srv
+
+def display(win_name, img):
+ cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
+ cv2.imshow( win_name, numpy.asarray( img[:,:] ))
+ k = cv2.waitKey(0)
+ cv2.destroyWindow(win_name)
+ if k in [27, ord('q')]:
+ rclpy.shutdown()
+
+
+def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags = 0, visualize = False, alpha=1.0):
+ if mono:
+ calibrator = MonoCalibrator(boards, calib_flags)
+ else:
+ calibrator = StereoCalibrator(boards, calib_flags)
+
+ calibrator.do_tarfile_calibration(tarname)
+
+ print(calibrator.ost())
+
+ if upload:
+ info = calibrator.as_message()
+ if mono:
+ set_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "camera", sensor_msgs.srv.SetCameraInfo)
+
+ response = set_camera_info_service.call(info)
+ if not response.success:
+ raise RuntimeError("connected to set_camera_info service, but failed setting camera_info")
+ else:
+ set_left_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "left_camera", sensor_msgs.srv.SetCameraInfo)
+ set_right_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "right_camera", sensor_msgs.srv.SetCameraInfo)
+
+ response1 = set_left_camera_info_service(info[0])
+ response2 = set_right_camera_info_service(info[1])
+ if not (response1.result().success and response2.result().success):
+ raise RuntimeError("connected to set_camera_info service, but failed setting camera_info")
+
+ if visualize:
+
+ #Show rectified images
+ calibrator.set_alpha(alpha)
+
+ archive = tarfile.open(tarname, 'r')
+ if mono:
+ for f in archive.getnames():
+ if f.startswith('left') and (f.endswith('.pgm') or f.endswith('png')):
+ filedata = archive.extractfile(f).read()
+ file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8)
+ im=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR)
+
+ bridge = cv_bridge.CvBridge()
+ try:
+ msg=bridge.cv2_to_imgmsg(im, "bgr8")
+ except cv_bridge.CvBridgeError as e:
+ print(e)
+
+                #handle msg returns the rectified image with corner detection once camera is calibrated.
+ drawable=calibrator.handle_msg(msg)
+ vis=numpy.asarray( drawable.scrib[:,:])
+ #Display. Name of window:f
+ display(f, vis)
+ else:
+ limages = [ f for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ]
+ limages.sort()
+ rimages = [ f for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ]
+ rimages.sort()
+
+ if not len(limages) == len(rimages):
+ raise RuntimeError("Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages)))
+
+ for i in range(len(limages)):
+ l=limages[i]
+ r=rimages[i]
+
+ if l.startswith('left') and (l.endswith('.pgm') or l.endswith('png')) and r.startswith('right') and (r.endswith('.pgm') or r.endswith('png')):
+ # LEFT IMAGE
+ filedata = archive.extractfile(l).read()
+ file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8)
+ im_left=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR)
+
+ bridge = cv_bridge.CvBridge()
+ try:
+ msg_left=bridge.cv2_to_imgmsg(im_left, "bgr8")
+ except cv_bridge.CvBridgeError as e:
+ print(e)
+
+ #RIGHT IMAGE
+ filedata = archive.extractfile(r).read()
+ file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8)
+ im_right=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR)
+ try:
+ msg_right=bridge.cv2_to_imgmsg(im_right, "bgr8")
+ except cv_bridge.CvBridgeError as e:
+ print(e)
+
+ drawable=calibrator.handle_msg([ msg_left,msg_right] )
+
+ h, w = numpy.asarray(drawable.lscrib[:,:]).shape[:2]
+ vis = numpy.zeros((h, w*2, 3), numpy.uint8)
+ vis[:h, :w ,:] = numpy.asarray(drawable.lscrib[:,:])
+ vis[:h, w:w*2, :] = numpy.asarray(drawable.rscrib[:,:])
+
+ display(l+" "+r,vis)
+
+
+if __name__ == '__main__':
+ from optparse import OptionParser
+ parser = OptionParser("%prog TARFILE [ opts ]")
+ parser.add_option("--mono", default=False, action="store_true", dest="mono",
+ help="Monocular calibration only. Calibrates left images.")
+ parser.add_option("-s", "--size", default=[], action="append", dest="size",
+ help="specify chessboard size as NxM [default: 8x6]")
+ parser.add_option("-q", "--square", default=[], action="append", dest="square",
+ help="specify chessboard square size in meters [default: 0.108]")
+ parser.add_option("--upload", default=False, action="store_true", dest="upload",
+ help="Upload results to camera(s).")
+ parser.add_option("--fix-principal-point", action="store_true", default=False,
+ help="fix the principal point at the image center")
+ parser.add_option("--fix-aspect-ratio", action="store_true", default=False,
+ help="enforce focal lengths (fx, fy) are equal")
+ parser.add_option("--zero-tangent-dist", action="store_true", default=False,
+ help="set tangential distortion coefficients (p1, p2) to zero")
+ parser.add_option("-k", "--k-coefficients", type="int", default=2, metavar="NUM_COEFFS",
+ help="number of radial distortion coefficients to use (up to 6, default %default)")
+ parser.add_option("--visualize", action="store_true", default=False,
+ help="visualize rectified images after calibration")
+ parser.add_option("-a", "--alpha", type="float", default=1.0, metavar="ALPHA",
+ help="zoom for visualization of rectifies images. Ranges from 0 (zoomed in, all pixels in calibrated image are valid) to 1 (zoomed out, all pixels in original image are in calibrated image). default %default)")
+
+ options, args = parser.parse_args()
+
+ if len(options.size) != len(options.square):
+ parser.error("Number of size and square inputs must be the same!")
+
+ if not options.square:
+ options.square.append("0.108")
+ options.size.append("8x6")
+
+ boards = []
+ for (sz, sq) in zip(options.size, options.square):
+ info = ChessboardInfo()
+ info.dim = float(sq)
+ size = tuple([int(c) for c in sz.split('x')])
+ info.n_cols = size[0]
+ info.n_rows = size[1]
+
+ boards.append(info)
+
+ if not boards:
+ parser.error("Must supply at least one chessboard")
+
+ if not args:
+ parser.error("Must give tarfile name")
+ if not os.path.exists(args[0]):
+ parser.error("Tarfile %s does not exist. Please select valid tarfile" % args[0])
+
+ tarname = args[0]
+
+ num_ks = options.k_coefficients
+
+ calib_flags = 0
+ if options.fix_principal_point:
+ calib_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
+ if options.fix_aspect_ratio:
+ calib_flags |= cv2.CALIB_FIX_ASPECT_RATIO
+ if options.zero_tangent_dist:
+ calib_flags |= cv2.CALIB_ZERO_TANGENT_DIST
+ if (num_ks > 3):
+ calib_flags |= cv2.CALIB_RATIONAL_MODEL
+ if (num_ks < 6):
+ calib_flags |= cv2.CALIB_FIX_K6
+ if (num_ks < 5):
+ calib_flags |= cv2.CALIB_FIX_K5
+ if (num_ks < 4):
+ calib_flags |= cv2.CALIB_FIX_K4
+ if (num_ks < 3):
+ calib_flags |= cv2.CALIB_FIX_K3
+ if (num_ks < 2):
+ calib_flags |= cv2.CALIB_FIX_K2
+ if (num_ks < 1):
+ calib_flags |= cv2.CALIB_FIX_K1
+
+ cal_from_tarfile(boards, tarname, options.mono, options.upload, calib_flags, options.visualize, options.alpha)
diff --git a/src/image_pipeline/camera_calibration/setup.cfg b/src/image_pipeline/camera_calibration/setup.cfg
new file mode 100644
index 000000000..0fe247b97
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/camera_calibration
+[install]
+install_scripts=$base/lib/camera_calibration
diff --git a/src/image_pipeline/camera_calibration/setup.py b/src/image_pipeline/camera_calibration/setup.py
new file mode 100644
index 000000000..a261d29d1
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/setup.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+from setuptools import setup, find_packages
+
+PACKAGE_NAME = "camera_calibration"
+
+setup(
+ name=PACKAGE_NAME,
+ version='4.0.0',
+ packages=["camera_calibration", "camera_calibration.nodes"],
+ data_files=[
+ ('share/ament_index/resource_index/packages',
+ ['resource/' + PACKAGE_NAME]),
+ ('share/' + PACKAGE_NAME, ['package.xml']),
+ ],
+ py_modules=[],
+ package_dir={'': 'src'},
+ install_requires=[
+ 'setuptools',
+ ],
+ zip_safe=True,
+ author='James Bowman, Patrick Mihelich',
+ maintainer='Vincent Rabaud, Steven Macenski, Joshua Whitley',
+ maintainer_email='vincent.rabaud@gmail.com, stevenmacenski@gmail.com, whitleysoftwareservices@gmail.com',
+ keywords=['ROS2'],
+ description='Camera_calibration allows easy calibration of monocular or stereo cameras using a checkerboard calibration target .',
+ license='BSD',
+ tests_require=[
+ 'pytest',
+ 'requests'
+ ],
+ entry_points={
+ 'console_scripts': [
+ 'cameracalibrator = camera_calibration.nodes.cameracalibrator:main',
+ 'cameracheck = camera_calibration.nodes.cameracheck:main',
+ ],
+ },
+)
diff --git a/src/image_pipeline/camera_calibration/src/camera_calibration/__init__.py b/src/image_pipeline/camera_calibration/src/camera_calibration/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/image_pipeline/camera_calibration/src/camera_calibration/calibrator.py b/src/image_pipeline/camera_calibration/src/camera_calibration/calibrator.py
new file mode 100644
index 000000000..6f7744612
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/src/camera_calibration/calibrator.py
@@ -0,0 +1,1449 @@
+#!/usr/bin/env python
+#
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from io import BytesIO
+import cv2
+import cv_bridge
+import image_geometry
+import math
+import numpy.linalg
+import pickle
+import random
+import sensor_msgs.msg
+import sys
+import tarfile
+import time
+from distutils.version import LooseVersion
+from enum import Enum
+
+# Supported camera models
+class CAMERA_MODEL(Enum):
+ PINHOLE = 0
+ FISHEYE = 1
+
+# Supported calibration patterns
+class Patterns:
+ Chessboard, Circles, ACircles, ChArUco = list(range(4))
+
+class CalibrationException(Exception):
+ pass
+
+# TODO: Make pattern per-board?
+class ChessboardInfo():
+ def __init__(self, pattern="chessboard", n_cols = 0, n_rows = 0, dim = 0.0, marker_size = 0.0, aruco_dict = None):
+ self.pattern = pattern
+ self.n_cols = n_cols
+ self.n_rows = n_rows
+ self.dim = dim
+ self.marker_size = marker_size
+ self.aruco_dict = None
+ self.charuco_board = None;
+ if pattern=="charuco":
+ self.aruco_dict = cv2.aruco.getPredefinedDictionary({
+ "aruco_orig" : cv2.aruco.DICT_ARUCO_ORIGINAL,
+ "4x4_50" : cv2.aruco.DICT_4X4_50,
+ "4x4_100" : cv2.aruco.DICT_4X4_100,
+ "4x4_250" : cv2.aruco.DICT_4X4_250,
+ "4x4_1000" : cv2.aruco.DICT_4X4_1000,
+ "5x5_50" : cv2.aruco.DICT_5X5_50,
+ "5x5_100" : cv2.aruco.DICT_5X5_100,
+ "5x5_250" : cv2.aruco.DICT_5X5_250,
+ "5x5_1000" : cv2.aruco.DICT_5X5_1000,
+ "6x6_50" : cv2.aruco.DICT_6x6_50,
+ "6x6_100" : cv2.aruco.DICT_6x6_100,
+ "6x6_250" : cv2.aruco.DICT_6x6_250,
+ "6x6_1000" : cv2.aruco.DICT_6x6_1000,
+ "7x7_50" : cv2.aruco.DICT_7x7_50,
+ "7x7_100" : cv2.aruco.DICT_7x7_100,
+ "7x7_250" : cv2.aruco.DICT_7x7_250,
+ "7x7_1000" : cv2.aruco.DICT_7x7_1000}[aruco_dict])
+ self.charuco_board = cv2.aruco.CharucoBoard_create(self.n_cols, self.n_rows, self.dim, self.marker_size,
+ self.aruco_dict)
+
+# Make all private!!!!!
+def lmin(seq1, seq2):
+ """ Pairwise minimum of two sequences """
+ return [min(a, b) for (a, b) in zip(seq1, seq2)]
+
+def lmax(seq1, seq2):
+ """ Pairwise maximum of two sequences """
+ return [max(a, b) for (a, b) in zip(seq1, seq2)]
+
+def _pdist(p1, p2):
+ """
+    Distance between two points. p1 = (x, y), p2 = (x, y)
+ """
+ return math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2))
+
+def _get_outside_corners(corners, board):
+ """
+ Return the four corners of the board as a whole, as (up_left, up_right, down_right, down_left).
+ """
+ xdim = board.n_cols
+ ydim = board.n_rows
+
+ if board.pattern != "charuco" and corners.shape[1] * corners.shape[0] != xdim * ydim:
+ raise Exception("Invalid number of corners! %d corners. X: %d, Y: %d" % (corners.shape[1] * corners.shape[0],
+ xdim, ydim))
+ if board.pattern == "charuco" and corners.shape[1] * corners.shape[0] != (xdim-1) * (ydim-1):
+ raise Exception(("Invalid number of corners! %d corners. X: %d, Y: %d\n for ChArUco boards, " +
+ "_get_largest_rectangle_corners handles partial views of the target") % (corners.shape[1] *
+ corners.shape[0], xdim-1, ydim-1))
+
+ up_left = corners[0,0]
+ up_right = corners[xdim - 1,0]
+ down_right = corners[-1,0]
+ down_left = corners[-xdim,0]
+
+ return (up_left, up_right, down_right, down_left)
+
+def _get_largest_rectangle_corners(corners, ids, board):
+ """
+ Return the largest rectangle with all four corners visible in a partial view of a ChArUco board, as (up_left,
+ up_right, down_right, down_left).
+ """
+
+ # ChArUco board corner numbering:
+ #
+ # 9 10 11
+ # ^ 6 7 8
+ # y 3 4 5
+ # 0 1 2
+ # x >
+ #
+ # reference: https://docs.opencv.org/master/df/d4a/tutorial_charuco_detection.html
+
+ # xdim and ydim are number of squares, but we're working with inner corners
+ xdim = board.n_cols - 1
+ ydim = board.n_rows - 1
+ board_vis = [[[i*xdim + j] in ids for j in range(xdim)] for i in range(ydim)]
+
+ best_area = 0
+ best_rect = [-1, -1, -1, -1]
+
+ for x1 in range(xdim):
+ for x2 in range(x1, xdim):
+ for y1 in range(ydim):
+ for y2 in range(y1, ydim):
+ if (board_vis[y1][x1] and board_vis[y1][x2] and board_vis[y2][x1] and
+ board_vis[y2][x2] and (x2-x1+1)*(y2-y1+1) > best_area):
+ best_area = (x2-x1+1)*(y2-y1+1)
+ best_rect = [x1, x2, y1, y2]
+ (x1, x2, y1, y2) = best_rect
+ corner_ids = (y2*xdim+x1, y2*xdim+x2, y1*xdim+x2, y1*xdim + x1)
+ corners = tuple(corners[numpy.where(ids == corner_id)[0]][0][0] for corner_id in corner_ids)
+
+ return corners
+
+def _calculate_skew(corners):
+ """
+ Get skew for given checkerboard detection.
+    Scaled to [0,1], where 0 = no skew, 1 = high skew
+ Skew is proportional to the divergence of three outside corners from 90 degrees.
+ """
+ # TODO Using three nearby interior corners might be more robust, outside corners occasionally
+ # get mis-detected
+ up_left, up_right, down_right, _ = corners
+
+ def angle(a, b, c):
+ """
+ Return angle between lines ab, bc
+ """
+ ab = a - b
+ cb = c - b
+ return math.acos(numpy.dot(ab,cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb)))
+
+ skew = min(1.0, 2. * abs((math.pi / 2.) - angle(up_left, up_right, down_right)))
+ return skew
+
+def _calculate_area(corners):
+ """
+ Get 2d image area of the detected checkerboard.
+ The projected checkerboard is assumed to be a convex quadrilateral, and the area computed as
+ |p X q|/2; see http://mathworld.wolfram.com/Quadrilateral.html.
+ """
+ (up_left, up_right, down_right, down_left) = corners
+ a = up_right - up_left
+ b = down_right - up_right
+ c = down_left - down_right
+ p = b + c
+ q = a + b
+ return abs(p[0]*q[1] - p[1]*q[0]) / 2.
+
+def _get_corners(img, board, refine = True, checkerboard_flags=0):
+ """
+ Get corners for a particular chessboard for an image
+ """
+ h = img.shape[0]
+ w = img.shape[1]
+ if len(img.shape) == 3 and img.shape[2] == 3:
+ mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ else:
+ mono = img
+ (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
+ cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
+ if not ok:
+ return (ok, corners)
+
+ # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
+ # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
+ # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
+ BORDER = 8
+ if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
+ ok = False
+
+ # Ensure that all corner-arrays are going from top to bottom.
+ if board.n_rows!=board.n_cols:
+ if corners[0, 0, 1] > corners[-1, 0, 1]:
+ corners = numpy.copy(numpy.flipud(corners))
+ else:
+ direction_corners=(corners[-1]-corners[0])>=numpy.array([[0.0,0.0]])
+
+ if not numpy.all(direction_corners):
+ if not numpy.any(direction_corners):
+ corners = numpy.copy(numpy.flipud(corners))
+ elif direction_corners[0][0]:
+ corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2)).reshape(board.n_cols*board.n_rows,1,2)
+ else:
+ corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2),3).reshape(board.n_cols*board.n_rows,1,2)
+
+ if refine and ok:
+ # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
+ # correct corner, but not so large as to include a wrong corner in the search window.
+ min_distance = float("inf")
+ for row in range(board.n_rows):
+ for col in range(board.n_cols - 1):
+ index = row*board.n_rows + col
+ min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
+ for row in range(board.n_rows - 1):
+ for col in range(board.n_cols):
+ index = row*board.n_rows + col
+ min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
+ radius = int(math.ceil(min_distance * 0.5))
+ cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1),
+ ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))
+
+ return (ok, corners)
+
+def _get_charuco_corners(img, board, refine):
+ """
+ Get chessboard corners from image of ChArUco board
+ """
+ h = img.shape[0]
+ w = img.shape[1]
+
+ if len(img.shape) == 3 and img.shape[2] == 3:
+ mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ else:
+ mono = img
+
+ marker_corners, marker_ids, _ = cv2.aruco.detectMarkers(img, board.aruco_dict)
+ if len(marker_corners) == 0:
+ return (False, None, None)
+ _, square_corners, ids = cv2.aruco.interpolateCornersCharuco(marker_corners, marker_ids, img, board.charuco_board)
+ return ((square_corners is not None) and (len(square_corners) > 5), square_corners, ids)
+
+def _get_circles(img, board, pattern):
+ """
+ Get circle centers for a symmetric or asymmetric grid
+ """
+ h = img.shape[0]
+ w = img.shape[1]
+ if len(img.shape) == 3 and img.shape[2] == 3:
+ mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ else:
+ mono = img
+
+ flag = cv2.CALIB_CB_SYMMETRIC_GRID
+ if pattern == Patterns.ACircles:
+ flag = cv2.CALIB_CB_ASYMMETRIC_GRID
+ mono_arr = numpy.array(mono)
+ (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag)
+
+ # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try
+ # again with dimensions swapped - not so efficient.
+ # TODO Better to add as second board? Corner ordering will change.
+ if not ok and pattern == Patterns.Circles:
+ (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag)
+
+ return (ok, corners)
+
+def _get_dist_model(dist_params, cam_model):
+ # Select dist model
+ if CAMERA_MODEL.PINHOLE == cam_model:
+ if dist_params.size > 5:
+ dist_model = "rational_polynomial"
+ else:
+ dist_model = "plumb_bob"
+ elif CAMERA_MODEL.FISHEYE == cam_model:
+ dist_model = "equidistant"
+ else:
+ dist_model = "unknown"
+ return dist_model
+
+# TODO self.size needs to come from CameraInfo, full resolution
+class Calibrator():
+ """
+ Base class for calibration system
+ """
+ def __init__(self, boards, flags=0, fisheye_flags = 0, pattern=Patterns.Chessboard, name='',
+ checkerboard_flags=cv2.CALIB_CB_FAST_CHECK, max_chessboard_speed = -1.0):
+ # Ordering the dimensions for the different detectors is actually a minefield...
+ if pattern == Patterns.Chessboard:
+ # Make sure n_cols > n_rows to agree with OpenCV CB detector output
+ self._boards = [ChessboardInfo("chessboard", max(i.n_cols, i.n_rows), min(i.n_cols, i.n_rows), i.dim) for i in boards]
+ if pattern == Patterns.ChArUco:
+ self._boards = boards
+ elif pattern == Patterns.ACircles:
+ # 7x4 and 4x7 are actually different patterns. Assume square-ish pattern, so n_rows > n_cols.
+ self._boards = [ChessboardInfo("acircles", min(i.n_cols, i.n_rows), max(i.n_cols, i.n_rows), i.dim) for i in boards]
+ elif pattern == Patterns.Circles:
+ # We end up having to check both ways anyway
+ self._boards = boards
+
+ # Set to true after we perform calibration
+ self.calibrated = False
+ self.calib_flags = flags
+ self.fisheye_calib_flags = fisheye_flags
+ self.checkerboard_flags = checkerboard_flags
+ self.pattern = pattern
+ self.br = cv_bridge.CvBridge()
+ self.camera_model = CAMERA_MODEL.PINHOLE
+ # self.db is list of (parameters, image) samples for use in calibration. parameters has form
+ # (X, Y, size, skew) all normalized to [0,1], to keep track of what sort of samples we've taken
+ # and ensure enough variety.
+ self.db = []
+ # For each db sample, we also record the detected corners (and IDs, if using a ChArUco board)
+ self.good_corners = []
+ # Set to true when we have sufficiently varied samples to calibrate
+ self.goodenough = False
+ self.param_ranges = [0.7, 0.7, 0.4, 0.5]
+ self.name = name
+ self.last_frame_corners = None
+ self.last_frame_ids = None
+ self.max_chessboard_speed = max_chessboard_speed
+
+ def mkgray(self, msg):
+ """
+ Convert a message into a 8-bit 1 channel monochrome OpenCV image
+ """
+ # as cv_bridge automatically scales, we need to remove that behavior
+ # TODO: get a Python API in cv_bridge to check for the image depth.
+ if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
+ mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
+ mono8 = numpy.array(mono16 / 256, dtype=numpy.uint8)
+ return mono8
+ elif 'FC1' in msg.encoding:
+ # floating point image handling
+ img = self.br.imgmsg_to_cv2(msg, "passthrough")
+ _, max_val, _, _ = cv2.minMaxLoc(img)
+ if max_val > 0:
+ scale = 255.0 / max_val
+ mono_img = (img * scale).astype(numpy.uint8)
+ else:
+ mono_img = img.astype(numpy.uint8)
+ return mono_img
+ else:
+ return self.br.imgmsg_to_cv2(msg, "mono8")
+
+ def get_parameters(self, corners, ids, board, size):
+ """
+ Return list of parameters [X, Y, size, skew] describing the checkerboard view.
+ """
+ (width, height) = size
+ Xs = corners[:,:,0]
+ Ys = corners[:,:,1]
+ if board.pattern == 'charuco':
+ outside_corners = _get_largest_rectangle_corners(corners, ids, board)
+ else:
+ outside_corners = _get_outside_corners(corners, board)
+ area = _calculate_area(outside_corners)
+ skew = _calculate_skew(outside_corners)
+ border = math.sqrt(area)
+ # For X and Y, we "shrink" the image all around by approx. half the board size.
+ # Otherwise large boards are penalized because you can't get much X/Y variation.
+ p_x = min(1.0, max(0.0, (numpy.mean(Xs) - border / 2) / (width - border)))
+ p_y = min(1.0, max(0.0, (numpy.mean(Ys) - border / 2) / (height - border)))
+ p_size = math.sqrt(area / (width * height))
+ params = [p_x, p_y, p_size, skew]
+ return params
+
+ def set_cammodel(self, modeltype):
+ self.camera_model = modeltype
+
+ def is_slow_moving(self, corners, ids, last_frame_corners, last_frame_ids):
+ """
+ Returns true if the motion of the checkerboard is sufficiently low between
+ this and the previous frame.
+ """
+ # If we don't have previous frame corners, we can't accept the sample
+ if last_frame_corners is None:
+ return False
+ if ids is None:
+ num_corners = len(corners)
+ corner_deltas = (corners - last_frame_corners).reshape(num_corners, 2)
+ else:
+ corner_deltas = []
+ last_frame_ids = list(last_frame_ids.transpose()[0])
+ for i, c_id in enumerate(ids):
+ try:
+ last_i = last_frame_ids.index(c_id)
+ corner_deltas.append(corners[i] - last_frame_corners[last_i])
+ except ValueError: pass
+ corner_deltas = numpy.concatenate(corner_deltas)
+
+ # Average distance travelled overall for all corners
+ average_motion = numpy.average(numpy.linalg.norm(corner_deltas, axis = 1))
+ return average_motion <= self.max_chessboard_speed
+
+ def is_good_sample(self, params, corners, ids, last_frame_corners, last_frame_ids):
+ """
+ Returns true if the checkerboard detection described by params should be added to the database.
+ """
+ if not self.db:
+ return True
+
+ def param_distance(p1, p2):
+ return sum([abs(a-b) for (a,b) in zip(p1, p2)])
+
+ db_params = [sample[0] for sample in self.db]
+ d = min([param_distance(params, p) for p in db_params])
+ #print "d = %.3f" % d #DEBUG
+ # TODO What's a good threshold here? Should it be configurable?
+ if d <= 0.2:
+ return False
+
+ if self.max_chessboard_speed > 0:
+ if not self.is_slow_moving(corners, ids, last_frame_corners, last_frame_ids):
+ return False
+
+ # All tests passed, image should be good for calibration
+ return True
+
+ _param_names = ["X", "Y", "Size", "Skew"]
+
+ def compute_goodenough(self):
+ if not self.db:
+ return None
+
+ # Find range of checkerboard poses covered by samples in database
+ all_params = [sample[0] for sample in self.db]
+ min_params = all_params[0]
+ max_params = all_params[0]
+ for params in all_params[1:]:
+ min_params = lmin(min_params, params)
+ max_params = lmax(max_params, params)
+ # Don't reward small size or skew
+ min_params = [min_params[0], min_params[1], 0., 0.]
+
+ # For each parameter, judge how much progress has been made toward adequate variation
+ progress = [min((hi - lo) / r, 1.0) for (lo, hi, r) in zip(min_params, max_params, self.param_ranges)]
+ # If we have lots of samples, allow calibration even if not all parameters are green
+ # TODO Awkward that we update self.goodenough instead of returning it
+ self.goodenough = (len(self.db) >= 40) or all([p == 1.0 for p in progress])
+
+ return list(zip(self._param_names, min_params, max_params, progress))
+
+ def mk_object_points(self, boards, use_board_size = False):
+ opts = []
+ for i, b in enumerate(boards):
+ num_pts = b.n_cols * b.n_rows
+ opts_loc = numpy.zeros((num_pts, 1, 3), numpy.float32)
+ for j in range(num_pts):
+ opts_loc[j, 0, 0] = (j // b.n_cols)
+ if self.pattern == Patterns.ACircles:
+ opts_loc[j, 0, 1] = 2*(j % b.n_cols) + (opts_loc[j, 0, 0] % 2)
+ else:
+ opts_loc[j, 0, 1] = (j % b.n_cols)
+ opts_loc[j, 0, 2] = 0
+ if use_board_size:
+ opts_loc[j, 0, :] = opts_loc[j, 0, :] * b.dim
+ opts.append(opts_loc)
+ return opts
+
+ def get_corners(self, img, refine = True):
+ """
+ Use cvFindChessboardCorners to find corners of chessboard in image.
+
+ Check all boards. Return corners for first chessboard that it detects
+ if given multiple size chessboards.
+
+ If a ChArUco board is used, the marker IDs are also returned, otherwise
+ ids is None.
+
+ Returns (ok, corners, ids, board)
+ """
+
+ for b in self._boards:
+ if self.pattern == Patterns.Chessboard:
+ (ok, corners) = _get_corners(img, b, refine, self.checkerboard_flags)
+ ids = None
+ elif self.pattern == Patterns.ChArUco:
+ (ok, corners, ids) = _get_charuco_corners(img, b, refine)
+ else:
+ (ok, corners) = _get_circles(img, b, self.pattern)
+ ids = None
+ if ok:
+ return (ok, corners, ids, b)
+ return (False, None, None, None)
+
+ def downsample_and_detect(self, img):
+ """
+ Downsample the input image to approximately VGA resolution and detect the
+ calibration target corners in the full-size image.
+
+ Combines these apparently orthogonal duties as an optimization. Checkerboard
+ detection is too expensive on large images, so it's better to do detection on
+ the smaller display image and scale the corners back up to the correct size.
+
+ Returns (scrib, corners, downsampled_corners, ids, board, (x_scale, y_scale)).
+ """
+ # Scale the input image down to ~VGA size
+ height = img.shape[0]
+ width = img.shape[1]
+ scale = math.sqrt( (width*height) / (640.*480.) )
+ if scale > 1.0:
+ scrib = cv2.resize(img, (int(width / scale), int(height / scale)))
+ else:
+ scrib = img
+ # Due to rounding, actual horizontal/vertical scaling may differ slightly
+ x_scale = float(width) / scrib.shape[1]
+ y_scale = float(height) / scrib.shape[0]
+
+ if self.pattern == Patterns.Chessboard:
+ # Detect checkerboard
+ (ok, downsampled_corners, ids, board) = self.get_corners(scrib, refine = True)
+
+ # Scale corners back to full size image
+ corners = None
+ if ok:
+ if scale > 1.0:
+ # Refine up-scaled corners in the original full-res image
+ # TODO Does this really make a difference in practice?
+ corners_unrefined = downsampled_corners.copy()
+ corners_unrefined[:, :, 0] *= x_scale
+ corners_unrefined[:, :, 1] *= y_scale
+ radius = int(math.ceil(scale))
+ if len(img.shape) == 3 and img.shape[2] == 3:
+ mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ else:
+ mono = img
+ cv2.cornerSubPix(mono, corners_unrefined, (radius,radius), (-1,-1),
+ ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))
+ corners = corners_unrefined
+ else:
+ corners = downsampled_corners
+ else:
+ # Circle grid detection is fast even on large images
+ (ok, corners, ids, board) = self.get_corners(img)
+ # Scale corners to downsampled image for display
+ downsampled_corners = None
+ if ok:
+ if scale > 1.0:
+ downsampled_corners = corners.copy()
+ downsampled_corners[:,:,0] /= x_scale
+ downsampled_corners[:,:,1] /= y_scale
+ else:
+ downsampled_corners = corners
+
+ return (scrib, corners, downsampled_corners, ids, board, (x_scale, y_scale))
+
+ @staticmethod
+ def lrmsg(d, k, r, p, size, camera_model):
+ """ Used by :meth:`as_message`. Return a CameraInfo message for the given calibration matrices """
+ msg = sensor_msgs.msg.CameraInfo()
+ msg.width, msg.height = size
+ msg.distortion_model = _get_dist_model(d, camera_model)
+
+ msg.d = numpy.ravel(d).copy().tolist()
+ msg.k = numpy.ravel(k).copy().tolist()
+ msg.r = numpy.ravel(r).copy().tolist()
+ msg.p = numpy.ravel(p).copy().tolist()
+ return msg
+
+ @staticmethod
+ def lrreport(d, k, r, p):
+ print("D =", numpy.ravel(d).tolist())
+ print("K =", numpy.ravel(k).tolist())
+ print("R =", numpy.ravel(r).tolist())
+ print("P =", numpy.ravel(p).tolist())
+
+ @staticmethod
+ def lrost(name, d, k, r, p, size):
+ assert k.shape == (3, 3)
+ assert r.shape == (3, 3)
+ assert p.shape == (3, 4)
+ calmessage = "\n".join([
+ "# oST version 5.0 parameters",
+ "",
+ "",
+ "[image]",
+ "",
+ "width",
+ "%d" % size[0],
+ "",
+ "height",
+ "%d" % size[1],
+ "",
+ "[%s]" % name,
+ "",
+ "camera matrix",
+ " ".join("%8f" % k[0,i] for i in range(3)),
+ " ".join("%8f" % k[1,i] for i in range(3)),
+ " ".join("%8f" % k[2,i] for i in range(3)),
+ "",
+ "distortion",
+ " ".join("%8f" % x for x in d.flat),
+ "",
+ "rectification",
+ " ".join("%8f" % r[0,i] for i in range(3)),
+ " ".join("%8f" % r[1,i] for i in range(3)),
+ " ".join("%8f" % r[2,i] for i in range(3)),
+ "",
+ "projection",
+ " ".join("%8f" % p[0,i] for i in range(4)),
+ " ".join("%8f" % p[1,i] for i in range(4)),
+ " ".join("%8f" % p[2,i] for i in range(4)),
+ ""
+ ])
+ assert len(calmessage) < 525, "Calibration info must be less than 525 bytes"
+ return calmessage
+
+ @staticmethod
+ def lryaml(name, d, k, r, p, size, cam_model):
+ def format_mat(x, precision):
+ return ("[%s]" % (
+ numpy.array2string(x, precision=precision, suppress_small=True, separator=", ")
+ .replace("[", "").replace("]", "").replace("\n", "\n ")
+ ))
+
+ dist_model = _get_dist_model(d, cam_model)
+
+ assert k.shape == (3, 3)
+ assert r.shape == (3, 3)
+ assert p.shape == (3, 4)
+ calmessage = "\n".join([
+ "image_width: %d" % size[0],
+ "image_height: %d" % size[1],
+ "camera_name: " + name,
+ "camera_matrix:",
+ " rows: 3",
+ " cols: 3",
+ " data: " + format_mat(k, 5),
+ "distortion_model: " + dist_model,
+ "distortion_coefficients:",
+ " rows: 1",
+ " cols: %d" % d.size,
+ " data: [%s]" % ", ".join("%8f" % x for x in d.flat),
+ "rectification_matrix:",
+ " rows: 3",
+ " cols: 3",
+ " data: " + format_mat(r, 8),
+ "projection_matrix:",
+ " rows: 3",
+ " cols: 4",
+ " data: " + format_mat(p, 5),
+ ""
+ ])
+ return calmessage
+
+ def do_save(self):
+ filename = '/tmp/calibrationdata.tar.gz'
+ tf = tarfile.open(filename, 'w:gz')
+ self.do_tarfile_save(tf) # Must be overridden in subclasses
+ tf.close()
+ print(("Wrote calibration data to", filename))
+
+def image_from_archive(archive, name):
+ """
+    Load an image file (PGM or PNG) from a tar archive.
+
+ Used for tarfile loading and unit test.
+ """
+ member = archive.getmember(name)
+ imagefiledata = numpy.frombuffer(archive.extractfile(member).read(), numpy.uint8)
+ imagefiledata.resize((1, imagefiledata.size))
+ return cv2.imdecode(imagefiledata, cv2.IMREAD_COLOR)
+
+class ImageDrawable():
+ """
+ Passed to CalibrationNode after image handled. Allows plotting of images
+ with detected corner points
+ """
+ def __init__(self):
+ self.params = None
+
+class MonoDrawable(ImageDrawable):
+ def __init__(self):
+ ImageDrawable.__init__(self)
+ self.scrib = None
+ self.linear_error = -1.0
+
+class StereoDrawable(ImageDrawable):
+ def __init__(self):
+ ImageDrawable.__init__(self)
+ self.lscrib = None
+ self.rscrib = None
+ self.epierror = -1
+ self.dim = -1
+
+
+class MonoCalibrator(Calibrator):
+ """
+ Calibration class for monocular cameras::
+
+        images = [cv2.imread("mono%d.png" % i) for i in range(8)]
+ mc = MonoCalibrator()
+ mc.cal(images)
+        print(mc.as_message())
+ """
+
+ is_mono = True # TODO Could get rid of is_mono
+
+ def __init__(self, *args, **kwargs):
+ if 'name' not in kwargs:
+ kwargs['name'] = 'narrow_stereo/left'
+ super(MonoCalibrator, self).__init__(*args, **kwargs)
+
+ def cal(self, images):
+ """
+ Calibrate camera from given images
+ """
+ goodcorners = self.collect_corners(images)
+ self.cal_fromcorners(goodcorners)
+ self.calibrated = True
+
+ def collect_corners(self, images):
+ """
+ :param images: source images containing chessboards
+ :type images: list of :class:`cvMat`
+
+ Find chessboards in all images.
+
+ Return [ (corners, ids, ChessboardInfo) ]
+ """
+ self.size = (images[0].shape[1], images[0].shape[0])
+ corners = [self.get_corners(i) for i in images]
+
+ goodcorners = [(co, ids, b) for (ok, co, ids, b) in corners if ok]
+ if not goodcorners:
+ raise CalibrationException("No corners found in images!")
+ return goodcorners
+
+ def cal_fromcorners(self, good):
+ """
+ :param good: Good corner positions and boards
+        :type good: [(corners, ids, ChessboardInfo)]
+ """
+
+ (ipts, ids, boards) = zip(*good)
+ opts = self.mk_object_points(boards)
+ # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
+ intrinsics_in = numpy.eye(3, dtype=numpy.float64)
+
+ if self.pattern == Patterns.ChArUco:
+ if self.camera_model == CAMERA_MODEL.FISHEYE:
+ raise NotImplemented("Can't perform fisheye calibration with ChArUco board")
+
+ reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(
+ ipts, ids, boards[0].charuco_board, self.size, intrinsics_in, None)
+
+ elif self.camera_model == CAMERA_MODEL.PINHOLE:
+ print("mono pinhole calibration...")
+ reproj_err, self.intrinsics, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
+ opts, ipts,
+ self.size,
+ intrinsics_in,
+ None,
+ flags = self.calib_flags)
+ # OpenCV returns more than 8 coefficients (the additional ones all zeros) when CALIB_RATIONAL_MODEL is set.
+ # The extra ones include e.g. thin prism coefficients, which we are not interested in.
+ if self.calib_flags & cv2.CALIB_RATIONAL_MODEL:
+ self.distortion = dist_coeffs.flat[:8].reshape(-1, 1) # rational polynomial
+ else:
+ self.distortion = dist_coeffs.flat[:5].reshape(-1, 1) # plumb bob
+ elif self.camera_model == CAMERA_MODEL.FISHEYE:
+ print("mono fisheye calibration...")
+ # WARNING: cv2.fisheye.calibrate wants float64 points
+ ipts64 = numpy.asarray(ipts, dtype=numpy.float64)
+ ipts = ipts64
+ opts64 = numpy.asarray(opts, dtype=numpy.float64)
+ opts = opts64
+ reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.fisheye.calibrate(
+ opts, ipts, self.size,
+ intrinsics_in, None, flags = self.fisheye_calib_flags)
+
+ # R is identity matrix for monocular calibration
+ self.R = numpy.eye(3, dtype=numpy.float64)
+ self.P = numpy.zeros((3, 4), dtype=numpy.float64)
+
+ self.set_alpha(0.0)
+
+ def set_alpha(self, a):
+ """
+ Set the alpha value for the calibrated camera solution. The alpha
+ value is a zoom, and ranges from 0 (zoomed in, all pixels in
+ calibrated image are valid) to 1 (zoomed out, all pixels in
+ original image are in calibrated image).
+ """
+
+ if self.camera_model == CAMERA_MODEL.PINHOLE:
+ # NOTE: Prior to Electric, this code was broken such that we never actually saved the new
+ # camera matrix. In effect, this enforced P = [K|0] for monocular cameras.
+ # TODO: Verify that OpenCV #1199 gets applied (improved GetOptimalNewCameraMatrix)
+ ncm, _ = cv2.getOptimalNewCameraMatrix(self.intrinsics, self.distortion, self.size, a)
+ for j in range(3):
+ for i in range(3):
+ self.P[j,i] = ncm[j, i]
+ self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, ncm, self.size, cv2.CV_32FC1)
+ elif self.camera_model == CAMERA_MODEL.FISHEYE:
+ # NOTE: cv2.fisheye.estimateNewCameraMatrixForUndistortRectify not producing proper results, using a naive approach instead:
+ self.P[:3,:3] = self.intrinsics[:3,:3]
+ self.P[0,0] /= (1. + a)
+ self.P[1,1] /= (1. + a)
+ self.mapx, self.mapy = cv2.fisheye.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, self.P, self.size, cv2.CV_32FC1)
+
+
+ def remap(self, src):
+ """
+ :param src: source image
+ :type src: :class:`cvMat`
+
+ Apply the post-calibration undistortion to the source image
+ """
+ return cv2.remap(src, self.mapx, self.mapy, cv2.INTER_LINEAR)
+
+ def undistort_points(self, src):
+ """
+ :param src: N source pixel points (u,v) as an Nx2 matrix
+ :type src: :class:`cvMat`
+
+ Apply the post-calibration undistortion to the source points
+ """
+ if self.camera_model == CAMERA_MODEL.PINHOLE:
+ return cv2.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P)
+ elif self.camera_model == CAMERA_MODEL.FISHEYE:
+ return cv2.fisheye.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P)
+
+ def as_message(self):
+ """ Return the camera calibration as a CameraInfo message """
+ return self.lrmsg(self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model)
+
+ def from_message(self, msg, alpha = 0.0):
+ """ Initialize the camera calibration from a CameraInfo message """
+
+ self.size = (msg.width, msg.height)
+ self.intrinsics = numpy.array(msg.k, dtype=numpy.float64, copy=True).reshape((3, 3))
+ self.distortion = numpy.array(msg.d, dtype=numpy.float64, copy=True).reshape((len(msg.d), 1))
+ self.R = numpy.array(msg.r, dtype=numpy.float64, copy=True).reshape((3, 3))
+ self.P = numpy.array(msg.p, dtype=numpy.float64, copy=True).reshape((3, 4))
+
+ self.set_alpha(0.0)
+
+ def report(self):
+ self.lrreport(self.distortion, self.intrinsics, self.R, self.P)
+
+ def ost(self):
+ return self.lrost(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size)
+
+ def yaml(self):
+ return self.lryaml(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model)
+
+ def linear_error_from_image(self, image):
+ """
+ Detect the checkerboard and compute the linear error.
+ Mainly for use in tests.
+ """
+ _, corners, _, ids, board, _ = self.downsample_and_detect(image)
+ if corners is None:
+ return None
+
+ undistorted = self.undistort_points(corners)
+ return self.linear_error(undistorted, ids, board)
+
+ @staticmethod
+ def linear_error(corners, ids, b):
+
+ """
+ Returns the linear error for a set of corners detected in the unrectified image.
+ """
+
+ if corners is None:
+ return None
+
+ corners = numpy.squeeze(corners)
+
+ def pt2line(x0, y0, x1, y1, x2, y2):
+ """ point is (x0, y0), line is (x1, y1, x2, y2) """
+ return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
+
+ n_cols = b.n_cols
+ n_rows = b.n_rows
+ if b.pattern == 'charuco':
+ n_cols -= 1
+ n_rows -= 1
+ n_pts = n_cols * n_rows
+
+ if ids is None:
+ ids = numpy.arange(n_pts).reshape((n_pts, 1))
+
+ ids_to_idx = dict((ids[i, 0], i) for i in range(len(ids)))
+
+ errors = []
+ for row in range(n_rows):
+ row_min = row * n_cols
+ row_max = (row+1) * n_cols
+ pts_in_row = [x for x in ids if row_min <= x < row_max]
+
+ # not enough points to calculate error
+ if len(pts_in_row) <= 2: continue
+
+ left_pt = min(pts_in_row)[0]
+ right_pt = max(pts_in_row)[0]
+ x_left = corners[ids_to_idx[left_pt], 0]
+ y_left = corners[ids_to_idx[left_pt], 1]
+ x_right = corners[ids_to_idx[right_pt], 0]
+ y_right = corners[ids_to_idx[right_pt], 1]
+
+ for pt in pts_in_row:
+ if pt[0] in (left_pt, right_pt): continue
+ x = corners[ids_to_idx[pt[0]], 0]
+ y = corners[ids_to_idx[pt[0]], 1]
+ errors.append(pt2line(x, y, x_left, y_left, x_right, y_right))
+
+ if errors:
+ return math.sqrt(sum([e**2 for e in errors]) / len(errors))
+ else:
+ return None
+
+
+ def handle_msg(self, msg):
+ """
+        Detects the calibration target and, if it is found and provides enough new information,
+ adds it to the sample database.
+
+ Returns a MonoDrawable message with the display image and progress info.
+ """
+ gray = self.mkgray(msg)
+ linear_error = -1
+
+ # Get display-image-to-be (scrib) and detection of the calibration target
+ scrib_mono, corners, downsampled_corners, ids, board, (x_scale, y_scale) = self.downsample_and_detect(gray)
+
+ if self.calibrated:
+ # Show rectified image
+ # TODO Pull out downsampling code into function
+ gray_remap = self.remap(gray)
+ gray_rect = gray_remap
+ if x_scale != 1.0 or y_scale != 1.0:
+ gray_rect = cv2.resize(gray_remap, (scrib_mono.shape[1], scrib_mono.shape[0]))
+
+ scrib = cv2.cvtColor(gray_rect, cv2.COLOR_GRAY2BGR)
+
+ if corners is not None:
+ # Report linear error
+ undistorted = self.undistort_points(corners)
+ linear_error = self.linear_error(undistorted, ids, board)
+
+ # Draw rectified corners
+ scrib_src = undistorted.copy()
+ scrib_src[:,:,0] /= x_scale
+ scrib_src[:,:,1] /= y_scale
+ cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), scrib_src, True)
+
+ else:
+ scrib = cv2.cvtColor(scrib_mono, cv2.COLOR_GRAY2BGR)
+ if corners is not None:
+ # Draw (potentially downsampled) corners onto display image
+ if board.pattern == "charuco":
+ cv2.aruco.drawDetectedCornersCharuco(scrib, downsampled_corners, ids)
+ else:
+ cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), downsampled_corners, True)
+
+ # Add sample to database only if it's sufficiently different from any previous sample.
+ params = self.get_parameters(corners, ids, board, (gray.shape[1], gray.shape[0]))
+ if self.is_good_sample(params, corners, ids, self.last_frame_corners, self.last_frame_ids):
+ self.db.append((params, gray))
+ if self.pattern == Patterns.ChArUco:
+ self.good_corners.append((corners, ids, board))
+ else:
+ self.good_corners.append((corners, None, board))
+ print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params)))
+
+ self.last_frame_corners = corners
+ self.last_frame_ids = ids
+ rv = MonoDrawable()
+ rv.scrib = scrib
+ rv.params = self.compute_goodenough()
+ rv.linear_error = linear_error
+ return rv
+
+ def do_calibration(self, dump = False):
+ if not self.good_corners:
+ print("**** Collecting corners for all images! ****") #DEBUG
+ images = [i for (p, i) in self.db]
+ self.good_corners = self.collect_corners(images)
+ self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally
+ # Dump should only occur if user wants it
+ if dump:
+ pickle.dump((self.is_mono, self.size, self.good_corners),
+ open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w"))
+ self.cal_fromcorners(self.good_corners)
+ self.calibrated = True
+ # DEBUG
+ print((self.report()))
+ print((self.ost()))
+
+ def do_tarfile_save(self, tf):
+ """ Write images and calibration solution to a tarfile object """
+
+ def taradd(name, buf):
+ if isinstance(buf, str):
+ s = BytesIO(buf.encode('utf-8'))
+ else:
+ s = BytesIO(buf)
+ ti = tarfile.TarInfo(name)
+ ti.size = len(s.getvalue())
+ ti.uname = 'calibrator'
+ ti.mtime = int(time.time())
+ tf.addfile(tarinfo=ti, fileobj=s)
+
+ ims = [("left-%04d.png" % i, im) for i,(_, im) in enumerate(self.db)]
+ for (name, im) in ims:
+ taradd(name, cv2.imencode(".png", im)[1].tostring())
+ taradd('ost.yaml', self.yaml())
+ taradd('ost.txt', self.ost())
+
+ def do_tarfile_calibration(self, filename):
+ archive = tarfile.open(filename, 'r')
+
+ limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('.pgm') or f.endswith('png'))) ]
+
+ self.cal(limages)
+
+# TODO Replicate MonoCalibrator improvements in stereo
+class StereoCalibrator(Calibrator):
+ """
+ Calibration class for stereo cameras::
+
+        limages = [cv2.imread("left%d.png" % i) for i in range(8)]
+        rimages = [cv2.imread("right%d.png" % i) for i in range(8)]
+ sc = StereoCalibrator()
+ sc.cal(limages, rimages)
+        print(sc.as_message())
+ """
+
+ is_mono = False
+
+ def __init__(self, *args, **kwargs):
+ if 'name' not in kwargs:
+ kwargs['name'] = 'narrow_stereo'
+ super(StereoCalibrator, self).__init__(*args, **kwargs)
+ self.l = MonoCalibrator(*args, **kwargs)
+ self.r = MonoCalibrator(*args, **kwargs)
+        # When collecting from two cameras in a horizontal stereo rig, we
+        # can't get the full X range in the left camera.
+ self.param_ranges[0] = 0.4
+
+ #override
+ def set_cammodel(self, modeltype):
+ super(StereoCalibrator, self).set_cammodel(modeltype)
+ self.l.set_cammodel(modeltype)
+ self.r.set_cammodel(modeltype)
+
+ def cal(self, limages, rimages):
+ """
+ :param limages: source left images containing chessboards
+ :type limages: list of :class:`cvMat`
+ :param rimages: source right images containing chessboards
+ :type rimages: list of :class:`cvMat`
+
+ Find chessboards in images, and runs the OpenCV calibration solver.
+ """
+ goodcorners = self.collect_corners(limages, rimages)
+ self.size = (limages[0].shape[1], limages[0].shape[0])
+ self.l.size = self.size
+ self.r.size = self.size
+ self.cal_fromcorners(goodcorners)
+ self.calibrated = True
+
+ def collect_corners(self, limages, rimages):
+ """
+ For a sequence of left and right images, find pairs of images where both
+ left and right have a chessboard, and return their corners as a list of pairs.
+ """
+ # Pick out (corners, ids, board) tuples
+ lcorners = []
+ rcorners = []
+ for img in limages:
+ (_, corners, _, ids, board, _) = self.downsample_and_detect(img)
+ lcorners.append((corners, ids, board))
+ for img in rimages:
+ (_, corners, _, ids, board, _) = self.downsample_and_detect(img)
+ rcorners.append((corners, ids, board))
+
+ good = [(lco, rco, lid, rid, b) for ((lco, lid, b), (rco, rid, br)) in zip( lcorners, rcorners)
+ if (lco is not None and rco is not None)]
+
+ if len(good) == 0:
+ raise CalibrationException("No corners found in images!")
+ return good
+
+ def cal_fromcorners(self, good):
+ # Perform monocular calibrations
+ lcorners = [(lco, lid, b) for (lco, rco, lid, rid, b) in good]
+ rcorners = [(rco, rid, b) for (lco, rco, lid, rid, b) in good]
+ self.l.cal_fromcorners(lcorners)
+ self.r.cal_fromcorners(rcorners)
+
+ (lipts, ripts, _, _, boards) = zip(*good)
+
+ opts = self.mk_object_points(boards, True)
+
+ flags = cv2.CALIB_FIX_INTRINSIC
+
+ self.T = numpy.zeros((3, 1), dtype=numpy.float64)
+ self.R = numpy.eye(3, dtype=numpy.float64)
+
+ if self.pattern == Patterns.ChArUco:
+ # TODO: implement stereo ChArUco calibration
+ raise NotImplemented("Stereo calibration not implemented for ChArUco boards")
+
+ if self.camera_model == CAMERA_MODEL.PINHOLE:
+ print("stereo pinhole calibration...")
+ if LooseVersion(cv2.__version__).version[0] == 2:
+ cv2.stereoCalibrate(opts, lipts, ripts, self.size,
+ self.l.intrinsics, self.l.distortion,
+ self.r.intrinsics, self.r.distortion,
+ self.R, # R
+ self.T, # T
+ criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
+ flags = flags)
+ else:
+ cv2.stereoCalibrate(opts, lipts, ripts,
+ self.l.intrinsics, self.l.distortion,
+ self.r.intrinsics, self.r.distortion,
+ self.size,
+ self.R, # R
+ self.T, # T
+ criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
+ flags = flags)
+ elif self.camera_model == CAMERA_MODEL.FISHEYE:
+ print("stereo fisheye calibration...")
+ if LooseVersion(cv2.__version__).version[0] == 2:
+ print("ERROR: You need OpenCV >3 to use fisheye camera model")
+ sys.exit()
+ else:
+ # WARNING: cv2.fisheye.stereoCalibrate wants float64 points
+ lipts64 = numpy.asarray(lipts, dtype=numpy.float64)
+ lipts = lipts64
+ ripts64 = numpy.asarray(ripts, dtype=numpy.float64)
+ ripts = ripts64
+ opts64 = numpy.asarray(opts, dtype=numpy.float64)
+ opts = opts64
+
+ cv2.fisheye.stereoCalibrate(opts, lipts, ripts,
+ self.l.intrinsics, self.l.distortion,
+ self.r.intrinsics, self.r.distortion,
+ self.size,
+ self.R, # R
+ self.T, # T
+ criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), # 30, 1e-6
+ flags = flags)
+
+ self.set_alpha(0.0)
+
+ def set_alpha(self, a):
+ """
+ Set the alpha value for the calibrated camera solution. The
+ alpha value is a zoom, and ranges from 0 (zoomed in, all pixels
+ in calibrated image are valid) to 1 (zoomed out, all pixels in
+ original image are in calibrated image).
+ """
+ if self.camera_model == CAMERA_MODEL.PINHOLE:
+ cv2.stereoRectify(self.l.intrinsics,
+ self.l.distortion,
+ self.r.intrinsics,
+ self.r.distortion,
+ self.size,
+ self.R,
+ self.T,
+ self.l.R, self.r.R, self.l.P, self.r.P,
+ alpha = a)
+
+ cv2.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.P, self.size, cv2.CV_32FC1,
+ self.l.mapx, self.l.mapy)
+ cv2.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.P, self.size, cv2.CV_32FC1,
+ self.r.mapx, self.r.mapy)
+
+ elif self.camera_model == CAMERA_MODEL.FISHEYE:
+ self.Q = numpy.zeros((4,4), dtype=numpy.float64)
+
+ flags = cv2.CALIB_ZERO_DISPARITY # Operation flags that may be zero or CALIB_ZERO_DISPARITY .
+ # If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views.
+ # And if the flag is not set, the function may still shift the images in the horizontal or vertical direction
+ # (depending on the orientation of epipolar lines) to maximize the useful image area.
+
+ cv2.fisheye.stereoRectify(self.l.intrinsics, self.l.distortion,
+ self.r.intrinsics, self.r.distortion,
+ self.size,
+ self.R, self.T,
+ flags,
+ self.l.R, self.r.R,
+ self.l.P, self.r.P,
+ self.Q,
+ self.size,
+ a,
+ 1.0 )
+ self.l.P[:3,:3] = numpy.dot(self.l.intrinsics,self.l.R)
+ self.r.P[:3,:3] = numpy.dot(self.r.intrinsics,self.r.R)
+ cv2.fisheye.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.intrinsics, self.size, cv2.CV_32FC1,
+ self.l.mapx, self.l.mapy)
+ cv2.fisheye.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.intrinsics, self.size, cv2.CV_32FC1,
+ self.r.mapx, self.r.mapy)
+
+ def as_message(self):
+ """
+ Return the camera calibration as a pair of CameraInfo messages, for left
+ and right cameras respectively.
+ """
+
+ return (self.lrmsg(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size, self.l.camera_model),
+ self.lrmsg(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size, self.r.camera_model))
+
+ def from_message(self, msgs, alpha = 0.0):
+ """ Initialize the camera calibration from a pair of CameraInfo messages. """
+ self.size = (msgs[0].width, msgs[0].height)
+
+ self.T = numpy.zeros((3, 1), dtype=numpy.float64)
+ self.R = numpy.eye(3, dtype=numpy.float64)
+
+ self.l.from_message(msgs[0])
+ self.r.from_message(msgs[1])
+ # Need to compute self.T and self.R here, using the monocular parameters above
+ if False:
+ self.set_alpha(0.0)
+
+ def report(self):
+ print("\nLeft:")
+ self.lrreport(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P)
+ print("\nRight:")
+ self.lrreport(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P)
+ print("self.T =", numpy.ravel(self.T).tolist())
+ print("self.R =", numpy.ravel(self.R).tolist())
+
+ def ost(self):
+ return (self.lrost(self.name + "/left", self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size) +
+ self.lrost(self.name + "/right", self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size))
+
+ def yaml(self, suffix, info):
+ return self.lryaml(self.name + suffix, info.distortion, info.intrinsics, info.R, info.P, self.size, self.camera_model)
+
+ # TODO Get rid of "from_images" versions of these, instead have function to get undistorted corners
+ def epipolar_error_from_images(self, limage, rimage):
+ """
+ Detect the checkerboard in both images and compute the epipolar error.
+ Mainly for use in tests.
+ """
+ lcorners = self.downsample_and_detect(limage)[1]
+ rcorners = self.downsample_and_detect(rimage)[1]
+ if lcorners is None or rcorners is None:
+ return None
+
+ lundistorted = self.l.undistort_points(lcorners)
+ rundistorted = self.r.undistort_points(rcorners)
+
+ return self.epipolar_error(lundistorted, rundistorted)
+
+ def epipolar_error(self, lcorners, rcorners):
+ """
+ Compute the epipolar error from two sets of matching undistorted points
+ """
+ d = lcorners[:,:,1] - rcorners[:,:,1]
+ return numpy.sqrt(numpy.square(d).sum() / d.size)
+
+ def chessboard_size_from_images(self, limage, rimage):
+ _, lcorners, _, _, board, _ = self.downsample_and_detect(limage)
+ _, rcorners, _, _, board, _ = self.downsample_and_detect(rimage)
+ if lcorners is None or rcorners is None:
+ return None
+
+ lundistorted = self.l.undistort_points(lcorners)
+ rundistorted = self.r.undistort_points(rcorners)
+
+ return self.chessboard_size(lundistorted, rundistorted, board)
+
+ def chessboard_size(self, lcorners, rcorners, board, msg = None):
+ """
+ Compute the square edge length from two sets of matching undistorted points
+ given the current calibration.
+ :param msg: a tuple of (left_msg, right_msg)
+ """
+ # Project the points to 3d
+ cam = image_geometry.StereoCameraModel()
+ if msg == None:
+ msg = self.as_message()
+ cam.fromCameraInfo(*msg)
+ disparities = lcorners[:,:,0] - rcorners[:,:,0]
+ pt3d = [cam.projectPixelTo3d((lcorners[i,0,0], lcorners[i,0,1]), disparities[i,0]) for i in range(lcorners.shape[0]) ]
+ def l2(p0, p1):
+ return math.sqrt(sum([(c0 - c1) ** 2 for (c0, c1) in zip(p0, p1)]))
+
+ # Compute the length from each horizontal and vertical line, and return the mean
+ cc = board.n_cols
+ cr = board.n_rows
+ lengths = (
+ [l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1) for r in range(cr)] +
+ [l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1) for c in range(cc)])
+ return sum(lengths) / len(lengths)
+
+ def handle_msg(self, msg):
+ # TODO Various asserts that images have same dimension, same board detected...
+ (lmsg, rmsg) = msg
+ lgray = self.mkgray(lmsg)
+ rgray = self.mkgray(rmsg)
+ epierror = -1
+
+ # Get display-images-to-be and detections of the calibration target
+ lscrib_mono, lcorners, ldownsampled_corners, lids, lboard, (x_scale, y_scale) = self.downsample_and_detect(lgray)
+ rscrib_mono, rcorners, rdownsampled_corners, rids, rboard, _ = self.downsample_and_detect(rgray)
+
+ if self.calibrated:
+ # Show rectified images
+ lremap = self.l.remap(lgray)
+ rremap = self.r.remap(rgray)
+ lrect = lremap
+ rrect = rremap
+ if x_scale != 1.0 or y_scale != 1.0:
+ lrect = cv2.resize(lremap, (lscrib_mono.shape[1], lscrib_mono.shape[0]))
+ rrect = cv2.resize(rremap, (rscrib_mono.shape[1], rscrib_mono.shape[0]))
+
+ lscrib = cv2.cvtColor(lrect, cv2.COLOR_GRAY2BGR)
+ rscrib = cv2.cvtColor(rrect, cv2.COLOR_GRAY2BGR)
+
+ # Draw rectified corners
+ if lcorners is not None:
+ lundistorted = self.l.undistort_points(lcorners)
+ scrib_src = lundistorted.copy()
+ scrib_src[:,:,0] /= x_scale
+ scrib_src[:,:,1] /= y_scale
+ cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), scrib_src, True)
+
+ if rcorners is not None:
+ rundistorted = self.r.undistort_points(rcorners)
+ scrib_src = rundistorted.copy()
+ scrib_src[:,:,0] /= x_scale
+ scrib_src[:,:,1] /= y_scale
+ cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), scrib_src, True)
+
+ # Report epipolar error
+ if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners):
+ epierror = self.epipolar_error(lundistorted, rundistorted)
+
+ else:
+ lscrib = cv2.cvtColor(lscrib_mono, cv2.COLOR_GRAY2BGR)
+ rscrib = cv2.cvtColor(rscrib_mono, cv2.COLOR_GRAY2BGR)
+ # Draw any detected chessboards onto display (downsampled) images
+ if lcorners is not None:
+ cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows),
+ ldownsampled_corners, True)
+ if rcorners is not None:
+ cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows),
+ rdownsampled_corners, True)
+
+ # Add sample to database only if it's sufficiently different from any previous sample
+ if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners):
+ params = self.get_parameters(lcorners, lids, lboard, (lgray.shape[1], lgray.shape[0]))
+ if self.is_good_sample(params, lcorners, lids, self.last_frame_corners, self.last_frame_ids):
+ self.db.append( (params, lgray, rgray) )
+ self.good_corners.append( (lcorners, rcorners, lids, rids, lboard) )
+ print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params)))
+
+ self.last_frame_corners = lcorners
+ self.last_frame_ids = lids
+ rv = StereoDrawable()
+ rv.lscrib = lscrib
+ rv.rscrib = rscrib
+ rv.params = self.compute_goodenough()
+ rv.epierror = epierror
+ return rv
+
+ def do_calibration(self, dump = False):
+ # TODO MonoCalibrator collects corners if needed here
+ self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally
+ # Dump should only occur if user wants it
+ if dump:
+ pickle.dump((self.is_mono, self.size, self.good_corners),
+ open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w"))
+ self.l.size = self.size
+ self.r.size = self.size
+ self.cal_fromcorners(self.good_corners)
+ self.calibrated = True
+ # DEBUG
+ print((self.report()))
+ print((self.ost()))
+
+ def do_tarfile_save(self, tf):
+ """ Write images and calibration solution to a tarfile object """
+ ims = ([("left-%04d.png" % i, im) for i,(_, im, _) in enumerate(self.db)] +
+ [("right-%04d.png" % i, im) for i,(_, _, im) in enumerate(self.db)])
+
+ def taradd(name, buf):
+ if isinstance(buf, str):
+ s = BytesIO(buf.encode('utf-8'))
+ else:
+ s = BytesIO(buf)
+ ti = tarfile.TarInfo(name)
+ ti.size = len(s.getvalue())
+ ti.uname = 'calibrator'
+ ti.mtime = int(time.time())
+ tf.addfile(tarinfo=ti, fileobj=s)
+
+ for (name, im) in ims:
+ taradd(name, cv2.imencode(".png", im)[1].tostring())
+ taradd('left.yaml', self.yaml("/left", self.l))
+ taradd('right.yaml', self.yaml("/right", self.r))
+ taradd('ost.txt', self.ost())
+
+ def do_tarfile_calibration(self, filename):
+ archive = tarfile.open(filename, 'r')
+ limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ]
+ rimages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ]
+
+ if not len(limages) == len(rimages):
+ raise CalibrationException("Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages)))
+
+        ##\todo Check that the left and right image filenames correspond pairwise
+
+ self.cal(limages, rimages)
diff --git a/src/image_pipeline/camera_calibration/src/camera_calibration/camera_calibrator.py b/src/image_pipeline/camera_calibration/src/camera_calibration/camera_calibrator.py
new file mode 100755
index 000000000..6d7ebdd40
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/src/camera_calibration/camera_calibrator.py
@@ -0,0 +1,417 @@
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import cv2
+import message_filters
+import numpy
+import os
+import rclpy
+from rclpy.node import Node
+import sensor_msgs.msg
+import sensor_msgs.srv
+import threading
+import time
+from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, Patterns
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+from camera_calibration.calibrator import CAMERA_MODEL
+from rclpy.qos import qos_profile_system_default
+from rclpy.qos import QoSProfile
+
+
+class BufferQueue(Queue):
+ """Slight modification of the standard Queue that discards the oldest item
+ when adding an item and the queue is full.
+ """
+ def put(self, item, *args, **kwargs):
+ # The base implementation, for reference:
+ # https://github.com/python/cpython/blob/2.7/Lib/Queue.py#L107
+ # https://github.com/python/cpython/blob/3.8/Lib/queue.py#L121
+ with self.mutex:
+ if self.maxsize > 0 and self._qsize() == self.maxsize:
+ self._get()
+ self._put(item)
+ self.unfinished_tasks += 1
+ self.not_empty.notify()
+
+
+class SpinThread(threading.Thread):
+ """
+ Thread that spins the ros node, while imshow runs in the main thread
+ """
+
+ def __init__(self, node):
+ threading.Thread.__init__(self)
+ self.node = node
+
+ def run(self):
+ rclpy.spin(self.node)
+
+
+class ConsumerThread(threading.Thread):
+ def __init__(self, queue, function):
+ threading.Thread.__init__(self)
+ self.queue = queue
+ self.function = function
+
+ def run(self):
+ while rclpy.ok():
+ m = self.queue.get()
+ self.function(m)
+
+
+class CalibrationNode(Node):
+ def __init__(self, name, boards, service_check = True, synchronizer = message_filters.TimeSynchronizer, flags = 0,
+ fisheye_flags = 0, pattern=Patterns.Chessboard, camera_name='', checkerboard_flags = 0,
+ max_chessboard_speed = -1, queue_size = 1):
+ super().__init__(name)
+
+ self.set_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "camera/set_camera_info")
+ self.set_left_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "left_camera/set_camera_info")
+ self.set_right_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "right_camera/set_camera_info")
+
+ if service_check:
+ available = False
+ # assume any non-default service names have been set. Wait for the service to become ready
+ for cli in [self.set_camera_info_service, self.set_left_camera_info_service, self.set_right_camera_info_service]:
+ print("Waiting for service", cli.srv_name, "...")
+ # check all services so they are ready.
+ if cli.wait_for_service(timeout_sec=1):
+ available = True
+ print("OK")
+ else:
+ print(f"Service {cli.srv_name} not found.")
+
+ if not available:
+ raise RuntimeError("no camera service available")
+
+ self._boards = boards
+ self._calib_flags = flags
+ self._fisheye_calib_flags = fisheye_flags
+ self._checkerboard_flags = checkerboard_flags
+ self._pattern = pattern
+ self._camera_name = camera_name
+ self._max_chessboard_speed = max_chessboard_speed
+ lsub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'left', qos_profile=self.get_topic_qos("left"))
+ rsub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'right', qos_profile=self.get_topic_qos("right"))
+ ts = synchronizer([lsub, rsub], 4)
+ ts.registerCallback(self.queue_stereo)
+
+ msub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'image', qos_profile=self.get_topic_qos("image"))
+ msub.registerCallback(self.queue_monocular)
+
+ self.q_mono = BufferQueue(queue_size)
+ self.q_stereo = BufferQueue(queue_size)
+
+ self.c = None
+
+ self._last_display = None
+
+ mth = ConsumerThread(self.q_mono, self.handle_monocular)
+ mth.daemon = True
+ mth.start()
+
+ sth = ConsumerThread(self.q_stereo, self.handle_stereo)
+ sth.daemon = True
+ sth.start()
+
+ def redraw_stereo(self, *args):
+ pass
+ def redraw_monocular(self, *args):
+ pass
+
+ def queue_monocular(self, msg):
+ self.q_mono.put(msg)
+
+ def queue_stereo(self, lmsg, rmsg):
+ self.q_stereo.put((lmsg, rmsg))
+
+ def handle_monocular(self, msg):
+ if self.c is None:
+ if self._camera_name:
+ self.c = MonoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, name=self._camera_name,
+ checkerboard_flags=self._checkerboard_flags,
+ max_chessboard_speed = self._max_chessboard_speed)
+ else:
+ self.c = MonoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern,
+ checkerboard_flags=self._checkerboard_flags,
+ max_chessboard_speed = self._max_chessboard_speed)
+
+ # This should just call the MonoCalibrator
+ drawable = self.c.handle_msg(msg)
+ self.displaywidth = drawable.scrib.shape[1]
+ self.redraw_monocular(drawable)
+
+ def handle_stereo(self, msg):
+ if self.c is None:
+ if self._camera_name:
+ self.c = StereoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, name=self._camera_name,
+ checkerboard_flags=self._checkerboard_flags,
+ max_chessboard_speed = self._max_chessboard_speed)
+ else:
+ self.c = StereoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern,
+ checkerboard_flags=self._checkerboard_flags,
+ max_chessboard_speed = self._max_chessboard_speed)
+
+ drawable = self.c.handle_msg(msg)
+ self.displaywidth = drawable.lscrib.shape[1] + drawable.rscrib.shape[1]
+ self.redraw_stereo(drawable)
+
+
+ def check_set_camera_info(self, response):
+ if response.success:
+ return True
+
+ for i in range(10):
+ print("!" * 80)
+ print()
+ print("Attempt to set camera info failed: " + (response.result() if response.result() is not None else "Not available"))
+ print()
+ for i in range(10):
+ print("!" * 80)
+ print()
+ self.get_logger().error('Unable to set camera info for calibration. Failure message: %s' % (response.result() if response.result() is not None else "Not available"))
+ return False
+
+ def do_upload(self):
+ self.c.report()
+ print(self.c.ost())
+ info = self.c.as_message()
+
+ req = sensor_msgs.srv.SetCameraInfo.Request()
+ rv = True
+ if self.c.is_mono:
+ req.camera_info = info
+ response = self.set_camera_info_service.call(req)
+ rv = self.check_set_camera_info(response)
+ else:
+ req.camera_info = info[0]
+ response = self.set_left_camera_info_service.call(req)
+ rv = rv and self.check_set_camera_info(response)
+ req.camera_info = info[1]
+ response = self.set_right_camera_info_service.call(req)
+ rv = rv and self.check_set_camera_info(response)
+ return rv
+
+ def get_topic_qos(self, topic_name: str) -> QoSProfile:
+ """!
+ Given a topic name, get the QoS profile with which it is being published.
+ Replaces history and depth settings with default values since they cannot be retrieved
+ @param topic_name (str) the topic name
+ @return QosProfile the qos profile with which the topic is published. If no publishers exist
+ for the given topic, it returns the sensor data QoS. returns None in case ROS1 is being used
+ """
+ topic_name = self.resolve_topic_name(topic_name)
+ topic_info = self.get_publishers_info_by_topic(topic_name=topic_name)
+ if len(topic_info):
+ qos_profile = topic_info[0].qos_profile
+ qos_profile.history = qos_profile_system_default.history
+ qos_profile.depth = qos_profile_system_default.depth
+ return qos_profile
+ else:
+ self.get_logger().warn(f"No publishers available for topic {topic_name}. Using system default QoS for subscriber.")
+ return qos_profile_system_default
+
+
+class OpenCVCalibrationNode(CalibrationNode):
+ """ Calibration node with an OpenCV Gui """
+ FONT_FACE = cv2.FONT_HERSHEY_SIMPLEX
+ FONT_SCALE = 0.6
+ FONT_THICKNESS = 2
+
+ def __init__(self, *args, **kwargs):
+
+ CalibrationNode.__init__(self, *args, **kwargs)
+
+ self.queue_display = BufferQueue(maxsize=1)
+ self.initWindow()
+
+ def spin(self):
+ sth = SpinThread(self)
+ sth.start()
+
+ while rclpy.ok():
+ if self.queue_display.qsize() > 0:
+ self.image = self.queue_display.get()
+ cv2.imshow("display", self.image)
+ else:
+ time.sleep(0.1)
+ k = cv2.waitKey(6) & 0xFF
+ if k in [27, ord('q')]:
+ return
+ elif k == ord('s') and self.image is not None:
+ self.screendump(self.image)
+
+ def initWindow(self):
+ cv2.namedWindow("display", cv2.WINDOW_NORMAL)
+ cv2.setMouseCallback("display", self.on_mouse)
+ cv2.createTrackbar("Camera type: \n 0 : pinhole \n 1 : fisheye", "display", 0,1, self.on_model_change)
+ cv2.createTrackbar("scale", "display", 0, 100, self.on_scale)
+
+ @classmethod
+ def putText(cls, img, text, org, color = (0,0,0)):
+ cv2.putText(img, text, org, cls.FONT_FACE, cls.FONT_SCALE, color, thickness = cls.FONT_THICKNESS)
+
+ @classmethod
+ def getTextSize(cls, text):
+ return cv2.getTextSize(text, cls.FONT_FACE, cls.FONT_SCALE, cls.FONT_THICKNESS)[0]
+
+ def on_mouse(self, event, x, y, flags, param):
+ if event == cv2.EVENT_LBUTTONDOWN and self.displaywidth < x:
+ if self.c.goodenough:
+ if 180 <= y < 280:
+ print("**** Calibrating ****")
+ # Perform calibration in another thread to prevent UI blocking
+ threading.Thread(target=self.c.do_calibration, name="Calibration").start()
+ self.buttons(self._last_display)
+ self.queue_display.put(self._last_display)
+ if self.c.calibrated:
+ if 280 <= y < 380:
+ self.c.do_save()
+ elif 380 <= y < 480:
+ # Only shut down if we set camera info correctly, #3993
+ if self.do_upload():
+ rclpy.shutdown()
+ def on_model_change(self, model_select_val):
+ if self.c is None:
+ print("Cannot change camera model until the first image has been received")
+ return
+
+ self.c.set_cammodel( CAMERA_MODEL.PINHOLE if model_select_val < 0.5 else CAMERA_MODEL.FISHEYE)
+
+ def on_scale(self, scalevalue):
+ if self.c.calibrated:
+ self.c.set_alpha(scalevalue / 100.0)
+
+ def button(self, dst, label, enable):
+ dst.fill(255)
+ size = (dst.shape[1], dst.shape[0])
+ if enable:
+ color = (155, 155, 80)
+ else:
+ color = (224, 224, 224)
+ cv2.circle(dst, (size[0] // 2, size[1] // 2), min(size) // 2, color, -1)
+ (w, h) = self.getTextSize(label)
+ self.putText(dst, label, ((size[0] - w) // 2, (size[1] + h) // 2), (255,255,255))
+
+ def buttons(self, display):
+ x = self.displaywidth
+ self.button(display[180:280,x:x+100], "CALIBRATE", self.c.goodenough)
+ self.button(display[280:380,x:x+100], "SAVE", self.c.calibrated)
+ self.button(display[380:480,x:x+100], "COMMIT", self.c.calibrated)
+
+ def y(self, i):
+ """Set up right-size images"""
+ return 30 + 40 * i
+
+ def screendump(self, im):
+ i = 0
+ while os.access("/tmp/dump%d.png" % i, os.R_OK):
+ i += 1
+ cv2.imwrite("/tmp/dump%d.png" % i, im)
+ print("Saved screen dump to /tmp/dump%d.png" % i)
+
+ def redraw_monocular(self, drawable):
+ height = drawable.scrib.shape[0]
+ width = drawable.scrib.shape[1]
+
+ display = numpy.zeros((max(480, height), width + 100, 3), dtype=numpy.uint8)
+ display[0:height, 0:width,:] = drawable.scrib
+ display[0:height, width:width+100,:].fill(255)
+
+ self.buttons(display)
+ if not self.c.calibrated:
+ if drawable.params:
+ for i, (label, lo, hi, progress) in enumerate(drawable.params):
+ (w,_) = self.getTextSize(label)
+ self.putText(display, label, (width + (100 - w) // 2, self.y(i)))
+ color = (0,255,0)
+ if progress < 1.0:
+ color = (0, int(progress*255.), 255)
+ cv2.line(display,
+ (int(width + lo * 100), self.y(i) + 20),
+ (int(width + hi * 100), self.y(i) + 20),
+ color, 4)
+
+ else:
+ self.putText(display, "lin.", (width, self.y(0)))
+ linerror = drawable.linear_error
+ if linerror is None or linerror < 0:
+ msg = "?"
+ else:
+ msg = "%.2f" % linerror
+ #print "linear", linerror
+ self.putText(display, msg, (width, self.y(1)))
+
+ self._last_display = display
+ self.queue_display.put(display)
+
+ def redraw_stereo(self, drawable):
+ height = drawable.lscrib.shape[0]
+ width = drawable.lscrib.shape[1]
+
+ display = numpy.zeros((max(480, height), 2 * width + 100, 3), dtype=numpy.uint8)
+ display[0:height, 0:width,:] = drawable.lscrib
+ display[0:height, width:2*width,:] = drawable.rscrib
+ display[0:height, 2*width:2*width+100,:].fill(255)
+
+ self.buttons(display)
+
+ if not self.c.calibrated:
+ if drawable.params:
+ for i, (label, lo, hi, progress) in enumerate(drawable.params):
+ (w,_) = self.getTextSize(label)
+ self.putText(display, label, (2 * width + (100 - w) // 2, self.y(i)))
+ color = (0,255,0)
+ if progress < 1.0:
+ color = (0, int(progress*255.), 255)
+ cv2.line(display,
+ (int(2 * width + lo * 100), self.y(i) + 20),
+ (int(2 * width + hi * 100), self.y(i) + 20),
+ color, 4)
+
+ else:
+ self.putText(display, "epi.", (2 * width, self.y(0)))
+ if drawable.epierror == -1:
+ msg = "?"
+ else:
+ msg = "%.2f" % drawable.epierror
+ self.putText(display, msg, (2 * width, self.y(1)))
+ # TODO dim is never set anywhere. Supposed to be observed chessboard size?
+ if drawable.dim != -1:
+ self.putText(display, "dim", (2 * width, self.y(2)))
+ self.putText(display, "%.3f" % drawable.dim, (2 * width, self.y(3)))
+
+ self._last_display = display
+ self.queue_display.put(display)
diff --git a/src/image_pipeline/camera_calibration/src/camera_calibration/camera_checker.py b/src/image_pipeline/camera_calibration/src/camera_calibration/camera_checker.py
new file mode 100755
index 000000000..19149b5ec
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/src/camera_calibration/camera_checker.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+#
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import cv2
+import cv_bridge
+import functools
+import message_filters
+import numpy
+import rclpy
+from rclpy.node import Node
+import sensor_msgs.msg
+import sensor_msgs.srv
+import threading
+
+from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, ChessboardInfo
+from message_filters import ApproximateTimeSynchronizer
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+
+def mean(seq):
+ return sum(seq) / len(seq)
+
+def lmin(seq1, seq2):
+ """ Pairwise minimum of two sequences """
+ return [min(a, b) for (a, b) in zip(seq1, seq2)]
+
+def lmax(seq1, seq2):
+ """ Pairwise maximum of two sequences """
+ return [max(a, b) for (a, b) in zip(seq1, seq2)]
+
+class ConsumerThread(threading.Thread):
+ def __init__(self, queue, function):
+ threading.Thread.__init__(self)
+ self.queue = queue
+ self.function = function
+
+ def run(self):
+ while rclpy.ok():
+ m = self.queue.get()
+ if self.queue.empty():
+ break
+ self.function(m)
+
+class CameraCheckerNode(Node):
+
+ def __init__(self, name, chess_size, dim, approximate=0):
+ super().__init__(name)
+ self.board = ChessboardInfo()
+ self.board.n_cols = chess_size[0]
+ self.board.n_rows = chess_size[1]
+ self.board.dim = dim
+
+ # make sure n_cols is not smaller than n_rows, otherwise error computation will be off
+ if self.board.n_cols < self.board.n_rows:
+ self.board.n_cols, self.board.n_rows = self.board.n_rows, self.board.n_cols
+
+ image_topic = "monocular/image_rect"
+ camera_topic = "monocular/camera_info"
+
+ tosync_mono = [
+ (image_topic, sensor_msgs.msg.Image),
+ (camera_topic, sensor_msgs.msg.CameraInfo),
+ ]
+
+ if approximate <= 0:
+ sync = message_filters.TimeSynchronizer
+ else:
+ sync = functools.partial(ApproximateTimeSynchronizer, slop=approximate)
+
+ tsm = sync([message_filters.Subscriber(self, type, topic) for (topic, type) in tosync_mono], 10)
+ tsm.registerCallback(self.queue_monocular)
+
+ left_topic = "stereo/left/image_rect"
+ left_camera_topic = "stereo/left/camera_info"
+ right_topic = "stereo/right/image_rect"
+ right_camera_topic = "stereo/right/camera_info"
+
+ tosync_stereo = [
+ (left_topic, sensor_msgs.msg.Image),
+ (left_camera_topic, sensor_msgs.msg.CameraInfo),
+ (right_topic, sensor_msgs.msg.Image),
+ (right_camera_topic, sensor_msgs.msg.CameraInfo)
+ ]
+
+ tss = sync([message_filters.Subscriber(self, type, topic) for (topic, type) in tosync_stereo], 10)
+ tss.registerCallback(self.queue_stereo)
+
+ self.br = cv_bridge.CvBridge()
+
+ self.q_mono = Queue()
+ self.q_stereo = Queue()
+
+ mth = ConsumerThread(self.q_mono, self.handle_monocular)
+ mth.daemon = True
+ mth.start()
+
+ sth = ConsumerThread(self.q_stereo, self.handle_stereo)
+ sth.daemon = True
+ sth.start()
+
+ self.mc = MonoCalibrator([self.board])
+ self.sc = StereoCalibrator([self.board])
+
+ def queue_monocular(self, msg, cmsg):
+ self.q_mono.put((msg, cmsg))
+
+ def queue_stereo(self, lmsg, lcmsg, rmsg, rcmsg):
+ self.q_stereo.put((lmsg, lcmsg, rmsg, rcmsg))
+
+ def mkgray(self, msg):
+ return self.mc.mkgray(msg)
+
+ def image_corners(self, im):
+ (ok, corners, ids, b) = self.mc.get_corners(im)
+ if ok:
+ return corners, ids
+ else:
+ return None, None
+
+ def handle_monocular(self, msg):
+
+ (image, camera) = msg
+ gray = self.mkgray(image)
+ C, ids = self.image_corners(gray)
+ if C is not None:
+ linearity_rms = self.mc.linear_error(C, ids, self.board)
+
+ # Add in reprojection check
+ image_points = C
+ object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0]
+ dist_coeffs = numpy.zeros((4, 1))
+ camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2] ],
+ [ camera.P[4], camera.P[5], camera.P[6] ],
+ [ camera.P[8], camera.P[9], camera.P[10] ] ] )
+ ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
+ # Convert rotation into a 3x3 Rotation Matrix
+ rot3x3, _ = cv2.Rodrigues(rot)
+ # Reproject model points into image
+ object_points_world = numpy.asmatrix(rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans)
+ reprojected_h = camera_matrix * object_points_world
+ reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :])
+ reprojection_errors = image_points.squeeze().T - reprojected
+
+ reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.prod(reprojection_errors.shape))
+
+ # Print the results
+ print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms))
+ else:
+ print('no chessboard')
+
+ def handle_stereo(self, msg):
+
+ (lmsg, lcmsg, rmsg, rcmsg) = msg
+ lgray = self.mkgray(lmsg)
+ rgray = self.mkgray(rmsg)
+
+ L, _ = self.image_corners(lgray)
+ R, _ = self.image_corners(rgray)
+ if L is not None and R is not None:
+ epipolar = self.sc.epipolar_error(L, R)
+
+ dimension = self.sc.chessboard_size(L, R, self.board, msg=(lcmsg, rcmsg))
+
+ print("epipolar error: %f pixels dimension: %f m" % (epipolar, dimension))
+ else:
+ print("no chessboard")
diff --git a/src/image_pipeline/camera_calibration/src/camera_calibration/nodes/__init__.py b/src/image_pipeline/camera_calibration/src/camera_calibration/nodes/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/image_pipeline/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py b/src/image_pipeline/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py
new file mode 100755
index 000000000..df08e2706
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+#
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import cv2
+import functools
+import message_filters
+import rclpy
+from camera_calibration.camera_calibrator import OpenCVCalibrationNode
+from camera_calibration.calibrator import ChessboardInfo, Patterns
+from message_filters import ApproximateTimeSynchronizer
+
+def optionsValidCharuco(options, parser):
+ """
+ Validates the provided options when the pattern type is 'charuco'
+ """
+ if options.pattern != 'charuco': return False
+
+ n_boards = len(options.size)
+ if (n_boards != len(options.square) or n_boards != len(options.charuco_marker_size) or n_boards !=
+ len(options.aruco_dict)):
+ parser.error("When using ChArUco boards, --size, --square, --charuco_marker_size, and --aruco_dict " +
+ "must be specified for each board")
+ return False
+
+ # TODO: check for fisheye and stereo (not implemented with ChArUco)
+ return True
+
+
+def main():
+ from optparse import OptionParser, OptionGroup
+ parser = OptionParser("%prog --size SIZE1 --square SQUARE1 [ --size SIZE2 --square SQUARE2 ]",
+ description=None)
+ parser.add_option("-c", "--camera_name",
+ type="string", default='narrow_stereo',
+ help="name of the camera to appear in the calibration file")
+ group = OptionGroup(parser, "Chessboard Options",
+ "You must specify one or more chessboards as pairs of --size and --square options.")
+ group.add_option("-p", "--pattern",
+ type="string", default="chessboard",
+ help="calibration pattern to detect - 'chessboard', 'circles', 'acircles', 'charuco'\n" +
+ " if 'charuco' is used, a --charuco_marker_size and --aruco_dict argument must be supplied\n" +
+ " with each --size and --square argument")
+ group.add_option("-s", "--size",
+ action="append", default=[],
+ help="chessboard size as NxM, counting interior corners (e.g. a standard chessboard is 7x7)")
+ group.add_option("-q", "--square",
+ action="append", default=[],
+ help="chessboard square size in meters")
+ group.add_option("-m", "--charuco_marker_size",
+ action="append", default=[],
+ help="ArUco marker size (meters); only valid with `-p charuco`")
+ group.add_option("-d", "--aruco_dict",
+ action="append", default=[],
+ help="ArUco marker dictionary; only valid with `-p charuco`; one of 'aruco_orig', '4x4_250', " +
+ "'5x5_250', '6x6_250', '7x7_250'")
+ parser.add_option_group(group)
+ group = OptionGroup(parser, "ROS Communication Options")
+ group.add_option("--approximate",
+ type="float", default=0.0,
+ help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras")
+ group.add_option("--no-service-check",
+ action="store_false", dest="service_check", default=True,
+ help="disable check for set_camera_info services at startup")
+ group.add_option("--queue-size",
+ type="int", default=1,
+ help="image queue size (default %default, set to 0 for unlimited)")
+ parser.add_option_group(group)
+ group = OptionGroup(parser, "Calibration Optimizer Options")
+ group.add_option("--fix-principal-point",
+ action="store_true", default=False,
+ help="for pinhole, fix the principal point at the image center")
+ group.add_option("--fix-aspect-ratio",
+ action="store_true", default=False,
+ help="for pinhole, enforce focal lengths (fx, fy) are equal")
+ group.add_option("--zero-tangent-dist",
+ action="store_true", default=False,
+ help="for pinhole, set tangential distortion coefficients (p1, p2) to zero")
+ group.add_option("-k", "--k-coefficients",
+ type="int", default=2, metavar="NUM_COEFFS",
+ help="for pinhole, number of radial distortion coefficients to use (up to 6, default %default)")
+
+ group.add_option("--fisheye-recompute-extrinsicsts",
+ action="store_true", default=False,
+ help="for fisheye, extrinsic will be recomputed after each iteration of intrinsic optimization")
+ group.add_option("--fisheye-fix-skew",
+ action="store_true", default=False,
+ help="for fisheye, skew coefficient (alpha) is set to zero and stay zero")
+ group.add_option("--fisheye-fix-principal-point",
+ action="store_true", default=False,
+ help="for fisheye,fix the principal point at the image center")
+ group.add_option("--fisheye-k-coefficients",
+ type="int", default=4, metavar="NUM_COEFFS",
+ help="for fisheye, number of radial distortion coefficients to use fixing to zero the rest (up to 4, default %default)")
+
+ group.add_option("--fisheye-check-conditions",
+ action="store_true", default=False,
+ help="for fisheye, the functions will check validity of condition number")
+
+ group.add_option("--disable_calib_cb_fast_check", action='store_true', default=False,
+ help="uses the CALIB_CB_FAST_CHECK flag for findChessboardCorners")
+ group.add_option("--max-chessboard-speed", type="float", default=-1.0,
+ help="Do not use samples where the calibration pattern is moving faster \
+ than this speed in px/frame. Set to eg. 0.5 for rolling shutter cameras.")
+
+ parser.add_option_group(group)
+ options, _ = parser.parse_args(rclpy.utilities.remove_ros_args())
+
+ if (len(options.size) != len(options.square)):
+ parser.error("Number of size and square inputs must be the same!")
+
+ if not options.square:
+ options.square.append("0.108")
+ options.size.append("8x6")
+
+ boards = []
+ if options.pattern == "charuco" and optionsValidCharuco(options, parser):
+ for (sz, sq, ms, ad) in zip(options.size, options.square, options.charuco_marker_size, options.aruco_dict):
+ size = tuple([int(c) for c in sz.split('x')])
+ boards.append(ChessboardInfo('charuco', size[0], size[1], float(sq), float(ms), ad))
+ else:
+ for (sz, sq) in zip(options.size, options.square):
+ size = tuple([int(c) for c in sz.split('x')])
+ boards.append(ChessboardInfo(options.pattern, size[0], size[1], float(sq)))
+
+ if options.approximate == 0.0:
+ sync = message_filters.TimeSynchronizer
+ else:
+ sync = functools.partial(ApproximateTimeSynchronizer, slop=options.approximate)
+
+ # Pinhole opencv calibration options parsing
+ num_ks = options.k_coefficients
+
+ calib_flags = 0
+ if options.fix_principal_point:
+ calib_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
+ if options.fix_aspect_ratio:
+ calib_flags |= cv2.CALIB_FIX_ASPECT_RATIO
+ if options.zero_tangent_dist:
+ calib_flags |= cv2.CALIB_ZERO_TANGENT_DIST
+ if (num_ks > 3):
+ calib_flags |= cv2.CALIB_RATIONAL_MODEL
+ if (num_ks < 6):
+ calib_flags |= cv2.CALIB_FIX_K6
+ if (num_ks < 5):
+ calib_flags |= cv2.CALIB_FIX_K5
+ if (num_ks < 4):
+ calib_flags |= cv2.CALIB_FIX_K4
+ if (num_ks < 3):
+ calib_flags |= cv2.CALIB_FIX_K3
+ if (num_ks < 2):
+ calib_flags |= cv2.CALIB_FIX_K2
+ if (num_ks < 1):
+ calib_flags |= cv2.CALIB_FIX_K1
+
+ # Opencv calibration flags parsing:
+ num_ks = options.fisheye_k_coefficients
+ fisheye_calib_flags = 0
+ if options.fisheye_fix_principal_point:
+ fisheye_calib_flags |= cv2.fisheye.CALIB_FIX_PRINCIPAL_POINT
+ if options.fisheye_fix_skew:
+ fisheye_calib_flags |= cv2.fisheye.CALIB_FIX_SKEW
+ if options.fisheye_recompute_extrinsicsts:
+ fisheye_calib_flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC
+ if options.fisheye_check_conditions:
+ fisheye_calib_flags |= cv2.fisheye.CALIB_CHECK_COND
+ if (num_ks < 4):
+ fisheye_calib_flags |= cv2.fisheye.CALIB_FIX_K4
+ if (num_ks < 3):
+ fisheye_calib_flags |= cv2.fisheye.CALIB_FIX_K3
+ if (num_ks < 2):
+ fisheye_calib_flags |= cv2.fisheye.CALIB_FIX_K2
+ if (num_ks < 1):
+ fisheye_calib_flags |= cv2.fisheye.CALIB_FIX_K1
+
+ pattern = Patterns.Chessboard
+ if options.pattern == 'circles':
+ pattern = Patterns.Circles
+ elif options.pattern == 'acircles':
+ pattern = Patterns.ACircles
+ elif options.pattern == 'charuco':
+ pattern = Patterns.ChArUco
+ elif options.pattern != 'chessboard':
+ print('Unrecognized pattern %s, defaulting to chessboard' % options.pattern)
+
+ if options.disable_calib_cb_fast_check:
+ checkerboard_flags = 0
+ else:
+ checkerboard_flags = cv2.CALIB_CB_FAST_CHECK
+
+ rclpy.init()
+ node = OpenCVCalibrationNode("cameracalibrator", boards, options.service_check, sync,
+ calib_flags, fisheye_calib_flags, pattern, options.camera_name,
+ checkerboard_flags=checkerboard_flags, max_chessboard_speed=options.max_chessboard_speed,
+ queue_size=options.queue_size)
+ node.spin()
+ rclpy.shutdown()
+
+if __name__ == "__main__":
+ try:
+ main()
+ except Exception as e:
+ import traceback
+ traceback.print_exc()
diff --git a/src/image_pipeline/camera_calibration/src/camera_calibration/nodes/cameracheck.py b/src/image_pipeline/camera_calibration/src/camera_calibration/nodes/cameracheck.py
new file mode 100755
index 000000000..b2b3906cf
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/src/camera_calibration/nodes/cameracheck.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+#
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import rclpy
+from camera_calibration.camera_checker import CameraCheckerNode
+
+
+def main():
+ from optparse import OptionParser
+ parser = OptionParser()
+ parser.add_option("-s", "--size", default="8x6", help="specify chessboard size as nxm [default: %default]")
+ parser.add_option("-q", "--square", default=".108", help="specify chessboard square size in meters [default: %default]")
+ parser.add_option("--approximate",
+ type="float", default=0.0,
+ help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras")
+
+ options, _ = parser.parse_args(rclpy.utilities.remove_ros_args())
+ rclpy.init()
+
+ size = tuple([int(c) for c in options.size.split('x')])
+ dim = float(options.square)
+ approximate = float(options.approximate)
+ node = CameraCheckerNode("cameracheck", size, dim, approximate)
+ rclpy.spin(node)
+
+if __name__ == "__main__":
+ main()
diff --git a/src/image_pipeline/camera_calibration/test/test_directed.py b/src/image_pipeline/camera_calibration/test/test_directed.py
new file mode 100644
index 000000000..80ec1a8b9
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/test/test_directed.py
@@ -0,0 +1,296 @@
+#!/usr/bin/env python
+#
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import cv2
+
+import collections
+import copy
+import numpy
+import os
+import requests
+import tarfile
+import unittest
+
+from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, \
+ Patterns, CalibrationException, ChessboardInfo, image_from_archive
+
+board = ChessboardInfo()
+board.n_cols = 8
+board.n_rows = 6
+board.dim = 0.108
+
+class TestDirected(unittest.TestCase):
+ def setUp(self):
+ if not os.path.isfile('/tmp/camera_calibration.tar.gz'):
+ url = 'http://download.ros.org/data/camera_calibration/camera_calibration.tar.gz'
+ r = requests.get(url, allow_redirects=True)
+ with open('/tmp/camera_calibration.tar.gz', 'wb') as cf:
+ cf.write(r.content)
+
+ tar_path = '/tmp/camera_calibration.tar.gz'
+ self.tar = tarfile.open(tar_path, 'r')
+ self.limages = [image_from_archive(self.tar, "wide/left%04d.pgm" % i) for i in range(3, 15)]
+ self.rimages = [image_from_archive(self.tar, "wide/right%04d.pgm" % i) for i in range(3, 15)]
+ self.l = {}
+ self.r = {}
+ self.sizes = [(320,240), (640,480), (800,600), (1024,768)]
+ for dim in self.sizes:
+ self.l[dim] = []
+ self.r[dim] = []
+ for li,ri in zip(self.limages, self.rimages):
+ rli = cv2.resize(li, (dim[0], dim[1]))
+ rri = cv2.resize(ri, (dim[0], dim[1]))
+ self.l[dim].append(rli)
+ self.r[dim].append(rri)
+
+ def assert_good_mono(self, c, dim, max_err):
+ self.assertTrue(len(c.ost()) > 0)
+ lin_err = 0
+ n = 0
+ for img in self.l[dim]:
+ lin_err_local = c.linear_error_from_image(img)
+ if lin_err_local:
+ lin_err += lin_err_local
+ n += 1
+ if n > 0:
+ lin_err /= n
+ self.assertTrue(0.0 < lin_err, 'lin_err is %f' % lin_err)
+ self.assertTrue(lin_err < max_err, 'lin_err is %f' % lin_err)
+
+ flat = c.remap(img)
+ self.assertEqual(img.shape, flat.shape)
+
+ def test_monocular(self):
+ # Run the calibrator, produce a calibration, check it
+ mc = MonoCalibrator([ board ], cv2.CALIB_FIX_K3)
+ max_errs = [0.1, 0.2, 0.4, 0.7]
+ for i, dim in enumerate(self.sizes):
+ mc.cal(self.l[dim])
+ self.assert_good_mono(mc, dim, max_errs[i])
+
+ # Make another calibration, import previous calibration as a message,
+ # and assert that the new one is good.
+
+ mc2 = MonoCalibrator([board])
+ mc2.from_message(mc.as_message())
+ self.assert_good_mono(mc2, dim, max_errs[i])
+
+ def test_stereo(self):
+ epierrors = [0.1, 0.2, 0.45, 1.0]
+ for i, dim in enumerate(self.sizes):
+ print("Dim =", dim)
+ sc = StereoCalibrator([board], cv2.CALIB_FIX_K3)
+ sc.cal(self.l[dim], self.r[dim])
+
+ sc.report()
+ #print sc.ost()
+
+ # NOTE: epipolar error currently increases with resolution.
+ # At highest res expect error ~0.75
+ epierror = 0
+ n = 0
+ for l_img, r_img in zip(self.l[dim], self.r[dim]):
+ epierror_local = sc.epipolar_error_from_images(l_img, r_img)
+ if epierror_local:
+ epierror += epierror_local
+ n += 1
+ epierror /= n
+ self.assertTrue(epierror < epierrors[i],
+ 'Epipolar error is %f for resolution i = %d' % (epierror, i))
+
+ self.assertAlmostEqual(sc.chessboard_size_from_images(self.l[dim][0], self.r[dim][0]), .108, 2)
+
+ #print sc.as_message()
+
+ img = self.l[dim][0]
+ flat = sc.l.remap(img)
+ self.assertEqual(img.shape, flat.shape)
+ flat = sc.r.remap(img)
+ self.assertEqual(img.shape, flat.shape)
+
+ sc2 = StereoCalibrator([board])
+ sc2.from_message(sc.as_message())
+ # sc2.set_alpha(1.0)
+ #sc2.report()
+ self.assertTrue(len(sc2.ost()) > 0)
+
+ def test_nochecker(self):
+ # Run with same images, but looking for an incorrect chessboard size (8, 7).
+ # Should raise an exception because of lack of input points.
+ new_board = copy.deepcopy(board)
+ new_board.n_cols = 8
+ new_board.n_rows = 7
+
+ sc = StereoCalibrator([new_board])
+ self.assertRaises(CalibrationException, lambda: sc.cal(self.limages, self.rimages))
+ mc = MonoCalibrator([new_board])
+ self.assertRaises(CalibrationException, lambda: mc.cal(self.limages))
+
+
+
+class TestArtificial(unittest.TestCase):
+ Setup = collections.namedtuple('Setup', ['pattern', 'cols', 'rows', 'lin_err', 'K_err'])
+
+ def setUp(self):
+ # Define some image transforms that will simulate a camera position
+ M = []
+ self.k = numpy.array([[500, 0, 250], [0, 500, 250], [0, 0, 1]], numpy.float32)
+ self.d = numpy.array([])
+ # physical size of the board
+ self.board_width_dim = 1
+
+ # Generate data for different grid types. For each grid type, define the different sizes of
+ # grid that are recognized (n row, n col)
+ # Patterns.Circles, Patterns.ACircles
+ self.setups = [ self.Setup(pattern=Patterns.Chessboard, cols=7, rows=8, lin_err=0.2, K_err=8.2),
+ self.Setup(pattern=Patterns.Circles, cols=7, rows=8, lin_err=0.1, K_err=4),
+ self.Setup(pattern=Patterns.ACircles, cols=3, rows=5, lin_err=0.1, K_err=8) ]
+ self.limages = []
+ self.rimages = []
+ for setup in self.setups:
+ self.limages.append([])
+ self.rimages.append([])
+
+ # Create the pattern
+ if setup.pattern == Patterns.Chessboard:
+ pattern = numpy.zeros((50*(setup.rows+3), 50*(setup.cols+3), 1), numpy.uint8)
+ pattern.fill(255)
+ for j in range(1, setup.rows+2):
+ for i in range(1+(j%2), setup.cols+2, 2):
+ pattern[50*j:50*(j+1), 50*i:50*(i+1)].fill(0)
+ elif setup.pattern == Patterns.Circles:
+ pattern = numpy.zeros((50*(setup.rows+2), 50*(setup.cols+2), 1), numpy.uint8)
+ pattern.fill(255)
+ for j in range(1, setup.rows+1):
+ for i in range(1, setup.cols+1):
+ cv2.circle(pattern, (int(50*i + 25), int(50*j + 25)), 15, (0,0,0), -1)
+ elif setup.pattern == Patterns.ACircles:
+ x = 60
+ pattern = numpy.zeros((x*(setup.rows+2), x*(setup.cols+5), 1), numpy.uint8)
+ pattern.fill(255)
+ for j in range(1, setup.rows+1):
+ for i in range(0, setup.cols):
+ cv2.circle(pattern, (int(x*(1 + 2*i + (j%2)) + x/2), int(x*j + x/2)), int(x/3), (0,0,0), -1)
+
+ rows, cols, _ = pattern.shape
+ object_points_2d = numpy.array([[0, 0], [0, cols-1], [rows-1, cols-1], [rows-1, 0]], numpy.float32)
+ object_points_3d = numpy.array([[0, 0, 0], [0, cols-1, 0], [rows-1, cols-1, 0], [rows-1, 0, 0]], numpy.float32)
+ object_points_3d *= self.board_width_dim/float(cols)
+
+ # create the artificial view points
+ rvec = [ [0, 0, 0], [0, 0, 0.4], [0, 0.4, 0], [0.4, 0, 0], [0.4, 0.4, 0], [0.4, 0, 0.4], [0, 0.4, 0.4], [0.4, 0.4, 0.4] ]
+ tvec = [ [-0.5, -0.5, 3], [-0.5, -0.5, 3], [-0.5, -0.1, 3], [-0.1, -0.5, 3], [-0.1, -0.1, 3], [-0.1, -0.5, 3], [-0.5, -0.1, 3], [-0.1, 0.1, 3] ]
+
+ dsize = (480, 640)
+ for i in range(len(rvec)):
+ R = numpy.array(rvec[i], numpy.float32)
+ T = numpy.array(tvec[i], numpy.float32)
+
+ image_points, _ = cv2.projectPoints(object_points_3d, R, T, self.k, self.d)
+
+ # deduce the perspective transform
+ M.append(cv2.getPerspectiveTransform(object_points_2d, image_points))
+
+ # project the pattern according to the different cameras
+ pattern_warped = cv2.warpPerspective(pattern, M[i], dsize)
+ self.limages[-1].append(pattern_warped)
+
+ def assert_good_mono(self, c, images, max_err):
+ #c.report()
+ self.assertTrue(len(c.ost()) > 0)
+ lin_err = 0
+ n = 0
+ for img in images:
+ lin_err_local = c.linear_error_from_image(img)
+ if lin_err_local:
+ lin_err += lin_err_local
+ n += 1
+ if n > 0:
+ lin_err /= n
+ print("linear error is %f" % lin_err)
+ self.assertTrue(0.0 < lin_err, 'lin_err is %f' % lin_err)
+ self.assertTrue(lin_err < max_err, 'lin_err is %f' % lin_err)
+
+ flat = c.remap(img)
+ self.assertEqual(img.shape, flat.shape)
+
+ def test_monocular(self):
+ # Run the calibrator, produce a calibration, check it
+ for i, setup in enumerate(self.setups):
+ board = ChessboardInfo()
+ board.n_cols = setup.cols
+ board.n_rows = setup.rows
+ board.dim = self.board_width_dim
+
+ mc = MonoCalibrator([ board ], flags=cv2.CALIB_FIX_K3, pattern=setup.pattern)
+
+ if 0:
+ # display the patterns viewed by the camera
+ for pattern_warped in self.limages[i]:
+ cv2.imshow("toto", pattern_warped)
+ cv2.waitKey(0)
+
+ mc.cal(self.limages[i])
+ self.assert_good_mono(mc, self.limages[i], setup.lin_err)
+
+ # Make sure the intrinsics are similar
+ err_intrinsics = numpy.linalg.norm(mc.intrinsics - self.k, ord=numpy.inf)
+ self.assertTrue(err_intrinsics < setup.K_err,
+ 'intrinsics error is %f for resolution i = %d' % (err_intrinsics, i))
+ print('intrinsics error is %f' % numpy.linalg.norm(mc.intrinsics - self.k, ord=numpy.inf))
+
+ def test_rational_polynomial_model(self):
+ """Test that the distortion coefficients returned for a rational_polynomial model are not empty."""
+ for i, setup in enumerate(self.setups):
+ board = ChessboardInfo()
+ board.n_cols = setup.cols
+ board.n_rows = setup.rows
+ board.dim = self.board_width_dim
+
+ mc = MonoCalibrator([ board ], flags=cv2.CALIB_RATIONAL_MODEL, pattern=setup.pattern)
+ mc.cal(self.limages[i])
+ self.assertEqual(len(mc.distortion.flat), 8,
+ 'length of distortion coefficients is %d' % len(mc.distortion.flat))
+ self.assertTrue(all(mc.distortion.flat != 0),
+ 'some distortion coefficients are zero: %s' % str(mc.distortion.flatten()))
+ self.assertEqual(mc.as_message().distortion_model, 'rational_polynomial')
+ self.assert_good_mono(mc, self.limages[i], setup.lin_err)
+
+ yaml = mc.yaml()
+ # Issue #278
+ self.assertIn('cols: 8', yaml)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/src/image_pipeline/camera_calibration/test/test_multiple_boards.py b/src/image_pipeline/camera_calibration/test/test_multiple_boards.py
new file mode 100644
index 000000000..40e1faa50
--- /dev/null
+++ b/src/image_pipeline/camera_calibration/test/test_multiple_boards.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+#
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import rclpy
+import ament_index_python
+import requests
+import unittest
+import tarfile
+import os
+
+from camera_calibration.calibrator import StereoCalibrator, ChessboardInfo, image_from_archive
+
+# Large board used for PR2 calibration
+board = ChessboardInfo()
+board.n_cols = 7
+board.n_rows = 6
+board.dim = 0.108
+
+class TestMultipleBoards(unittest.TestCase):
+ def test_multiple_boards(self):
+ small_board = ChessboardInfo()
+ small_board.n_cols = 5
+ small_board.n_rows = 4
+ small_board.dim = 0.025
+
+ stereo_cal = StereoCalibrator([board, small_board])
+ if not os.path.isfile('/tmp/multi_board_calibration.tar.gz'):
+ url = 'http://download.ros.org/data/camera_calibration/multi_board_calibration.tar.gz'
+ r = requests.get(url, allow_redirects=True)
+ with open('/tmp/multi_board_calibration.tar.gz', 'wb') as mcf:
+ mcf.write(r.content)
+
+ my_archive_name = '/tmp/multi_board_calibration.tar.gz'
+ stereo_cal.do_tarfile_calibration(my_archive_name)
+
+ stereo_cal.report()
+ stereo_cal.ost()
+
+ # Check error for big image
+ archive = tarfile.open(my_archive_name)
+ l1_big = image_from_archive(archive, "left-0000.png")
+ r1_big = image_from_archive(archive, "right-0000.png")
+ epi_big = stereo_cal.epipolar_error_from_images(l1_big, r1_big)
+ self.assertTrue(epi_big < 1.0, "Epipolar error for large checkerboard > 1.0. Error: %.2f" % epi_big)
+
+ # Small checkerboard has larger error threshold for now
+ l1_sm = image_from_archive(archive, "left-0012-sm.png")
+ r1_sm = image_from_archive(archive, "right-0012-sm.png")
+ epi_sm = stereo_cal.epipolar_error_from_images(l1_sm, r1_sm)
+ self.assertTrue(epi_sm < 2.0, "Epipolar error for small checkerboard > 2.0. Error: %.2f" % epi_sm)
+
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
\ No newline at end of file
diff --git a/src/image_pipeline/depth_image_proc/CHANGELOG.rst b/src/image_pipeline/depth_image_proc/CHANGELOG.rst
new file mode 100644
index 000000000..01f4c2d7a
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/CHANGELOG.rst
@@ -0,0 +1,223 @@
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Changelog for package depth_image_proc
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+4.0.0 (2022-12-24)
+------------------
+* [backport iron] support rgba8 and bgra8 encodings by skipping alpha channel (`#869 `_) (`#896 `_)
+ backport `#869 `_
+* [backport iron] ROS 2: Add option to use the RGB image timestamp for the registered depth image (`#872 `_) (`#894 `_)
+ backport `#872 `_
+* [backport Iron] Support MONO16 image encodings: point_cloud_xyz (`#868 `_) (`#882 `_)
+ backport Iron `#868 `_
+* [backport iron] ROS 2: depth_image_proc/point_cloud_xyzi_radial Add intensity conversion (copy) for float (`#867 `_) (`#880 `_)
+ Backport `#867 `_
+* allow use as component or node (`#858 `_)
+ Backport `#852 `_ to Iron
+* add myself as a maintainer (`#846 `_)
+* Depth image transport configure susbcribers (`#844 `_) (`#845 `_)
+* Updated depth_image_proc for ros2
+ Instantiated template for convertDepth, added options to register, and
+ changed register from a class loader to an RCLPP component.
+* Contributors: Alejandro Hernández Cordero, Michael Ferguson, ksommerkohrt
+
+3.0.1 (2022-12-04)
+------------------
+* Replace deprecated headers
+ Fixing compiler warnings.
+* Contributors: Jacob Perron
+
+3.0.0 (2022-04-29)
+------------------
+* Cleanup of depth_image_proc.
+* Fix linker error caused by templating in the conversions.cpp file (`#718 `_)
+* Port upsampling interpolation from `#363 `_ to ROS2 (`#692 `_)
+* Fix uncrustify errors
+* allow loading depth_image_proc::RegisterNode as a component
+* Replace deprecated geometry2 headers
+* Fixed typo in pointcloudxyz launch file
+* use unique_ptrs, remove unused code, add back in missing initMatrix call
+* add xyzrgb radial node
+* Use RCLCPP_WARN_THROTTLE (10 secs) to avoid terminal spam
+* Fix tiny error in comment
+* Warning instead of fatal error when frames are differents
+* revert a293252
+* Replace deprecated geometry2 headers
+* Add maintainer (`#667 `_)
+* move to hpp/cpp structure, create conversions file
+* Fix deprecation warning calling declare_parameter
+* Contributors: Chris Lalancette, Evan Flynn, Francisco Martin Rico, Francisco Martín Rico, Harshal Deshpande, Jacob Perron, Joe Schornak, Joseph Schornak, Joshua Whitley, Patrick Musau
+
+2.2.1 (2020-08-27)
+------------------
+* remove email blasts from steve macenski (`#596 `_)
+* [Foxy] Use ament_auto Macros (`#573 `_)
+ * Fixing version and maintainer problems in camera_calibration.
+ * Applying ament_auto macros to depth_image_proc.
+ * Cleaning up package.xml in image_pipeline.
+ * Applying ament_auto macros to image_proc.
+ * Applying ament_auto macros to image_publisher.
+ * Applying ament_auto macros to image_rotate.
+ * Applying ament_auto macros to image_view.
+ * Replacing some deprecated headers in image_view.
+ * Fixing some build warnings in image_view.
+ * Applying ament_auto macros to stereo_image_proc.
+ * Adding some linter tests to image_pipeline.
+ * Updating package URLs to point to ROS Index.
+* Add rclcpp and rclcpp_components dependencies to package.xml. (`#569 `_) (`#570 `_)
+ I noticed that these are listed in CMakeLists.txt but not in package.xml
+ and this is causing a build failure for the binary releases on
+ build.ros2.org:
+ http://build.ros2.org/view/Dbin_ubhf_uBhf/job/Dbin_uB64__depth_image_proc__ubuntu_bionic_amd64__binary/
+ Co-authored-by: Steven! Ragnarök
+* Contributors: Joshua Whitley, Steve Macenski
+
+2.2.0 (2020-07-27)
+------------------
+* Replacing deprecated header includes with new HPP versions. (`#566 `_)
+ * Replacing deprecated header includes with new HPP versions.
+ * CI: Switching to official Foxy Docker container.
+ * Fixing headers which don't work correctly.
+* Contributors: Joshua Whitley
+
+* make parameters work in depth_image_proc (`#544 `_)
+* update depth_image_proc components (`#543 `_)
+ * update depth_image_proc components
+ This makes them loadable with the rclcpp_components
+ interface. I've fully tested PointCloudXYZRGB and
+ ConvertMetric, my use case doesn't use the others.
+ I also lack a setup to test the launch files fully,
+ but ran them with the realsense commented out and
+ they appear to be OK.
+ * fix linting
+* Contributors: Michael Ferguson
+
+2.0.0 (2018-12-09)
+------------------
+* enable rclcpp_register_node_plugins (`#368 `_)
+* Port depth image proc on ROS2 (`#362 `_)
+* Initial ROS2 commit.
+* Contributors: Chris Ye, Michael Carroll
+
+1.12.23 (2018-05-10)
+--------------------
+
+1.12.22 (2017-12-08)
+--------------------
+
+1.12.21 (2017-11-05)
+--------------------
+* Fix C++11 compilation
+ This fixes `#292 `_ and `#291 `_
+* Contributors: Vincent Rabaud
+
+1.12.20 (2017-04-30)
+--------------------
+* Fix CMake warnings about Eigen.
+* Convert depth image metric from [m] to [mm]
+* address gcc6 build error
+ With gcc6, compiling fails with `stdlib.h: No such file or directory`,
+ as including '-isystem /usr/include' breaks with gcc6, cf.,
+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129.
+ This commit addresses this issue for this package in the same way
+ it was addressed in various other ROS packages. A list of related
+ commits and pull requests is at:
+ https://github.com/ros/rosdistro/issues/12783
+ Signed-off-by: Lukas Bulwahn
+* Contributors: Kentaro Wada, Lukas Bulwahn, Vincent Rabaud
+
+1.12.19 (2016-07-24)
+--------------------
+
+1.12.18 (2016-07-12)
+--------------------
+
+1.12.17 (2016-07-11)
+--------------------
+
+1.12.16 (2016-03-19)
+--------------------
+* check number of channels before the process
+* search minimum value with OpenCV
+* Use OpenCV to be faster
+* Add a feature for a depth image to crop foremost image
+* Contributors: Kenta Yonekura
+
+1.12.15 (2016-01-17)
+--------------------
+* Add option for exact time sync for point_cloud_xyzrgb
+* simplify OpenCV3 conversion
+* Contributors: Kentaro Wada, Vincent Rabaud
+
+1.12.14 (2015-07-22)
+--------------------
+
+1.12.13 (2015-04-06)
+--------------------
+* Add radial point cloud processors
+* Contributors: Hunter Laux
+
+1.12.12 (2014-12-31)
+--------------------
+* adds range_max
+* exports depth_conversions
+ with convert for xyz PC only
+* exports DepthTraits
+* Contributors: enriquefernandez
+
+1.12.11 (2014-10-26)
+--------------------
+
+1.12.10 (2014-09-28)
+--------------------
+
+1.12.9 (2014-09-21)
+-------------------
+* get code to compile with OpenCV3
+ fixes `#96 `_
+* Contributors: Vincent Rabaud
+
+1.12.8 (2014-08-19)
+-------------------
+
+1.12.6 (2014-07-27)
+-------------------
+* Add point_cloud_xyzi nodelet
+ This is for cameras that output depth and intensity images.
+ It's based on the point_cloud_xyzrgb nodelet.
+* Missing runtime dependency - eigen_conversions
+ `libdepth_image_proc` is missing this dependency at runtime
+ ```
+ > ldd libdepth_image_proc.so | grep eigen
+ libeigen_conversions.so => not found
+ ```
+ Which causes the following error on loading depth_image_proc:
+ ```
+ [ INFO] [1402564815.530736554]: /camera/rgb/camera_info -> /camera/rgb/camera_info
+ [ERROR] [1402564815.727176562]: Failed to load nodelet [/camera/depth_metric_rect] of type
+ [depth_image_proc/convert_metric]: Failed to load library /opt/ros/indigo/lib//libdepth_image_proc.so.
+ Make sure that you are calling the PLUGINLIB_EXPORT_CLASS macro in the library code, and that
+ names are consistent between this macro and your XML. Error string: Could not load library (Poco
+ exception = libeigen_conversions.so: cannot open shared object file: No such file or directory)
+ [FATAL] [1402564815.727410623]: Service call failed!
+ ```
+* Contributors: Daniel Stonier, Hunter Laux
+
+1.12.4 (2014-04-28)
+-------------------
+* depth_image_proc: fix missing symbols in nodelets
+* Contributors: Michael Ferguson
+
+1.12.3 (2014-04-12)
+-------------------
+
+1.12.2 (2014-04-08)
+-------------------
+
+1.12.1 (2014-04-06)
+-------------------
+* replace tf usage by tf2 usage
+
+1.12.0 (2014-04-04)
+-------------------
+* remove PCL dependency
diff --git a/src/image_pipeline/depth_image_proc/CMakeLists.txt b/src/image_pipeline/depth_image_proc/CMakeLists.txt
new file mode 100644
index 000000000..1a0dc88e1
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/CMakeLists.txt
@@ -0,0 +1,88 @@
+cmake_minimum_required(VERSION 3.5)
+project(depth_image_proc)
+
+# Default to C++14
+if(NOT CMAKE_CXX_STANDARD)
+ set(CMAKE_CXX_STANDARD 14)
+endif()
+
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ add_compile_options(-Wall -Wextra -Wpedantic)
+endif()
+
+find_package(ament_cmake_auto REQUIRED)
+ament_auto_find_build_dependencies()
+
+find_package(Eigen3 QUIET)
+if(NOT EIGEN3_FOUND)
+ find_package(Eigen REQUIRED)
+ set(EIGEN3_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS})
+ include_directories(include ${EIGEN3_INCLUDE_DIRS})
+endif()
+
+find_package(OpenCV REQUIRED)
+
+ament_auto_add_library(${PROJECT_NAME} SHARED
+ src/conversions.cpp
+ src/convert_metric.cpp
+ src/crop_foremost.cpp
+ src/disparity.cpp
+ src/point_cloud_xyz.cpp
+ src/point_cloud_xyzrgb.cpp
+ src/point_cloud_xyzi.cpp
+ src/point_cloud_xyz_radial.cpp
+ src/point_cloud_xyzi_radial.cpp
+ src/point_cloud_xyzrgb_radial.cpp
+ src/register.cpp
+)
+
+# Register individual components and also build standalone nodes for each
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::ConvertMetricNode"
+ EXECUTABLE convert_metric_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::CropForemostNode"
+ EXECUTABLE crop_foremost_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::DisparityNode"
+ EXECUTABLE disparity_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::PointCloudXyzNode"
+ EXECUTABLE point_cloud_xyz_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::PointCloudXyzrgbNode"
+ EXECUTABLE point_cloud_xyzrgb_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::PointCloudXyziNode"
+ EXECUTABLE point_cloud_xyzi_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::PointCloudXyzRadialNode"
+ EXECUTABLE point_cloud_xyz_radial_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::PointCloudXyziRadialNode"
+ EXECUTABLE point_cloud_xyzi_radial_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::PointCloudXyzrgbRadialNode"
+ EXECUTABLE point_cloud_xyzrgb_radial_node
+)
+rclcpp_components_register_node(${PROJECT_NAME}
+ PLUGIN "depth_image_proc::RegisterNode"
+ EXECUTABLE register_node
+)
+
+target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBRARIES})
+
+if(BUILD_TESTING)
+ find_package(ament_lint_auto REQUIRED)
+ ament_lint_auto_find_test_dependencies()
+endif()
+
+ament_auto_package(INSTALL_TO_SHARE launch)
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/conversions.hpp b/src/image_pipeline/depth_image_proc/include/depth_image_proc/conversions.hpp
new file mode 100644
index 000000000..eef392c63
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/conversions.hpp
@@ -0,0 +1,155 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEPTH_IMAGE_PROC__CONVERSIONS_HPP_
+#define DEPTH_IMAGE_PROC__CONVERSIONS_HPP_
+
+#include <limits>
+
+#include "image_geometry/pinhole_camera_model.hpp"
+
+#include <opencv2/core/core.hpp>
+
+#include <depth_image_proc/depth_traits.hpp>
+#include <sensor_msgs/msg/image.hpp>
+#include <sensor_msgs/msg/point_cloud2.hpp>
+#include <sensor_msgs/point_cloud2_iterator.hpp>
+
+namespace depth_image_proc
+{
+
+// Handles float or uint16 depths
+template<typename T>
+void convertDepth(
+ const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg,
+ sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg,
+ const image_geometry::PinholeCameraModel & model,
+ double range_max = 0.0)
+{
+ // Use correct principal point from calibration
+ float center_x = model.cx();
+ float center_y = model.cy();
+
+ // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
+ double unit_scaling = DepthTraits<T>::toMeters(T(1) );
+ float constant_x = unit_scaling / model.fx();
+ float constant_y = unit_scaling / model.fy();
+ float bad_point = std::numeric_limits<float>::quiet_NaN();
+
+ sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
+ sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
+ sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");
+ const T * depth_row = reinterpret_cast<const T *>(&depth_msg->data[0]);
+ int row_step = depth_msg->step / sizeof(T);
+ for (int v = 0; v < static_cast<int>(cloud_msg->height); ++v, depth_row += row_step) {
+ for (int u = 0; u < static_cast<int>(cloud_msg->width); ++u, ++iter_x, ++iter_y, ++iter_z) {
+ T depth = depth_row[u];
+
+ // Missing points denoted by NaNs
+ if (!DepthTraits<T>::valid(depth)) {
+ if (range_max != 0.0) {
+ depth = DepthTraits<T>::fromMeters(range_max);
+ } else {
+ *iter_x = *iter_y = *iter_z = bad_point;
+ continue;
+ }
+ }
+
+ // Fill in XYZ
+ *iter_x = (u - center_x) * depth * constant_x;
+ *iter_y = (v - center_y) * depth * constant_y;
+ *iter_z = DepthTraits<T>::toMeters(depth);
+ }
+ }
+}
+
+// Handles float or uint16 depths
+template<typename T>
+void convertDepthRadial(
+ const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg,
+ sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg,
+ cv::Mat & transform)
+{
+ // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
+ float bad_point = std::numeric_limits<float>::quiet_NaN();
+
+ sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
+ sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
+ sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");
+ const T * depth_row = reinterpret_cast<const T *>(&depth_msg->data[0]);
+ int row_step = depth_msg->step / sizeof(T);
+ for (int v = 0; v < static_cast<int>(cloud_msg->height); ++v, depth_row += row_step) {
+ for (int u = 0; u < static_cast<int>(cloud_msg->width); ++u, ++iter_x, ++iter_y, ++iter_z) {
+ T depth = depth_row[u];
+
+ // Missing points denoted by NaNs
+ if (!DepthTraits<T>::valid(depth)) {
+ *iter_x = *iter_y = *iter_z = bad_point;
+ continue;
+ }
+ const cv::Vec3f & cvPoint = transform.at<cv::Vec3f>(u, v) * DepthTraits<T>::toMeters(depth);
+ // Fill in XYZ
+ *iter_x = cvPoint(0);
+ *iter_y = cvPoint(1);
+ *iter_z = cvPoint(2);
+ }
+ }
+}
+
+// Handles float or uint16 depths
+template<typename T>
+void convertIntensity(
+ const sensor_msgs::msg::Image::ConstSharedPtr & intensity_msg,
+ sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg)
+{
+ sensor_msgs::PointCloud2Iterator<float> iter_i(*cloud_msg, "intensity");
+ const T * inten_row = reinterpret_cast<const T *>(&intensity_msg->data[0]);
+
+ const int i_row_step = intensity_msg->step / sizeof(T);
+ for (int v = 0; v < static_cast<int>(cloud_msg->height); ++v, inten_row += i_row_step) {
+ for (int u = 0; u < static_cast<int>(cloud_msg->width); ++u, ++iter_i) {
+ *iter_i = inten_row[u];
+ }
+ }
+}
+
+// Handles RGB8, BGR8, and MONO8
+void convertRgb(
+ const sensor_msgs::msg::Image::ConstSharedPtr & rgb_msg,
+ sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg,
+ int red_offset, int green_offset, int blue_offset, int color_step);
+
+cv::Mat initMatrix(cv::Mat cameraMatrix, cv::Mat distCoeffs, int width, int height, bool radial);
+
+} // namespace depth_image_proc
+
+#endif // DEPTH_IMAGE_PROC__CONVERSIONS_HPP_
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/depth_traits.hpp b/src/image_pipeline/depth_image_proc/include/depth_image_proc/depth_traits.hpp
new file mode 100644
index 000000000..7e3c39d3d
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/depth_traits.hpp
@@ -0,0 +1,75 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEPTH_IMAGE_PROC__DEPTH_TRAITS_HPP_
+#define DEPTH_IMAGE_PROC__DEPTH_TRAITS_HPP_
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <vector>
+
+namespace depth_image_proc
+{
+
+// Encapsulate differences between processing float and uint16_t depths
+template<typename T>
+struct DepthTraits {};
+
+template<>
+struct DepthTraits<uint16_t>
+{
+ static inline bool valid(uint16_t depth) {return depth != 0;}
+ static inline float toMeters(uint16_t depth) {return depth * 0.001f;} // originally mm
+ static inline uint16_t fromMeters(float depth) {return (depth * 1000.0f) + 0.5f;}
+ // Do nothing - already zero-filled
+ static inline void initializeBuffer(std::vector<uint8_t> & buffer) {(void) buffer;}
+};
+
+template<>
+struct DepthTraits<float>
+{
+ static inline bool valid(float depth) {return std::isfinite(depth);}
+ static inline float toMeters(float depth) {return depth;}
+ static inline float fromMeters(float depth) {return depth;}
+
+ static inline void initializeBuffer(std::vector<uint8_t> & buffer)
+ {
+ float * start = reinterpret_cast<float *>(&buffer[0]);
+ float * end = reinterpret_cast<float *>(&buffer[0] + buffer.size());
+ std::fill(start, end, std::numeric_limits<float>::quiet_NaN());
+ }
+};
+
+} // namespace depth_image_proc
+
+#endif // DEPTH_IMAGE_PROC__DEPTH_TRAITS_HPP_
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyz.hpp b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyz.hpp
new file mode 100644
index 000000000..397f4ab6d
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyz.hpp
@@ -0,0 +1,85 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEPTH_IMAGE_PROC__POINT_CLOUD_XYZ_HPP_
+#define DEPTH_IMAGE_PROC__POINT_CLOUD_XYZ_HPP_
+
+#include
+
+#include "depth_image_proc/visibility.h"
+#include "image_geometry/pinhole_camera_model.hpp"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+namespace depth_image_proc
+{
+
+namespace enc = sensor_msgs::image_encodings;
+
+class PointCloudXyzNode : public rclcpp::Node
+{
+public:
+ DEPTH_IMAGE_PROC_PUBLIC PointCloudXyzNode(const rclcpp::NodeOptions & options);
+
+private:
+ using PointCloud2 = sensor_msgs::msg::PointCloud2;
+ using Image = sensor_msgs::msg::Image;
+ using CameraInfo = sensor_msgs::msg::CameraInfo;
+
+ // Subscriptions
+ image_transport::CameraSubscriber sub_depth_;
+ int queue_size_;
+
+ // Publications
+ std::mutex connect_mutex_;
+ rclcpp::Publisher<PointCloud2>::SharedPtr pub_point_cloud_;
+
+ image_geometry::PinholeCameraModel model_;
+
+ void connectCb();
+
+ void depthCb(
+ const Image::ConstSharedPtr & depth_msg,
+ const CameraInfo::ConstSharedPtr & info_msg);
+};
+
+} // namespace depth_image_proc
+
+#endif // DEPTH_IMAGE_PROC__POINT_CLOUD_XYZ_HPP_
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyz_radial.hpp b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyz_radial.hpp
new file mode 100644
index 000000000..2cfc44e03
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyz_radial.hpp
@@ -0,0 +1,85 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEPTH_IMAGE_PROC__POINT_CLOUD_XYZ_RADIAL_HPP_
+#define DEPTH_IMAGE_PROC__POINT_CLOUD_XYZ_RADIAL_HPP_
+
+#include
+#include
+#include
+
+#include "depth_image_proc/visibility.h"
+#include "image_geometry/pinhole_camera_model.hpp"
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace depth_image_proc
+{
+
+class PointCloudXyzRadialNode : public rclcpp::Node
+{
+public:
+ DEPTH_IMAGE_PROC_PUBLIC PointCloudXyzRadialNode(const rclcpp::NodeOptions & options);
+
+private:
+ // Subscriptions
+ image_transport::CameraSubscriber sub_depth_;
+ int queue_size_;
+
+ // Publications
+ std::mutex connect_mutex_;
+ using PointCloud = sensor_msgs::msg::PointCloud2;
+ rclcpp::Publisher<PointCloud>::SharedPtr pub_point_cloud_;
+
+ std::vector<double> D_;
+ std::array<double, 9> K_;
+
+ uint32_t width_;
+ uint32_t height_;
+
+ cv::Mat transform_;
+
+ void connectCb();
+
+ void depthCb(
+ const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg,
+ const sensor_msgs::msg::CameraInfo::ConstSharedPtr & info_msg);
+};
+
+} // namespace depth_image_proc
+
+#endif // DEPTH_IMAGE_PROC__POINT_CLOUD_XYZ_RADIAL_HPP_
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzi.hpp b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzi.hpp
new file mode 100644
index 000000000..cd2cbd6ea
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzi.hpp
@@ -0,0 +1,90 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEPTH_IMAGE_PROC__POINT_CLOUD_XYZI_HPP_
+#define DEPTH_IMAGE_PROC__POINT_CLOUD_XYZI_HPP_
+
+#include
+#include
+
+#include "depth_image_proc/visibility.h"
+#include "image_geometry/pinhole_camera_model.hpp"
+#include "message_filters/subscriber.h"
+#include "message_filters/sync_policies/approximate_time.h"
+#include "message_filters/synchronizer.h"
+
+#include
+#include
+#include
+#include
+#include
+
+namespace depth_image_proc
+{
+
+namespace enc = sensor_msgs::image_encodings;
+
+class PointCloudXyziNode : public rclcpp::Node
+{
+public:
+ DEPTH_IMAGE_PROC_PUBLIC PointCloudXyziNode(const rclcpp::NodeOptions & options);
+
+private:
+ using Image = sensor_msgs::msg::Image;
+ using CameraInfo = sensor_msgs::msg::CameraInfo;
+ using PointCloud = sensor_msgs::msg::PointCloud2;
+
+ // Subscriptions
+ image_transport::SubscriberFilter sub_depth_, sub_intensity_;
+ message_filters::Subscriber<CameraInfo> sub_info_;
+ using SyncPolicy =
+ message_filters::sync_policies::ApproximateTime<Image, Image, CameraInfo>;
+ using Synchronizer = message_filters::Synchronizer<SyncPolicy>;
+ std::shared_ptr<Synchronizer> sync_;
+
+ // Publications
+ std::mutex connect_mutex_;
+ rclcpp::Publisher<PointCloud>::SharedPtr pub_point_cloud_;
+
+ image_geometry::PinholeCameraModel model_;
+
+ void connectCb();
+
+ void imageCb(
+ const Image::ConstSharedPtr & depth_msg,
+ const Image::ConstSharedPtr & intensity_msg,
+ const CameraInfo::ConstSharedPtr & info_msg);
+};
+
+} // namespace depth_image_proc
+
+#endif // DEPTH_IMAGE_PROC__POINT_CLOUD_XYZI_HPP_
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzi_radial.hpp b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzi_radial.hpp
new file mode 100644
index 000000000..98398f227
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzi_radial.hpp
@@ -0,0 +1,103 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEPTH_IMAGE_PROC__POINT_CLOUD_XYZI_RADIAL_HPP_
+#define DEPTH_IMAGE_PROC__POINT_CLOUD_XYZI_RADIAL_HPP_
+
+#include
+#include
+#include
+#include
+
+#include "depth_image_proc/visibility.h"
+#include "message_filters/subscriber.h"
+#include "message_filters/synchronizer.h"
+#include "message_filters/sync_policies/exact_time.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace depth_image_proc
+{
+
+using SyncPolicy =
+ message_filters::sync_policies::ExactTime<
+ sensor_msgs::msg::Image,
+ sensor_msgs::msg::Image,
+ sensor_msgs::msg::CameraInfo>;
+
+class PointCloudXyziRadialNode : public rclcpp::Node
+{
+public:
+ DEPTH_IMAGE_PROC_PUBLIC PointCloudXyziRadialNode(const rclcpp::NodeOptions & options);
+
+private:
+ using PointCloud = sensor_msgs::msg::PointCloud2;
+ using Image = sensor_msgs::msg::Image;
+ using CameraInfo = sensor_msgs::msg::CameraInfo;
+
+ // Subscriptions
+ image_transport::SubscriberFilter sub_depth_, sub_intensity_;
+ message_filters::Subscriber<CameraInfo> sub_info_;
+
+ int queue_size_;
+
+ // Publications
+ std::mutex connect_mutex_;
+ rclcpp::Publisher<PointCloud>::SharedPtr pub_point_cloud_;
+
+ using Synchronizer = message_filters::Synchronizer<SyncPolicy>;
+ std::shared_ptr<Synchronizer> sync_;
+
+ std::vector<double> D_;
+ std::array<double, 9> K_;
+
+ uint32_t width_;
+ uint32_t height_;
+
+ cv::Mat transform_;
+
+ void connectCb();
+
+ void imageCb(
+ const Image::ConstSharedPtr & depth_msg,
+ const Image::ConstSharedPtr & intensity_msg_in,
+ const CameraInfo::ConstSharedPtr & info_msg);
+};
+
+} // namespace depth_image_proc
+
+#endif // DEPTH_IMAGE_PROC__POINT_CLOUD_XYZI_RADIAL_HPP_
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb.hpp b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb.hpp
new file mode 100644
index 000000000..e18db0cfc
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb.hpp
@@ -0,0 +1,94 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEPTH_IMAGE_PROC__POINT_CLOUD_XYZRGB_HPP_
+#define DEPTH_IMAGE_PROC__POINT_CLOUD_XYZRGB_HPP_
+
+#include
+#include
+
+#include "depth_image_proc/visibility.h"
+#include "image_geometry/pinhole_camera_model.hpp"
+#include "message_filters/subscriber.h"
+#include "message_filters/synchronizer.h"
+#include "message_filters/sync_policies/exact_time.h"
+#include "message_filters/sync_policies/approximate_time.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace depth_image_proc
+{
+
+class PointCloudXyzrgbNode : public rclcpp::Node
+{
+public:
+ DEPTH_IMAGE_PROC_PUBLIC PointCloudXyzrgbNode(const rclcpp::NodeOptions & options);
+
+private:
+ using PointCloud2 = sensor_msgs::msg::PointCloud2;
+ using Image = sensor_msgs::msg::Image;
+ using CameraInfo = sensor_msgs::msg::CameraInfo;
+
+ // Subscriptions
+ image_transport::SubscriberFilter sub_depth_, sub_rgb_;
+ message_filters::Subscriber<CameraInfo> sub_info_;
+ using SyncPolicy =
+ message_filters::sync_policies::ApproximateTime<Image, Image, CameraInfo>;
+ using ExactSyncPolicy =
+ message_filters::sync_policies::ExactTime<Image, Image, CameraInfo>;
+ using Synchronizer = message_filters::Synchronizer<SyncPolicy>;
+ using ExactSynchronizer = message_filters::Synchronizer<ExactSyncPolicy>;
+ std::shared_ptr<Synchronizer> sync_;
+ std::shared_ptr<ExactSynchronizer> exact_sync_;
+
+ // Publications
+ std::mutex connect_mutex_;
+ rclcpp::Publisher<PointCloud2>::SharedPtr pub_point_cloud_;
+
+ image_geometry::PinholeCameraModel model_;
+
+ void connectCb();
+
+ void imageCb(
+ const Image::ConstSharedPtr & depth_msg,
+ const Image::ConstSharedPtr & rgb_msg,
+ const CameraInfo::ConstSharedPtr & info_msg);
+};
+
+} // namespace depth_image_proc
+
+#endif // DEPTH_IMAGE_PROC__POINT_CLOUD_XYZRGB_HPP_
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb_radial.hpp b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb_radial.hpp
new file mode 100644
index 000000000..ef07c31d5
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb_radial.hpp
@@ -0,0 +1,106 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEPTH_IMAGE_PROC__POINT_CLOUD_XYZRGB_RADIAL_HPP_
+#define DEPTH_IMAGE_PROC__POINT_CLOUD_XYZRGB_RADIAL_HPP_
+
+#include
+#include
+#include
+#include
+
+#include "depth_image_proc/visibility.h"
+#include "image_geometry/pinhole_camera_model.hpp"
+#include "message_filters/subscriber.h"
+#include "message_filters/synchronizer.h"
+#include "message_filters/sync_policies/exact_time.h"
+#include "message_filters/sync_policies/approximate_time.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace depth_image_proc
+{
+
+class PointCloudXyzrgbRadialNode : public rclcpp::Node
+{
+public:
+ DEPTH_IMAGE_PROC_PUBLIC PointCloudXyzrgbRadialNode(const rclcpp::NodeOptions & options);
+
+private:
+ using PointCloud2 = sensor_msgs::msg::PointCloud2;
+ using Image = sensor_msgs::msg::Image;
+ using CameraInfo = sensor_msgs::msg::CameraInfo;
+
+ // Subscriptions
+ image_transport::SubscriberFilter sub_depth_, sub_rgb_;
+ message_filters::Subscriber<CameraInfo> sub_info_;
+ using SyncPolicy =
+ message_filters::sync_policies::ApproximateTime<Image, Image, CameraInfo>;
+ using ExactSyncPolicy =
+ message_filters::sync_policies::ExactTime<Image, Image, CameraInfo>;
+ using Synchronizer = message_filters::Synchronizer<SyncPolicy>;
+ using ExactSynchronizer = message_filters::Synchronizer<ExactSyncPolicy>;
+ std::unique_ptr<Synchronizer> sync_;
+ std::unique_ptr<ExactSynchronizer> exact_sync_;
+
+ // Publications
+ std::mutex connect_mutex_;
+ rclcpp::Publisher<PointCloud2>::SharedPtr pub_point_cloud_;
+
+ std::vector<double> D_;
+ std::array<double, 9> K_;
+
+ uint32_t width_;
+ uint32_t height_;
+
+ cv::Mat transform_;
+
+ image_geometry::PinholeCameraModel model_;
+
+ void connectCb();
+
+ void imageCb(
+ const Image::ConstSharedPtr & depth_msg,
+ const Image::ConstSharedPtr & rgb_msg,
+ const CameraInfo::ConstSharedPtr & info_msg);
+};
+
+} // namespace depth_image_proc
+
+#endif // DEPTH_IMAGE_PROC__POINT_CLOUD_XYZRGB_RADIAL_HPP_
diff --git a/src/image_pipeline/depth_image_proc/include/depth_image_proc/visibility.h b/src/image_pipeline/depth_image_proc/include/depth_image_proc/visibility.h
new file mode 100644
index 000000000..43d4d047a
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/include/depth_image_proc/visibility.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2008, Willow Garage, Inc.
+// All rights reserved.
+//
+// Software License Agreement (BSD License 2.0)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of the Willow Garage nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+#ifndef DEPTH_IMAGE_PROC__VISIBILITY_H_
+#define DEPTH_IMAGE_PROC__VISIBILITY_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+// This logic was borrowed (then namespaced) from the examples on the gcc wiki:
+// https://gcc.gnu.org/wiki/Visibility
+
+#if defined _WIN32 || defined __CYGWIN__
+
+ #ifdef __GNUC__
+ #define DEPTH_IMAGE_PROC_EXPORT __attribute__ ((dllexport))
+ #define DEPTH_IMAGE_PROC_IMPORT __attribute__ ((dllimport))
+ #else
+ #define DEPTH_IMAGE_PROC_EXPORT __declspec(dllexport)
+ #define DEPTH_IMAGE_PROC_IMPORT __declspec(dllimport)
+ #endif
+
+ #ifdef DEPTH_IMAGE_PROC_DLL
+ #define DEPTH_IMAGE_PROC_PUBLIC DEPTH_IMAGE_PROC_EXPORT
+ #else
+ #define DEPTH_IMAGE_PROC_PUBLIC DEPTH_IMAGE_PROC_IMPORT
+ #endif
+
+ #define DEPTH_IMAGE_PROC_PUBLIC_TYPE DEPTH_IMAGE_PROC_PUBLIC
+
+ #define DEPTH_IMAGE_PROC_LOCAL
+
+#else
+
+ #define DEPTH_IMAGE_PROC_EXPORT __attribute__ ((visibility("default")))
+ #define DEPTH_IMAGE_PROC_IMPORT
+
+ #if __GNUC__ >= 4
+ #define DEPTH_IMAGE_PROC_PUBLIC __attribute__ ((visibility("default")))
+ #define DEPTH_IMAGE_PROC_LOCAL __attribute__ ((visibility("hidden")))
+ #else
+ #define DEPTH_IMAGE_PROC_PUBLIC
+ #define DEPTH_IMAGE_PROC_LOCAL
+ #endif
+
+ #define DEPTH_IMAGE_PROC_PUBLIC_TYPE
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // DEPTH_IMAGE_PROC__VISIBILITY_H_
diff --git a/src/image_pipeline/depth_image_proc/launch/convert_metric.launch.py b/src/image_pipeline/depth_image_proc/launch/convert_metric.launch.py
new file mode 100644
index 000000000..37e6c09f2
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/convert_metric.launch.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/convert_metric.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ # launch plugin through rclcpp_components container
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::ConvertMetricNode',
+ name='convert_metric_node',
+ remappings=[('image_raw', '/camera/depth/image_rect_raw'),
+ ('camera_info', '/camera/depth/camera_info'),
+ ('image', '/camera/depth/converted_image')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/crop_foremost.launch.py b/src/image_pipeline/depth_image_proc/launch/crop_foremost.launch.py
new file mode 100644
index 000000000..17485336f
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/crop_foremost.launch.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/crop_formost.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::CropForemostNode',
+ name='crop_foremost_node',
+ remappings=[('image_raw', '/camera/depth/image_rect_raw'),
+ ('camera_info', '/camera/depth/camera_info'),
+ ('image', '/camera/depth/converted_image')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/disparity.launch.py b/src/image_pipeline/depth_image_proc/launch/disparity.launch.py
new file mode 100644
index 000000000..acf2af812
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/disparity.launch.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/disparity.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ # We use the realsense camera for testing; realsense does not publish left and
+ # right image topics, so we remap to the depth image only for this interface test.
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::DisparityNode',
+ name='disparity_node',
+ remappings=[('left/image_rect', '/camera/depth/image_rect_raw'),
+ ('right/camera_info', '/camera/depth/camera_info'),
+ ('left/disparity', '/camera/left/disparity')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # TODO: rviz cannot display disparity (stereo_msgs);
+ # run stereo_view for display once image_view has been ported.
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/point_cloud_xyz.launch.py b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyz.launch.py
new file mode 100644
index 000000000..1428bcec1
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyz.launch.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/point_cloud_xyz.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ # launch plugin through rclcpp_components container
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::PointCloudXyzNode',
+ name='point_cloud_xyz_node',
+ remappings=[('image_rect', '/camera/depth/image_rect_raw'),
+ ('camera_info', '/camera/depth/camera_info'),
+ ('image', '/camera/depth/converted_image')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/point_cloud_xyz_radial.launch.py b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyz_radial.launch.py
new file mode 100644
index 000000000..d578a05ce
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyz_radial.launch.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/point_cloud_xyz_radial.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ # launch plugin through rclcpp_components container
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::PointCloudXyzRadialNode',
+ name='point_cloud_xyz_radial_node',
+ remappings=[('image_raw', '/camera/depth/image_rect_raw'),
+ ('camera_info', '/camera/depth/camera_info'),
+ ('image', '/camera/depth/converted_image')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzi.launch.py b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzi.launch.py
new file mode 100644
index 000000000..191b3f089
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzi.launch.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/point_cloud_xyzi.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ # TODO: the Realsense camera does not support intensity messages;
+ # use the color image instead of intensity, only for this interface test.
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::PointCloudXyziNode',
+ name='point_cloud_xyzi',
+ remappings=[('depth/image_rect', '/camera/aligned_depth_to_color/image_raw'),
+ ('intensity/image_rect', '/camera/color/image_raw'),
+ ('intensity/camera_info', '/camera/color/camera_info'),
+ ('points', '/camera/depth/points')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzi_radial.launch.py b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzi_radial.launch.py
new file mode 100644
index 000000000..721387748
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzi_radial.launch.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/point_cloud_xyzi.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ # TODO: the Realsense camera does not support intensity messages;
+ # use depth instead of intensity, only for this interface test.
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::PointCloudXyziRadialNode',
+ name='point_cloud_xyzi_radial_node',
+ remappings=[('depth/image_raw', '/camera/depth/image_rect_raw'),
+ ('intensity/image_raw', '/camera/depth/image_rect_raw'),
+ ('intensity/camera_info', '/camera/depth/camera_info'),
+ ('points', '/camera/depth/points')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzrgb.launch.py b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzrgb.launch.py
new file mode 100644
index 000000000..10b333ac2
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzrgb.launch.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/point_cloud_xyzrgb.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ # launch plugin through rclcpp_components container
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::PointCloudXyzrgbNode',
+ name='point_cloud_xyzrgb_node',
+ remappings=[('rgb/camera_info', '/camera/color/camera_info'),
+ ('rgb/image_rect_color', '/camera/color/image_raw'),
+ ('depth_registered/image_rect',
+ '/camera/aligned_depth_to_color/image_raw'),
+ ('points', '/camera/depth_registered/points')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzrgb_radial.launch.py b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzrgb_radial.launch.py
new file mode 100644
index 000000000..263e2397c
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/point_cloud_xyzrgb_radial.launch.py
@@ -0,0 +1,90 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# Set the 'rviz' launch argument to True (rviz:=True) to also launch RViz.
+# import os
+# from ament_index_python.packages import get_package_share_directory
+import os
+
+from ament_index_python.packages import get_package_share_directory
+import launch
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/point_cloud_xyzrgb.rviz')
+ return LaunchDescription([
+ launch.actions.DeclareLaunchArgument(
+ name='rviz', default_value='False',
+ description='Launch RViz for viewing point cloud xyzrgb radial data'
+ ),
+ launch_ros.actions.Node(
+ condition=launch.conditions.IfCondition(
+ launch.substitutions.LaunchConfiguration('rviz')
+ ),
+ package='rviz2',
+ executable='rviz2',
+ output='screen',
+ arguments=['--display-config', default_rviz]
+ ),
+ # launch plugin through rclcpp_components container
+ # make sure remapped topics are available
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::PointCloudXyzrgbRadialNode',
+ name='point_cloud_xyzrgb_node',
+ remappings=[('rgb/camera_info', '/camera/color/camera_info'),
+ ('rgb/image_rect_color', '/camera/color/image_raw'),
+ ('depth_registered/image_rect',
+ '/camera/aligned_depth_to_color/image_raw'),
+ ('points', '/camera/depth_registered/points')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ # launch_ros.actions.Node(
+ # package='rviz2', executable='rviz2', output='screen',
+ # arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/register.launch.py b/src/image_pipeline/depth_image_proc/launch/register.launch.py
new file mode 100644
index 000000000..0eab8878e
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/register.launch.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2008, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the Willow Garage nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depth_image_proc'),
+ 'launch', 'rviz/register.rviz')
+ return LaunchDescription([
+ # install realsense from https://github.com/intel/ros2_intel_realsense
+ launch_ros.actions.Node(
+ package='realsense_ros2_camera', node_executable='realsense_ros2_camera',
+ output='screen'),
+
+ # launch plugin through rclcpp_components container
+ launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::RegisterNode',
+ name='register_node',
+ remappings=[('depth/image_rect', '/camera/depth/image_rect_raw'),
+ ('depth/camera_info', '/camera/depth/camera_info'),
+ ('rgb/camera_info', '/camera/color/camera_info'),
+ ('depth_registered/image_rect',
+ '/camera/depth_registered/image_rect'),
+ ('depth_registered/camera_info',
+ '/camera/depth_registered/camera_info')]
+ ),
+ ],
+ output='screen',
+ ),
+
+ # rviz
+ launch_ros.actions.Node(
+ package='rviz2', node_executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz]),
+ ])
diff --git a/src/image_pipeline/depth_image_proc/launch/rviz/convert_metric.rviz b/src/image_pipeline/depth_image_proc/launch/rviz/convert_metric.rviz
new file mode 100644
index 000000000..d76e1637f
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/rviz/convert_metric.rviz
@@ -0,0 +1,136 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 0
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /image_raw1
+ - /image_raw1/Status1
+ - /converted_image1
+ - /converted_image1/Status1
+ Splitter Ratio: 0.5
+ Tree Height: 581
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Nav Goal1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: image_raw
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/image_rect_raw
+ Unreliable: false
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: converted_image
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/converted_image
+ Unreliable: false
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: camera_color_optical_frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic: /move_base_simple/goal
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/XYOrbit
+ Distance: 10
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 0
+ Y: 0
+ Z: 0
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: 0.7853981852531433
+ Target Frame:
+ Value: XYOrbit (rviz_default_plugins)
+ Yaw: 0.7853981852531433
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 1145
+ Hide Left Dock: false
+ Hide Right Dock: false
+ QMainWindow State: 000000ff00000000fd0000000400000000000002740000041ffc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000282000000c900fffffffb000000120069006d006100670065005f00720061007701000002c5000000c50000002800fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000001e0063006f006e007600650072007400650064005f0069006d0061006700650100000390000000cc0000002800fffffffb0000002c0072006500670069007300740065007200650064005f00640065007000740068005f0069006d00610067006500000003ad000000af0000000000000000000000010000010f0000041ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d0000041f000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000003ae0000041f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: false
+ Width: 1853
+ X: 67
+ Y: 27
+ converted_image:
+ collapsed: false
+ image_raw:
+ collapsed: false
diff --git a/src/image_pipeline/depth_image_proc/launch/rviz/crop_formost.rviz b/src/image_pipeline/depth_image_proc/launch/rviz/crop_formost.rviz
new file mode 100644
index 000000000..f1cf786e6
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/rviz/crop_formost.rviz
@@ -0,0 +1,134 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 0
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /image_raw1
+ - /crop_name1
+ Splitter Ratio: 0.5
+ Tree Height: 581
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Nav Goal1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: image_raw
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/image_rect_raw
+ Unreliable: false
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: crop_name
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/crop_image
+ Unreliable: false
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: camera_color_optical_frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic: /move_base_simple/goal
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/XYOrbit
+ Distance: 10
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 0
+ Y: 0
+ Z: 0
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: 0.7853981852531433
+ Target Frame:
+ Value: XYOrbit (rviz_default_plugins)
+ Yaw: 0.7853981852531433
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 1145
+ Hide Left Dock: false
+ Hide Right Dock: false
+ QMainWindow State: 000000ff00000000fd0000000400000000000002740000041ffc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000282000000c900fffffffb000000120069006d006100670065005f00720061007701000002c5000000c50000002800fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000001200630072006f0070005f006e0061006d00650100000390000000cc0000002800fffffffb0000002c0072006500670069007300740065007200650064005f00640065007000740068005f0069006d00610067006500000003ad000000af0000000000000000000000010000010f0000041ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d0000041f000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000003ae0000041f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: false
+ Width: 1853
+ X: 67
+ Y: 27
+ crop_name:
+ collapsed: false
+ image_raw:
+ collapsed: false
diff --git a/src/image_pipeline/depth_image_proc/launch/rviz/disparity.rviz b/src/image_pipeline/depth_image_proc/launch/rviz/disparity.rviz
new file mode 100644
index 000000000..6ffe90c5c
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/rviz/disparity.rviz
@@ -0,0 +1,134 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 0
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /fake left/image_rect1
+ - /fake left/disparity1
+ Splitter Ratio: 0.5
+ Tree Height: 581
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Nav Goal1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: fake left/image_rect
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/image_rect_raw
+ Unreliable: false
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: fake left/disparity
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/left/disparity
+ Unreliable: false
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: camera_color_optical_frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic: /move_base_simple/goal
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/XYOrbit
+ Distance: 10
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 0
+ Y: 0
+ Z: 0
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: 0.7853981852531433
+ Target Frame:
+ Value: XYOrbit (rviz_default_plugins)
+ Yaw: 0.7853981852531433
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 1145
+ Hide Left Dock: false
+ Hide Right Dock: false
+ QMainWindow State: 000000ff00000000fd0000000400000000000002740000041ffc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000282000000c900fffffffb0000002800660061006b00650020006c006500660074002f0069006d006100670065005f007200650063007401000002c5000000c50000002800fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000002600660061006b00650020006c006500660074002f0064006900730070006100720069007400790100000390000000cc0000002800fffffffb0000002c0072006500670069007300740065007200650064005f00640065007000740068005f0069006d00610067006500000003ad000000af0000000000000000000000010000010f0000041ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d0000041f000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000003ae0000041f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: false
+ Width: 1853
+ X: 67
+ Y: 27
+ fake left/disparity:
+ collapsed: false
+ fake left/image_rect:
+ collapsed: false
diff --git a/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyz.rviz b/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyz.rviz
new file mode 100644
index 000000000..ae68f102a
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyz.rviz
@@ -0,0 +1,151 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 0
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /image_rect1
+ - /PointCloud21
+ Splitter Ratio: 0.5
+ Tree Height: 608
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Nav Goal1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: image_rect
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/image_rect_raw
+ Unreliable: false
+ Value: true
+ - Alpha: 1
+ Autocompute Intensity Bounds: true
+ Autocompute Value Bounds:
+ Max Value: 10
+ Min Value: -10
+ Value: true
+ Axis: Z
+ Channel Name: intensity
+ Class: rviz_default_plugins/PointCloud2
+ Color: 255; 255; 255
+ Color Transformer: Intensity
+ Decay Time: 0
+ Enabled: true
+ Invert Rainbow: false
+ Max Color: 255; 255; 255
+ Max Intensity: 4096
+ Min Color: 0; 0; 0
+ Min Intensity: 0
+ Name: PointCloud2
+ Position Transformer: XYZ
+ Queue Size: 10
+ Selectable: true
+ Size (Pixels): 3
+ Size (m): 0.009999999776482582
+ Style: Flat Squares
+ Topic: /camera/depth/points
+ Unreliable: false
+ Use Fixed Frame: true
+ Use rainbow: true
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: camera_depth_optical_frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic: /move_base_simple/goal
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/Orbit
+ Distance: 2.125654935836792
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 0
+ Y: 0
+ Z: 0
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: -1.5597962141036987
+ Target Frame:
+ Value: Orbit (rviz)
+ Yaw: 4.718582630157471
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 1145
+ Hide Left Dock: false
+ Hide Right Dock: true
+ QMainWindow State: 000000ff00000000fd0000000400000000000002740000041ffc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000029d000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000160063006f006c006f0072005f0069006d0061006700650000000256000000a20000000000000000fb000000140069006d006100670065005f007200650063007401000002e00000017c0000002800fffffffb0000002c0072006500670069007300740065007200650064005f00640065007000740068005f0069006d006100670065000000038b000000d10000000000000000000000010000010f0000041ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000041f000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000004c30000041f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: true
+ Width: 1853
+ X: 67
+ Y: 27
+ image_rect:
+ collapsed: false
diff --git a/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyz_radial.rviz b/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyz_radial.rviz
new file mode 100644
index 000000000..ae68f102a
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyz_radial.rviz
@@ -0,0 +1,151 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 0
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /image_rect1
+ - /PointCloud21
+ Splitter Ratio: 0.5
+ Tree Height: 608
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Nav Goal1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: image_rect
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/image_rect_raw
+ Unreliable: false
+ Value: true
+ - Alpha: 1
+ Autocompute Intensity Bounds: true
+ Autocompute Value Bounds:
+ Max Value: 10
+ Min Value: -10
+ Value: true
+ Axis: Z
+ Channel Name: intensity
+ Class: rviz_default_plugins/PointCloud2
+ Color: 255; 255; 255
+ Color Transformer: Intensity
+ Decay Time: 0
+ Enabled: true
+ Invert Rainbow: false
+ Max Color: 255; 255; 255
+ Max Intensity: 4096
+ Min Color: 0; 0; 0
+ Min Intensity: 0
+ Name: PointCloud2
+ Position Transformer: XYZ
+ Queue Size: 10
+ Selectable: true
+ Size (Pixels): 3
+ Size (m): 0.009999999776482582
+ Style: Flat Squares
+ Topic: /camera/depth/points
+ Unreliable: false
+ Use Fixed Frame: true
+ Use rainbow: true
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: camera_depth_optical_frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic: /move_base_simple/goal
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/Orbit
+ Distance: 2.125654935836792
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 0
+ Y: 0
+ Z: 0
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: -1.5597962141036987
+ Target Frame:
+ Value: Orbit (rviz)
+ Yaw: 4.718582630157471
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 1145
+ Hide Left Dock: false
+ Hide Right Dock: true
+ QMainWindow State: 000000ff00000000fd0000000400000000000002740000041ffc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000029d000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000160063006f006c006f0072005f0069006d0061006700650000000256000000a20000000000000000fb000000140069006d006100670065005f007200650063007401000002e00000017c0000002800fffffffb0000002c0072006500670069007300740065007200650064005f00640065007000740068005f0069006d006100670065000000038b000000d10000000000000000000000010000010f0000041ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000041f000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000004c30000041f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: true
+ Width: 1853
+ X: 67
+ Y: 27
+ image_rect:
+ collapsed: false
diff --git a/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyzi.rviz b/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyzi.rviz
new file mode 100644
index 000000000..ee87ced30
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyzi.rviz
@@ -0,0 +1,151 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 0
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /image_rect1
+ - /PointCloud21
+ Splitter Ratio: 0.5
+ Tree Height: 608
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Nav Goal1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: image_rect
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/image_rect_raw
+ Unreliable: false
+ Value: true
+ - Alpha: 1
+ Autocompute Intensity Bounds: true
+ Autocompute Value Bounds:
+ Max Value: 10
+ Min Value: -10
+ Value: true
+ Axis: Z
+ Channel Name: intensity
+ Class: rviz_default_plugins/PointCloud2
+ Color: 255; 255; 255
+ Color Transformer: Intensity
+ Decay Time: 0
+ Enabled: true
+ Invert Rainbow: false
+ Max Color: 255; 255; 255
+ Max Intensity: 255
+ Min Color: 0; 0; 0
+ Min Intensity: 2
+ Name: PointCloud2
+ Position Transformer: XYZ
+ Queue Size: 10
+ Selectable: true
+ Size (Pixels): 3
+ Size (m): 0.009999999776482582
+ Style: Flat Squares
+ Topic: /camera/depth/points
+ Unreliable: false
+ Use Fixed Frame: true
+ Use rainbow: true
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: camera_depth_optical_frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic: /move_base_simple/goal
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/Orbit
+ Distance: 2.125654935836792
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 0
+ Y: 0
+ Z: 0
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: -1.5597962141036987
+ Target Frame:
+ Value: Orbit (rviz)
+ Yaw: 4.718582630157471
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 1145
+ Hide Left Dock: false
+ Hide Right Dock: true
+ QMainWindow State: 000000ff00000000fd0000000400000000000002740000041ffc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000029d000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000160063006f006c006f0072005f0069006d0061006700650000000256000000a20000000000000000fb000000140069006d006100670065005f007200650063007401000002e00000017c0000002800fffffffb0000002c0072006500670069007300740065007200650064005f00640065007000740068005f0069006d006100670065000000038b000000d10000000000000000000000010000010f0000041ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000041f000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000004c30000041f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: true
+ Width: 1853
+ X: 67
+ Y: 27
+ image_rect:
+ collapsed: false
diff --git a/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyzrgb.rviz b/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyzrgb.rviz
new file mode 100644
index 000000000..daba8ef8e
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/rviz/point_cloud_xyzrgb.rviz
@@ -0,0 +1,179 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 78
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /color_image1
+ - /depth_image1
+ - /registered_depth_image1
+ - /PointCloud21
+ Splitter Ratio: 0.5
+ Tree Height: 392
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Nav Goal1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: color_image
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/color/image_raw
+ Unreliable: false
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: depth_image
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/image_rect_raw
+ Unreliable: false
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: registered_depth_image
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/aligned_depth_to_color/image_raw
+ Unreliable: false
+ Value: true
+ - Alpha: 1
+ Autocompute Intensity Bounds: true
+ Autocompute Value Bounds:
+ Max Value: 10
+ Min Value: -10
+ Value: true
+ Axis: Z
+ Channel Name: intensity
+ Class: rviz_default_plugins/PointCloud2
+ Color: 255; 255; 255
+ Color Transformer: RGB8
+ Decay Time: 0
+ Enabled: true
+ Invert Rainbow: false
+ Max Color: 255; 255; 255
+ Max Intensity: 4096
+ Min Color: 0; 0; 0
+ Min Intensity: 0
+ Name: PointCloud2
+ Position Transformer: XYZ
+ Queue Size: 10
+ Selectable: true
+ Size (Pixels): 3
+ Size (m): 0.009999999776482582
+ Style: Flat Squares
+ Topic: /camera/depth_registered/points
+ Unreliable: false
+ Use Fixed Frame: true
+ Use rainbow: true
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: camera_color_optical_frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic: /move_base_simple/goal
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/Orbit
+ Distance: 1.8979061841964722
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 0
+ Y: 0
+ Z: 0
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: -1.5597962141036987
+ Target Frame:
+ Value: Orbit (rviz)
+ Yaw: 4.718582630157471
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 1145
+ Hide Left Dock: false
+ Hide Right Dock: true
+ QMainWindow State: 000000ff00000000fd0000000400000000000002740000041ffc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000213000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000160063006f006c006f0072005f0069006d0061006700650100000256000000a20000002800fffffffb0000001600640065007000740068005f0069006d00610067006501000002fe000000a90000002800fffffffb0000002c0072006500670069007300740065007200650064005f00640065007000740068005f0069006d00610067006501000003ad000000af0000002800ffffff000000010000010f0000041ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000041f000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000004c30000041f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: true
+ Width: 1853
+ X: 67
+ Y: 27
+ color_image:
+ collapsed: false
+ depth_image:
+ collapsed: false
+ registered_depth_image:
+ collapsed: false
diff --git a/src/image_pipeline/depth_image_proc/launch/rviz/register.rviz b/src/image_pipeline/depth_image_proc/launch/rviz/register.rviz
new file mode 100644
index 000000000..6191de026
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/launch/rviz/register.rviz
@@ -0,0 +1,148 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 0
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /depth/image_rect1
+ - /depth_registered1
+ - /color image1
+ Splitter Ratio: 0.5
+ Tree Height: 366
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Nav Goal1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: depth/image_rect
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth/image_rect_raw
+ Unreliable: false
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: depth_registered
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/depth_registered/image_rect
+ Unreliable: false
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: color image
+ Normalize Range: true
+ Queue Size: 10
+ Topic: /camera/color/image_raw
+ Unreliable: false
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: camera_depth_optical_frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic: /move_base_simple/goal
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/Orbit
+ Distance: 2.125654935836792
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 0
+ Y: 0
+ Z: 0
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: -1.5597962141036987
+ Target Frame:
+ Value: Orbit (rviz)
+ Yaw: 4.723582744598389
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 1145
+ Hide Left Dock: false
+ Hide Right Dock: true
+ QMainWindow State: 000000ff00000000fd0000000400000000000002740000041ffc020000000dfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000001ab000000c900fffffffb0000002000640065007000740068002f0069006d006100670065005f007200650063007401000001ee000000c70000002800fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000160063006f006c006f0072005f0069006d0061006700650000000256000000a20000000000000000fb000000160063006f006c006f007200200069006d00610067006501000002bb000000cb0000002800fffffffb0000002c0072006500670069007300740065007200650064005f00640065007000740068005f0069006d006100670065000000038b000000d10000000000000000fb0000002000640065007000740068005f0072006500670069007300740065007200650064010000038c000000d00000002800ffffff000000010000010f0000041ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000041f000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000004c30000041f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: true
+ Width: 1853
+ X: 67
+ Y: 27
+ color image:
+ collapsed: false
+ depth/image_rect:
+ collapsed: false
+ depth_registered:
+ collapsed: false
diff --git a/src/image_pipeline/depth_image_proc/mainpage.dox b/src/image_pipeline/depth_image_proc/mainpage.dox
new file mode 100644
index 000000000..6b0414bd0
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/mainpage.dox
@@ -0,0 +1,26 @@
+/**
+\mainpage
+\htmlinclude manifest.html
+
+\b depth_image_proc is ...
+
+
+
+
+\section codeapi Code API
+
+
+
+
+*/
diff --git a/src/image_pipeline/depth_image_proc/package.xml b/src/image_pipeline/depth_image_proc/package.xml
new file mode 100644
index 000000000..1ee00d2a9
--- /dev/null
+++ b/src/image_pipeline/depth_image_proc/package.xml
@@ -0,0 +1,50 @@
+
+
+
+ depth_image_proc
+ 4.0.0
+
+
+ Contains components for processing depth images such as those
+ produced by OpenNI camera. Functions include creating disparity
+ images and point clouds, as well as registering (reprojecting)
+ a depth image into another camera frame.
+
+
+
+ Vincent Rabaud
+ Joshua Whitley
+ Chris Ye
+ Jacob Perron
+ Michael Ferguson
+
+ BSD
+ https://index.ros.org/p/depth_image_proc/github-ros-perception-image_pipeline/
+ https://github.com/ros-perception/image_pipeline/issues
+ https://github.com/ros-perception/image_pipeline
+ Patrick Mihelich
+
+ ament_cmake_auto
+
+ class_loader
+
+ cv_bridge
+ image_geometry
+ image_transport
+ libopencv-dev
+ message_filters
+ rclcpp
+ rclcpp_components
+ sensor_msgs
+ stereo_msgs
+ tf2
+ tf2_eigen
+ tf2_ros
+
+ ament_lint_auto
+ ament_lint_common
+
+
+