diff --git a/common/autoware_universe_utils/CMakeLists.txt b/common/autoware_universe_utils/CMakeLists.txt index f295662e69091..667018eb81673 100644 --- a/common/autoware_universe_utils/CMakeLists.txt +++ b/common/autoware_universe_utils/CMakeLists.txt @@ -23,6 +23,7 @@ ament_auto_add_library(autoware_universe_utils SHARED src/math/trigonometry.cpp src/ros/msg_operation.cpp src/ros/marker_helper.cpp + src/ros/managed_transform_buffer.cpp src/ros/logger_level_configure.cpp src/system/backtrace.cpp src/system/time_keeper.cpp diff --git a/common/autoware_universe_utils/include/autoware/universe_utils/ros/managed_transform_buffer.hpp b/common/autoware_universe_utils/include/autoware/universe_utils/ros/managed_transform_buffer.hpp index 9d96dc9e6b133..ee08dd256704f 100644 --- a/common/autoware_universe_utils/include/autoware/universe_utils/ros/managed_transform_buffer.hpp +++ b/common/autoware_universe_utils/include/autoware/universe_utils/ros/managed_transform_buffer.hpp @@ -15,21 +15,23 @@ #ifndef AUTOWARE__UNIVERSE_UTILS__ROS__MANAGED_TRANSFORM_BUFFER_HPP_ #define AUTOWARE__UNIVERSE_UTILS__ROS__MANAGED_TRANSFORM_BUFFER_HPP_ -#include "autoware/universe_utils/ros/transform_listener.hpp" - #include -#include #include -#include +#include +#include -#include +#include +#include +#include #include #include #include +#include #include -#include +#include +#include #include #include @@ -49,7 +51,6 @@ struct hash> namespace autoware::universe_utils { -using std::chrono_literals::operator""ms; using Key = std::pair; struct PairEqual { @@ -58,169 +59,204 @@ struct PairEqual return p1.first == p2.first && p1.second == p2.second; } }; -using TFMap = std::unordered_map, PairEqual>; -constexpr std::array warn_frames = {"map", "odom", "world"}; - -class ManagedTransformBuffer +struct TreeNode { -public: - explicit ManagedTransformBuffer(rclcpp::Node * node, const bool & has_static_tf_only) - : node_(node) + TreeNode() : is_static(false) {} + TreeNode(std::string p_parent, const bool p_is_static) + : parent(std::move(p_parent)), is_static(p_is_static) { - if (has_static_tf_only) { - get_transform_ = [this]( - const std::string & target_frame, const std::string & source_frame, - Eigen::Matrix4f & eigen_transform) { - return getStaticTransform(target_frame, source_frame, eigen_transform); - }; - } else { - tf_listener_ = std::make_unique(node); - get_transform_ = [this]( - const std::string & target_frame, const std::string & source_frame, - Eigen::Matrix4f & eigen_transform) { - return getDynamicTransform(target_frame, source_frame, eigen_transform); - }; - } } + std::string parent; + bool is_static; +}; - bool getTransform( - const std::string & target_frame, const std::string & source_frame, - Eigen::Matrix4f & eigen_transform) +struct TraverseResult +{ + TraverseResult() : success(false), is_static(false) {} + TraverseResult(const bool p_success, const bool p_is_static) + : success(p_success), is_static(p_is_static) { - return get_transform_(target_frame, source_frame, eigen_transform); } + bool success; + bool is_static; +}; +using std::chrono_literals::operator""ms; +using geometry_msgs::msg::TransformStamped; +using TFMap = std::unordered_map, PairEqual>; +using TreeMap = std::unordered_map; +/** + * @brief A managed TF buffer that handles listener node lifetime. This buffer triggers listener + * only for first occurrence of frames pair. After that, the local buffer is used for storing + * static transforms. 
If a dynamic transform is detected, the listener is switched to dynamic mode + * and acts as a regular TF buffer. + */ +class ManagedTransformBuffer +{ +public: /** - * Transforms a point cloud from one frame to another. + * @brief Construct a new Managed Transform Buffer object * - * @param target_frame The target frame to transform the point cloud to. - * @param cloud_in The input point cloud to be transformed. - * @param cloud_out The transformed point cloud. - * @return True if the transformation is successful, false otherwise. + * @param[in] node the node to use for the transform buffer + * @param[in] managed whether managed buffer feature should be used + */ + explicit ManagedTransformBuffer(rclcpp::Node * node, bool managed = true); + + /** @brief Destroy the Managed Transform Buffer object */ + ~ManagedTransformBuffer(); + + /** + * @brief Get the transform between two frames by frame ID. + * + * @tparam T the type of the transformation to retrieve + * @param[in] target_frame the frame to which data should be transformed + * @param[in] source_frame the frame where the data originated + * @param[in] time the time at which the value of the transform is desired (0 will get the latest) + * @param[in] timeout how long to block before failing + * @return an optional containing the transform if successful, or empty if not + * + * @overload getTransform + * @return An optional containing the TransformStamped if successful, or empty if not + * + * @overload getTransform + * @return An optional containing the Eigen::Matrix4f if successful, or empty if not + * + * @overload getTransform + * @return An optional containing the tf2::Transform if successful, or empty if not + */ + template + std::enable_if_t, std::optional> + getTransform( + const std::string & target_frame, const std::string & source_frame, + const rclcpp::Time & time = rclcpp::Time(0), + const rclcpp::Duration & timeout = default_timeout); + + template + std::enable_if_t, std::optional> getTransform( + const std::string & target_frame, const std::string & source_frame, + const rclcpp::Time & time = rclcpp::Time(0), + const rclcpp::Duration & timeout = default_timeout); + + template + std::enable_if_t, std::optional> getTransform( + const std::string & target_frame, const std::string & source_frame, + const rclcpp::Time & time = rclcpp::Time(0), + const rclcpp::Duration & timeout = default_timeout); + + /** + * @brief Transforms a point cloud from one frame to another. 
+ * + * @param[in] target_frame the target TF frame + * @param[in] cloud_in the input point cloud + * @param[out] cloud_out the resultant output point cloud + * @param[in] time the time at which the value of the transform is desired (0 will get the latest) + * @param[in] timeout how long to block before failing + * @return true if the transformation is successful, false otherwise */ bool transformPointcloud( const std::string & target_frame, const sensor_msgs::msg::PointCloud2 & cloud_in, - sensor_msgs::msg::PointCloud2 & cloud_out) - { - if ( - pcl::getFieldIndex(cloud_in, "x") == -1 || pcl::getFieldIndex(cloud_in, "y") == -1 || - pcl::getFieldIndex(cloud_in, "z") == -1) { - RCLCPP_ERROR(node_->get_logger(), "Input pointcloud does not have xyz fields"); - return false; - } - if (target_frame == cloud_in.header.frame_id) { - cloud_out = cloud_in; - return true; - } - Eigen::Matrix4f eigen_transform; - if (!getTransform(target_frame, cloud_in.header.frame_id, eigen_transform)) { - return false; - } - pcl_ros::transformPointCloud(eigen_transform, cloud_in, cloud_out); - cloud_out.header.frame_id = target_frame; - return true; - } + sensor_msgs::msg::PointCloud2 & cloud_out, const rclcpp::Time & time = rclcpp::Time(0), + const rclcpp::Duration & timeout = default_timeout); private: - /** - * @brief Retrieves a transform between two static frames. + /** @brief Initialize TF listener used for storing transforms */ + void activateListener(); + + /** @brief Initialize local TF listener used for building TF tree */ + void activateLocalListener(); + + /** @brief Deactivate TF listener */ + void deactivateListener(); + + /** @brief Deactivate local TF listener */ + void deactivateLocalListener(); + + /** @brief Callback for TF messages * - * This function attempts to retrieve a transform between the target frame and the source frame. - * If success, the transform matrix is set to the output parameter and the function returns true. - * Otherwise, transform matrix is set to identity and the function returns false. Transform - * Listener is destroyed after function call. + * @param[in] msg the TF message + * @param[in] is_static whether the TF topic refers to static transforms + */ + void tfCallback(const tf2_msgs::msg::TFMessage::SharedPtr msg, const bool is_static); + + /** @brief Default ROS-ish lookupTransform trigger. * - * @param target_frame The target frame. - * @param source_frame The source frame. - * @param eigen_transform The output Eigen transform matrix. It is set to the identity if the - * transform is not found. - * @return True if the transform was successfully retrieved, false otherwise. + * @param[in] target_frame the frame to which data should be transformed + * @param[in] source_frame the frame where the data originated + * @param[in] time the time at which the value of the transform is desired (0 will get the latest) + * @param[in] timeout how long to block before failing + * @return an optional containing the transform if successful, or empty if not */ - bool getStaticTransform( + std::optional lookupTransform( const std::string & target_frame, const std::string & source_frame, - Eigen::Matrix4f & eigen_transform) - { - if ( - std::find(warn_frames.begin(), warn_frames.end(), target_frame) != warn_frames.end() || - std::find(warn_frames.begin(), warn_frames.end(), source_frame) != warn_frames.end()) { - RCLCPP_WARN( - node_->get_logger(), "Using %s -> %s transform. 
This may not be a static transform.", - target_frame.c_str(), source_frame.c_str()); - } - - auto key = std::make_pair(target_frame, source_frame); - auto key_inv = std::make_pair(source_frame, target_frame); - - // Check if the transform is already in the buffer - auto it = buffer_.find(key); - if (it != buffer_.end()) { - eigen_transform = it->second; - return true; - } - - // Check if the inverse transform is already in the buffer - auto it_inv = buffer_.find(key_inv); - if (it_inv != buffer_.end()) { - eigen_transform = it_inv->second.inverse(); - buffer_[key] = eigen_transform; - return true; - } - - // Check if transform is needed - if (target_frame == source_frame) { - eigen_transform = Eigen::Matrix4f::Identity(); - buffer_[key] = eigen_transform; - return true; - } - - // Get the transform from the TF tree - tf_listener_ = std::make_unique(node_); - auto tf = tf_listener_->getTransform( - target_frame, source_frame, rclcpp::Time(0), rclcpp::Duration(1000ms)); - tf_listener_.reset(); - RCLCPP_DEBUG( - node_->get_logger(), "Trying to enqueue %s -> %s transform to static TFs buffer...", - target_frame.c_str(), source_frame.c_str()); - if (tf == nullptr) { - eigen_transform = Eigen::Matrix4f::Identity(); - return false; - } - pcl_ros::transformAsMatrix(*tf, eigen_transform); - buffer_[key] = eigen_transform; - return true; - } + const rclcpp::Time & time = rclcpp::Time(0), + const rclcpp::Duration & timeout = default_timeout) const; - /** @brief Retrieves a transform between two dynamic frames. + /** @brief Traverse TF tree built by local TF listener. * - * This function attempts to retrieve a transform between the target frame and the source frame. - * If successful, the transformation matrix is assigned to the output parameter, and the function - * returns true. Otherwise, the transformation matrix is set to the identity and the function - * returns false. + * @param[in] target_frame the frame to which data should be transformed + * @param[in] source_frame the frame where the data originated + * @param[in] timeout how long to block before failing + * @return a traverse result indicating if the transform is possible and if it is static + */ + TraverseResult traverseTree( + const std::string & target_frame, const std::string & source_frame, + const rclcpp::Duration & timeout = default_timeout); + + /** @brief Get a dynamic transform from the TF buffer. * - * @param target_frame The target frame. - * @param source_frame The source frame. - * @param eigen_transform The output Eigen transformation matrix. It is set to the identity if the - * transform is not found. - * @return True if the transform was successfully retrieved, false otherwise. 
+ * @param[in] target_frame the frame to which data should be transformed + * @param[in] source_frame the frame where the data originated + * @param[in] time the time at which the value of the transform is desired (0 will get the latest) + * @param[in] timeout how long to block before failing + * @return an optional containing the transform if successful, or empty if not */ - bool getDynamicTransform( + std::optional getDynamicTransform( const std::string & target_frame, const std::string & source_frame, - Eigen::Matrix4f & eigen_transform) - { - auto tf = tf_listener_->getTransform( - target_frame, source_frame, rclcpp::Time(0), rclcpp::Duration(1000ms)); - if (tf == nullptr) { - eigen_transform = Eigen::Matrix4f::Identity(); - return false; - } - pcl_ros::transformAsMatrix(*tf, eigen_transform); - return true; - } + const rclcpp::Time & time = rclcpp::Time(0), + const rclcpp::Duration & timeout = default_timeout); + + /** @brief Get a static transform from local TF buffer. + * + * @param[in] target_frame the frame to which data should be transformed + * @param[in] source_frame the frame where the data originated + * @return an optional containing the transform if successful, or empty if not + */ + std::optional getStaticTransform( + const std::string & target_frame, const std::string & source_frame); + + /** @brief Get an unknown (static or dynamic) transform. + * + * @param[in] target_frame the frame to which data should be transformed + * @param[in] source_frame the frame where the data originated + * @param[in] time the time at which the value of the transform is desired (0 will get the latest) + * @param[in] timeout how long to block before failing + * @return an optional containing the transform if successful, or empty if not + */ + std::optional getUnknownTransform( + const std::string & target_frame, const std::string & source_frame, + const rclcpp::Time & time = rclcpp::Time(0), + const rclcpp::Duration & timeout = default_timeout); - TFMap buffer_; + rclcpp::CallbackGroup::SharedPtr callback_group_{nullptr}; rclcpp::Node * const node_; - std::unique_ptr tf_listener_; - std::function get_transform_; + rclcpp::Node::SharedPtr managed_listener_node_{nullptr}; + rclcpp::NodeOptions options_; + rclcpp::Subscription::SharedPtr tf_static_sub_{nullptr}; + rclcpp::Subscription::SharedPtr tf_sub_{nullptr}; + rclcpp::SubscriptionOptionsWithAllocator> tf_options_; + rclcpp::SubscriptionOptionsWithAllocator> tf_static_options_; + rclcpp::executors::SingleThreadedExecutor::SharedPtr executor_{nullptr}; + std::function( + const std::string &, const std::string &, const rclcpp::Time &, const rclcpp::Duration &)> + get_transform_; + std::function cb_; + std::function cb_static_; + std::unique_ptr dedicated_listener_thread_; + std::unique_ptr tf_buffer_; + std::unique_ptr tf_listener_; + std::unique_ptr static_tf_buffer_; + std::unique_ptr tf_tree_; + static std::chrono::milliseconds default_timeout; }; } // namespace autoware::universe_utils diff --git a/common/autoware_universe_utils/src/ros/managed_transform_buffer.cpp b/common/autoware_universe_utils/src/ros/managed_transform_buffer.cpp new file mode 100644 index 0000000000000..5d695c12c32ca --- /dev/null +++ b/common/autoware_universe_utils/src/ros/managed_transform_buffer.cpp @@ -0,0 +1,349 @@ +// Copyright 2024 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "autoware/universe_utils/ros/managed_transform_buffer.hpp" + +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace autoware::universe_utils +{ + +std::chrono::milliseconds ManagedTransformBuffer::default_timeout = 10ms; + +ManagedTransformBuffer::ManagedTransformBuffer(rclcpp::Node * node, bool managed) : node_(node) +{ + get_transform_ = [this, managed]( + const std::string & target_frame, const std::string & source_frame, + const rclcpp::Time & time, + const rclcpp::Duration & timeout) -> std::optional { + if (managed) { + return getUnknownTransform(target_frame, source_frame, time, timeout); + } + return getDynamicTransform(target_frame, source_frame, time, timeout); + }; + + static_tf_buffer_ = std::make_unique(); + tf_tree_ = std::make_unique(); + tf_buffer_ = std::make_unique(node_->get_clock()); + auto timer_interface = std::make_shared( + node_->get_node_base_interface(), node_->get_node_timers_interface()); + tf_buffer_->setCreateTimerInterface(timer_interface); + tf_options_ = tf2_ros::detail::get_default_transform_listener_sub_options(); + tf_static_options_ = tf2_ros::detail::get_default_transform_listener_static_sub_options(); + cb_ = std::bind(&ManagedTransformBuffer::tfCallback, this, std::placeholders::_1, false); + cb_static_ = std::bind(&ManagedTransformBuffer::tfCallback, this, std::placeholders::_1, true); + std::stringstream sstream; + sstream << "managed_tf_listener_impl_" << std::hex << reinterpret_cast(this); + options_.arguments({"--ros-args", "-r", "__node:=" + std::string(sstream.str())}); + options_.start_parameter_event_publisher(false); + options_.start_parameter_services(false); +} + +ManagedTransformBuffer::~ManagedTransformBuffer() +{ + deactivateListener(); + deactivateLocalListener(); +} + +template <> +std::optional ManagedTransformBuffer::getTransform( + const std::string & target_frame, const std::string & source_frame, const rclcpp::Time & time, + const rclcpp::Duration & timeout) +{ + return get_transform_(target_frame, source_frame, time, timeout); +} + +template <> +std::optional ManagedTransformBuffer::getTransform( + const std::string & target_frame, const std::string & source_frame, const rclcpp::Time & time, + const rclcpp::Duration & timeout) +{ + auto tf = get_transform_(target_frame, source_frame, time, timeout); + if (!tf.has_value()) { + return std::nullopt; + } + Eigen::Matrix4f eigen_transform; + pcl_ros::transformAsMatrix(tf.value(), eigen_transform); + return std::make_optional(eigen_transform); +} + +template <> +std::optional ManagedTransformBuffer::getTransform( + const std::string & target_frame, const std::string & source_frame, const rclcpp::Time & time, + const rclcpp::Duration & timeout) +{ + auto tf = get_transform_(target_frame, source_frame, time, timeout); + if (!tf.has_value()) { + return std::nullopt; + } + tf2::Transform tf2_transform; + tf2::fromMsg(tf.value().transform, tf2_transform); + return std::make_optional(tf2_transform); +} + +bool ManagedTransformBuffer::transformPointcloud( + const std::string & 
target_frame, const sensor_msgs::msg::PointCloud2 & cloud_in, + sensor_msgs::msg::PointCloud2 & cloud_out, const rclcpp::Time & time, + const rclcpp::Duration & timeout) +{ + if ( + pcl::getFieldIndex(cloud_in, "x") == -1 || pcl::getFieldIndex(cloud_in, "y") == -1 || + pcl::getFieldIndex(cloud_in, "z") == -1) { + RCLCPP_ERROR(node_->get_logger(), "Input pointcloud does not have xyz fields"); + return false; + } + if (target_frame == cloud_in.header.frame_id) { + cloud_out = cloud_in; + return true; + } + auto eigen_transform = + getTransform(target_frame, cloud_in.header.frame_id, time, timeout); + if (!eigen_transform.has_value()) { + return false; + } + pcl_ros::transformPointCloud(eigen_transform.value(), cloud_in, cloud_out); + cloud_out.header.frame_id = target_frame; + return true; +} + +void ManagedTransformBuffer::activateListener() +{ + tf_listener_ = std::make_unique(*tf_buffer_); +} + +void ManagedTransformBuffer::activateLocalListener() +{ + managed_listener_node_ = rclcpp::Node::make_unique("_", options_); + callback_group_ = managed_listener_node_->create_callback_group( + rclcpp::CallbackGroupType::MutuallyExclusive, false); + tf_options_.callback_group = callback_group_; + tf_static_options_.callback_group = callback_group_; + tf_sub_ = rclcpp::create_subscription( + managed_listener_node_, "/tf", tf2_ros::DynamicListenerQoS(), cb_, tf_options_); + tf_static_sub_ = rclcpp::create_subscription( + managed_listener_node_, "/tf_static", tf2_ros::StaticListenerQoS(), cb_static_, + tf_static_options_); + executor_ = std::make_unique(); + executor_->add_callback_group(callback_group_, managed_listener_node_->get_node_base_interface()); + dedicated_listener_thread_ = std::make_unique([&]() { executor_->spin(); }); +} + +void ManagedTransformBuffer::deactivateListener() +{ + tf_listener_.reset(); +} + +void ManagedTransformBuffer::deactivateLocalListener() +{ + if (executor_) { + executor_->cancel(); + } + if (dedicated_listener_thread_ && dedicated_listener_thread_->joinable()) { + dedicated_listener_thread_->join(); + } + tf_static_sub_.reset(); + tf_sub_.reset(); + managed_listener_node_.reset(); + executor_.reset(); + dedicated_listener_thread_.reset(); +} + +void ManagedTransformBuffer::tfCallback( + const tf2_msgs::msg::TFMessage::SharedPtr msg, const bool is_static) +{ + for (const auto & transform : msg->transforms) { + tf_tree_->emplace(transform.child_frame_id, TreeNode{transform.header.frame_id, is_static}); + } +} + +std::optional ManagedTransformBuffer::lookupTransform( + const std::string & target_frame, const std::string & source_frame, const rclcpp::Time & time, + const rclcpp::Duration & timeout) const +{ + try { + auto tf = tf_buffer_->lookupTransform(target_frame, source_frame, time, timeout); + return std::make_optional(tf); + } catch (const tf2::TransformException & ex) { + RCLCPP_ERROR( + node_->get_logger(), "Failure to get transform from %s to %s with error: %s", + target_frame.c_str(), source_frame.c_str(), ex.what()); + return std::nullopt; + } +} + +TraverseResult ManagedTransformBuffer::traverseTree( + const std::string & target_frame, const std::string & source_frame, + const rclcpp::Duration & timeout) +{ + std::atomic timeout_reached{false}; + + auto traverse = [this, &timeout_reached](std::string t1, std::string t2) -> TraverseResult { + bool only_static_requested{true}; + uint32_t depth = 0; + auto current_frame = t1; + while (!timeout_reached) { + auto current_tf_tree = *tf_tree_; // Avoid race condition (mutex would lock callbacks) + auto frame_it 
= current_tf_tree.find(current_frame); + if (frame_it == current_tf_tree.end()) { // Not found, reset states and reverse the search + std::swap(t1, t2); + current_frame = t1; + only_static_requested = true; + depth = 0; + continue; + } + only_static_requested = only_static_requested && frame_it->second.is_static; + current_frame = frame_it->second.parent; + if (current_frame == t2) { // Found + return {true, only_static_requested}; + } + depth++; + if (depth > tf2::BufferCore::MAX_GRAPH_DEPTH) { // Possibly TF tree loop occurred + RCLCPP_ERROR( + node_->get_logger(), "Traverse depth exceeded for %s -> %s", t1.c_str(), t2.c_str()); + return {false, false}; + } + } + return {false, false}; + }; + + std::future future = + std::async(std::launch::async, traverse, target_frame, source_frame); + if ( + future.wait_for(timeout.to_chrono()) == std::future_status::ready) { + return future.get(); + } + timeout_reached = true; + return {false, false}; +} + +std::optional ManagedTransformBuffer::getDynamicTransform( + const std::string & target_frame, const std::string & source_frame, const rclcpp::Time & time, + const rclcpp::Duration & timeout) +{ + if (!tf_listener_) { + activateListener(); + } + return lookupTransform(target_frame, source_frame, time, timeout); +} + +std::optional ManagedTransformBuffer::getStaticTransform( + const std::string & target_frame, const std::string & source_frame) +{ + auto key = std::make_pair(target_frame, source_frame); + auto key_inv = std::make_pair(source_frame, target_frame); + + // Check if the transform is already in the buffer + auto it = static_tf_buffer_->find(key); + if (it != static_tf_buffer_->end()) { + auto tf_msg = it->second; + tf_msg.header.stamp = node_->now(); + return std::make_optional(tf_msg); + } + + // Check if the inverse transform is already in the buffer + auto it_inv = static_tf_buffer_->find(key_inv); + if (it_inv != static_tf_buffer_->end()) { + auto tf_msg = it_inv->second; + tf2::Transform tf; + tf2::fromMsg(tf_msg.transform, tf); + tf2::Transform inv_tf = tf.inverse(); + TransformStamped inv_tf_msg; + inv_tf_msg.transform = tf2::toMsg(inv_tf); + inv_tf_msg.header.frame_id = tf_msg.child_frame_id; + inv_tf_msg.child_frame_id = tf_msg.header.frame_id; + inv_tf_msg.header.stamp = node_->now(); + static_tf_buffer_->emplace(key, inv_tf_msg); + return std::make_optional(inv_tf_msg); + } + + // Check if transform is needed + if (target_frame == source_frame) { + auto tf_identity = tf2::Transform::getIdentity(); + TransformStamped tf_msg; + tf_msg.transform = tf2::toMsg(tf_identity); + tf_msg.header.frame_id = target_frame; + tf_msg.child_frame_id = source_frame; + tf_msg.header.stamp = node_->now(); + static_tf_buffer_->emplace(key, tf_msg); + return std::make_optional(tf_msg); + } + + return std::nullopt; +} + +std::optional ManagedTransformBuffer::getUnknownTransform( + const std::string & target_frame, const std::string & source_frame, const rclcpp::Time & time, + const rclcpp::Duration & timeout) +{ + // Try to get transform from local static buffer + auto static_tf = getStaticTransform(target_frame, source_frame); + if (static_tf.has_value()) { + return static_tf; + } + + // Initialize local TF listener and base TF listener + activateLocalListener(); + activateListener(); + + // Check local TF tree and TF buffer asynchronously + std::future traverse_future = std::async( + std::launch::async, &ManagedTransformBuffer::traverseTree, this, target_frame, source_frame, + timeout); + std::future> tf_future = std::async( + std::launch::async, 
&ManagedTransformBuffer::lookupTransform, this, target_frame, source_frame, + time, timeout); + auto traverse_result = traverse_future.get(); + auto tf = tf_future.get(); + deactivateLocalListener(); + + // Mimic lookup transform error if TF not exists in tree or buffer + if (!traverse_result.success || !tf.has_value()) { + return std::nullopt; + } + + // If TF is static, add it to the static buffer. Otherwise, switch to dynamic listener + if (traverse_result.is_static) { + auto key = std::make_pair(target_frame, source_frame); + static_tf_buffer_->emplace(key, tf.value()); + deactivateListener(); + } else { + get_transform_ = [this]( + const std::string & target_frame, const std::string & source_frame, + const rclcpp::Time & time, + const rclcpp::Duration & timeout) -> std::optional { + return getDynamicTransform(target_frame, source_frame, time, timeout); + }; + RCLCPP_DEBUG( + node_->get_logger(), "Transform %s -> %s is dynamic. Switching to dynamic listener.", + target_frame.c_str(), source_frame.c_str()); + } + + return tf; +} + +} // namespace autoware::universe_utils diff --git a/common/autoware_universe_utils/test/src/ros/test_managed_transform_buffer.cpp b/common/autoware_universe_utils/test/src/ros/test_managed_transform_buffer.cpp index 9df6f9bb2ffef..120ab68d51077 100644 --- a/common/autoware_universe_utils/test/src/ros/test_managed_transform_buffer.cpp +++ b/common/autoware_universe_utils/test/src/ros/test_managed_transform_buffer.cpp @@ -14,13 +14,25 @@ #include "autoware/universe_utils/ros/managed_transform_buffer.hpp" +#include #include #include +#include +#include +#include + #include +#include #include +#include +#include #include +#include + +std::chrono::milliseconds autoware::universe_utils::ManagedTransformBuffer::default_timeout = + std::chrono::milliseconds(100); // Relax timeout for CI class TestManagedTransformBuffer : public ::testing::Test { @@ -31,6 +43,7 @@ class TestManagedTransformBuffer : public ::testing::Test geometry_msgs::msg::TransformStamped tf_base_to_lidar_; Eigen::Matrix4f eigen_base_to_lidar_; std::unique_ptr cloud_in_; + double precision_; geometry_msgs::msg::TransformStamped generateTransformMsg( const int32_t seconds, const uint32_t nanoseconds, const std::string & parent_frame, @@ -54,16 +67,16 @@ class TestManagedTransformBuffer : public ::testing::Test void SetUp() override { - node_ = std::make_shared("test_managed_transform_buffer"); + node_ = std::make_unique("test_managed_transform_buffer"); managed_tf_buffer_ = - std::make_shared(node_.get(), true); - tf_broadcaster_ = std::make_shared(node_); + std::make_unique(node_.get(), true); + tf_broadcaster_ = std::make_unique(node_); tf_base_to_lidar_ = generateTransformMsg( 10, 100'000'000, "base_link", "lidar_top", 0.690, 0.000, 2.100, -0.007, -0.007, 0.692, 0.722); eigen_base_to_lidar_ = tf2::transformToEigen(tf_base_to_lidar_).matrix().cast(); - tf_broadcaster_->sendTransform(tf_base_to_lidar_); cloud_in_ = std::make_unique(); + precision_ = 0.01; // Set up the fields for x, y, and z coordinates cloud_in_->fields.resize(3); @@ -95,57 +108,98 @@ class TestManagedTransformBuffer : public ::testing::Test void TearDown() override { managed_tf_buffer_.reset(); } }; +TEST_F(TestManagedTransformBuffer, TestReturn) +{ + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + + auto eigen_transform = + managed_tf_buffer_->getTransform("base_link", "lidar_top"); + EXPECT_TRUE(eigen_transform.has_value()); + auto tf2_transform = managed_tf_buffer_->getTransform("base_link", "lidar_top"); + ; + 
EXPECT_TRUE(tf2_transform.has_value()); + auto tf_msg_transform = managed_tf_buffer_->getTransform( + "base_link", "lidar_top"); + ; + EXPECT_TRUE(tf_msg_transform.has_value()); +} + TEST_F(TestManagedTransformBuffer, TestTransformNoExist) { - Eigen::Matrix4f transform; - auto success = managed_tf_buffer_->getTransform("base_link", "fake_link", transform); - EXPECT_TRUE(transform.isIdentity()); - EXPECT_FALSE(success); + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + + auto eigen_transform = + managed_tf_buffer_->getTransform("base_link", "fake_link"); + ; + EXPECT_FALSE(eigen_transform.has_value()); } TEST_F(TestManagedTransformBuffer, TestTransformBase) { - Eigen::Matrix4f eigen_base_to_lidar; - auto success = managed_tf_buffer_->getTransform("base_link", "lidar_top", eigen_base_to_lidar); - EXPECT_TRUE(eigen_base_to_lidar.isApprox(eigen_base_to_lidar_, 0.001)); - EXPECT_TRUE(success); + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + + auto eigen_base_to_lidar = + managed_tf_buffer_->getTransform("base_link", "lidar_top"); + ; + ASSERT_TRUE(eigen_base_to_lidar.has_value()); + EXPECT_TRUE(eigen_base_to_lidar.value().isApprox(eigen_base_to_lidar_, precision_)); } TEST_F(TestManagedTransformBuffer, TestTransformSameFrame) { - Eigen::Matrix4f eigen_base_to_base; - auto success = managed_tf_buffer_->getTransform("base_link", "base_link", eigen_base_to_base); - EXPECT_TRUE(eigen_base_to_base.isApprox(Eigen::Matrix4f::Identity(), 0.001)); - EXPECT_TRUE(success); + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + + auto eigen_base_to_base = + managed_tf_buffer_->getTransform("base_link", "base_link"); + ; + ASSERT_TRUE(eigen_base_to_base.has_value()); + EXPECT_TRUE(eigen_base_to_base.value().isApprox(Eigen::Matrix4f::Identity(), precision_)); } TEST_F(TestManagedTransformBuffer, TestTransformInverse) { - Eigen::Matrix4f eigen_lidar_to_base; - auto success = managed_tf_buffer_->getTransform("lidar_top", "base_link", eigen_lidar_to_base); - EXPECT_TRUE(eigen_lidar_to_base.isApprox(eigen_base_to_lidar_.inverse(), 0.001)); - EXPECT_TRUE(success); + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + + auto eigen_lidar_to_base = + managed_tf_buffer_->getTransform("lidar_top", "base_link"); + ; + ASSERT_TRUE(eigen_lidar_to_base.has_value()); + EXPECT_TRUE(eigen_lidar_to_base.value().isApprox(eigen_base_to_lidar_.inverse(), precision_)); } TEST_F(TestManagedTransformBuffer, TestTransformMultipleCall) { - Eigen::Matrix4f eigen_transform; - EXPECT_FALSE(managed_tf_buffer_->getTransform("base_link", "fake_link", eigen_transform)); - EXPECT_TRUE(eigen_transform.isApprox(Eigen::Matrix4f::Identity(), 0.001)); - EXPECT_TRUE(managed_tf_buffer_->getTransform("lidar_top", "base_link", eigen_transform)); - EXPECT_TRUE(eigen_transform.isApprox(eigen_base_to_lidar_.inverse(), 0.001)); - EXPECT_TRUE(managed_tf_buffer_->getTransform("fake_link", "fake_link", eigen_transform)); - EXPECT_TRUE(eigen_transform.isApprox(Eigen::Matrix4f::Identity(), 0.001)); - EXPECT_TRUE(managed_tf_buffer_->getTransform("base_link", "lidar_top", eigen_transform)); - EXPECT_TRUE(eigen_transform.isApprox(eigen_base_to_lidar_, 0.001)); - EXPECT_FALSE(managed_tf_buffer_->getTransform("fake_link", "lidar_top", eigen_transform)); - EXPECT_TRUE(eigen_transform.isApprox(Eigen::Matrix4f::Identity(), 0.001)); - EXPECT_TRUE(managed_tf_buffer_->getTransform("base_link", "lidar_top", eigen_transform)); - EXPECT_TRUE(eigen_transform.isApprox(eigen_base_to_lidar_, 0.001)); + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + + 
std::optional eigen_transform; + eigen_transform = managed_tf_buffer_->getTransform("base_link", "fake_link"); + ; + EXPECT_FALSE(eigen_transform.has_value()); + eigen_transform = managed_tf_buffer_->getTransform("lidar_top", "base_link"); + ; + ASSERT_TRUE(eigen_transform.has_value()); + EXPECT_TRUE(eigen_transform.value().isApprox(eigen_base_to_lidar_.inverse(), precision_)); + eigen_transform = managed_tf_buffer_->getTransform("fake_link", "fake_link"); + ; + ASSERT_TRUE(eigen_transform.has_value()); + EXPECT_TRUE(eigen_transform.value().isApprox(Eigen::Matrix4f::Identity(), precision_)); + eigen_transform = managed_tf_buffer_->getTransform("base_link", "lidar_top"); + ; + ASSERT_TRUE(eigen_transform.has_value()); + EXPECT_TRUE(eigen_transform.value().isApprox(eigen_base_to_lidar_, precision_)); + eigen_transform = managed_tf_buffer_->getTransform("fake_link", "lidar_top"); + ; + EXPECT_FALSE(eigen_transform.has_value()); + eigen_transform = managed_tf_buffer_->getTransform("base_link", "lidar_top"); + ; + ASSERT_TRUE(eigen_transform.has_value()); + EXPECT_TRUE(eigen_transform.value().isApprox(eigen_base_to_lidar_, precision_)); } TEST_F(TestManagedTransformBuffer, TestTransformEmptyPointCloud) { + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + auto cloud_in = std::make_unique(); cloud_in->header.frame_id = "lidar_top"; cloud_in->header.stamp = rclcpp::Time(10, 100'000'000); @@ -158,6 +212,8 @@ TEST_F(TestManagedTransformBuffer, TestTransformEmptyPointCloud) TEST_F(TestManagedTransformBuffer, TestTransformEmptyPointCloudNoHeader) { + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + auto cloud_in = std::make_unique(); auto cloud_out = std::make_unique(); @@ -168,6 +224,8 @@ TEST_F(TestManagedTransformBuffer, TestTransformEmptyPointCloudNoHeader) TEST_F(TestManagedTransformBuffer, TestTransformPointCloud) { + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + auto cloud_out = std::make_unique(); // Transform cloud with header @@ -178,6 +236,8 @@ TEST_F(TestManagedTransformBuffer, TestTransformPointCloud) TEST_F(TestManagedTransformBuffer, TestTransformPointCloudNoHeader) { + tf_broadcaster_->sendTransform(tf_base_to_lidar_); + auto cloud_out = std::make_unique(); // Transform cloud without header diff --git a/perception/autoware_ground_segmentation/README.md b/perception/autoware_ground_segmentation/README.md index 08b8a4f373bcc..b981b02e0a844 100644 --- a/perception/autoware_ground_segmentation/README.md +++ b/perception/autoware_ground_segmentation/README.md @@ -33,15 +33,14 @@ Detail description of each ground segmentation algorithm is in the following lin ### Node Parameters -| Name | Type | Default Value | Description | -| -------------------- | ------ | ------------- | ------------------------------------- | -| `input_frame` | string | " " | input frame id | -| `output_frame` | string | " " | output frame id | -| `has_static_tf_only` | bool | false | flag to listen TF only once | -| `max_queue_size` | int | 5 | max queue size of input/output topics | -| `use_indices` | bool | false | flag to use pointcloud indices | -| `latched_indices` | bool | false | flag to latch pointcloud indices | -| `approximate_sync` | bool | false | flag to use approximate sync option | +| Name | Type | Default Value | Description | +| ------------------ | ------ | ------------- | ------------------------------------- | +| `input_frame` | string | " " | input frame id | +| `output_frame` | string | " " | output frame id | +| `max_queue_size` | int | 5 | max queue size of input/output topics | 
+| `use_indices` | bool | false | flag to use pointcloud indices | +| `latched_indices` | bool | false | flag to latch pointcloud indices | +| `approximate_sync` | bool | false | flag to use approximate sync option | ## Assumptions / Known limits diff --git a/perception/autoware_ground_segmentation/docs/ransac-ground-filter.md b/perception/autoware_ground_segmentation/docs/ransac-ground-filter.md index 37388e665eb2c..a11a35521d173 100644 --- a/perception/autoware_ground_segmentation/docs/ransac-ground-filter.md +++ b/perception/autoware_ground_segmentation/docs/ransac-ground-filter.md @@ -23,7 +23,6 @@ This implementation inherits `autoware::pointcloud_preprocessor::Filter` class, | Name | Type | Description | | ----------------------- | ------ | --------------------------------------------------------------- | | `base_frame` | string | base_link frame | -| `has_static_tf_only` | bool | Flag to listen TF only once | | `unit_axis` | string | The axis which we need to search ground plane | | `max_iterations` | int | The maximum number of iterations | | `outlier_threshold` | double | The distance threshold to the model [m] | diff --git a/perception/autoware_ground_segmentation/docs/ray-ground-filter.md b/perception/autoware_ground_segmentation/docs/ray-ground-filter.md index ed4f5833b3b3a..9b80b0588f851 100644 --- a/perception/autoware_ground_segmentation/docs/ray-ground-filter.md +++ b/perception/autoware_ground_segmentation/docs/ray-ground-filter.md @@ -28,7 +28,6 @@ This implementation inherits `autoware::pointcloud_preprocessor::Filter` class, | ----------------------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `input_frame` | string | frame id of input pointcloud | | `output_frame` | string | frame id of output pointcloud | -| `has_static_tf_only` | bool | Flag to listen TF only once | | `general_max_slope` | double | The triangle created by `general_max_slope` is called the global cone. If the point is outside the global cone, it is judged to be a point that is no on the ground | | `initial_max_slope` | double | Generally, the point where the object first hits is far from ego-vehicle because of sensor blind spot, so resolution is different from that point and thereafter, so this parameter exists to set a separate `local_max_slope` | | `local_max_slope` | double | The triangle created by `local_max_slope` is called the local cone. 
This parameter for classification based on the continuity of points | diff --git a/perception/autoware_ground_segmentation/docs/scan-ground-filter.md b/perception/autoware_ground_segmentation/docs/scan-ground-filter.md index 6b2f6d6b57571..f53d2d6c7bdc2 100644 --- a/perception/autoware_ground_segmentation/docs/scan-ground-filter.md +++ b/perception/autoware_ground_segmentation/docs/scan-ground-filter.md @@ -37,7 +37,6 @@ This implementation inherits `autoware::pointcloud_preprocessor::Filter` class, | --------------------------------- | ------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `input_frame` | string | "base_link" | frame id of input pointcloud | | `output_frame` | string | "base_link" | frame id of output pointcloud | -| `has_static_tf_only` | bool | false | Flag to listen TF only once | | `global_slope_max_angle_deg` | double | 8.0 | The global angle to classify as the ground or object [deg].
A large threshold may reduce false positive of high slope road classification but it may lead to increase false negative of non-ground classification, particularly for small objects. | | `local_slope_max_angle_deg` | double | 10.0 | The local angle to classify as the ground or object [deg] when comparing with adjacent point.
A small value enhance accuracy classification of object with inclined surface. This should be considered together with `split_points_distance_tolerance` value. | | `radial_divider_angle_deg` | double | 1.0 | The angle which divide the whole pointcloud to sliced group [deg] | diff --git a/perception/autoware_ground_segmentation/src/ransac_ground_filter/node.cpp b/perception/autoware_ground_segmentation/src/ransac_ground_filter/node.cpp index 83e0538bd56de..5583683ce931d 100644 --- a/perception/autoware_ground_segmentation/src/ransac_ground_filter/node.cpp +++ b/perception/autoware_ground_segmentation/src/ransac_ground_filter/node.cpp @@ -117,8 +117,7 @@ RANSACGroundFilterComponent::RANSACGroundFilterComponent(const rclcpp::NodeOptio pcl::console::setVerbosityLevel(pcl::console::L_ALWAYS); - managed_tf_buffer_ = - std::make_unique(this, has_static_tf_only_); + managed_tf_buffer_ = std::make_unique(this); bool use_time_keeper = declare_parameter("publish_processing_time_detail"); if (use_time_keeper) { @@ -165,18 +164,6 @@ void RANSACGroundFilterComponent::publishDebugMessage( debug_ground_cloud_pub_->publish(*ground_cloud_msg_ptr); } -bool RANSACGroundFilterComponent::transformPointCloud( - const std::string & in_target_frame, const PointCloud2ConstPtr & in_cloud_ptr, - const PointCloud2::SharedPtr & out_cloud_ptr) -{ - if (in_target_frame == in_cloud_ptr->header.frame_id) { - *out_cloud_ptr = *in_cloud_ptr; - return true; - } - - return managed_tf_buffer_->transformPointcloud(in_target_frame, *in_cloud_ptr, *out_cloud_ptr); -} - void RANSACGroundFilterComponent::extractPointsIndices( const pcl::PointCloud::Ptr in_cloud_ptr, const pcl::PointIndices & in_indices, pcl::PointCloud::Ptr out_only_indices_cloud_ptr, @@ -237,7 +224,8 @@ void RANSACGroundFilterComponent::filter( std::scoped_lock lock(mutex_); sensor_msgs::msg::PointCloud2::SharedPtr input_transformed_ptr(new sensor_msgs::msg::PointCloud2); - if (!transformPointCloud(base_frame_, input, input_transformed_ptr)) { + + if (!managed_tf_buffer_->transformPointcloud(base_frame_, *input, *input_transformed_ptr)) { RCLCPP_ERROR_STREAM_THROTTLE( this->get_logger(), *this->get_clock(), std::chrono::milliseconds(1000).count(), "Failed transform from " << base_frame_ << " to " << input->header.frame_id); @@ -321,8 +309,8 @@ void RANSACGroundFilterComponent::filter( sensor_msgs::msg::PointCloud2::SharedPtr no_ground_cloud_transformed_msg_ptr( new sensor_msgs::msg::PointCloud2); - if (!transformPointCloud( - base_frame_, no_ground_cloud_msg_ptr, no_ground_cloud_transformed_msg_ptr)) { + if (!managed_tf_buffer_->transformPointcloud( + base_frame_, *no_ground_cloud_msg_ptr, *no_ground_cloud_transformed_msg_ptr)) { RCLCPP_ERROR_STREAM_THROTTLE( this->get_logger(), *this->get_clock(), std::chrono::milliseconds(1000).count(), "Failed transform from " << base_frame_ << " to " diff --git a/perception/autoware_ground_segmentation/src/ransac_ground_filter/node.hpp b/perception/autoware_ground_segmentation/src/ransac_ground_filter/node.hpp index f9638086bf811..1cdcc53b839bc 100644 --- a/perception/autoware_ground_segmentation/src/ransac_ground_filter/node.hpp +++ b/perception/autoware_ground_segmentation/src/ransac_ground_filter/node.hpp @@ -91,18 +91,6 @@ class RANSACGroundFilterComponent : public autoware::pointcloud_preprocessor::Fi detailed_processing_time_publisher_; std::shared_ptr time_keeper_; - /*! 
- * Output transformed PointCloud from in_cloud_ptr->header.frame_id to in_target_frame - * @param[in] in_target_frame Coordinate system to perform transform - * @param[in] in_cloud_ptr PointCloud to perform transform - * @param[out] out_cloud_ptr Resulting transformed PointCloud - * @retval true transform succeeded - * @retval false transform failed - */ - bool transformPointCloud( - const std::string & in_target_frame, const PointCloud2ConstPtr & in_cloud_ptr, - const PointCloud2::SharedPtr & out_cloud_ptr); - /*! * Returns the resulting complementary PointCloud, one with the points kept and the other removed * as indicated in the indices diff --git a/sensing/autoware_pointcloud_preprocessor/config/concatenate_pointclouds.param.yaml b/sensing/autoware_pointcloud_preprocessor/config/concatenate_pointclouds.param.yaml index 449795c328402..5d38186840057 100644 --- a/sensing/autoware_pointcloud_preprocessor/config/concatenate_pointclouds.param.yaml +++ b/sensing/autoware_pointcloud_preprocessor/config/concatenate_pointclouds.param.yaml @@ -1,7 +1,6 @@ /**: ros__parameters: output_frame: base_link - has_static_tf_only: true input_topics: [ "/sensing/lidar/left/pointcloud_before_sync", "/sensing/lidar/right/pointcloud_before_sync", diff --git a/sensing/autoware_pointcloud_preprocessor/config/distortion_corrector_node.param.yaml b/sensing/autoware_pointcloud_preprocessor/config/distortion_corrector_node.param.yaml index 90ded7ffc9c97..c558756f5eed3 100644 --- a/sensing/autoware_pointcloud_preprocessor/config/distortion_corrector_node.param.yaml +++ b/sensing/autoware_pointcloud_preprocessor/config/distortion_corrector_node.param.yaml @@ -4,4 +4,3 @@ use_imu: true use_3d_distortion_correction: false update_azimuth_and_distance: false - has_static_tf_only: true diff --git a/sensing/autoware_pointcloud_preprocessor/config/time_synchronizer_node.param.yaml b/sensing/autoware_pointcloud_preprocessor/config/time_synchronizer_node.param.yaml index 7e5bd0fd96bcf..9add0be3f8809 100644 --- a/sensing/autoware_pointcloud_preprocessor/config/time_synchronizer_node.param.yaml +++ b/sensing/autoware_pointcloud_preprocessor/config/time_synchronizer_node.param.yaml @@ -3,7 +3,6 @@ input_twist_topic_type: twist output_frame: base_link keep_input_frame_in_synchronized_pointcloud: false - has_static_tf_only: true input_topics: [ "/sensing/lidar/left/pointcloud_before_sync", "/sensing/lidar/right/pointcloud_before_sync", diff --git a/sensing/autoware_pointcloud_preprocessor/docs/concatenate-data.md b/sensing/autoware_pointcloud_preprocessor/docs/concatenate-data.md index 08f7b92f88975..74b4f3df17615 100644 --- a/sensing/autoware_pointcloud_preprocessor/docs/concatenate-data.md +++ b/sensing/autoware_pointcloud_preprocessor/docs/concatenate-data.md @@ -28,13 +28,12 @@ The figure below represents the reception time of each sensor data and how it is ## Parameters -| Name | Type | Default Value | Description | -| -------------------- | ---------------- | ------------- | ------------------------------------------------------------------- | -| `input/points` | vector of string | [] | input topic names that type must be `sensor_msgs::msg::Pointcloud2` | -| `input_frame` | string | "" | input frame id | -| `output_frame` | string | "" | output frame id | -| `has_static_tf_only` | bool | false | flag to listen TF only once | -| `max_queue_size` | int | 5 | max queue size of input/output topics | +| Name | Type | Default Value | Description | +| ---------------- | ---------------- | ------------- | 
------------------------------------------------------------------- | +| `input/points` | vector of string | [] | input topic names that type must be `sensor_msgs::msg::Pointcloud2` | +| `input_frame` | string | "" | input frame id | +| `output_frame` | string | "" | output frame id | +| `max_queue_size` | int | 5 | max queue size of input/output topics | ### Core Parameters diff --git a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/concatenate_data/concatenate_and_time_sync_nodelet.hpp b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/concatenate_data/concatenate_and_time_sync_nodelet.hpp index 40adad3521f8d..58642c2490da0 100644 --- a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/concatenate_data/concatenate_and_time_sync_nodelet.hpp +++ b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/concatenate_data/concatenate_and_time_sync_nodelet.hpp @@ -146,9 +146,6 @@ class PointCloudConcatenateDataSynchronizerComponent : public rclcpp::Node /** \brief Output TF frame the concatenated points should be transformed to. */ std::string output_frame_; - /** \brief The flag to indicate if only static TF are used. */ - bool has_static_tf_only_; - /** \brief Input point cloud topics. */ // XmlRpc::XmlRpcValue input_topics_; std::vector input_topics_; diff --git a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/concatenate_data/concatenate_pointclouds.hpp b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/concatenate_data/concatenate_pointclouds.hpp index 0e959eedae1b6..c79839febbb58 100644 --- a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/concatenate_data/concatenate_pointclouds.hpp +++ b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/concatenate_data/concatenate_pointclouds.hpp @@ -135,9 +135,6 @@ class PointCloudConcatenationComponent : public rclcpp::Node /** \brief Output TF frame the concatenated points should be transformed to. */ std::string output_frame_; - /** \brief The flag to indicate if only static TF are used. */ - bool has_static_tf_only_; - /** \brief Input point cloud topics. 
*/ // XmlRpc::XmlRpcValue input_topics_; std::vector input_topics_; diff --git a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/distortion_corrector/distortion_corrector.hpp b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/distortion_corrector/distortion_corrector.hpp index d4432c24dd5b0..64825b68122d4 100644 --- a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/distortion_corrector/distortion_corrector.hpp +++ b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/distortion_corrector/distortion_corrector.hpp @@ -84,11 +84,9 @@ class DistortionCorrectorBase static tf2::Transform convert_matrix_to_transform(const Eigen::Matrix4f & matrix); public: - explicit DistortionCorrectorBase(rclcpp::Node & node, const bool & has_static_tf_only) - : node_(node) + explicit DistortionCorrectorBase(rclcpp::Node & node) : node_(node) { - managed_tf_buffer_ = - std::make_unique(&node, has_static_tf_only); + managed_tf_buffer_ = std::make_unique(&node); } [[nodiscard]] bool pointcloud_transform_exists() const; [[nodiscard]] bool pointcloud_transform_needed() const; @@ -117,10 +115,7 @@ template class DistortionCorrector : public DistortionCorrectorBase { public: - explicit DistortionCorrector(rclcpp::Node & node, const bool & has_static_tf_only) - : DistortionCorrectorBase(node, has_static_tf_only) - { - } + explicit DistortionCorrector(rclcpp::Node & node) : DistortionCorrectorBase(node) {} void undistort_pointcloud( bool use_imu, std::optional angle_conversion_opt, @@ -155,10 +150,7 @@ class DistortionCorrector2D : public DistortionCorrector tf2::Transform tf2_base_link_to_lidar_; public: - explicit DistortionCorrector2D(rclcpp::Node & node, const bool & has_static_tf_only) - : DistortionCorrector(node, has_static_tf_only) - { - } + explicit DistortionCorrector2D(rclcpp::Node & node) : DistortionCorrector(node) {} void initialize() override; void set_pointcloud_transform( const std::string & base_frame, const std::string & lidar_frame) override; @@ -184,10 +176,7 @@ class DistortionCorrector3D : public DistortionCorrector Eigen::Matrix4f eigen_base_link_to_lidar_; public: - explicit DistortionCorrector3D(rclcpp::Node & node, const bool & has_static_tf_only) - : DistortionCorrector(node, has_static_tf_only) - { - } + explicit DistortionCorrector3D(rclcpp::Node & node) : DistortionCorrector(node) {} void initialize() override; void set_pointcloud_transform( const std::string & base_frame, const std::string & lidar_frame) override; diff --git a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/filter.hpp b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/filter.hpp index b39be3cfaf9e5..2bd3a9ad66314 100644 --- a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/filter.hpp +++ b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/filter.hpp @@ -169,9 +169,6 @@ class Filter : public rclcpp::Node * if input.header.frame_id is different. */ std::string tf_output_frame_; - /** \brief The flag to indicate if only static TF are used. */ - bool has_static_tf_only_; - /** \brief Internal mutex. 
*/ std::mutex mutex_; diff --git a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/time_synchronizer/time_synchronizer_node.hpp b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/time_synchronizer/time_synchronizer_node.hpp index d36fcb36a7be1..be2124a19a244 100644 --- a/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/time_synchronizer/time_synchronizer_node.hpp +++ b/sensing/autoware_pointcloud_preprocessor/include/autoware/pointcloud_preprocessor/time_synchronizer/time_synchronizer_node.hpp @@ -140,9 +140,6 @@ class PointCloudDataSynchronizerComponent : public rclcpp::Node std::string output_frame_; bool keep_input_frame_in_synchronized_pointcloud_; - /** \brief The flag to indicate if only static TF are used. */ - bool has_static_tf_only_; - /** \brief Input point cloud topics. */ // XmlRpc::XmlRpcValue input_topics_; std::vector input_topics_; diff --git a/sensing/autoware_pointcloud_preprocessor/schema/concatenate_pointclouds.schema.json b/sensing/autoware_pointcloud_preprocessor/schema/concatenate_pointclouds.schema.json index 19044f851bda6..916650c69a68b 100644 --- a/sensing/autoware_pointcloud_preprocessor/schema/concatenate_pointclouds.schema.json +++ b/sensing/autoware_pointcloud_preprocessor/schema/concatenate_pointclouds.schema.json @@ -25,11 +25,6 @@ "default": "base_link", "description": "Output frame id." }, - "has_static_tf_only": { - "type": "boolean", - "default": false, - "description": "Flag to indicate if only static TF is used." - }, "input_topics": { "type": "array", "items": { @@ -45,14 +40,7 @@ "description": "Max queue size of input/output topics." } }, - "required": [ - "timeout_sec", - "input_offset", - "output_frame", - "has_static_tf_only", - "input_topics", - "max_queue_size" - ] + "required": ["timeout_sec", "input_offset", "output_frame", "input_topics", "max_queue_size"] } }, "properties": { diff --git a/sensing/autoware_pointcloud_preprocessor/schema/distortion_corrector_node.schema.json b/sensing/autoware_pointcloud_preprocessor/schema/distortion_corrector_node.schema.json index 091695716c610..f841cf839e075 100644 --- a/sensing/autoware_pointcloud_preprocessor/schema/distortion_corrector_node.schema.json +++ b/sensing/autoware_pointcloud_preprocessor/schema/distortion_corrector_node.schema.json @@ -25,19 +25,13 @@ "type": "boolean", "description": "Flag to update the azimuth and distance values of each point after undistortion. If set to false, the azimuth and distance values will remain unchanged after undistortion, resulting in a mismatch with the updated x, y, z coordinates.", "default": "false" - }, - "has_static_tf_only": { - "type": "boolean", - "description": "Flag to indicate if only static TF is used.", - "default": false } }, "required": [ "base_frame", "use_imu", "use_3d_distortion_correction", - "update_azimuth_and_distance", - "has_static_tf_only" + "update_azimuth_and_distance" ] } }, diff --git a/sensing/autoware_pointcloud_preprocessor/schema/time_synchronizer_node.schema.json b/sensing/autoware_pointcloud_preprocessor/schema/time_synchronizer_node.schema.json index 90c14b50508db..8954fcaf6d814 100644 --- a/sensing/autoware_pointcloud_preprocessor/schema/time_synchronizer_node.schema.json +++ b/sensing/autoware_pointcloud_preprocessor/schema/time_synchronizer_node.schema.json @@ -31,11 +31,6 @@ "default": "base_link", "description": "Output frame." 
       },
-      "has_static_tf_only": {
-        "type": "boolean",
-        "default": false,
-        "description": "Flag to indicate if only static TF is used."
-      },
       "keep_input_frame_in_synchronized_pointcloud": {
         "type": "boolean",
         "default": true,
@@ -66,7 +61,6 @@
       "input_offset",
       "input_twist_topic_type",
       "output_frame",
-      "has_static_tf_only",
       "keep_input_frame_in_synchronized_pointcloud",
       "input_topics",
       "synchronized_pointcloud_postfix",
diff --git a/sensing/autoware_pointcloud_preprocessor/src/concatenate_data/concatenate_and_time_sync_nodelet.cpp b/sensing/autoware_pointcloud_preprocessor/src/concatenate_data/concatenate_and_time_sync_nodelet.cpp
index a67c9f7f018e1..91e911166c513 100644
--- a/sensing/autoware_pointcloud_preprocessor/src/concatenate_data/concatenate_and_time_sync_nodelet.cpp
+++ b/sensing/autoware_pointcloud_preprocessor/src/concatenate_data/concatenate_and_time_sync_nodelet.cpp
@@ -92,7 +92,6 @@ PointCloudConcatenateDataSynchronizerComponent::PointCloudConcatenateDataSynchro
     RCLCPP_ERROR(get_logger(), "Need an 'output_frame' parameter to be set before continuing!");
     return;
   }
-  has_static_tf_only_ = declare_parameter("has_static_tf_only", false);
   declare_parameter("input_topics", std::vector<std::string>());
   input_topics_ = get_parameter("input_topics").as_string_array();
   if (input_topics_.empty()) {
@@ -138,8 +137,7 @@ PointCloudConcatenateDataSynchronizerComponent::PointCloudConcatenateDataSynchro
 
   // tf2 listener
   {
-    managed_tf_buffer_ =
-      std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(this, has_static_tf_only_);
+    managed_tf_buffer_ = std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(this);
   }
 
   // Output Publishers
diff --git a/sensing/autoware_pointcloud_preprocessor/src/concatenate_data/concatenate_pointclouds.cpp b/sensing/autoware_pointcloud_preprocessor/src/concatenate_data/concatenate_pointclouds.cpp
index ba29389b88bf7..80078e5915e52 100644
--- a/sensing/autoware_pointcloud_preprocessor/src/concatenate_data/concatenate_pointclouds.cpp
+++ b/sensing/autoware_pointcloud_preprocessor/src/concatenate_data/concatenate_pointclouds.cpp
@@ -54,8 +54,6 @@ PointCloudConcatenationComponent::PointCloudConcatenationComponent(
     RCLCPP_ERROR(get_logger(), "Need an 'output_frame' parameter to be set before continuing!");
     return;
   }
-  has_static_tf_only_ = declare_parameter(
-    "has_static_tf_only", false);  // TODO(amadeuszsz): remove default value
   declare_parameter<std::vector<std::string>>("input_topics");
   input_topics_ = get_parameter("input_topics").as_string_array();
   if (input_topics_.empty()) {
@@ -95,8 +93,7 @@ PointCloudConcatenationComponent::PointCloudConcatenationComponent(
 
   // tf2 listener
   {
-    managed_tf_buffer_ =
-      std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(this, has_static_tf_only_);
+    managed_tf_buffer_ = std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(this);
   }
 
   // Output Publishers
diff --git a/sensing/autoware_pointcloud_preprocessor/src/distortion_corrector/distortion_corrector.cpp b/sensing/autoware_pointcloud_preprocessor/src/distortion_corrector/distortion_corrector.cpp
index 43a44f836b61a..6683011ac165c 100644
--- a/sensing/autoware_pointcloud_preprocessor/src/distortion_corrector/distortion_corrector.cpp
+++ b/sensing/autoware_pointcloud_preprocessor/src/distortion_corrector/distortion_corrector.cpp
@@ -84,14 +84,11 @@ void DistortionCorrectorBase::get_imu_transformation(
     return;
   }
 
-  Eigen::Matrix4f eigen_imu_to_base_link;
-  imu_transform_exists_ =
-    managed_tf_buffer_->getTransform(base_frame, imu_frame, eigen_imu_to_base_link);
-  tf2::Transform tf2_imu_to_base_link = convert_matrix_to_transform(eigen_imu_to_base_link);
+  auto msg_imu_to_base_link =
+    managed_tf_buffer_->getTransform<geometry_msgs::msg::TransformStamped>(base_frame, imu_frame);
 
   geometry_imu_to_base_link_ptr_ = std::make_shared<geometry_msgs::msg::TransformStamped>();
-  geometry_imu_to_base_link_ptr_->transform.rotation =
-    tf2::toMsg(tf2_imu_to_base_link.getRotation());
+  geometry_imu_to_base_link_ptr_->transform.rotation = msg_imu_to_base_link->transform.rotation;
 }
 
 void DistortionCorrectorBase::enqueue_imu(const sensor_msgs::msg::Imu::ConstSharedPtr imu_msg)
@@ -428,11 +425,13 @@ void DistortionCorrector2D::set_pointcloud_transform(
     return;
   }
 
-  Eigen::Matrix4f eigen_lidar_to_base_link;
-  pointcloud_transform_exists_ =
-    managed_tf_buffer_->getTransform(base_frame, lidar_frame, eigen_lidar_to_base_link);
-  tf2_lidar_to_base_link_ = convert_matrix_to_transform(eigen_lidar_to_base_link);
-  tf2_base_link_to_lidar_ = tf2_lidar_to_base_link_.inverse();
+  auto tf2_lidar_to_base_link =
+    managed_tf_buffer_->getTransform<tf2::Transform>(base_frame, lidar_frame);
+  if (tf2_lidar_to_base_link.has_value()) {
+    pointcloud_transform_exists_ = true;
+    tf2_lidar_to_base_link_ = tf2_lidar_to_base_link.value();
+    tf2_base_link_to_lidar_ = tf2_lidar_to_base_link.value().inverse();
+  }
 
   pointcloud_transform_needed_ = base_frame != lidar_frame && pointcloud_transform_exists_;
 }
@@ -443,10 +442,14 @@ void DistortionCorrector3D::set_pointcloud_transform(
     return;
   }
 
-  pointcloud_transform_exists_ =
-    managed_tf_buffer_->getTransform(base_frame, lidar_frame, eigen_lidar_to_base_link_);
-  eigen_base_link_to_lidar_ = eigen_lidar_to_base_link_.inverse();
+  auto eigen_lidar_to_base_link =
+    managed_tf_buffer_->getTransform<Eigen::Matrix4f>(base_frame, lidar_frame);
+  pointcloud_transform_exists_ = eigen_lidar_to_base_link.has_value();
   pointcloud_transform_needed_ = base_frame != lidar_frame && pointcloud_transform_exists_;
+  if (pointcloud_transform_needed_) {
+    eigen_lidar_to_base_link_ = eigen_lidar_to_base_link.value();
+    eigen_base_link_to_lidar_ = eigen_lidar_to_base_link.value().inverse();
+  }
 }
 
 inline void DistortionCorrector2D::undistort_point_implementation(
diff --git a/sensing/autoware_pointcloud_preprocessor/src/distortion_corrector/distortion_corrector_node.cpp b/sensing/autoware_pointcloud_preprocessor/src/distortion_corrector/distortion_corrector_node.cpp
index 896c7fe563e64..33f4adff58f27 100644
--- a/sensing/autoware_pointcloud_preprocessor/src/distortion_corrector/distortion_corrector_node.cpp
+++ b/sensing/autoware_pointcloud_preprocessor/src/distortion_corrector/distortion_corrector_node.cpp
@@ -36,8 +36,6 @@ DistortionCorrectorComponent::DistortionCorrectorComponent(const rclcpp::NodeOpt
   use_imu_ = declare_parameter<bool>("use_imu");
   use_3d_distortion_correction_ = declare_parameter<bool>("use_3d_distortion_correction");
   update_azimuth_and_distance_ = declare_parameter<bool>("update_azimuth_and_distance");
-  auto has_static_tf_only =
-    declare_parameter("has_static_tf_only", false);  // TODO(amadeuszsz): remove default value
 
   // Publisher
   {
@@ -62,9 +60,9 @@ DistortionCorrectorComponent::DistortionCorrectorComponent(const rclcpp::NodeOpt
 
   // Setup the distortion corrector
   if (use_3d_distortion_correction_) {
-    distortion_corrector_ = std::make_unique<DistortionCorrector3D>(*this, has_static_tf_only);
+    distortion_corrector_ = std::make_unique<DistortionCorrector3D>(*this);
   } else {
-    distortion_corrector_ = std::make_unique<DistortionCorrector2D>(*this, has_static_tf_only);
+    distortion_corrector_ = std::make_unique<DistortionCorrector2D>(*this);
   }
 }
diff --git a/sensing/autoware_pointcloud_preprocessor/src/filter.cpp b/sensing/autoware_pointcloud_preprocessor/src/filter.cpp
index 62495fc8b70cb..3d2875d33d559 100644
--- a/sensing/autoware_pointcloud_preprocessor/src/filter.cpp
+++ b/sensing/autoware_pointcloud_preprocessor/src/filter.cpp
@@ -72,7 +72,6 @@ autoware::pointcloud_preprocessor::Filter::Filter(
 {
   tf_input_frame_ = static_cast<std::string>(declare_parameter("input_frame", ""));
   tf_output_frame_ = static_cast<std::string>(declare_parameter("output_frame", ""));
-  has_static_tf_only_ = static_cast<bool>(declare_parameter("has_static_tf_only", false));
   max_queue_size_ = static_cast<std::size_t>(declare_parameter("max_queue_size", 5));
 
   // ---[ Optional parameters
@@ -115,17 +114,7 @@
 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 void autoware::pointcloud_preprocessor::Filter::setupTF()
 {
-  // Always consider static TF if in & out frames are same
-  if (tf_input_frame_ == tf_output_frame_) {
-    if (!has_static_tf_only_) {
-      RCLCPP_INFO(
-        this->get_logger(),
-        "Input and output frames are the same. Overriding has_static_tf_only to true.");
-    }
-    has_static_tf_only_ = true;
-  }
-  managed_tf_buffer_ =
-    std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(this, has_static_tf_only_);
+  managed_tf_buffer_ = std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(this);
 }
 
 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -309,11 +298,12 @@ bool autoware::pointcloud_preprocessor::Filter::calculate_transform_matrix(
     this->get_logger(), "[get_transform_matrix] Transforming input dataset from %s to %s.",
     from.header.frame_id.c_str(), target_frame.c_str());
 
-  if (!managed_tf_buffer_->getTransform(
-        target_frame, from.header.frame_id, transform_info.eigen_transform)) {
+  auto eigen_transform =
+    managed_tf_buffer_->getTransform<Eigen::Matrix4f>(target_frame, from.header.frame_id);
+  if (!eigen_transform.has_value()) {
     return false;
   }
-
+  transform_info.eigen_transform = eigen_transform.value();
   transform_info.need_transform = true;
   return true;
 }
diff --git a/sensing/autoware_pointcloud_preprocessor/src/time_synchronizer/time_synchronizer_node.cpp b/sensing/autoware_pointcloud_preprocessor/src/time_synchronizer/time_synchronizer_node.cpp
index 5bf5069f1200f..6c0e7d8bf8e45 100644
--- a/sensing/autoware_pointcloud_preprocessor/src/time_synchronizer/time_synchronizer_node.cpp
+++ b/sensing/autoware_pointcloud_preprocessor/src/time_synchronizer/time_synchronizer_node.cpp
@@ -58,8 +58,6 @@ PointCloudDataSynchronizerComponent::PointCloudDataSynchronizerComponent(
   std::string synchronized_pointcloud_postfix;
   {
     output_frame_ = declare_parameter<std::string>("output_frame");
-    has_static_tf_only_ = declare_parameter(
-      "has_static_tf_only", false);  // TODO(amadeuszsz): remove default value
     keep_input_frame_in_synchronized_pointcloud_ =
       declare_parameter<bool>("keep_input_frame_in_synchronized_pointcloud");
     if (output_frame_.empty() && !keep_input_frame_in_synchronized_pointcloud_) {
@@ -111,8 +109,7 @@ PointCloudDataSynchronizerComponent::PointCloudDataSynchronizerComponent(
 
   // tf2 listener
   {
-    managed_tf_buffer_ =
-      std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(this, has_static_tf_only_);
+    managed_tf_buffer_ = std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(this);
   }
 
   // Subscribers
diff --git a/sensing/autoware_pointcloud_preprocessor/src/vector_map_filter/vector_map_inside_area_filter_node.cpp b/sensing/autoware_pointcloud_preprocessor/src/vector_map_filter/vector_map_inside_area_filter_node.cpp
index 6fdf7791601cf..c2efb4eb744ff 100644
--- a/sensing/autoware_pointcloud_preprocessor/src/vector_map_filter/vector_map_inside_area_filter_node.cpp
+++ b/sensing/autoware_pointcloud_preprocessor/src/vector_map_filter/vector_map_inside_area_filter_node.cpp
@@ -14,6 +14,8 @@
 #include "autoware/pointcloud_preprocessor/vector_map_filter/vector_map_inside_area_filter_node.hpp"
+#include
+
 namespace
 {
 autoware::universe_utils::Box2d calcBoundingBox(
diff --git a/sensing/autoware_pointcloud_preprocessor/test/test_distortion_corrector_node.cpp b/sensing/autoware_pointcloud_preprocessor/test/test_distortion_corrector_node.cpp
index 895061229a994..07358839d27b6 100644
--- a/sensing/autoware_pointcloud_preprocessor/test/test_distortion_corrector_node.cpp
+++ b/sensing/autoware_pointcloud_preprocessor/test/test_distortion_corrector_node.cpp
@@ -42,6 +42,8 @@
 #include
 
+std::chrono::milliseconds autoware::universe_utils::ManagedTransformBuffer::default_timeout =
+  std::chrono::milliseconds(100);  // Relax timeout for CI
 enum AngleCoordinateSystem { HESAI, VELODYNE, CARTESIAN };
 
 class DistortionCorrectorTest : public ::testing::Test
 {
@@ -50,9 +52,9 @@ class DistortionCorrectorTest : public ::testing::Test
   {
     node_ = std::make_shared<rclcpp::Node>("test_node");
     distortion_corrector_2d_ =
-      std::make_shared<autoware::pointcloud_preprocessor::DistortionCorrector2D>(*node_, true);
+      std::make_shared<autoware::pointcloud_preprocessor::DistortionCorrector2D>(*node_);
     distortion_corrector_3d_ =
-      std::make_shared<autoware::pointcloud_preprocessor::DistortionCorrector3D>(*node_, true);
+      std::make_shared<autoware::pointcloud_preprocessor::DistortionCorrector3D>(*node_);
 
     // Setup TF
     tf_broadcaster_ = std::make_shared<tf2_ros::StaticTransformBroadcaster>(node_);
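
Reviewer note (not part of the diff): for reference, the call pattern that the preprocessor nodes adopt in this patch is sketched below. It is a minimal, illustrative example of the ManagedTransformBuffer API declared in the new header; the helper function, node pointer, and frame names ("base_link", "lidar_top") are hypothetical, and only the constructor and the templated, std::optional-returning getTransform shown in this patch are assumed.

#include <autoware/universe_utils/ros/managed_transform_buffer.hpp>
#include <rclcpp/rclcpp.hpp>

#include <Eigen/Core>

#include <memory>

// Illustrative helper (hypothetical): look up base_link <- lidar_top as an Eigen matrix.
// An empty optional means the transform could not be resolved, so the caller skips processing.
bool lookup_lidar_transform(rclcpp::Node * node, Eigen::Matrix4f & out)
{
  auto managed_tf_buffer =
    std::make_unique<autoware::universe_utils::ManagedTransformBuffer>(node);
  auto transform = managed_tf_buffer->getTransform<Eigen::Matrix4f>("base_link", "lidar_top");
  if (!transform.has_value()) {
    RCLCPP_WARN(node->get_logger(), "base_link <- lidar_top transform is not available yet");
    return false;
  }
  out = transform.value();
  return true;
}

The same buffer can instead be queried for a geometry_msgs::msg::TransformStamped or a tf2::Transform by changing the template argument, which is how the distortion corrector and filter nodes above pick the representation they need without converting matrices by hand.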