From daf8a2dac1304d339125a3bda54a85002c5b7e38 Mon Sep 17 00:00:00 2001 From: Ar-Ray-code Date: Mon, 12 Aug 2024 13:05:53 +0900 Subject: [PATCH] fix colcon test failures --- yolox_ros_cpp/yolox_cpp/CMakeLists.txt | 4 +- .../include/yolox_cpp/coco_names.hpp | 224 +++--- .../yolox_cpp/include/yolox_cpp/config.h.in | 20 +- .../yolox_cpp/include/yolox_cpp/core.hpp | 609 +++++++------- .../include/yolox_cpp/tensorrt_logging.h | 756 +++++++----------- .../yolox_cpp/include/yolox_cpp/utils.hpp | 151 ++-- .../yolox_cpp/include/yolox_cpp/yolox.hpp | 22 +- .../include/yolox_cpp/yolox_onnxruntime.hpp | 89 ++- .../include/yolox_cpp/yolox_openvino.hpp | 55 +- .../include/yolox_cpp/yolox_tensorrt.hpp | 103 +-- .../include/yolox_cpp/yolox_tflite.hpp | 101 ++- .../yolox_cpp/src/yolox_onnxruntime.cpp | 292 +++---- .../yolox_cpp/src/yolox_openvino.cpp | 160 ++-- .../yolox_cpp/src/yolox_tensorrt.cpp | 296 ++++--- yolox_ros_cpp/yolox_cpp/src/yolox_tflite.cpp | 343 ++++---- .../include/yolox_ros_cpp/yolox_ros_cpp.hpp | 82 +- .../launch/yolox_onnxruntime.launch.py | 10 +- .../launch/yolox_openvino.launch.py | 19 +- .../launch/yolox_tensorrt.launch.py | 22 +- .../launch/yolox_tflite.launch.py | 18 +- .../yolox_ros_cpp/src/yolox_ros_cpp.cpp | 369 ++++----- 21 files changed, 1949 insertions(+), 1796 deletions(-) diff --git a/yolox_ros_cpp/yolox_cpp/CMakeLists.txt b/yolox_ros_cpp/yolox_cpp/CMakeLists.txt index a4d9b57..ac795bc 100644 --- a/yolox_ros_cpp/yolox_cpp/CMakeLists.txt +++ b/yolox_ros_cpp/yolox_cpp/CMakeLists.txt @@ -46,7 +46,7 @@ endif() if(YOLOX_USE_TENSORRT) find_package(CUDA REQUIRED) - if (NOT CUDA_TOOLKIT_ROOT_DIR) + if(NOT CUDA_TOOLKIT_ROOT_DIR) set(CUDA_TOOLKIT_ROOT_DIR /usr/local/cuda) endif() find_path(TENSORRT_INCLUDE_DIR NvInfer.h @@ -88,7 +88,7 @@ ament_target_dependencies(yolox_cpp ${TARGET_DPENDENCIES}) ament_export_dependencies(${TARGET_DPENDENCIES}) target_link_libraries(yolox_cpp ${TARGET_LIBS}) -if (YOLOX_USE_TFLITE) +if(YOLOX_USE_TFLITE) 
ament_export_include_directories(${INCLUDES}) install(DIRECTORY ${TFLITE_LIB_PATH}/ DESTINATION lib) endif() diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/coco_names.hpp b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/coco_names.hpp index 61d2f03..7499ac2 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/coco_names.hpp +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/coco_names.hpp @@ -1,99 +1,127 @@ -#ifndef _YOLOX_CPP_COCO_NAMES_HPP -#define _YOLOX_CPP_COCO_NAMES_HPP +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
-namespace yolox_cpp{ - static const std::vector COCO_CLASSES = { - "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", - "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", - "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", - "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", - "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", - "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", - "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", - "hair drier", "toothbrush" - }; - const float color_list[80][3] ={ - {0.000, 0.447, 0.741}, - {0.850, 0.325, 0.098}, - {0.929, 0.694, 0.125}, - {0.494, 0.184, 0.556}, - {0.466, 0.674, 0.188}, - {0.301, 0.745, 0.933}, - {0.635, 0.078, 0.184}, - {0.300, 0.300, 0.300}, - {0.600, 0.600, 0.600}, - {1.000, 0.000, 0.000}, - {1.000, 0.500, 0.000}, - {0.749, 0.749, 0.000}, - {0.000, 1.000, 0.000}, - {0.000, 0.000, 1.000}, - {0.667, 0.000, 1.000}, - {0.333, 0.333, 0.000}, - {0.333, 0.667, 0.000}, - {0.333, 1.000, 0.000}, - {0.667, 0.333, 0.000}, - {0.667, 0.667, 0.000}, - {0.667, 1.000, 0.000}, - {1.000, 0.333, 0.000}, - {1.000, 0.667, 0.000}, - {1.000, 1.000, 0.000}, - {0.000, 0.333, 0.500}, - {0.000, 0.667, 0.500}, - {0.000, 1.000, 0.500}, - {0.333, 0.000, 0.500}, - {0.333, 0.333, 0.500}, - {0.333, 0.667, 0.500}, - {0.333, 1.000, 0.500}, - {0.667, 0.000, 0.500}, - {0.667, 0.333, 0.500}, - {0.667, 0.667, 0.500}, - {0.667, 1.000, 0.500}, - {1.000, 0.000, 0.500}, - {1.000, 0.333, 0.500}, - {1.000, 0.667, 0.500}, - {1.000, 1.000, 0.500}, - {0.000, 0.333, 1.000}, - {0.000, 
0.667, 1.000}, - {0.000, 1.000, 1.000}, - {0.333, 0.000, 1.000}, - {0.333, 0.333, 1.000}, - {0.333, 0.667, 1.000}, - {0.333, 1.000, 1.000}, - {0.667, 0.000, 1.000}, - {0.667, 0.333, 1.000}, - {0.667, 0.667, 1.000}, - {0.667, 1.000, 1.000}, - {1.000, 0.000, 1.000}, - {1.000, 0.333, 1.000}, - {1.000, 0.667, 1.000}, - {0.333, 0.000, 0.000}, - {0.500, 0.000, 0.000}, - {0.667, 0.000, 0.000}, - {0.833, 0.000, 0.000}, - {1.000, 0.000, 0.000}, - {0.000, 0.167, 0.000}, - {0.000, 0.333, 0.000}, - {0.000, 0.500, 0.000}, - {0.000, 0.667, 0.000}, - {0.000, 0.833, 0.000}, - {0.000, 1.000, 0.000}, - {0.000, 0.000, 0.167}, - {0.000, 0.000, 0.333}, - {0.000, 0.000, 0.500}, - {0.000, 0.000, 0.667}, - {0.000, 0.000, 0.833}, - {0.000, 0.000, 1.000}, - {0.000, 0.000, 0.000}, - {0.143, 0.143, 0.143}, - {0.286, 0.286, 0.286}, - {0.429, 0.429, 0.429}, - {0.571, 0.571, 0.571}, - {0.714, 0.714, 0.714}, - {0.857, 0.857, 0.857}, - {0.000, 0.447, 0.741}, - {0.314, 0.717, 0.741}, - {0.500, 0.500, 0.000} - }; -} -#endif \ No newline at end of file +#ifndef YOLOX_CPP__COCO_NAMES_HPP_ +#define YOLOX_CPP__COCO_NAMES_HPP_ + +#include +#include + +namespace yolox_cpp +{ +static const std::vector COCO_CLASSES = { + "person", "bicycle", "car", "motorcycle", "airplane", + "bus", "train", "truck", "boat", "traffic light", + "fire hydrant", "stop sign", "parking meter", "bench", + "bird", "cat", "dog", "horse", "sheep", "cow", + "elephant", "bear", "zebra", "giraffe", "backpack", + "umbrella", "handbag", "tie", "suitcase", "frisbee", + "skis", "snowboard", "sports ball", "kite", "baseball bat", + "baseball glove", "skateboard", "surfboard", + "tennis racket", "bottle", "wine glass", "cup", "fork", + "knife", "spoon", "bowl", "banana", "apple", + "sandwich", "orange", "broccoli", "carrot", "hot dog", + "pizza", "donut", "cake", "chair", "couch", + "potted plant", "bed", "dining table", "toilet", "tv", + "laptop", "mouse", "remote", "keyboard", "cell phone", + "microwave", "oven", "toaster", "sink", 
"refrigerator", + "book", "clock", "vase", "scissors", "teddy bear", + "hair drier", "toothbrush" +}; +const float color_list[80][3] = { + {0.000, 0.447, 0.741}, + {0.850, 0.325, 0.098}, + {0.929, 0.694, 0.125}, + {0.494, 0.184, 0.556}, + {0.466, 0.674, 0.188}, + {0.301, 0.745, 0.933}, + {0.635, 0.078, 0.184}, + {0.300, 0.300, 0.300}, + {0.600, 0.600, 0.600}, + {1.000, 0.000, 0.000}, + {1.000, 0.500, 0.000}, + {0.749, 0.749, 0.000}, + {0.000, 1.000, 0.000}, + {0.000, 0.000, 1.000}, + {0.667, 0.000, 1.000}, + {0.333, 0.333, 0.000}, + {0.333, 0.667, 0.000}, + {0.333, 1.000, 0.000}, + {0.667, 0.333, 0.000}, + {0.667, 0.667, 0.000}, + {0.667, 1.000, 0.000}, + {1.000, 0.333, 0.000}, + {1.000, 0.667, 0.000}, + {1.000, 1.000, 0.000}, + {0.000, 0.333, 0.500}, + {0.000, 0.667, 0.500}, + {0.000, 1.000, 0.500}, + {0.333, 0.000, 0.500}, + {0.333, 0.333, 0.500}, + {0.333, 0.667, 0.500}, + {0.333, 1.000, 0.500}, + {0.667, 0.000, 0.500}, + {0.667, 0.333, 0.500}, + {0.667, 0.667, 0.500}, + {0.667, 1.000, 0.500}, + {1.000, 0.000, 0.500}, + {1.000, 0.333, 0.500}, + {1.000, 0.667, 0.500}, + {1.000, 1.000, 0.500}, + {0.000, 0.333, 1.000}, + {0.000, 0.667, 1.000}, + {0.000, 1.000, 1.000}, + {0.333, 0.000, 1.000}, + {0.333, 0.333, 1.000}, + {0.333, 0.667, 1.000}, + {0.333, 1.000, 1.000}, + {0.667, 0.000, 1.000}, + {0.667, 0.333, 1.000}, + {0.667, 0.667, 1.000}, + {0.667, 1.000, 1.000}, + {1.000, 0.000, 1.000}, + {1.000, 0.333, 1.000}, + {1.000, 0.667, 1.000}, + {0.333, 0.000, 0.000}, + {0.500, 0.000, 0.000}, + {0.667, 0.000, 0.000}, + {0.833, 0.000, 0.000}, + {1.000, 0.000, 0.000}, + {0.000, 0.167, 0.000}, + {0.000, 0.333, 0.000}, + {0.000, 0.500, 0.000}, + {0.000, 0.667, 0.000}, + {0.000, 0.833, 0.000}, + {0.000, 1.000, 0.000}, + {0.000, 0.000, 0.167}, + {0.000, 0.000, 0.333}, + {0.000, 0.000, 0.500}, + {0.000, 0.000, 0.667}, + {0.000, 0.000, 0.833}, + {0.000, 0.000, 1.000}, + {0.000, 0.000, 0.000}, + {0.143, 0.143, 0.143}, + {0.286, 0.286, 0.286}, + {0.429, 0.429, 0.429}, + {0.571, 
0.571, 0.571}, + {0.714, 0.714, 0.714}, + {0.857, 0.857, 0.857}, + {0.000, 0.447, 0.741}, + {0.314, 0.717, 0.741}, + {0.500, 0.500, 0.000} +}; + +} // namespace yolox_cpp + +#endif // YOLOX_CPP__COCO_NAMES_HPP_ diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/config.h.in b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/config.h.in index 72f4411..b6a7aa2 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/config.h.in +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/config.h.in @@ -1,9 +1,23 @@ -#ifndef _YOLOX_CPP_CONFIG_H_ -#define _YOLOX_CPP_CONFIG_H_ +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef YOLOX_CPP__CONFIG_H_IN_ +#define YOLOX_CPP__CONFIG_H_IN_ #cmakedefine ENABLE_OPENVINO #cmakedefine ENABLE_TENSORRT #cmakedefine ENABLE_ONNXRUNTIME #cmakedefine ENABLE_TFLITE -#endif // _YOLOX_CPP_CONFIG_H_ +#endif // YOLOX_CPP__CONFIG_H_IN_ diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/core.hpp b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/core.hpp index fb68649..e4f0818 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/core.hpp +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/core.hpp @@ -1,6 +1,24 @@ -#ifndef _YOLOX_CPP_CORE_HPP -#define _YOLOX_CPP_CORE_HPP - +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef YOLOX_CPP__CORE_HPP_ +#define YOLOX_CPP__CORE_HPP_ + +#include +#include +#include +#include #include namespace yolox_cpp @@ -10,308 +28,299 @@ namespace yolox_cpp */ #define file_name_t std::string - struct Object - { - cv::Rect_ rect; - int label; - float prob; - }; - - struct GridAndStride - { - int grid0; - int grid1; - int stride; - GridAndStride(const int grid0_, const int grid1_, const int stride_) - : grid0(grid0_), grid1(grid1_), stride(stride_) - { - } - }; - - class AbcYoloX - { - public: - AbcYoloX() {} - AbcYoloX(const float nms_th = 0.45, const float conf_th = 0.3, - const std::string &model_version = "0.1.1rc0", - const int num_classes = 80, const bool p6 = false) - : nms_thresh_(nms_th), bbox_conf_thresh_(conf_th), - num_classes_(num_classes), p6_(p6), model_version_(model_version) - { - } - virtual std::vector inference(const cv::Mat &frame) = 0; - - protected: - int input_w_; - int input_h_; - float nms_thresh_; - float bbox_conf_thresh_; - int num_classes_; - bool p6_; - std::string model_version_; - const std::vector mean_ = {0.485, 0.456, 0.406}; - const std::vector std_ = {0.229, 0.224, 0.225}; - const std::vector strides_ = {8, 16, 32}; - const std::vector strides_p6_ = {8, 16, 32, 64}; - std::vector grid_strides_; - - cv::Mat static_resize(const cv::Mat &img) - { - const float r = std::min(input_w_ / (img.cols * 1.0), input_h_ / (img.rows * 1.0)); - // r = std::min(r, 1.0f); - const int unpad_w = r * img.cols; - const int unpad_h = r * img.rows; - cv::Mat re(unpad_h, unpad_w, CV_8UC3); - cv::resize(img, re, 
re.size()); - cv::Mat out(input_h_, input_w_, CV_8UC3, cv::Scalar(114, 114, 114)); - re.copyTo(out(cv::Rect(0, 0, re.cols, re.rows))); - return out; - } - - // for NCHW - void blobFromImage(const cv::Mat &img, float *blob_data) - { - const size_t channels = 3; - const size_t img_h = img.rows; - const size_t img_w = img.cols; - if (this->model_version_ == "0.1.0") - { - for (size_t c = 0; c < channels; ++c) - { - for (size_t h = 0; h < img_h; ++h) - { - for (size_t w = 0; w < img_w; ++w) - { - blob_data[c * img_w * img_h + h * img_w + w] = - ((float)img.ptr(h)[w][c] / 255.0 - this->mean_[c]) / this->std_[c]; - } - } - } - } - else - { - for (size_t c = 0; c < channels; ++c) - { - for (size_t h = 0; h < img_h; ++h) - { - for (size_t w = 0; w < img_w; ++w) - { - blob_data[c * img_w * img_h + h * img_w + w] = (float)img.ptr(h)[w][c]; // 0.1.1rc0 or later - } - } - } - } - } - - // for NHWC - void blobFromImage_nhwc(const cv::Mat &img, float *blob_data) - { - const size_t channels = 3; - const size_t img_h = img.rows; - const size_t img_w = img.cols; - if (this->model_version_ == "0.1.0") - { - for (size_t i = 0; i < img_h * img_w; ++i) - { - for (size_t c = 0; c < channels; ++c) - { - blob_data[i * channels + c] = - ((float)img.data[i * channels + c] / 255.0 - this->mean_[c]) / this->std_[c]; - } - } - } - else - { - for (size_t i = 0; i < img_h * img_w; ++i) - { - for (size_t c = 0; c < channels; ++c) - { - blob_data[i * channels + c] = (float)img.data[i * channels + c]; // 0.1.1rc0 or later - } - } - } - } +struct Object +{ + cv::Rect_ rect; + int label; + float prob; +}; - void generate_grids_and_stride(const int target_w, const int target_h, const std::vector &strides, std::vector &grid_strides) - { - for (auto stride : strides) - { - const int num_grid_w = target_w / stride; - const int num_grid_h = target_h / stride; - for (int g1 = 0; g1 < num_grid_h; ++g1) - { - for (int g0 = 0; g0 < num_grid_w; ++g0) - { - grid_strides.emplace_back(g0, g1, stride); - } - } - } 
+struct GridAndStride +{ + int grid0; + int grid1; + int stride; + GridAndStride(const int grid0_, const int grid1_, const int stride_) + : grid0(grid0_), grid1(grid1_), stride(stride_) + { + } +}; + +class AbcYoloX +{ +public: + AbcYoloX() {} + AbcYoloX( + const float nms_th = 0.45, const float conf_th = 0.3, + const std::string & model_version = "0.1.1rc0", + const int num_classes = 80, const bool p6 = false) + : nms_thresh_(nms_th), bbox_conf_thresh_(conf_th), + num_classes_(num_classes), p6_(p6), model_version_(model_version) + { + } + virtual std::vector inference(const cv::Mat & frame) = 0; + +protected: + int input_w_; + int input_h_; + float nms_thresh_; + float bbox_conf_thresh_; + int num_classes_; + bool p6_; + std::string model_version_; + const std::vector mean_ = {0.485, 0.456, 0.406}; + const std::vector std_ = {0.229, 0.224, 0.225}; + const std::vector strides_ = {8, 16, 32}; + const std::vector strides_p6_ = {8, 16, 32, 64}; + std::vector grid_strides_; + + cv::Mat static_resize(const cv::Mat & img) + { + const float r = std::min(input_w_ / (img.cols * 1.0), input_h_ / (img.rows * 1.0)); + // r = std::min(r, 1.0f); + const int unpad_w = r * img.cols; + const int unpad_h = r * img.rows; + cv::Mat re(unpad_h, unpad_w, CV_8UC3); + cv::resize(img, re, re.size()); + cv::Mat out(input_h_, input_w_, CV_8UC3, cv::Scalar(114, 114, 114)); + re.copyTo(out(cv::Rect(0, 0, re.cols, re.rows))); + return out; + } + + // for NCHW + void blobFromImage(const cv::Mat & img, float * blob_data) + { + const size_t channels = 3; + const size_t img_h = img.rows; + const size_t img_w = img.cols; + if (this->model_version_ == "0.1.0") { + for (size_t c = 0; c < channels; ++c) { + for (size_t h = 0; h < img_h; ++h) { + for (size_t w = 0; w < img_w; ++w) { + blob_data[c * img_w * img_h + h * img_w + w] = + (static_cast(img.ptr(h)[w][c]) / 255.0 - + this->mean_[c]) / this->std_[c]; + } } - - void generate_yolox_proposals(const std::vector &grid_strides, const float *feat_ptr, 
const float prob_threshold, std::vector &objects) - { - const int num_anchors = grid_strides.size(); - - for (int anchor_idx = 0; anchor_idx < num_anchors; ++anchor_idx) - { - const int grid0 = grid_strides[anchor_idx].grid0; - const int grid1 = grid_strides[anchor_idx].grid1; - const int stride = grid_strides[anchor_idx].stride; - - const int basic_pos = anchor_idx * (num_classes_ + 5); - - const float box_objectness = feat_ptr[basic_pos + 4]; - int class_id = 0; - float max_class_score = 0.0; - for (int class_idx = 0; class_idx < num_classes_; ++class_idx) - { - const float box_cls_score = feat_ptr[basic_pos + 5 + class_idx]; - const float box_prob = box_objectness * box_cls_score; - if (box_prob > max_class_score) - { - class_id = class_idx; - max_class_score = box_prob; - } - } - if (max_class_score > prob_threshold) - { - // yolox/models/yolo_head.py decode logic - // outputs[..., :2] = (outputs[..., :2] + grids) * strides - // outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides - const float x_center = (feat_ptr[basic_pos + 0] + grid0) * stride; - const float y_center = (feat_ptr[basic_pos + 1] + grid1) * stride; - const float w = exp(feat_ptr[basic_pos + 2]) * stride; - const float h = exp(feat_ptr[basic_pos + 3]) * stride; - const float x0 = x_center - w * 0.5f; - const float y0 = y_center - h * 0.5f; - - Object obj; - obj.rect.x = x0; - obj.rect.y = y0; - obj.rect.width = w; - obj.rect.height = h; - obj.label = class_id; - obj.prob = max_class_score; - objects.push_back(obj); - } - } // point anchor loop + } + } else { + for (size_t c = 0; c < channels; ++c) { + for (size_t h = 0; h < img_h; ++h) { + for (size_t w = 0; w < img_w; ++w) { + // 0.1.1rc0 or later + blob_data[c * img_w * img_h + h * img_w + w] = + static_cast(img.ptr(h)[w][c]); + } } - - float intersection_area(const Object &a, const Object &b) - { - const cv::Rect_ inter = a.rect & b.rect; - return inter.area(); + } + } + } + + // for NHWC + void blobFromImage_nhwc(const cv::Mat & img, 
float * blob_data) + { + const size_t channels = 3; + const size_t img_h = img.rows; + const size_t img_w = img.cols; + if (this->model_version_ == "0.1.0") { + for (size_t i = 0; i < img_h * img_w; ++i) { + for (size_t c = 0; c < channels; ++c) { + blob_data[i * channels + c] = + (static_cast(img.data[i * channels + c]) / 255.0 - + this->mean_[c]) / this->std_[c]; } - - void qsort_descent_inplace(std::vector &faceobjects, int left, int right) - { - int i = left; - int j = right; - float p = faceobjects[(left + right) / 2].prob; - - while (i <= j) - { - while (faceobjects[i].prob > p) - ++i; - - while (faceobjects[j].prob < p) - --j; - - if (i <= j) - { - std::swap(faceobjects[i], faceobjects[j]); - - ++i; - --j; - } - } - if (left < j) - qsort_descent_inplace(faceobjects, left, j); - if (i < right) - qsort_descent_inplace(faceobjects, i, right); + } + } else { + for (size_t i = 0; i < img_h * img_w; ++i) { + for (size_t c = 0; c < channels; ++c) { + // 0.1.1rc0 or later + blob_data[i * channels + c] = static_cast(img.data[i * channels + c]); } - - void qsort_descent_inplace(std::vector &objects) - { - if (objects.empty()) - return; - - qsort_descent_inplace(objects, 0, objects.size() - 1); + } + } + } + + void generate_grids_and_stride( + const int target_w, const int target_h, + const std::vector & strides, + std::vector & grid_strides) + { + for (auto stride : strides) { + const int num_grid_w = target_w / stride; + const int num_grid_h = target_h / stride; + for (int g1 = 0; g1 < num_grid_h; ++g1) { + for (int g0 = 0; g0 < num_grid_w; ++g0) { + grid_strides.emplace_back(g0, g1, stride); } - - void nms_sorted_bboxes(const std::vector &faceobjects, std::vector &picked, const float nms_threshold) - { - picked.clear(); - - const int n = faceobjects.size(); - - std::vector areas(n); - for (int i = 0; i < n; ++i) - { - areas[i] = faceobjects[i].rect.area(); - } - - for (int i = 0; i < n; ++i) - { - const Object &a = faceobjects[i]; - const int picked_size = 
picked.size(); - - int keep = 1; - for (int j = 0; j < picked_size; ++j) - { - const Object &b = faceobjects[picked[j]]; - - // intersection over union - const float inter_area = intersection_area(a, b); - const float union_area = areas[i] + areas[picked[j]] - inter_area; - // float IoU = inter_area / union_area - if (inter_area / union_area > nms_threshold) - keep = 0; - } - - if (keep) - picked.push_back(i); - } + } + } + } + + void generate_yolox_proposals( + const std::vector & grid_strides, + const float * feat_ptr, const float prob_threshold, + std::vector & objects) + { + const int num_anchors = grid_strides.size(); + + for (int anchor_idx = 0; anchor_idx < num_anchors; ++anchor_idx) { + const int grid0 = grid_strides[anchor_idx].grid0; + const int grid1 = grid_strides[anchor_idx].grid1; + const int stride = grid_strides[anchor_idx].stride; + + const int basic_pos = anchor_idx * (num_classes_ + 5); + + const float box_objectness = feat_ptr[basic_pos + 4]; + int class_id = 0; + float max_class_score = 0.0; + for (int class_idx = 0; class_idx < num_classes_; ++class_idx) { + const float box_cls_score = feat_ptr[basic_pos + 5 + class_idx]; + const float box_prob = box_objectness * box_cls_score; + if (box_prob > max_class_score) { + class_id = class_idx; + max_class_score = box_prob; } - - void decode_outputs(const float *prob, const std::vector &grid_strides, - std::vector &objects, const float bbox_conf_thresh, - const float scale, const int img_w, const int img_h) - { - - std::vector proposals; - generate_yolox_proposals(grid_strides, prob, bbox_conf_thresh, proposals); - - qsort_descent_inplace(proposals); - - std::vector picked; - nms_sorted_bboxes(proposals, picked, nms_thresh_); - - int count = picked.size(); - objects.resize(count); - - for (int i = 0; i < count; ++i) - { - objects[i] = proposals[picked[i]]; - - // adjust offset to original unpadded - float x0 = (objects[i].rect.x) / scale; - float y0 = (objects[i].rect.y) / scale; - float x1 = 
(objects[i].rect.x + objects[i].rect.width) / scale; - float y1 = (objects[i].rect.y + objects[i].rect.height) / scale; - - // clip - x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f); - y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f); - x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f); - y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f); - - objects[i].rect.x = x0; - objects[i].rect.y = y0; - objects[i].rect.width = x1 - x0; - objects[i].rect.height = y1 - y0; - } + } + if (max_class_score > prob_threshold) { + const float x_center = (feat_ptr[basic_pos + 0] + grid0) * stride; + const float y_center = (feat_ptr[basic_pos + 1] + grid1) * stride; + const float w = exp(feat_ptr[basic_pos + 2]) * stride; + const float h = exp(feat_ptr[basic_pos + 3]) * stride; + const float x0 = x_center - w * 0.5f; + const float y0 = y_center - h * 0.5f; + + Object obj; + obj.rect.x = x0; + obj.rect.y = y0; + obj.rect.width = w; + obj.rect.height = h; + obj.label = class_id; + obj.prob = max_class_score; + objects.push_back(obj); + } + } // point anchor loop + } + + float intersection_area(const Object & a, const Object & b) + { + const cv::Rect_ inter = a.rect & b.rect; + return inter.area(); + } + + void qsort_descent_inplace(std::vector & faceobjects, int left, int right) + { + int i = left; + int j = right; + float p = faceobjects[(left + right) / 2].prob; + + while (i <= j) { + while (faceobjects[i].prob > p) { + ++i; + } + + while (faceobjects[j].prob < p) { + --j; + } + + if (i <= j) { + std::swap(faceobjects[i], faceobjects[j]); + + ++i; + --j; + } + } + if (left < j) { + qsort_descent_inplace(faceobjects, left, j); + } + if (i < right) { + qsort_descent_inplace(faceobjects, i, right); + } + } + + void qsort_descent_inplace(std::vector & objects) + { + if (objects.empty()) { + return; + } + + qsort_descent_inplace(objects, 0, objects.size() - 1); + } + + void nms_sorted_bboxes( + const std::vector & faceobjects, + std::vector & picked, + const float 
nms_threshold) + { + picked.clear(); + + const int n = faceobjects.size(); + + std::vector areas(n); + for (int i = 0; i < n; ++i) { + areas[i] = faceobjects[i].rect.area(); + } + + for (int i = 0; i < n; ++i) { + const Object & a = faceobjects[i]; + const int picked_size = picked.size(); + + int keep = 1; + for (int j = 0; j < picked_size; ++j) { + const Object & b = faceobjects[picked[j]]; + + // intersection over union + const float inter_area = intersection_area(a, b); + const float union_area = areas[i] + areas[picked[j]] - inter_area; + // float IoU = inter_area / union_area + if (inter_area / union_area > nms_threshold) { + keep = 0; } - }; -} -#endif + } + + if (keep) { + picked.push_back(i); + } + } + } + + void decode_outputs( + const float * prob, const std::vector & grid_strides, + std::vector & objects, const float bbox_conf_thresh, + const float scale, const int img_w, const int img_h) + { + std::vector proposals; + generate_yolox_proposals(grid_strides, prob, bbox_conf_thresh, proposals); + + qsort_descent_inplace(proposals); + + std::vector picked; + nms_sorted_bboxes(proposals, picked, nms_thresh_); + + int count = picked.size(); + objects.resize(count); + + for (int i = 0; i < count; ++i) { + objects[i] = proposals[picked[i]]; + + float x0 = (objects[i].rect.x) / scale; + float y0 = (objects[i].rect.y) / scale; + float x1 = (objects[i].rect.x + objects[i].rect.width) / scale; + float y1 = (objects[i].rect.y + objects[i].rect.height) / scale; + + // clip + x0 = std::max(std::min(x0, static_cast(img_w - 1)), 0.f); + y0 = std::max(std::min(y0, static_cast(img_h - 1)), 0.f); + x1 = std::max(std::min(x1, static_cast(img_w - 1)), 0.f); + y1 = std::max(std::min(y1, static_cast(img_h - 1)), 0.f); + + objects[i].rect.x = x0; + objects[i].rect.y = y0; + objects[i].rect.width = x1 - x0; + objects[i].rect.height = y1 - y0; + } + } +}; + +} // namespace yolox_cpp + +#endif // YOLOX_CPP__CORE_HPP_ diff --git 
a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/tensorrt_logging.h b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/tensorrt_logging.h index abea1ec..d2efbe4 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/tensorrt_logging.h +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/tensorrt_logging.h @@ -1,23 +1,22 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TENSORRT_LOGGING_H -#define TENSORRT_LOGGING_H - -#include "NvInferRuntimeCommon.h" +// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ + +#ifndef YOLOX_CPP__TENSORRT_LOGGING_H_ +#define YOLOX_CPP__TENSORRT_LOGGING_H_ + +#include "NvInferRuntimeCommon.h" // NOLINT #include #include #include @@ -28,501 +27,330 @@ namespace yolox_cpp { + using Severity = nvinfer1::ILogger::Severity; // NOLINT + + class LogStreamConsumerBuffer: public std::stringbuf + { +public: + LogStreamConsumerBuffer( + std::ostream & stream, const std::string & prefix, + bool shouldLog) : mOutput(stream), mPrefix(prefix), mShouldLog(shouldLog) // NOLINT + { + } + + LogStreamConsumerBuffer(LogStreamConsumerBuffer && other) noexcept : mOutput(other.mOutput), + mPrefix(other.mPrefix), mShouldLog(other.mShouldLog) // NOLINT + { + } - using Severity = nvinfer1::ILogger::Severity; + LogStreamConsumerBuffer(const LogStreamConsumerBuffer & other) = delete; + LogStreamConsumerBuffer() = delete; + LogStreamConsumerBuffer & operator = (const LogStreamConsumerBuffer &) = delete; + LogStreamConsumerBuffer & operator = (LogStreamConsumerBuffer &&) = delete; - class LogStreamConsumerBuffer : public std::stringbuf + ~LogStreamConsumerBuffer() override { - public: - LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog) - : mOutput(stream) - , mPrefix(prefix) - , mShouldLog(shouldLog) - { - } + if (pbase() != pptr()) { + putOutput(); + } + } - LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other) noexcept - : mOutput(other.mOutput) - , mPrefix(other.mPrefix) - , mShouldLog(other.mShouldLog) - { - } - LogStreamConsumerBuffer(const LogStreamConsumerBuffer& other) = delete; - LogStreamConsumerBuffer() = delete; - LogStreamConsumerBuffer& operator=(const LogStreamConsumerBuffer&) = delete; - LogStreamConsumerBuffer& operator=(LogStreamConsumerBuffer&&) = delete; - - ~LogStreamConsumerBuffer() override - { - // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence - // std::streambuf::pptr() gives a pointer to the current position of the output sequence - // if the 
pointer to the beginning is not equal to the pointer to the current position, - // call putOutput() to log the output to the stream - if (pbase() != pptr()) - { - putOutput(); - } - } + int32_t sync() override + { + putOutput(); + return 0; + } - //! - //! synchronizes the stream buffer and returns 0 on success - //! synchronizing the stream buffer consists of inserting the buffer contents into the stream, - //! resetting the buffer and flushing the stream - //! - int32_t sync() override - { - putOutput(); - return 0; - } + void putOutput() + { + if (mShouldLog) { + // prepend timestamp + std::time_t timestamp = std::time(nullptr); + tm * tm_local = std::localtime(×tamp); + mOutput << "["; + mOutput << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/"; + mOutput << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/"; + mOutput << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-"; + mOutput << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":"; + mOutput << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":"; + mOutput << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] "; + // std::stringbuf::str() gets the string contents of the buffer + // insert the buffer contents pre-appended by the appropriate prefix into the stream + mOutput << mPrefix << str(); + } + // set the buffer to empty + str(""); + // flush the stream + mOutput.flush(); + } - void putOutput() - { - if (mShouldLog) - { - // prepend timestamp - std::time_t timestamp = std::time(nullptr); - tm* tm_local = std::localtime(×tamp); - mOutput << "["; - mOutput << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/"; - mOutput << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/"; - mOutput << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-"; - mOutput << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":"; - mOutput << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":"; - 
mOutput << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] "; - // std::stringbuf::str() gets the string contents of the buffer - // insert the buffer contents pre-appended by the appropriate prefix into the stream - mOutput << mPrefix << str(); - } - // set the buffer to empty - str(""); - // flush the stream - mOutput.flush(); - } + void setShouldLog(bool shouldLog) + { + mShouldLog = shouldLog; + } - void setShouldLog(bool shouldLog) - { - mShouldLog = shouldLog; - } +private: + std::ostream & mOutput; + std::string mPrefix; + bool mShouldLog {}; + }; // class LogStreamConsumerBuffer - private: - std::ostream& mOutput; - std::string mPrefix; - bool mShouldLog{}; - }; // class LogStreamConsumerBuffer - //! - //! \class LogStreamConsumerBase - //! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer - //! - class LogStreamConsumerBase + class LogStreamConsumerBase + { +public: + LogStreamConsumerBase(std::ostream & stream, const std::string & prefix, bool shouldLog) + : mBuffer(stream, prefix, shouldLog) { + } + +protected: + LogStreamConsumerBuffer mBuffer; + }; // class LogStreamConsumerBase + + class LogStreamConsumer: protected LogStreamConsumerBase, public std::ostream + { +public: + LogStreamConsumer( + nvinfer1::ILogger::Severity reportableSeverity, + nvinfer1::ILogger::Severity severity) : LogStreamConsumerBase( + severityOstream( + severity), severityPrefix(severity), severity <= reportableSeverity), std::ostream( + &mBuffer), + mShouldLog(severity <= reportableSeverity), mSeverity(severity) // NOLINT { - public: - LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog) - : mBuffer(stream, prefix, shouldLog) - { - } + } - protected: - LogStreamConsumerBuffer mBuffer; - }; // class LogStreamConsumerBase + LogStreamConsumer( + LogStreamConsumer && + other) noexcept : LogStreamConsumerBase( + severityOstream(other.mSeverity), + severityPrefix(other.mSeverity), 
other.mShouldLog), std::ostream(&mBuffer), mShouldLog( + other.mShouldLog), mSeverity(other.mSeverity) // NOLINT + { + } - //! - //! \class LogStreamConsumer - //! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages. - //! Order of base classes is LogStreamConsumerBase and then std::ostream. - //! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field - //! in LogStreamConsumer and then the address of the buffer is passed to std::ostream. - //! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream. - //! Please do not change the order of the parent classes. - //! - class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream + LogStreamConsumer(const LogStreamConsumer & other) = delete; + LogStreamConsumer() = delete; + ~LogStreamConsumer() = default; + LogStreamConsumer & operator = (const LogStreamConsumer &) = delete; + LogStreamConsumer & operator = (LogStreamConsumer &&) = delete; + + void setReportableSeverity(Severity reportableSeverity) { - public: - //! - //! \brief Creates a LogStreamConsumer which logs messages with level severity. - //! Reportable severity determines if the messages are severe enough to be logged. - //! 
- LogStreamConsumer(nvinfer1::ILogger::Severity reportableSeverity, nvinfer1::ILogger::Severity severity) - : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity) - , std::ostream(&mBuffer) // links the stream buffer with the stream - , mShouldLog(severity <= reportableSeverity) - , mSeverity(severity) - { - } + mShouldLog = mSeverity <= reportableSeverity; + mBuffer.setShouldLog(mShouldLog); + } - LogStreamConsumer(LogStreamConsumer&& other) noexcept - : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog) - , std::ostream(&mBuffer) // links the stream buffer with the stream - , mShouldLog(other.mShouldLog) - , mSeverity(other.mSeverity) - { - } - LogStreamConsumer(const LogStreamConsumer& other) = delete; - LogStreamConsumer() = delete; - ~LogStreamConsumer() = default; - LogStreamConsumer& operator=(const LogStreamConsumer&) = delete; - LogStreamConsumer& operator=(LogStreamConsumer&&) = delete; - - void setReportableSeverity(Severity reportableSeverity) - { - mShouldLog = mSeverity <= reportableSeverity; - mBuffer.setShouldLog(mShouldLog); - } +private: + static std::ostream & severityOstream(Severity severity) + { + return severity >= Severity::kINFO ? std::cout : std::cerr; + } - private: - static std::ostream& severityOstream(Severity severity) - { - return severity >= Severity::kINFO ? 
std::cout : std::cerr; - } + static std::string severityPrefix(Severity severity) + { + switch (severity) { + case Severity::kINTERNAL_ERROR: return "[F] "; + case Severity::kERROR: return "[E] "; + case Severity::kWARNING: return "[W] "; + case Severity::kINFO: return "[I] "; + case Severity::kVERBOSE: return "[V] "; + default: assert(0); return ""; + } + } - static std::string severityPrefix(Severity severity) - { - switch (severity) - { - case Severity::kINTERNAL_ERROR: return "[F] "; - case Severity::kERROR: return "[E] "; - case Severity::kWARNING: return "[W] "; - case Severity::kINFO: return "[I] "; - case Severity::kVERBOSE: return "[V] "; - default: assert(0); return ""; - } - } + bool mShouldLog; + Severity mSeverity; + }; // class LogStreamConsumer - bool mShouldLog; - Severity mSeverity; - }; // class LogStreamConsumer + + class Logger: public nvinfer1::ILogger + { +public: + explicit Logger(Severity severity = Severity::kWARNING) + : mReportableSeverity(severity) { + } //! - //! \class Logger - //! - //! \brief Class which manages logging of TensorRT tools and samples - //! - //! \details This class provides a common interface for TensorRT tools and samples to log information to the console, - //! and supports logging two types of messages: - //! - //! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal) - //! - Test pass/fail messages - //! - //! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is - //! that the logic for controlling the verbosity and formatting of sample output is centralized in one location. - //! - //! In the future, this class could be extended to support dumping test results to a file in some standard format - //! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run). - //! - //! 
TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger - //! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT - //! library and messages coming from the sample. - //! - //! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the - //! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger - //! object. + //! \enum TestResult + //! \brief Represents the state of a given test //! - class Logger : public nvinfer1::ILogger + enum class TestResult { - public: - explicit Logger(Severity severity = Severity::kWARNING) - : mReportableSeverity(severity) - { - } - - //! - //! \enum TestResult - //! \brief Represents the state of a given test - //! - enum class TestResult - { - kRUNNING, //!< The test is running - kPASSED, //!< The test passed - kFAILED, //!< The test failed - kWAIVED //!< The test was waived - }; - - //! - //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger - //! \return The nvinfer1::ILogger associated with this Logger - //! - //! TODO Once all samples are updated to use this method to register the logger with TensorRT, - //! we can eliminate the inheritance of Logger from ILogger - //! - nvinfer1::ILogger& getTRTLogger() noexcept - { - return *this; - } - - //! - //! \brief Implementation of the nvinfer1::ILogger::log() virtual method - //! - //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the - //! inheritance from nvinfer1::ILogger - //! 
- void log(Severity severity, const char* msg) noexcept override - { - LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl; - } + kRUNNING, //!< The test is running + kPASSED, //!< The test passed + kFAILED, //!< The test failed + kWAIVED //!< The test was waived + }; - //! - //! \brief Method for controlling the verbosity of logging output - //! - //! \param severity The logger will only emit messages that have severity of this level or higher. - //! - void setReportableSeverity(Severity severity) noexcept - { - mReportableSeverity = severity; - } + nvinfer1::ILogger & getTRTLogger() noexcept + { + return *this; + } - //! - //! \brief Opaque handle that holds logging information for a particular test - //! - //! This object is an opaque handle to information used by the Logger to print test results. - //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used - //! with Logger::reportTest{Start,End}(). - //! - class TestAtom - { - public: - TestAtom(TestAtom&&) = default; - - private: - friend class Logger; - - TestAtom(bool started, const std::string& name, const std::string& cmdline) - : mStarted(started) - , mName(name) - , mCmdline(cmdline) - { - } - - bool mStarted; - std::string mName; - std::string mCmdline; - }; - - //! - //! \brief Define a test for logging - //! - //! \param[in] name The name of the test. This should be a string starting with - //! "TensorRT" and containing dot-separated strings containing - //! the characters [A-Za-z0-9_]. - //! For example, "TensorRT.sample_googlenet" - //! \param[in] cmdline The command line used to reproduce the test - // - //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). - //! 
- static TestAtom defineTest(const std::string& name, const std::string& cmdline) - { - return TestAtom(false, name, cmdline); - } + void log(Severity severity, const char * msg) noexcept override + { + LogStreamConsumer( + mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl; + } - //! - //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments - //! as input - //! - //! \param[in] name The name of the test - //! \param[in] argc The number of command-line arguments - //! \param[in] argv The array of command-line arguments (given as C strings) - //! - //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). - //! - static TestAtom defineTest(const std::string& name, int32_t argc, char const* const* argv) - { - // Append TensorRT version as info - const std::string vname = name + " [TensorRT v" + std::to_string(NV_TENSORRT_VERSION) + "]"; - auto cmdline = genCmdlineString(argc, argv); - return defineTest(vname, cmdline); - } + void setReportableSeverity(Severity severity) noexcept + { + mReportableSeverity = severity; + } - //! - //! \brief Report that a test has started. - //! - //! \pre reportTestStart() has not been called yet for the given testAtom - //! - //! \param[in] testAtom The handle to the test that has started - //! - static void reportTestStart(TestAtom& testAtom) - { - reportTestResult(testAtom, TestResult::kRUNNING); - assert(!testAtom.mStarted); - testAtom.mStarted = true; - } + class TestAtom + { +public: + TestAtom(TestAtom &&) = default; - //! - //! \brief Report that a test has ended. - //! - //! \pre reportTestStart() has been called for the given testAtom - //! - //! \param[in] testAtom The handle to the test that has ended - //! \param[in] result The result of the test. Should be one of TestResult::kPASSED, - //! TestResult::kFAILED, TestResult::kWAIVED - //! 
- static void reportTestEnd(TestAtom const& testAtom, TestResult result) - { - assert(result != TestResult::kRUNNING); - assert(testAtom.mStarted); - reportTestResult(testAtom, result); - } +private: + friend class Logger; - static int32_t reportPass(TestAtom const& testAtom) - { - reportTestEnd(testAtom, TestResult::kPASSED); - return EXIT_SUCCESS; - } + TestAtom(bool started, const std::string & name, const std::string & cmdline) + : mStarted(started), + mName(name), + mCmdline(cmdline) + { + } - static int32_t reportFail(TestAtom const& testAtom) - { - reportTestEnd(testAtom, TestResult::kFAILED); - return EXIT_FAILURE; - } + bool mStarted; + std::string mName; + std::string mCmdline; + }; - static int32_t reportWaive(TestAtom const& testAtom) - { - reportTestEnd(testAtom, TestResult::kWAIVED); - return EXIT_SUCCESS; - } - static int32_t reportTest(TestAtom const& testAtom, bool pass) - { - return pass ? reportPass(testAtom) : reportFail(testAtom); - } + static TestAtom defineTest(const std::string & name, const std::string & cmdline) + { + return TestAtom(false, name, cmdline); + } - Severity getReportableSeverity() const - { - return mReportableSeverity; - } + static TestAtom defineTest(const std::string & name, int32_t argc, char const * const * argv) + { + // Append TensorRT version as info + const std::string vname = name + " [TRT v" + std::to_string(NV_TENSORRT_VERSION) + "]"; + auto cmdline = genCmdlineString(argc, argv); + return defineTest(vname, cmdline); + } - private: - //! - //! \brief returns an appropriate string for prefixing a log message with the given severity - //! 
- static const char* severityPrefix(Severity severity) - { - switch (severity) - { - case Severity::kINTERNAL_ERROR: return "[F] "; - case Severity::kERROR: return "[E] "; - case Severity::kWARNING: return "[W] "; - case Severity::kINFO: return "[I] "; - case Severity::kVERBOSE: return "[V] "; - default: assert(0); return ""; - } - } + static void reportTestStart(TestAtom & testAtom) + { + reportTestResult(testAtom, TestResult::kRUNNING); + assert(!testAtom.mStarted); + testAtom.mStarted = true; + } - //! - //! \brief returns an appropriate string for prefixing a test result message with the given result - //! - static const char* testResultString(TestResult result) - { - switch (result) - { - case TestResult::kRUNNING: return "RUNNING"; - case TestResult::kPASSED: return "PASSED"; - case TestResult::kFAILED: return "FAILED"; - case TestResult::kWAIVED: return "WAIVED"; - default: assert(0); return ""; - } - } + static void reportTestEnd(TestAtom const & testAtom, TestResult result) + { + assert(result != TestResult::kRUNNING); + assert(testAtom.mStarted); + reportTestResult(testAtom, result); + } - //! - //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity - //! - static std::ostream& severityOstream(Severity severity) - { - return severity >= Severity::kINFO ? std::cout : std::cerr; - } + static int32_t reportPass(TestAtom const & testAtom) + { + reportTestEnd(testAtom, TestResult::kPASSED); + return EXIT_SUCCESS; + } - //! - //! \brief method that implements logging test results - //! - static void reportTestResult(TestAtom const& testAtom, TestResult result) - { - severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # " - << testAtom.mCmdline << std::endl; - } + static int32_t reportFail(TestAtom const & testAtom) + { + reportTestEnd(testAtom, TestResult::kFAILED); + return EXIT_FAILURE; + } - //! - //! 
\brief generate a command line string from the given (argc, argv) values - //! - static std::string genCmdlineString(int32_t argc, char const* const* argv) - { - std::stringstream ss; - for (int32_t i = 0; i < argc; i++) - { - if (i > 0) - { - ss << " "; - } - ss << argv[i]; - } - return ss.str(); - } + static int32_t reportWaive(TestAtom const & testAtom) + { + reportTestEnd(testAtom, TestResult::kWAIVED); + return EXIT_SUCCESS; + } - Severity mReportableSeverity; - }; // class Logger + static int32_t reportTest(TestAtom const & testAtom, bool pass) + { + return pass ? reportPass(testAtom) : reportFail(testAtom); + } -} // namespace yolox_cpp + Severity getReportableSeverity() const + { + return mReportableSeverity; + } -namespace -{ - using namespace yolox_cpp; - //! - //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE - //! - //! Example usage: - //! - //! LOG_VERBOSE(logger) << "hello world" << std::endl; - //! - inline LogStreamConsumer LOG_VERBOSE(const Logger& logger) +private: + static const char * severityPrefix(Severity severity) { - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE); + switch (severity) { + case Severity::kINTERNAL_ERROR: return "[F] "; + case Severity::kERROR: return "[E] "; + case Severity::kWARNING: return "[W] "; + case Severity::kINFO: return "[I] "; + case Severity::kVERBOSE: return "[V] "; + default: assert(0); return ""; + } } - //! - //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO - //! - //! Example usage: - //! - //! LOG_INFO(logger) << "hello world" << std::endl; - //! 
- inline LogStreamConsumer LOG_INFO(const Logger& logger) + static const char * testResultString(TestResult result) { - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO); + switch (result) { + case TestResult::kRUNNING: return "RUNNING"; + case TestResult::kPASSED: return "PASSED"; + case TestResult::kFAILED: return "FAILED"; + case TestResult::kWAIVED: return "WAIVED"; + default: assert(0); return ""; + } } - //! - //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING - //! - //! Example usage: - //! - //! LOG_WARN(logger) << "hello world" << std::endl; - //! - inline LogStreamConsumer LOG_WARN(const Logger& logger) + static std::ostream & severityOstream(Severity severity) { - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING); + return severity >= Severity::kINFO ? std::cout : std::cerr; } - //! - //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR - //! - //! Example usage: - //! - //! LOG_ERROR(logger) << "hello world" << std::endl; - //! - inline LogStreamConsumer LOG_ERROR(const Logger& logger) + static void reportTestResult(TestAtom const & testAtom, TestResult result) { - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR); + severityOstream(Severity::kINFO) + << "&&&& " << testResultString(result) << " " << testAtom.mName << " # " + << testAtom.mCmdline << std::endl; } - //! - //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR - //! ("fatal" severity) - //! - //! Example usage: - //! - //! LOG_FATAL(logger) << "hello world" << std::endl; - //! 
- inline LogStreamConsumer LOG_FATAL(const Logger& logger) + static std::string genCmdlineString(int32_t argc, char const * const * argv) { - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR); + std::stringstream ss; + for (int32_t i = 0; i < argc; i++) { + if (i > 0) { + ss << " "; + } + ss << argv[i]; + } + return ss.str(); } -} // anonymous namespace -#endif // TENSORRT_LOGGING_H + Severity mReportableSeverity; + }; // class Logger + + inline LogStreamConsumer LOG_VERBOSE(const Logger & logger) + { + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE); + } + + inline LogStreamConsumer LOG_INFO(const Logger & logger) + { + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO); + } + + inline LogStreamConsumer LOG_WARN(const Logger & logger) + { + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING); + } + + inline LogStreamConsumer LOG_ERROR(const Logger & logger) + { + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR); + } + + inline LogStreamConsumer LOG_FATAL(const Logger & logger) + { + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR); + } + +} // namespace yolox_cpp + +#endif // YOLOX_CPP__TENSORRT_LOGGING_H_ diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/utils.hpp b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/utils.hpp index 335b980..fa27ff5 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/utils.hpp +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/utils.hpp @@ -1,76 +1,99 @@ -#ifndef _YOLOX_CPP_UTILS_HPP -#define _YOLOX_CPP_UTILS_HPP +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +#ifndef YOLOX_CPP__UTILS_HPP_ +#define YOLOX_CPP__UTILS_HPP_ + #include #include #include +#include + #include #include "core.hpp" #include "coco_names.hpp" namespace yolox_cpp { - namespace utils - { - - static std::vector read_class_labels_file(const file_name_t &file_name) - { - std::vector class_names; - std::ifstream ifs(file_name); - std::string buff; - if (ifs.fail()) - { - return class_names; - } - while (getline(ifs, buff)) - { - if (buff == "") - continue; - class_names.push_back(buff); - } - return class_names; - } - - static void draw_objects(cv::Mat bgr, const std::vector &objects, const std::vector &class_names = COCO_CLASSES) - { - - for (const Object &obj : objects) - { - const int color_index = obj.label % 80; - cv::Scalar color = cv::Scalar(color_list[color_index][0], color_list[color_index][1], color_list[color_index][2]); - float c_mean = cv::mean(color)[0]; - cv::Scalar txt_color; - if (c_mean > 0.5) - { - txt_color = cv::Scalar(0, 0, 0); - } - else - { - txt_color = cv::Scalar(255, 255, 255); - } - - cv::rectangle(bgr, obj.rect, color * 255, 2); - - char text[256]; - sprintf(text, "%s %.1f%%", class_names[obj.label].c_str(), obj.prob * 100); - - int baseLine = 0; - cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine); - - cv::Scalar txt_bk_color = color * 0.7 * 255; - - int x = obj.rect.x; - int y = obj.rect.y + 1; - if (y > bgr.rows) - y = bgr.rows; - - cv::rectangle(bgr, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)), - txt_bk_color, -1); - - 
cv::putText(bgr, text, cv::Point(x, y + label_size.height), - cv::FONT_HERSHEY_SIMPLEX, 0.4, txt_color, 1); - } - } +namespace utils +{ + +static std::vector read_class_labels_file(const file_name_t & file_name) +{ + std::vector class_names; + std::ifstream ifs(file_name); + std::string buff; + if (ifs.fail()) { + return class_names; + } + while (getline(ifs, buff)) { + if (buff == "") { + continue; + } + class_names.push_back(buff); + } + return class_names; +} + +static void draw_objects( + cv::Mat bgr, + const std::vector & objects, + const std::vector & class_names = COCO_CLASSES) +{ + for (const Object & obj : objects) { + const int color_index = obj.label % 80; + cv::Scalar color = cv::Scalar( + color_list[color_index][0], color_list[color_index][1], color_list[color_index][2]); + float c_mean = cv::mean(color)[0]; + cv::Scalar txt_color; + if (c_mean > 0.5) { + txt_color = cv::Scalar(0, 0, 0); + } else { + txt_color = cv::Scalar(255, 255, 255); + } + + cv::rectangle(bgr, obj.rect, color * 255, 2); + + char text[256]; + sprintf(text, "%s %.1f%%", class_names[obj.label].c_str(), obj.prob * 100); // NOLINT + + int baseLine = 0; + cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine); + + cv::Scalar txt_bk_color = color * 0.7 * 255; + + int x = obj.rect.x; + int y = obj.rect.y + 1; + if (y > bgr.rows) { + y = bgr.rows; } + + cv::rectangle( + bgr, + cv::Rect( + cv::Point(x, y), + cv::Size(label_size.width, label_size.height + baseLine)), + txt_bk_color, -1); + + cv::putText( + bgr, text, cv::Point(x, y + label_size.height), + cv::FONT_HERSHEY_SIMPLEX, 0.4, txt_color, 1); + } } -#endif +} // namespace utils +} // namespace yolox_cpp + +#endif // YOLOX_CPP__UTILS_HPP_ diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox.hpp b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox.hpp index faad7cc..c1f8411 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox.hpp +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox.hpp @@ 
-1,7 +1,21 @@ -#ifndef _YOLOX_CPP_YOLOX_HPP -#define _YOLOX_CPP_YOLOX_HPP +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -#include "config.h" +#ifndef YOLOX_CPP__YOLOX_HPP_ +#define YOLOX_CPP__YOLOX_HPP_ + +#include "config.h" // NOLINT #ifdef ENABLE_OPENVINO #include "yolox_openvino.hpp" @@ -20,4 +34,4 @@ #endif -#endif +#endif // YOLOX_CPP__YOLOX_HPP_ diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_onnxruntime.hpp b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_onnxruntime.hpp index 75e6fd0..3e16e4e 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_onnxruntime.hpp +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_onnxruntime.hpp @@ -1,5 +1,19 @@ -#ifndef _YOLOX_CPP_YOLOX_ONNX_HPP -#define _YOLOX_CPP_YOLOX_ONNX_HPP +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef YOLOX_CPP__YOLOX_ONNXRUNTIME_HPP_ +#define YOLOX_CPP__YOLOX_ONNXRUNTIME_HPP_ #include #include @@ -8,38 +22,47 @@ #include #include -#include "onnxruntime/core/session/onnxruntime_cxx_api.h" +#include "onnxruntime/core/session/onnxruntime_cxx_api.h" // NOLINT #include "core.hpp" #include "coco_names.hpp" -namespace yolox_cpp{ - class YoloXONNXRuntime: public AbcYoloX{ - public: - YoloXONNXRuntime(file_name_t path_to_model, - int intra_op_num_threads, int inter_op_num_threads=1, - bool use_cuda=true, int device_id=0, bool use_parallel=false, - float nms_th=0.45, float conf_th=0.3, std::string model_version="0.1.1rc0", - int num_classes=80, bool p6=false); - std::vector inference(const cv::Mat& frame) override; - - private: - int intra_op_num_threads_ = 1; - int inter_op_num_threads_ = 1; - int device_id_ = 0; - bool use_cuda_ = true; - bool use_parallel_ = false; - - Ort::Session session_{nullptr}; - Ort::Env env_{ORT_LOGGING_LEVEL_WARNING, "Default"}; - - Ort::Value input_tensor_{nullptr}; - Ort::Value output_tensor_{nullptr}; - std::string input_name_; - std::string output_name_; - std::vector> input_buffer_; - std::vector> output_buffer_; - }; -} - -#endif +namespace yolox_cpp +{ +class YoloXONNXRuntime : public AbcYoloX +{ +public: + YoloXONNXRuntime( + file_name_t path_to_model, + int intra_op_num_threads, + int inter_op_num_threads = 1, + bool use_cuda = true, + int device_id = 0, + bool use_parallel = false, + float nms_th = 0.45, + float conf_th = 0.3, + std::string model_version = "0.1.1rc0", + int num_classes = 80, + bool p6 = false); + std::vector inference(const cv::Mat & frame) override; + +private: + int intra_op_num_threads_ = 1; + int inter_op_num_threads_ = 1; + int device_id_ = 0; + bool use_cuda_ = true; + bool use_parallel_ = false; + + Ort::Session session_{nullptr}; + Ort::Env env_{ORT_LOGGING_LEVEL_WARNING, "Default"}; + + Ort::Value input_tensor_{nullptr}; + Ort::Value output_tensor_{nullptr}; + std::string input_name_; + 
std::string output_name_; + std::vector> input_buffer_; + std::vector> output_buffer_; +}; +} // namespace yolox_cpp + +#endif // YOLOX_CPP__YOLOX_ONNXRUNTIME_HPP_ diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_openvino.hpp b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_openvino.hpp index d3ba1de..4c25e2e 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_openvino.hpp +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_openvino.hpp @@ -1,5 +1,19 @@ -#ifndef _YOLOX_CPP_YOLOX_OPENVINO_HPP -#define _YOLOX_CPP_YOLOX_OPENVINO_HPP +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef YOLOX_CPP__YOLOX_OPENVINO_HPP_ +#define YOLOX_CPP__YOLOX_OPENVINO_HPP_ #include #include @@ -12,20 +26,27 @@ #include "core.hpp" #include "coco_names.hpp" -namespace yolox_cpp{ - class YoloXOpenVINO: public AbcYoloX{ - public: - YoloXOpenVINO(file_name_t path_to_model, std::string device_name, - float nms_th=0.45, float conf_th=0.3, std::string model_version="0.1.1rc0", - int num_classes=80, bool p6=false); - std::vector inference(const cv::Mat& frame) override; +namespace yolox_cpp +{ +class YoloXOpenVINO : public AbcYoloX +{ +public: + YoloXOpenVINO( + file_name_t path_to_model, + std::string device_name, + float nms_th = 0.45, + float conf_th = 0.3, + std::string model_version = "0.1.1rc0", + int num_classes = 80, + bool p6 = false); + std::vector inference(const cv::Mat & frame) override; - private: - std::string device_name_; - std::vector blob_; - ov::Shape input_shape_; - ov::InferRequest infer_request_; - }; -} +private: + std::string device_name_; + std::vector blob_; + ov::Shape input_shape_; + ov::InferRequest infer_request_; +}; +} // namespace yolox_cpp -#endif \ No newline at end of file +#endif // YOLOX_CPP__YOLOX_OPENVINO_HPP_ diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tensorrt.hpp b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tensorrt.hpp index c61b0ae..df09b18 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tensorrt.hpp +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tensorrt.hpp @@ -1,5 +1,19 @@ -#ifndef _YOLOX_CPP_YOLOX_TENSORRT_HPP -#define _YOLOX_CPP_YOLOX_TENSORRT_HPP +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef YOLOX_CPP__YOLOX_TENSORRT_HPP_ +#define YOLOX_CPP__YOLOX_TENSORRT_HPP_ #include #include @@ -9,50 +23,47 @@ #include #include -#include "cuda_runtime_api.h" -#include "NvInfer.h" +#include "cuda_runtime_api.h" // NOLINT +#include "NvInfer.h" // NOLINT #include "core.hpp" #include "coco_names.hpp" -#include "tensorrt_logging.h" - -namespace yolox_cpp{ - using namespace nvinfer1; - - #define CHECK(status) \ - do\ - {\ - auto ret = (status);\ - if (ret != 0)\ - {\ - std::cerr << "Cuda failure: " << ret << std::endl;\ - abort();\ - }\ - } while (0) - - - class YoloXTensorRT: public AbcYoloX{ - public: - YoloXTensorRT(file_name_t path_to_engine, int device=0, - float nms_th=0.45, float conf_th=0.3, std::string model_version="0.1.1rc0", - int num_classes=80, bool p6=false); - ~YoloXTensorRT(); - std::vector inference(const cv::Mat& frame) override; - - private: - void doInference(float* input, float* output); - - int DEVICE_ = 0; - Logger gLogger_; - std::unique_ptr runtime_; - std::unique_ptr engine_; - std::unique_ptr context_; - int output_size_; - const int inputIndex_ = 0; - const int outputIndex_ = 1; - void *inference_buffers_[2]; - - }; -} // namespace yolox_cpp - -#endif \ No newline at end of file +#include "tensorrt_logging.h" // NOLINT + +namespace yolox_cpp +{ +using namespace nvinfer1; // NOLINT + +#define CHECK(status) do {auto ret = (status); \ + if (ret != 0) {std::cerr << "Cuda failure: " << ret << std::endl; abort();}} while (0) // NOLINT + +class YoloXTensorRT : public AbcYoloX +{ +public: + YoloXTensorRT( + file_name_t 
path_to_engine, + int device = 0, + float nms_th = 0.45, + float conf_th = 0.3, + std::string model_version = "0.1.1rc0", + int num_classes = 80, + bool p6 = false); + ~YoloXTensorRT(); + std::vector inference(const cv::Mat & frame) override; + +private: + void doInference(float * input, float * output); + + int DEVICE_ = 0; + Logger gLogger_; + std::unique_ptr runtime_; + std::unique_ptr engine_; + std::unique_ptr context_; + int output_size_; + const int inputIndex_ = 0; + const int outputIndex_ = 1; + void * inference_buffers_[2]; +}; +} // namespace yolox_cpp + +#endif // YOLOX_CPP__YOLOX_TENSORRT_HPP_ diff --git a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tflite.hpp b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tflite.hpp index ff216be..67a60d1 100644 --- a/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tflite.hpp +++ b/yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tflite.hpp @@ -1,5 +1,19 @@ -#ifndef _YOLOX_CPP_YOLOX_TFLITE_HPP -#define _YOLOX_CPP_YOLOX_TFLITE_HPP +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef YOLOX_CPP__YOLOX_TFLITE_HPP_ +#define YOLOX_CPP__YOLOX_TFLITE_HPP_ #include #include @@ -9,45 +23,52 @@ #include #include -#include "tensorflow/lite/interpreter.h" -#include "tensorflow/lite/kernels/register.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/model.h" -#include "tensorflow/lite/optional_debug_tools.h" -#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" -// #include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h" -// #include "tensorflow/lite/delegates/gpu/delegate.h" +#include "tensorflow/lite/interpreter.h" // NOLINT +#include "tensorflow/lite/kernels/register.h" // NOLINT +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" // NOLINT +#include "tensorflow/lite/model.h" // NOLINT +#include "tensorflow/lite/optional_debug_tools.h" // NOLINT +#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" // NOLINT +// #include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h" // NOLINT +// #include "tensorflow/lite/delegates/gpu/delegate.h" // NOLINT #include "core.hpp" #include "coco_names.hpp" -namespace yolox_cpp{ - #define TFLITE_MINIMAL_CHECK(x) \ - if (!(x)) { \ - fprintf(stderr, "Error at %s:%d\n", __FILE__, __LINE__); \ - exit(1); \ - } - - class YoloXTflite: public AbcYoloX{ - public: - YoloXTflite(file_name_t path_to_model, int num_threads, - float nms_th=0.45, float conf_th=0.3, std::string model_version="0.1.1rc0", - int num_classes=80, bool p6=false, bool is_nchw=true); - ~YoloXTflite(); - std::vector inference(const cv::Mat& frame) override; - - private: - int doInference(float* input, float* output); - - int input_size_; - int output_size_; - bool is_nchw_; - std::unique_ptr model_; - std::unique_ptr resolver_; - std::unique_ptr interpreter_; - TfLiteDelegate* delegate_; - - }; -} // namespace yolox_cpp - -#endif \ No newline at end of file +namespace yolox_cpp +{ +#define TFLITE_MINIMAL_CHECK(x) \ + if (!(x)) { \ + fprintf(stderr, "Error at %s:%d\n", 
__FILE__, __LINE__); \ + exit(1); \ + } // NOLINT + +class YoloXTflite : public AbcYoloX +{ +public: + YoloXTflite( + file_name_t path_to_model, + int num_threads, + float nms_th = 0.45, + float conf_th = 0.3, + std::string model_version = "0.1.1rc0", + int num_classes = 80, + bool p6 = false, + bool is_nchw = true); + ~YoloXTflite(); + std::vector inference(const cv::Mat & frame) override; + +private: + int doInference(float * input, float * output); + + int input_size_; + int output_size_; + bool is_nchw_; + std::unique_ptr model_; + std::unique_ptr resolver_; + std::unique_ptr interpreter_; + TfLiteDelegate * delegate_; +}; +} // namespace yolox_cpp + +#endif // YOLOX_CPP__YOLOX_TFLITE_HPP_ diff --git a/yolox_ros_cpp/yolox_cpp/src/yolox_onnxruntime.cpp b/yolox_ros_cpp/yolox_cpp/src/yolox_onnxruntime.cpp index 3545584..26e198b 100644 --- a/yolox_ros_cpp/yolox_cpp/src/yolox_onnxruntime.cpp +++ b/yolox_ros_cpp/yolox_cpp/src/yolox_onnxruntime.cpp @@ -1,147 +1,159 @@ +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "yolox_cpp/yolox_onnxruntime.hpp" -namespace yolox_cpp{ - - YoloXONNXRuntime::YoloXONNXRuntime(file_name_t path_to_model, - int intra_op_num_threads, int inter_op_num_threads, - bool use_cuda, int device_id, bool use_parallel, - float nms_th, float conf_th, std::string model_version, - int num_classes, bool p6) - :AbcYoloX(nms_th, conf_th, model_version, num_classes, p6), - intra_op_num_threads_(intra_op_num_threads), inter_op_num_threads_(inter_op_num_threads), - use_cuda_(use_cuda), device_id_(device_id), use_parallel_(use_parallel) - { - try - { - Ort::SessionOptions session_options; - - session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL); - if(this->use_parallel_) - { - session_options.SetExecutionMode(ExecutionMode::ORT_PARALLEL); - session_options.SetInterOpNumThreads(this->inter_op_num_threads_); - } - else - { - session_options.SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL); - } - session_options.SetIntraOpNumThreads(this->intra_op_num_threads_); - - if(this->use_cuda_) - { - OrtCUDAProviderOptions cuda_option; - cuda_option.device_id = this->device_id_; - session_options.AppendExecutionProvider_CUDA(cuda_option); - } - - this->session_ = Ort::Session(this->env_, - path_to_model.c_str(), - session_options); - } - catch (std::exception &e) - { - std::cerr << e.what() << std::endl; - throw e; - } - - Ort::AllocatorWithDefaultOptions ort_alloc; - - // Allocate input memory buffer - std::cout << "input:" << std::endl; - this->input_name_ = std::string(this->session_.GetInputNameAllocated(0, ort_alloc).get()); - // this->input_name_ = this->session_.GetInputName(0, ort_alloc); - std::cout << " name: " << this->input_name_ << std::endl; - auto input_info = this->session_.GetInputTypeInfo(0); - auto input_shape_info = input_info.GetTensorTypeAndShapeInfo(); - std::vector input_shape = input_shape_info.GetShape(); - ONNXTensorElementDataType input_tensor_type = input_shape_info.GetElementType(); - this->input_h_ = 
input_shape[2]; - this->input_w_ = input_shape[3]; - - std::cout << " shape:" << std::endl; - for (size_t i = 0; i < input_shape.size(); i++) - { - std::cout << " - " << input_shape[i] << std::endl; - } - std::cout << " tensor_type: " << input_tensor_type << std::endl; - - size_t input_byte_count = sizeof(float) * input_shape_info.GetElementCount(); - std::unique_ptr input_buffer = std::make_unique(input_byte_count); - // auto input_memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault); - auto input_memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); - - this->input_tensor_ = Ort::Value::CreateTensor(input_memory_info, - input_buffer.get(), input_byte_count, - input_shape.data(), input_shape.size(), - input_tensor_type); - this->input_buffer_.emplace_back(std::move(input_buffer)); - - // Allocate output memory buffer - std::cout << "outputs" << std::endl; - this->output_name_ = std::string(this->session_.GetOutputNameAllocated(0, ort_alloc).get()); - // this->output_name_ = this->session_.GetOutputName(0, ort_alloc); - std::cout << " name: " << this->output_name_ << std::endl; - - auto output_info = this->session_.GetOutputTypeInfo(0); - auto output_shape_info = output_info.GetTensorTypeAndShapeInfo(); - auto output_shape = output_shape_info.GetShape(); - auto output_tensor_type = output_shape_info.GetElementType(); - - std::cout << " shape:" << std::endl; - for (size_t i = 0; i < output_shape.size(); i++) - { - std::cout << " - " << output_shape[i] << std::endl; - } - std::cout << " tensor_type: " << output_tensor_type << std::endl; - - size_t output_byte_count = sizeof(float) * output_shape_info.GetElementCount(); - std::unique_ptr output_buffer = std::make_unique(output_byte_count); - // auto output_memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault); - auto output_memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); - - this->output_tensor_ = 
Ort::Value::CreateTensor(output_memory_info, - output_buffer.get(), output_byte_count, - output_shape.data(), output_shape.size(), - output_tensor_type); - this->output_buffer_.emplace_back(std::move(output_buffer)); - - // Prepare GridAndStrides - if(this->p6_) - { - generate_grids_and_stride(this->input_w_, this->input_h_, this->strides_p6_, this->grid_strides_); - } - else - { - generate_grids_and_stride(this->input_w_, this->input_h_, this->strides_, this->grid_strides_); - } +namespace yolox_cpp +{ +YoloXONNXRuntime::YoloXONNXRuntime( + file_name_t path_to_model, + int intra_op_num_threads, int inter_op_num_threads, + bool use_cuda, int device_id, bool use_parallel, + float nms_th, float conf_th, std::string model_version, + int num_classes, bool p6) +: AbcYoloX(nms_th, conf_th, model_version, num_classes, p6), + intra_op_num_threads_(intra_op_num_threads), inter_op_num_threads_(inter_op_num_threads), + use_cuda_(use_cuda), device_id_(device_id), use_parallel_(use_parallel) +{ + try { + Ort::SessionOptions session_options; + + session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL); + if (this->use_parallel_) { + session_options.SetExecutionMode(ExecutionMode::ORT_PARALLEL); + session_options.SetInterOpNumThreads(this->inter_op_num_threads_); + } else { + session_options.SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL); } + session_options.SetIntraOpNumThreads(this->intra_op_num_threads_); - std::vector YoloXONNXRuntime::inference(const cv::Mat& frame) - { - // preprocess - cv::Mat pr_img = static_resize(frame); - - float *blob_data = (float *)(this->input_buffer_[0].get()); - blobFromImage(pr_img, blob_data); - - const char* input_names_[] = {this->input_name_.c_str()}; - const char* output_names_[] = {this->output_name_.c_str()}; - - // Inference - Ort::RunOptions run_options; - this->session_.Run(run_options, - input_names_, - &this->input_tensor_, 1, - output_names_, - &this->output_tensor_, 1); - - float* net_pred = (float 
*)this->output_buffer_[0].get(); - - // post process - float scale = std::min(input_w_ / (frame.cols*1.0), input_h_ / (frame.rows*1.0)); - std::vector objects; - decode_outputs(net_pred, this->grid_strides_, objects, this->bbox_conf_thresh_, scale, frame.cols, frame.rows); - return objects; + if (this->use_cuda_) { + OrtCUDAProviderOptions cuda_option; + cuda_option.device_id = this->device_id_; + session_options.AppendExecutionProvider_CUDA(cuda_option); } + this->session_ = Ort::Session( + this->env_, + path_to_model.c_str(), + session_options); + } catch (std::exception & e) { + std::cerr << e.what() << std::endl; + throw e; + } + + Ort::AllocatorWithDefaultOptions ort_alloc; + + // Allocate input memory buffer + std::cout << "input:" << std::endl; + this->input_name_ = std::string(this->session_.GetInputNameAllocated(0, ort_alloc).get()); + // this->input_name_ = this->session_.GetInputName(0, ort_alloc); + std::cout << " name: " << this->input_name_ << std::endl; + auto input_info = this->session_.GetInputTypeInfo(0); + auto input_shape_info = input_info.GetTensorTypeAndShapeInfo(); + std::vector input_shape = input_shape_info.GetShape(); + ONNXTensorElementDataType input_tensor_type = input_shape_info.GetElementType(); + this->input_h_ = input_shape[2]; + this->input_w_ = input_shape[3]; + + std::cout << " shape:" << std::endl; + for (size_t i = 0; i < input_shape.size(); i++) { + std::cout << " - " << input_shape[i] << std::endl; + } + std::cout << " tensor_type: " << input_tensor_type << std::endl; + + size_t input_byte_count = sizeofstatic_cast * input_shape_info.GetElementCount(); + std::unique_ptr input_buffer = std::make_unique(input_byte_count); + auto input_memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); + + this->input_tensor_ = Ort::Value::CreateTensor( + input_memory_info, + input_buffer.get(), input_byte_count, + input_shape.data(), input_shape.size(), + input_tensor_type); + 
this->input_buffer_.emplace_back(std::move(input_buffer)); + + // Allocate output memory buffer + std::cout << "outputs" << std::endl; + this->output_name_ = std::string(this->session_.GetOutputNameAllocated(0, ort_alloc).get()); + // this->output_name_ = this->session_.GetOutputName(0, ort_alloc); + std::cout << " name: " << this->output_name_ << std::endl; + + auto output_info = this->session_.GetOutputTypeInfo(0); + auto output_shape_info = output_info.GetTensorTypeAndShapeInfo(); + auto output_shape = output_shape_info.GetShape(); + auto output_tensor_type = output_shape_info.GetElementType(); + + std::cout << " shape:" << std::endl; + for (size_t i = 0; i < output_shape.size(); i++) { + std::cout << " - " << output_shape[i] << std::endl; + } + std::cout << " tensor_type: " << output_tensor_type << std::endl; + + size_t output_byte_count = sizeofstatic_cast * output_shape_info.GetElementCount(); + std::unique_ptr output_buffer = std::make_unique(output_byte_count); + auto output_memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); + + this->output_tensor_ = Ort::Value::CreateTensor( + output_memory_info, + output_buffer.get(), output_byte_count, + output_shape.data(), output_shape.size(), + output_tensor_type); + this->output_buffer_.emplace_back(std::move(output_buffer)); + + // Prepare GridAndStrides + if (this->p6_) { + generate_grids_and_stride( + this->input_w_, this->input_h_, + this->strides_p6_, this->grid_strides_); + } else { + generate_grids_and_stride( + this->input_w_, this->input_h_, + this->strides_, this->grid_strides_); + } +} + +std::vector YoloXONNXRuntime::inference(const cv::Mat & frame) +{ + // preprocess + cv::Mat pr_img = static_resize(frame); + + float * blob_data = reinterpret_cast(this->input_buffer_[0].get()); + blobFromImage(pr_img, blob_data); + + const char * input_names_[] = {this->input_name_.c_str()}; + const char * output_names_[] = {this->output_name_.c_str()}; + + // Inference + Ort::RunOptions 
run_options; + this->session_.Run( + run_options, + input_names_, + &this->input_tensor_, 1, + output_names_, + &this->output_tensor_, 1); + + float * net_pred = reinterpret_cast this->output_buffer_[0].get(); + + // post process + float scale = std::min(input_w_ / (frame.cols * 1.0), input_h_ / (frame.rows * 1.0)); + std::vector objects; + decode_outputs( + net_pred, this->grid_strides_, + objects, this->bbox_conf_thresh_, + scale, frame.cols, frame.rows); + return objects; } + +} // namespace yolox_cpp diff --git a/yolox_ros_cpp/yolox_cpp/src/yolox_openvino.cpp b/yolox_ros_cpp/yolox_cpp/src/yolox_openvino.cpp index d0a63aa..ee0f1c7 100644 --- a/yolox_ros_cpp/yolox_cpp/src/yolox_openvino.cpp +++ b/yolox_ros_cpp/yolox_cpp/src/yolox_openvino.cpp @@ -1,82 +1,104 @@ +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "yolox_cpp/yolox_openvino.hpp" -namespace yolox_cpp{ - YoloXOpenVINO::YoloXOpenVINO(file_name_t path_to_model, std::string device_name, - float nms_th, float conf_th, std::string model_version, - int num_classes, bool p6) - :AbcYoloX(nms_th, conf_th, model_version, num_classes, p6), - device_name_(device_name) - { - // Step 1. 
Initialize inference engine core - std::cout << "Initialize Inference engine core" << std::endl; - ov::Core ie; +namespace yolox_cpp +{ +YoloXOpenVINO::YoloXOpenVINO( + file_name_t path_to_model, std::string device_name, + float nms_th, float conf_th, std::string model_version, + int num_classes, bool p6) +: AbcYoloX(nms_th, conf_th, model_version, num_classes, p6), + device_name_(device_name) +{ + // Step 1. Initialize inference engine core + std::cout << "Initialize Inference engine core" << std::endl; + ov::Core ie; - // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and - // .bin files) or ONNX (.onnx file) format - std::cout << "Read a model in OpenVINO Intermediate Representation: " << path_to_model << std::endl; - const auto network = ie.read_model(path_to_model); + // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and + // .bin files) or ONNX (.onnx file) format + std::cout << "Read an OpenVINO model: " << path_to_model << std::endl; + const auto network = ie.read_model(path_to_model); - // Step 3. Loading a model to the device - std::vector available_devices = ie.get_available_devices(); - std::cout << "======= AVAILABLE DEVICES FOR OPENVINO =======" << std::endl; - for (auto device : available_devices) { - std::cout << "- " << device << std::endl; - } - std::cout << "==============================================" << std::endl; - std::cout << "Loading a model to the device: " << device_name_ << std::endl; - auto compiled_model = ie.compile_model(network, device_name); + // Step 3. 
Loading a model to the device + std::vector available_devices = ie.get_available_devices(); + std::cout << "======= AVAILABLE DEVICES FOR OPENVINO =======" << std::endl; + for (auto device : available_devices) { + std::cout << "- " << device << std::endl; + } + std::cout << "==============================================" << std::endl; + std::cout << "Loading a model to the device: " << device_name_ << std::endl; + auto compiled_model = ie.compile_model(network, device_name); - // Step 4. Create an infer request - std::cout << "Create an infer request" << std::endl; - this->infer_request_ = compiled_model.create_infer_request(); + // Step 4. Create an infer request + std::cout << "Create an infer request" << std::endl; + this->infer_request_ = compiled_model.create_infer_request(); - // Step 5. Configure input & output - std::cout << "Configuring input and output blobs" << std::endl; - this->input_shape_ = compiled_model.input(0).get_shape(); - /* Mark input as resizable by setting of a resize algorithm. - * In this case we will be able to set an input blob of any shape to an - * infer request. Resize and layout conversions are executed automatically - * during inference */ - this->blob_.resize( - this->input_shape_.at(0) * this->input_shape_.at(1) * - this->input_shape_.at(2) * this->input_shape_.at(3)); - this->input_h_ = this->input_shape_.at(2); - this->input_w_ = this->input_shape_.at(3); - std::cout << "INPUT_HEIGHT: " << this->input_h_ << std::endl; - std::cout << "INPUT_WIDTH: " << this->input_w_ << std::endl; + // Step 5. Configure input & output + std::cout << "Configuring input and output blobs" << std::endl; + this->input_shape_ = compiled_model.input(0).get_shape(); + /* Mark input as resizable by setting of a resize algorithm. + * In this case we will be able to set an input blob of any shape to an + * infer request. 
Resize and layout conversions are executed automatically + * during inference */ + this->blob_.resize( + this->input_shape_.at(0) * this->input_shape_.at(1) * + this->input_shape_.at(2) * this->input_shape_.at(3)); + this->input_h_ = this->input_shape_.at(2); + this->input_w_ = this->input_shape_.at(3); + std::cout << "INPUT_HEIGHT: " << this->input_h_ << std::endl; + std::cout << "INPUT_WIDTH: " << this->input_w_ << std::endl; - // Prepare GridAndStrides - if(this->p6_) - { - generate_grids_and_stride(this->input_w_, this->input_h_, this->strides_p6_, this->grid_strides_); - } - else - { - generate_grids_and_stride(this->input_w_, this->input_h_, this->strides_, this->grid_strides_); - } - } + // Prepare GridAndStrides + if (this->p6_) { + generate_grids_and_stride( + this->input_w_, this->input_h_, + this->strides_p6_, this->grid_strides_); + } else { + generate_grids_and_stride( + this->input_w_, this->input_h_, + this->strides_, this->grid_strides_); + } +} - std::vector YoloXOpenVINO::inference(const cv::Mat& frame) - { - // preprocess - cv::Mat pr_img = static_resize(frame); - // locked memory holder should be alive all time while access to its buffer happens - blobFromImage(pr_img, this->blob_.data()); +std::vector YoloXOpenVINO::inference(const cv::Mat & frame) +{ + // preprocess + cv::Mat pr_img = static_resize(frame); + // locked memory holder should be alive all time while access to its buffer happens + blobFromImage(pr_img, this->blob_.data()); - // do inference - /* Running the request synchronously */ - this->infer_request_.set_input_tensor( - ov::Tensor{ov::element::f32, this->input_shape_, reinterpret_cast(this->blob_.data())}); - infer_request_.infer(); + // do inference + /* Running the request synchronously */ + this->infer_request_.set_input_tensor( + ov::Tensor{ + ov::element::f32, this->input_shape_, + reinterpret_cast(this->blob_.data())}); + infer_request_.infer(); - const auto &output_tensor = this->infer_request_.get_output_tensor(); - 
const float* net_pred = reinterpret_cast(output_tensor.data()); + const auto & output_tensor = this->infer_request_.get_output_tensor(); + const float * net_pred = reinterpret_cast(output_tensor.data()); - float scale = std::min(input_w_ / (frame.cols*1.0), input_h_ / (frame.rows*1.0)); - std::vector objects; - decode_outputs(net_pred, this->grid_strides_, objects, this->bbox_conf_thresh_, scale, frame.cols, frame.rows); - return objects; - } + float scale = std::min(input_w_ / (frame.cols * 1.0), input_h_ / (frame.rows * 1.0)); + std::vector objects; + decode_outputs( + net_pred, this->grid_strides_, + objects, this->bbox_conf_thresh_, + scale, frame.cols, frame.rows); + return objects; +} -} \ No newline at end of file +} // namespace yolox_cpp diff --git a/yolox_ros_cpp/yolox_cpp/src/yolox_tensorrt.cpp b/yolox_ros_cpp/yolox_cpp/src/yolox_tensorrt.cpp index 4dd7151..128f24f 100644 --- a/yolox_ros_cpp/yolox_cpp/src/yolox_tensorrt.cpp +++ b/yolox_ros_cpp/yolox_cpp/src/yolox_tensorrt.cpp @@ -1,134 +1,172 @@ +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "yolox_cpp/yolox_tensorrt.hpp" namespace yolox_cpp { - YoloXTensorRT::YoloXTensorRT(file_name_t path_to_engine, int device, - float nms_th, float conf_th, std::string model_version, - int num_classes, bool p6) - : AbcYoloX(nms_th, conf_th, model_version, num_classes, p6), - DEVICE_(device) - { - cudaSetDevice(this->DEVICE_); - // create a model using the API directly and serialize it to a stream - char *trtModelStream{nullptr}; - size_t size{0}; - - std::ifstream file(path_to_engine, std::ios::binary); - if (file.good()) - { - file.seekg(0, file.end); - size = file.tellg(); - file.seekg(0, file.beg); - trtModelStream = new char[size]; - assert(trtModelStream); - file.read(trtModelStream, size); - file.close(); - } - else - { - std::cerr << "invalid arguments path_to_engine: " << path_to_engine << std::endl; - return; - } - - this->runtime_ = std::unique_ptr(createInferRuntime(this->gLogger_)); - assert(this->runtime_ != nullptr); - this->engine_ = std::unique_ptr(this->runtime_->deserializeCudaEngine(trtModelStream, size)); - assert(this->engine_ != nullptr); - this->context_ = std::unique_ptr(this->engine_->createExecutionContext()); - assert(this->context_ != nullptr); - delete[] trtModelStream; - - const auto input_name = this->engine_->getIOTensorName(this->inputIndex_); - const auto input_dims = this->engine_->getTensorShape(input_name); - this->input_h_ = input_dims.d[2]; - this->input_w_ = input_dims.d[3]; - std::cout << "INPUT_HEIGHT: " << this->input_h_ << std::endl; - std::cout << "INPUT_WIDTH: " << this->input_w_ << std::endl; - - const auto output_name = this->engine_->getIOTensorName(this->outputIndex_); - auto output_dims = this->engine_->getTensorShape(output_name); - this->output_size_ = 1; - for (int j = 0; j < output_dims.nbDims; ++j) - { - this->output_size_ *= output_dims.d[j]; - } - - // Pointers to input and output device buffers to pass to engine. - // Engine requires exactly IEngine::getNbBindings() number of buffers. 
- assert(this->engine_->getNbIOTensors() == 2); - // In order to bind the buffers, we need to know the names of the input and output tensors. - // Note that indices are guaranteed to be less than IEngine::getNbBindings() - assert(this->engine_->getTensorDataType(input_name) == nvinfer1::DataType::kFLOAT); - assert(this->engine_->getTensorDataType(output_name) == nvinfer1::DataType::kFLOAT); - - // Create GPU buffers on device - CHECK(cudaMalloc(&this->inference_buffers_[this->inputIndex_], 3 * this->input_h_ * this->input_w_ * sizeof(float))); - CHECK(cudaMalloc(&this->inference_buffers_[this->outputIndex_], this->output_size_ * sizeof(float))); - - assert(this->context_->setInputShape(input_name, input_dims)); - assert(this->context_->allInputDimensionsSpecified()); - - assert(this->context_->setTensorAddress(input_name, this->inference_buffers_[this->inputIndex_])); - assert(this->context_->setTensorAddress(output_name, this->inference_buffers_[this->outputIndex_])); - - // Prepare GridAndStrides - if (this->p6_) - { - generate_grids_and_stride(this->input_w_, this->input_h_, this->strides_p6_, this->grid_strides_); - } - else - { - generate_grids_and_stride(this->input_w_, this->input_h_, this->strides_, this->grid_strides_); - } - } - - YoloXTensorRT::~YoloXTensorRT() - { - CHECK(cudaFree(inference_buffers_[this->inputIndex_])); - CHECK(cudaFree(inference_buffers_[this->outputIndex_])); - } - - std::vector YoloXTensorRT::inference(const cv::Mat &frame) - { - // preprocess - auto pr_img = static_resize(frame); - float *input_blob = new float[pr_img.total() * 3]; - blobFromImage(pr_img, input_blob); - - // inference - float *output_blob = new float[this->output_size_]; - this->doInference(input_blob, output_blob); - - float scale = std::min(this->input_w_ / (frame.cols * 1.0), this->input_h_ / (frame.rows * 1.0)); - - std::vector objects; - decode_outputs(output_blob, this->grid_strides_, objects, this->bbox_conf_thresh_, scale, frame.cols, frame.rows); - - 
delete input_blob; - delete output_blob; - return objects; - } - - void YoloXTensorRT::doInference(float *input, float *output) - { - // Create stream - cudaStream_t stream; - CHECK(cudaStreamCreate(&stream)); - - // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host - CHECK(cudaMemcpyAsync(this->inference_buffers_[this->inputIndex_], input, 3 * this->input_h_ * this->input_w_ * sizeof(float), cudaMemcpyHostToDevice, stream)); - - bool success = context_->enqueueV3(stream); - if (!success) - throw std::runtime_error("failed inference"); - - CHECK(cudaMemcpyAsync(output, this->inference_buffers_[this->outputIndex_], this->output_size_ * sizeof(float), cudaMemcpyDeviceToHost, stream)); - - CHECK(cudaStreamSynchronize(stream)); - - // Release stream - CHECK(cudaStreamDestroy(stream)); - } - -} // namespace yolox_cpp +YoloXTensorRT::YoloXTensorRT( + file_name_t path_to_engine, int device, + float nms_th, float conf_th, std::string model_version, + int num_classes, bool p6) +: AbcYoloX(nms_th, conf_th, model_version, num_classes, p6), + DEVICE_(device) +{ + cudaSetDevice(this->DEVICE_); + // create a model using the API directly and serialize it to a stream + char * trtModelStream{nullptr}; + size_t size{0}; + + std::ifstream file(path_to_engine, std::ios::binary); + if (file.good()) { + file.seekg(0, file.end); + size = file.tellg(); + file.seekg(0, file.beg); + trtModelStream = new char[size]; + assert(trtModelStream); + file.read(trtModelStream, size); + file.close(); + } else { + std::cerr << "invalid arguments path_to_engine: " << path_to_engine << std::endl; + return; + } + + this->runtime_ = std::unique_ptr(createInferRuntime(this->gLogger_)); + assert(this->runtime_ != nullptr); + this->engine_ = std::unique_ptr( + this->runtime_->deserializeCudaEngine(trtModelStream, size)); + assert(this->engine_ != nullptr); + this->context_ = + std::unique_ptr(this->engine_->createExecutionContext()); + assert(this->context_ != 
nullptr); + delete[] trtModelStream; + + const auto input_name = this->engine_->getIOTensorName(this->inputIndex_); + const auto input_dims = this->engine_->getTensorShape(input_name); + this->input_h_ = input_dims.d[2]; + this->input_w_ = input_dims.d[3]; + std::cout << "INPUT_HEIGHT: " << this->input_h_ << std::endl; + std::cout << "INPUT_WIDTH: " << this->input_w_ << std::endl; + + const auto output_name = this->engine_->getIOTensorName(this->outputIndex_); + auto output_dims = this->engine_->getTensorShape(output_name); + this->output_size_ = 1; + for (int j = 0; j < output_dims.nbDims; ++j) { + this->output_size_ *= output_dims.d[j]; + } + + // Pointers to input and output device buffers to pass to engine. + // Engine requires exactly IEngine::getNbBindings() number of buffers. + assert(this->engine_->getNbIOTensors() == 2); + // In order to bind the buffers, we need to know the names of the input & output tensors. + // Note that indices are guaranteed to be less than IEngine::getNbBindings() + assert(this->engine_->getTensorDataType(input_name) == nvinfer1::DataType::kFLOAT); + assert(this->engine_->getTensorDataType(output_name) == nvinfer1::DataType::kFLOAT); + + // Create GPU buffers on device + CHECK( + cudaMalloc( + &this->inference_buffers_[this->inputIndex_], + 3 * this->input_h_ * this->input_w_ * sizeofstatic_cast)); + CHECK( + cudaMalloc( + &this->inference_buffers_[this->outputIndex_], + this->output_size_ * sizeofstatic_cast)); + + assert(this->context_->setInputShape(input_name, input_dims)); + assert(this->context_->allInputDimensionsSpecified()); + + assert( + this->context_->setTensorAddress( + input_name, this->inference_buffers_[this->inputIndex_])); + assert( + this->context_->setTensorAddress( + output_name, this->inference_buffers_[this->outputIndex_])); + + // Prepare GridAndStrides + if (this->p6_) { + generate_grids_and_stride( + this->input_w_, this->input_h_, this->strides_p6_, this->grid_strides_); + } else { + 
generate_grids_and_stride( + this->input_w_, this->input_h_, this->strides_, this->grid_strides_); + } +} + +YoloXTensorRT::~YoloXTensorRT() +{ + CHECK(cudaFree(inference_buffers_[this->inputIndex_])); + CHECK(cudaFree(inference_buffers_[this->outputIndex_])); +} + +std::vector YoloXTensorRT::inference(const cv::Mat & frame) +{ + // preprocess + auto pr_img = static_resize(frame); + float * input_blob = new float[pr_img.total() * 3]; + blobFromImage(pr_img, input_blob); + + // inference + float * output_blob = new float[this->output_size_]; + this->doInference(input_blob, output_blob); + + float scale = std::min( + this->input_w_ / (frame.cols * 1.0), + this->input_h_ / (frame.rows * 1.0)); + + std::vector objects; + decode_outputs( + output_blob, this->grid_strides_, + objects, this->bbox_conf_thresh_, + scale, frame.cols, frame.rows); + + delete input_blob; + delete output_blob; + return objects; +} + +void YoloXTensorRT::doInference(float * input, float * output) +{ + // Create stream + cudaStream_t stream; + CHECK(cudaStreamCreate(&stream)); + + // DMA input batch data to device, infer on the batch asynchronously, + // and DMA output back to host + CHECK( + cudaMemcpyAsync( + this->inference_buffers_[this->inputIndex_], + input, + 3 * this->input_h_ * this->input_w_ * sizeofstatic_cast, + cudaMemcpyHostToDevice, stream)); + + bool success = context_->enqueueV3(stream); + if (!success) { + throw std::runtime_error("failed inference"); + } + + CHECK( + cudaMemcpyAsync( + output, + this->inference_buffers_[this->outputIndex_], + this->output_size_ * sizeofstatic_cast, cudaMemcpyDeviceToHost, stream)); + + CHECK(cudaStreamSynchronize(stream)); + + // Release stream + CHECK(cudaStreamDestroy(stream)); +} + +} // namespace yolox_cpp diff --git a/yolox_ros_cpp/yolox_cpp/src/yolox_tflite.cpp b/yolox_ros_cpp/yolox_cpp/src/yolox_tflite.cpp index 83a8e99..ee413bf 100644 --- a/yolox_ros_cpp/yolox_cpp/src/yolox_tflite.cpp +++ b/yolox_ros_cpp/yolox_cpp/src/yolox_tflite.cpp 
@@ -1,177 +1,182 @@ +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "yolox_cpp/yolox_tflite.hpp" namespace yolox_cpp { - YoloXTflite::YoloXTflite(file_name_t path_to_model, int num_threads, - float nms_th, float conf_th, std::string model_version, - int num_classes, bool p6, bool is_nchw) - : AbcYoloX(nms_th, conf_th, model_version, num_classes, p6), is_nchw_(is_nchw) - { - TfLiteStatus status; - this->model_ = tflite::FlatBufferModel::BuildFromFile(path_to_model.c_str()); - TFLITE_MINIMAL_CHECK(model_); - - this->resolver_ = std::make_unique(); - this->interpreter_ = std::make_unique(); - - tflite::InterpreterBuilder builder(*model_, *this->resolver_); - builder(&this->interpreter_); - TFLITE_MINIMAL_CHECK(this->interpreter_ != nullptr); - - TFLITE_MINIMAL_CHECK(this->interpreter_->AllocateTensors() == kTfLiteOk); - // tflite::PrintInterpreterState(this->interpreter_.get()); - - status = this->interpreter_->SetNumThreads(num_threads); - if (status != TfLiteStatus::kTfLiteOk) - { - std::cerr << "Failed to SetNumThreads." << std::endl; - exit(1); - } - - // XNNPACK Delegate - auto xnnpack_options = TfLiteXNNPackDelegateOptionsDefault(); - xnnpack_options.num_threads = num_threads; - this->delegate_ = TfLiteXNNPackDelegateCreate(&xnnpack_options); - status = this->interpreter_->ModifyGraphWithDelegate(this->delegate_); - if (status != TfLiteStatus::kTfLiteOk) - { - std::cerr << "Failed to ModifyGraphWithDelegate." 
<< std::endl; - exit(1); - } - - // // GPU Delegate - // auto gpu_options = TfLiteGpuDelegateOptionsV2Default(); - // gpu_options.inference_preference = TFLITE_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED; - // gpu_options.inference_priority1 = TFLITE_GPU_INFERENCE_PRIORITY_MIN_LATENCY; - // this->delegate_ = TfLiteGpuDelegateV2Create(&gpu_options); - // status = this->interpreter_->ModifyGraphWithDelegate(this->delegate_); - // if (status != TfLiteStatus::kTfLiteOk) - // { - // std::cerr << "Failed to ModifyGraphWithDelegate." << std::endl; - // exit(1); - // } - - // // NNAPI Delegate - // tflite::StatefulNnApiDelegate::Options nnapi_options; - // nnapi_options.execution_preference = tflite::StatefulNnApiDelegate::Options::kSustainedSpeed; - // nnapi_options.allow_fp16 = true; - // nnapi_options.disallow_nnapi_cpu = true; - // this->delegate_ = new tflite::StatefulNnApiDelegate(nnapi_options); - // status = this->interpreter_->ModifyGraphWithDelegate(this->delegate_); - // if (status != TfLiteStatus::kTfLiteOk) - // { - // std::cerr << "Failed to ModifyGraphWithDelegate." << std::endl; - // exit(1); - // } - - if (this->interpreter_->AllocateTensors() != TfLiteStatus::kTfLiteOk) - { - std::cerr << "Failed to allocate tensors." 
<< std::endl; - exit(1); - } - - { - TfLiteTensor *tensor = this->interpreter_->input_tensor(0); - std::cout << "input:" << std::endl; - std::cout << " name: " << tensor->name << std::endl; - if (this->is_nchw_ == true) - { - // NCHW - this->input_h_ = tensor->dims->data[2]; - this->input_w_ = tensor->dims->data[3]; - } - else - { - // NHWC - this->input_h_ = tensor->dims->data[1]; - this->input_w_ = tensor->dims->data[2]; - } - - std::cout << " shape:" << std::endl; - if (tensor->type == kTfLiteUInt8) - { - this->input_size_ = sizeof(uint8_t); - } - else - { - this->input_size_ = sizeof(float); - } - for (size_t i = 0; i < tensor->dims->size; i++) - { - this->input_size_ *= tensor->dims->data[i]; - std::cout << " - " << tensor->dims->data[i] << std::endl; - } - std::cout << " input_h: " << this->input_h_ << std::endl; - std::cout << " input_w: " << this->input_w_ << std::endl; - std::cout << " tensor_type: " << tensor->type << std::endl; - } - - { - TfLiteTensor *tensor = this->interpreter_->output_tensor(0); - std::cout << "output:" << std::endl; - std::cout << " name: " << tensor->name << std::endl; - std::cout << " shape:" << std::endl; - if (tensor->type == kTfLiteUInt8) - { - this->output_size_ = sizeof(uint8_t); - } - else - { - this->output_size_ = sizeof(float); - } - for (size_t i = 0; i < tensor->dims->size; i++) - { - this->output_size_ *= tensor->dims->data[i]; - std::cout << " - " << tensor->dims->data[i] << std::endl; - } - std::cout << " tensor_type: " << tensor->type << std::endl; - } - - // Prepare GridAndStrides - if(this->p6_) - { - generate_grids_and_stride(this->input_w_, this->input_h_, this->strides_p6_, this->grid_strides_); - } - else - { - generate_grids_and_stride(this->input_w_, this->input_h_, this->strides_, this->grid_strides_); - } +YoloXTflite::YoloXTflite( + file_name_t path_to_model, int num_threads, + float nms_th, float conf_th, std::string model_version, + int num_classes, bool p6, bool is_nchw) +: AbcYoloX(nms_th, conf_th, 
model_version, num_classes, p6), is_nchw_(is_nchw) +{ + TfLiteStatus status; + this->model_ = tflite::FlatBufferModel::BuildFromFile(path_to_model.c_str()); + TFLITE_MINIMAL_CHECK(model_); + + this->resolver_ = std::make_unique(); + this->interpreter_ = std::make_unique(); + + tflite::InterpreterBuilder builder(*model_, *this->resolver_); + builder(&this->interpreter_); + TFLITE_MINIMAL_CHECK(this->interpreter_ != nullptr); + + TFLITE_MINIMAL_CHECK(this->interpreter_->AllocateTensors() == kTfLiteOk); + // tflite::PrintInterpreterState(this->interpreter_.get()); + + status = this->interpreter_->SetNumThreads(num_threads); + if (status != TfLiteStatus::kTfLiteOk) { + std::cerr << "Failed to SetNumThreads." << std::endl; + exit(1); + } + + // XNNPACK Delegate + auto xnnpack_options = TfLiteXNNPackDelegateOptionsDefault(); + xnnpack_options.num_threads = num_threads; + this->delegate_ = TfLiteXNNPackDelegateCreate(&xnnpack_options); + status = this->interpreter_->ModifyGraphWithDelegate(this->delegate_); + if (status != TfLiteStatus::kTfLiteOk) { + std::cerr << "Failed to ModifyGraphWithDelegate." << std::endl; + exit(1); + } + + // // GPU Delegate + // auto gpu_options = TfLiteGpuDelegateOptionsV2Default(); + // gpu_options.inference_preference = + // TFLITE_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED; + // gpu_options.inference_priority1 = TFLITE_GPU_INFERENCE_PRIORITY_MIN_LATENCY; + // this->delegate_ = TfLiteGpuDelegateV2Create(&gpu_options); + // status = this->interpreter_->ModifyGraphWithDelegate(this->delegate_); + // if (status != TfLiteStatus::kTfLiteOk) + // { + // std::cerr << "Failed to ModifyGraphWithDelegate." 
<< std::endl; + // exit(1); + // } + + // // NNAPI Delegate + // tflite::StatefulNnApiDelegate::Options nnapi_options; + // nnapi_options.execution_preference = + // tflite::StatefulNnApiDelegate::Options::kSustainedSpeed; + // nnapi_options.allow_fp16 = true; + // nnapi_options.disallow_nnapi_cpu = true; + // this->delegate_ = new tflite::StatefulNnApiDelegate(nnapi_options); + // status = this->interpreter_->ModifyGraphWithDelegate(this->delegate_); + // if (status != TfLiteStatus::kTfLiteOk) + // { + // std::cerr << "Failed to ModifyGraphWithDelegate." << std::endl; + // exit(1); + // } + + if (this->interpreter_->AllocateTensors() != TfLiteStatus::kTfLiteOk) { + std::cerr << "Failed to allocate tensors." << std::endl; + exit(1); + } + + { + TfLiteTensor * tensor = this->interpreter_->input_tensor(0); + std::cout << "input:" << std::endl; + std::cout << "  name: " << tensor->name << std::endl; + if (this->is_nchw_ == true) { + // NCHW + this->input_h_ = tensor->dims->data[2]; + this->input_w_ = tensor->dims->data[3]; + } else { + // NHWC + this->input_h_ = tensor->dims->data[1]; + this->input_w_ = tensor->dims->data[2]; } - YoloXTflite::~YoloXTflite() - { - TfLiteXNNPackDelegateDelete(this->delegate_); + + std::cout << "  shape:" << std::endl; + if (tensor->type == kTfLiteUInt8) { + this->input_size_ = sizeof(uint8_t); + } else { + this->input_size_ = sizeof(float); } - std::vector<Object> YoloXTflite::inference(const cv::Mat &frame) - { - // preprocess - cv::Mat pr_img = static_resize(frame); - - float *input_blob = this->interpreter_->typed_input_tensor<float>(0); - if (this->is_nchw_ == true) - { - blobFromImage(pr_img, input_blob); - } - else - { - blobFromImage_nhwc(pr_img, input_blob); - } - - // inference - TfLiteStatus ret = this->interpreter_->Invoke(); - if (ret != TfLiteStatus::kTfLiteOk) - { - std::cerr << "Failed to invoke." 
<< std::endl; - return std::vector<Object>(); - } - - // postprocess - std::vector<Object> objects; - float scale = std::min(this->input_w_ / (frame.cols * 1.0), this->input_h_ / (frame.rows * 1.0)); - float *output_blob = this->interpreter_->typed_output_tensor<float>(0); - decode_outputs(output_blob, this->grid_strides_, objects, this->bbox_conf_thresh_, scale, frame.cols, frame.rows); - - return objects; + for (size_t i = 0; i < tensor->dims->size; i++) { + this->input_size_ *= tensor->dims->data[i]; + std::cout << "  - " << tensor->dims->data[i] << std::endl; } - -} // namespace yolox_cpp + std::cout << "  input_h: " << this->input_h_ << std::endl; + std::cout << "  input_w: " << this->input_w_ << std::endl; + std::cout << "  tensor_type: " << tensor->type << std::endl; + } + + { + TfLiteTensor * tensor = this->interpreter_->output_tensor(0); + std::cout << "output:" << std::endl; + std::cout << "  name: " << tensor->name << std::endl; + std::cout << "  shape:" << std::endl; + if (tensor->type == kTfLiteUInt8) { + this->output_size_ = sizeof(uint8_t); + } else { + this->output_size_ = sizeof(float); + } + for (size_t i = 0; i < tensor->dims->size; i++) { + this->output_size_ *= tensor->dims->data[i]; + std::cout << "  - " << tensor->dims->data[i] << std::endl; + } + std::cout << "  tensor_type: " << tensor->type << std::endl; + } + + // Prepare GridAndStrides + if (this->p6_) { + generate_grids_and_stride( + this->input_w_, this->input_h_, + this->strides_p6_, this->grid_strides_); + } else { + generate_grids_and_stride( + this->input_w_, this->input_h_, + this->strides_, this->grid_strides_); + } +} +YoloXTflite::~YoloXTflite() +{ + TfLiteXNNPackDelegateDelete(this->delegate_); +} +std::vector<Object> YoloXTflite::inference(const cv::Mat & frame) +{ + // preprocess + cv::Mat pr_img = static_resize(frame); + + float * input_blob = this->interpreter_->typed_input_tensor<float>(0); + if (this->is_nchw_ == true) { + blobFromImage(pr_img, input_blob); + } else { + blobFromImage_nhwc(pr_img, input_blob); + 
} + + // inference + TfLiteStatus ret = this->interpreter_->Invoke(); + if (ret != TfLiteStatus::kTfLiteOk) { + std::cerr << "Failed to invoke." << std::endl; + return std::vector<Object>(); + } + + // postprocess + std::vector<Object> objects; + float scale = std::min( + this->input_w_ / (frame.cols * 1.0), + this->input_h_ / (frame.rows * 1.0)); + float * output_blob = this->interpreter_->typed_output_tensor<float>(0); + decode_outputs( + output_blob, this->grid_strides_, + objects, this->bbox_conf_thresh_, + scale, frame.cols, frame.rows); + + return objects; +} + +} // namespace yolox_cpp diff --git a/yolox_ros_cpp/yolox_ros_cpp/include/yolox_ros_cpp/yolox_ros_cpp.hpp b/yolox_ros_cpp/yolox_ros_cpp/include/yolox_ros_cpp/yolox_ros_cpp.hpp index 2bde92a..4428e5b 100644 --- a/yolox_ros_cpp/yolox_ros_cpp/include/yolox_ros_cpp/yolox_ros_cpp.hpp +++ b/yolox_ros_cpp/yolox_ros_cpp/include/yolox_ros_cpp/yolox_ros_cpp.hpp @@ -1,9 +1,26 @@ +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #pragma once #include #include +#include +#include +#include -#include +#include // NOLINT #include #include #include @@ -18,30 +35,39 @@ #include "yolox_cpp/utils.hpp" #include "yolox_param/yolox_param.hpp" -namespace yolox_ros_cpp{ - class YoloXNode : public rclcpp::Node - { - public: - YoloXNode(const rclcpp::NodeOptions &); - private: - void onInit(); - void colorImageCallback(const sensor_msgs::msg::Image::ConstSharedPtr &); - - static bboxes_ex_msgs::msg::BoundingBoxes objects_to_bboxes(const cv::Mat &, const std::vector &, const std_msgs::msg::Header &); - static vision_msgs::msg::Detection2DArray objects_to_detection2d(const std::vector &, const std_msgs::msg::Header &); - - protected: - std::shared_ptr param_listener_; - yolox_parameters::Params params_; - private: - std::unique_ptr yolox_; - std::vector class_names_; - - rclcpp::TimerBase::SharedPtr init_timer_; - image_transport::Subscriber sub_image_; - - rclcpp::Publisher::SharedPtr pub_bboxes_; - rclcpp::Publisher::SharedPtr pub_detection2d_; - image_transport::Publisher pub_image_; - }; -} +namespace yolox_ros_cpp +{ +class YoloXNode : public rclcpp::Node +{ +public: + explicit YoloXNode(const rclcpp::NodeOptions &); + +private: + void onInit(); + void colorImageCallback(const sensor_msgs::msg::Image::ConstSharedPtr &); + + static bboxes_ex_msgs::msg::BoundingBoxes objects_to_bboxes( + const cv::Mat &, + const std::vector &, + const std_msgs::msg::Header &); + + static vision_msgs::msg::Detection2DArray objects_to_detection2d( + const std::vector &, + const std_msgs::msg::Header &); + +protected: + std::shared_ptr param_listener_; + yolox_parameters::Params params_; + +private: + std::unique_ptr yolox_; + std::vector class_names_; + + rclcpp::TimerBase::SharedPtr init_timer_; + image_transport::Subscriber sub_image_; + + rclcpp::Publisher::SharedPtr pub_bboxes_; + rclcpp::Publisher::SharedPtr pub_detection2d_; + image_transport::Publisher pub_image_; +}; +} // namespace yolox_ros_cpp diff --git 
a/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_onnxruntime.launch.py b/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_onnxruntime.launch.py index df34cdb..d49fec2 100644 --- a/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_onnxruntime.launch.py +++ b/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_onnxruntime.launch.py @@ -19,6 +19,7 @@ from launch_ros.actions import ComposableNodeContainer from launch_ros.descriptions import ComposableNode + def generate_launch_description(): launch_args = [ DeclareLaunchArgument( @@ -144,14 +145,17 @@ def generate_launch_description(): 'onnxruntime/use_cuda': LaunchConfiguration('onnxruntime/use_cuda'), 'onnxruntime/device_id': LaunchConfiguration('onnxruntime/device_id'), 'onnxruntime/use_parallel': LaunchConfiguration('onnxruntime/use_parallel'), - 'onnxruntime/inter_op_num_threads': LaunchConfiguration('onnxruntime/inter_op_num_threads'), - 'onnxruntime/intra_op_num_threads': LaunchConfiguration('onnxruntime/intra_op_num_threads'), + 'onnxruntime/inter_op_num_threads': + LaunchConfiguration('onnxruntime/inter_op_num_threads'), + 'onnxruntime/intra_op_num_threads': + LaunchConfiguration('onnxruntime/intra_op_num_threads'), 'conf': LaunchConfiguration('conf'), 'nms': LaunchConfiguration('nms'), 'imshow_isshow': LaunchConfiguration('imshow_isshow'), 'src_image_topic_name': LaunchConfiguration('src_image_topic_name'), 'publish_image_topic_name': LaunchConfiguration('publish_image_topic_name'), - 'publish_boundingbox_topic_name': LaunchConfiguration('publish_boundingbox_topic_name'), + 'publish_boundingbox_topic_name': + LaunchConfiguration('publish_boundingbox_topic_name'), 'publish_resized_image': LaunchConfiguration('publish_resized_image'), 'use_bbox_ex_msgs': LaunchConfiguration('use_bbox_ex_msgs'), }], diff --git a/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_openvino.launch.py b/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_openvino.launch.py index 1395507..5a9a597 100644 --- a/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_openvino.launch.py +++ 
b/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_openvino.launch.py @@ -1,9 +1,25 @@ +# Copyright 2023 Ar-Ray-code +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + import launch from launch.actions import DeclareLaunchArgument from launch.substitutions import LaunchConfiguration from launch_ros.actions import ComposableNodeContainer from launch_ros.descriptions import ComposableNode + def generate_launch_description(): launch_args = [ DeclareLaunchArgument( @@ -113,7 +129,8 @@ def generate_launch_description(): 'imshow_isshow': LaunchConfiguration('imshow_isshow'), 'src_image_topic_name': LaunchConfiguration('src_image_topic_name'), 'publish_image_topic_name': LaunchConfiguration('publish_image_topic_name'), - 'publish_boundingbox_topic_name': LaunchConfiguration('publish_boundingbox_topic_name'), + 'publish_boundingbox_topic_name': + LaunchConfiguration('publish_boundingbox_topic_name'), 'publish_resized_image': LaunchConfiguration('publish_resized_image'), 'use_bbox_ex_msgs': LaunchConfiguration('use_bbox_ex_msgs'), }], diff --git a/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tensorrt.launch.py b/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tensorrt.launch.py index c011aeb..da19c8b 100644 --- a/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tensorrt.launch.py +++ b/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tensorrt.launch.py @@ -1,11 +1,26 @@ +# Copyright 2023 Ar-Ray-code +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + import launch from launch.actions import DeclareLaunchArgument from launch.substitutions import LaunchConfiguration from launch_ros.actions import ComposableNodeContainer from launch_ros.descriptions import ComposableNode -def generate_launch_description(): +def generate_launch_description(): launch_args = [ DeclareLaunchArgument( 'video_device', @@ -14,7 +29,7 @@ def generate_launch_description(): ), DeclareLaunchArgument( 'model_path', - default_value='./install/yolox_ros_cpp/share/yolox_ros_cpp/weights/tensorrt/yolox_nano.trt', + default_value='./example.trt', description='yolox model path.' 
), DeclareLaunchArgument( @@ -114,7 +129,8 @@ def generate_launch_description(): 'imshow_isshow': LaunchConfiguration('imshow_isshow'), 'src_image_topic_name': LaunchConfiguration('src_image_topic_name'), 'publish_image_topic_name': LaunchConfiguration('publish_image_topic_name'), - 'publish_boundingbox_topic_name': LaunchConfiguration('publish_boundingbox_topic_name'), + 'publish_boundingbox_topic_name': + LaunchConfiguration('publish_boundingbox_topic_name'), 'publish_resized_image': LaunchConfiguration('publish_resized_image'), 'use_bbox_ex_msgs': LaunchConfiguration('use_bbox_ex_msgs'), }], diff --git a/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tflite.launch.py b/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tflite.launch.py index 0f3781f..f86ba85 100644 --- a/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tflite.launch.py +++ b/yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tflite.launch.py @@ -1,3 +1,18 @@ +# Copyright 2023 Ar-Ray-code +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + import launch from launch.actions import DeclareLaunchArgument from launch.substitutions import LaunchConfiguration @@ -120,7 +135,8 @@ def generate_launch_description(): 'imshow_isshow': LaunchConfiguration('imshow_isshow'), 'src_image_topic_name': LaunchConfiguration('src_image_topic_name'), 'publish_image_topic_name': LaunchConfiguration('publish_image_topic_name'), - 'publish_boundingbox_topic_name': LaunchConfiguration('publish_boundingbox_topic_name'), + 'publish_boundingbox_topic_name': + LaunchConfiguration('publish_boundingbox_topic_name'), 'publish_resized_image': LaunchConfiguration('publish_resized_image'), 'use_bbox_ex_msgs': LaunchConfiguration('use_bbox_ex_msgs'), }], diff --git a/yolox_ros_cpp/yolox_ros_cpp/src/yolox_ros_cpp.cpp b/yolox_ros_cpp/yolox_ros_cpp/src/yolox_ros_cpp.cpp index e501cb4..6d67fa9 100644 --- a/yolox_ros_cpp/yolox_ros_cpp/src/yolox_ros_cpp.cpp +++ b/yolox_ros_cpp/yolox_ros_cpp/src/yolox_ros_cpp.cpp @@ -1,207 +1,212 @@ +// Copyright 2024 Ar-Ray-code +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include "yolox_ros_cpp/yolox_ros_cpp.hpp" namespace yolox_ros_cpp { - YoloXNode::YoloXNode(const rclcpp::NodeOptions &options) - : Node("yolox_ros_cpp", options) - { - using namespace std::chrono_literals; // NOLINT - this->init_timer_ = this->create_wall_timer( - 0s, std::bind(&YoloXNode::onInit, this)); - } +YoloXNode::YoloXNode(const rclcpp::NodeOptions & options) +: Node("yolox_ros_cpp", options) +{ + using namespace std::chrono_literals; // NOLINT + this->init_timer_ = this->create_wall_timer( + 0s, std::bind(&YoloXNode::onInit, this)); +} + +void YoloXNode::onInit() +{ + this->init_timer_->cancel(); + this->param_listener_ = std::make_shared( + this->get_node_parameters_interface()); + + this->params_ = this->param_listener_->get_params(); + + if (this->params_.imshow_isshow) { + cv::namedWindow("yolox", cv::WINDOW_AUTOSIZE); + } + + if (this->params_.class_labels_path != "") { + RCLCPP_INFO( + this->get_logger(), + "read class labels from '%s'", + this->params_.class_labels_path.c_str()); + + this->class_names_ = + yolox_cpp::utils::read_class_labels_file(this->params_.class_labels_path); + } else { + this->class_names_ = yolox_cpp::COCO_CLASSES; + } - void YoloXNode::onInit() - { - this->init_timer_->cancel(); - this->param_listener_ = std::make_shared( - this->get_node_parameters_interface()); - - this->params_ = this->param_listener_->get_params(); - - if (this->params_.imshow_isshow) - { - cv::namedWindow("yolox", cv::WINDOW_AUTOSIZE); - } - - if (this->params_.class_labels_path != "") - { - RCLCPP_INFO(this->get_logger(), "read class labels from '%s'", this->params_.class_labels_path.c_str()); - this->class_names_ = yolox_cpp::utils::read_class_labels_file(this->params_.class_labels_path); - } - else - { - this->class_names_ = yolox_cpp::COCO_CLASSES; - } - - if (this->params_.model_type == "tensorrt") - { + if (this->params_.model_type == "tensorrt") { #ifdef ENABLE_TENSORRT - RCLCPP_INFO(this->get_logger(), "Model Type is TensorRT"); - this->yolox_ 
= std::make_unique( - this->params_.model_path, this->params_.tensorrt_device, - this->params_.nms, this->params_.conf, this->params_.model_version, - this->params_.num_classes, this->params_.p6); + RCLCPP_INFO(this->get_logger(), "Model Type is TensorRT"); + this->yolox_ = std::make_unique( + this->params_.model_path, this->params_.tensorrt_device, + this->params_.nms, this->params_.conf, this->params_.model_version, + this->params_.num_classes, this->params_.p6); #else - RCLCPP_ERROR(this->get_logger(), "yolox_cpp is not built with TensorRT"); - rclcpp::shutdown(); + RCLCPP_ERROR(this->get_logger(), "yolox_cpp is not built with TensorRT"); + rclcpp::shutdown(); #endif - } - else if (this->params_.model_type == "openvino") - { + } else if (this->params_.model_type == "openvino") { #ifdef ENABLE_OPENVINO - RCLCPP_INFO(this->get_logger(), "Model Type is OpenVINO"); - this->yolox_ = std::make_unique( - this->params_.model_path, this->params_.openvino_device, - this->params_.nms, this->params_.conf, this->params_.model_version, - this->params_.num_classes, this->params_.p6); + RCLCPP_INFO(this->get_logger(), "Model Type is OpenVINO"); + this->yolox_ = std::make_unique( + this->params_.model_path, this->params_.openvino_device, + this->params_.nms, this->params_.conf, this->params_.model_version, + this->params_.num_classes, this->params_.p6); #else - RCLCPP_ERROR(this->get_logger(), "yolox_cpp is not built with OpenVINO"); - rclcpp::shutdown(); + RCLCPP_ERROR(this->get_logger(), "yolox_cpp is not built with OpenVINO"); + rclcpp::shutdown(); #endif - } - else if (this->params_.model_type == "onnxruntime") - { + } else if (this->params_.model_type == "onnxruntime") { #ifdef ENABLE_ONNXRUNTIME - RCLCPP_INFO(this->get_logger(), "Model Type is ONNXRuntime"); - this->yolox_ = std::make_unique( - this->params_.model_path, - this->params_.onnxruntime_intra_op_num_threads, - this->params_.onnxruntime_inter_op_num_threads, - this->params_.onnxruntime_use_cuda, 
this->params_.onnxruntime_device_id, - this->params_.onnxruntime_use_parallel, - this->params_.nms, this->params_.conf, this->params_.model_version, - this->params_.num_classes, this->params_.p6); + RCLCPP_INFO(this->get_logger(), "Model Type is ONNXRuntime"); + this->yolox_ = std::make_unique( + this->params_.model_path, + this->params_.onnxruntime_intra_op_num_threads, + this->params_.onnxruntime_inter_op_num_threads, + this->params_.onnxruntime_use_cuda, this->params_.onnxruntime_device_id, + this->params_.onnxruntime_use_parallel, + this->params_.nms, this->params_.conf, this->params_.model_version, + this->params_.num_classes, this->params_.p6); #else - RCLCPP_ERROR(this->get_logger(), "yolox_cpp is not built with ONNXRuntime"); - rclcpp::shutdown(); + RCLCPP_ERROR(this->get_logger(), "yolox_cpp is not built with ONNXRuntime"); + rclcpp::shutdown(); #endif - } - else if (this->params_.model_type == "tflite") - { + } else if (this->params_.model_type == "tflite") { #ifdef ENABLE_TFLITE - RCLCPP_INFO(this->get_logger(), "Model Type is tflite"); - this->yolox_ = std::make_unique( - this->params_.model_path, this->params_.tflite_num_threads, - this->params_.nms, this->params_.conf, this->params_.model_version, - this->params_.num_classes, this->params_.p6, this->params_.is_nchw); + RCLCPP_INFO(this->get_logger(), "Model Type is tflite"); + this->yolox_ = std::make_unique( + this->params_.model_path, this->params_.tflite_num_threads, + this->params_.nms, this->params_.conf, this->params_.model_version, + this->params_.num_classes, this->params_.p6, this->params_.is_nchw); #else - RCLCPP_ERROR(this->get_logger(), "yolox_cpp is not built with tflite"); - rclcpp::shutdown(); + RCLCPP_ERROR(this->get_logger(), "yolox_cpp is not built with tflite"); + rclcpp::shutdown(); #endif - } - RCLCPP_INFO(this->get_logger(), "model loaded"); - - this->sub_image_ = image_transport::create_subscription( - this, this->params_.src_image_topic_name, - 
std::bind(&YoloXNode::colorImageCallback, this, std::placeholders::_1), - "raw"); - - if (this->params_.use_bbox_ex_msgs) { - this->pub_bboxes_ = this->create_publisher( - this->params_.publish_boundingbox_topic_name, - 10); - } else { - this->pub_detection2d_ = this->create_publisher( - this->params_.publish_boundingbox_topic_name, - 10); - } - - if (this->params_.publish_resized_image) { - this->pub_image_ = image_transport::create_publisher(this, this->params_.publish_image_topic_name); - } - } + } + RCLCPP_INFO(this->get_logger(), "model loaded"); + + this->sub_image_ = image_transport::create_subscription( + this, this->params_.src_image_topic_name, + std::bind(&YoloXNode::colorImageCallback, this, std::placeholders::_1), + "raw"); + + if (this->params_.use_bbox_ex_msgs) { + this->pub_bboxes_ = this->create_publisher( + this->params_.publish_boundingbox_topic_name, + 10); + } else { + this->pub_detection2d_ = this->create_publisher( + this->params_.publish_boundingbox_topic_name, + 10); + } + + if (this->params_.publish_resized_image) { + this->pub_image_ = image_transport::create_publisher( + this, this->params_.publish_image_topic_name); + } +} - void YoloXNode::colorImageCallback(const sensor_msgs::msg::Image::ConstSharedPtr &ptr) - { - auto img = cv_bridge::toCvCopy(ptr, "bgr8"); - cv::Mat frame = img->image; - - auto now = std::chrono::system_clock::now(); - auto objects = this->yolox_->inference(frame); - auto end = std::chrono::system_clock::now(); - - auto elapsed = std::chrono::duration_cast(end - now); - RCLCPP_INFO(this->get_logger(), "Inference time: %5ld ms", elapsed.count()); - - yolox_cpp::utils::draw_objects(frame, objects, this->class_names_); - if (this->params_.imshow_isshow) - { - cv::imshow("yolox", frame); - auto key = cv::waitKey(1); - if (key == 27) - { - rclcpp::shutdown(); - } - } - - if (this->params_.use_bbox_ex_msgs) - { - if (this->pub_bboxes_ == nullptr) - { - RCLCPP_ERROR(this->get_logger(), "pub_bboxes_ is nullptr"); - return; 
- } - auto boxes = objects_to_bboxes(frame, objects, img->header); - this->pub_bboxes_->publish(boxes); - } - else - { - if (this->pub_detection2d_ == nullptr) - { - RCLCPP_ERROR(this->get_logger(), "pub_detection2d_ is nullptr"); - return; - } - vision_msgs::msg::Detection2DArray detections = objects_to_detection2d(objects, img->header); - this->pub_detection2d_->publish(detections); - } - - if (this->params_.publish_resized_image) { - sensor_msgs::msg::Image::SharedPtr pub_img = - cv_bridge::CvImage(img->header, "bgr8", frame).toImageMsg(); - this->pub_image_.publish(pub_img); - } +void YoloXNode::colorImageCallback(const sensor_msgs::msg::Image::ConstSharedPtr & ptr) +{ + auto img = cv_bridge::toCvCopy(ptr, "bgr8"); + cv::Mat frame = img->image; + + auto now = std::chrono::system_clock::now(); + auto objects = this->yolox_->inference(frame); + auto end = std::chrono::system_clock::now(); + + auto elapsed = std::chrono::duration_cast(end - now); + RCLCPP_INFO(this->get_logger(), "Inference time: %5ld ms", elapsed.count()); + + yolox_cpp::utils::draw_objects(frame, objects, this->class_names_); + if (this->params_.imshow_isshow) { + cv::imshow("yolox", frame); + auto key = cv::waitKey(1); + if (key == 27) { + rclcpp::shutdown(); } + } - bboxes_ex_msgs::msg::BoundingBoxes YoloXNode::objects_to_bboxes( - const cv::Mat &frame, const std::vector &objects, const std_msgs::msg::Header &header) - { - bboxes_ex_msgs::msg::BoundingBoxes boxes; - boxes.header = header; - for (const auto &obj : objects) - { - bboxes_ex_msgs::msg::BoundingBox box; - box.probability = obj.prob;; - box.class_id = std::to_string(obj.label); - box.xmin = obj.rect.x; - box.ymin = obj.rect.y; - box.xmax = (obj.rect.x + obj.rect.width); - box.ymax = (obj.rect.y + obj.rect.height); - box.img_width = frame.cols; - box.img_height = frame.rows; - boxes.bounding_boxes.emplace_back(box); - } - return boxes; + if (this->params_.use_bbox_ex_msgs) { + if (this->pub_bboxes_ == nullptr) { + 
RCLCPP_ERROR(this->get_logger(), "pub_bboxes_ is nullptr"); + return; } - - vision_msgs::msg::Detection2DArray YoloXNode::objects_to_detection2d(const std::vector &objects, const std_msgs::msg::Header &header) - { - vision_msgs::msg::Detection2DArray detection2d; - detection2d.header = header; - for (const auto &obj : objects) - { - vision_msgs::msg::Detection2D det; - det.bbox.center.position.x = obj.rect.x + obj.rect.width / 2; - det.bbox.center.position.y = obj.rect.y + obj.rect.height / 2; - det.bbox.size_x = obj.rect.width; - det.bbox.size_y = obj.rect.height; - - det.results.resize(1); - det.results[0].hypothesis.class_id = std::to_string(obj.label); - det.results[0].hypothesis.score = obj.prob; - detection2d.detections.emplace_back(det); - } - return detection2d; + auto boxes = objects_to_bboxes(frame, objects, img->header); + this->pub_bboxes_->publish(boxes); + } else { + if (this->pub_detection2d_ == nullptr) { + RCLCPP_ERROR(this->get_logger(), "pub_detection2d_ is nullptr"); + return; } + vision_msgs::msg::Detection2DArray detections = + objects_to_detection2d(objects, img->header); + this->pub_detection2d_->publish(detections); + } + + if (this->params_.publish_resized_image) { + sensor_msgs::msg::Image::SharedPtr pub_img = + cv_bridge::CvImage(img->header, "bgr8", frame).toImageMsg(); + this->pub_image_.publish(pub_img); + } +} + +bboxes_ex_msgs::msg::BoundingBoxes YoloXNode::objects_to_bboxes( + const cv::Mat & frame, + const std::vector & objects, + const std_msgs::msg::Header & header) +{ + bboxes_ex_msgs::msg::BoundingBoxes boxes; + boxes.header = header; + for (const auto & obj : objects) { + bboxes_ex_msgs::msg::BoundingBox box; + box.probability = obj.prob; + box.class_id = std::to_string(obj.label); + box.xmin = obj.rect.x; + box.ymin = obj.rect.y; + box.xmax = (obj.rect.x + obj.rect.width); + box.ymax = (obj.rect.y + obj.rect.height); + box.img_width = frame.cols; + box.img_height = frame.rows; + boxes.bounding_boxes.emplace_back(box); + } + 
return boxes; +} + +vision_msgs::msg::Detection2DArray YoloXNode::objects_to_detection2d( + const std::vector & objects, + const std_msgs::msg::Header & header) +{ + vision_msgs::msg::Detection2DArray detection2d; + detection2d.header = header; + for (const auto & obj : objects) { + vision_msgs::msg::Detection2D det; + det.bbox.center.position.x = obj.rect.x + obj.rect.width / 2; + det.bbox.center.position.y = obj.rect.y + obj.rect.height / 2; + det.bbox.size_x = obj.rect.width; + det.bbox.size_y = obj.rect.height; + + det.results.resize(1); + det.results[0].hypothesis.class_id = std::to_string(obj.label); + det.results[0].hypothesis.score = obj.prob; + detection2d.detections.emplace_back(det); + } + return detection2d; } +} // namespace yolox_ros_cpp RCLCPP_COMPONENTS_REGISTER_NODE(yolox_ros_cpp::YoloXNode)