diff --git a/README.md b/README.md
index 24afb79..86ffd02 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,6 @@
## Supported List
-> rclcpp is recommended due to its extended support.
| Base | ROS2 C++ |
| --------------- | -------- |
@@ -17,11 +16,6 @@
| ONNX Runtime | ✅ |
| TFLite | ✅ |
-
## Installation & Demo (C++)
@@ -47,74 +41,12 @@ Check [this URL](./yolox_ros_cpp/README.md).
-## Parameters
+## Parameters
- Check launch files.
-## Composition
-
-- Supports C++ only.
-
-
-
-
-
-
-
## Reference
![](https://raw.githubusercontent.com/Megvii-BaseDetection/YOLOX/main/assets/logo.png)
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index e5127d0..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-numpy
-torch
-opencv_python
-loguru
-scikit-image
-tqdm
-torchvision
-Pillow
-thop
-ninja
-tabulate
-tensorboard
-onnxruntime
-yolox
diff --git a/yolox_ros_py/exps/yolox_nano.py b/yolox_ros_py/exps/yolox_nano.py
deleted file mode 100644
index f841957..0000000
--- a/yolox_ros_py/exps/yolox_nano.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-import os
-
-import torch.nn as nn
-
-from yolox.exp import Exp as MyExp
-
-
-class Exp(MyExp):
- def __init__(self):
- super(Exp, self).__init__()
- self.depth = 0.33
- self.width = 0.25
- self.input_size = (416, 416)
- self.random_size = (10, 20)
- self.mosaic_scale = (0.5, 1.5)
- self.test_size = (416, 416)
- self.mosaic_prob = 0.5
- self.enable_mixup = False
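- # exp_name: this file's basename without its extension (e.g. "yolox_nano")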
- self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
-
- def get_model(self, sublinear=False):
-
- def init_yolo(M):
- for m in M.modules():
- if isinstance(m, nn.BatchNorm2d):
- m.eps = 1e-3
- m.momentum = 0.03
- if "model" not in self.__dict__:
- from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead
- in_channels = [256, 512, 1024]
- # The NANO model uses depthwise=True, which is the main difference from the other YOLOX models.
- backbone = YOLOPAFPN(
- self.depth, self.width, in_channels=in_channels,
- act=self.act, depthwise=True,
- )
- head = YOLOXHead(
- self.num_classes, self.width, in_channels=in_channels,
- act=self.act, depthwise=True
- )
- self.model = YOLOX(backbone, head)
-
- self.model.apply(init_yolo)
- self.model.head.initialize_biases(1e-2)
- return self.model
\ No newline at end of file
diff --git a/yolox_ros_py/launch/yolox_lite_tflite_camera.launch.py b/yolox_ros_py/launch/yolox_lite_tflite_camera.launch.py
deleted file mode 100644
index 101ae1f..0000000
--- a/yolox_ros_py/launch/yolox_lite_tflite_camera.launch.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import launch
-import launch_ros.actions
-from launch.actions import DeclareLaunchArgument
-from ament_index_python.packages import get_package_share_directory
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import LaunchConfiguration
-
-from urllib.request import urlretrieve
-import os
-import time
-
-def generate_launch_description():
- yolox_ros_share_dir = get_package_share_directory('yolox_ros_py')
-
- print("")
- print("-------------------------------------------------------")
- print("Warning : This model is based on YOLOX and is lightweight for RaspberryPi CPU operation. Detection other than human detection may not work correctly.")
- print("-------------------------------------------------------")
- print("")
- time.sleep(1)
-
- video_device = LaunchConfiguration('video_device', default='/dev/video0')
- video_device_arg = DeclareLaunchArgument(
- 'video_device',
- default_value='/dev/video0',
- description='Video device'
- )
-
- webcam = launch_ros.actions.Node(
- package="v4l2_camera", executable="v4l2_camera_node",
- parameters=[
- {"image_size": [640,480]},
- {"video_device": video_device},
- ],
- )
-
- yolox_tflite = launch_ros.actions.Node(
- package="yolox_ros_py", executable="yolox_tflite",output="screen",
- parameters=[
- {"model_path" : yolox_ros_share_dir+"/model.tflite"},
- {"conf" : 0.4},
- {"nms_th" : 0.5},
- {"input_shape/height" : 192},
- {"input_shape/width" : 192}
- ],
- )
-
- rqt_graph = launch_ros.actions.Node(
- package="rqt_graph", executable="rqt_graph",
- )
-
- return launch.LaunchDescription([
- video_device_arg,
- webcam,
- yolox_tflite,
- # rqt_graph
- ])
\ No newline at end of file
diff --git a/yolox_ros_py/launch/yolox_nano_onnx_camera.launch.py b/yolox_ros_py/launch/yolox_nano_onnx_camera.launch.py
deleted file mode 100644
index 03cde5f..0000000
--- a/yolox_ros_py/launch/yolox_nano_onnx_camera.launch.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import launch
-import launch_ros.actions
-from launch.actions import DeclareLaunchArgument
-from ament_index_python.packages import get_package_share_directory
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import LaunchConfiguration
-
-from urllib.request import urlretrieve
-import os
-
-def generate_launch_description():
- yolox_ros_share_dir = get_package_share_directory('yolox_ros_py')
-
- video_device = LaunchConfiguration('video_device', default='/dev/video0')
- video_device_arg = DeclareLaunchArgument(
- 'video_device',
- default_value='/dev/video0',
- description='Video device'
- )
-
- webcam = launch_ros.actions.Node(
- package="v4l2_camera", executable="v4l2_camera_node",
- parameters=[
- {"image_size": [640,480]},
- {"video_device": video_device},
- ],
- )
- yolox_onnx = launch_ros.actions.Node(
- package="yolox_ros_py", executable="yolox_onnx",output="screen",
- parameters=[
- {"input_shape/width": 416},
- {"input_shape/height": 416},
-
- {"with_p6" : False},
- {"model_path" : yolox_ros_share_dir+"/yolox_nano.onnx"},
- {"conf" : 0.3},
- ],
- )
-
- rqt_graph = launch_ros.actions.Node(
- package="rqt_graph", executable="rqt_graph",
- )
-
- return launch.LaunchDescription([
- video_device_arg,
- webcam,
- yolox_onnx,
- # rqt_graph
- ])
\ No newline at end of file
diff --git a/yolox_ros_py/launch/yolox_nano_onnx_gazebo.launch.py b/yolox_ros_py/launch/yolox_nano_onnx_gazebo.launch.py
deleted file mode 100644
index b882670..0000000
--- a/yolox_ros_py/launch/yolox_nano_onnx_gazebo.launch.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import os
-
-from ament_index_python.packages import get_package_share_directory
-from launch import LaunchDescription
-from launch.actions import IncludeLaunchDescription
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-import launch_ros
-
-
-def generate_launch_description():
- gazebo_ros_share_dir = get_package_share_directory('gazebo_ros')
- yolox_ros_share_dir = get_package_share_directory('yolox_ros_py')
- gazebo_plugins_share_dir = get_package_share_directory('gazebo_plugins')
-
- world = os.path.join(
- gazebo_plugins_share_dir,
- 'worlds',
- 'gazebo_ros_camera_demo.world'
- )
-
- gzserver_cmd = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- os.path.join(gazebo_ros_share_dir, 'launch', 'gzserver.launch.py')
- ),
- launch_arguments={'world': world}.items()
- )
-
- gzclient_cmd = IncludeLaunchDescription(
- PythonLaunchDescriptionSource(
- os.path.join(gazebo_ros_share_dir, 'launch', 'gzclient.launch.py')
- )
- )
-
- yolox_onnx = launch_ros.actions.Node(
- package="yolox_ros_py", executable="yolox_onnx",output="screen",
- parameters=[
- {"input_shape/width": 416},
- {"input_shape/height": 416},
-
- {"with_p6" : False},
- {"model_path" : yolox_ros_share_dir+"/yolox_nano.onnx"},
- {"conf" : 0.3},
- {"sensor_qos_mode" : True},
- ],
- remappings=[
- ("/image_raw", "/demo_cam/camera1/image_raw"),
- ],
- )
-
- return LaunchDescription([
- gzserver_cmd,
- gzclient_cmd,
- yolox_onnx,
- ])
diff --git a/yolox_ros_py/launch/yolox_nano_onnx_picture.launch.py b/yolox_ros_py/launch/yolox_nano_onnx_picture.launch.py
deleted file mode 100644
index dca3fbc..0000000
--- a/yolox_ros_py/launch/yolox_nano_onnx_picture.launch.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import launch
-import launch_ros.actions
-from launch.actions import DeclareLaunchArgument
-from ament_index_python.packages import get_package_share_directory
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import LaunchConfiguration
-
-from urllib.request import urlretrieve
-import os
-
-def generate_launch_description():
- yolox_ros_share_dir = get_package_share_directory('yolox_ros_py')
-
- dog_path = os.path.join(yolox_ros_share_dir, "dog.jpg")
- url = "https://raw.githubusercontent.com/pjreddie/darknet/master/data/dog.jpg"
- if not os.path.exists(dog_path):
- urlretrieve(url, dog_path)
-
-
- image_path = LaunchConfiguration('image_path', default=dog_path)
- image_path_arg = DeclareLaunchArgument(
- 'image_path',
- default_value=dog_path,
- description='Image path'
- )
-
- image_pub = launch_ros.actions.Node(
- package='image_publisher',
- executable='image_publisher_node',
- name='image_publisher',
- arguments=[image_path],
- )
- yolox_onnx = launch_ros.actions.Node(
- package="yolox_ros_py", executable="yolox_onnx",output="screen",
- parameters=[
- {"input_shape/width": 416},
- {"input_shape/height": 416},
-
- {"with_p6" : False},
- {"model_path" : yolox_ros_share_dir+"/yolox_nano.onnx"},
- {"conf" : 0.3},
- ],
- )
-
- rqt_graph = launch_ros.actions.Node(
- package="rqt_graph", executable="rqt_graph",
- )
-
- return launch.LaunchDescription([
- image_path_arg,
- image_pub,
- yolox_onnx,
- # rqt_graph
- ])
\ No newline at end of file
diff --git a/yolox_ros_py/launch/yolox_nano_onnx_youtube.launch.py b/yolox_ros_py/launch/yolox_nano_onnx_youtube.launch.py
deleted file mode 100644
index 698167c..0000000
--- a/yolox_ros_py/launch/yolox_nano_onnx_youtube.launch.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-import time
-
-from ament_index_python.packages import get_package_share_directory
-from launch import LaunchDescription
-from launch.actions import IncludeLaunchDescription
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-import launch_ros
-
-def generate_launch_description():
- yolox_ros_share_dir = get_package_share_directory('yolox_ros_py')
- youtube_publisher_share_dir = get_package_share_directory('youtube_publisher')
-
- print("")
- print("===============================================================")
- print("Downloading video from YouTube...")
- print("Donwloading video takes about few minutes.")
- print("===============================================================")
- print("")
- time.sleep(3)
-
- youtube = launch_ros.actions.Node(
- package='youtube_publisher', executable='youtube_pub',
- parameters=[
- {'topic_name': '/image_raw'},
- {'cache_path': youtube_publisher_share_dir + '/cache'},
- {'video_url' : 'https://www.youtube.com/watch?v=CFLOiR2EbKM'},
- {'using_youtube_dl' : True},
- {'clear_cache_force' : False},
- {'width' : 640},
- {'height' : 360},
- {'imshow_is_show' : True}
- ],
- )
-
- yolox_onnx = launch_ros.actions.Node(
- package="yolox_ros_py", executable="yolox_onnx",output="screen",
- parameters=[
- {"input_shape/width": 416},
- {"input_shape/height": 416},
-
- {"with_p6" : False},
- {"model_path" : yolox_ros_share_dir+"/yolox_nano.onnx"},
- {"conf" : 0.3},
- {"sensor_qos_mode" : True},
- ],
- )
-
- return LaunchDescription([
- youtube,
- yolox_onnx,
- ])
diff --git a/yolox_ros_py/launch/yolox_nano_openvino.launch.py b/yolox_ros_py/launch/yolox_nano_openvino.launch.py
deleted file mode 100644
index 0af87b9..0000000
--- a/yolox_ros_py/launch/yolox_nano_openvino.launch.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import launch
-import launch_ros.actions
-from launch.actions import DeclareLaunchArgument
-from ament_index_python.packages import get_package_share_directory
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import LaunchConfiguration
-
-from urllib.request import urlretrieve
-import os
-
-def generate_launch_description():
- yolox_ros_share_dir = get_package_share_directory('yolox_ros_py')
-
- video_device = LaunchConfiguration('video_device', default='/dev/video0')
- video_device_arg = DeclareLaunchArgument(
- 'video_device',
- default_value='/dev/video0',
- description='Video device'
- )
-
- webcam = launch_ros.actions.Node(
- package="v4l2_camera", executable="v4l2_camera_node",
- parameters=[
- {"image_size": [640,480]},
- {"video_device": video_device},
- ],
- )
-
- yolox_openvino = launch_ros.actions.Node(
- package="yolox_ros_py", executable="yolox_openvino",output="screen",
- parameters=[
- {"device" : 'CPU'},
- {"model_path" : yolox_ros_share_dir+"/yolox_nano.onnx"},
- {"conf" : 0.3},
- ],
- )
-
- rqt_graph = launch_ros.actions.Node(
- package="rqt_graph", executable="rqt_graph",
- )
-
- return launch.LaunchDescription([
- video_device_arg,
- webcam,
- yolox_openvino,
- # rqt_graph
- ])
\ No newline at end of file
diff --git a/yolox_ros_py/launch/yolox_nano_torch_cpu_camera.launch.py b/yolox_ros_py/launch/yolox_nano_torch_cpu_camera.launch.py
deleted file mode 100644
index e7957dd..0000000
--- a/yolox_ros_py/launch/yolox_nano_torch_cpu_camera.launch.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import launch
-import launch_ros.actions
-from launch.actions import DeclareLaunchArgument
-from ament_index_python.packages import get_package_share_directory
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import LaunchConfiguration
-
-def generate_launch_description():
- yolox_ros_share_dir = get_package_share_directory('yolox_ros_py')
-
- video_device = LaunchConfiguration('video_device', default='/dev/video0')
- video_device_arg = DeclareLaunchArgument(
- 'video_device',
- default_value='/dev/video0',
- description='Video device'
- )
-
- webcam = launch_ros.actions.Node(
- package="v4l2_camera", executable="v4l2_camera_node",
- parameters=[
- {"image_size": [640,480]},
- {"video_device": video_device},
- ],
- )
-
- yolox_ros = launch_ros.actions.Node(
- package="yolox_ros_py", executable="yolox_ros",
- parameters=[
- {"yolox_exp_py" : yolox_ros_share_dir+'/yolox_nano.py'},
- {"device" : 'cpu'},
- {"fp16" : True},
- {"fuse" : False},
- {"legacy" : False},
- {"trt" : False},
- {"ckpt" : yolox_ros_share_dir+"/yolox_nano.pth"},
- {"conf" : 0.3},
- {"threshold" : 0.65},
- {"resize" : 640},
- ],
- )
-
- rqt_graph = launch_ros.actions.Node(
- package="rqt_graph", executable="rqt_graph",
- )
-
- return launch.LaunchDescription([
- video_device_arg,
- webcam,
- yolox_ros,
- # rqt_graph
- ])
\ No newline at end of file
diff --git a/yolox_ros_py/launch/yolox_nano_torch_gpu_camera.launch.py b/yolox_ros_py/launch/yolox_nano_torch_gpu_camera.launch.py
deleted file mode 100644
index 3d2b606..0000000
--- a/yolox_ros_py/launch/yolox_nano_torch_gpu_camera.launch.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import launch
-import launch_ros.actions
-from launch.actions import DeclareLaunchArgument
-from ament_index_python.packages import get_package_share_directory
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-from launch.substitutions import LaunchConfiguration
-
-def generate_launch_description():
- yolox_ros_share_dir = get_package_share_directory('yolox_ros_py')
-
- video_device = LaunchConfiguration('video_device', default='/dev/video0')
- video_device_arg = DeclareLaunchArgument(
- 'video_device',
- default_value='/dev/video0',
- description='Video device'
- )
-
- webcam = launch_ros.actions.Node(
- package="v4l2_camera", executable="v4l2_camera_node",
- parameters=[
- {"image_size": [640,480]},
- {"video_device": video_device},
- ],
- )
-
- yolox_ros = launch_ros.actions.Node(
- package="yolox_ros_py", executable="yolox_ros",
- parameters=[
- {"yolox_exp_py" : yolox_ros_share_dir+'/yolox_nano.py'},
- {"device" : 'gpu'},
- {"fp16" : True},
- {"fuse" : False},
- {"legacy" : False},
- {"trt" : False},
- {"ckpt" : yolox_ros_share_dir+"/yolox_nano.pth"},
- {"conf" : 0.3},
- {"threshold" : 0.65},
- {"resize" : 640},
- ],
- )
-
- rqt_graph = launch_ros.actions.Node(
- package="rqt_graph", executable="rqt_graph",
- )
-
- return launch.LaunchDescription([
- video_device_arg,
- webcam,
- yolox_ros,
- # rqt_graph
- ])
\ No newline at end of file
diff --git a/yolox_ros_py/package.xml.old b/yolox_ros_py/package.xml.old
deleted file mode 100755
index ae911de..0000000
--- a/yolox_ros_py/package.xml.old
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0"?>
-<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
-<package format="3">
- <name>yolox_ros_py</name>
- <version>0.3.2</version>
- <description>The yolox_ros_py package</description>
- <maintainer email="ray255ar@gmail.com">Ar-Ray-code</maintainer>
- <license>Apache License, Version 2.0</license>
- <author>Ar-Ray-code</author>
-
- <test_depend>ament_copyright</test_depend>
- <test_depend>ament_flake8</test_depend>
- <test_depend>ament_pep257</test_depend>
- <test_depend>python3-pytest</test_depend>
- <exec_depend>rclpy</exec_depend>
- <exec_depend>bbox_ex_msgs</exec_depend>
-
- <export>
- <build_type>ament_python</build_type>
- </export>
-</package>
diff --git a/yolox_ros_py/resource/yolox_ros_py b/yolox_ros_py/resource/yolox_ros_py
deleted file mode 100644
index e69de29..0000000
diff --git a/yolox_ros_py/setup.cfg b/yolox_ros_py/setup.cfg
deleted file mode 100755
index 22e1624..0000000
--- a/yolox_ros_py/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script-dir=$base/lib/yolox_ros_py
-[install]
-install-scripts=$base/lib/yolox_ros_py
\ No newline at end of file
diff --git a/yolox_ros_py/setup.py.old b/yolox_ros_py/setup.py.old
deleted file mode 100755
index 064864f..0000000
--- a/yolox_ros_py/setup.py.old
+++ /dev/null
@@ -1,114 +0,0 @@
-from setuptools import setup
-
-import os
-from glob import glob
-from urllib.request import urlretrieve
-
-package_name = 'yolox_ros_py'
-
-
-YOLOX_S = 'yolox_s'
-YOLOX_M = 'yolox_m'
-YOLOX_L = 'yolox_l'
-YOLOX_X = 'yolox_x'
-YOLOX_DARKNET53 = 'yolox_darknet53'
-YOLOX_NANO = 'yolox_nano'
-YOLOX_TINY = 'yolox_tiny'
-
-PTH = '.pth'
-ONNX = '.onnx'
-
-YOLOX_VERSION = '0.1.1rc0'
-
-BASE_LINK = 'https://github.com/Megvii-BaseDetection/YOLOX/releases/download/'+YOLOX_VERSION+'/'
-# .pth
-YOLOX_S_URL = BASE_LINK + YOLOX_S + PTH
-YOLOX_M_URL = BASE_LINK + YOLOX_M + PTH
-YOLOX_L_URL = BASE_LINK + YOLOX_L + PTH
-YOLOX_X_URL = BASE_LINK + YOLOX_X + PTH
-YOLOX_DARKNET53_URL = BASE_LINK + YOLOX_DARKNET53 + PTH
-YOLOX_NANO_URL = BASE_LINK + YOLOX_NANO + PTH
-YOLOX_TINY_URL = BASE_LINK + YOLOX_TINY + PTH
-# .onnx
-YOLOX_S_ONNX_URL = BASE_LINK + YOLOX_S + ONNX
-YOLOX_M_ONNX_URL = BASE_LINK + YOLOX_M + ONNX
-YOLOX_L_ONNX_URL = BASE_LINK + YOLOX_L + ONNX
-YOLOX_X_ONNX_URL = BASE_LINK + YOLOX_X + ONNX
-YOLOX_DARKNET53_ONNX_URL = BASE_LINK + YOLOX_DARKNET53 + ONNX
-YOLOX_NANO_ONNX_URL = BASE_LINK + YOLOX_NANO + ONNX
-YOLOX_TINY_ONNX_URL = BASE_LINK + YOLOX_TINY + ONNX
-
-BASE_PATH = os.getcwd() + '/../weights/'
-os.makedirs(BASE_PATH, exist_ok=True)
-# .pth
-YOLOX_S_PATH = BASE_PATH + YOLOX_S + PTH
-YOLOX_M_PATH = BASE_PATH + YOLOX_M + PTH
-YOLOX_L_PATH = BASE_PATH + YOLOX_L + PTH
-YOLOX_X_PATH = BASE_PATH + YOLOX_X + PTH
-YOLOX_DARKNET53_PATH = BASE_PATH + YOLOX_DARKNET53 + PTH
-YOLOX_NANO_PATH = BASE_PATH + YOLOX_NANO + PTH
-YOLOX_TINY_PATH = BASE_PATH + YOLOX_TINY + PTH
-# .onnx
-YOLOX_S_ONNX_PATH = BASE_PATH + YOLOX_S + ONNX
-YOLOX_M_ONNX_PATH = BASE_PATH + YOLOX_M + ONNX
-YOLOX_L_ONNX_PATH = BASE_PATH + YOLOX_L + ONNX
-YOLOX_X_ONNX_PATH = BASE_PATH + YOLOX_X + ONNX
-YOLOX_DARKNET53_ONNX_PATH = BASE_PATH + YOLOX_DARKNET53 + ONNX
-YOLOX_NANO_ONNX_PATH = BASE_PATH + YOLOX_NANO + ONNX
-YOLOX_TINY_ONNX_PATH = BASE_PATH + YOLOX_TINY + ONNX
-
-# Download YOLOX-NANO Weights
-if not os.path.exists(YOLOX_NANO_PATH):
- urlretrieve(YOLOX_NANO_URL, YOLOX_NANO_PATH)
-# Download YOLOX-NANO ONNX
-if not os.path.exists(YOLOX_NANO_ONNX_PATH):
- urlretrieve(YOLOX_NANO_ONNX_URL, YOLOX_NANO_ONNX_PATH)
-
-# onnx person-detection model (ensure the tflite/ subdirectory exists first)
-os.makedirs(BASE_PATH + 'tflite/', exist_ok=True)
-TFLITE_PATH = BASE_PATH + 'tflite/model.onnx'
-if not os.path.exists(TFLITE_PATH):
- urlretrieve('https://github.com/Kazuhito00/Person-Detection-using-RaspberryPi-CPU/raw/main/model/model.onnx', TFLITE_PATH)
-
-# tflite
-TFLITE_PATH = BASE_PATH + 'tflite/model.tflite'
-if not os.path.exists(TFLITE_PATH):
- urlretrieve('https://github.com/Kazuhito00/Person-Detection-using-RaspberryPi-CPU/raw/main/model/model.tflite', TFLITE_PATH)
-
-
-setup(
- name=package_name,
- version='0.3.2',
- packages=[package_name],
- data_files=[
- ('share/ament_index/resource_index/packages',
- ['resource/' + package_name]),
- ('share/' + package_name, ['package.xml']),
- (os.path.join('share', package_name), glob('./launch/*.launch.py')),
- (os.path.join('share', package_name), glob('../weights/*.pth')),
- (os.path.join('share', package_name), glob('../weights/*.onnx')),
- (os.path.join('share', package_name), glob('../weights/*.tflite')),
- (os.path.join('share', package_name), glob('../weights/openvino/*')),
- (os.path.join('share', package_name), glob('../weights/onnx/*')),
- (os.path.join('share', package_name), glob('../weights/tflite/*')),
- (os.path.join('share', package_name), glob('../weights/tensorrt/*')),
- (os.path.join('share', package_name), glob('./exps/*.py')),
- ],
- install_requires=['setuptools'],
- zip_safe=True,
- author='Ar-Ray-code',
- author_email="ray255ar@gmail.com",
- maintainer='Ar-Ray-code',
- maintainer_email="ray255ar@gmail.com",
- description='YOLOX + ROS2 Foxy',
- license='Apache License, Version 2.0',
- tests_require=['pytest'],
- entry_points={
- 'console_scripts': [
- 'yolox_ros = '+package_name+'.yolox_ros:ros_main',
- 'yolox_openvino = '+package_name+'.yolox_openvino:ros_main',
- 'yolox_onnx = '+package_name+'.yolox_onnx:ros_main',
- 'yolox_tflite = '+package_name+'.yolox_tflite:ros_main',
- ],
- },
-)
-
diff --git a/yolox_ros_py/yolox_ros_py/__init__.py b/yolox_ros_py/yolox_ros_py/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/yolox_ros_py/yolox_ros_py/module_tflite/README.md b/yolox_ros_py/yolox_ros_py/module_tflite/README.md
deleted file mode 100644
index 97cba97..0000000
--- a/yolox_ros_py/yolox_ros_py/module_tflite/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Person-Detection-using-RaspberryPi-CPU
-A person-detection model and demo script intended to run on the Raspberry Pi 4 CPU.
-
-https://user-images.githubusercontent.com/37477845/165421632-600f5f63-51e5-4afa-a0d5-3abc59d0d711.mp4
-
-Using PINTO's [TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin), inference takes roughly 45-60 ms with 4 threads (around 75 ms with a single thread).
-It also runs on laptop-class machines, but if you need higher accuracy, we recommend an object-detection model other than the one in this repository.
-On a laptop, "model.onnx" is often faster (around 10 ms on a Core i7-8750H).
-
-# Requirement
-opencv-python 4.5.3.56 or later
-tensorflow 2.8.0 or later (using [TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin) is recommended)
-onnxruntime 1.9.0 or later (only needed when using model.onnx)
-
-# Demo
-Run the demo as follows.
-```bash
-python demo.py
-```
-* --device
-Camera device number
-Default: 0
-* --movie
-Path to a video file (takes precedence over the camera device when specified)
-Default: not specified
-* --width
-Capture width
-Default: 640
-* --height
-Capture height
-Default: 360
-* --model
-Path to the model to load
-Default: model/model.tflite
-* --score_th
-Detection score threshold
-Default: 0.4
-* --nms_th
-NMS threshold
-Default: 0.5
-* --num_threads
-Number of threads to use (only effective with TensorFlow-Lite)
-Default: None
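-
-For example, to run the TFLite model with 4 threads and the other options left at their defaults:
-```bash
-python demo.py --model model/model.tflite --score_th 0.4 --nms_th 0.5 --num_threads 4
-```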
-
-# Demo(ROS2)
-This is a demo for ROS2.
-
-Terminal 1
-```bash
-ros2 run v4l2_camera v4l2_camera_node
-```
-
-Terminal 2
-```bash
-python3 ./demo_ros2.py
-```
-
-# Reference
-* [PINTO0309/TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin)
-
-# Author
-Kazuhito Takahashi (https://twitter.com/KzhtTkhs)
-
-# License
-Person-Detection-using-RaspberryPi-CPU is under [Apache 2.0 License](LICENSE).
-
-# License(Movie)
-The sample movie is [Regent Street, London, UK](https://www2.nhk.or.jp/archives/creative/material/view.cgi?m=D0002160979_00000) from the [NHK Creative Library](https://www.nhk.or.jp/archives/creative/).
diff --git a/yolox_ros_py/yolox_ros_py/module_tflite/__init__.py b/yolox_ros_py/yolox_ros_py/module_tflite/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/yolox_ros_py/yolox_ros_py/module_tflite/demo.py b/yolox_ros_py/yolox_ros_py/module_tflite/demo.py
deleted file mode 100644
index 6b8803c..0000000
--- a/yolox_ros_py/yolox_ros_py/module_tflite/demo.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-import copy
-import time
-import argparse
-
-import cv2
-
-from .detector import Detector
-
-
-def get_args():
- parser = argparse.ArgumentParser()
-
- parser.add_argument("--device", type=int, default=0)
- parser.add_argument("--movie", type=str, default=None)
- parser.add_argument("--width", help='cap width', type=int, default=640)
- parser.add_argument("--height", help='cap height', type=int, default=360)
-
- parser.add_argument(
- "--model",
- type=str,
- default='model/model.tflite',
- )
- parser.add_argument(
- '--input_shape',
- type=str,
- default="192,192",
- )
- parser.add_argument(
- '--score_th',
- type=float,
- default=0.4,
- )
- parser.add_argument(
- '--nms_th',
- type=float,
- default=0.5,
- )
- parser.add_argument(
- '--num_threads',
- type=int,
- default=None,
- help='Valid only when using Tensorflow-Lite',
- )
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- # Parse arguments ########################################################
- args = get_args()
- cap_device = args.device
- cap_width = args.width
- cap_height = args.height
-
- if args.movie is not None:
- cap_device = args.movie
-
- model_path = args.model
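- # parse the "H,W" input-shape string into a tuple of ints, e.g. "192,192" -> (192, 192)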
- input_shape = tuple(map(int, args.input_shape.split(',')))
- score_th = args.score_th
- nms_th = args.nms_th
- num_threads = args.num_threads
-
- # Prepare the camera #####################################################
- cap = cv2.VideoCapture(cap_device)
- cap.set(cv2.CAP_PROP_FRAME_WIDTH, cap_width)
- cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cap_height)
-
- # Load the model #########################################################
- detector = Detector(
- model_path=model_path,
- input_shape=input_shape,
- score_th=score_th,
- nms_th=nms_th,
- providers=['CPUExecutionProvider'],
- num_threads=num_threads,
- )
-
- while True:
- start_time = time.time()
-
- # Capture a frame ###############################################
- ret, frame = cap.read()
- if not ret:
- break
- debug_image = copy.deepcopy(frame)
-
- # Run inference ###################################################
- bboxes, scores, class_ids = detector.inference(frame)
-
- elapsed_time = time.time() - start_time
-
- # Draw the debug overlay
- debug_image = draw_debug(
- debug_image,
- elapsed_time,
- score_th,
- bboxes,
- scores,
- class_ids,
- )
-
- # Key handling (ESC: quit) ########################################
- key = cv2.waitKey(1)
- if key == 27: # ESC
- break
-
- # Show the frame ##################################################
- debug_image = cv2.resize(debug_image, (cap_width, cap_height))
- cv2.imshow('Person Detection Demo', debug_image)
-
- cap.release()
- cv2.destroyAllWindows()
-
-
-def draw_debug(
- image,
- elapsed_time,
- score_th,
- bboxes,
- scores,
- class_ids,
-):
- debug_image = copy.deepcopy(image)
-
- for bbox, score, class_id in zip(bboxes, scores, class_ids):
- x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
-
- if score_th > score:
- continue
-
- # Bounding box
- debug_image = cv2.rectangle(
- debug_image,
- (x1, y1),
- (x2, y2),
- (0, 255, 0),
- thickness=2,
- )
-
- # Class ID and score
- score = '%.2f' % score
- text = '%s:%s' % (str(int(class_id)), score)
- debug_image = cv2.putText(
- debug_image,
- text,
- (x1, y1 - 10),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.7,
- (0, 255, 0),
- thickness=2,
- )
-
- # Inference time
- text = 'Elapsed time:' + '%.0f' % (elapsed_time * 1000)
- text = text + 'ms'
- debug_image = cv2.putText(
- debug_image,
- text,
- (10, 30),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.8,
- (0, 255, 0),
- thickness=2,
- )
-
- return debug_image
-
-
-if __name__ == '__main__':
- main()
diff --git a/yolox_ros_py/yolox_ros_py/module_tflite/detector.py b/yolox_ros_py/yolox_ros_py/module_tflite/detector.py
deleted file mode 100644
index 0d69686..0000000
--- a/yolox_ros_py/yolox_ros_py/module_tflite/detector.py
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-import os
-import copy
-
-import cv2
-import numpy as np
-
-
-class Detector(object):
- def __init__(
- self,
- model_path='model.onnx',
- input_shape=(192, 192),
- score_th=0.3,
- nms_th=0.5,
- providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
- num_threads=None, # Valid only when using Tensorflow-Lite
- ):
- # Input size
- self.input_shape = input_shape
-
- # Thresholds
- self.score_th = score_th
- self.nms_th = nms_th
-
- # Load the model
- self.extension = os.path.splitext(model_path)[1][1:]
- if self.extension == 'onnx':
- import onnxruntime
-
- self.model = onnxruntime.InferenceSession(
- model_path,
- providers=providers,
- )
-
- self.input_name = self.model.get_inputs()[0].name
- self.output_name = self.model.get_outputs()[0].name
- elif self.extension == 'tflite':
- try:
- from tflite_runtime.interpreter import Interpreter
- self.model = Interpreter(
- model_path=model_path,
- num_threads=num_threads,
- )
- except ImportError:
- import tensorflow as tf
- self.model = tf.lite.Interpreter(
- model_path=model_path,
- num_threads=num_threads,
- )
-
- self.model.allocate_tensors()
-
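- # TFLite addresses input/output tensors by index rather than by name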
- self.input_name = self.model.get_input_details()[0]['index']
- self.output_name = self.model.get_output_details()[0]['index']
- else:
- raise ValueError("Invalid extension %s." % (model_path))
-
- def inference(self, image):
- temp_image = copy.deepcopy(image)
-
- # Preprocess
- image, ratio = self._preprocess(temp_image, self.input_shape)
-
- # Run inference
- results = None
- if self.extension == 'onnx':
- results = self.model.run(
- None,
- {self.input_name: image[None, :, :, :]},
- )[0]
- elif self.extension == 'tflite':
- image = image.reshape(
- -1,
- 3,
- self.input_shape[0],
- self.input_shape[1],
- )
- self.model.set_tensor(self.input_name, image)
- self.model.invoke()
- results = self.model.get_tensor(self.output_name)
-
- # Postprocess
- bboxes, scores, class_ids = self._postprocess(
- results,
- self.input_shape,
- ratio,
- self.score_th,
- self.nms_th,
- )
-
- return bboxes, scores, class_ids
-
- def _preprocess(self, image, input_size):
- # Resize while keeping the aspect ratio
- ratio = min(input_size[0] / image.shape[0],
- input_size[1] / image.shape[1])
- resized_image = cv2.resize(
- image,
- (int(image.shape[1] * ratio), int(image.shape[0] * ratio)),
- interpolation=cv2.INTER_LINEAR,
- )
- resized_image = resized_image.astype(np.uint8)
-
- # Create a padded canvas filled with 114 (YOLOX's gray padding value)
- padded_image = np.ones(
- (input_size[0], input_size[1], 3),
- dtype=np.uint8,
- )
- padded_image *= 114
- padded_image[:int(image.shape[0] * ratio), :int(image.shape[1] *
- ratio)] = resized_image
-
- padded_image = padded_image.transpose((2, 0, 1))
- padded_image = np.ascontiguousarray(padded_image, dtype=np.float32)
-
- return padded_image, ratio
-
- def _postprocess(
- self,
- outputs,
- img_size,
- ratio,
- score_th,
- nms_th,
- ):
- grids = []
- expanded_strides = []
-
- strides = [8, 16, 32]
-
- hsizes = [img_size[0] // stride for stride in strides]
- wsizes = [img_size[1] // stride for stride in strides]
-
- for hsize, wsize, stride in zip(hsizes, wsizes, strides):
- xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
- grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
- grids.append(grid)
- shape = grid.shape[:2]
- expanded_strides.append(np.full((*shape, 1), stride))
-
- grids = np.concatenate(grids, 1)
- expanded_strides = np.concatenate(expanded_strides, 1)
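- # Decode raw outputs: box centers are grid-cell offsets scaled by stride; widths/heights are exponentiated and scaled by stride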
- outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
- outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
-
- predictions = outputs[0]
- bboxes = predictions[:, :4]
- scores = predictions[:, 4:5] * predictions[:, 5:]
- scores = scores.T[0]
-
- bboxes_xyxy = np.ones_like(bboxes)
- bboxes_xyxy[:, 0] = bboxes[:, 0] - bboxes[:, 2] / 2.
- bboxes_xyxy[:, 1] = bboxes[:, 1] - bboxes[:, 3] / 2.
- bboxes_xyxy[:, 2] = bboxes[:, 0] + bboxes[:, 2] / 2.
- bboxes_xyxy[:, 3] = bboxes[:, 1] + bboxes[:, 3] / 2.
- bboxes_xyxy /= ratio
-
- return self._nms(bboxes_xyxy, scores, score_th, nms_th)
-
- def _nms(self, bboxes, scores, score_th, nms_th):
- indexes = cv2.dnn.NMSBoxes(
- bboxes.tolist(),
- scores.tolist(),
- score_th,
- nms_th,
- )
-
- result_bboxes, result_scores, result_class_ids = [], [], []
- if len(indexes) > 0:
- if indexes.ndim == 2:
- result_bboxes = bboxes[indexes[:, 0]]
- result_scores = scores[indexes[:, 0]]
- result_class_ids = np.zeros(result_scores.shape)
- elif indexes.ndim == 1:
- result_bboxes = bboxes[indexes[:]]
- result_scores = scores[indexes[:]]
- result_class_ids = np.zeros(result_scores.shape)
-
- return result_bboxes, result_scores, result_class_ids
diff --git a/yolox_ros_py/yolox_ros_py/yolox_onnx.py b/yolox_ros_py/yolox_ros_py/yolox_onnx.py
deleted file mode 100644
index 576f7b2..0000000
--- a/yolox_ros_py/yolox_ros_py/yolox_onnx.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-# ROS2 rclpy -- Ar-Ray-code 2022
-import argparse
-import os
-
-import cv2
-import numpy as np
-
-import onnxruntime
-
-from yolox.data.data_augment import preproc as preprocess
-from yolox.data.datasets import COCO_CLASSES
-from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
-
-from .yolox_ros_py_utils.utils import yolox_py
-
-# ROS2 =====================================
-import rclpy
-from rclpy.node import Node
-
-from std_msgs.msg import Header
-from cv_bridge import CvBridge
-from sensor_msgs.msg import Image
-
-from bboxes_ex_msgs.msg import BoundingBoxes
-from bboxes_ex_msgs.msg import BoundingBox
-
-from rclpy.qos import qos_profile_sensor_data
-
-# from darknet_ros_msgs.msg import BoundingBoxes
-# from darknet_ros_msgs.msg import BoundingBox
-
-class yolox_ros(yolox_py):
- def __init__(self) -> None:
-
- # ROS2 init
- super().__init__('yolox_ros', load_params=True)
-
- if (self.imshow_isshow):
- cv2.namedWindow("YOLOX")
-
- self.bridge = CvBridge()
-
- self.pub = self.create_publisher(BoundingBoxes,"bounding_boxes", 10)
- self.sub = self.create_subscription(Image,"image_raw",self.imageflow_callback, self.qos_image_sub)
-
- def imageflow_callback(self,msg:Image) -> None:
- try:
- # fps start
- start_time = cv2.getTickCount()
- bboxes = BoundingBoxes()
- origin_img = self.bridge.imgmsg_to_cv2(msg,"bgr8")
-
- # preprocess
- img, self.ratio = preprocess(origin_img, self.input_shape)
-
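- # NOTE: the ONNX Runtime session is recreated on every frame; creating it once in __init__ would avoid reloading the model per callback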
- session = onnxruntime.InferenceSession(self.model_path)
-
- ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
- output = session.run(None, ort_inputs)
-
- predictions = demo_postprocess(output[0], self.input_shape, p6=self.with_p6)[0]
-
- boxes = predictions[:, :4]
- scores = predictions[:, 4:5] * predictions[:, 5:]
-
- boxes_xyxy = np.ones_like(boxes)
- boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2.
- boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2.
- boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.
- boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.
- boxes_xyxy /= self.ratio
- dets = multiclass_nms(boxes_xyxy, scores, nms_thr=self.nms_th, score_thr=self.conf)
- if dets is not None:
- self.final_boxes, self.final_scores, self.final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
- origin_img = vis(origin_img, self.final_boxes, self.final_scores, self.final_cls_inds,
- conf=self.conf, class_names=COCO_CLASSES)
-
- end_time = cv2.getTickCount()
- time_took = (end_time - start_time) / cv2.getTickFrequency()
-
- # rclpy log FPS
- self.get_logger().info(f'FPS: {1 / time_took}')
-
- try:
- bboxes = self.yolox2bboxes_msgs(dets[:, :4], self.final_scores, self.final_cls_inds, COCO_CLASSES, msg.header, origin_img)
- if (self.imshow_isshow):
- cv2.imshow("YOLOX",origin_img)
- cv2.waitKey(1)
-
- except Exception:
- if (self.imshow_isshow):
- cv2.imshow("YOLOX",origin_img)
- cv2.waitKey(1)
-
- self.pub.publish(bboxes)
-
- except Exception as e:
- self.get_logger().info(f'Error: {e}')
- pass
-
-def ros_main(args = None):
- rclpy.init(args=args)
- ros_class = yolox_ros()
-
- try:
- rclpy.spin(ros_class)
- except KeyboardInterrupt:
- pass
- finally:
- ros_class.destroy_node()
- rclpy.shutdown()
-
-if __name__ == "__main__":
- ros_main()
\ No newline at end of file
diff --git a/yolox_ros_py/yolox_ros_py/yolox_openvino.py b/yolox_ros_py/yolox_ros_py/yolox_openvino.py
deleted file mode 100644
index 31f8106..0000000
--- a/yolox_ros_py/yolox_ros_py/yolox_openvino.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-# ROS2 rclpy -- Ar-Ray-code 2021
-import rclpy
-
-import cv2
-import numpy as np
-import copy
-
-from openvino.inference_engine import IECore
-
-from yolox.data.data_augment import preproc as preprocess
-from yolox.data.datasets import COCO_CLASSES
-from yolox.utils import multiclass_nms, demo_postprocess, vis
-
-from .yolox_ros_py_utils.utils import yolox_py
-
-# ROS2 =====================================
-import rclpy
-from rclpy.node import Node
-
-from std_msgs.msg import Header
-from cv_bridge import CvBridge
-from sensor_msgs.msg import Image
-
-from rclpy.qos import qos_profile_sensor_data
-
-from bboxes_ex_msgs.msg import BoundingBoxes
-from bboxes_ex_msgs.msg import BoundingBox
-
-# from darknet_ros_msgs.msg import BoundingBoxes
-# from darknet_ros_msgs.msg import BoundingBox
-
-class yolox_ros(yolox_py):
- def __init__(self) -> None:
-
- # ROS2 init
- super().__init__('yolox_ros', load_params=True)
-
- self.setting_yolox_exp()
-
- if (self.imshow_isshow):
- cv2.namedWindow("YOLOX")
-
- self.bridge = CvBridge()
-
- self.pub = self.create_publisher(BoundingBoxes,"bounding_boxes", 10)
- self.sub = self.create_subscription(Image,"image_raw",self.imageflow_callback, self.qos_image_sub)
-
- def setting_yolox_exp(self) -> None:
- print('Creating Inference Engine')
- ie = IECore()
- print(f'Reading the network: {self.model_path}')
- # (.xml and .bin files) or (.onnx file)
-
- self.net = ie.read_network(model=self.model_path)
- print('Configuring input and output blobs')
- # Get names of input and output blobs
- self.input_blob = next(iter(self.net.input_info))
- self.out_blob = next(iter(self.net.outputs))
-
- # Set input and output precision manually
- self.net.input_info[self.input_blob].precision = 'FP32'
- self.net.outputs[self.out_blob].precision = 'FP16'
-
- print('Loading the model to the plugin')
- self.exec_net = ie.load_network(network=self.net, device_name=self.device)
-
- def imageflow_callback(self,msg:Image) -> None:
- try:
- # fps start
- start_time = cv2.getTickCount()
- bboxes = BoundingBoxes()
- origin_img = self.bridge.imgmsg_to_cv2(msg,"bgr8")
- # deep copy
- nodetect_image = copy.deepcopy(origin_img)
-
- # origin_img = img_rgb
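- # the network input layout is NCHW; take the spatial dimensions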
- _, _, h, w = self.net.input_info[self.input_blob].input_data.shape
- image, ratio = preprocess(origin_img, (h, w))
-
- res = self.exec_net.infer(inputs={self.input_blob: image})
- res = res[self.out_blob]
-
- # Predictions is result
- predictions = demo_postprocess(res, (h, w), p6=False)[0]
-
- boxes = predictions[:, :4]
- scores = predictions[:, 4, None] * predictions[:, 5:]
- # print(scores)
-
- boxes_xyxy = np.ones_like(boxes)
- boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2.
- boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2.
- boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.
- boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.
- boxes_xyxy /= ratio
- dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
-
- # print(dets)
- if dets is not None:
- final_boxes = dets[:, :4]
- final_scores, final_cls_inds = dets[:, 4], dets[:, 5]
- origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
- conf=self.conf, class_names=COCO_CLASSES)
-
- # ==============================================================
- end_time = cv2.getTickCount()
- time_took = (end_time - start_time) / cv2.getTickFrequency()
-
- # rclpy log FPS
- self.get_logger().info(f'FPS: {1 / time_took}')
-
- try:
- bboxes = self.yolox2bboxes_msgs(dets[:, :4], final_scores, final_cls_inds, COCO_CLASSES, msg.header, origin_img)
- if (self.imshow_isshow):
- cv2.imshow("YOLOX",origin_img)
- cv2.waitKey(1)
-
- except Exception:
- if (self.imshow_isshow):
- cv2.imshow("YOLOX",origin_img)
- cv2.waitKey(1)
-
- self.pub.publish(bboxes)
-
- except Exception as e:
- self.get_logger().info(f'Error: {e}')
- pass
-
-def ros_main(args = None):
- rclpy.init(args=args)
- ros_class = yolox_ros()
-
- try:
- rclpy.spin(ros_class)
- except KeyboardInterrupt:
- pass
- finally:
- ros_class.destroy_node()
- rclpy.shutdown()
-
-if __name__ == "__main__":
- ros_main()
\ No newline at end of file
diff --git a/yolox_ros_py/yolox_ros_py/yolox_ros.py b/yolox_ros_py/yolox_ros_py/yolox_ros.py
deleted file mode 100644
index faec75c..0000000
--- a/yolox_ros_py/yolox_ros_py/yolox_ros.py
+++ /dev/null
@@ -1,271 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-# ROS2 rclpy -- Ar-Ray-code 2021
-
-import os
-import time
-from loguru import logger
-
-import cv2
-from numpy import empty
-
-import torch
-import torch.backends.cudnn as cudnn
-
-from yolox.data.data_augment import ValTransform
-from yolox.data.datasets import COCO_CLASSES
-from yolox.exp import get_exp
-from yolox.utils import fuse_model, get_model_info, postprocess, setup_logger, vis
-
-from .yolox_ros_py_utils.utils import yolox_py
-
-import rclpy
-from rclpy.node import Node
-
-from std_msgs.msg import Header
-from cv_bridge import CvBridge
-from sensor_msgs.msg import Image
-
-from rclpy.qos import qos_profile_sensor_data
-
-from bboxes_ex_msgs.msg import BoundingBoxes
-from bboxes_ex_msgs.msg import BoundingBox
-
-# from darknet_ros_msgs.msg import BoundingBoxes
-# from darknet_ros_msgs.msg import BoundingBox
-
-class Predictor(object):
- def __init__(self, model, exp, cls_names=COCO_CLASSES, trt_file=None, decoder=None, device="cpu", fp16=False, legacy=False):
- self.model = model
- self.cls_names = cls_names
- self.decoder = decoder
- self.num_classes = exp.num_classes
- self.confthre = exp.test_conf
- self.threshold = exp.threshold
- self.test_size = exp.test_size
- self.device = device
- self.fp16 = fp16
- self.preproc = ValTransform(legacy=legacy)
- if trt_file is not None:
- from torch2trt import TRTModule
-
- model_trt = TRTModule()
- model_trt.load_state_dict(torch.load(trt_file))
-
- x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
- self.model(x)
- self.model = model_trt
-
- def inference(self, img):
- img_info = {"id": 0}
- if isinstance(img, str):
- img_info["file_name"] = os.path.basename(img)
- img = cv2.imread(img)
- else:
- img_info["file_name"] = None
-
- height, width = img.shape[:2]
- img_info["height"] = height
- img_info["width"] = width
- img_info["raw_img"] = img
-
- ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])
- img_info["ratio"] = ratio
-
- img, _ = self.preproc(img, None, self.test_size)
- img = torch.from_numpy(img).unsqueeze(0)
- img = img.float()
- if self.device == "gpu":
- img = img.cuda()
- if self.fp16:
- img = img.half() # to FP16
-
- with torch.no_grad():
- t0 = time.time()
- outputs = self.model(img)
- if self.decoder is not None:
- outputs = self.decoder(outputs, dtype=outputs.type())
- outputs = postprocess(
- outputs, self.num_classes, self.confthre,
- self.threshold, class_agnostic=True
- )
- fps = int(1/(time.time() - t0))
- logger.info("{}fps".format(fps))
- return outputs, img_info
-
- def visual(self, output, img_info, cls_conf=0.35):
- ratio = img_info["ratio"]
- img = img_info["raw_img"]
- if output is None:
- return img
- output = output.cpu()
-
- bboxes = output[:, 0:4]
-
- # preprocessing: resize
- bboxes /= ratio
-
- cls = output[:, 6]
- scores = output[:, 4] * output[:, 5]
-
- vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
-
- return vis_res, bboxes, scores, cls, self.cls_names
-
-class yolox_ros(yolox_py):
- def __init__(self) -> None:
-
- # ROS2 init
- super().__init__('yolox_ros', load_params=False)
-
- self.setting_yolox_exp()
-
- self.bridge = CvBridge()
-
- self.pub = self.create_publisher(BoundingBoxes,"bounding_boxes", 10)
-
- if (self.sensor_qos_mode):
- self.sub = self.create_subscription(Image,"image_raw",self.imageflow_callback, qos_profile_sensor_data)
- else:
- self.sub = self.create_subscription(Image,"image_raw",self.imageflow_callback, 10)
-
- def setting_yolox_exp(self) -> None:
-
- WEIGHTS_PATH = '../../weights/yolox_nano.pth'
-
- self.declare_parameter('imshow_isshow',True)
- self.declare_parameter('yolox_exp_py', '')
-
- self.declare_parameter('fuse',False)
- self.declare_parameter('trt', False)
- self.declare_parameter('fp16', False)
- self.declare_parameter('legacy', False)
- self.declare_parameter('device', "cpu")
- # self.declare_parameter('', 0)
- self.declare_parameter('ckpt', WEIGHTS_PATH)
- self.declare_parameter('conf', 0.3)
-
- # YOLOX's "nmsthre" option is exposed as the 'threshold' parameter
- self.declare_parameter('threshold', 0.65)
- # YOLOX's "--tsize" option is exposed as the 'resize' parameter
- self.declare_parameter('resize', 640)
-
- self.declare_parameter('sensor_qos_mode', False)
-
- # =============================================================
- self.imshow_isshow = self.get_parameter('imshow_isshow').value
-
- exp_py = self.get_parameter('yolox_exp_py').value
-
- fuse = self.get_parameter('fuse').value
- trt = self.get_parameter('trt').value
- fp16 = self.get_parameter('fp16').value
- device = self.get_parameter('device').value
-
- ckpt = self.get_parameter('ckpt').value
- conf = self.get_parameter('conf').value
- legacy = self.get_parameter('legacy').value
- threshold = self.get_parameter('threshold').value
-
- input_shape_w = self.get_parameter('resize').value
- input_shape_h = input_shape_w
-
- self.sensor_qos_mode = self.get_parameter('sensor_qos_mode').value
-
- # ==============================================================
-
- cudnn.benchmark = True
- exp = get_exp(exp_py, None)
-
-
- BASE_PATH = os.getcwd()
- file_name = os.path.join(BASE_PATH, "YOLOX_PATH/")
- # os.makedirs(file_name, exist_ok=True)
-
- exp.test_conf = conf # test conf
- exp.threshold = threshold # nms threshold
- exp.test_size = (input_shape_h, input_shape_w) # test size
-
- model = exp.get_model()
- logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
-
- if device == "gpu":
- model.cuda()
- if fp16:
- model.half()
- # torch.cuda.set_device()
- # model.cuda()
- model.eval()
-
- # about not trt
- if not trt:
- if ckpt is None:
- ckpt_file = os.path.join(file_name, "best_ckpt.pth")
- else:
- ckpt_file = ckpt
- logger.info("loading checkpoint")
- ckpt = torch.load(ckpt_file, map_location="cpu")
- # load the model state dict
- model.load_state_dict(ckpt["model"])
- logger.info("loaded checkpoint done.")
-
- # about fuse
- if fuse:
- logger.info("\tFusing model...")
- model = fuse_model(model)
-
- # TensorRT
- if trt:
- assert not fuse, "TensorRT model is not support model fusing!"
- trt_file = os.path.join(file_name, "model_trt.pth")
- assert os.path.exists(
- trt_file
- ), "TensorRT model is not found!\n Run python3 tools/trt.py first!"
- model.head.decode_in_inference = False
- decoder = model.head.decode_outputs
- logger.info("Using TensorRT to inference")
- else:
- trt_file = None
- decoder = None
-
- self.predictor = Predictor(model, exp, COCO_CLASSES, trt_file, decoder, device, fp16, legacy)
-
- def imageflow_callback(self,msg:Image) -> None:
- try:
- img_rgb = self.bridge.imgmsg_to_cv2(msg,"bgr8")
- outputs, img_info = self.predictor.inference(img_rgb)
-
- try:
- result_img_rgb, bboxes, scores, cls, cls_names = self.predictor.visual(outputs[0], img_info)
- bboxes_msg = self.yolox2bboxes_msgs(bboxes, scores, cls, cls_names, msg.header, img_rgb)
-
- self.pub.publish(bboxes_msg)
-
- if (self.imshow_isshow):
- cv2.imshow("YOLOX",result_img_rgb)
- cv2.waitKey(1)
-
- except Exception as e:
- if (self.imshow_isshow):
- cv2.imshow("YOLOX",img_rgb)
- cv2.waitKey(1)
- except Exception as e:
- logger.error(e)
- pass
-
-def ros_main(args = None):
- rclpy.init(args=args)
- ros_class = yolox_ros()
-
- try:
- rclpy.spin(ros_class)
- except KeyboardInterrupt:
- pass
- finally:
- ros_class.destroy_node()
- rclpy.shutdown()
-
-if __name__ == "__main__":
- ros_main()
\ No newline at end of file
diff --git a/yolox_ros_py/yolox_ros_py/yolox_ros_py_utils/__init__.py b/yolox_ros_py/yolox_ros_py/yolox_ros_py_utils/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/yolox_ros_py/yolox_ros_py/yolox_ros_py_utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/yolox_ros_py/yolox_ros_py/yolox_ros_py_utils/utils.py b/yolox_ros_py/yolox_ros_py/yolox_ros_py_utils/utils.py
deleted file mode 100644
index 5304892..0000000
--- a/yolox_ros_py/yolox_ros_py/yolox_ros_py_utils/utils.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import numpy as np
-
-from bboxes_ex_msgs.msg import BoundingBoxes
-from bboxes_ex_msgs.msg import BoundingBox
-
-# from darknet_ros_msgs.msg import BoundingBoxes
-# from darknet_ros_msgs.msg import BoundingBox
-
-from rclpy.node import Node
-from std_msgs.msg import Header
-from rclpy.qos import qos_profile_sensor_data
-
-from loguru import logger
-import sys
-
-class yolox_py(Node):
- def __init__(self, name, load_params=True):
- super().__init__(name)
- if load_params:
- self.parameter_ros2()
-
- def yolox2bboxes_msgs(self, bboxes, scores, cls, cls_names, img_header: Header, image: np.ndarray) -> BoundingBoxes:
- bboxes_msg = BoundingBoxes()
- bboxes_msg.header = img_header
- i = 0
- for bbox in bboxes:
- one_box = BoundingBox()
- # clamp negative coordinates to zero
- if bbox[0] < 0:
- bbox[0] = 0
- if bbox[1] < 0:
- bbox[1] = 0
- if bbox[2] < 0:
- bbox[2] = 0
- if bbox[3] < 0:
- bbox[3] = 0
- one_box.xmin = int(bbox[0])
- one_box.ymin = int(bbox[1])
- one_box.xmax = int(bbox[2])
- one_box.ymax = int(bbox[3])
-
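- # img_height/img_width exist only in the bboxes_ex_msgs BoundingBox, not in the darknet_ros_msgs one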
- if "bboxes_ex_msgs" in sys.modules:
- one_box.img_height = image.shape[0]
- one_box.img_width = image.shape[1]
- else:
- pass
-
- one_box.probability = float(scores[i])
- one_box.class_id = str(cls_names[int(cls[i])])
- bboxes_msg.bounding_boxes.append(one_box)
- i = i+1
-
- return bboxes_msg
-
- def parameter_ros2(self):
- # Declare parameters ##############################################
- self.declare_parameter('model_path', './model/model.onnx')
- # self.declare_parameter('score_th', 0.4)
- self.declare_parameter('nms_th', 0.5)
- self.declare_parameter('conf', 0.3)
- self.declare_parameter('device', "CPU")
-
- self.declare_parameter('num_threads', None)
- self.declare_parameter('input_shape/height', 416)
- self.declare_parameter('input_shape/width', 416)
- self.declare_parameter('imshow_isshow',True)
- self.declare_parameter('with_p6', False)
-
- self.declare_parameter('sensor_qos_mode', False)
-
- # Get parameters ##################################################
- self.model_path = self.get_parameter('model_path').value
- # self.score_th = self.get_parameter('score_th').value
- self.nms_th = self.get_parameter('nms_th').value
- self.conf = self.get_parameter('conf').value
- self.device = self.get_parameter('device').value
-
- self.num_threads = self.get_parameter('num_threads').value
- self.input_shape_h = self.get_parameter('input_shape/height').value
- self.input_shape_w = self.get_parameter('input_shape/width').value
- self.imshow_isshow = self.get_parameter('imshow_isshow').value
- self.with_p6 = self.get_parameter('with_p6').value
-
- self.sensor_qos_mode = self.get_parameter('sensor_qos_mode').value
-
- if self.sensor_qos_mode:
- self.qos_image_sub = qos_profile_sensor_data
- else:
- self.qos_image_sub = 10
-
- self.input_shape = (self.input_shape_h, self.input_shape_w)
-
-
- # ==============================================================
- logger.info("parameters -------------------------------------------------")
- logger.info("model_path: {}".format(self.model_path))
- logger.info("nms_th (ONNX): {}".format(self.nms_th))
- logger.info("conf: {}".format(self.conf))
- logger.info("device: {}".format(self.device))
- logger.info("num_threads: {}".format(self.num_threads))
- logger.info("input_shape: {}".format(self.input_shape))
- logger.info("imshow_isshow: {}".format(self.imshow_isshow))
- logger.info("with_p6 (ONNX): {}".format(self.with_p6))
- logger.info("sensor_qos_mode: {}".format(self.sensor_qos_mode))
- logger.info("-------------------------------------------------------------")
- # ==============================================================
diff --git a/yolox_ros_py/yolox_ros_py/yolox_tflite.py b/yolox_ros_py/yolox_ros_py/yolox_tflite.py
deleted file mode 100644
index 73f1e00..0000000
--- a/yolox_ros_py/yolox_ros_py/yolox_tflite.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# -------------------
-# Ar-Ray-code 2022
-# -------------------
-
-# Env(CPU): Raspberry Pi Bullseye, Ubuntu 20
-# Env(ROS2): ROS2-Foxy, Galactic
-
-# input /image_raw(Sensor_msgs/Image)
-# output /bounding_boxes (bboxes_ex_msgs/BoundingBoxes)
-
-# run --------------------------------------------------
-# terminal1: ros2 run v4l2_camera v4l2_camera_node
-# terminal2: python3 ./demo_ros2.py
-# ------------------------------------------------------
-
-import time
-import cv2
-
-from .module_tflite.detector import Detector
-from .module_tflite.demo import draw_debug
-from .yolox_ros_py_utils.utils import yolox_py
-
-# ROS2 =====================================
-import rclpy
-from rclpy.node import Node
-
-from std_msgs.msg import Header
-from cv_bridge import CvBridge
-from sensor_msgs.msg import Image
-
-from yolox.data.datasets import COCO_CLASSES
-
-from rclpy.qos import qos_profile_sensor_data
-
-from bboxes_ex_msgs.msg import BoundingBoxes
-from bboxes_ex_msgs.msg import BoundingBox
-
-class yolox_cpu(yolox_py):
- def __init__(self):
- super().__init__('yolox_cpu', load_params=True)
-
- self.bridge = CvBridge()
-
- self.yolox = Detector(
- model_path=self.model_path,
- input_shape=self.input_shape,
- score_th=self.conf,
- nms_th=self.nms_th,
- providers=['CPUExecutionProvider'],
- num_threads=self.num_threads,
- )
-
- self.sub = self.create_subscription(Image,"image_raw",self.imageflow_callback, self.qos_image_sub)
- self.pub_detection = self.create_publisher(BoundingBoxes, 'bounding_boxes', 10)
-
- def imageflow_callback(self, msg):
- start = time.time()
- image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
- bboxes, scores, class_ids = self.yolox.inference(image)
- elapsed_time = time.time() - start
- fps = 1 / elapsed_time
-
- self.image_h = image.shape[0]
- self.image_w = image.shape[1]
-
- # Draw the debug overlay
- debug_image = draw_debug(
- image,
- elapsed_time,
- self.conf,
- bboxes,
- scores,
- class_ids,
- )
-
- cv2.imshow('debug_image', debug_image)
- cv2.waitKey(1)
-
- bboxes_msg = self.yolox2bboxes_msgs(bboxes, scores, class_ids, COCO_CLASSES, msg.header, image)
-
- self.pub_detection.publish(bboxes_msg)
- self.get_logger().info('fps: %.2f' % fps)
-
- def __del__(self):
- cv2.destroyAllWindows()
- self.pub_detection.destroy()
- super().destroy_node()
-
-def ros_main(args = None):
- rclpy.init(args=args)
- ros_class = yolox_cpu()
-
- try:
- rclpy.spin(ros_class)
- except KeyboardInterrupt:
- pass
- finally:
- rclpy.shutdown()
-
-if __name__ == "__main__":
- ros_main()