Merge pull request #609 from mikel-brostrom/yolov5_v7_update
Yolov5 v7 update
mikel-brostrom authored Nov 23, 2022
2 parents 912c1c7 + 9d72853 commit 5caab02
Showing 5 changed files with 36 additions and 48 deletions.
2 changes: 0 additions & 2 deletions reid_export.py
@@ -39,8 +39,6 @@
from trackers.strong_sort.deep.models import build_model
from trackers.strong_sort.deep.reid_model_factory import get_model_name, load_pretrained_weights

# remove duplicated stream handler to avoid duplicated logging
logging.getLogger().removeHandler(logging.getLogger().handlers[0])

def file_size(path):
# Return file/dir size (MB)
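
Note on the deletion above: the removed lines stripped the first handler from the root logger to silence yolov5's duplicated console output. logging.getLogger().handlers[0] raises IndexError whenever the root logger has no handlers yet, and with the yolov5 v7 logging setup the workaround is apparently no longer needed. A minimal defensive sketch (hypothetical, not part of this commit):

import logging

# Only drop a pre-existing stream handler if the root logger actually has one.
root = logging.getLogger()
if root.handlers:
    root.removeHandler(root.handlers[0])
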
50 changes: 21 additions & 29 deletions requirements.txt
@@ -1,55 +1,47 @@
# pip install -r requirements.txt

# base ----------------------------------------
# Base ----------------------------------------
gitpython
ipython # interactive notebook
matplotlib>=3.2.2
numpy>=1.18.5
opencv-python>=4.1.1
Pillow>=7.1.2
psutil # system resources
PyYAML>=5.3.1
requests>=2.23.0
scipy>=1.4.1
torch>=1.7.0
thop>=0.1.1 # FLOPs computation
torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended)
torchvision>=0.8.1
tqdm>=4.64.0
protobuf<=3.20.1
# protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012

# plotting ------------------------------------
# Logging ---------------------------------------------------------------------
tensorboard>=2.4.1
# clearml>=1.2.0
# comet

# Plotting --------------------------------------------------------------------
pandas>=1.1.4
seaborn>=0.11.0

# deep_sort -----------------------------------
# StrongSORT ------------------------------------------------------------------
easydict

# torchreid -----------------------------------

Cython
h5py
six
tb-nightly
future
yacs
# torchreid -------------------------------------------------------------------
gdown
flake8
yapf
isort==4.3.21
imageio

#bytetrack
# ByteTrack -------------------------------------------------------------------
lap
git+https://github.com/samson-wang/cython_bbox.git@9badb346a9222c98f828ba45c63fe3b7f2790ea2

# ocsort
# OCSORT ----------------------------------------------------------------------
filterpy

# Tracking eval ----------------------------------
GitPython

# Export --------------------------------------
#onnx>=1.9.0 # ONNX export
#onnx-simplifier>=0.4.1 # ONNX simplifier
#tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64)
#openvino-dev
#git+https://github.com/PINTO0309/openvino2tensorflow #OpenVINO to TFLite export
# Export ----------------------------------------------------------------------
# onnx>=1.9.0 # ONNX export
# onnx-simplifier>=0.4.1 # ONNX simplifier
# nvidia-pyindex # TensorRT export
# nvidia-tensorrt # TensorRT export
# nvidia-tensorrt # TensorRT export
# openvino-dev # OpenVINO export
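
Since track.py imports check_requirements from yolov5.utils.general (see the next file), the updated pins can also be verified at runtime. A rough sketch, assuming check_requirements accepts an iterable of pip-style specifiers as it does in recent yolov5 releases:

from yolov5.utils.general import check_requirements

# Check a few of the tracker-specific pins above before running; adjust the
# tuple if your yolov5 version expects a requirements.txt path instead.
check_requirements(('gitpython', 'easydict', 'filterpy', 'lap'))
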
13 changes: 7 additions & 6 deletions track.py
@@ -30,14 +30,14 @@
import logging
from yolov5.models.common import DetectMultiBackend
from yolov5.utils.dataloaders import VID_FORMATS, LoadImages, LoadStreams
from yolov5.utils.general import (LOGGER, check_img_size, non_max_suppression, scale_coords, check_requirements, cv2,
from yolov5.utils.general import (LOGGER, check_img_size, non_max_suppression, scale_boxes, check_requirements, cv2,
check_imshow, xyxy2xywh, increment_path, strip_optimizer, colorstr, print_args, check_file)
from yolov5.utils.torch_utils import select_device, time_sync
from yolov5.utils.plots import Annotator, colors, save_one_box
from trackers.multi_tracker_zoo import create_tracker

# remove duplicated stream handler to avoid duplicated logging
logging.getLogger().removeHandler(logging.getLogger().handlers[0])
#logging.getLogger().removeHandler(logging.getLogger().handlers[0])

@torch.no_grad()
def run(
@@ -70,6 +70,7 @@ def run(
hide_class=False, # hide IDs
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
vid_stride=1, # video frame-rate stride
):

source = str(source)
@@ -100,8 +101,7 @@
# Dataloader
if webcam:
show_vid = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
nr_sources = len(dataset)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
@@ -168,15 +168,15 @@
s += '%gx%g ' % im.shape[2:] # print string
imc = im0.copy() if save_crop else im0 # for save_crop

annotator = Annotator(im0, line_width=line_thickness, pil=not ascii)
annotator = Annotator(im0, line_width=line_thickness, example=str(names))

if hasattr(tracker_list[i], 'tracker') and hasattr(tracker_list[i].tracker, 'camera_update'):
if prev_frames[i] is not None and curr_frames[i] is not None: # camera motion compensation
tracker_list[i].tracker.camera_update(prev_frames[i], curr_frames[i])

if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() # xyxy
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # xyxy

# Print results
for c in det[:, -1].unique():
@@ -290,6 +290,7 @@ def parse_opt():
parser.add_argument('--hide-class', default=False, action='store_true', help='hide IDs')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(vars(opt))
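
The track.py changes track two yolov5 v7 API updates: scale_coords was renamed to scale_boxes, and the dataloaders gained a vid_stride argument (exposed here as --vid-stride) for skipping frames. For code that must run against both yolov5 versions, an import shim is one option; a sketch, assuming the rename left the box-rescaling behaviour unchanged:

try:
    from yolov5.utils.general import scale_boxes  # yolov5 >= v7.0
except ImportError:
    # older yolov5 releases exposed the same helper as scale_coords
    from yolov5.utils.general import scale_coords as scale_boxes

# used exactly as in the hunk above:
# det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
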
17 changes: 7 additions & 10 deletions trackers/strong_sort/reid_multibackend.py
@@ -34,8 +34,8 @@ def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'),
super().__init__()

w = weights[0] if isinstance(weights, list) else weights
self.pt, self.jit, self.onnx, self.xml, self.engine, self.coreml, \
self.saved_model, self.pb, self.tflite, self.edgetpu, self.tfjs = self.model_type(w) # get backend
self.pt, self.jit, self.onnx, self.xml, self.engine, self.coreml, self.saved_model, \
self.pb, self.tflite, self.edgetpu, self.tfjs, self.paddle = self.model_type(w) # get backend
self.fp16 = fp16
self.fp16 &= self.pt or self.jit or self.engine # FP16

@@ -164,14 +164,11 @@ def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'),
def model_type(p='path/to/model.pt'):
# Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
from export import export_formats
suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes
check_suffix(p, suffixes) # checks
p = Path(p).name # eliminate trailing separators
pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes)
xml |= xml2 # *_openvino_model or *.xml
tflite &= not edgetpu # *.tflite
return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs

sf = list(export_formats().Suffix) # export suffixes
check_suffix(p, sf) # checks
types = [s in Path(p).name for s in sf]
types[8] &= not types[9] # tflite &= not edgetpu
return types

def _preprocess(self, im_batch):

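
The rewritten model_type() simply returns one boolean per suffix in yolov5's export_formats() table, which gained a PaddlePaddle entry in v7; the index 8/9 step resolves the overlap between plain .tflite files and _edgetpu.tflite files. A standalone illustration, with the suffix order written out here as an assumption rather than read from export_formats():

from pathlib import Path

# Assumed v7-era suffix order; the real list comes from export_formats().Suffix.
SUFFIXES = ['.pt', '.torchscript', '.onnx', '_openvino_model', '.engine', '.mlmodel',
            '_saved_model', '.pb', '.tflite', '_edgetpu.tflite', '_web_model', '_paddle_model']

name = Path('weights/osnet_x0_25_msmt17.onnx').name
types = [s in name for s in SUFFIXES]  # one flag per export format
types[8] &= not types[9]               # plain .tflite, but not an Edge TPU .tflite
pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = types
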
2 changes: 1 addition & 1 deletion yolov5
Submodule yolov5 updated 70 files
+33 −45 .github/README_cn.md
+33 −6 .github/workflows/ci-testing.yml
+3 −0 .github/workflows/docker.yml
+8 −14 .github/workflows/greetings.yml
+0 −21 .github/workflows/rebase.yml
+1 −1 .github/workflows/stale.yml
+4 −4 .pre-commit-config.yaml
+9 −14 CONTRIBUTING.md
+163 −71 README.md
+22 −10 benchmarks.py
+25 −15 classify/predict.py
+8 −6 classify/train.py
+1,479 −0 classify/tutorial.ipynb
+5 −3 classify/val.py
+1 −1 data/Argoverse.yaml
+101 −0 data/coco128-seg.yaml
+3 −2 data/scripts/download_weights.sh
+1 −1 data/scripts/get_coco.sh
+1 −1 data/xView.yaml
+24 −19 detect.py
+133 −75 export.py
+10 −5 hubconf.py
+227 −143 models/common.py
+49 −0 models/hub/yolov5s-LeakyReLU.yaml
+48 −0 models/segment/yolov5l-seg.yaml
+48 −0 models/segment/yolov5m-seg.yaml
+48 −0 models/segment/yolov5n-seg.yaml
+48 −0 models/segment/yolov5s-seg.yaml
+48 −0 models/segment/yolov5x-seg.yaml
+46 −12 models/tf.py
+55 −21 models/yolo.py
+21 −15 requirements.txt
+274 −0 segment/predict.py
+658 −0 segment/train.py
+593 −0 segment/tutorial.ipynb
+470 −0 segment/val.py
+0 −1 setup.cfg
+28 −25 train.py
+194 −318 tutorial.ipynb
+19 −4 utils/__init__.py
+69 −20 utils/augmentations.py
+5 −5 utils/autoanchor.py
+6 −3 utils/autobatch.py
+180 −117 utils/dataloaders.py
+3 −3 utils/docker/Dockerfile
+1 −1 utils/docker/Dockerfile-arm64
+2 −2 utils/docker/Dockerfile-cpu
+19 −91 utils/downloads.py
+222 −129 utils/general.py
+94 −16 utils/loggers/__init__.py
+10 −2 utils/loggers/clearml/README.md
+3 −2 utils/loggers/clearml/clearml_utils.py
+256 −0 utils/loggers/comet/README.md
+508 −0 utils/loggers/comet/__init__.py
+150 −0 utils/loggers/comet/comet_utils.py
+118 −0 utils/loggers/comet/hpo.py
+209 −0 utils/loggers/comet/optimizer_config.json
+5 −0 utils/loggers/wandb/wandb_utils.py
+19 −23 utils/metrics.py
+67 −11 utils/plots.py
+0 −0 utils/segment/__init__.py
+104 −0 utils/segment/augmentations.py
+331 −0 utils/segment/dataloaders.py
+137 −0 utils/segment/general.py
+186 −0 utils/segment/loss.py
+210 −0 utils/segment/metrics.py
+143 −0 utils/segment/plots.py
+11 −8 utils/torch_utils.py
+85 −0 utils/triton.py
+38 −29 val.py
