diff --git a/tools/utils/version_detection.py b/tools/utils/version_detection.py
index 5d9e63e..9d4cc5c 100644
--- a/tools/utils/version_detection.py
+++ b/tools/utils/version_detection.py
@@ -82,7 +82,7 @@ def detect_version(path: str, debug: bool = False) -> str:
         # Remove the output folder
         subprocess.check_output("rm -r extracted_model", shell=True)
-    except subprocess.CalledProcessError:
-        raise RuntimeError()
+    except subprocess.CalledProcessError as e:
+        raise RuntimeError() from e
 
     return UNRECOGNIZED
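For context on the `from e` change above: explicit exception chaining records the original `CalledProcessError` as `__cause__`, so the subprocess failure still shows up in the traceback instead of being silently replaced by the bare `RuntimeError`. A minimal standalone sketch (illustration only, not code from this repo):

```python
import subprocess


def cleanup() -> None:
    try:
        # Fails with CalledProcessError if the folder does not exist.
        subprocess.check_output("rm -r extracted_model", shell=True)
    except subprocess.CalledProcessError as e:
        # "from e" chains the original error; the traceback then prints
        # "The above exception was the direct cause of the following exception:"
        # followed by the RuntimeError, instead of hiding the root cause.
        raise RuntimeError("cleanup of extracted_model failed") from e


try:
    cleanup()
except RuntimeError as err:
    print(err.__cause__)  # the original CalledProcessError, with its return code
```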
diff --git a/tools/yolov7/yolov7/README.md b/tools/yolov7/yolov7/README.md
index 15b9354..0272d5a 100644
--- a/tools/yolov7/yolov7/README.md
+++ b/tools/yolov7/yolov7/README.md
@@ -17,26 +17,27 @@ Implementation of paper - [YOLOv7: Trainable bag-of-freebies sets new state-of-t
 
 - Integrated into [Huggingface Spaces 🤗](https://huggingface.co/spaces/akhaliq/yolov7) using Gradio. Try out the Web Demo [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/yolov7)
 
-## Performance
+## Performance MS COCO
 
-| Model | Test Size | AP<sup>test</sup> | AP<sub>50</sub><sup>test</sup> | AP<sub>75</sub><sup>test</sup> | batch 1 fps | batch 32 average time |
-| :-- | :-: | :-: | :-: | :-: | :-: | :-: |
-| [**YOLOv7**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt) | 640 | **51.4%** | **69.7%** | **55.9%** | 161 *fps* | 2.8 *ms* |
-| [**YOLOv7-X**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x.pt) | 640 | **53.1%** | **71.2%** | **57.8%** | 114 *fps* | 4.3 *ms* |
-| | | | | | | |
-| [**YOLOv7-W6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6.pt) | 1280 | **54.9%** | **72.6%** | **60.1%** | 84 *fps* | 7.6 *ms* |
-| [**YOLOv7-E6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt) | 1280 | **56.0%** | **73.5%** | **61.2%** | 56 *fps* | 12.3 *ms* |
-| [**YOLOv7-D6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6.pt) | 1280 | **56.6%** | **74.0%** | **61.8%** | 44 *fps* | 15.0 *ms* |
-| [**YOLOv7-E6E**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt) | 1280 | **56.8%** | **74.4%** | **62.1%** | 36 *fps* | 18.7 *ms* |
+| Model                                                                                       | Test Size | AP<sup>test</sup> | AP<sub>50</sub><sup>test</sup> | AP<sub>75</sub><sup>test</sup> | batch 1 fps | batch 32 average time |
+| :------------------------------------------------------------------------------------------ | :-------: | :---------------: | :----------------------------: | :----------------------------: | :---------: | :-------------------: |
+| [**YOLOv7**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt)          | 640       | **51.4%**         | **69.7%**                      | **55.9%**                      | 161 *fps*   | 2.8 *ms*              |
+| [**YOLOv7-X**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x.pt)       | 640       | **53.1%**         | **71.2%**                      | **57.8%**                      | 114 *fps*   | 4.3 *ms*              |
+|                                                                                              |           |                   |                                |                                |             |                       |
+| [**YOLOv7-W6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6.pt)    | 1280      | **54.9%**         | **72.6%**                      | **60.1%**                      | 84 *fps*    | 7.6 *ms*              |
+| [**YOLOv7-E6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt)    | 1280      | **56.0%**         | **73.5%**                      | **61.2%**                      | 56 *fps*    | 12.3 *ms*             |
+| [**YOLOv7-D6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6.pt)    | 1280      | **56.6%**         | **74.0%**                      | **61.8%**                      | 44 *fps*    | 15.0 *ms*             |
+| [**YOLOv7-E6E**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt)  | 1280      | **56.8%**         | **74.4%**                      | **62.1%**                      | 36 *fps*    | 18.7 *ms*             |
 
 ## Installation
 
 Docker environment (recommended)
+
 <details><summary> <b>Expand</b> </summary>
 
-``` shell
+```shell
 # create the docker container; you can increase the shared memory size if you have more RAM
 nvidia-docker run --name yolov7 -it -v your_coco_path/:/coco/ -v your_code_path/:/yolov7 --shm-size=64g nvcr.io/nvidia/pytorch:21.08-py3
@@ -57,7 +58,7 @@ cd /yolov7
 
 [`yolov7.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt) [`yolov7x.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x.pt) [`yolov7-w6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6.pt) [`yolov7-e6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt) [`yolov7-d6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6.pt) [`yolov7-e6e.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt)
 
-``` shell
+```shell
 python test.py --data data/coco.yaml --img 640 --batch 32 --conf 0.001 --iou 0.65 --device 0 --weights yolov7.pt --name yolov7_640_val
 ```
 
@@ -84,15 +85,15 @@ To measure accuracy, download [COCO-annotations for Pycocotools](http://images.c
 
 Data preparation
 
-``` shell
+```shell
 bash scripts/get_coco.sh
 ```
 
-* Download MS COCO dataset images ([train](http://images.cocodataset.org/zips/train2017.zip), [val](http://images.cocodataset.org/zips/val2017.zip), [test](http://images.cocodataset.org/zips/test2017.zip)) and [labels](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/coco2017labels-segments.zip). If you have previously used a different version of YOLO, we strongly recommend that you delete `train2017.cache` and `val2017.cache` files, and redownload [labels](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/coco2017labels-segments.zip)
+- Download MS COCO dataset images ([train](http://images.cocodataset.org/zips/train2017.zip), [val](http://images.cocodataset.org/zips/val2017.zip), [test](http://images.cocodataset.org/zips/test2017.zip)) and [labels](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/coco2017labels-segments.zip). If you have previously used a different version of YOLO, we strongly recommend that you delete the `train2017.cache` and `val2017.cache` files and redownload the [labels](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/coco2017labels-segments.zip).
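The cache note above matters because YOLO-style dataloaders persist parsed labels in `*.cache` files, typically alongside the dataset lists; a stale cache from an older YOLO version can silently override freshly downloaded labels. A small standalone sketch for clearing them (the dataset root is an assumption, and this is not a script shipped with the repo):

```python
from pathlib import Path

# Adjust to wherever your COCO dataset lives; "coco" here is an assumption.
coco_root = Path("coco")

# Remove every label cache (e.g. train2017.cache, val2017.cache) so the
# next training run re-parses the freshly downloaded labels.
for cache in coco_root.rglob("*.cache"):
    print(f"removing stale cache: {cache}")
    cache.unlink()
```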
 
 Single GPU training
 
-``` shell
+```shell
 # train p5 models
 python train.py --workers 8 --device 0 --batch-size 32 --data data/coco.yaml --img 640 640 --cfg cfg/training/yolov7.yaml --weights '' --name yolov7 --hyp data/hyp.scratch.p5.yaml
@@ -102,7 +103,7 @@ python train_aux.py --workers 8 --device 0 --batch-size 16 --data data/coco.yaml
 
 Multiple GPU training
 
-``` shell
+```shell
 # train p5 models
 python -m torch.distributed.launch --nproc_per_node 4 --master_port 9527 train.py --workers 8 --device 0,1,2,3 --sync-bn --batch-size 128 --data data/coco.yaml --img 640 640 --cfg cfg/training/yolov7.yaml --weights '' --name yolov7 --hyp data/hyp.scratch.p5.yaml
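For readers unfamiliar with the `torch.distributed.launch` invocation above: the launcher spawns one worker process per GPU (`--nproc_per_node`) and each worker must join the process group before wrapping the model. A minimal standalone sketch of that worker-side boilerplate (assumed names; yolov7's own `train.py` already handles this internally):

```python
import os

import torch
import torch.distributed as dist


def main() -> None:
    # torch.distributed.launch (with --use_env) and torchrun export LOCAL_RANK
    # for each worker; older launch versions pass it as a --local_rank argument.
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    torch.cuda.set_device(local_rank)

    # NCCL is the usual backend for multi-GPU training on a single node; the
    # launcher supplies MASTER_ADDR/MASTER_PORT via the environment.
    dist.init_process_group(backend="nccl")

    model = torch.nn.Linear(10, 2).cuda(local_rank)  # stand-in for the real model
    ddp_model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[local_rank]
    )

    # ... build a DataLoader with a DistributedSampler and run the usual loop ...
    dist.destroy_process_group()


if __name__ == "__main__":
    main()
```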
@@ -116,7 +117,7 @@ python -m torch.distributed.launch --nproc_per_node 8 --master_port 9527 train_a
 
 Single GPU finetuning for custom dataset
 
-``` shell
+```shell
 # finetune p5 models
 python train.py --workers 8 --device 0 --batch-size 32 --data data/custom.yaml --img 640 640 --cfg cfg/training/yolov7-custom.yaml --weights 'yolov7_training.pt' --name yolov7-custom --hyp data/hyp.scratch.custom.yaml
@@ -131,12 +132,14 @@ See [reparameterization.ipynb](tools/reparameterization.ipynb)
 ## Inference
 
 On video:
-``` shell
+
+```shell
 python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source yourvideo.mp4
 ```
 
 On image:
-``` shell
+
+```shell
 python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source inference/images/horses.jpg
 ```
 
@@ -146,12 +149,12 @@ python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source inferen
 
-
 ## Export
 
 **PyTorch to CoreML (and inference on macOS/iOS)** Open In Colab
 
 **PyTorch to ONNX with NMS (and inference)** Open In Colab
+
 ```shell
 python export.py --weights yolov7-tiny.pt --grid --end2end --simplify \
     --topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640 --max-wh 640
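Once the ONNX file exists, it can be smoke-tested without any TensorRT tooling. A minimal sketch using onnxruntime (the input tensor name and the dummy 640×640 preprocessing are assumptions; inspect the exported graph, e.g. with Netron, to confirm):

```python
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("yolov7-tiny.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name  # usually "images" for YOLOv7 exports

# Dummy NCHW float32 tensor in [0, 1] -- replace with real letterboxed input.
dummy = np.random.rand(1, 3, 640, 640).astype(np.float32)

outputs = session.run(None, {input_name: dummy})
# With --end2end the exported graph embeds NMS, so the outputs are already
# final detections rather than raw anchor predictions.
print([o.shape for o in outputs])
```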
@@ -168,7 +171,6 @@ python ./tensorrt-python/export.py -o yolov7-tiny.onnx -e yolov7-tiny-nms.trt -p
 
 **PyTorch to TensorRT another way** Open In Colab
 
 <details><summary> <b>Expand</b> </summary>
-
 ```shell
 wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt
 python export.py --weights yolov7-tiny.pt --grid --include-nms
@@ -195,7 +197,6 @@ See [keypoint.ipynb](https://github.com/WongKinYiu/yolov7/blob/main/tools/keypoi
 
-
 
 ## Instance segmentation [`code`](https://github.com/WongKinYiu/yolov7/tree/mask)
 
 [`yolov7-mask.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-mask.pt)
@@ -208,7 +209,6 @@ See [instance.ipynb](https://github.com/WongKinYiu/yolov7/blob/main/tools/instan
 
-
 
 ## Citation
 
 ```
@@ -220,7 +220,6 @@ See [instance.ipynb](https://github.com/WongKinYiu/yolov7/blob/main/tools/instan
 }
 ```
 
-
 ## Teaser
 
 YOLOv7-semantic & YOLOv7-panoptic & YOLOv7-caption
@@ -240,20 +239,19 @@ YOLOv7-semantic & YOLOv7-panoptic & YOLOv7-caption
 
-
 ## Acknowledgements
 
 <details><summary> <b>Expand</b> </summary>
 
-* [https://github.com/AlexeyAB/darknet](https://github.com/AlexeyAB/darknet)
-* [https://github.com/WongKinYiu/yolor](https://github.com/WongKinYiu/yolor)
-* [https://github.com/WongKinYiu/PyTorch_YOLOv4](https://github.com/WongKinYiu/PyTorch_YOLOv4)
-* [https://github.com/WongKinYiu/ScaledYOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4)
-* [https://github.com/Megvii-BaseDetection/YOLOX](https://github.com/Megvii-BaseDetection/YOLOX)
-* [https://github.com/ultralytics/yolov3](https://github.com/ultralytics/yolov3)
-* [https://github.com/ultralytics/yolov5](https://github.com/ultralytics/yolov5)
-* [https://github.com/DingXiaoH/RepVGG](https://github.com/DingXiaoH/RepVGG)
-* [https://github.com/JUGGHM/OREPA_CVPR2022](https://github.com/JUGGHM/OREPA_CVPR2022)
-* [https://github.com/TexasInstruments/edgeai-yolov5/tree/yolo-pose](https://github.com/TexasInstruments/edgeai-yolov5/tree/yolo-pose)
+- [https://github.com/AlexeyAB/darknet](https://github.com/AlexeyAB/darknet)
+- [https://github.com/WongKinYiu/yolor](https://github.com/WongKinYiu/yolor)
+- [https://github.com/WongKinYiu/PyTorch_YOLOv4](https://github.com/WongKinYiu/PyTorch_YOLOv4)
+- [https://github.com/WongKinYiu/ScaledYOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4)
+- [https://github.com/Megvii-BaseDetection/YOLOX](https://github.com/Megvii-BaseDetection/YOLOX)
+- [https://github.com/ultralytics/yolov3](https://github.com/ultralytics/yolov3)
+- [https://github.com/ultralytics/yolov5](https://github.com/ultralytics/yolov5)
+- [https://github.com/DingXiaoH/RepVGG](https://github.com/DingXiaoH/RepVGG)
+- [https://github.com/JUGGHM/OREPA_CVPR2022](https://github.com/JUGGHM/OREPA_CVPR2022)
+- [https://github.com/TexasInstruments/edgeai-yolov5/tree/yolo-pose](https://github.com/TexasInstruments/edgeai-yolov5/tree/yolo-pose)