From 04e2db7795c26a3d6cc1baf797134a895d1ad87a Mon Sep 17 00:00:00 2001
From: Lorenzo Mammana
Date: Thu, 25 Jan 2024 09:28:54 +0100
Subject: [PATCH] feat: Lightning upgrade (#22)

* refactor: Refactor hooks to work properly with the new pytorch lightning

* build: Upgrade version and update changelog

---
 CHANGELOG.md                                  |   6 +
 .../300_benchmarking/301_benchmarking.ipynb   | 943 +++++++++---------
 src/anomalib/__init__.py                      |   2 +-
 src/anomalib/models/ai_vad/config.yaml        |   1 -
 src/anomalib/models/cfa/config.yaml           |   1 -
 src/anomalib/models/cflow/config.yaml         |   1 -
 src/anomalib/models/cflow/lightning_model.py  |   2 +-
 .../models/components/base/anomaly_module.py  |  46 +-
 src/anomalib/models/csflow/config.yaml        |   1 -
 src/anomalib/models/dfkde/config.yaml         |   1 -
 src/anomalib/models/dfm/config.yaml           |   1 -
 src/anomalib/models/draem/config.yaml         |   1 -
 src/anomalib/models/efficient_ad/config.yaml  |   1 -
 src/anomalib/models/fastflow/config.yaml      |   1 -
 src/anomalib/models/ganomaly/config.yaml      |   1 -
 .../models/ganomaly/lightning_model.py        |  12 +-
 src/anomalib/models/padim/config.yaml         |   1 -
 src/anomalib/models/padim/lightning_model.py  |   2 +-
 src/anomalib/models/patchcore/config.yaml     |   2 +-
 .../models/patchcore/lightning_model.py       |   2 +-
 .../models/reverse_distillation/config.yaml   |   1 -
 .../reverse_distillation/lightning_model.py   |   2 +-
 src/anomalib/models/rkde/config.yaml          |   1 -
 src/anomalib/models/stfpm/config.yaml         |   1 -
 src/anomalib/models/stfpm/lightning_model.py  |   2 +-
 .../utils/callbacks/cdf_normalization.py      |   6 +-
 .../utils/callbacks/min_max_normalization.py  |   8 +-
 .../callbacks/visualizer/visualizer_image.py  |   4 +-
 .../dummy_lightning_model.py                  |   6 +-
 29 files changed, 525 insertions(+), 534 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3f9be2120f..b112c0b6b6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
+## [v0.7.0+obx.1.3.0] + +### Updated + +- Update lightning related code to support changes introduced with lightning 2.* + ## [v0.7.0+obx.1.2.11] ### Fixed diff --git a/notebooks/300_benchmarking/301_benchmarking.ipynb b/notebooks/300_benchmarking/301_benchmarking.ipynb index 155e074a58..dbac6ad6c1 100644 --- a/notebooks/300_benchmarking/301_benchmarking.ipynb +++ b/notebooks/300_benchmarking/301_benchmarking.ipynb @@ -1,475 +1,474 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Short walkthrough on Benchmarking in Anomalib\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IJlBPLRvOYuv" - }, - "source": [ - "## Install Anomalib\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Short walkthrough on Benchmarking in Anomalib\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IJlBPLRvOYuv" + }, + "source": [ + "## Install Anomalib\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "HmOFHNPsJV4H", + "outputId": "ad77e030-c2dd-4dbc-f4c3-882e1229999f" + }, + "outputs": [], + "source": [ + "!git clone https://github.com/openvinotoolkit/anomalib.git --branch main --single-branch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "N6bEfY5HOMfQ", + "outputId": "55756dfd-955d-49e3-9f5f-f387454010fe" + }, + "outputs": [], + "source": [ + "% cd anomalib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zPCaYurIPQPC", + "outputId": "d1a40d6b-d1b6-4464-8259-b52116523229" + }, + "outputs": [], + "source": [ + "% pip install ." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "BjtTp3wdV43q", + "outputId": "fc110924-7bb8-42e4-f019-955a7daee03b" + }, + "outputs": [], + "source": [ + "! 
pip install -r requirements/openvino.txt" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0NJboi_7XSSN" + }, + "source": [ + "> Note: Restart Runtime if promted by clicking the button at the end of the install logs\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y4sQOIwOUO0u" + }, + "source": [ + "## Download and setup dataset\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "SN3b1L115gIY", + "outputId": "88804d1d-072b-41ba-e77e-e57a54558d2a" + }, + "outputs": [], + "source": [ + "!wget https://openvinotoolkit.github.io/anomalib/_downloads/3f2af1d7748194b18c2177a34c03a2c4/hazelnut_toy.zip" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "mIrbX6tRWrAM", + "outputId": "c43009f1-f56c-435b-f034-36a25243df42" + }, + "outputs": [], + "source": [ + "% cd /content/anomalib/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XDUsHlfr5wnI" + }, + "outputs": [], + "source": [ + "!mkdir datasets && unzip hazelnut_toy.zip -d datasets/ > /dev/null" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Mb_kkxi-URk7" + }, + "source": [ + "## Create configuration file for training using Folder Dataset\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following configuration file is based on the one at `anomalib/models/padim/config.yaml`. The configuration file at that location uses the MVTec dataset for training. Since we are working with a custom dataset, we will use the `Folder` datset format. In this format, the images are divided among folders such as _good_, and _colour_. Optionally, it can also contain a _mask_ folder as shown below.\n", + "\n", + "```bash\n", + "hazelnut_toy\n", + "├── colour\n", + "│ ├── 00.jpg\n", + "│ ├── 01.jpg\n", + "│ ...\n", + "├── good\n", + "│ ├── 00.jpg\n", + "│ ├── 01.jpg\n", + "└── mask\n", + " ├── 00.jpg\n", + " ├── 01.jpg\n", + " ...\n", + "```\n", + "\n", + "Each of these folders contain images belonging to their respective category. 
Since we are using the `hazelnut_toy` dataset, we need to change a few lines in the PaDiM configuration as shown below.\n", + "\n", + "```yaml\n", + "dataset:\n", + " name: \n", + " format: folder\n", + " path: \n", + " normal_dir: normal # name of the folder containing normal images.\n", + " abnormal_dir: abnormal # name of the folder containing abnormal images.\n", + " normal_test_dir: null # name of the folder containing normal test images.\n", + " task: segmentation # classification or segmentation\n", + " mask: #optional\n", + " extensions: null\n", + " split_ratio: 0.2 # ratio of the normal images that will be used to create a test split\n", + "```\n", + "\n", + "The complete configuration is in the codeblock below.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GNSo19XlPixN" + }, + "outputs": [], + "source": [ + "folder_padim = \"\"\"\n", + "dataset:\n", + " name: hazelnut\n", + " format: folder\n", + " path: /content/anomalib/datasets/hazelnut_toy\n", + " normal_dir: good # name of the folder containing normal images.\n", + " abnormal_dir: colour # name of the folder containing abnormal images.\n", + " normal_test_dir: null # name of the folder containing normal test images.\n", + " mask_dir: /content/anomalib/datasets/hazelnut_toy/mask/colour # optional\n", + " task: segmentation # classification or segmentation\n", + " extensions: null\n", + " train_batch_size: 32\n", + " eval_batch_size: 32\n", + " num_workers: 8\n", + " image_size: 256 # dimensions to which images are resized (mandatory)\n", + " center_crop: null # dimensions to which images are center-cropped after resizing (optional)\n", + " normalization: imagenet # data distribution to which the images will be normalized: [none, imagenet]\n", + " transform_config:\n", + " train: null\n", + " eval: null\n", + " test_split_mode: from_dir # options: [from_dir, synthetic]\n", + " test_split_ratio: 0.2 # fraction of train images held out testing (usage depends on test_split_mode)\n", + " val_split_mode: same_as_test # options: [same_as_test, from_test, synthetic]\n", + " val_split_ratio: 0.5 # fraction of train/test images held out for validation (usage depends on val_split_mode)\n", + " tiling:\n", + " apply: false\n", + " tile_size: null\n", + " stride: null\n", + " remove_border_count: 0\n", + " use_random_tiling: False\n", + " random_tile_count: 16\n", + "\n", + "model:\n", + " name: padim\n", + " backbone: resnet18\n", + " pre_trained: true\n", + " layers:\n", + " - layer1\n", + " - layer2\n", + " - layer3\n", + " normalization_method: min_max # options: [none, min_max, cdf]\n", + "\n", + "metrics:\n", + " image:\n", + " - F1Score\n", + " - AUROC\n", + " pixel:\n", + " - F1Score\n", + " - AUROC\n", + " threshold:\n", + " image_default: 3\n", + " pixel_default: 3\n", + " adaptive: true\n", + "\n", + "visualization:\n", + " show_images: False # show images on the screen\n", + " save_images: True # save images to the file system\n", + " log_images: True # log images to the available loggers (if any)\n", + " image_save_path: null # path to which images will be saved\n", + " mode: full # options: [\"full\", \"simple\"]\n", + "\n", + "project:\n", + " seed: 42\n", + " path: ./results\n", + "\n", + "logging:\n", + " logger: [] # options: [comet, tensorboard, wandb, csv] or combinations.\n", + " log_graph: false # Logs the model graph to respective logger.\n", + "\n", + "optimization:\n", + " export_mode: null # options: torch, onnx, openvino\n", + "\n", + "# PL Trainer Args. 
Don't add extra parameter here.\n", + "trainer:\n", + " enable_checkpointing: true\n", + " default_root_dir: null\n", + " gradient_clip_val: 0\n", + " gradient_clip_algorithm: norm\n", + " num_nodes: 1\n", + " devices: 1\n", + " enable_progress_bar: true\n", + " overfit_batches: 0.0\n", + " track_grad_norm: -1\n", + " check_val_every_n_epoch: 1 # Don't validate before extracting features.\n", + " fast_dev_run: false\n", + " accumulate_grad_batches: 1\n", + " max_epochs: 1\n", + " min_epochs: null\n", + " max_steps: -1\n", + " min_steps: null\n", + " max_time: null\n", + " limit_train_batches: 1.0\n", + " limit_val_batches: 1.0\n", + " limit_test_batches: 1.0\n", + " limit_predict_batches: 1.0\n", + " val_check_interval: 1.0 # Don't validate before extracting features.\n", + " log_every_n_steps: 50\n", + " accelerator: auto # <\"cpu\", \"gpu\", \"tpu\", \"ipu\", \"hpu\", \"auto\">\n", + " strategy: null\n", + " sync_batchnorm: false\n", + " precision: 32\n", + " enable_model_summary: true\n", + " num_sanity_val_steps: 0\n", + " profiler: null\n", + " benchmark: false\n", + " deterministic: false\n", + " reload_dataloaders_every_n_epochs: 0\n", + " auto_lr_find: false\n", + " replace_sampler_ddp: true\n", + " detect_anomaly: false\n", + " auto_scale_batch_size: false\n", + " plugins: null\n", + " multiple_trainloader_mode: max_size_cycle\n", + "\n", + "\"\"\"\n", + "with open(\"config.yaml\", \"w\", encoding=\"utf8\") as f:\n", + " f.writelines(folder_padim)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jpjtUHyWUXx0" + }, + "source": [ + "## Train the model to see if it is working\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "h-GnKXC-KAi4", + "outputId": "12aa0da9-2d02-49ba-f10e-9e1fb3630c6f" + }, + "outputs": [], + "source": [ + "! python ./tools/train.py --config config.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Wt6BCkcoUch7" + }, + "source": [ + "## Create Benchmarking config\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Benchmarking runs are configured using a yaml file. It contains five sections. The first one is `seed:` it is used to reproducibility across benchmarking runs. One of the uniqueness of Anomalib is that it supports deployment to edge devices using [OpenVINO](https://docs.openvino.ai/latest/index.html). This enables optimized performance and faster inference on majority of Intel devices. The benchmarking script can be used to compute OpenVINO inference throughput. To do this, `compute_openvino:` should be set to `true`.\n", + "\n", + "> Note: Not all models in Anomalib support OpenVINO export.\n", + "\n", + "The `hardware` section of the config file is used to pass the list of hardwares on which to compute the benchmarking results. If the host system has multiple GPUs, then the benchmarking computation is distributed across GPUs to speed up collection of results. By default, the results are gathered in a `csv` file but with the `writer` flag, you can also save the results to `tensorboard` and `wandb` loggers. The final section is the `grid_search` section. It has two parameters, _dataset_ and _model_name_. 
The _dataset_ field is used to set the values of grid search while the _model_name_ section is used to pass the list of models for which the benchmark is computed.\n", + "\n", + "In this notebook we are working with a toy dataset, so we also need to tell the benchmarking script to use that particular dataset instead of the default `MVTec` as defined in each of the model config file. We can either update each config file or just pass a list of one value for the fields such as _format_, _path_, etc., as shown below.\n", + "\n", + "For more information about benchmarking, you can look at the [Anomalib Documentation](https://openvinotoolkit.github.io/anomalib/guides/benchmarking.html).\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qdRZlPl9Sh8U" + }, + "outputs": [], + "source": [ + "# While every attribute in dataset and model can be used to perform grid search,\n", + "# in this example the lists with only single values are used for patching the\n", + "# original model config\n", + "benchmarking_params = \"\"\"seed: 42\n", + "compute_openvino: true\n", + "hardware:\n", + " - gpu\n", + "writer: []\n", + "grid_search:\n", + " dataset:\n", + " name: [hazelnut]\n", + " format: [folder]\n", + " root: [/content/anomalib/datasets/hazelnut_toy]\n", + " normal_dir: [good]\n", + " abnormal_dir: [colour]\n", + " normal_test_dir: [null]\n", + " task: [segmentation]\n", + " mask_dir: [/content/anomalib/datasets/hazelnut_toy/mask/colour]\n", + " extensions: [null]\n", + " split_ratio: [0.2]\n", + " image_size: [256, 128]\n", + " num_workers: [4]\n", + " model_name:\n", + " - padim\n", + " - patchcore\n", + "\"\"\"\n", + "with open(\"benchmark_config.yaml\", \"w\", encoding=\"utf8\") as f:\n", + " f.writelines(benchmarking_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ISlNVY00af7B", + "outputId": "138840e4-8524-4e6f-c784-f9a175853a4f" + }, + "outputs": [], + "source": [ + "!python ./tools/benchmarking/benchmark.py --config benchmark_config.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 320 + }, + "id": "mKa-0XO_sLLy", + "outputId": "c90e033d-61e3-4b49-f9ba-5363836d0a42" + }, + "outputs": [], + "source": [ + "df = pd.read_csv(\"runs/padim_gpu.csv\")\n", + "df.head()" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "Anomalib Benchmarking", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3.8.0 ('anomalib')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + }, + "vscode": { + "interpreter": { + "hash": "ba723bee1893023fba5911c5ba85dac05fe2496fa0862b3e274bad096c0e1e2a" + } + } }, - "id": "HmOFHNPsJV4H", - "outputId": "ad77e030-c2dd-4dbc-f4c3-882e1229999f" - }, - "outputs": [], - "source": [ - "!git clone https://github.com/openvinotoolkit/anomalib.git --branch main --single-branch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "N6bEfY5HOMfQ", - "outputId": "55756dfd-955d-49e3-9f5f-f387454010fe" - }, - "outputs": [], - "source": [ - "% cd anomalib" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "zPCaYurIPQPC", - "outputId": "d1a40d6b-d1b6-4464-8259-b52116523229" - }, - "outputs": [], - "source": [ - "% pip install ." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "BjtTp3wdV43q", - "outputId": "fc110924-7bb8-42e4-f019-955a7daee03b" - }, - "outputs": [], - "source": [ - "! pip install -r requirements/openvino.txt" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0NJboi_7XSSN" - }, - "source": [ - "> Note: Restart Runtime if promted by clicking the button at the end of the install logs\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "y4sQOIwOUO0u" - }, - "source": [ - "## Download and setup dataset\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "SN3b1L115gIY", - "outputId": "88804d1d-072b-41ba-e77e-e57a54558d2a" - }, - "outputs": [], - "source": [ - "!wget https://openvinotoolkit.github.io/anomalib/_downloads/3f2af1d7748194b18c2177a34c03a2c4/hazelnut_toy.zip" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "mIrbX6tRWrAM", - "outputId": "c43009f1-f56c-435b-f034-36a25243df42" - }, - "outputs": [], - "source": [ - "% cd /content/anomalib/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "XDUsHlfr5wnI" - }, - "outputs": [], - "source": [ - "!mkdir datasets && unzip hazelnut_toy.zip -d datasets/ > /dev/null" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Mb_kkxi-URk7" - }, - "source": [ - "## Create configuration file for training using Folder Dataset\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following configuration file is based on the one at `anomalib/models/padim/config.yaml`. The configuration file at that location uses the MVTec dataset for training. Since we are working with a custom dataset, we will use the `Folder` datset format. In this format, the images are divided among folders such as _good_, and _colour_. Optionally, it can also contain a _mask_ folder as shown below.\n", - "\n", - "```bash\n", - "hazelnut_toy\n", - "├── colour\n", - "│ ├── 00.jpg\n", - "│ ├── 01.jpg\n", - "│ ...\n", - "├── good\n", - "│ ├── 00.jpg\n", - "│ ├── 01.jpg\n", - "└── mask\n", - " ├── 00.jpg\n", - " ├── 01.jpg\n", - " ...\n", - "```\n", - "\n", - "Each of these folders contain images belonging to their respective category. 
Since we are using the `hazelnut_toy` dataset, we need to change a few lines in the PaDiM configuration as shown below.\n", - "\n", - "```yaml\n", - "dataset:\n", - " name: \n", - " format: folder\n", - " path: \n", - " normal_dir: normal # name of the folder containing normal images.\n", - " abnormal_dir: abnormal # name of the folder containing abnormal images.\n", - " normal_test_dir: null # name of the folder containing normal test images.\n", - " task: segmentation # classification or segmentation\n", - " mask: #optional\n", - " extensions: null\n", - " split_ratio: 0.2 # ratio of the normal images that will be used to create a test split\n", - "```\n", - "\n", - "The complete configuration is in the codeblock below.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GNSo19XlPixN" - }, - "outputs": [], - "source": [ - "folder_padim = \"\"\"\n", - "dataset:\n", - " name: hazelnut\n", - " format: folder\n", - " path: /content/anomalib/datasets/hazelnut_toy\n", - " normal_dir: good # name of the folder containing normal images.\n", - " abnormal_dir: colour # name of the folder containing abnormal images.\n", - " normal_test_dir: null # name of the folder containing normal test images.\n", - " mask_dir: /content/anomalib/datasets/hazelnut_toy/mask/colour # optional\n", - " task: segmentation # classification or segmentation\n", - " extensions: null\n", - " train_batch_size: 32\n", - " eval_batch_size: 32\n", - " num_workers: 8\n", - " image_size: 256 # dimensions to which images are resized (mandatory)\n", - " center_crop: null # dimensions to which images are center-cropped after resizing (optional)\n", - " normalization: imagenet # data distribution to which the images will be normalized: [none, imagenet]\n", - " transform_config:\n", - " train: null\n", - " eval: null\n", - " test_split_mode: from_dir # options: [from_dir, synthetic]\n", - " test_split_ratio: 0.2 # fraction of train images held out testing (usage depends on test_split_mode)\n", - " val_split_mode: same_as_test # options: [same_as_test, from_test, synthetic]\n", - " val_split_ratio: 0.5 # fraction of train/test images held out for validation (usage depends on val_split_mode)\n", - " tiling:\n", - " apply: false\n", - " tile_size: null\n", - " stride: null\n", - " remove_border_count: 0\n", - " use_random_tiling: False\n", - " random_tile_count: 16\n", - "\n", - "model:\n", - " name: padim\n", - " backbone: resnet18\n", - " pre_trained: true\n", - " layers:\n", - " - layer1\n", - " - layer2\n", - " - layer3\n", - " normalization_method: min_max # options: [none, min_max, cdf]\n", - "\n", - "metrics:\n", - " image:\n", - " - F1Score\n", - " - AUROC\n", - " pixel:\n", - " - F1Score\n", - " - AUROC\n", - " threshold:\n", - " image_default: 3\n", - " pixel_default: 3\n", - " adaptive: true\n", - "\n", - "visualization:\n", - " show_images: False # show images on the screen\n", - " save_images: True # save images to the file system\n", - " log_images: True # log images to the available loggers (if any)\n", - " image_save_path: null # path to which images will be saved\n", - " mode: full # options: [\"full\", \"simple\"]\n", - "\n", - "project:\n", - " seed: 42\n", - " path: ./results\n", - "\n", - "logging:\n", - " logger: [] # options: [comet, tensorboard, wandb, csv] or combinations.\n", - " log_graph: false # Logs the model graph to respective logger.\n", - "\n", - "optimization:\n", - " export_mode: null # options: torch, onnx, openvino\n", - "\n", - "# PL Trainer Args. 
Don't add extra parameter here.\n", - "trainer:\n", - " enable_checkpointing: true\n", - " default_root_dir: null\n", - " gradient_clip_val: 0\n", - " gradient_clip_algorithm: norm\n", - " num_nodes: 1\n", - " devices: 1\n", - " enable_progress_bar: true\n", - " overfit_batches: 0.0\n", - " track_grad_norm: -1\n", - " check_val_every_n_epoch: 1 # Don't validate before extracting features.\n", - " fast_dev_run: false\n", - " accumulate_grad_batches: 1\n", - " max_epochs: 1\n", - " min_epochs: null\n", - " max_steps: -1\n", - " min_steps: null\n", - " max_time: null\n", - " limit_train_batches: 1.0\n", - " limit_val_batches: 1.0\n", - " limit_test_batches: 1.0\n", - " limit_predict_batches: 1.0\n", - " val_check_interval: 1.0 # Don't validate before extracting features.\n", - " log_every_n_steps: 50\n", - " accelerator: auto # <\"cpu\", \"gpu\", \"tpu\", \"ipu\", \"hpu\", \"auto\">\n", - " strategy: null\n", - " sync_batchnorm: false\n", - " precision: 32\n", - " enable_model_summary: true\n", - " num_sanity_val_steps: 0\n", - " profiler: null\n", - " benchmark: false\n", - " deterministic: false\n", - " reload_dataloaders_every_n_epochs: 0\n", - " auto_lr_find: false\n", - " replace_sampler_ddp: true\n", - " detect_anomaly: false\n", - " auto_scale_batch_size: false\n", - " plugins: null\n", - " move_metrics_to_cpu: false\n", - " multiple_trainloader_mode: max_size_cycle\n", - "\n", - "\"\"\"\n", - "with open(\"config.yaml\", \"w\", encoding=\"utf8\") as f:\n", - " f.writelines(folder_padim)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "jpjtUHyWUXx0" - }, - "source": [ - "## Train the model to see if it is working\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "h-GnKXC-KAi4", - "outputId": "12aa0da9-2d02-49ba-f10e-9e1fb3630c6f" - }, - "outputs": [], - "source": [ - "! python ./tools/train.py --config config.yaml" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Wt6BCkcoUch7" - }, - "source": [ - "## Create Benchmarking config\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Benchmarking runs are configured using a yaml file. It contains five sections. The first one is `seed:` it is used to reproducibility across benchmarking runs. One of the uniqueness of Anomalib is that it supports deployment to edge devices using [OpenVINO](https://docs.openvino.ai/latest/index.html). This enables optimized performance and faster inference on majority of Intel devices. The benchmarking script can be used to compute OpenVINO inference throughput. To do this, `compute_openvino:` should be set to `true`.\n", - "\n", - "> Note: Not all models in Anomalib support OpenVINO export.\n", - "\n", - "The `hardware` section of the config file is used to pass the list of hardwares on which to compute the benchmarking results. If the host system has multiple GPUs, then the benchmarking computation is distributed across GPUs to speed up collection of results. By default, the results are gathered in a `csv` file but with the `writer` flag, you can also save the results to `tensorboard` and `wandb` loggers. The final section is the `grid_search` section. It has two parameters, _dataset_ and _model_name_. 
The _dataset_ field is used to set the values of grid search while the _model_name_ section is used to pass the list of models for which the benchmark is computed.\n", - "\n", - "In this notebook we are working with a toy dataset, so we also need to tell the benchmarking script to use that particular dataset instead of the default `MVTec` as defined in each of the model config file. We can either update each config file or just pass a list of one value for the fields such as _format_, _path_, etc., as shown below.\n", - "\n", - "For more information about benchmarking, you can look at the [Anomalib Documentation](https://openvinotoolkit.github.io/anomalib/guides/benchmarking.html).\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "qdRZlPl9Sh8U" - }, - "outputs": [], - "source": [ - "# While every attribute in dataset and model can be used to perform grid search,\n", - "# in this example the lists with only single values are used for patching the\n", - "# original model config\n", - "benchmarking_params = \"\"\"seed: 42\n", - "compute_openvino: true\n", - "hardware:\n", - " - gpu\n", - "writer: []\n", - "grid_search:\n", - " dataset:\n", - " name: [hazelnut]\n", - " format: [folder]\n", - " root: [/content/anomalib/datasets/hazelnut_toy]\n", - " normal_dir: [good]\n", - " abnormal_dir: [colour]\n", - " normal_test_dir: [null]\n", - " task: [segmentation]\n", - " mask_dir: [/content/anomalib/datasets/hazelnut_toy/mask/colour]\n", - " extensions: [null]\n", - " split_ratio: [0.2]\n", - " image_size: [256, 128]\n", - " num_workers: [4]\n", - " model_name:\n", - " - padim\n", - " - patchcore\n", - "\"\"\"\n", - "with open(\"benchmark_config.yaml\", \"w\", encoding=\"utf8\") as f:\n", - " f.writelines(benchmarking_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ISlNVY00af7B", - "outputId": "138840e4-8524-4e6f-c784-f9a175853a4f" - }, - "outputs": [], - "source": [ - "!python ./tools/benchmarking/benchmark.py --config benchmark_config.yaml" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 320 - }, - "id": "mKa-0XO_sLLy", - "outputId": "c90e033d-61e3-4b49-f9ba-5363836d0a42" - }, - "outputs": [], - "source": [ - "df = pd.read_csv(\"runs/padim_gpu.csv\")\n", - "df.head()" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "name": "Anomalib Benchmarking", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3.8.0 ('anomalib')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.11" - }, - "vscode": { - "interpreter": { - "hash": "ba723bee1893023fba5911c5ba85dac05fe2496fa0862b3e274bad096c0e1e2a" - } - } - }, - "nbformat": 4, - "nbformat_minor": 0 + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/src/anomalib/__init__.py b/src/anomalib/__init__.py index af989dfe68..263c1717d9 100644 --- a/src/anomalib/__init__.py +++ b/src/anomalib/__init__.py @@ -4,6 +4,6 @@ # SPDX-License-Identifier: Apache-2.0 anomalib_version = "0.7.0" -custom_orobix_version = "1.2.11" +custom_orobix_version = "1.3.0" __version__ = f"{anomalib_version}+obx.{custom_orobix_version}" diff --git 
a/src/anomalib/models/ai_vad/config.yaml b/src/anomalib/models/ai_vad/config.yaml index 7f45e02e2c..7f2d682213 100644 --- a/src/anomalib/models/ai_vad/config.yaml +++ b/src/anomalib/models/ai_vad/config.yaml @@ -115,5 +115,4 @@ trainer: detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/src/anomalib/models/cfa/config.yaml b/src/anomalib/models/cfa/config.yaml index fbf67fcb51..3b7dc2de63 100644 --- a/src/anomalib/models/cfa/config.yaml +++ b/src/anomalib/models/cfa/config.yaml @@ -104,5 +104,4 @@ trainer: detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/src/anomalib/models/cflow/config.yaml b/src/anomalib/models/cflow/config.yaml index e4e630777d..87b5bbf180 100644 --- a/src/anomalib/models/cflow/config.yaml +++ b/src/anomalib/models/cflow/config.yaml @@ -110,5 +110,4 @@ trainer: detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/src/anomalib/models/cflow/lightning_model.py b/src/anomalib/models/cflow/lightning_model.py index ff1c62e42d..1e8227130c 100644 --- a/src/anomalib/models/cflow/lightning_model.py +++ b/src/anomalib/models/cflow/lightning_model.py @@ -167,7 +167,7 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: Dictionary containing images, anomaly maps, true labels and masks. - These are required in `validation_epoch_end` for feature concatenation. + These are required in `on_validation_epoch_end` for feature concatenation. """ prediction = self.model(batch["image"]) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index fc1984eca8..4655cb2b70 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -14,7 +14,7 @@ from anomalib.models.components.losses import dice import torch from pytorch_lightning.callbacks import Callback -from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT +from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor, nn from torchmetrics import Metric @@ -55,6 +55,8 @@ def __init__(self) -> None: self.pixel_metrics: AnomalibMetricCollection self.false_good = 0 self.false_bad = 0 + self.validation_outputs = [] + self.test_outputs = [] def forward(self, batch: dict[str, str | Tensor], *args, **kwargs) -> Any: """Forward-pass input tensor to the module. @@ -117,34 +119,32 @@ def test_step(self, batch: dict[str, str | Tensor], batch_idx: int, *args, **kwa Returns: Dictionary containing images, features, true labels and masks. - These are required in `validation_epoch_end` for feature concatenation. + These are required in `on_validation_epoch_end` for feature concatenation. """ del args, kwargs # These variables are not used. return self.predict_step(batch, batch_idx) - def validation_step_end(self, val_step_outputs: STEP_OUTPUT, *args, **kwargs) -> STEP_OUTPUT: + def on_validation_batch_end(self, outputs: STEP_OUTPUT, *args, **kwargs) -> STEP_OUTPUT: """Called at the end of each validation step.""" del args, kwargs # These variables are not used. 
- self._outputs_to_cpu(val_step_outputs) - self._post_process(val_step_outputs) - return val_step_outputs + self._outputs_to_cpu(outputs) + self._post_process(outputs) + + self.validation_outputs.append(outputs) - def test_step_end(self, test_step_outputs: STEP_OUTPUT, *args, **kwargs) -> STEP_OUTPUT: + def on_test_batch_end(self, outputs: STEP_OUTPUT, *args, **kwargs) -> STEP_OUTPUT: """Called at the end of each test step.""" del args, kwargs # These variables are not used. - self._outputs_to_cpu(test_step_outputs) - self._post_process(test_step_outputs) - return test_step_outputs - - def validation_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: - """Compute threshold and performance metrics. + self._outputs_to_cpu(outputs) + self._post_process(outputs) + self.test_outputs.append(outputs) - Args: - outputs: Batch of outputs from the validation step - """ + def on_validation_epoch_end(self) -> None: + """Compute threshold and performance metrics.""" + outputs = self.validation_outputs if self.threshold_method == ThresholdMethod.ADAPTIVE: self._compute_adaptive_threshold(outputs) @@ -158,13 +158,11 @@ def validation_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: self._collect_outputs(self.image_metrics, self.pixel_metrics, outputs) self._log_metrics("validation") + self.validation_outputs.clear() - def test_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: - """Compute and save anomaly scores of the test set. - - Args: - outputs: Batch of outputs from the validation step - """ + def on_test_epoch_end(self) -> None: + """Compute and save anomaly scores of the test set.""" + outputs = self.test_outputs if hasattr(self.image_metrics, "F1Score"): dice_score = 0 counter = 0 @@ -190,7 +188,7 @@ def test_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: self._collect_outputs(self.image_metrics, self.pixel_metrics, outputs) self._log_metrics("test") - def _compute_adaptive_threshold(self, outputs: EPOCH_OUTPUT) -> None: + def _compute_adaptive_threshold(self, outputs: Any) -> None: self.image_threshold.reset() self.pixel_threshold.reset() self._collect_outputs(self.image_threshold, self.pixel_threshold, outputs) @@ -207,7 +205,7 @@ def _compute_adaptive_threshold(self, outputs: EPOCH_OUTPUT) -> None: def _collect_outputs( image_metric: AnomalibMetricCollection, pixel_metric: AnomalibMetricCollection, - outputs: EPOCH_OUTPUT, + outputs: Any, ) -> None: for output in outputs: image_metric.cpu() diff --git a/src/anomalib/models/csflow/config.yaml b/src/anomalib/models/csflow/config.yaml index f3691ed28f..851ece6290 100644 --- a/src/anomalib/models/csflow/config.yaml +++ b/src/anomalib/models/csflow/config.yaml @@ -111,5 +111,4 @@ trainer: detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/src/anomalib/models/dfkde/config.yaml b/src/anomalib/models/dfkde/config.yaml index 04acc3bb35..d9e9672f2f 100644 --- a/src/anomalib/models/dfkde/config.yaml +++ b/src/anomalib/models/dfkde/config.yaml @@ -97,5 +97,4 @@ trainer: detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: max_size_cycle diff --git a/src/anomalib/models/dfm/config.yaml b/src/anomalib/models/dfm/config.yaml index 2430604b71..8de5875600 100755 --- a/src/anomalib/models/dfm/config.yaml +++ b/src/anomalib/models/dfm/config.yaml @@ -98,5 +98,4 @@ trainer: detect_anomaly: false auto_scale_batch_size: false plugins: null - move_metrics_to_cpu: false multiple_trainloader_mode: 
max_size_cycle
diff --git a/src/anomalib/models/draem/config.yaml b/src/anomalib/models/draem/config.yaml
index 862fd1159b..d13e4d47e8 100644
--- a/src/anomalib/models/draem/config.yaml
+++ b/src/anomalib/models/draem/config.yaml
@@ -106,5 +106,4 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/efficient_ad/config.yaml b/src/anomalib/models/efficient_ad/config.yaml
index b2fbc36e60..e2d39ff896 100644
--- a/src/anomalib/models/efficient_ad/config.yaml
+++ b/src/anomalib/models/efficient_ad/config.yaml
@@ -104,5 +104,4 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/fastflow/config.yaml b/src/anomalib/models/fastflow/config.yaml
index 525b6a3412..5e44bde866 100644
--- a/src/anomalib/models/fastflow/config.yaml
+++ b/src/anomalib/models/fastflow/config.yaml
@@ -110,5 +110,4 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/ganomaly/config.yaml b/src/anomalib/models/ganomaly/config.yaml
index 0ffc0d11b3..561d2a98ba 100644
--- a/src/anomalib/models/ganomaly/config.yaml
+++ b/src/anomalib/models/ganomaly/config.yaml
@@ -109,5 +109,4 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/ganomaly/lightning_model.py b/src/anomalib/models/ganomaly/lightning_model.py
index d8153fd6a5..9cfb54a77f 100644
--- a/src/anomalib/models/ganomaly/lightning_model.py
+++ b/src/anomalib/models/ganomaly/lightning_model.py
@@ -9,11 +9,12 @@
 from __future__ import annotations
 
 import logging
+from typing import Any
 
 import torch
 from omegaconf import DictConfig, ListConfig
 from pytorch_lightning.callbacks import Callback, EarlyStopping
-from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT
+from pytorch_lightning.utilities.types import STEP_OUTPUT
 from torch import Tensor, optim
 
 from anomalib.models.components import AnomalyModule
@@ -157,12 +158,13 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST
         self.min_scores = min(self.min_scores, torch.min(batch["pred_scores"]))
         return batch
 
-    def validation_epoch_end(self, outputs: EPOCH_OUTPUT) -> EPOCH_OUTPUT:
+    def on_validation_epoch_end(self) -> Any:
         """Normalize outputs based on min/max values."""
+        outputs = self.validation_outputs
         logger.info("Normalizing validation outputs based on min/max values.")
         for prediction in outputs:
             prediction["pred_scores"] = self._normalize(prediction["pred_scores"])
-        super().validation_epoch_end(outputs)
+        super().on_validation_epoch_end()
         return outputs
 
     def on_test_start(self) -> None:
@@ -177,12 +179,13 @@ def test_step(self, batch: dict[str, str | Tensor], batch_idx: int, *args, **kwa
         self.min_scores = min(self.min_scores, torch.min(batch["pred_scores"]))
         return batch
 
-    def test_epoch_end(self, outputs: EPOCH_OUTPUT) -> EPOCH_OUTPUT:
+    def on_test_epoch_end(self) -> Any:
         """Normalize outputs based on min/max values."""
+        outputs = self.test_outputs
         logger.info("Normalizing test outputs based on min/max values.")
         for prediction in outputs:
             prediction["pred_scores"] = self._normalize(prediction["pred_scores"])
-        super().test_epoch_end(outputs)
+        super().on_test_epoch_end()
         return outputs
 
     def _normalize(self, scores: Tensor) -> Tensor:
diff --git a/src/anomalib/models/padim/config.yaml b/src/anomalib/models/padim/config.yaml
index 84b0d2d421..ee46f820db 100644
--- a/src/anomalib/models/padim/config.yaml
+++ b/src/anomalib/models/padim/config.yaml
@@ -105,5 +105,4 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/padim/lightning_model.py b/src/anomalib/models/padim/lightning_model.py
index ac28d1b210..67c9f590a3 100644
--- a/src/anomalib/models/padim/lightning_model.py
+++ b/src/anomalib/models/padim/lightning_model.py
@@ -127,7 +127,7 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST
 
         Returns:
             Dictionary containing images, features, true labels and masks.
-            These are required in `validation_epoch_end` for feature concatenation.
+            These are required in `on_validation_epoch_end` for feature concatenation.
         """
         del args, kwargs  # These variables are not used.
 
diff --git a/src/anomalib/models/patchcore/config.yaml b/src/anomalib/models/patchcore/config.yaml
index e0d788d4ea..def4b436ff 100644
--- a/src/anomalib/models/patchcore/config.yaml
+++ b/src/anomalib/models/patchcore/config.yaml
@@ -106,5 +106,5 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
+
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/patchcore/lightning_model.py b/src/anomalib/models/patchcore/lightning_model.py
index cd225adf6b..00bf50dff1 100644
--- a/src/anomalib/models/patchcore/lightning_model.py
+++ b/src/anomalib/models/patchcore/lightning_model.py
@@ -13,7 +13,7 @@
 import torch
 from torch import nn
 from omegaconf import DictConfig, ListConfig
-from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT
+from pytorch_lightning.utilities.types import STEP_OUTPUT
 from torch import Tensor
 
 from anomalib.models.components import AnomalyModule
diff --git a/src/anomalib/models/reverse_distillation/config.yaml b/src/anomalib/models/reverse_distillation/config.yaml
index 8724556725..4c1f4414e1 100644
--- a/src/anomalib/models/reverse_distillation/config.yaml
+++ b/src/anomalib/models/reverse_distillation/config.yaml
@@ -114,5 +114,4 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/reverse_distillation/lightning_model.py b/src/anomalib/models/reverse_distillation/lightning_model.py
index 5489daab5b..f6945d1483 100644
--- a/src/anomalib/models/reverse_distillation/lightning_model.py
+++ b/src/anomalib/models/reverse_distillation/lightning_model.py
@@ -104,7 +104,7 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST
 
         Returns:
             Dictionary containing images, anomaly maps, true labels and masks.
-            These are required in `validation_epoch_end` for feature concatenation.
+            These are required in `on_validation_epoch_end` for feature concatenation.
         """
         del args, kwargs  # These variables are not used.
 
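The hunks above migrate the epoch-level hooks to the Lightning 2.* API: `validation_epoch_end(outputs)` and `test_epoch_end(outputs)` were removed, so `AnomalyModule` now gathers the per-batch results itself (`self.validation_outputs` / `self.test_outputs`) inside `on_validation_batch_end` / `on_test_batch_end` and consumes them in `on_validation_epoch_end` / `on_test_epoch_end`. The sketch below illustrates that accumulation pattern in isolation; `ToyModule`, its linear layer, and the logged metric are invented for the example and are not part of anomalib.

# Illustrative sketch of the Lightning 2.* hook migration applied above.
# Only the hook names and the manual output accumulation mirror the patch.
import pytorch_lightning as pl
import torch
from torch import nn


class ToyModule(pl.LightningModule):
    def __init__(self) -> None:
        super().__init__()
        self.layer = nn.Linear(8, 1)
        # Lightning 2.* no longer passes the collected step outputs to the
        # epoch-end hooks, so the module keeps them itself.
        self.validation_outputs = []

    def validation_step(self, batch, batch_idx):
        scores = self.layer(batch).squeeze(-1)
        return {"pred_scores": scores}

    def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx=0):
        # Replaces the removed *_step_end hooks: stash each batch result
        # so the epoch-end hook can see the whole epoch.
        self.validation_outputs.append(outputs)

    def on_validation_epoch_end(self) -> None:
        # Replaces validation_epoch_end(outputs): read the accumulated list,
        # compute epoch-level values, then clear it for the next epoch.
        scores = torch.cat([out["pred_scores"] for out in self.validation_outputs])
        self.log("val_mean_score", scores.mean())
        self.validation_outputs.clear()

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)

GANomaly follows the same recipe in its hunk above: it normalizes the accumulated predictions first and then hands control back to the base class via `super().on_validation_epoch_end()`.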
diff --git a/src/anomalib/models/rkde/config.yaml b/src/anomalib/models/rkde/config.yaml
index 4fc6a1a5d8..a1a9a39b62 100644
--- a/src/anomalib/models/rkde/config.yaml
+++ b/src/anomalib/models/rkde/config.yaml
@@ -107,5 +107,4 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/stfpm/config.yaml b/src/anomalib/models/stfpm/config.yaml
index 1f92f3a187..7e0670d6f2 100644
--- a/src/anomalib/models/stfpm/config.yaml
+++ b/src/anomalib/models/stfpm/config.yaml
@@ -111,5 +111,4 @@ trainer:
   detect_anomaly: false
   auto_scale_batch_size: false
   plugins: null
-  move_metrics_to_cpu: false
   multiple_trainloader_mode: max_size_cycle
diff --git a/src/anomalib/models/stfpm/lightning_model.py b/src/anomalib/models/stfpm/lightning_model.py
index e05f0aa783..4602f79fe9 100644
--- a/src/anomalib/models/stfpm/lightning_model.py
+++ b/src/anomalib/models/stfpm/lightning_model.py
@@ -75,7 +75,7 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST
 
         Returns:
             Dictionary containing images, anomaly maps, true labels and masks.
-            These are required in `validation_epoch_end` for feature concatenation.
+            These are required in `on_validation_epoch_end` for feature concatenation.
         """
         del args, kwargs  # These variables are not used.
 
diff --git a/src/anomalib/utils/callbacks/cdf_normalization.py b/src/anomalib/utils/callbacks/cdf_normalization.py
index 464a58479c..b29b87f6c9 100644
--- a/src/anomalib/utils/callbacks/cdf_normalization.py
+++ b/src/anomalib/utils/callbacks/cdf_normalization.py
@@ -66,7 +66,7 @@ def on_validation_batch_end(
         outputs: STEP_OUTPUT | None,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
+        dataloader_idx: int = 0,
     ) -> None:
         """Called when the validation batch ends, standardizes the predicted scores and anomaly maps."""
         del trainer, batch, batch_idx, dataloader_idx  # These variables are not used.
@@ -80,7 +80,7 @@ def on_test_batch_end(
         outputs: STEP_OUTPUT | None,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
+        dataloader_idx: int = 0,
     ) -> None:
         """Called when the test batch ends, normalizes the predicted scores and anomaly maps."""
         del trainer, batch, batch_idx, dataloader_idx  # These variables are not used.
@@ -95,7 +95,7 @@ def on_predict_batch_end(
         outputs: dict,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
+        dataloader_idx: int = 0,
     ) -> None:
         """Called when the predict batch ends, normalizes the predicted scores and anomaly maps."""
         del trainer, batch, batch_idx, dataloader_idx  # These variables are not used.
diff --git a/src/anomalib/utils/callbacks/min_max_normalization.py b/src/anomalib/utils/callbacks/min_max_normalization.py
index 60155031b8..cff1432b89 100644
--- a/src/anomalib/utils/callbacks/min_max_normalization.py
+++ b/src/anomalib/utils/callbacks/min_max_normalization.py
@@ -9,7 +9,7 @@
 import pytorch_lightning as pl
 import torch
-from pytorch_lightning import Callback
+from pytorch_lightning import Callback, LightningModule, Trainer
 from pytorch_lightning.utilities.types import STEP_OUTPUT
 
 from anomalib.models.components import AnomalyModule
 
@@ -56,7 +56,7 @@ def on_validation_batch_end(
         outputs: STEP_OUTPUT,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
+        dataloader_idx: int = 0,
     ) -> None:
         """Called when the validation batch ends, update the min and max observed values."""
         del trainer, batch, batch_idx, dataloader_idx  # These variables are not used.
@@ -77,7 +77,7 @@ def on_test_batch_end(
         outputs: STEP_OUTPUT | None,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
+        dataloader_idx: int = 0,
     ) -> None:
         """Called when the test batch ends, normalizes the predicted scores and anomaly maps."""
         del trainer, batch, batch_idx, dataloader_idx  # These variables are not used.
@@ -91,7 +91,7 @@ def on_predict_batch_end(
         outputs: Any,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
+        dataloader_idx: int = 0,
     ) -> None:
         """Called when the predict batch ends, normalizes the predicted scores and anomaly maps."""
         del trainer, batch, batch_idx, dataloader_idx  # These variables are not used.
diff --git a/src/anomalib/utils/callbacks/visualizer/visualizer_image.py b/src/anomalib/utils/callbacks/visualizer/visualizer_image.py
index 6d820c2fec..2cdf0f48cc 100644
--- a/src/anomalib/utils/callbacks/visualizer/visualizer_image.py
+++ b/src/anomalib/utils/callbacks/visualizer/visualizer_image.py
@@ -49,7 +49,7 @@ def on_predict_batch_end(
         outputs: STEP_OUTPUT | None,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
+        dataloader_idx: int = 0,
     ) -> None:
         """Show images at the end of every batch.
 
@@ -83,7 +83,7 @@ def on_test_batch_end(
         outputs: STEP_OUTPUT | None,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
+        dataloader_idx: int = 0,
     ) -> None:
         """Log images at the end of every batch.
 
diff --git a/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py b/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py
index 064e7f8936..80f6f30812 100644
--- a/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py
+++ b/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py
@@ -48,7 +48,7 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]):
         self.pixel_metrics.set_threshold(hparams.model.threshold.pixel_default)
 
     def test_step(self, batch, _):
-        """Only used to trigger on_test_epoch_end."""
+        """Only used to trigger on_test_epoch_end."""
         self.log(name="loss", value=0.0, prog_bar=True)
         outputs = dict(
             image_path=[Path(get_dataset_path("bottle")) / "broken_large/000.png"],
@@ -61,10 +61,10 @@ def test_step(self, batch, _):
         )
         return outputs
 
-    def validation_epoch_end(self, outputs):
+    def on_validation_epoch_end(self):
        return None
 
-    def test_epoch_end(self, outputs):
+    def on_test_epoch_end(self):
         return None
 
     def configure_optimizers(self):
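The callback hunks give `dataloader_idx` a default of `0` because Lightning 2.* can invoke the `on_*_batch_end` callback hooks without a dataloader index (for example with a single dataloader), so overrides that keep it as a required parameter no longer match the expected signature. Below is a minimal sketch of a 2.*-compatible callback; `ScoreRangeCallback` is a hypothetical example, not one of the callbacks shipped in this patch.

# Hypothetical example callback (not part of anomalib): shows the
# Lightning 2.*-compatible batch-end signature with dataloader_idx defaulted.
from __future__ import annotations

from typing import Any

import pytorch_lightning as pl
from pytorch_lightning import Callback
from pytorch_lightning.utilities.types import STEP_OUTPUT


class ScoreRangeCallback(Callback):
    """Track the minimum and maximum predicted scores seen during validation."""

    def __init__(self) -> None:
        self.min_score = float("inf")
        self.max_score = float("-inf")

    def on_validation_batch_end(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        outputs: STEP_OUTPUT | None,
        batch: Any,
        batch_idx: int,
        dataloader_idx: int = 0,  # default keeps the override valid when the argument is omitted
    ) -> None:
        del trainer, pl_module, batch, batch_idx, dataloader_idx  # not used here
        if isinstance(outputs, dict) and "pred_scores" in outputs:
            self.min_score = min(self.min_score, float(outputs["pred_scores"].min()))
            self.max_score = max(self.max_score, float(outputs["pred_scores"].max()))

Such a callback is registered the usual way, for example `pl.Trainer(callbacks=[ScoreRangeCallback()])`.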