From 04f0278b3cd9c2e929cd1ddcd1d96cf50e4bc190 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 12 Feb 2024 23:38:21 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../data/classification/nlet/pair_dataset.py       |  7 +++----
 neodroidvision/data/mixed/nvlabs_ffhq.py           | 10 +++++-----
 .../conversion/mnist/convert_mnist_to_png.py       |  2 +-
 .../architecture/nms_box_heads/box_predictor.py    |  2 +-
 .../ssd/architecture/nms_box_heads/ssd_box_head.py |  2 +-
 .../detection/single_stage/ssd/multi_box_loss.py   |  3 ++-
 .../ssd/object_detection_dataloader.py             | 14 ++++++++------
 neodroidvision/regression/diffusion/__init__.py    |  3 +--
 neodroidvision/regression/diffusion/diffusion.py   |  3 +--
 .../vae/architectures/flow/architectures.py        |  3 ++-
 .../regression/vae/architectures/flow/vae_flow.py  |  3 ++-
 neodroidvision/segmentation/evaluation/f_score.py  |  4 +---
 .../utilities/visualisation/plot_kernel.py         |  3 ++-
 .../two_stage/maskrcnn_neodroid_pose_dectection.py |  2 +-
 .../kivy_demo_app/opencv_face_tracking.py          |  2 +-
 .../pygame_demo_app/face_detection.py              |  2 +-
 .../autocapture/dlib_hog_examples/draw_regions.py  |  4 ++--
 .../autocapture/dlib_hog_examples/landmark_demo.py |  4 ++--
 18 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/neodroidvision/data/classification/nlet/pair_dataset.py b/neodroidvision/data/classification/nlet/pair_dataset.py
index 3ece7f20..82ef09ee 100644
--- a/neodroidvision/data/classification/nlet/pair_dataset.py
+++ b/neodroidvision/data/classification/nlet/pair_dataset.py
@@ -29,7 +29,8 @@ class PairDataset(
 ):  # TODO: Extract image specificity of class to a subclass and move this super pair class to a
     # general torch lib.
     """
-    # This dataset generates a pair of images. 0 for geniune pair and 1 for imposter pair"""
+    # This dataset generates a pair of images. 0 for geniune pair and 1 for imposter pair
+    """
 
     @passes_kws_to(DictImageFolder.__init__)
     @drop_unused_kws
@@ -54,9 +55,7 @@ def __init__(
             root=data_path / SplitEnum.training.value, split=self.split, **kwargs
         )
 
-    def __getitem__(
-        self, idx1: int
-    ) -> Union[
+    def __getitem__(self, idx1: int) -> Union[
         Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
         Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
     ]:
diff --git a/neodroidvision/data/mixed/nvlabs_ffhq.py b/neodroidvision/data/mixed/nvlabs_ffhq.py
index 8665ba2d..3bed801c 100644
--- a/neodroidvision/data/mixed/nvlabs_ffhq.py
+++ b/neodroidvision/data/mixed/nvlabs_ffhq.py
@@ -374,11 +374,11 @@ def download_files(
                 "%.2f/%.2f %s"
                 % (bytes_done / bytes_div, bytes_total / bytes_div, bytes_unit),
                 "%.2f %s/s" % (bandwidth / bandwidth_div, bandwidth_unit),
-                "done"
-                if bytes_total == bytes_done
-                else "..."
-                if len(timing) < timing_window or bandwidth == 0
-                else eta,
+                (
+                    "done"
+                    if bytes_total == bytes_done
+                    else "..." if len(timing) < timing_window or bandwidth == 0 else eta
+                ),
             ),
             end="",
             flush=True,
diff --git a/neodroidvision/data/synthesis/conversion/mnist/convert_mnist_to_png.py b/neodroidvision/data/synthesis/conversion/mnist/convert_mnist_to_png.py
index 2ea0ad81..11cebe6a 100644
--- a/neodroidvision/data/synthesis/conversion/mnist/convert_mnist_to_png.py
+++ b/neodroidvision/data/synthesis/conversion/mnist/convert_mnist_to_png.py
@@ -75,7 +75,7 @@ def write_dataset(labels, data, size, rows, cols, output_dir) -> None:
             os.makedirs(dir)
 
     import png  # pip install pypng
-    for (i, label) in enumerate(labels):
+    for i, label in enumerate(labels):
         output_filename = output_dirs[label] / f"{str(i)}.png"
         print(f"writing {output_filename}")
         with open(output_filename, "wb") as h:
diff --git a/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/box_predictor.py b/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/box_predictor.py
index 52bd72ab..a51f1581 100644
--- a/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/box_predictor.py
+++ b/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/box_predictor.py
@@ -28,7 +28,7 @@ def __init__(self, boxes_per_location, out_channels, num_categories):
         self.cls_headers = nn.ModuleList()
         self.reg_headers = nn.ModuleList()
 
-        for (level_i, (num_boxes, num_channels)) in enumerate(
+        for level_i, (num_boxes, num_channels) in enumerate(
             zip(boxes_per_location, self.out_channels)
         ):
             self.cls_headers.append(
diff --git a/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/ssd_box_head.py b/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/ssd_box_head.py
index e9885cd5..362d2b53 100644
--- a/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/ssd_box_head.py
+++ b/neodroidvision/detection/single_stage/ssd/architecture/nms_box_heads/ssd_box_head.py
@@ -118,7 +118,7 @@ def forward(self, features: torch.Tensor) -> SSDOut:
         categori_logits, bbox_pred = self.predictor(features)
         results = []
-        for (scores, boxes) in zip(
+        for scores, boxes in zip(
             functional.log_softmax(
                 categori_logits, dim=-1
             ),  # TODO:Check dim maybe it should be 1
diff --git a/neodroidvision/detection/single_stage/ssd/multi_box_loss.py b/neodroidvision/detection/single_stage/ssd/multi_box_loss.py
index 9af286b1..4affd90f 100644
--- a/neodroidvision/detection/single_stage/ssd/multi_box_loss.py
+++ b/neodroidvision/detection/single_stage/ssd/multi_box_loss.py
@@ -46,7 +46,8 @@ def forward(
             confidence (batch_size, num_priors, num_categories): class predictions.
             predicted_locations (batch_size, num_priors, 4): predicted locations.
             labels (batch_size, num_priors): real labels of all the priors.
-            gt_locations (batch_size, num_priors, 4): real boxes corresponding all the priors."""
+            gt_locations (batch_size, num_priors, 4): real boxes corresponding all the priors.
+        """
 
         with torch.no_grad():
             # derived from cross_entropy=sum(log(p))
diff --git a/neodroidvision/detection/single_stage/ssd/object_detection_dataloader.py b/neodroidvision/detection/single_stage/ssd/object_detection_dataloader.py
index 9a77d747..e0fa6e11 100644
--- a/neodroidvision/detection/single_stage/ssd/object_detection_dataloader.py
+++ b/neodroidvision/detection/single_stage/ssd/object_detection_dataloader.py
@@ -58,9 +58,9 @@ def object_detection_data_loaders(
         cfg=cfg,
         dataset_type=cfg.dataset_type,
         data_root=data_root,
-        sub_datasets=cfg.datasets.train
-        if split == SplitEnum.training
-        else cfg.datasets.test,
+        sub_datasets=(
+            cfg.datasets.train if split == SplitEnum.training else cfg.datasets.test
+        ),
         split=split,
     ).sub_datasets:
         if distributed:
@@ -72,9 +72,11 @@ def object_detection_data_loaders(
 
         batch_sampler = torch.utils.data.sampler.BatchSampler(
             sampler=sampler,
-            batch_size=cfg.solver.batch_size
-            if split == SplitEnum.training
-            else cfg.test.batch_size,
+            batch_size=(
+                cfg.solver.batch_size
+                if split == SplitEnum.training
+                else cfg.test.batch_size
+            ),
             drop_last=False,
         )
         if max_iter is not None:
diff --git a/neodroidvision/regression/diffusion/__init__.py b/neodroidvision/regression/diffusion/__init__.py
index 4ff024fd..4f36ad6c 100644
--- a/neodroidvision/regression/diffusion/__init__.py
+++ b/neodroidvision/regression/diffusion/__init__.py
@@ -10,8 +10,7 @@
 __all__ = []
 
 
-def _main():
-    ...
+def _main(): ...
 
 
 if __name__ == "__main__":
diff --git a/neodroidvision/regression/diffusion/diffusion.py b/neodroidvision/regression/diffusion/diffusion.py
index 43e788e9..4285ddff 100644
--- a/neodroidvision/regression/diffusion/diffusion.py
+++ b/neodroidvision/regression/diffusion/diffusion.py
@@ -10,8 +10,7 @@
 __all__ = []
 
 
-def _main():
-    ...
+def _main(): ...
 
 
 if __name__ == "__main__":
diff --git a/neodroidvision/regression/vae/architectures/flow/architectures.py b/neodroidvision/regression/vae/architectures/flow/architectures.py
index 9cfbf18b..35539b5b 100644
--- a/neodroidvision/regression/vae/architectures/flow/architectures.py
+++ b/neodroidvision/regression/vae/architectures/flow/architectures.py
@@ -43,7 +43,8 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:
 
 class Generator(nn.Module):
     """
-    Bernoulli model parameterized by a generative network with Gaussian latents for MNIST."""
+    Bernoulli model parameterized by a generative network with Gaussian latents for MNIST.
+    """
 
     def __init__(self, latent_size, data_size):
         super().__init__()
diff --git a/neodroidvision/regression/vae/architectures/flow/vae_flow.py b/neodroidvision/regression/vae/architectures/flow/vae_flow.py
index 6b0bb657..ed6ab2fb 100644
--- a/neodroidvision/regression/vae/architectures/flow/vae_flow.py
+++ b/neodroidvision/regression/vae/architectures/flow/vae_flow.py
@@ -106,7 +106,8 @@ class MADE(nn.Module):
 
     Follows https://arxiv.org/abs/1502.03509
 
-    This is used to build MAF: Masked Autoregressive Flow (https://arxiv.org/abs/1705.07057)."""
+    This is used to build MAF: Masked Autoregressive Flow (https://arxiv.org/abs/1705.07057).
+    """
 
     def __init__(self, num_input, num_output, num_hidden, num_context):
         super().__init__()
diff --git a/neodroidvision/segmentation/evaluation/f_score.py b/neodroidvision/segmentation/evaluation/f_score.py
index 7ab802c6..1fffdda5 100644
--- a/neodroidvision/segmentation/evaluation/f_score.py
+++ b/neodroidvision/segmentation/evaluation/f_score.py
@@ -49,8 +49,6 @@ def f_score(
     fp = torch.sum(pr) - tp
     fn = torch.sum(gt) - tp
 
-    score = ((1 + beta**2) * tp + eps) / (
-        (1 + beta**2) * tp + beta**2 * fn + fp + eps
-    )
+    score = ((1 + beta**2) * tp + eps) / ((1 + beta**2) * tp + beta**2 * fn + fp + eps)
 
     return score
diff --git a/neodroidvision/utilities/visualisation/plot_kernel.py b/neodroidvision/utilities/visualisation/plot_kernel.py
index 52ef43b0..1ecaf878 100644
--- a/neodroidvision/utilities/visualisation/plot_kernel.py
+++ b/neodroidvision/utilities/visualisation/plot_kernel.py
@@ -57,7 +57,8 @@ def plot_kernels(
         number_cols: number of columns to be displayed
         m_interpolation: interpolation methods matplotlib. See in:
-    https://matplotlib.org/gallery/images_contours_and_fields/interpolation_methods.html"""
+    https://matplotlib.org/gallery/images_contours_and_fields/interpolation_methods.html
+    """
 
     number_kernels = tensor.shape[0]
     number_rows = 1 + number_kernels // number_cols
diff --git a/samples/detection/two_stage/maskrcnn_neodroid_pose_dectection.py b/samples/detection/two_stage/maskrcnn_neodroid_pose_dectection.py
index 4884ccfe..bf5e6c33 100644
--- a/samples/detection/two_stage/maskrcnn_neodroid_pose_dectection.py
+++ b/samples/detection/two_stage/maskrcnn_neodroid_pose_dectection.py
@@ -74,7 +74,7 @@ def show_preds(img, pred):
     drawdot = lambda x, y, r=3, fill="red": draw.ellipse(
         (x - r, y - r, x + r, y + r), fill=fill
     )
-    for (box, kpts) in pred:
+    for box, kpts in pred:
         for kpt in kpts:
             if kpt[2] == 1:
                 drawdot(kpt[0], kpt[1])
diff --git a/samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py b/samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py
index 109fa1af..e750870d 100644
--- a/samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py
+++ b/samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py
@@ -147,7 +147,7 @@ def update(self, dt):
                 gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
             )
 
-            for (x, y, w, h) in faces:
+            for x, y, w, h in faces:
                 cv2.rectangle(rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
         except Exception as e:
             print(e)
diff --git a/samples/misc/graphical_interfaces/pygame_demo_app/face_detection.py b/samples/misc/graphical_interfaces/pygame_demo_app/face_detection.py
index fc8b0664..61324df3 100644
--- a/samples/misc/graphical_interfaces/pygame_demo_app/face_detection.py
+++ b/samples/misc/graphical_interfaces/pygame_demo_app/face_detection.py
@@ -72,7 +72,7 @@ def draw_from_points(cv_image, points):
     Returns a cv_image."""
     cv_image = numpy.ascontiguousarray(cv_image, dtype=numpy.uint8)
     for f in points:
-        for (x, y, w, h) in f:
+        for x, y, w, h in f:
             cv2.rectangle(cv_image, (x, y), (x + w, y + h), 255)
     return cv_image
diff --git a/samples/misc/opencv_samples/autocapture/dlib_hog_examples/draw_regions.py b/samples/misc/opencv_samples/autocapture/dlib_hog_examples/draw_regions.py
index f72c6b0c..b8933967 100644
--- a/samples/misc/opencv_samples/autocapture/dlib_hog_examples/draw_regions.py
+++ b/samples/misc/opencv_samples/autocapture/dlib_hog_examples/draw_regions.py
@@ -92,7 +92,7 @@ def asijdas():
     for image in AsyncVideoStream():
         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
-        for (i, rect) in enumerate(detector(gray, upsample)):
+        for i, rect in enumerate(detector(gray, upsample)):
             # determine the facial landmarks for the face region, then
             # convert the landmark (x, y)-coordinates to a NumPy array
             shape = shape_to_ndarray(predictor(gray, rect))
@@ -119,7 +119,7 @@ def asijdas():
 
             # loop over the subset of facial landmarks, drawing the
             # specific face part
-            for (x, y) in shape[i:j]:
+            for x, y in shape[i:j]:
                 cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)
 
             if False:
diff --git a/samples/misc/opencv_samples/autocapture/dlib_hog_examples/landmark_demo.py b/samples/misc/opencv_samples/autocapture/dlib_hog_examples/landmark_demo.py
index abf358a1..67ddf585 100644
--- a/samples/misc/opencv_samples/autocapture/dlib_hog_examples/landmark_demo.py
+++ b/samples/misc/opencv_samples/autocapture/dlib_hog_examples/landmark_demo.py
@@ -25,13 +25,13 @@
 for image in AsyncVideoStream():
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
-    for (i, rect) in enumerate(detector(gray, upsample_num_times)):
+    for i, rect in enumerate(detector(gray, upsample_num_times)):
         # determine the facial landmarks for the face region, then
         # convert the facial landmark (x, y)-coordinates to a NumPy
        # array

        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
-        for (x, y) in shape_to_ndarray(predictor(gray, rect)):
+        for x, y in shape_to_ndarray(predictor(gray, rect)):
            cv2.circle(image, (x, y), 2, (0, 255, 0), -1)

        if show_image(