[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Dec 18, 2023
1 parent 12973a5 commit 33ff177
Showing 46 changed files with 20 additions and 63 deletions.
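The fixes below are produced by formatting hooks declared in the repository's .pre-commit-config.yaml, which pre-commit.ci runs and commits automatically. For orientation, a minimal sketch of such a configuration follows; the concrete repos, hook ids, and revs are assumptions for illustration, not the repository's actual file:

repos:
  - repo: https://github.com/psf/black
    rev: 23.12.0
    hooks:
      - id: black  # code formatter; changes of the shape seen below (blank-line,
                   # paren, and docstring-quote fixes) match this style of hook
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace  # strips trailing whitespace
      - id: end-of-file-fixer    # ensures files end with a single newline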
@@ -180,7 +180,6 @@ def pred_target_train_model(
     for phase in [SplitEnum.training, SplitEnum.validation]:
         if phase == SplitEnum.training:
             with TorchTrainSession(model):
-
                 input, true_label = zip(*next(train_iterator))
 
                 rgb_imgs = torch_vision_normalize_batch_nchw(
@@ -177,7 +177,6 @@ def predictor_response_train_model_neodroid_observations(
     for phase in [SplitEnum.training, SplitEnum.validation]:
         if phase == SplitEnum.training:
             with TorchTrainSession(model):
-
                 input, true_label = zip(*next(train_iterator))
 
                 rgb_imgs = torch_vision_normalize_batch_nchw(
@@ -11,7 +11,6 @@
 from pathlib import Path
 
 if __name__ == "__main__":
-
     with open(str(Path.home() / "Downloads" / "imagenet_class_index.json")) as f:
         with open("../imagenet_2012_names.py", "w") as sfn:
             with open("../imagenet_2012_id.py", "w") as sfi:
3 changes: 2 additions & 1 deletion neodroidvision/data/classification/nlet/pair_dataset.py
@@ -29,7 +29,8 @@ class PairDataset(
 ):  # TODO: Extract image specificity of class to a subclass and move this super pair class to a
     # general torch lib.
     """
-    # This dataset generates a pair of images. 0 for geniune pair and 1 for imposter pair"""
+    # This dataset generates a pair of images. 0 for geniune pair and 1 for imposter pair
+    """
 
     @passes_kws_to(DictImageFolder.__init__)
     @drop_unused_kws
1 change: 0 additions & 1 deletion neodroidvision/data/detection/voc/voc_evaluation.py
@@ -235,7 +235,6 @@ def calc_detection_voc_prec_rec(
     ) in six.moves.zip(
         pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels, gt_difficults
     ):
-
         if gt_difficult is None:
             gt_difficult = numpy.zeros(gt_bbox.shape[0], dtype=bool)
 
@@ -75,7 +75,6 @@ def __getitem__(self, index):
 
 
 if __name__ == "__main__":
-
     import torch
     from matplotlib import pyplot
 
1 change: 0 additions & 1 deletion neodroidvision/data/segmentation/clouds.py
@@ -83,7 +83,6 @@ def __init__(
         N_FOLDS=10,
         SEED=246232,
     ):
-
         self.transp = transp
 
         if subset != subset.testing:
@@ -75,7 +75,7 @@ def write_dataset(labels, data, size, rows, cols, output_dir) -> None:
         os.makedirs(dir)
     import png  # pip install pypng
 
-    for (i, label) in enumerate(labels):
+    for i, label in enumerate(labels):
         output_filename = output_dirs[label] / f"{str(i)}.png"
         print(f"writing {output_filename}")
         with open(output_filename, "wb") as h:
@@ -53,7 +53,6 @@ def save_dataset(X, y, voxel, output, shape=(28, 28)):
 
 
 if __name__ == "__main__":
-
     with gzip.open(PROJECT_APP_PATH.user_data / "mnist.pkl.gz", "rb") as f:
         train_set, valid_set, test_set = pickle.load(f, encoding="iso-8859-1")
 
@@ -118,7 +118,6 @@ def plot(self, d=2, cmap="Oranges", show_axis: bool = False):
         """
         if d == 2:
-
             fig, axes = pyplot.subplots(
                 int(numpy.ceil(self.n_z / 4)), 4, figsize=(8, 8)
             )
@@ -104,7 +104,6 @@ def __init__(self, size, model_name, blocks_args=None, global_params=None):
         # Build blocks
         self._blocks = nn.ModuleList([])
         for block_args in self._blocks_args:
-
             # Update block input and output filters based on depth multiplier.
             block_args = block_args._replace(
                 input_filters=round_filters(
@@ -28,7 +28,7 @@ def __init__(self, boxes_per_location, out_channels, num_categories):
         self.cls_headers = nn.ModuleList()
         self.reg_headers = nn.ModuleList()
 
-        for (level_i, (num_boxes, num_channels)) in enumerate(
+        for level_i, (num_boxes, num_channels) in enumerate(
             zip(boxes_per_location, self.out_channels)
         ):
             self.cls_headers.append(
@@ -118,7 +118,7 @@ def forward(self, features: torch.Tensor) -> SSDOut:
         categori_logits, bbox_pred = self.predictor(features)
 
         results = []
-        for (scores, boxes) in zip(
+        for scores, boxes in zip(
             functional.log_softmax(
                 categori_logits, dim=-1
             ),  # TODO:Check dim maybe it should be 1
3 changes: 2 additions & 1 deletion neodroidvision/detection/single_stage/ssd/multi_box_loss.py
@@ -46,7 +46,8 @@ def forward(
             confidence (batch_size, num_priors, num_categories): class predictions.
             predicted_locations (batch_size, num_priors, 4): predicted locations.
             labels (batch_size, num_priors): real labels of all the priors.
-            gt_locations (batch_size, num_priors, 4): real boxes corresponding all the priors."""
+            gt_locations (batch_size, num_priors, 4): real boxes corresponding all the priors.
+        """
 
         with torch.no_grad():
             # derived from cross_entropy=sum(log(p))
@@ -50,7 +50,6 @@ def maskrcnn_train_single_epoch(
     :return:"""
     model.to(device)
     with TorchTrainSession(model):
-
         for images, targets in progress_bar(data_loader, description="Batch #"):
             images = [img.to(device) for img in images]
             targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
@@ -111,7 +110,6 @@ def maskrcnn_evaluate(
 
     with torch.no_grad():
         with TorchEvalSession(model):
-
             for image, targets in progress_bar(data_loader):
                 image = [img.to(device) for img in image]
                 targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
@@ -57,7 +57,6 @@ class Decoder(nn.Module):
     def __init__(
         self, layer_sizes: Sequence[int], latent_size: int, num_conditions: int
     ):
-
         super().__init__()
 
         self.MLP = nn.Sequential()
@@ -43,7 +43,8 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:
 
 
 class Generator(nn.Module):
     """
-    Bernoulli model parameterized by a generative network with Gaussian latents for MNIST."""
+    Bernoulli model parameterized by a generative network with Gaussian latents for MNIST.
+    """
 
     def __init__(self, latent_size, data_size):
         super().__init__()
3 changes: 2 additions & 1 deletion neodroidvision/regression/vae/architectures/flow/vae_flow.py
@@ -106,7 +106,8 @@ class MADE(nn.Module):
     Follows https://arxiv.org/abs/1502.03509
-    This is used to build MAF: Masked Autoregressive Flow (https://arxiv.org/abs/1705.07057)."""
+    This is used to build MAF: Masked Autoregressive Flow (https://arxiv.org/abs/1705.07057).
+    """
 
     def __init__(self, num_input, num_output, num_hidden, num_context):
         super().__init__()
@@ -104,13 +104,11 @@ def hough_circle_calibrator(
     hi = cv2.getTrackbarPos(hi_label, canny_frame_window_label)
 
     if lo != lo_prev or hi != hi_prev:  # --------------------------= RE-SYNC
-
         a_canny_refresh_flag = True  # --------------------------= FLAG
 
         lo_prev = lo
         hi_prev = hi
     else:
-
         a_canny_refresh_flag = False  # --------------------------= Un-FLAG
 
     dp = cv2.getTrackbarPos(dp_label, canny_hough_circle_window_label)
@@ -134,7 +132,6 @@
         or min_radius != min_radius_prev
         or max_radius != max_radius_prev
     ):  # ----------------------------------------------= RE-SYNC
-
         a_hough_refresh_flag = True  # --------------------------= FLAG
 
         dp_prev = dp
@@ -144,7 +141,6 @@
         min_radius_prev = min_radius
         max_radius_prev = max_radius
     else:
-
         a_hough_refresh_flag = False  # --------------------------= Un-FLAG
 
     if (
@@ -155,7 +151,6 @@
         cv2.imshow(canny_frame_window_label, edges)
 
     if a_canny_refresh_flag or a_hough_refresh_flag:
-
         circles = cv2.HoughCircles(
             edges,
             cv2.HOUGH_GRADIENT,
@@ -139,7 +139,6 @@ def hough_line_calibrator(
         lo_prev = lo
         hi_prev = hi
     else:
-
         a_canny_refresh_flag = False  # --------------------------= Un-FLAG
 
     threshold = cv2.getTrackbarPos(threshold_label, canny_hough_lines_window_label)
@@ -171,7 +170,6 @@
         or min_theta != min_theta_prev
         or max_theta != max_theta_prev
     ):  # ----------------------------------------------= RE-SYNC
-
         a_hough_refresh_flag = True  # --------------------------= FLAG
 
         rho_prev = rho
@@ -182,7 +180,6 @@
         min_theta_prev = min_theta
         max_theta_prev = max_theta
     else:
-
         a_hough_refresh_flag = False  # --------------------------= Un-FLAG
 
     if (
@@ -45,7 +45,6 @@ def __init__(
         rgb_weight=(0, 1, 1.5),
         input_frame_shape=None,
     ) -> None:
-
         logo_image = cv2.imread(logo_path, cv2.IMREAD_UNCHANGED)
         h, w, c = logo_image.shape
         if angle % 360 != 0:
@@ -77,7 +76,6 @@ def __init__(
         self.logo_image[:, :, 0] = self.logo_image[:, :, 0] * self.rgb_weight[2]
 
         if input_frame_shape is not None:
-
             logo_w = input_frame_shape[1] * self.size
             ratio = logo_w / self.ori_shape[1]
             logo_h = int(ratio * self.ori_shape[0])
@@ -20,10 +20,8 @@
 if int(torchvision.__version__.split(".")[1]) >= int("0.3.0".split(".")[1]):
     nms_support = torchvision.ops.nms
 else:
-
     print(f"torchvision version: {torchvision.__version__}" "\n nms not supported")
     try:
-
         import ssd_torch_extension
 
         nms_support = ssd_torch_extension.nms  # non_maximum_suppression
@@ -80,7 +80,6 @@ def suahd():
     patch_size = 8
 
     if show_2d:
-
         from cv2 import circle
         from matplotlib import pyplot
 
@@ -344,7 +344,6 @@ def draw_bounding_box_on_image(
     text_left = left
 
     if top > total_display_str_height:
-
         if label_inside:
             text_bottom = top + total_display_str_height
         else:
3 changes: 2 additions & 1 deletion neodroidvision/utilities/visualisation/plot_kernel.py
@@ -57,7 +57,8 @@ def plot_kernels(
         number_cols: number of columns to be displayed
         m_interpolation: interpolation methods matplotlib. See in:
-        https://matplotlib.org/gallery/images_contours_and_fields/interpolation_methods.html"""
+        https://matplotlib.org/gallery/images_contours_and_fields/interpolation_methods.html
+    """
 
     number_kernels = tensor.shape[0]
     number_rows = 1 + number_kernels // number_cols
1 change: 0 additions & 1 deletion samples/classification/mnist_retrain.py
@@ -101,7 +101,6 @@ def predictor_response_train_model(
     for phase in [SplitEnum.training, SplitEnum.validation]:
         if phase == SplitEnum.training:
             with TorchTrainSession(model):
-
                 img, true_label = next(train_iterator)
 
                 rgb_imgs = to_tensor(
1 change: 0 additions & 1 deletion samples/classification/pair_siamese_training.py
@@ -166,7 +166,6 @@ def train_siamese(
         train_loss = loss_contrastive.cpu().item()
         writer.scalar("train_loss", train_loss, batch_i)
         if batch_counter.__next__() % validation_interval == 0:
-
             with TorchEvalSession(model):
                 valid_loss = 0
                 valid_accuracy = []
1 change: 0 additions & 1 deletion samples/classification/ram/ram_train.py
@@ -175,7 +175,6 @@ def train(self, *, writer):
         )
 
         for epoch in range(self.start_epoch, self.epochs):
-
             print(
                 f"\nEpoch: {epoch + 1}/{self.epochs} - LR: {self.optimiser.param_groups[0]['lr']:.6f}"
             )
1 change: 0 additions & 1 deletion samples/classification/ram/tests/stest_model.py
@@ -18,7 +18,6 @@
 import torch
 
 if __name__ == "__main__":
-
     config = get_ram_config()
 
     # load images
@@ -74,7 +74,7 @@ def show_preds(img, pred):
     drawdot = lambda x, y, r=3, fill="red": draw.ellipse(
         (x - r, y - r, x + r, y + r), fill=fill
     )
-    for (box, kpts) in pred:
+    for box, kpts in pred:
         for kpt in kpts:
             if kpt[2] == 1:
                 drawdot(kpt[0], kpt[1])
1 change: 0 additions & 1 deletion samples/misc/data/synthesis/mnist/mnist_dect_vis.py
@@ -38,7 +38,6 @@ def read_labels(label_path: pathlib.Path) -> Tuple[ndarray, ndarray]:
 
 
 if __name__ == "__main__":
-
     from draugr.opencv_utilities import draw_bounding_boxes
 
     base_path = pathlib.Path(
@@ -147,7 +147,7 @@ def update(self, dt):
                 gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
             )
 
-            for (x, y, w, h) in faces:
+            for x, y, w, h in faces:
                 cv2.rectangle(rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
         except Exception as e:
             print(e)
@@ -72,13 +72,12 @@ def draw_from_points(cv_image, points):
     Returns a cv_image."""
     cv_image = numpy.ascontiguousarray(cv_image, dtype=numpy.uint8)
     for f in points:
-        for (x, y, w, h) in f:
+        for x, y, w, h in f:
             cv2.rectangle(cv_image, (x, y), (x + w, y + h), 255)
     return cv_image
 
 
 if __name__ == "__main__":
-
     # Set game screen
     screen = pygame.display.set_mode(SCREEN)
 
@@ -90,7 +89,6 @@ def draw_from_points(cv_image, points):
     cam.start()
 
     while 1:  # Ze loop
-
         time.sleep(1 / 120)  # 60 frames per second
 
         image = cam.get_image()  # Get current webcam image
@@ -92,7 +92,7 @@ def asijdas():
     for image in AsyncVideoStream():
         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
-        for (i, rect) in enumerate(detector(gray, upsample)):
+        for i, rect in enumerate(detector(gray, upsample)):
            # determine the facial landmarks for the face region, then
            # convert the landmark (x, y)-coordinates to a NumPy array
            shape = shape_to_ndarray(predictor(gray, rect))
@@ -119,7 +119,7 @@ def asijdas():
 
            # loop over the subset of facial landmarks, drawing the
            # specific face part
-           for (x, y) in shape[i:j]:
+           for x, y in shape[i:j]:
                cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)
 
            if False:
@@ -25,13 +25,13 @@
 for image in AsyncVideoStream():
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
-    for (i, rect) in enumerate(detector(gray, upsample_num_times)):
+    for i, rect in enumerate(detector(gray, upsample_num_times)):
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
-       for (x, y) in shape_to_ndarray(predictor(gray, rect)):
+       for x, y in shape_to_ndarray(predictor(gray, rect)):
            cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
 
        if show_image(