diff --git a/vis4d/data/datasets/coco.py b/vis4d/data/datasets/coco.py
index 9ad54f1b..4e7a963c 100644
--- a/vis4d/data/datasets/coco.py
+++ b/vis4d/data/datasets/coco.py
@@ -15,8 +15,6 @@
 from vis4d.data.const import CommonKeys as K
 from vis4d.data.typing import DictData

-from ..const import CommonKeys as K
-from ..typing import DictData
 from .base import Dataset
 from .util import CacheMappingMixin, im_decode
diff --git a/vis4d/data/datasets/util.py b/vis4d/data/datasets/util.py
index 01d4e09b..38486fa3 100644
--- a/vis4d/data/datasets/util.py
+++ b/vis4d/data/datasets/util.py
@@ -53,7 +53,7 @@ def im_decode(
     }, f"{mode} not supported for image decoding!"
     if backend == "PIL":
         pil_img = Image.open(BytesIO(bytearray(im_bytes)))
-        pil_img = ImageOps.exif_transpose(pil_img)
+        pil_img = ImageOps.exif_transpose(pil_img)  # type: ignore
         if pil_img.mode == "L":  # pragma: no cover
             if mode == "L":
                 img: NDArrayUI8 = np.array(pil_img)[..., None]
diff --git a/vis4d/engine/loss_module.py b/vis4d/engine/loss_module.py
index 344fdf74..fc725317 100644
--- a/vis4d/engine/loss_module.py
+++ b/vis4d/engine/loss_module.py
@@ -201,22 +201,15 @@ def forward(
         # Convert loss_dict to total loss and metrics dictionary
         metrics: dict[str, float] = {}
-        if isinstance(loss_dict, Tensor):
-            total_loss = loss_dict
-        elif isinstance(loss_dict, dict):
-            keep_loss_dict: LossesType = {}
-            for k, v in loss_dict.items():
-                metrics[k] = v.detach().cpu().item()
-                if (
-                    self.exclude_attributes is None
-                    or k not in self.exclude_attributes
-                ):
-                    keep_loss_dict[k] = v
-            total_loss = sum(keep_loss_dict.values())  # type: ignore
-        else:
-            raise TypeError(
-                "Loss function must return a Tensor or a dict of Tensor"
-            )
+        keep_loss_dict: LossesType = {}
+        for k, v in loss_dict.items():
+            metrics[k] = v.detach().cpu().item()
+            if (
+                self.exclude_attributes is None
+                or k not in self.exclude_attributes
+            ):
+                keep_loss_dict[k] = v
+        total_loss: Tensor = sum(keep_loss_dict.values())  # type: ignore
         metrics["loss"] = total_loss.detach().cpu().item()
         return total_loss, metrics
diff --git a/vis4d/model/adapter/ema.py b/vis4d/model/adapter/ema.py
index 6b2f264f..573836bc 100644
--- a/vis4d/model/adapter/ema.py
+++ b/vis4d/model/adapter/ema.py
@@ -62,7 +62,7 @@ def update(self, steps: int) -> None:  # pylint: disable=unused-argument
         """Update the internal EMA model."""
         self._update(
             self.model,
-            update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m,  # type: ignore # pylint: disable=line-too-long
+            update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m,
         )

     def set(self, model: nn.Module) -> None:
@@ -114,5 +114,5 @@ def update(self, steps: int) -> None:
         )
         self._update(
             self.model,
-            update_fn=lambda e, m: decay * e + (1.0 - decay) * m,  # type: ignore # pylint: disable=line-too-long
+            update_fn=lambda e, m: decay * e + (1.0 - decay) * m,
         )
diff --git a/vis4d/op/base/pointnetpp.py b/vis4d/op/base/pointnetpp.py
index 2b7b889c..0cc40f28 100644
--- a/vis4d/op/base/pointnetpp.py
+++ b/vis4d/op/base/pointnetpp.py
@@ -19,11 +19,11 @@ class PointNetSetAbstractionOut(NamedTuple):
     """Ouput of PointNet set abstraction."""

-    coordinates: torch.Tensor  # [B, C, S]
-    features: torch.Tensor  # [B, D', S]
+    coordinates: Tensor  # [B, C, S]
+    features: Tensor  # [B, D', S]


-def square_distance(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:
+def square_distance(src: Tensor, dst: Tensor) -> Tensor:
     """Calculate Euclid distance between each two points.

     src^T * dst = xn * xm + yn * ym + zn * zm;
@@ -44,10 +44,10 @@ def square_distance(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:
     dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
     dist += torch.sum(src**2, -1).view(bs, n_pts_in, 1)
     dist += torch.sum(dst**2, -1).view(bs, 1, n_pts_out)
-    return dist  # type: ignore
+    return dist


-def index_points(points: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
+def index_points(points: Tensor, idx: Tensor) -> Tensor:
     """Indexes points.

     Input:
@@ -73,7 +73,7 @@ def index_points(points: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
     return new_points


-def farthest_point_sample(xyz: torch.Tensor, npoint: int) -> torch.Tensor:
+def farthest_point_sample(xyz: Tensor, npoint: int) -> Tensor:
     """Farthest point sampling.

     Input:
@@ -100,8 +100,8 @@ def farthest_point_sample(xyz: torch.Tensor, npoint: int) -> torch.Tensor:


 def query_ball_point(
-    radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor
-) -> torch.Tensor:
+    radius: float, nsample: int, xyz: Tensor, new_xyz: Tensor
+) -> Tensor:
     """Query around a ball with given radius.

     Input:
@@ -137,9 +137,9 @@ def sample_and_group(
     npoint: int,
     radius: float,
     nsample: int,
-    xyz: torch.Tensor,
-    points: torch.Tensor,
-) -> tuple[torch.Tensor, torch.Tensor]:
+    xyz: Tensor,
+    points: Tensor,
+) -> tuple[Tensor, Tensor]:
     """Samples and groups.

     Input:
@@ -170,9 +170,7 @@ def sample_and_group(
     return new_xyz, new_points


-def sample_and_group_all(
-    xyz: torch.Tensor, points: torch.Tensor
-) -> tuple[torch.Tensor, torch.Tensor]:
+def sample_and_group_all(xyz: Tensor, points: Tensor) -> tuple[Tensor, Tensor]:
     """Sample and groups all.

     Input:
@@ -243,7 +241,7 @@ def __init__(
         self.group_all = group_all

     def __call__(
-        self, coordinates: torch.Tensor, features: torch.Tensor
+        self, coordinates: Tensor, features: Tensor
     ) -> PointNetSetAbstractionOut:
         """Call function.

@@ -259,7 +257,7 @@ def __call__(
         return self._call_impl(coordinates, features)

     def forward(
-        self, xyz: torch.Tensor, points: torch.Tensor
+        self, xyz: Tensor, points: Tensor
     ) -> PointNetSetAbstractionOut:
         """Pointnet++ set abstraction layer forward.

@@ -327,11 +325,11 @@ def __init__(

     def __call__(
         self,
-        xyz1: torch.Tensor,
-        xyz2: torch.Tensor,
-        points1: torch.Tensor | None,
-        points2: torch.Tensor,
-    ) -> torch.Tensor:
+        xyz1: Tensor,
+        xyz2: Tensor,
+        points1: Tensor | None,
+        points2: Tensor,
+    ) -> Tensor:
         """Call function.

         Input:
@@ -347,11 +345,11 @@ def __call__(

     def forward(
         self,
-        xyz1: torch.Tensor,
-        xyz2: torch.Tensor,
-        points1: torch.Tensor | None,
-        points2: torch.Tensor,
-    ) -> torch.Tensor:
+        xyz1: Tensor,
+        xyz2: Tensor,
+        points1: Tensor | None,
+        points2: Tensor,
+    ) -> Tensor:
         """Forward Implementation.

         Input:
@@ -377,7 +375,7 @@ def forward(
         dists, idx = dists.sort(dim=-1)
         dists, idx = dists[:, :, :3], idx[:, :, :3]  # [B, N, 3]

-        dist_recip: Tensor = 1.0 / (dists + 1e-8)  # type: ignore
+        dist_recip: Tensor = 1.0 / (dists + 1e-8)
         norm = torch.sum(dist_recip, dim=2, keepdim=True)
         weight = dist_recip / norm
         interpolated_points = torch.sum(
@@ -387,9 +385,7 @@ def forward(

         if points1 is not None:
             points1 = points1.permute(0, 2, 1)
-            new_points = torch.cat(
-                [points1, interpolated_points], dim=-1  # type: ignore
-            )
+            new_points = torch.cat([points1, interpolated_points], dim=-1)
         else:
             new_points = interpolated_points

@@ -403,7 +399,7 @@ class PointNet2SegmentationOut(NamedTuple):
     """Prediction for the pointnet++ semantic segmentation network."""

-    class_logits: torch.Tensor
+    class_logits: Tensor


 class PointNet2Segmentation(nn.Module):  # TODO, probably move to module?
@@ -445,7 +441,7 @@ def __init__(self, num_classes: int, in_channels: int = 3):
         self.conv2 = nn.Conv1d(128, num_classes, 1)
         self.in_channels = in_channels

-    def __call__(self, xyz: torch.Tensor) -> PointNet2SegmentationOut:
+    def __call__(self, xyz: Tensor) -> PointNet2SegmentationOut:
         """Call implementation.

         Args:
@@ -456,7 +452,7 @@ def __call__(self, xyz: torch.Tensor) -> PointNet2SegmentationOut:
         """
         return self._call_impl(xyz)

-    def forward(self, xyz: torch.Tensor) -> PointNet2SegmentationOut:
+    def forward(self, xyz: Tensor) -> PointNet2SegmentationOut:
         """Predicts the semantic class logits for each point.

         Args:
diff --git a/vis4d/op/box/encoder/qd_3dt.py b/vis4d/op/box/encoder/qd_3dt.py
index 19b08e01..7258f10b 100644
--- a/vis4d/op/box/encoder/qd_3dt.py
+++ b/vis4d/op/box/encoder/qd_3dt.py
@@ -82,7 +82,7 @@ def __call__(
             2 * np.pi / self.num_rotation_bins,
             device=alpha.device,
         )
-        bin_centers += np.pi / self.num_rotation_bins  # type: ignore
+        bin_centers += np.pi / self.num_rotation_bins
         for i in range(alpha.shape[0]):
             overlap_value = (
                 np.pi * 2 / self.num_rotation_bins * self.bin_overlap
diff --git a/vis4d/op/box/matchers/sim_ota.py b/vis4d/op/box/matchers/sim_ota.py
index e56c11bd..d940ce12 100644
--- a/vis4d/op/box/matchers/sim_ota.py
+++ b/vis4d/op/box/matchers/sim_ota.py
@@ -116,7 +116,7 @@ def forward(  # pylint: disable=arguments-differ  # type: ignore[override]
         valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1)

         # disable AMP autocast and calculate BCE with FP32 to avoid overflow
-        with torch.cuda.amp.autocast(enabled=False):  # type: ignore[attr-defined] # pylint: disable=line-too-long
+        with torch.cuda.amp.autocast(enabled=False):
             cls_cost = (
                 F.binary_cross_entropy(
                     valid_pred_scores.to(dtype=torch.float32),
@@ -216,7 +216,7 @@ def dynamic_k_matching(
         for gt_idx in range(num_gt):
             _, pos_idx = torch.topk(
                 cost[:, gt_idx],
-                k=dynamic_ks[gt_idx].item(),
+                k=dynamic_ks[gt_idx].item(),  # type: ignore
                 largest=False,
             )
             matching_matrix[:, gt_idx][pos_idx] = 1
diff --git a/vis4d/op/box/poolers/utils.py b/vis4d/op/box/poolers/utils.py
index 457d3e38..9dcb5f74 100644
--- a/vis4d/op/box/poolers/utils.py
+++ b/vis4d/op/box/poolers/utils.py
@@ -35,7 +35,7 @@ def assign_boxes_to_levels(
     )
     # Eqn.(1) in FPN paper
     level_assignments = torch.floor(
-        canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)  # type: ignore # pylint: disable=line-too-long
+        canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)
     )
     # clamp level to (min, max), in case the box size is too large or too small
     # for the available feature maps
diff --git a/vis4d/op/box/samplers/combined.py b/vis4d/op/box/samplers/combined.py
index 87fe02b0..e94ea007 100644
--- a/vis4d/op/box/samplers/combined.py
+++ b/vis4d/op/box/samplers/combined.py
@@ -136,9 +136,9 @@ def forward(self, matching: MatchResult) -> SamplingResult:
         """Sample boxes according to strategies defined in cfg."""
         pos_sample_size = int(self.batch_size * self.positive_fraction)

-        positive_mask: Tensor = (  # type:ignore
-            matching.assigned_labels != -1
-        ) & (matching.assigned_labels != self.bg_label)
+        positive_mask: Tensor = (matching.assigned_labels != -1) & (
+            matching.assigned_labels != self.bg_label
+        )
         negative_mask = torch.eq(matching.assigned_labels, self.bg_label)

         positive = positive_mask.nonzero()[:, 0]
diff --git a/vis4d/op/box/samplers/pseudo.py b/vis4d/op/box/samplers/pseudo.py
index a816cc00..e6e22431 100644
--- a/vis4d/op/box/samplers/pseudo.py
+++ b/vis4d/op/box/samplers/pseudo.py
@@ -30,6 +30,6 @@ def _sample_labels(
         labels: torch.Tensor,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """Randomly sample indices from given labels."""
-        positive = ((labels != -1) & (labels != 0)).nonzero()[:, 0]  # type: ignore # pylint: disable=line-too-long
+        positive = ((labels != -1) & (labels != 0)).nonzero()[:, 0]
         negative = torch.eq(labels, 0).nonzero()[:, 0]
         return positive, negative
diff --git a/vis4d/op/box/samplers/random.py b/vis4d/op/box/samplers/random.py
index de04e93a..6e181def 100644
--- a/vis4d/op/box/samplers/random.py
+++ b/vis4d/op/box/samplers/random.py
@@ -40,7 +40,7 @@ def _sample_labels(
         self, labels: torch.Tensor
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """Randomly sample indices from given labels."""
-        positive = ((labels != -1) & (labels != self.bg_label)).nonzero()[:, 0]  # type: ignore # pylint: disable=line-too-long
+        positive = ((labels != -1) & (labels != self.bg_label)).nonzero()[:, 0]
         negative = torch.eq(labels, self.bg_label).nonzero()[:, 0]

         num_pos = int(self.batch_size * self.positive_fraction)
diff --git a/vis4d/op/detect/retinanet.py b/vis4d/op/detect/retinanet.py
index c9389a7c..28e847c5 100644
--- a/vis4d/op/detect/retinanet.py
+++ b/vis4d/op/detect/retinanet.py
@@ -312,7 +312,9 @@ def forward(
         # since feature map sizes of all images are the same, we only compute
         # anchors for one time
         device = cls_outs[0].device
-        featmap_sizes = [featmap.size()[-2:] for featmap in cls_outs]
+        featmap_sizes: list[tuple[int, int]] = [
+            featmap.size()[-2:] for featmap in cls_outs  # type: ignore
+        ]
         assert len(featmap_sizes) == self.anchor_generator.num_levels
         anchor_grids = self.anchor_generator.grid_priors(
             featmap_sizes, device=device
diff --git a/vis4d/op/detect/rpn.py b/vis4d/op/detect/rpn.py
index 67099483..ab81fb38 100644
--- a/vis4d/op/detect/rpn.py
+++ b/vis4d/op/detect/rpn.py
@@ -302,7 +302,9 @@ def forward(
         # since feature map sizes of all images are the same, we only compute
         # anchors for one time
         device = class_outs[0].device
-        featmap_sizes = [featmap.size()[-2:] for featmap in class_outs]
+        featmap_sizes: list[tuple[int, int]] = [
+            featmap.size()[-2:] for featmap in class_outs  # type: ignore
+        ]
         assert len(featmap_sizes) == self.anchor_generator.num_levels
         anchor_grids = self.anchor_generator.grid_priors(
             featmap_sizes, device=device
diff --git a/vis4d/op/detect/yolox.py b/vis4d/op/detect/yolox.py
index 8ee5f2b1..e70ebcc8 100644
--- a/vis4d/op/detect/yolox.py
+++ b/vis4d/op/detect/yolox.py
@@ -246,7 +246,7 @@ def bboxes_nms(
     max_scores, labels = torch.max(cls_scores, 1)
     valid_mask = objectness * max_scores >= score_thr
     valid_idxs = valid_mask.nonzero()[:, 0]
-    num_topk = min(nms_pre, valid_mask.sum())
+    num_topk = min(nms_pre, valid_mask.sum())  # type: ignore

     scores, idxs = (max_scores[valid_mask] * objectness[valid_mask]).sort(
         descending=True
@@ -288,7 +288,7 @@ def preprocess_outputs(
     num_imgs = len(images_hw)
     num_classes = cls_outs[0].shape[1]
     featmap_sizes: list[tuple[int, int]] = [
-        tuple(featmap.size()[-2:]) for featmap in cls_outs
+        tuple(featmap.size()[-2:]) for featmap in cls_outs  # type: ignore
     ]
     assert len(featmap_sizes) == point_generator.num_levels
     mlvl_points = point_generator.grid_priors(
diff --git a/vis4d/op/detect3d/bevformer/encoder.py b/vis4d/op/detect3d/bevformer/encoder.py
index ddc4c045..c0732130 100644
--- a/vis4d/op/detect3d/bevformer/encoder.py
+++ b/vis4d/op/detect3d/bevformer/encoder.py
@@ -303,9 +303,9 @@ def forward(
         batch_size, len_bev, num_bev_level, _ = ref_2d.shape
         if prev_bev is not None:
             prev_bev = prev_bev.permute(1, 0, 2)
-            prev_bev = torch.stack(
-                [prev_bev, bev_query], 1  # type: ignore
-            ).reshape(batch_size * 2, len_bev, -1)
+            prev_bev = torch.stack([prev_bev, bev_query], 1).reshape(
+                batch_size * 2, len_bev, -1
+            )
             hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape(
                 batch_size * 2, len_bev, num_bev_level, 2
             )
diff --git a/vis4d/op/detect3d/bevformer/temporal_self_attention.py b/vis4d/op/detect3d/bevformer/temporal_self_attention.py
index 076bdff4..00fafdea 100644
--- a/vis4d/op/detect3d/bevformer/temporal_self_attention.py
+++ b/vis4d/op/detect3d/bevformer/temporal_self_attention.py
@@ -156,18 +156,18 @@ def forward(
             value = value.permute(1, 0, 2)

         bs, num_query, embed_dims = query.shape
-        _, num_value, _ = value.shape  # type: ignore
+        _, num_value, _ = value.shape
         assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
         assert self.num_bev_queue == 2

-        query = torch.cat([value[:bs], query], -1)  # type: ignore
+        query = torch.cat([value[:bs], query], -1)
         value = self.value_proj(value)
         assert isinstance(value, Tensor)

         if key_padding_mask is not None:
             value = value.masked_fill(key_padding_mask[..., None], 0.0)

-        value = value.reshape(  # type: ignore
+        value = value.reshape(
             bs * self.num_bev_queue, num_value, self.num_heads, -1
         )

@@ -246,7 +246,7 @@ def forward(
                 f" 2 or 4, but get {reference_points.shape[-1]} instead."
             )

-        if torch.cuda.is_available() and value.is_cuda:  # type: ignore
+        if torch.cuda.is_available() and value.is_cuda:
             output = MSDeformAttentionFunction.apply(
                 value,
                 spatial_shapes,
@@ -257,7 +257,7 @@ def forward(
             )
         else:
             output = ms_deformable_attention_cpu(
-                value,  # type: ignore
+                value,
                 spatial_shapes,
                 sampling_locations,
                 attention_weights,
diff --git a/vis4d/op/detect3d/bevformer/transformer.py b/vis4d/op/detect3d/bevformer/transformer.py
index 4e3b324e..489c713f 100644
--- a/vis4d/op/detect3d/bevformer/transformer.py
+++ b/vis4d/op/detect3d/bevformer/transformer.py
@@ -120,9 +120,7 @@ def get_bev_features(
             for i in range(batch_size):
                 rotation_angle = float(can_bus[i][-1])
                 tmp_prev_bev = (
-                    prev_bev[:, i]  # type: ignore
-                    .reshape(bev_h, bev_w, -1)
-                    .permute(2, 0, 1)
+                    prev_bev[:, i].reshape(bev_h, bev_w, -1).permute(2, 0, 1)
                 )
                 tmp_prev_bev = rotate(
                     tmp_prev_bev, rotation_angle, center=self.rotate_center
@@ -130,7 +128,7 @@ def get_bev_features(
                 tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape(
                     bev_h * bev_w, 1, -1
                 )
-                prev_bev[:, i] = tmp_prev_bev[:, 0]  # type: ignore
+                prev_bev[:, i] = tmp_prev_bev[:, 0]

         # add can bus signals
         bev_queries = bev_queries + self.can_bus_mlp(can_bus)[None, :, :]
diff --git a/vis4d/op/detect3d/qd_3dt.py b/vis4d/op/detect3d/qd_3dt.py
index c857d4c9..99a7e704 100644
--- a/vis4d/op/detect3d/qd_3dt.py
+++ b/vis4d/op/detect3d/qd_3dt.py
@@ -677,7 +677,7 @@ def forward(
         pos_depth_self_labels = torch.exp(
             -torch.mul(torch.abs(pred[:, 2] - target[:, 2]), 5.0)
         )
-        pos_depth_self_weights = torch.where(  # type: ignore
+        pos_depth_self_weights = torch.where(
             pos_depth_self_labels > 0.8,
             pos_depth_self_labels.new_ones(1) * 5.0,
             pos_depth_self_labels.new_ones(1) * 0.1,
diff --git a/vis4d/op/geometry/rotation.py b/vis4d/op/geometry/rotation.py
index 036b8873..07554514 100644
--- a/vis4d/op/geometry/rotation.py
+++ b/vis4d/op/geometry/rotation.py
@@ -26,18 +26,18 @@ def acute_angle(theta_1: Tensor, theta_2: Tensor) -> Tensor:
     """Update theta_1 to mkae the agnle between two thetas is acute."""
     # Make sure the angle between two thetas is acute
     if torch.pi / 2.0 < abs(theta_2 - theta_1) < torch.pi * 3 / 2.0:
-        theta_1 += torch.pi  # type: ignore
+        theta_1 += torch.pi
         if theta_1 > torch.pi:
-            theta_1 -= torch.pi * 2  # type: ignore
+            theta_1 -= torch.pi * 2
         if theta_1 < -torch.pi:
-            theta_1 += torch.pi * 2  # type: ignore
+            theta_1 += torch.pi * 2

     # Convert the case of > 270 to < 90
     if abs(theta_2 - theta_1) >= torch.pi * 3 / 2.0:
         if theta_2 > 0:
-            theta_1 += torch.pi * 2  # type: ignore
+            theta_1 += torch.pi * 2
         else:
-            theta_1 -= torch.pi * 2  # type: ignore
+            theta_1 -= torch.pi * 2

     return theta_1

@@ -89,7 +89,7 @@ def rotation_output_to_alpha(output: Tensor, num_bins: int = 2) -> Tensor:
     bin_centers = torch.arange(
         -torch.pi, torch.pi, 2 * torch.pi / num_bins, device=output.device
     )
-    bin_centers += torch.pi / num_bins  # type: ignore
+    bin_centers += torch.pi / num_bins
     alpha = (
         torch.atan(output[out_range, res_idx] / output[out_range, res_idx + 1])
         + bin_centers[bin_idx]
@@ -355,7 +355,7 @@ def matrix_to_quaternion(matrix: Tensor) -> Tensor:
     q_abs = _sqrt_positive_part(
         torch.stack(
             [
-                1.0 + m00 + m11 + m22,  # type: ignore
+                1.0 + m00 + m11 + m22,
                 1.0 + m00 - m11 - m22,
                 1.0 - m00 + m11 - m22,
                 1.0 - m00 - m11 + m22,
diff --git a/vis4d/op/layer/attention.py b/vis4d/op/layer/attention.py
index 6f2af20f..ab60eed9 100644
--- a/vis4d/op/layer/attention.py
+++ b/vis4d/op/layer/attention.py
@@ -210,7 +210,7 @@ def forward(
         # from num_query_first to batch_first.
         if self.batch_first:
             query = query.transpose(0, 1)
-            key = key.transpose(0, 1)  # type: ignore
+            key = key.transpose(0, 1)
             value = value.transpose(0, 1)

         out = self.attn(
diff --git a/vis4d/op/layer/ms_deform_attn.py b/vis4d/op/layer/ms_deform_attn.py
index e1bd77b2..2d16f70c 100644
--- a/vis4d/op/layer/ms_deform_attn.py
+++ b/vis4d/op/layer/ms_deform_attn.py
@@ -131,7 +131,7 @@ def ms_deformable_attention_cpu(
         _,
     ) = sampling_locations.shape
     value_list = value.split([h * w for h, w in value_spatial_shapes], dim=1)
-    sampling_grids: Tensor = 2 * sampling_locations - 1  # type: ignore
+    sampling_grids: Tensor = 2 * sampling_locations - 1
     sampling_value_list = []
     for level, (h, w) in enumerate(value_spatial_shapes):
         # bs, h*w, num_heads, embed_dims ->
diff --git a/vis4d/op/layer/transformer.py b/vis4d/op/layer/transformer.py
index f8482f56..614cd825 100644
--- a/vis4d/op/layer/transformer.py
+++ b/vis4d/op/layer/transformer.py
@@ -67,9 +67,7 @@ def __init__(
         }, "data_format could only be channels_last or channels_first."
         self.inplace = inplace
         self.data_format = data_format
-        self.gamma = nn.Parameter(
-            init_values * torch.ones(dim)  # type: ignore
-        )
+        self.gamma = nn.Parameter(init_values * torch.ones(dim))

     def forward(self, x: torch.Tensor) -> torch.Tensor:
         """Forward pass."""
diff --git a/vis4d/op/loss/common.py b/vis4d/op/loss/common.py
index 20ee1def..991947c6 100644
--- a/vis4d/op/loss/common.py
+++ b/vis4d/op/loss/common.py
@@ -34,7 +34,7 @@ def smooth_l1_loss(
     assert beta > 0
     assert pred.size() == target.size() and target.numel() > 0
     diff = torch.abs(pred - target)
-    loss = torch.where(  # type: ignore
+    loss = torch.where(
         diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta
     )
     return reducer(loss)
diff --git a/vis4d/op/mask/util.py b/vis4d/op/mask/util.py
index 1944653a..4c7018e7 100644
--- a/vis4d/op/mask/util.py
+++ b/vis4d/op/mask/util.py
@@ -63,10 +63,10 @@ def _do_paste_mask(

     num_masks = masks.shape[0]
     img_y: Tensor = (
-        torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5  # type: ignore # pylint: disable=line-too-long
+        torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5
     )
     img_x: Tensor = (
-        torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5  # type: ignore # pylint: disable=line-too-long
+        torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5
     )
     img_y = (img_y - y0) / (y1 - y0) * 2 - 1  # (N, h)
     img_x = (img_x - x0) / (x1 - x0) * 2 - 1  # (N, w)
diff --git a/vis4d/op/track/qdtrack.py b/vis4d/op/track/qdtrack.py
index b7dca4e3..480161b9 100644
--- a/vis4d/op/track/qdtrack.py
+++ b/vis4d/op/track/qdtrack.py
@@ -548,7 +548,7 @@ def forward(
         """
         if sum(len(e) for e in key_embeddings) == 0:  # pragma: no cover
             dummy_loss = sum(e.sum() * 0.0 for e in key_embeddings)
-            return QDTrackInstanceSimilarityLosses(dummy_loss, dummy_loss)
+            return QDTrackInstanceSimilarityLosses(dummy_loss, dummy_loss)  # type: ignore # pylint: disable=line-too-long

         loss_track = torch.tensor(0.0, device=key_embeddings[0].device)
         loss_track_aux = torch.tensor(0.0, device=key_embeddings[0].device)
diff --git a/vis4d/state/track/qdtrack.py b/vis4d/state/track/qdtrack.py
index cf77452b..fd27ce3a 100644
--- a/vis4d/state/track/qdtrack.py
+++ b/vis4d/state/track/qdtrack.py
@@ -301,7 +301,7 @@ def update_track(
         self.tracklets[track_id]["box"] = box
         self.tracklets[track_id]["score"] = score
         self.tracklets[track_id]["class_id"] = class_id
-        self.tracklets[track_id]["embed"] = (  # type: ignore
+        self.tracklets[track_id]["embed"] = (
             1 - self.memory_momentum
         ) * self.tracklets[track_id][
             "embed"
diff --git a/vis4d/state/track3d/cc_3dt.py b/vis4d/state/track3d/cc_3dt.py
index cc58f5dc..e2509ddd 100644
--- a/vis4d/state/track3d/cc_3dt.py
+++ b/vis4d/state/track3d/cc_3dt.py
@@ -498,7 +498,7 @@ def update_track(
         self.tracklets[track_id]["score_3d"] = score_3d
         self.tracklets[track_id]["class_id"] = class_id

-        self.tracklets[track_id]["embed"] = (  # type: ignore
+        self.tracklets[track_id]["embed"] = (
             1 - self.memory_momentum
         ) * self.tracklets[track_id]["embed"] + self.memory_momentum * embed
diff --git a/vis4d/vis/image/bbox3d_visualizer.py b/vis4d/vis/image/bbox3d_visualizer.py
index 7d64fe4e..74f4a7f3 100644
--- a/vis4d/vis/image/bbox3d_visualizer.py
+++ b/vis4d/vis/image/bbox3d_visualizer.py
@@ -166,8 +166,8 @@ class ids each of shape [B, N]. Defaults to None.
                 boxes3d[batch],
                 intrinsics[batch],  # type: ignore
                 (
-                    None if extrinsics is None else extrinsics[batch]
-                ),  # type: ignore
+                    None if extrinsics is None else extrinsics[batch]  # type: ignore # pylint: disable=line-too-long
+                ),
                 None if scores is None else scores[batch],
                 None if class_ids is None else class_ids[batch],
                 None if track_ids is None else track_ids[batch],
@@ -405,8 +405,8 @@ class ids each of shape [B, N]. Defaults to None.
                     (
                         None
                         if extrinsics is None
-                        else extrinsics[idx][batch]
-                    ),  # type: ignore
+                        else extrinsics[idx][batch]  # type: ignore
+                    ),
                     None if scores is None else scores[batch],
                     None if class_ids is None else class_ids[batch],
                     None if track_ids is None else track_ids[batch],
diff --git a/vis4d/vis/image/canvas/pillow_backend.py b/vis4d/vis/image/canvas/pillow_backend.py
index 7bffea98..0dbb7c70 100644
--- a/vis4d/vis/image/canvas/pillow_backend.py
+++ b/vis4d/vis/image/canvas/pillow_backend.py
@@ -86,7 +86,9 @@ def draw_bitmap(
         bitmap_pil = Image.fromarray(
             bitmap_with_alpha.astype(np.uint8), mode="RGBA"
         )
-        self._image_draw.bitmap(top_left_corner, bitmap_pil, fill=color)
+        self._image_draw.bitmap(
+            top_left_corner, bitmap_pil, fill=color  # type: ignore
+        )

     def draw_text(
         self,
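Note on the loss_module.py hunk above: LossModule.forward now assumes the loss function always returns a dict of tensors, logs every entry as a float metric, and sums only the non-excluded entries into the total loss. A minimal standalone sketch of that aggregation logic, not the vis4d implementation itself; the helper name aggregate_losses and the example values are illustrative only:

import torch
from torch import Tensor


def aggregate_losses(
    loss_dict: dict[str, Tensor], exclude: list[str] | None = None
) -> tuple[Tensor, dict[str, float]]:
    """Log every loss as a metric, sum the non-excluded ones into the total."""
    metrics = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
    # Keep only the losses that should contribute to the optimized total.
    kept = [v for k, v in loss_dict.items() if exclude is None or k not in exclude]
    total_loss = torch.stack(kept).sum()  # assumes at least one kept loss
    metrics["loss"] = total_loss.detach().cpu().item()
    return total_loss, metrics


# "aux" is still reported in the metrics but excluded from the total (0.8).
total, metrics = aggregate_losses(
    {"cls": torch.tensor(0.5), "box": torch.tensor(0.3), "aux": torch.tensor(0.1)},
    exclude=["aux"],
)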
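Similarly, the ema.py hunks only drop type-checker suppressions; the exponential-moving-average update itself is unchanged. A hedged sketch of that update rule for a single pair of parameter tensors, with illustrative values (decay=0.999, ema_param, model_param) that are not taken from the patch:

import torch

decay = 0.999  # illustrative; one vis4d variant derives the decay from the step count
ema_param = torch.zeros(3)
model_param = torch.ones(3)

# Same form as the update_fn lambdas in the diff: new_ema = d * ema + (1 - d) * model.
ema_param = decay * ema_param + (1.0 - decay) * model_param
print(ema_param)  # tensor([0.0010, 0.0010, 0.0010])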