From 7363927eca0e5b6ac5e151b32fa75caa1343dd1b Mon Sep 17 00:00:00 2001
From: Abdelaziz Bouzidi
Date: Fri, 24 Mar 2023 11:51:17 +0100
Subject: [PATCH] Made compatible with torchsparse 2.0

---
 .../Minist_Test/lib/spvcnn_classsification.py |  5 ++-
 LeReS/Minist_Test/lib/spvcnn_utils.py         | 37 +++++++++----------
 LeReS/Minist_Test/lib/test_utils.py           | 32 +++++++++-------
 3 files changed, 39 insertions(+), 35 deletions(-)

diff --git a/LeReS/Minist_Test/lib/spvcnn_classsification.py b/LeReS/Minist_Test/lib/spvcnn_classsification.py
index ebfb64e..3947396 100644
--- a/LeReS/Minist_Test/lib/spvcnn_classsification.py
+++ b/LeReS/Minist_Test/lib/spvcnn_classsification.py
@@ -1,6 +1,7 @@
 import torch.nn as nn
 import torchsparse.nn as spnn
-from torchsparse.point_tensor import PointTensor
+from torchsparse.tensor import PointTensor
+
 from lib.spvcnn_utils import *
 
 __all__ = ['SPVCNN_CLASSIFICATION']
@@ -114,7 +115,7 @@ def __init__(self, **kwargs):
             ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1),
             ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1),
         )
-        self.avg_pool = spnn.GlobalAveragePooling()
+        self.avg_pool = spnn.GlobalAvgPool()
         self.classifier = nn.Sequential(nn.Linear(cs[4], kwargs['num_classes']))
         self.point_transforms = nn.ModuleList([
             nn.Sequential(
diff --git a/LeReS/Minist_Test/lib/spvcnn_utils.py b/LeReS/Minist_Test/lib/spvcnn_utils.py
index 43f16bb..466ed1b 100644
--- a/LeReS/Minist_Test/lib/spvcnn_utils.py
+++ b/LeReS/Minist_Test/lib/spvcnn_utils.py
@@ -1,7 +1,8 @@
 import torchsparse.nn.functional as spf
-from torchsparse.point_tensor import PointTensor
-from torchsparse.utils.kernel_region import *
-from torchsparse.utils.helpers import *
+import torch
+from torchsparse.tensor import PointTensor
+from torchsparse.tensor import SparseTensor
+from torchsparse.nn.utils import get_kernel_offsets
 
 __all__ = ['initial_voxelize', 'point_to_voxel', 'voxel_to_point']
 
@@ -24,7 +25,6 @@ def initial_voxelize(z, init_res, after_res):
 
     inserted_feat = spf.spvoxelize(z.F, idx_query, counts)
     new_tensor = SparseTensor(inserted_feat, inserted_coords, 1)
-    new_tensor.check()
     z.additional_features['idx_query'][1] = idx_query
     z.additional_features['counts'][1] = counts
     z.C = new_float_coord
@@ -34,49 +34,48 @@
 
 # x: SparseTensor, z: PointTensor
 # return: SparseTensor
-def point_to_voxel(x, z):
+def point_to_voxel(x: SparseTensor, z: PointTensor):
     if z.additional_features is None or z.additional_features.get('idx_query') is None\
-        or z.additional_features['idx_query'].get(x.s) is None:
+        or z.additional_features['idx_query'].get(x.s[0]) is None:
         #pc_hash = hash_gpu(torch.floor(z.C).int())
         pc_hash = spf.sphash(
             torch.cat([
-                torch.floor(z.C[:, :3] / x.s).int() * x.s,
+                torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
                 z.C[:, -1].int().view(-1, 1)
             ], 1))
         sparse_hash = spf.sphash(x.C)
        idx_query = spf.sphashquery(pc_hash, sparse_hash)
        counts = spf.spcount(idx_query.int(), x.C.shape[0])
-        z.additional_features['idx_query'][x.s] = idx_query
-        z.additional_features['counts'][x.s] = counts
+        z.additional_features['idx_query'][x.s[0]] = idx_query
+        z.additional_features['counts'][x.s[0]] = counts
     else:
-        idx_query = z.additional_features['idx_query'][x.s]
-        counts = z.additional_features['counts'][x.s]
+        idx_query = z.additional_features['idx_query'][x.s[0]]
+        counts = z.additional_features['counts'][x.s[0]]
 
     inserted_feat = spf.spvoxelize(z.F, idx_query, counts)
     new_tensor = SparseTensor(inserted_feat, x.C, x.s)
-    new_tensor.coord_maps = x.coord_maps
-    new_tensor.kernel_maps = x.kernel_maps
+    new_tensor.cmaps = x.cmaps
+    new_tensor.kmaps = x.kmaps
 
     return new_tensor
 
 
 # x: SparseTensor, z: PointTensor
 # return: PointTensor
-def voxel_to_point(x, z, nearest=False):
+def voxel_to_point(x: SparseTensor, z: PointTensor, nearest=False):
     if z.idx_query is None or z.weights is None or z.idx_query.get(
             x.s) is None or z.weights.get(x.s) is None:
-        kr = KernelRegion(2, x.s, 1)
-        off = kr.get_kernel_offset().to(z.F.device)
+        off = get_kernel_offsets(2, x.s, 1, z.F.device)
         #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)
         old_hash = spf.sphash(
             torch.cat([
-                torch.floor(z.C[:, :3] / x.s).int() * x.s,
+                torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
                 z.C[:, -1].int().view(-1, 1)
             ], 1), off)
         pc_hash = spf.sphash(x.C.to(z.F.device))
         idx_query = spf.sphashquery(old_hash, pc_hash)
         weights = spf.calc_ti_weights(z.C, idx_query,
-                                      scale=x.s).transpose(0, 1).contiguous()
+                                      scale=x.s[0]).transpose(0, 1).contiguous()
         idx_query = idx_query.transpose(0, 1).contiguous()
         if nearest:
             weights[:, 1:] = 0.
diff --git a/LeReS/Minist_Test/lib/test_utils.py b/LeReS/Minist_Test/lib/test_utils.py
index 08a93ed..52b580d 100644
--- a/LeReS/Minist_Test/lib/test_utils.py
+++ b/LeReS/Minist_Test/lib/test_utils.py
@@ -2,8 +2,10 @@
 import numpy as np
 import torch
 from torchsparse import SparseTensor
-from torchsparse.utils import sparse_collate_fn, sparse_quantize
+from torchsparse.utils.collate import sparse_collate_fn
+from torchsparse.utils.quantize import sparse_quantize
 from plyfile import PlyData, PlyElement
+import matplotlib.pyplot as plt
 
 
 def init_image_coor(height, width, u0=None, v0=None):
@@ -35,20 +37,21 @@ def pcd_to_sparsetensor(pcd, mask_valid, voxel_size=0.01, num_points=100000):
     block_ = pcd_valid
     block = np.zeros_like(block_)
     block[:, :3] = block_[:, :3]
-
-    pc_ = np.round(block_[:, :3] / voxel_size)
+
+    pc_ = block_
     pc_ -= pc_.min(0, keepdims=1)
     feat_ = block
-
+    # print(pc_.shape)
+
     # transfer point cloud to voxels
-    inds = sparse_quantize(pc_,
-                           feat_,
+    pc, inds = sparse_quantize(pc_,
+                           voxel_size,
                            return_index=True,
-                           return_invs=False)
+                           return_inverse=False)
     if len(inds) > num_points:
         inds = np.random.choice(inds, num_points, replace=False)
-
-    pc = pc_[inds]
+
+
     feat = feat_[inds]
     lidar = SparseTensor(feat, pc)
     feed_dict = [{'lidar': lidar}]
@@ -67,19 +70,20 @@ def pcd_uv_to_sparsetensor(pcd, u_u0, v_v0, mask_valid, f= 500.0, voxel_size=0.0
     block[:, :] = block_[:, :]
 
-    pc_ = np.round(block_[:, :3] / voxel_size)
+    pc_ = block_[:, :3]
     pc_ -= pc_.min(0, keepdims=1)
     feat_ = block
 
     # transfer point cloud to voxels
-    inds = sparse_quantize(pc_,
-                           feat_,
+    pc, inds = sparse_quantize(pc_,
+                           voxel_size,
                            return_index=True,
-                           return_invs=False)
+                           return_inverse=False)
     if len(inds) > num_points:
         inds = np.random.choice(inds, num_points, replace=False)
-
-    pc = pc_[inds]
+
+
     feat = feat_[inds]
     lidar = SparseTensor(feat, pc)
     feed_dict = [{'lidar': lidar}]
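
For reference, below is a minimal sketch of the torchsparse 2.0 voxelization flow that the patched pcd_to_sparsetensor relies on: sparse_quantize now takes only the coordinates plus a voxel size and returns the quantized coordinates together with the selection indices, which is why the patch unpacks pc, inds. Everything here that is not in the patch is a placeholder assumption (the random point cloud, the voxel_size and num_points values); note that the sketch indexes coordinates and features together when subsampling so they stay aligned.

import numpy as np
import torch
from torchsparse import SparseTensor
from torchsparse.utils.collate import sparse_collate_fn
from torchsparse.utils.quantize import sparse_quantize

voxel_size = 0.01                                   # placeholder, same default as pcd_to_sparsetensor
num_points = 100000                                 # placeholder cap on kept voxels
pcd = np.random.rand(4096, 3).astype(np.float32)    # placeholder (x, y, z) point cloud

coords = pcd - pcd.min(0, keepdims=True)            # shift coordinates into the positive octant
coords, inds = sparse_quantize(coords, voxel_size,  # integer voxel coords + indices of kept points
                               return_index=True)
feats = pcd[inds]                                   # one feature row per kept voxel

if len(inds) > num_points:                          # subsample coords and feats together
    keep = np.random.choice(len(inds), num_points, replace=False)
    coords, feats = coords[keep], feats[keep]

lidar = SparseTensor(torch.from_numpy(feats).float(),
                     torch.from_numpy(coords).int())
batch = sparse_collate_fn([{'lidar': lidar}])       # collates and adds the batch index to coords
inputs = batch['lidar']                             # SparseTensor ready to feed SPVCNN_CLASSIFICATION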