-
Notifications
You must be signed in to change notification settings - Fork 4
/
eval_PanopticSeg_NPM3D.py
195 lines (165 loc) · 6.75 KB
/
eval_PanopticSeg_NPM3D.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
import os
import numpy as np
from scipy import stats
from torch_points3d.models.panoptic.ply import read_ply, write_ply
from plyfile import PlyData, PlyElement
NUM_CLASSES = 10
# Number of classes that actually count toward the semantic mean metrics
# (class 0 is excluded).
NUM_CLASSES_count = 9
# Class indices evaluated for instance segmentation.
ins_classcount = [3, 4, 5, 7, 8, 9]
# Class indices evaluated for semantic segmentation (all but class 0).
sem_classcount = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Log directory holding the prediction PLY files produced by the eval run.
file_path = '/scratch2/torch-points3d/outputs/2021-10-20/06-19-43/eval/2021-10-26_14-27-55/'
# Predicted semantic segmentation file path.
pred_class_label_filename = file_path + 'Semantic_results_forEval.ply'
# Predicted instance segmentation file path.
pred_ins_label_filename = file_path + 'Instance_Offset_results_forEval.ply'
# Append evaluation output to a text log next to the predictions.
# BUG FIX: pass directory and filename as separate arguments to os.path.join
# instead of pre-concatenating them into a single argument (which made the
# join call a no-op).
LOG_FOUT = open(os.path.join(file_path, 'evaluation.txt'), 'a')
def log_string(out_str):
    """Write *out_str* (plus a newline) to the evaluation log and echo it to stdout."""
    LOG_FOUT.write(f"{out_str}\n")
    LOG_FOUT.flush()  # keep the log current even if the script dies mid-run
    print(out_str)
# Per-class accumulators for overall / mean semantic accuracy.
true_positive_classes = np.zeros(NUM_CLASSES)
positive_classes = np.zeros(NUM_CLASSES)
gt_classes = np.zeros(NUM_CLASSES)
# Per-class accumulators for instance precision & recall.
total_gt_ins = np.zeros(NUM_CLASSES)
# IoU threshold above which a predicted instance counts as a true positive.
at = 0.5
tpsins = [[] for itmp in range(NUM_CLASSES)]
fpsins = [[] for itmp in range(NUM_CLASSES)]
# Summed IoU of true-positive instances per class (used later for SQ).
IoU_Tp = np.zeros(NUM_CLASSES)
# Per-class coverage accumulators (mUCov and mWCov).
all_mean_cov = [[] for itmp in range(NUM_CLASSES)]
all_mean_weighted_cov = [[] for itmp in range(NUM_CLASSES)]
# Read predicted and ground-truth labels from the PLY files.
data_class = PlyData.read(pred_class_label_filename)
data_ins = PlyData.read(pred_ins_label_filename)
# BUG FIX: the np.int alias was removed in NumPy 1.24; use the explicit
# np.int64 dtype instead. Semantic labels are shifted by +1 so that the
# ignored label becomes class 0.
pred_ins_complete = data_ins['vertex']['preds'].reshape(-1).astype(np.int64)
pred_sem_complete = data_class['vertex']['preds'].reshape(-1).astype(np.int64) + 1
gt_ins_complete = data_ins['vertex']['gt'].reshape(-1).astype(np.int64)
gt_sem_complete = data_class['vertex']['gt'].reshape(-1).astype(np.int64) + 1
# Keep only points whose GT or predicted semantic class is one of the
# instance-bearing classes (drops 0, 1, 2 and 6 — presumably the
# unlabelled/ground classes of NPM3D; confirm against the dataset mapping).
idxc = ((gt_sem_complete != 0) & (gt_sem_complete != 1) & (gt_sem_complete != 2) & (gt_sem_complete != 6)) | ((pred_sem_complete != 0) & (pred_sem_complete != 1) & (pred_sem_complete != 2) & (pred_sem_complete != 6))
pred_ins = pred_ins_complete[idxc]
gt_ins = gt_ins_complete[idxc]
pred_sem = pred_sem_complete[idxc]
gt_sem = gt_sem_complete[idxc]
# Accumulate per-class semantic confusion counts over every point
# (evaluated on the complete cloud, before the instance-class filtering).
# Idiomatic pairwise iteration replaces the original index loop.
for gt_l, pred_l in zip(gt_sem_complete, pred_sem_complete):
    gt_l = int(gt_l)
    pred_l = int(pred_l)
    gt_classes[gt_l] += 1           # ground-truth support of each class
    positive_classes[pred_l] += 1   # prediction count of each class
    true_positive_classes[gt_l] += int(gt_l == pred_l)
# Per-class semantic IoU: TP / (GT + Pred - TP).
iou_list = []
for i in range(NUM_CLASSES):
    denom = float(gt_classes[i] + positive_classes[i] - true_positive_classes[i])
    # BUG FIX: guard against classes absent from both GT and predictions;
    # the original 0/0 produced NaN and poisoned the mean IoU.
    iou = true_positive_classes[i] / denom if denom > 0 else 0.0
    iou_list.append(iou)
log_string('Semantic Segmentation oAcc: {}'.format(sum(true_positive_classes) / float(sum(positive_classes))))
log_string('Semantic Segmentation mAcc: {}'.format(np.mean(true_positive_classes[sem_classcount] / gt_classes[sem_classcount])))
log_string('Semantic Segmentation IoU: {}'.format(iou_list))
# BUG FIX: average only over the evaluated classes; the original summed all
# NUM_CLASSES entries (including the ignored class 0) yet divided by
# NUM_CLASSES_count (9).
log_string('Semantic Segmentation mIoU: {}'.format(1. * sum(iou_list[i] for i in sem_classcount) / NUM_CLASSES_count))
def _collect_instance_masks(ins_labels, sem_labels):
    """Bucket each instance's boolean point mask under its majority semantic class.

    Instances labelled -1 (no instance) are skipped. Returns a list of
    NUM_CLASSES lists of masks.
    """
    buckets = [[] for _ in range(NUM_CLASSES)]
    for g in np.unique(ins_labels):
        if g == -1:
            continue
        mask = (ins_labels == g)
        majority_class = int(stats.mode(sem_labels[mask])[0])
        buckets[majority_class] += [mask]
    return buckets


# Per-class instance masks for predictions and ground truth.
pts_in_pred = _collect_instance_masks(pred_ins, pred_sem)
pts_in_gt = _collect_instance_masks(gt_ins, gt_sem)
# Instance coverage (mUCov / mWCov): for every ground-truth instance, find
# the best-overlapping predicted instance of the same semantic class.
for i_sem in range(NUM_CLASSES):
    sum_cov = 0
    mean_cov = 0
    mean_weighted_cov = 0
    num_gt_point = 0
    for ins_gt in pts_in_gt[i_sem]:
        ovmax = 0.
        num_ins_gt_point = np.sum(ins_gt)
        num_gt_point += num_ins_gt_point
        for ins_pred in pts_in_pred[i_sem]:
            # IoU between one predicted and one GT instance mask.
            union = (ins_pred | ins_gt)
            intersect = (ins_pred & ins_gt)
            iou = float(np.sum(intersect)) / np.sum(union)
            # CLEANUP: removed the unused `ipmax` bookkeeping.
            ovmax = max(ovmax, iou)
        sum_cov += ovmax       # unweighted coverage contribution
        mean_weighted_cov += ovmax * num_ins_gt_point  # size-weighted
    if len(pts_in_gt[i_sem]) != 0:
        mean_cov = sum_cov / len(pts_in_gt[i_sem])
        all_mean_cov[i_sem].append(mean_cov)
        mean_weighted_cov /= num_gt_point
        all_mean_weighted_cov[i_sem].append(mean_weighted_cov)
# Instance precision/recall at IoU threshold `at`: match each predicted
# instance to its best-overlapping GT instance of the same class.
# NOTE(review): a single GT instance may be matched by several predictions —
# the original allocated a `gtflag` array for exclusive matching but never
# used it, so that unused local is removed here.
for i_sem in range(NUM_CLASSES):
    IoU_Tp_per = 0
    tp = [0.] * len(pts_in_pred[i_sem])
    fp = [0.] * len(pts_in_pred[i_sem])
    total_gt_ins[i_sem] += len(pts_in_gt[i_sem])
    for ip, ins_pred in enumerate(pts_in_pred[i_sem]):
        ovmax = -1.
        for ins_gt in pts_in_gt[i_sem]:
            union = (ins_pred | ins_gt)
            intersect = (ins_pred & ins_gt)
            iou = float(np.sum(intersect)) / np.sum(union)
            # CLEANUP: removed the unused `igmax` bookkeeping.
            ovmax = max(ovmax, iou)
        if ovmax >= at:
            tp[ip] = 1            # true positive
            IoU_Tp_per += ovmax   # accumulate matched IoU (used for SQ)
        else:
            fp[ip] = 1            # false positive
    tpsins[i_sem] += tp
    fpsins[i_sem] += fp
    IoU_Tp[i_sem] = IoU_Tp_per
# Reduce the per-class coverage accumulators to their per-class means.
MUCov = np.array([np.mean(per_class) for per_class in all_mean_cov])
MWCov = np.array([np.mean(per_class) for per_class in all_mean_weighted_cov])
# Panoptic-quality style metrics per class.
precision = np.zeros(NUM_CLASSES)
recall = np.zeros(NUM_CLASSES)
RQ = np.zeros(NUM_CLASSES)
SQ = np.zeros(NUM_CLASSES)
PQ = np.zeros(NUM_CLASSES)
for i_sem in range(NUM_CLASSES):
    # BUG FIX: the np.float alias was removed in NumPy 1.24; use the builtin
    # float dtype instead.
    tp = np.sum(np.asarray(tpsins[i_sem], dtype=float))
    fp = np.sum(np.asarray(fpsins[i_sem], dtype=float))
    rec = tp / total_gt_ins[i_sem]
    prec = tp / (tp + fp)
    precision[i_sem] = prec
    recall[i_sem] = rec
    # Recognition quality: harmonic mean (F1) of precision and recall.
    RQ[i_sem] = 2 * prec * rec / (prec + rec)
    # BUG FIX: was `IoU_Tp[i] / tp` — `i` is a stale index left over from the
    # earlier semantic-IoU loop; the per-class index is i_sem.
    SQ[i_sem] = IoU_Tp[i_sem] / tp
    # BUG FIX: was `PQ[i_sem] = SQ*RQ`, which tries to assign a whole
    # NUM_CLASSES-sized array to a scalar slot and raises at runtime;
    # PQ is the per-class product SQ * RQ.
    PQ[i_sem] = SQ[i_sem] * RQ[i_sem]
# Instance-segmentation results, restricted to the instance classes.
_instance_metrics = [
    ('MUCov', MUCov[ins_classcount]),
    ('mMUCov', np.mean(MUCov[ins_classcount])),
    ('MWCov', MWCov[ins_classcount]),
    ('mMWCov', np.mean(MWCov[ins_classcount])),
    ('Precision', precision[ins_classcount]),
    ('mPrecision', np.mean(precision[ins_classcount])),
    ('Recall', recall[ins_classcount]),
    ('mRecall', np.mean(recall[ins_classcount])),
    ('RQ', RQ[ins_classcount]),
    ('SQ', SQ[ins_classcount]),
    ('PQ', PQ[ins_classcount]),
]
for _name, _value in _instance_metrics:
    log_string('Instance Segmentation {}: {}'.format(_name, _value))