update supervised
chao chen committed Jan 20, 2022
1 parent aa6a928 commit f99caf3
Showing 4 changed files with 32 additions and 26 deletions.
evaluate.py (23 changes: 13 additions & 10 deletions)

@@ -4,7 +4,7 @@
 import socket
 import importlib
 import os
-os.environ["CUDA_VISIBLE_DEVICES"] = "2"
+os.environ["CUDA_VISIBLE_DEVICES"] = "3"
 import sys
 import torch
 import torch.nn as nn
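
Aside: CUDA_VISIBLE_DEVICES only takes effect if it is set before CUDA is initialized, which is why the assignment sits between import os and import torch. A minimal sketch of the same selection (the printed 1 assumes a machine where GPU 3 actually exists):

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"   # must come before torch initializes CUDA
import torch
print(torch.cuda.device_count())           # 1: only GPU 3 is visible to torch

The same effect without editing the source: CUDA_VISIBLE_DEVICES=3 python evaluate.py
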
@@ -74,9 +74,9 @@ def evaluate_model(model,optimizer,epoch,save=False):
     if not os.path.exists(cfg.RESULTS_FOLDER):
         os.mkdir(cfg.RESULTS_FOLDER)

-    recall_1 = np.zeros(25)
-    recall_5 = np.zeros(25)
-    recall_10 = np.zeros(25)
+    recall_1 = np.zeros(20)
+    recall_5 = np.zeros(102)
+    recall_10 = np.zeros(205)
     count = 0

     similarity_1 = []
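
The new sizes line up with the top-N% thresholds for a 2048-entry database, which the commented "indices = indices + n*2048" later in this diff suggests is the per-run database size (an inference, not stated in the commit). A worked check:

# Assumes 2048 database entries per run; prints 20, 102, 205,
# matching np.zeros(20), np.zeros(102), np.zeros(205) above.
for percent in [100, 20, 10]:
    print(max(int(round(2048 / percent)), 1))
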
@@ -242,20 +242,19 @@ def get_recall(m, n, DATABASE_VECTORS, QUERY_VECTORS, QUERY_SETS):

     percent_array = [100, 20, 10]
     for percent in percent_array:
-        recall_N = [0] * num_neighbors
+        threshold = max(int(round(len(database_output)/percent)), 1)
+        recall_N = [0] * threshold
         topN_similarity_score = []
         N_percent_retrieved = 0

-        threshold = max(int(round(len(database_output)/percent)), 1)
-
         num_evaluated = 0
         for i in range(len(queries_output)):
             true_neighbors = QUERY_SETS[n][i][m]
             if(len(true_neighbors) == 0):
                 continue
             num_evaluated += 1
             distances, indices = database_nbrs.query(
-                np.array([queries_output[i]]),k=num_neighbors)
+                np.array([queries_output[i]]),k=threshold)

             #indices = indices + n*2048
             for j in range(len(indices[0])):
@@ -270,8 +269,12 @@ def get_recall(m, n, DATABASE_VECTORS, QUERY_VECTORS, QUERY_SETS):
             if len(list(set(indices[0][0:threshold]).intersection(set(true_neighbors)))) > 0:
                 N_percent_retrieved += 1

-        N_percent_recall = (N_percent_retrieved/float(num_evaluated))*100
-        recall_N = (np.cumsum(recall_N)/float(num_evaluated))*100
+        if float(num_evaluated)!=0:
+            N_percent_recall = (N_percent_retrieved/float(num_evaluated))*100
+            recall_N = (np.cumsum(recall_N)/float(num_evaluated))*100
+        else:
+            N_percent_recall = 0
+            recall_N = 0
         recalls.append(recall_N)
         similarity_scores.append(topN_similarity_score)
         N_percent_recalls.append(N_percent_recall)
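
Taken together, the evaluate.py changes make the recall buffer, the k passed to the nearest-neighbor query, and the top-N% threshold all derive from the same quantity, and guard the final division when no query has ground-truth neighbors. A minimal standalone sketch of the resulting loop (names and shapes are illustrative, assuming NumPy descriptor arrays and scikit-learn's KDTree, as the repository uses):

import numpy as np
from sklearn.neighbors import KDTree

# database_output: (N, D) array; queries_output: (Q, D) array;
# true_neighbors_per_query: list of Q lists of database indices.
def top_percent_recall(database_output, queries_output, true_neighbors_per_query, percent):
    database_nbrs = KDTree(database_output)
    # percent divides the database size: 100 -> top 1%, 20 -> top 5%, 10 -> top 10%
    threshold = max(int(round(len(database_output) / percent)), 1)
    recall_N = np.zeros(threshold)
    num_evaluated = 0
    for i, true_neighbors in enumerate(true_neighbors_per_query):
        if len(true_neighbors) == 0:
            continue                          # no ground truth for this query
        num_evaluated += 1
        true_set = set(true_neighbors)
        _, indices = database_nbrs.query(queries_output[i:i + 1], k=threshold)
        for j, idx in enumerate(indices[0]):
            if idx in true_set:
                recall_N[j] += 1              # first correct hit lands at rank j
                break
    if num_evaluated != 0:                    # the guard this commit adds
        return np.cumsum(recall_N) / num_evaluated * 100
    return np.zeros(threshold)
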
generating_queries/generate_test_cc_sets.py (17 changes: 11 additions & 6 deletions)

@@ -19,6 +19,7 @@
 filename = "gt_pose.mat"
 pointcloud_fols = "/pointcloud_20m_10overlap/"

+evaluate_all = True
 print("cfg.DATASET_FOLDER:"+str(cfg.DATASET_FOLDER))

 cc_dir = "/home/cc/"
@@ -27,7 +28,10 @@
 folders = []

 # All runs are used for training (both full and partial)
-index_list = [11,14,15,17]
+if evaluate_all:
+    index_list = list(range(18))
+else:
+    index_list = [5,6,7,9]
 print("Number of runs: "+str(len(index_list)))
 for index in index_list:
     print("all_folders[index]:"+str(all_folders[index]))
@@ -62,8 +66,8 @@ def construct_query_dict(df_centroids, df_database, folder_num, filename_train,
     database_sets = []
     for folder in range(folder_num):
         queries = {}
-        for i in range(len(df_centroids)//10):
-            temp_indx = folder*(len(df_centroids)//10) + i
+        for i in range(len(df_centroids)//folder_num):
+            temp_indx = folder*(len(df_centroids)//folder_num) + i
             query = df_centroids.iloc[temp_indx]["file"]
             #print("folder:"+str(folder))
             #print("query:"+str(query))
@@ -153,7 +157,8 @@ def construct_query_dict(df_centroids, df_database, folder_num, filename_train,

print("df_train:"+str(len(df_train)))



#construct_query_dict(df_train,len(folders),"evaluation_database.pickle",False)
construct_query_dict(df_test, df_train, len(folders),"evaluation_database.pickle", "evaluation_query.pickle", True)
if not evaluate_all:
construct_query_dict(df_test, df_train, len(folders),"evaluation_database.pickle", "evaluation_query.pickle", True)
else:
construct_query_dict(df_test, df_train, len(folders),"evaluation_database_full.pickle", "evaluation_query_full.pickle", True)
loading_pointclouds.py (2 changes: 1 addition & 1 deletion)

@@ -97,7 +97,7 @@ def get_query_tuple(dict_value, num_pos, num_neg, QUERY_DICT, hard_neg=[], other

     random.shuffle(dict_value["positives"])
     pos_files = []
-
+
     for i in range(num_pos):
         pos_files.append(QUERY_DICT[dict_value["positives"][i]]["query"])
     #positives= load_pc_files(dict_value["positives"][0:num_pos])
train_pointnetvlad.py (16 changes: 7 additions & 9 deletions)

@@ -21,7 +21,7 @@
 from torch.autograd import Variable
 from torch.backends import cudnn

-os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+#os.environ["CUDA_VISIBLE_DEVICES"] = "0"


 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -39,8 +39,8 @@
                     help='Number of potential positives in each training tuple [default: 2]')
 parser.add_argument('--negatives_per_query', type=int, default=18,
                     help='Number of definite negatives in each training tuple [default: 18]')
-parser.add_argument('--max_epoch', type=int, default=20,
-                    help='Epoch to run [default: 20]')
+parser.add_argument('--max_epoch', type=int, default=100,
+                    help='Epoch to run [default: 100]')
 parser.add_argument('--batch_num_queries', type=int, default=2,
                     help='Batch Size during training [default: 2]')
 parser.add_argument('--learning_rate', type=float, default=0.000005,
@@ -207,8 +207,6 @@ def train():
         log_string('EVAL RECALL_5: %s' % str(eval_recall_5))
         log_string('EVAL RECALL_10: %s' % str(eval_recall_10))

-        train_writer.add_scalar("Val Recall", eval_recall_1, eval_recall_5, eval_recall_10, epoch)
-

 def train_one_epoch(model, optimizer, train_writer, loss_function, epoch):
     global HARD_NEGATIVES
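
The deleted line above stuffed four values into add_scalar, whose signature takes one tag, one scalar value, and a global step, so the extra recalls landed in the step and walltime parameters (or raised a TypeError, depending on the writer version). If validation recall should still reach TensorBoard, a hedged sketch against the documented API (tag names are illustrative):

train_writer.add_scalar("val/recall_1", eval_recall_1, epoch)
train_writer.add_scalar("val/recall_5", eval_recall_5, epoch)
train_writer.add_scalar("val/recall_10", eval_recall_10, epoch)
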
@@ -224,7 +222,7 @@ def train_one_epoch(model, optimizer, train_writer, loss_function, epoch):
     train_file_idxs = np.arange(0, len(TRAINING_QUERIES.keys()))
     np.random.shuffle(train_file_idxs)
     for i in range(len(train_file_idxs)//cfg.BATCH_NUM_QUERIES):
-    #for i in range(40):
+    #for i in range(1):
     # for i in range (5):
         batch_keys = train_file_idxs[i *
                                      cfg.BATCH_NUM_QUERIES:(i+1)*cfg.BATCH_NUM_QUERIES]
@@ -357,7 +355,7 @@ def train_one_epoch(model, optimizer, train_writer, loss_function, epoch):

 def get_feature_representation(filename, model):
     model.eval()
-    queries = load_pc_files([filename])
+    queries = load_pc_files([filename],True)
     queries = np.expand_dims(queries, axis=1)
     # if(BATCH_NUM_QUERIES-1>0):
     #     fake_queries=np.zeros((BATCH_NUM_QUERIES-1,1,NUM_POINTS,3))
@@ -424,7 +422,7 @@ def get_latent_vectors(model, dict_to_process):
     # handle edge case
     for q_index in range((len(train_file_idxs) // batch_num * batch_num), len(dict_to_process.keys())):
         index = train_file_idxs[q_index]
-        queries = load_pc_files([dict_to_process[index]["query"]])
+        queries = load_pc_files([dict_to_process[index]["query"]],True)
         queries = np.expand_dims(queries, axis=1)

         # if (BATCH_NUM_QUERIES - 1 > 0):
@@ -439,7 +437,7 @@ def get_latent_vectors(model, dict_to_process):
         #o1, o2, o3, o4 = run_model(model, q, fake_pos, fake_neg, fake_other_neg)
         with torch.no_grad():
             queries_tensor = torch.from_numpy(queries).float()
-            o1 = model(queries_tensor)
+            o1 = model.to(device)(queries_tensor.to(device))

         output = o1.detach().cpu().numpy()
         output = np.squeeze(output)
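
The last hunk moves both the model and the input tensor to the same device before the forward pass. A self-contained sketch of that pattern (the nn.Linear is a stand-in for the PointNetVLAD network, and the 1 x 4096 x 3 shape is an assumed point-cloud batch, not taken from the repository):

import numpy as np
import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.Linear(3, 8).to(device)   # stand-in for the real network
model.eval()

queries = np.random.rand(1, 4096, 3).astype(np.float32)
with torch.no_grad():
    queries_tensor = torch.from_numpy(queries).float().to(device)
    o1 = model(queries_tensor)
output = o1.detach().cpu().numpy()   # back to host memory for the NumPy code that follows
print(output.shape)                  # (1, 4096, 8)

Calling model.to(device) inside the loop, as the diff does, is harmless after the first call since the move is idempotent; hoisting it outside the loop is the more conventional placement.
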
