# main_script_for_NMF_model.py
import pandas as pd
from datasets.deskdrop_dataset import Deskdrop_Dataset
from models.NMF_model import NMF_Model
from evaluator import Evaluator
"""
An example on using this evaluation framework: You can replace 'deskdrop_dataset' and 'model_NMF' with your own dataset and model objects.
But your dataset object must be a subclass of Dataset defined in dataset.py and implement its abstract methods;
and your model object must be a subclass of Model defined in model.py and implement its abstract methods.
"""
# tunable parameters
num_of_recommendations = 5  # number of recommendations to be given by the model
n_ranks = [1, 3, 5]  # ranks used for the metrics, e.g. if n_ranks = [1, 5] and the metric used is RECALL, then RECALL@1 and RECALL@5 will be reported
# initialising the objects
deskdrop_dataset = Deskdrop_Dataset(partition_ratios=[0.6, 0.2, 0.2])  # train / validation / test split
model_NMF = NMF_Model(n=num_of_recommendations)
evaluator = Evaluator(n_ranks=n_ranks)
# Training on the training set
training_set = deskdrop_dataset.getTraining()
model_NMF.train_in_batch(dataset=training_set)
# Validation in stream environment
deskdrop_dataset.set_curr_pointer(mode='Validation')
validation_results = evaluator.evaluate_model_in_stream(dataset=deskdrop_dataset, model=model_NMF, mode='Validation', scheme=[0.9, 0.1])
# Retrain on both the training and validation sets
validation_set = deskdrop_dataset.getValidation(exclude_first_time_users = False)
train_and_validate_sets = pd.concat([training_set, validation_set])
model_NMF.train_in_batch(dataset=train_and_validate_sets)
# Testing in stream environment
deskdrop_dataset.set_curr_pointer(mode='Testing')
testing_results = evaluator.evaluate_model_in_stream(dataset=deskdrop_dataset, model=model_NMF, mode='Testing', scheme=[0.9, 0.1])
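

# ---------------------------------------------------------------------------
# Illustrative sketch: plugging in your own dataset and model.
# This is a non-authoritative outline of the extension points described in the
# docstring above. The class names MyDataset and MyModel are hypothetical, and
# the method signatures below are inferred only from how this script calls
# Deskdrop_Dataset and NMF_Model; in real code you would subclass Dataset from
# dataset.py and Model from model.py and implement their full sets of abstract
# methods.
# ---------------------------------------------------------------------------
class MyDataset:  # in real code: subclass Dataset from dataset.py
    def getTraining(self):
        """Return the training interactions (e.g. a pandas DataFrame)."""
        raise NotImplementedError

    def getValidation(self, exclude_first_time_users=False):
        """Return the validation interactions."""
        raise NotImplementedError

    def set_curr_pointer(self, mode):
        """Move the stream pointer to the start of 'Validation' or 'Testing'."""
        raise NotImplementedError


class MyModel:  # in real code: subclass Model from model.py
    def __init__(self, n):
        self.n = n  # number of recommendations to produce

    def train_in_batch(self, dataset):
        """Fit the model on the given batch of interactions."""
        raise NotImplementedError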