import tensorflow as tf

from transformer.layers.generate_mask import generate_mask


class Trainer:
    """Wraps a Transformer model with a masked-loss training loop,
    checkpointing, and greedy decoding."""

    def __init__(self, model, optimizer, epochs, checkpoint_folder):
        self.model = model
        self.optimizer = optimizer
        self.epochs = epochs
        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.train_accuracy = tf.keras.metrics.Mean(name='train_accuracy')
        self.checkpoint = tf.train.Checkpoint(model=self.model, optimizer=self.optimizer)
        self.checkpoint_manager = tf.train.CheckpointManager(self.checkpoint, checkpoint_folder, max_to_keep=3)

    def cal_acc(self, real, pred):
        """Token-level accuracy over non-padding positions (padding id = 0)."""
        # Cast the argmax output (int64) to real's dtype so tf.equal does not
        # fail on mismatched integer types.
        accuracies = tf.equal(real, tf.cast(tf.argmax(pred, axis=2), real.dtype))
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        accuracies = tf.math.logical_and(mask, accuracies)
        accuracies = tf.cast(accuracies, dtype=tf.float32)
        mask = tf.cast(mask, dtype=tf.float32)
        return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)

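    # A worked example of the masked accuracy above (illustrative numbers):
    # real = [[5, 7, 0]] with 0 as padding and argmax(pred, axis=2) = [[5, 2, 9]]
    # gives accuracies = [[True, False, True]] and mask = [[True, True, False]],
    # so the masked matches are [[1., 0., 0.]] and the result is 1/2; the
    # padding position is excluded from both numerator and denominator.
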
    def loss_function(self, real, pred):
        """Sparse categorical cross-entropy averaged over non-padding tokens."""
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        loss = cross_entropy(real, pred)
        # Zero out the loss on padding positions, then average over real tokens only.
        mask = tf.cast(mask, dtype=loss.dtype)
        loss = loss * mask
        return tf.reduce_sum(loss) / tf.reduce_sum(mask)

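    # A quick numeric check of the masked average: per-token losses
    # [[2.0, 1.0, 4.0]] with mask [[1., 1., 0.]] give (2.0 + 1.0) / 2 = 1.5;
    # the padded position contributes to neither the sum nor the count.
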
    def train_step(self, inp, tar):
        """Runs a single optimization step with teacher forcing.

        The target is shifted by one position: e.g. tar = [START, w1, w2, END]
        gives tar_inp = [START, w1, w2] and tar_real = [w1, w2, END].
        """
        tar_inp = tar[:, :-1]
        tar_real = tar[:, 1:]
        encoder_padding_mask, decoder_look_ahead_mask, decoder_padding_mask = generate_mask(inp, tar_inp)
        with tf.GradientTape() as tape:
            preds = self.model(inp, tar_inp, True, encoder_padding_mask, decoder_look_ahead_mask, decoder_padding_mask)
            d_loss = self.loss_function(tar_real, preds)
        # Compute gradients
        grads = tape.gradient(d_loss, self.model.trainable_variables)
        # Update weights
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        # Update running metrics
        self.train_loss.update_state(d_loss)
        self.train_accuracy.update_state(self.cal_acc(tar_real, preds))

    def fit(self, data):
        print('=============Training Progress================')
        print('----------------Begin--------------------')
        # Restore the latest checkpoint, if one exists
        if self.checkpoint_manager.latest_checkpoint:
            self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint)
            print('Restored the latest checkpoint!')
        for epoch in range(self.epochs):
            self.train_loss.reset_states()
            self.train_accuracy.reset_states()
            for (batch, (inp, tar)) in enumerate(data):
                self.train_step(inp, tar)
                if batch % 50 == 0:
                    print(f'Epoch {epoch + 1} Batch {batch} Loss {self.train_loss.result():.3f} Accuracy {self.train_accuracy.result():.3f}')
            # Save a checkpoint every 5 epochs
            if (epoch + 1) % 5 == 0:
                saved_path = self.checkpoint_manager.save()
                print(f'Checkpoint saved at {saved_path}')
        print('----------------Done--------------------')

    def predict(self, encoder_input, decoder_input, is_train, max_length, end_token):
        """Greedy decoding: repeatedly appends the argmax token to the decoder
        input until end_token is produced or max_length is reached.

        The scalar comparison against end_token assumes a batch size of 1.
        """
        print('=============Inference Progress================')
        print('----------------Begin--------------------')
        # Restore the latest checkpoint, if one exists
        if self.checkpoint_manager.latest_checkpoint:
            self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint)
            print('Restored the latest checkpoint!')
        for _ in range(max_length):
            encoder_padding_mask, decoder_look_ahead_mask, decoder_padding_mask = generate_mask(encoder_input, decoder_input)
            preds = self.model(encoder_input, decoder_input, is_train, encoder_padding_mask, decoder_look_ahead_mask, decoder_padding_mask)
            preds = preds[:, -1:, :]  # (batch_size, 1, vocab_size): keep only the last position
            # Cast so the new token matches decoder_input's dtype before concatenating
            predicted_id = tf.cast(tf.argmax(preds, axis=-1), decoder_input.dtype)
            decoder_input = tf.concat([decoder_input, predicted_id], axis=-1)
            # Stop as soon as the model emits the end token
            if predicted_id == end_token:
                break
        return decoder_input
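
# A minimal usage sketch, kept commented out so the module stays importable.
# It assumes a Transformer class whose call signature matches the one used in
# train_step above, a tf.data.Dataset of (inp, tar) token-id batches, and
# tokenizer constants START_TOKEN / END_TOKEN; all of these names are
# hypothetical stand-ins, not part of this module.
#
# if __name__ == '__main__':
#     model = Transformer(...)  # hypothetical model construction
#     optimizer = tf.keras.optimizers.Adam(1e-4)
#     trainer = Trainer(model, optimizer, epochs=20, checkpoint_folder='./checkpoints')
#     trainer.fit(train_dataset)  # train_dataset yields (inp, tar) batches
#
#     # Greedy decoding for a single tokenized sentence (batch size 1):
#     encoder_input = tf.expand_dims(input_token_ids, 0)  # (1, inp_len)
#     decoder_input = tf.expand_dims([START_TOKEN], 0)    # (1, 1)
#     result = trainer.predict(encoder_input, decoder_input, is_train=False,
#                              max_length=40, end_token=END_TOKEN)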