-
Notifications
You must be signed in to change notification settings - Fork 0
/
dae.py
122 lines (96 loc) · 3.49 KB
/
dae.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
"""Implementation of a Deep Autoencoder"""
import tensorflow as tf
class DAE():
    """A Deep Autoencoder built by stacking pretrained RBMs.

    The encoder applies each RBM's weights bottom-up with sigmoid
    activations; the decoder applies the (transposed) weights top-down to
    reconstruct the visible input.
    """
    def __init__(self, models):
        """Create a deep autoencoder based on a list of RBM models
        Parameters
        ----------
        models: list[RBM]
            a list of RBM models to use for autoencoding; each model is
            expected to expose `W`, `h_bias` and `v_bias` attributes
        """
        # Copy each RBM's parameters into fresh tf.Variables so the DAE
        # can be fine-tuned without mutating the pretrained RBMs.
        encoders = []
        encoder_biases = []
        decoders = []
        decoder_biases = []
        for model in models:
            encoders.append(tf.Variable(model.W))
            encoder_biases.append(tf.Variable(model.h_bias))
            decoders.append(tf.Variable(model.W))
            decoder_biases.append(tf.Variable(model.v_bias))
        # build encoders and decoders
        self.encoders = encoders
        self.encoder_biases = encoder_biases
        # The decoder stack runs in reverse (top RBM first). Use the
        # dedicated decoder Variables here — the original code created them
        # and then discarded them, aliasing the encoder Variables instead,
        # which silently tied encoder/decoder weights during fine-tuning.
        # Initial values are identical to the encoder weights, so forward
        # passes are unchanged at construction time.
        self.decoders = list(reversed(decoders))
        self.decoder_biases = list(reversed(decoder_biases))
    @tf.function
    def predict(self, v):
        """Forward step
        Parameters
        ----------
        v: Tensor
            input tensor
        Returns
        -------
        Tensor
            a reconstruction of v from the autoencoder
        """
        # encode, then decode back to visible space
        p_h = self.encode(v)
        p_v = self.decode(p_h)
        return p_v
    @tf.function
    def encode(self, v):  # for visualization, encode without sigmoid
        """Encode input
        Parameters
        ----------
        v: Tensor
            visible input tensor
        Returns
        -------
        Tensor
            the activations of the last layer
        """
        p_v = v
        activation = v
        for W, h_bias in zip(self.encoders, self.encoder_biases):
            activation = tf.linalg.matmul(p_v, W) + h_bias
            p_v = tf.sigmoid(activation)
        # for the last layer, we want to return the activation directly
        # rather than the sigmoid (useful for visualization)
        return activation
    @tf.function
    def decode(self, h):
        """Encode hidden layer
        Parameters
        ----------
        h: Tensor
            activations from last hidden layer
        Returns
        -------
        Tensor
            reconstruction of original input based on h
        """
        p_h = h
        # Decoder weights are stored encoder-shaped (visible x hidden),
        # so each layer multiplies by the transpose.
        for W, v_bias in zip(self.decoders, self.decoder_biases):
            activation = tf.linalg.matmul(p_h, tf.transpose(W)) + v_bias
            p_h = tf.sigmoid(activation)
        return p_h