# analysis.py

# Import from external libraries
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.layers import Lambda
from tensorflow.keras.activations import relu
from tensorflow.keras import Model
from tensorflow import one_hot
import tensorflow as tf
import numpy as np
import cv2

# Import from other files in this repository
from utils import process_image, analyse, rescale, deprocess_image
from activation_functions import relu as relu_activation
from combinations import linear, squared_weights


def guided_backprop(input_data, model, index, layer_name, n_classes):
    # ReLU with a custom gradient: only positive gradients flowing into
    # positive activations are propagated (guided backpropagation)
    @tf.custom_gradient
    def guidedRelu(x):
        def grad(dy):
            return tf.cast(dy > 0, "float32") * tf.cast(x > 0, "float32") * dy
        return tf.nn.relu(x), grad

    # Loss: cross-entropy between the one-hot target class and the prediction
    def category_loss(x):
        return categorical_crossentropy(tf.one_hot([index], n_classes), x)

    # Append the loss as an extra layer on top of the model's output
    loss_layer = Lambda(category_loss)(model.output)
    guidedModel = Model(inputs=model.input, outputs=loss_layer)

    # Replace ReLU activations with the guided variant
    for layer in guidedModel.layers:
        if hasattr(layer, "activation") and layer.activation == relu:
            layer.activation = guidedRelu

    # Gradient of the loss with respect to the input image
    with tf.GradientTape() as tape:
        indata = tf.cast(input_data, tf.float32)
        tape.watch(indata)
        loss = guidedModel(indata)
    gradients = tape.gradient(loss, indata)[0]

    # Convert the gradients to a displayable image and flip the channel
    # order (BGR <-> RGB)
    gradients = np.flip(deprocess_image(np.array(gradients)), -1)
    return gradients


def grad_cam(input_data, model, index, originalImage, activation_function, combination, layer_name, n_classes):
    # Loss: cross-entropy between the one-hot target class and the prediction
    def loss_function(x):
        return categorical_crossentropy(one_hot([index], n_classes), x)

    # Append the loss as an extra layer and build a model that exposes both
    # the feature maps of the chosen convolutional layer and the loss
    loss_layer = Lambda(loss_function)(model.output)
    conv_layer = model.get_layer(layer_name).output
    grad_model = Model(inputs=model.input, outputs=[conv_layer, loss_layer])

    # Gradient of the loss with respect to the feature maps
    with tf.GradientTape() as tape:
        output_values, loss = grad_model(tf.cast(input_data, tf.float32))
        loss = tf.reduce_sum(loss)
    gradients = tape.gradient(loss, output_values)

    # Drop the batch dimension; negate the gradients because the loss
    # decreases as the target class score increases
    output_values = np.array(output_values[0])
    gradients = -np.array(gradients[0])
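
    # For reference, Grad-CAM (Selvaraju et al., 2017) defines the channel
    # weights and the localization map for class c and feature maps A^k as
    #     alpha_k = (1/Z) * sum_ij  d y^c / d A^k_ij      (equation 1)
    #     L^c     = ReLU( sum_k alpha_k * A^k )           (equation 2)
    # Here the negated loss gradient stands in for d y^c / d A^k, and the
    # `combination` and `activation_function` arguments generalise equation 2.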
    # Channel weights (alphas): the spatial mean of the gradients (equation 1)
    alphas = np.mean(gradients, axis=(0, 1))
    # Combine the feature maps using the weights (e.g. a linear combination)
    combo = combination(alphas, output_values)
    # Apply the activation function (e.g. ReLU) to keep positive evidence
    combo = activation_function(combo)
    # Upsample the map to the input resolution and rescale it
    combo = cv2.resize(combo, (224, 224))
    combo = rescale(combo)
    return combo


def analysis(img, model, preprocess_input, decode_predictions, layer_name, n_classes):
    # Preprocess the input and get the model's prediction
    imgArray, originalImage = process_image(img, preprocess_input)
    preds = model.predict(imgArray)
    pred_class = np.argmax(preds)  # change this index to visualise a different class
    decoded_preds = decode_predictions(preds)
    class_data = decoded_preds[0][0]

    # Compute the individual explanation methods
    localization_grad = grad_cam(imgArray, model, pred_class, originalImage, relu_activation, linear,
                                 layer_name, n_classes)
    localization_squad = grad_cam(imgArray, model, pred_class, originalImage, relu_activation, squared_weights,
                                  layer_name, n_classes)
    bprop = guided_backprop(imgArray, model, pred_class, layer_name, n_classes)

    # Stack the 2-D localization maps into three channels so they can be
    # multiplied element-wise with the RGB guided-backprop image
    localization_grad = np.stack([localization_grad] * 3, axis=-1)
    localization_squad = np.stack([localization_squad] * 3, axis=-1)

    # Combine Grad-CAM and guided backprop to get guided Grad-CAM
    guided_gradcam = np.multiply(localization_grad, bprop)
    guided_squadcam = np.multiply(localization_squad, bprop)
    guided_gradcam = rescale(guided_gradcam)
    guided_squadcam = rescale(guided_squadcam)
    return bprop, guided_gradcam, guided_squadcam, class_data
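

# Example usage (illustrative sketch): the VGG16 backbone, its "block5_conv3"
# layer and the 1000 ImageNet classes are assumptions made for the example,
# and `img` is assumed to be whatever utils.process_image expects
# (e.g. a file path or an already-loaded image).
if __name__ == "__main__":
    from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions

    vgg = VGG16(weights="imagenet")
    bprop, guided_gradcam, guided_squadcam, class_data = analysis(
        "example.jpg", vgg, preprocess_input, decode_predictions,
        layer_name="block5_conv3", n_classes=1000)
    print("Top prediction:", class_data)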