import numpy as np


def binary_cross_entropy(
y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> float:
"""
Calculate the mean binary cross-entropy (BCE) loss between true labels and predicted
probabilities.
BCE loss quantifies dissimilarity between true labels (0 or 1) and predicted
probabilities. It's widely used in binary classification tasks.
    BCE = -(1/n) * Σ(y_true * ln(y_pred) + (1 - y_true) * ln(1 - y_pred))
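    Worked example (values taken from the first doctest below, terms rounded):
    with y_true = [0, 1, 1, 0, 1] and y_pred = [0.2, 0.7, 0.9, 0.3, 0.8], the
    per-sample losses are -ln(0.8), -ln(0.7), -ln(0.9), -ln(0.7), -ln(0.8)
    ≈ [0.223, 0.357, 0.105, 0.357, 0.223], whose mean is ≈ 0.253.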
Reference: https://en.wikipedia.org/wiki/Cross_entropy
Parameters:
- y_true: True binary labels (0 or 1)
- y_pred: Predicted probabilities for class 1
- epsilon: Small constant to avoid numerical instability
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
>>> binary_cross_entropy(true_labels, predicted_probs)
0.2529995012327421
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
>>> binary_cross_entropy(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
y_pred = np.clip(y_pred, epsilon, 1 - epsilon) # Clip predictions to avoid log(0)
bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
    return np.mean(bce_loss)


def binary_focal_cross_entropy(
y_true: np.ndarray,
y_pred: np.ndarray,
gamma: float = 2.0,
alpha: float = 0.25,
epsilon: float = 1e-15,
) -> float:
"""
Calculate the mean binary focal cross-entropy (BFCE) loss between true labels
and predicted probabilities.
BFCE loss quantifies dissimilarity between true labels (0 or 1) and predicted
probabilities. It's a variation of binary cross-entropy that addresses class
imbalance by focusing on hard examples.
    BFCE = -(1/n) * Σ(alpha * (1 - y_pred)**gamma * y_true * log(y_pred)
                      + (1 - alpha) * y_pred**gamma * (1 - y_true) * log(1 - y_pred))
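    Worked example (values taken from the first doctest below, terms rounded):
    with the defaults gamma = 2 and alpha = 0.25, y_true = [0, 1, 1, 0, 1] and
    y_pred = [0.2, 0.7, 0.9, 0.3, 0.8] give per-sample losses
    ≈ [0.0067, 0.0080, 0.0003, 0.0241, 0.0022], whose mean is ≈ 0.0083.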
Reference: [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf)
Parameters:
- y_true: True binary labels (0 or 1).
- y_pred: Predicted probabilities for class 1.
- gamma: Focusing parameter for modulating the loss (default: 2.0).
- alpha: Weighting factor for class 1 (default: 0.25).
- epsilon: Small constant to avoid numerical instability.
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
>>> binary_focal_cross_entropy(true_labels, predicted_probs)
0.008257977659239775
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
>>> binary_focal_cross_entropy(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
# Clip predicted probabilities to avoid log(0)
y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
    bfce_loss = -(
        alpha * (1 - y_pred) ** gamma * y_true * np.log(y_pred)
        + (1 - alpha) * y_pred**gamma * (1 - y_true) * np.log(1 - y_pred)
    )
    return np.mean(bfce_loss)


def categorical_cross_entropy(
y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> float:
"""
Calculate categorical cross-entropy (CCE) loss between true class labels and
predicted class probabilities.
CCE = -Σ(y_true * ln(y_pred))
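    Worked example (values taken from the first doctest below): each one-hot row
    picks out the predicted probability of the true class, so the loss is
    -(ln(0.9) + ln(0.7) + ln(0.9)) ≈ 0.105 + 0.357 + 0.105 ≈ 0.567.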
Reference: https://en.wikipedia.org/wiki/Cross_entropy
Parameters:
- y_true: True class labels (one-hot encoded)
- y_pred: Predicted class probabilities
- epsilon: Small constant to avoid numerical instability
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
>>> categorical_cross_entropy(true_labels, pred_probs)
0.567395975254385
>>> true_labels = np.array([[1, 0], [0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same shape.
>>> true_labels = np.array([[2, 0, 1], [1, 0, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: y_true must be one-hot encoded.
>>> true_labels = np.array([[1, 0, 1], [1, 0, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: y_true must be one-hot encoded.
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]])
>>> categorical_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: Predicted probabilities must sum to approximately 1.
"""
if y_true.shape != y_pred.shape:
raise ValueError("Input arrays must have the same shape.")
if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1):
raise ValueError("y_true must be one-hot encoded.")
if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)):
raise ValueError("Predicted probabilities must sum to approximately 1.")
y_pred = np.clip(y_pred, epsilon, 1) # Clip predictions to avoid log(0)
    return -np.sum(y_true * np.log(y_pred))


def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
    Calculate the mean hinge loss between true labels and predicted values, as used
for training support vector machines (SVMs).
Hinge loss = max(0, 1 - true * pred)
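    Worked example (values taken from the first doctest below): with
    y_true = [-1, 1, 1, -1, 1] and y_pred = [-4, -0.3, 0.7, 5, 10], the margins
    y_true * y_pred are [4, -0.3, 0.7, -5, 10], so the per-sample losses
    max(0, 1 - margin) are [0, 1.3, 0.3, 6, 0] and their mean is 1.52.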
Reference: https://en.wikipedia.org/wiki/Hinge_loss
Args:
- y_true: actual values (ground truth) encoded as -1 or 1
- y_pred: predicted values
>>> true_labels = np.array([-1, 1, 1, -1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
>>> hinge_loss(true_labels, pred)
1.52
>>> true_labels = np.array([-1, 1, 1, -1, 1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
>>> hinge_loss(true_labels, pred)
Traceback (most recent call last):
...
ValueError: Length of predicted and actual array must be same.
>>> true_labels = np.array([-1, 1, 10, -1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
>>> hinge_loss(true_labels, pred)
Traceback (most recent call last):
...
ValueError: y_true can have values -1 or 1 only.
"""
if len(y_true) != len(y_pred):
raise ValueError("Length of predicted and actual array must be same.")
if np.any((y_true != -1) & (y_true != 1)):
raise ValueError("y_true can have values -1 or 1 only.")
    # Per-sample hinge losses based on the margin y_true * y_pred
    hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred))
    return np.mean(hinge_losses)


def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:
"""
Calculate the mean Huber loss between the given ground truth and predicted values.
The Huber loss describes the penalty incurred by an estimation procedure, and it
serves as a measure of accuracy for regression models.
Huber loss =
0.5 * (y_true - y_pred)^2 if |y_true - y_pred| <= delta
delta * |y_true - y_pred| - 0.5 * delta^2 otherwise
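    Worked example (values taken from the first doctest below): with delta = 1.0,
    the absolute errors are [0.1, 7.9, 0.9, 3.2, 0.0]; the first, third and fifth
    fall in the quadratic region (0.005, 0.405, 0.0) while the second and fourth
    fall in the linear region (7.4, 2.7), giving a mean of 10.51 / 5 = 2.102.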
Reference: https://en.wikipedia.org/wiki/Huber_loss
Parameters:
- y_true: The true values (ground truth)
- y_pred: The predicted values
>>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
>>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
>>> huber_loss(true_labels, predicted_probs, 1.0)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
    # Quadratic penalty for errors within delta, linear penalty beyond it
    huber_mse = 0.5 * (y_true - y_pred) ** 2
    huber_mae = delta * (np.abs(y_true - y_pred) - 0.5 * delta)
    return np.where(np.abs(y_true - y_pred) <= delta, huber_mse, huber_mae).mean()


def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate the mean squared error (MSE) between ground truth and predicted values.
    MSE measures the average of the squared differences between true values and
    predicted values, and it
serves as a measure of accuracy for regression models.
MSE = (1/n) * Σ(y_true - y_pred)^2
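    Worked example (values taken from the first doctest below): the errors are
    [0.2, -0.1, 0.1, -0.2, -0.2], the squared errors are
    [0.04, 0.01, 0.01, 0.04, 0.04], and their mean is 0.14 / 5 = 0.028.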
Reference: https://en.wikipedia.org/wiki/Mean_squared_error
Parameters:
- y_true: The true values (ground truth)
- y_pred: The predicted values
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028)
True
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
>>> mean_squared_error(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
squared_errors = (y_true - y_pred) ** 2
    return np.mean(squared_errors)


def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate the mean squared logarithmic error (MSLE) between ground truth and
predicted values.
MSLE measures the squared logarithmic difference between true values and predicted
values for regression models. It's particularly useful for dealing with skewed or
large-value data, and it's often used when the relative differences between
predicted and true values are more important than absolute differences.
MSLE = (1/n) * Σ(log(1 + y_true) - log(1 + y_pred))^2
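    Worked example (values taken from the first doctest below, terms rounded): the
    differences log(1 + y_true) - log(1 + y_pred) are
    ≈ [0.105, -0.033, 0.025, -0.039, -0.033], their squares sum to ≈ 0.0154, and
    the mean is ≈ 0.0031.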
Reference: https://insideaiml.com/blog/MeanSquared-Logarithmic-Error-Loss-1035
Parameters:
- y_true: The true values (ground truth)
- y_pred: The predicted values
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> mean_squared_logarithmic_error(true_values, predicted_values)
0.0030860877925181344
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
>>> mean_squared_logarithmic_error(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2
    return np.mean(squared_logarithmic_errors)


if __name__ == "__main__":
    import doctest

doctest.testmod()