diff --git a/Project.toml b/Project.toml
index e18179f..59111d2 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "Lighthouse"
 uuid = "ac2c24cd-07f0-4848-96b2-1b82c3ea0e59"
 authors = ["Beacon Biosignals, Inc."]
-version = "0.14.18"
+version = "0.14.19"
 
 [deps]
 ArrowTypes = "31f734f8-188a-4ce0-8406-c8a06bd891cd"
diff --git a/src/metrics.jl b/src/metrics.jl
index 0eeadf0..f5333c7 100644
--- a/src/metrics.jl
+++ b/src/metrics.jl
@@ -70,9 +70,9 @@ function binary_statistics(confusion::AbstractMatrix, class_index::Integer)
     actual_positives = sum(view(confusion, :, class_index))
     actual_negatives = total - actual_positives
     true_positives = confusion[class_index, class_index]
-    true_negatives = sum(diag(confusion)) - true_positives
     false_positives = predicted_positives - true_positives
     false_negatives = actual_positives - true_positives
+    true_negatives = actual_negatives - false_positives
     true_positive_rate = (true_positives == 0 && actual_positives == 0) ?
                          (one(true_positives) / one(actual_positives)) :
                          (true_positives / actual_positives)
diff --git a/test/metrics.jl b/test/metrics.jl
index 1195985..7e5f3bc 100644
--- a/test/metrics.jl
+++ b/test/metrics.jl
@@ -10,20 +10,26 @@
     @test accuracy(c) == percent_agreement == 3 / 8
     @test kappa == (3 / 8 - chance) / (1 - chance)
     stats = binary_statistics(c, 3)
+    total = sum(c)
+    @test total == 8
     @test stats.predicted_positives == 2
     @test stats.predicted_negatives == 6
     @test stats.actual_positives == 2
     @test stats.actual_negatives == 6
     @test stats.true_positives == 1
-    @test stats.true_negatives == 2
+    @test stats.true_negatives == 5
     @test stats.false_positives == 1
     @test stats.false_negatives == 1
     @test stats.true_positive_rate == 0.5
-    @test stats.true_negative_rate == 1 / 3
+    @test stats.true_negative_rate == 5 / 6
     @test stats.false_positive_rate == 1 / 6
     @test stats.false_negative_rate == 0.5
     @test stats.precision == 0.5
     @test stats.f1 == 0.5
+    @test stats.true_positives + stats.true_negatives + stats.false_positives +
+          stats.false_negatives == total
+    @test stats.actual_positives + stats.actual_negatives == total
+    @test stats.predicted_positives + stats.predicted_negatives == total
 
     labels = rand(StableRNG(42), 1:3, 100)
     hard_label_pairs = zip(labels, labels)