From 7ee7d8b4e80ae7e741a78d3ea2ca1a05516b5500 Mon Sep 17 00:00:00 2001
From: Victor Trinquet <60815457+VicTrqt@users.noreply.github.com>
Date: Wed, 13 Nov 2024 12:00:49 +0100
Subject: [PATCH] GA minor fixes (#244)

* fix docstring nested FitGenetic.run

* fix lr + balance fraction num_neurons GA
---
 modnet/hyper_opt/fit_genetic.py | 3 ++-
 modnet/models/ensemble.py       | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/modnet/hyper_opt/fit_genetic.py b/modnet/hyper_opt/fit_genetic.py
index 03fae50..7d0b8b7 100644
--- a/modnet/hyper_opt/fit_genetic.py
+++ b/modnet/hyper_opt/fit_genetic.py
@@ -52,7 +52,8 @@ def __init__(
         self.xscale_before_impute = True
         self.lr_list = [0.1, 0.01, 0.005, 0.001]
         self.batch_size_list = [32, 64, 128, 256]
-        self.fraction_list = [1, 0.75, 0.5, 0.25]
+        self.fraction_list = [1, 1, 0.75, 0.5, 0.25]
+        # add 1 to balance the chance of having an architecture with the same num_neurons on each layer
 
         if fit_params:
             self.__dict__.update(fit_params)
diff --git a/modnet/models/ensemble.py b/modnet/models/ensemble.py
index 789d863..880d09e 100644
--- a/modnet/models/ensemble.py
+++ b/modnet/models/ensemble.py
@@ -510,7 +510,7 @@ def _validate_ensemble_model(
 
     model.fit(
         train_data,
-        learning_rate=lr,
+        lr=lr,
         epochs=epochs,
         batch_size=batch_size,
         loss=loss,
@@ -522,7 +522,7 @@
         val_data=val_data,
     )
 
-    learning_curves = [m.history["val_loss"] for m in model.model]
+    learning_curves = [m.history["val_loss"] for m in model.models]
 
     val_loss = model.evaluate(val_data)
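
Note on the `fraction_list` change (illustration, not part of the patch): assuming the GA draws one width-scaling fraction per hidden layer uniformly from `fraction_list`, a drawn fraction of 1 keeps `num_neurons` unchanged from the previous layer. Duplicating the 1 raises the chance of a same-width layer from 1/4 to 2/5, which is what the inline comment means by "balance". A minimal sketch of that sampling logic follows; the `sample_widths` helper is hypothetical, not modnet API:

```python
import random

# Hypothetical sketch: each successive layer keeps the previous width
# multiplied by a fraction drawn uniformly from fraction_list. This is an
# assumption about the GA's architecture sampling, not the actual modnet code.
def sample_widths(first_layer: int, n_layers: int, fractions: list) -> list:
    widths = [first_layer]
    for _ in range(n_layers - 1):
        widths.append(int(widths[-1] * random.choice(fractions)))
    return widths

old_fractions = [1, 0.75, 0.5, 0.25]
new_fractions = [1, 1, 0.75, 0.5, 0.25]

# Probability that a single draw keeps the layer width unchanged:
for fr in (old_fractions, new_fractions):
    print(fr.count(1) / len(fr))  # 0.25 before the patch, 0.4 after

print(sample_widths(128, 4, new_fractions))  # widths for a 4-layer draw
```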