Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

WIP: adapt openml for mo #173

Open
wants to merge 20 commits into
base: yahs_bench
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file modified .github/workflows/run_singularity_versions.yml
100644 → 100755
Empty file.
Empty file modified .github/workflows/run_tests.yml
100644 → 100755
Empty file.
Empty file modified .gitignore
100644 → 100755
Empty file.
Empty file modified LICENSE
100644 → 100755
Empty file.
16 changes: 16 additions & 0 deletions README.md
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,22 @@ fs = b.get_fidelity_space(seed=1)
meta = b.get_meta_information()
```

Multi-objective benchmarks can be queried in a similar manner:

```python
from hpobench.container.benchmarks.mo.adult_benchmark import AdultBenchmark
b = AdultBenchmark(rng=1)
config = b.get_configuration_space(seed=1).sample_configuration()
result_dict = b.objective_function(configuration=config, fidelity={"budget": 66}, rng=1)
result_dict = b.objective_function(configuration=config, rng=1)

>>> print(result_dict['function_values'])
{'misclassification_rate': 0.16572832429112494,
'DSO': 0.197765453723867,
'DEO': 0.1595593763542093,
'DFP': 0.10465117283454546}
```

## Installation

We recommend using a virtual environment. To install HPOBench, please run the following:
Expand Down
Empty file modified changelog.md
100644 → 100755
Empty file.
Empty file modified ci_scripts/codestyle.sh
100644 → 100755
Empty file.
Empty file modified ci_scripts/container_examples.sh
100644 → 100755
Empty file.
Empty file modified ci_scripts/install.sh
100644 → 100755
Empty file.
Empty file modified ci_scripts/install_singularity.sh
100644 → 100755
Empty file.
Empty file modified ci_scripts/local_examples.sh
100644 → 100755
Empty file.
Empty file modified ci_scripts/script.sh
100644 → 100755
Empty file.
Empty file modified codecov.yml
100644 → 100755
Empty file.
Empty file modified examples/container/create_connection_to_benchmark.py
100644 → 100755
Empty file.
Empty file modified examples/container/tabular_benchmark_example.py
100644 → 100755
Empty file.
Empty file modified examples/container/xgboost_with_container.py
100644 → 100755
Empty file.
Empty file modified examples/jahs_bench_201.json
100644 → 100755
Empty file.
Empty file modified examples/local/xgboost_local.py
100644 → 100755
Empty file.
Empty file modified examples/w_optimizer/SVMSurrogate_minicomparison.py
100644 → 100755
Empty file.
Empty file modified examples/w_optimizer/cartpole_bohb.py
100644 → 100755
Empty file.
Empty file modified examples/w_optimizer/cartpole_smachb.py
100644 → 100755
Empty file.
Empty file modified extra_requirements/cartpole.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/examples.json
100644 → 100755
Empty file.
3 changes: 3 additions & 0 deletions extra_requirements/jahs_bench_201.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"jahs_bench_201": ["jahs_bench_201@git+https://github.com/automl/jahs_bench_201.git"]
}
Empty file modified extra_requirements/ml_mfbb.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/mo_cnn.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/multi_objective.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/nasbench_101.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/nasbench_1shot1.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/outlier_detection.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/paramnet.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/pybnn.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/svm.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/tabular_benchmarks.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/tests.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/xgboost.json
100644 → 100755
Empty file.
Empty file modified extra_requirements/yahpo_gym.json
100644 → 100755
Empty file.
Empty file modified hpobench/__init__.py
100644 → 100755
Empty file.
Empty file modified hpobench/__version__.py
100644 → 100755
Empty file.
1 change: 1 addition & 0 deletions hpobench/abstract_benchmark.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ def __init__(self, rng: Union[int, np.random.RandomState, None] = None, **kwargs
create a new random state.
"""
super(_BaseAbstractBenchmark, self).__init__(**kwargs)
print("base abstract benchamrk")
self.rng = rng_helper.get_rng(rng=rng)
self.configuration_space = self.get_configuration_space(self.rng.randint(0, 10000))
self.fidelity_space = self.get_fidelity_space(self.rng.randint(0, 10000))
Expand Down
Empty file modified hpobench/benchmarks/__init__.py
100644 → 100755
Empty file.
Empty file modified hpobench/benchmarks/ml/README.md
100644 → 100755
Empty file.
28 changes: 14 additions & 14 deletions hpobench/benchmarks/ml/__init__.py
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,22 +1,22 @@
from hpobench.benchmarks.ml.histgb_benchmark import HistGBBenchmark, HistGBBenchmarkBB, HistGBBenchmarkMF
from hpobench.benchmarks.ml.lr_benchmark import LRBenchmark, LRBenchmarkBB, LRBenchmarkMF
from hpobench.benchmarks.ml.nn_benchmark import NNBenchmark, NNBenchmarkBB, NNBenchmarkMF
from hpobench.benchmarks.ml.histgb_benchmark import HistGBBenchmark, HistGBBenchmarkBB, HistGBBenchmarkMF, HistGBBenchmarkMO
from hpobench.benchmarks.ml.lr_benchmark import LRBenchmark, LRBenchmarkBB, LRBenchmarkMF, LRBenchmarkMO
from hpobench.benchmarks.ml.nn_benchmark import NNBenchmark, NNBenchmarkBB, NNBenchmarkMF, NNBenchmarkMO
from hpobench.benchmarks.ml.rf_benchmark import RandomForestBenchmark, RandomForestBenchmarkBB, \
RandomForestBenchmarkMF
from hpobench.benchmarks.ml.svm_benchmark import SVMBenchmark, SVMBenchmarkBB, SVMBenchmarkMF
from hpobench.benchmarks.ml.tabular_benchmark import TabularBenchmark
RandomForestBenchmarkMF, RandomForestBenchmarkMO
from hpobench.benchmarks.ml.svm_benchmark import SVMBenchmark, SVMBenchmarkBB, SVMBenchmarkMF, SVMBenchmarkMO
from hpobench.benchmarks.ml.tabular_benchmark import TabularBenchmark, TabularBenchmarkMO

try:
from hpobench.benchmarks.ml.xgboost_benchmark import XGBoostBenchmark, XGBoostBenchmarkBB, XGBoostBenchmarkMF
from hpobench.benchmarks.ml.xgboost_benchmark import XGBoostBenchmark, XGBoostBenchmarkBB, XGBoostBenchmarkMF, XGBoostBenchmarkMO
except ImportError:
pass


__all__ = ['HistGBBenchmark', 'HistGBBenchmarkBB', 'HistGBBenchmarkMF',
'LRBenchmark', 'LRBenchmarkBB', 'LRBenchmarkMF',
'NNBenchmark', 'NNBenchmarkBB', 'NNBenchmarkMF',
'RandomForestBenchmark', 'RandomForestBenchmarkBB', 'RandomForestBenchmarkMF',
'SVMBenchmark', 'SVMBenchmarkBB', 'SVMBenchmarkMF',
'TabularBenchmark',
'XGBoostBenchmark', 'XGBoostBenchmarkBB', 'XGBoostBenchmarkMF',
__all__ = ['HistGBBenchmark', 'HistGBBenchmarkBB', 'HistGBBenchmarkMF', 'HistGBBenchmarkMO',
'LRBenchmark', 'LRBenchmarkBB', 'LRBenchmarkMF', 'LRBenchmarkMO',
'NNBenchmark', 'NNBenchmarkBB', 'NNBenchmarkMF', 'NNBenchmarkMO',
'RandomForestBenchmark', 'RandomForestBenchmarkBB', 'RandomForestBenchmarkMF', 'RandomForestBenchmarkMO',
'SVMBenchmark', 'SVMBenchmarkBB', 'SVMBenchmarkMF', 'SVMBenchmarkMO',
'TabularBenchmark', 'TabularBenchmarkMO',
'XGBoostBenchmark', 'XGBoostBenchmarkBB', 'XGBoostBenchmarkMF', 'XGBoostBenchmarkMO',
]
23 changes: 19 additions & 4 deletions hpobench/benchmarks/ml/histgb_benchmark.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -15,18 +15,18 @@
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier

from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark
from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark, MO_MLBenchmark

__version__ = '0.0.1'


class HistGBBenchmark(MLBenchmark):
class _HistGBBenchmarkBase:
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(HistGBBenchmark, self).__init__(task_id, rng, valid_size, data_path)
super(_HistGBBenchmarkBase, self).__init__(task_id, rng, valid_size, data_path)

@staticmethod
def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:
Expand Down Expand Up @@ -101,6 +101,13 @@ def init_model(self, config: Union[CS.Configuration, Dict],
)
return model

class HistGBBenchmark(_HistGBBenchmarkBase, MLBenchmark):
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(HistGBBenchmark, self).__init__(task_id, rng, valid_size, data_path)

class HistGBBenchmarkBB(HistGBBenchmark):
def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace:
Expand All @@ -120,6 +127,14 @@ def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationS
HistGBBenchmark._get_fidelity_choices(ntrees_choice='variable', subsample_choice='fixed')
)
return fidelity_space

class HistGBBenchmarkMO(_HistGBBenchmarkBase, MO_MLBenchmark):
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(HistGBBenchmarkMO, self).__init__(task_id, rng, valid_size, data_path)


__all__ = ['HistGBBenchmark', 'HistGBBenchmarkBB', 'HistGBBenchmarkMF']
__all__ = ['HistGBBenchmark', 'HistGBBenchmarkBB', 'HistGBBenchmarkMF', 'HistGBBenchmarkMO']
39 changes: 34 additions & 5 deletions hpobench/benchmarks/ml/lr_benchmark.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -14,19 +14,28 @@
from ConfigSpace.hyperparameters import Hyperparameter
from sklearn.linear_model import SGDClassifier

from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark
from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark, MO_MLBenchmark
'''
Changelog:
==========
0.0.2:
* Add the multiobjective version of this benchmark by returning val loss, precision, f1 and balanced accuracy

__version__ = '0.0.1'
0.0.1:
* First implementation
'''

__version__ = '0.0.2'

class LRBenchmark(MLBenchmark):

class _LRBenchmarkBase:
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):

super(LRBenchmark, self).__init__(task_id, rng, valid_size, data_path)
super(_LRBenchmarkBase, self).__init__(task_id, rng, valid_size, data_path)
self.cache_size = 500

@staticmethod
Expand Down Expand Up @@ -107,6 +116,17 @@ def init_model(self, config: Union[CS.Configuration, Dict],
)
return model

class LRBenchmark(_LRBenchmarkBase, MLBenchmark):
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):

super(LRBenchmark, self).__init__(task_id, rng, valid_size, data_path)
self.cache_size = 500



class LRBenchmarkBB(LRBenchmark):
def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace:
Expand All @@ -126,6 +146,15 @@ def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationS
LRBenchmark._get_fidelity_choices(iter_choice='variable', subsample_choice='fixed')
)
return fidelity_space

class LRBenchmarkMO(_LRBenchmarkBase, MO_MLBenchmark):
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(LRBenchmarkMO, self).__init__(task_id, rng, valid_size, data_path)
self.cache_size = 500


__all__ = ['LRBenchmark', 'LRBenchmarkBB', 'LRBenchmarkMF']
__all__ = ['LRBenchmark', 'LRBenchmarkBB', 'LRBenchmarkMF', 'LRBenchmarkMO']
34 changes: 29 additions & 5 deletions hpobench/benchmarks/ml/nn_benchmark.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -14,18 +14,28 @@
from ConfigSpace.hyperparameters import Hyperparameter
from sklearn.neural_network import MLPClassifier

from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark
from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark, MO_MLBenchmark

__version__ = '0.0.1'
'''
Changelog:
==========
0.0.2:
* Add the multiobjective version of this benchmark by returning val loss, precision, f1 and balanced accuracy

0.0.1:
* First implementation
'''

__version__ = '0.0.2'


class NNBenchmark(MLBenchmark):
class _NNBenchmarkBase:
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(NNBenchmark, self).__init__(task_id, rng, valid_size, data_path)
super(_NNBenchmarkBase, self).__init__(task_id, rng, valid_size, data_path)

@staticmethod
def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:
Expand Down Expand Up @@ -109,6 +119,13 @@ def init_model(self, config: Union[CS.Configuration, Dict],
)
return model

class NNBenchmark(_NNBenchmarkBase, MLBenchmark):
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(NNBenchmark, self).__init__(task_id, rng, valid_size, data_path)

class NNBenchmarkBB(NNBenchmark):
def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace:
Expand All @@ -129,5 +146,12 @@ def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationS
)
return fidelity_space

class NNBenchmarkMO(_NNBenchmarkBase, MO_MLBenchmark):
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(NNBenchmarkMO, self).__init__(task_id, rng, valid_size, data_path)

__all__ = ['NNBenchmark', 'NNBenchmarkBB', 'NNBenchmarkMF']
__all__ = ['NNBenchmark', 'NNBenchmarkBB', 'NNBenchmarkMF', 'NNBenchmarkMO']
Empty file modified hpobench/benchmarks/ml/pybnn.py
100644 → 100755
Empty file.
45 changes: 32 additions & 13 deletions hpobench/benchmarks/ml/rf_benchmark.py
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,11 +1,3 @@
"""
Changelog:
==========

0.0.1:
* First implementation of the RF Benchmarks.
"""

from copy import deepcopy
from typing import Union, Tuple, Dict

Expand All @@ -14,18 +6,27 @@
from ConfigSpace.hyperparameters import Hyperparameter
from sklearn.ensemble import RandomForestClassifier

from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark
from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark, MO_MLBenchmark

'''
Changelog:
==========
0.0.2:
* Add the multiobjective version of this benchmark by returning val loss, precision, f1 and balanced accuracy

__version__ = '0.0.1'
0.0.1:
* First implementation
'''
__version__ = '0.0.2'


class RandomForestBenchmark(MLBenchmark):
class _RandomForestBenchmarkBase:
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(RandomForestBenchmark, self).__init__(task_id, rng, valid_size, data_path)
super(_RandomForestBenchmarkBase, self).__init__(task_id, rng, valid_size, data_path)

@staticmethod
def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:
Expand Down Expand Up @@ -103,6 +104,13 @@ def init_model(self, config: Union[CS.Configuration, Dict],
)
return model

class RandomForestBenchmark(_RandomForestBenchmarkBase, MLBenchmark):
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(RandomForestBenchmark, self).__init__(task_id, rng, valid_size, data_path)

class RandomForestBenchmarkBB(RandomForestBenchmark):
def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace:
Expand All @@ -122,6 +130,17 @@ def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationS
RandomForestBenchmark._get_fidelity_choices(n_estimators_choice='variable', subsample_choice='fixed')
)
return fidelity_space

class RandomForestBenchmarkMO(_RandomForestBenchmarkBase, MO_MLBenchmark):
def __init__(self,
task_id: int,
rng: Union[np.random.RandomState, int, None] = None,
valid_size: float = 0.33,
data_path: Union[str, None] = None):
super(RandomForestBenchmarkMO, self).__init__(task_id, rng, valid_size, data_path)




__all__ = ['RandomForestBenchmark', 'RandomForestBenchmarkBB', 'RandomForestBenchmarkMF']
__all__ = ['RandomForestBenchmark', 'RandomForestBenchmarkBB', 'RandomForestBenchmarkMF',
'RandomForestBenchmarkMO']
Loading