Skip to content

Commit

Permalink
Merge pull request #46 from LemurPwned/optimization
Browse files Browse the repository at this point in the history
1.4.1 release
  • Loading branch information
LemurPwned authored Jan 5, 2024
2 parents 55315f2 + 734b20c commit 52ea53e
Show file tree
Hide file tree
Showing 11 changed files with 373 additions and 163 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ jobs:
strategy:
matrix:
os: [macos-latest, windows-latest]
python-version: ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11']
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v2
- name: Set up Python
Expand Down
6 changes: 4 additions & 2 deletions .github/workflows/python-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,17 @@ on:
push:
branches: [ "master" ]
paths-ignore:
- 'README.md'
- '**.yaml'
- '**.md'
- 'docs/**'
- 'examples/**'
- LICENSE
- mkdocs.yml
pull_request:
branches: [ "master" ]
paths-ignore:
- 'README.md'
- '**.yaml'
- '**.md'
- 'docs/**'
- 'examples/**'
- LICENSE
Expand Down
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
# Changelog

# 1.4.1

- Adding a basic optimization script in the `optimization` module.
- Streamlit optimization updates.

# 1.4.0

- Adding new, dynamic symbolic model compatible with `Solver` class. It is now possible to use the `Solver` class with `LayerDynamic` to solve the LLG equation.
Expand Down
76 changes: 0 additions & 76 deletions CMTJ.md

This file was deleted.

95 changes: 85 additions & 10 deletions cmtj/utils/optimization.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,17 @@
from typing import Callable, Dict
from concurrent.futures import ProcessPoolExecutor
from typing import Callable, Dict, List

import numpy as np
from tqdm import tqdm


def coordinate_descent(
    operating_point: Dict[str, float],
    fn: Callable,
    best_mse: float = float("-inf"),
    granularity: int = 10,
    percentage: float = 0.05,
):
    """Performs coordinate descent on the operating point.

    Each parameter is varied independently on a linear grid around its
    original value and the candidate maximising ``fn`` is kept. Note that
    each coordinate is explored from the *original* operating point, not
    from the running best (one sweep, no compounding).

    :param operating_point: operating point to be optimised. Order of that dict matters.
    :param fn: function to be optimised; called as ``fn(**params)``, its
        return value is maximised.
    :param best_mse: current best score; only candidates scoring strictly
        above it are accepted.
    :param granularity: number of grid samples tested per parameter.
    :param percentage: relative half-width of the search interval per parameter.
    :return: tuple ``(opt_params, best_mse)`` — best parameter dict and its score.
    """
    opt_params = operating_point
    for k, org_v in tqdm(operating_point.items(), desc="Coordinate descent"):
        new_params = operating_point.copy()
        for v in tqdm(
                np.linspace((1 - percentage) * org_v, (1 + percentage) * org_v,
                            granularity),
                desc=f"Optimising {k}",
                leave=False,
        ):
            new_params[k] = v
            mse = fn(**new_params)
            if mse > best_mse:
                # Copy is essential: ``new_params`` keeps being mutated by this
                # loop, so aliasing it would silently replace the best point
                # with the *last* value tried for coordinate ``k``.
                opt_params = new_params.copy()
                best_mse = mse
    return opt_params, best_mse


def multiprocess_simulate(
    fn: Callable,
    error_fn: Callable,
    suggestions: List[dict],
    target: np.ndarray,
    fixed_parameters: dict,
):
    """Runs ``fn`` once per suggestion in parallel processes and scores results.

    :param fn: simulation function, called as ``fn(**fixed_parameters, **suggestion)``
        in a worker process — it must be picklable (e.g. a module-level function).
    :param error_fn: scoring function, called as ``error_fn(target, result)`` in the
        parent process.
    :param suggestions: list of per-run parameter dicts; one worker per suggestion.
    :param target: target data forwarded to ``error_fn``.
    :param fixed_parameters: parameters shared by every run.
    :return: 1-D numpy array of errors aligned with ``suggestions``.
    """
    # Guard: ProcessPoolExecutor raises ValueError for max_workers == 0.
    if not suggestions:
        return np.zeros(0)
    errors = np.zeros(len(suggestions))
    with ProcessPoolExecutor(max_workers=len(suggestions)) as executor:
        futures = [
            executor.submit(
                fn,
                **fixed_parameters,
                **suggestion,
            ) for suggestion in suggestions
        ]
        # Futures are consumed in submission order so errors[j] matches suggestions[j].
        for j, future in enumerate(futures):
            result = future.result()
            errors[j] = error_fn(target, result)
    return errors


def hebo_optimization_loop(
    cfg: dict,
    fn: Callable,
    error_fn: Callable,
    target: np.ndarray,
    fixed_parameters: dict,
    n_iters: int = 150,
    n_suggestions: int = 8,
):
    """Optimizes the parameters of a function using HEBO.
    See HEBO documentation for more details: https://github.com/huawei-noah/HEBO
    :param cfg: configuration of the design space
    :param fn: function to be optimised fn(**parameters, **fixed_parameters)
    :param error_fn: function to compute the error: error_fn(target, result)
    :param target: target data
    :param fixed_parameters: parameters that are fixed
    :param n_iters: number of iterations
    :param n_suggestions: number of suggestions per iteration
    :return: the fitted HEBO optimiser; best point is available via ``opt.best_x``.
    :raises ImportError: if HEBO is not installed.
    """
    # Deferred import keeps HEBO an optional dependency of the package.
    try:
        from hebo.design_space.design_space import DesignSpace
        from hebo.optimizers.hebo import HEBO
    except ImportError as e:
        raise ImportError(
            "HEBO is not installed. Please install it with `pip install HEBO`"
        ) from e
    space = DesignSpace().parse(cfg)
    opt = HEBO(space)
    best_mse = float("inf")
    # Pre-bind so a loop that never improves (e.g. all-NaN errors make
    # ``val < best_mse`` False) cannot hit an unbound-variable NameError.
    best_params: dict = {}
    for i in tqdm(range(1, n_iters + 1), desc="HEBO optimization loop"):
        rec = opt.suggest(n_suggestions)
        errors = multiprocess_simulate(
            fn=fn,
            error_fn=error_fn,
            suggestions=rec.to_dict(orient="records"),
            target=target,
            fixed_parameters=fixed_parameters,
        )
        opt.observe(rec, errors)
        val = opt.y.min()
        if val < best_mse:
            best_mse = val
            best_params = opt.best_x.iloc[0].to_dict()
            # NOTE(review): the flattened diff makes the original indentation of
            # these prints ambiguous — assumed to report only on improvement.
            print(f"iteration {i} best mse {best_mse}")
            print(best_params)
    return opt
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from setuptools import Extension, find_namespace_packages, setup
from setuptools.command.build_ext import build_ext

__version__ = "1.4.0"
__version__ = "1.4.1"
"""
As per
https://github.com/pybind/python_example
Expand Down
Loading

0 comments on commit 52ea53e

Please sign in to comment.