Add callback to SGD and CMA-ES optimizer #1335

Merged · 10 commits · Jun 5, 2024
28 changes: 23 additions & 5 deletions src/qibo/optimizers.py
@@ -84,13 +84,13 @@ def myloss(parameters, circuit):
                 RuntimeError,
                 "The keyword 'bounds' cannot be used with the cma optimizer. Please use 'options' instead as defined by the cma documentation: ex. options['bounds'] = [0.0, 1.0].",
             )
-        return cmaes(loss, initial_parameters, args, options)
+        return cmaes(loss, initial_parameters, args, callback, options)
     elif method == "sgd":
         from qibo.backends import _check_backend

         backend = _check_backend(backend)

-        return sgd(loss, initial_parameters, args, options, compile, backend)
+        return sgd(loss, initial_parameters, args, callback, options, compile, backend)
     else:
         from qibo.backends import _check_backend
@@ -114,7 +114,7 @@ def myloss(parameters, circuit):
         )


-def cmaes(loss, initial_parameters, args=(), options=None):
+def cmaes(loss, initial_parameters, args=(), callback=None, options=None):
     """Genetic optimizer based on `pycma <https://github.com/CMA-ES/pycma>`_.

     Args:
@@ -123,13 +123,19 @@ def cmaes(loss, initial_parameters, args=(), options=None):
         initial_parameters (np.ndarray): Initial guess for the variational
             parameters.
         args (tuple): optional arguments for the loss function.
+        callback (list[callable]): list of callables called after each
+            optimization iteration. Following the pycma implementation, each
+            callable takes the ``CMAEvolutionStrategy`` instance as argument.
+            See: https://cma-es.github.io/apidocs-pycma/cma.evolution_strategy.CMAEvolutionStrategy.html.
         options (dict): Dictionary with options accepted by the ``cma``
             optimizer. The user can use ``import cma; cma.CMAOptions()`` to view the
             available options.
     """
     import cma

-    r = cma.fmin2(loss, initial_parameters, 1.7, options=options, args=args)
+    r = cma.fmin2(
+        loss, initial_parameters, 1.7, options=options, args=args, callback=callback
+    )
     return r[1].result.fbest, r[1].result.xbest, r
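For reference, pycma's fmin2 accepts a callable or a list of callables and invokes each one after every iteration with the CMAEvolutionStrategy instance. A minimal standalone sketch of that contract (assuming only that pycma is installed; the sphere objective and step size are illustrative, not part of this PR):

import cma

best_losses = []

def record_best(es):
    # pycma calls this after each iteration; es is the CMAEvolutionStrategy
    # instance and es.best.f holds the best loss value found so far.
    best_losses.append(es.best.f)

# fmin2 accepts a single callable or a list of callables for `callback`.
xbest, es = cma.fmin2(
    cma.ff.sphere, [0.5, 0.5], 0.3, options={"verbose": -9}, callback=[record_best]
)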


@@ -213,7 +219,15 @@ def newtonian(
     return m.fun, m.x, m


-def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend=None):
+def sgd(
+    loss,
+    initial_parameters,
+    args=(),
+    callback=None,
+    options=None,
+    compile=False,
+    backend=None,
+):
     """Stochastic Gradient Descent (SGD) optimizer using Tensorflow backpropagation.

     See `tf.keras.Optimizers <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers>`_
@@ -225,6 +239,7 @@ def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend=
         initial_parameters (np.ndarray): Initial guess for the variational
             parameters.
         args (tuple): optional arguments for the loss function.
+        callback (callable): function called after each epoch, taking the current variational parameters as its argument.
         options (dict): Dictionary with options for the SGD optimizer. Supports
             the following keys:

@@ -234,6 +249,7 @@ def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend=
         - ``'nmessage'`` (int, default: ``1e3``): Every how many epochs to print
             a message of the loss function.
     """
+
     if not backend.name == "tensorflow":
         raise_error(RuntimeError, "SGD optimizer requires Tensorflow backend.")

@@ -265,6 +281,8 @@ def opt_step():

     for e in range(sgd_options["nepochs"]):
         l = opt_step()
+        if callback is not None:
+            callback(vparams)
         if e % sgd_options["nmessage"] == 1:
             log.info("ite %d : loss %f", e, l.numpy())

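For reference, the sgd path now invokes callback(vparams) once per epoch with the current variational parameters. A minimal usage sketch through qibo.optimizers.optimize (a sketch only: it assumes the TensorFlow backend is available, and the toy loss and option values are illustrative, not part of this PR):

import numpy as np
import tensorflow as tf
from qibo import set_backend
from qibo.optimizers import optimize

set_backend("tensorflow")

history = []

def track(params):
    # Called once per epoch; store a NumPy copy of the current parameters.
    history.append(np.array(params))

def loss(params):
    # Toy differentiable loss; TensorFlow backpropagates through it.
    return tf.reduce_sum(params**2)

best, params, _ = optimize(
    loss,
    np.random.uniform(-1, 1, 3),
    method="sgd",
    callback=track,
    options={"nepochs": 50, "nmessage": 25},
)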
19 changes: 18 additions & 1 deletion tests/test_models_variational.py
@@ -128,8 +128,23 @@ def test_vqe(backend, method, options, compile, filename):
     np.random.seed(0)
     initial_parameters = np.random.uniform(0, 2 * np.pi, 2 * nqubits * layers + nqubits)
     v = models.VQE(circuit, hamiltonian)
+
+    loss_values = []
+
+    def callback(parameters, loss_values=loss_values, vqe=v):
+        # The cma callback receives a CMAEvolutionStrategy instance,
+        # which stores the best solution found so far in its .best.x attribute.
+        if method == "cma":
+            parameters = parameters.best.x
+        vqe.circuit.set_parameters(parameters)
+        loss_values.append(vqe.hamiltonian.expectation(vqe.circuit().state()))
+
     best, params, _ = v.minimize(
-        initial_parameters, method=method, options=options, compile=compile
+        initial_parameters,
+        method=method,
+        options=options,
+        compile=compile,
+        callback=callback,
     )
     if method == "cma":
         # remove `outcmaes` folder
@@ -139,6 +154,8 @@ def test_vqe(backend, method, options, compile, filename):
     if filename is not None:
         assert_regression_fixture(backend, params, filename)

+    assert best == min(loss_values)
+
     # test energy fluctuation
     state = backend.np.ones(2**nqubits) / np.sqrt(2**nqubits)
     energy_fluctuation = v.energy_fluctuation(state)
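End-to-end, the new keyword mirrors the updated test above. A minimal sketch with the CMA-ES path (the two-qubit RY ansatz and TFIM Hamiltonian are illustrative assumptions, not part of this PR); with method="cma" the callback receives the CMAEvolutionStrategy instance, so the best parameters and loss found so far live in es.best.x and es.best.f:

import numpy as np
from qibo import gates, hamiltonians
from qibo.models import Circuit, VQE

nqubits = 2
circuit = Circuit(nqubits)
for q in range(nqubits):
    circuit.add(gates.RY(q, theta=0.0))

hamiltonian = hamiltonians.TFIM(nqubits, h=1.0)
vqe = VQE(circuit, hamiltonian)

loss_values = []

def callback(es):
    # Track the best loss value after each CMA-ES iteration.
    loss_values.append(es.best.f)

np.random.seed(0)
best, params, _ = vqe.minimize(
    np.random.uniform(0, 2 * np.pi, nqubits), method="cma", callback=callback
)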