Add callback to SGD and CMA-ES optimizer #1335

Merged · 10 commits · Jun 5, 2024
10 changes: 7 additions & 3 deletions src/qibo/backends/numpy.py
@@ -2,6 +2,7 @@
import math

import numpy as np
+ from scipy import sparse

from qibo import __version__
from qibo.backends import einsum_utils
@@ -119,7 +120,8 @@ def matrix_parametrized(self, gate):

def matrix_fused(self, fgate):
rank = len(fgate.target_qubits)
- matrix = np.eye(2**rank)
+ matrix = sparse.eye(2**rank)

for gate in fgate.gates:
# transfer gate matrix to numpy as it is more efficient for
# small tensor calculations
@@ -141,8 +143,10 @@ def matrix_fused(self, fgate):
gmatrix = np.transpose(gmatrix, transpose_indices)
gmatrix = np.reshape(gmatrix, original_shape)
# fuse the individual gate matrix to the total ``FusedGate`` matrix
- matrix = gmatrix @ matrix
- return self.cast(matrix)
+ # we are using sparse matrices to improve performance
+ matrix = sparse.csr_matrix(gmatrix).dot(matrix)
+
+ return self.cast(matrix.toarray())

def control_matrix(self, gate):
if len(gate.control_qubits) > 1:
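The fused-gate change above starts from a sparse identity and accumulates each gate with a CSR product instead of a dense ``@``, densifying only once at the end. A minimal standalone sketch of the same pattern, assuming the gate matrices are already expanded to the fused rank (function and variable names here are illustrative, not part of the PR):

import numpy as np
from scipy import sparse

def fuse_gate_matrices(gate_matrices, rank):
    # start from a sparse identity, as matrix_fused does above
    matrix = sparse.eye(2**rank)
    for gmatrix in gate_matrices:
        # left-multiply each gate; CSR products skip the zero entries
        matrix = sparse.csr_matrix(gmatrix).dot(matrix)
    # densify once at the end, mirroring self.cast(matrix.toarray())
    return matrix.toarray()

# usage: two single-qubit X gates expanded to a rank-2 fused gate
x = np.array([[0, 1], [1, 0]])
gates = [np.kron(x, np.eye(2)), np.kron(np.eye(2), x)]
print(fuse_gate_matrices(gates, rank=2))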
40 changes: 34 additions & 6 deletions src/qibo/optimizers.py
@@ -84,13 +84,13 @@ def myloss(parameters, circuit):
RuntimeError,
"The keyword 'bounds' cannot be used with the cma optimizer. Please use 'options' instead as defined by the cma documentation: ex. options['bounds'] = [0.0, 1.0].",
)
- return cmaes(loss, initial_parameters, args, options)
+ return cmaes(loss, initial_parameters, args, callback, options)
elif method == "sgd":
from qibo.backends import _check_backend

backend = _check_backend(backend)

- return sgd(loss, initial_parameters, args, options, compile, backend)
+ return sgd(loss, initial_parameters, args, callback, options, compile, backend)
else:
from qibo.backends import _check_backend

@@ -114,7 +114,7 @@ def myloss(parameters, circuit):
)


- def cmaes(loss, initial_parameters, args=(), options=None):
+ def cmaes(loss, initial_parameters, args=(), callback=None, options=None):
"""Genetic optimizer based on `pycma <https://github.com/CMA-ES/pycma>`_.

Args:
@@ -123,14 +123,30 @@ def cmaes(loss, initial_parameters, args=(), options=None):
initial_parameters (np.ndarray): Initial guess for the variational
parameters.
args (tuple): optional arguments for the loss function.
+ callback (callable): function called after each optimization iteration;
+     it receives each candidate solution (an array of parameters) proposed
+     by the underlying ``CMAEvolutionStrategy``.
+     See: https://cma-es.github.io/apidocs-pycma/cma.evolution_strategy.CMAEvolutionStrategy.html.
options (dict): Dictionary with options accepted by the ``cma``
optimizer. The user can use ``import cma; cma.CMAOptions()`` to view the
available options.
"""
import cma

- r = cma.fmin2(loss, initial_parameters, 1.7, options=options, args=args)
- return r[1].result.fbest, r[1].result.xbest, r
+ es = cma.CMAEvolutionStrategy(initial_parameters, sigma0=1.7, inopts=options)
+
+ if callback is not None:
+     while not es.stop():
+         solutions = es.ask()
+         objective_values = [loss(x, *args) for x in solutions]
+         for solution in solutions:
+             callback(solution)
+         es.tell(solutions, objective_values)
+         es.logger.add()
+ else:
+     es.optimize(loss, args=args)
+
+ return es.result.fbest, es.result.xbest, es.result
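The branch above replaces the old ``cma.fmin2`` call with pycma's ask/tell interface so that every candidate solution can be handed to the callback before the strategy is updated. A standalone sketch of the same loop on a toy quadratic objective (the loss function and option values are illustrative):

import cma
import numpy as np

def loss(x):
    # toy quadratic objective standing in for a circuit loss
    return float(np.sum(x**2))

history = []  # one entry per candidate solution

def callback(solution):
    history.append(loss(solution))

es = cma.CMAEvolutionStrategy(np.ones(4), sigma0=1.7, inopts={"maxiter": 5})
while not es.stop():
    solutions = es.ask()                   # sample a population of candidates
    values = [loss(x) for x in solutions]
    for solution in solutions:
        callback(solution)                 # user hook, as in the diff above
    es.tell(solutions, values)             # update the search distribution
print(es.result.fbest, len(history))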


def newtonian(
@@ -213,7 +229,15 @@ def newtonian(
return m.fun, m.x, m


- def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend=None):
+ def sgd(
+     loss,
+     initial_parameters,
+     args=(),
+     callback=None,
+     options=None,
+     compile=False,
+     backend=None,
+ ):
"""Stochastic Gradient Descent (SGD) optimizer using Tensorflow backpropagation.

See `tf.keras.Optimizers <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers>`_
@@ -225,6 +249,7 @@ def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend=
initial_parameters (np.ndarray): Initial guess for the variational
parameters.
args (tuple): optional arguments for the loss function.
+ callback (callable): called after each epoch with the current variational parameters.
options (dict): Dictionary with options for the SGD optimizer. Supports
the following keys:

@@ -234,6 +259,7 @@ def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend=
- ``'nmessage'`` (int, default: ``1e3``): Every how many epochs to print
a message of the loss function.
"""

if not backend.name == "tensorflow":
raise_error(RuntimeError, "SGD optimizer requires Tensorflow backend.")

@@ -265,6 +291,8 @@ def opt_step():

for e in range(sgd_options["nepochs"]):
l = opt_step()
+ if callback is not None:
+     callback(vparams)
if e % sgd_options["nmessage"] == 1:
log.info("ite %d : loss %f", e, l.numpy())

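With ``callback`` threaded through both optimizers, the sgd path now reports the current variational parameters after every epoch. A hedged usage sketch built on the same VQE pieces the tests below use (the ansatz, Hamiltonian, and option values are illustrative; note that sgd requires the TensorFlow backend):

import numpy as np
import qibo
from qibo import gates, hamiltonians, models

qibo.set_backend("tensorflow")  # the sgd optimizer is TensorFlow-only

nqubits = 2
circuit = models.Circuit(nqubits)
circuit.add(gates.RY(q, theta=0.0) for q in range(nqubits))
hamiltonian = hamiltonians.TFIM(nqubits, h=1.0)
vqe = models.VQE(circuit, hamiltonian)

energies = []

def callback(parameters, vqe=vqe):
    # record the energy reached by each epoch's parameters
    vqe.circuit.set_parameters(parameters)
    energies.append(vqe.hamiltonian.expectation(vqe.circuit().state()))

initial = np.random.uniform(0, 2 * np.pi, nqubits)
best, params, _ = vqe.minimize(
    initial, method="sgd", options={"nepochs": 5}, callback=callback
)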
18 changes: 15 additions & 3 deletions tests/test_models_variational.py
@@ -92,7 +92,7 @@ def myloss(parameters, circuit, target):
("BFGS", {"maxiter": 1}, False, "vqe_bfgs.out"),
("parallel_L-BFGS-B", {"maxiter": 1}, True, None),
("parallel_L-BFGS-B", {"maxiter": 1}, False, None),
("cma", {"maxfevals": 2}, False, None),
("cma", {"maxiter": 1}, False, None),
("sgd", {"nepochs": 5}, False, None),
("sgd", {"nepochs": 5}, True, None),
]
@@ -128,8 +128,19 @@ def test_vqe(backend, method, options, compile, filename):
np.random.seed(0)
initial_parameters = np.random.uniform(0, 2 * np.pi, 2 * nqubits * layers + nqubits)
v = models.VQE(circuit, hamiltonian)

+ loss_values = []
+
+ def callback(parameters, loss_values=loss_values, vqe=v):
+     vqe.circuit.set_parameters(parameters)
+     loss_values.append(vqe.hamiltonian.expectation(vqe.circuit().state()))

best, params, _ = v.minimize(
-     initial_parameters, method=method, options=options, compile=compile
+     initial_parameters,
+     method=method,
+     options=options,
+     compile=compile,
+     callback=callback,
)
if method == "cma":
# remove `outcmaes` folder
@@ -138,6 +149,7 @@ def test_vqe(backend, method, options, compile, filename):
shutil.rmtree("outcmaes")
if filename is not None:
assert_regression_fixture(backend, params, filename)
+ assert best == min(loss_values)

# test energy fluctuation
state = backend.np.ones(2**nqubits) / np.sqrt(2**nqubits)
@@ -299,7 +311,7 @@ def __call__(self, x):
test_names = "method,options,compile,filename"
test_values = [
("BFGS", {"maxiter": 1}, False, "aavqe_bfgs.out"),
("cma", {"maxfevals": 2}, False, None),
("cma", {"maxiter": 1}, False, None),
("parallel_L-BFGS-B", {"maxiter": 1}, False, None),
]
