Commit
feat: random things that don't work anyway...
BrunoLiegiBastonLiegi committed Sep 11, 2024
1 parent bdd803d commit f0c8455
Showing 6 changed files with 83 additions and 50 deletions.
1 change: 1 addition & 0 deletions src/qiboml/backends/jax.py
@@ -20,6 +20,7 @@ def __init__(self):
 
         self.np = jnp
         self.tensor_types = (jnp.ndarray, numpy.ndarray)
+        self.matrices.np = jnp
 
     def set_precision(self, precision):
         if precision != self.precision:
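
Note: the one added line points the backend's gate-matrix module at JAX's numpy, so matrices are built as jnp arrays instead of plain numpy ones; presumably this is what lets jax.jacfwd trace through the circuit later in this commit. A minimal sketch of the idea with a hand-rolled RY matrix (illustrative names, not qiboml's API):

    import jax
    import jax.numpy as jnp

    def ry(theta):
        # Built with jnp, so JAX can trace and differentiate through it;
        # a numpy-built matrix would be opaque to jax.jacfwd.
        c, s = jnp.cos(theta / 2), jnp.sin(theta / 2)
        return jnp.array([[c, -s], [s, c]])

    # derivative of RY|0> with respect to theta
    print(jax.jacfwd(lambda t: ry(t) @ jnp.array([1.0, 0.0]))(0.3))
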
12 changes: 8 additions & 4 deletions src/qiboml/models/ansatze.py
@@ -1,6 +1,6 @@
+import random
 from dataclasses import dataclass
 
-import numpy as np
 from qibo import Circuit, gates
 
 from qiboml.models.abstract import QuantumCircuitLayer
@@ -11,9 +11,13 @@ class ReuploadingLayer(QuantumCircuitLayer):
 
     def __post_init__(self):
         super().__post_init__()
-        for q in self.qubits:
-            self.circuit.add(gates.RY(q, theta=0.0))
-            self.circuit.add(gates.RZ(q, theta=0.0))
+        params = self.backend.cast(
+            [[random.random() - 0.5 for _ in range(2)] for _ in range(self.nqubits)],
+            dtype=self.backend.np.float64,
+        )
+        for q, param in zip(self.qubits, params):
+            self.circuit.add(gates.RY(q, theta=param[0] * self.backend.np.pi))
+            self.circuit.add(gates.RZ(q, theta=param[1] * self.backend.np.pi))
        for i, q in enumerate(self.qubits[:-2]):
            self.circuit.add(gates.CNOT(q0=q, q1=self.qubits[i + 1]))
        self.circuit.add(gates.CNOT(q0=self.qubits[-1], q1=self.qubits[0]))
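
Note: the layer now starts from random angles in [-pi/2, pi/2) instead of all zeros, avoiding an identity-only starting circuit. A standalone sketch of the same initialization on a toy 3-qubit circuit, using plain math/random in place of the backend cast:

    import math
    import random

    from qibo import Circuit, gates

    nqubits = 3
    circuit = Circuit(nqubits)
    # one (RY, RZ) angle pair per qubit, each drawn from [-0.5, 0.5) * pi
    params = [[random.random() - 0.5 for _ in range(2)] for _ in range(nqubits)]
    for q, (ry, rz) in enumerate(params):
        circuit.add(gates.RY(q, theta=ry * math.pi))
        circuit.add(gates.RZ(q, theta=rz * math.pi))
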
2 changes: 1 addition & 1 deletion src/qiboml/models/encoding_decoding.py
@@ -42,7 +42,7 @@ def __post_init__(self):
             self.circuit.add(gates.RZ(q, theta=0.0))
 
     def forward(self, x: ndarray) -> Circuit:
-        self.parameters = x
+        self.parameters = x * self.backend.np.pi
         return self.circuit
 
 
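
Note: the encoder now scales each feature by pi before loading it as a rotation angle, so inputs in [-1, 1] span the full [-pi, pi] phase range. A minimal sketch of the same idea with plain qibo (toy 2-qubit circuit, made-up feature values):

    import numpy as np
    from qibo import Circuit, gates

    nqubits = 2
    circuit = Circuit(nqubits)
    for q in range(nqubits):
        circuit.add(gates.RZ(q, theta=0.0))

    x = np.array([0.25, -0.5])         # toy features, assumed in [-1, 1]
    circuit.set_parameters(x * np.pi)  # what forward() now does: theta = x * pi
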
27 changes: 19 additions & 8 deletions src/qiboml/models/pytorch.py
@@ -36,12 +36,16 @@ def __post_init__(self):
             params = (
                 layer.parameters
                 if self.backend.name == "pytorch"
-                else torch.as_tensor(np.array(layer.parameters))
+                else torch.as_tensor(
+                    layer.parameters.tolist(), dtype=self.backend.np.float64
+                )
             )
-            self.register_parameter(
-                layer.__class__.__name__,
-                torch.nn.Parameter(params),
-            )
+            params = torch.nn.Parameter(params)
+            setattr(self, layer.__class__.__name__, params)
+            # self.register_parameter(
+            #     layer.__class__.__name__,
+            #     torch.nn.Parameter(params),
+            # )
         if not isinstance(self.layers[-1], ed.QuantumDecodingLayer):
             raise_error(
                 RuntimeError,
@@ -55,7 +59,8 @@ def forward(self, x: torch.Tensor):
                 x, self.layers, self.backend, self.differentiation, *self.parameters()
             )
         else:
-            x = _run_layers(x, self.layers, self.parameters)
+            breakpoint()
+            x = _run_layers(x, self.layers, list(self.parameters()))
         return x
 
     @property
@@ -98,7 +103,13 @@ def backward(ctx, grad_output: torch.Tensor):
             *parameters,
         ) = ctx.saved_tensors
         gradients = [
-            torch.as_tensor(grad)
+            torch.as_tensor(grad.tolist(), dtype=torch.float64)
             for grad in ctx.differentiation.evaluate(x, ctx.layers)
         ]
-        return grad_output @ gradients[0].T, None, None, None, *gradients
+        return (
+            grad_output @ gradients[0].transpose(-1, -2),
+            None,
+            None,
+            None,
+            *gradients,
+        )
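
Note: two things change here. Parameters are now attached with setattr instead of register_parameter; for a torch.nn.Parameter the two are equivalent, because nn.Module.__setattr__ intercepts Parameter assignments and registers them. And backward switches from .T to transpose(-1, -2), which swaps only the last two axes and so also handles batched Jacobians (on tensors with more than two dimensions, .T reverses all axes). The breakpoint() added to forward is a committed debugging stop, consistent with the commit title. A small sketch of the registration point (toy module, hypothetical name):

    import torch

    class Toy(torch.nn.Module):
        def __init__(self):
            super().__init__()
            p = torch.nn.Parameter(torch.zeros(3, dtype=torch.float64))
            # Equivalent to self.register_parameter("SomeLayer", p):
            # nn.Module.__setattr__ registers Parameter assignments.
            setattr(self, "SomeLayer", p)

    print(dict(Toy().named_parameters()).keys())  # dict_keys(['SomeLayer'])
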
12 changes: 7 additions & 5 deletions src/qiboml/operations/differentiation.py
@@ -62,24 +62,26 @@ def __init__(self):
     def evaluate(self, x: ndarray, layers: list[QuantumCircuitLayer]):
         self._input = x
         self.layers = layers
-        parameters = []
+        parameters, indices = [], []
         for layer in layers:
             if layer.has_parameters:
                 parameters.extend(layer.parameters.ravel())
+                indices.append(len(parameters))
         parameters = jnp.asarray(parameters)
-        breakpoint()
-        return jax.jacfwd(self._run)(parameters)
+        gradients = jax.jacfwd(self._run)(parameters)
+        return [
+            gradients[:, :, i[0] : i[1]].squeeze(0).T
+            for i in list(zip([0] + indices[:-1], indices))
+        ]
 
     def _run(self, parameters):
-        breakpoint()
         grouped_parameters = []
         left_index = right_index = 0
         for layer in self.layers:
             if layer.has_parameters:
                 right_index += len(layer.parameters)
                 grouped_parameters.append(parameters[left_index:right_index])
                 left_index = right_index
-        breakpoint()
         return _run_layers(self._input, self.layers, grouped_parameters)
 
 
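
Note: evaluate() now records the cumulative parameter count per parametrized layer and slices the full forward-mode Jacobian back into per-layer blocks. A toy sketch of just that slicing, assuming a Jacobian of shape (batch, output_dim, total_params) and two layers with 2 and 3 parameters:

    import jax.numpy as jnp

    # stand-in for jax.jacfwd(self._run)(parameters): batch 1, output dim 4, 5 params
    gradients = jnp.arange(20.0).reshape(1, 4, 5)
    indices = [2, 5]  # cumulative parameter counts: layer A has 2, layer B has 3

    chunks = [
        gradients[:, :, start:stop].squeeze(0).T
        for start, stop in zip([0] + indices[:-1], indices)
    ]
    print([c.shape for c in chunks])  # [(2, 4), (3, 4)]
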
79 changes: 47 additions & 32 deletions tests/test_backprop.py
@@ -1,42 +1,57 @@
+import pytest
 import torch
 from qibo import hamiltonians
 from qibo.backends import NumpyBackend, PyTorchBackend
 from qibo.symbols import Z
 
 from qiboml import pytorch as pt
+from qiboml.backends import JaxBackend
 from qiboml.models import ansatze as ans
 from qiboml.models import encoding_decoding as ed
 
-# backend = PyTorchBackend()
-backend = NumpyBackend()
-
-nqubits = 5
-dim = 4
-training_layer = ans.ReuploadingLayer(nqubits, backend=backend)
-encoding_layer = ed.PhaseEncodingLayer(nqubits, backend=backend)
-kwargs = {"backend": backend}
-decoding_qubits = range(nqubits)
-observable = hamiltonians.SymbolicHamiltonian(
-    sum([Z(int(i)) for i in decoding_qubits]),
-    nqubits=nqubits,
-    backend=backend,
-)
-kwargs["observable"] = observable
-kwargs["analytic"] = True
-decoding_layer = ed.ExpectationLayer(nqubits, decoding_qubits, **kwargs)
-q_model = pt.QuantumModel(
-    layers=[
-        encoding_layer,
-        training_layer,
-        decoding_layer,
-    ],
-    differentiation="Jax",
-)
-data = torch.randn(1, 5)
-# data.requires_grad = True
-out = q_model(data)
-print(out.requires_grad)
-loss = (out - 1.0) ** 2
-print(loss.requires_grad)
-loss.backward()
-print(loss)
+@pytest.mark.parametrize("backend", [JaxBackend(), PyTorchBackend()])
+@pytest.mark.parametrize("differentiation", ["Jax", "PSR"])
+def test_backpropagation(backend, differentiation):
+    nqubits = 5
+    dim = 4
+    training_layer = ans.ReuploadingLayer(nqubits, backend=backend)
+    encoding_layer = ed.PhaseEncodingLayer(nqubits, backend=backend)
+    kwargs = {"backend": backend}
+    decoding_qubits = range(nqubits)
+    observable = hamiltonians.SymbolicHamiltonian(
+        sum([Z(int(i)) for i in decoding_qubits]),
+        nqubits=nqubits,
+        backend=backend,
+    )
+    kwargs["observable"] = observable
+    kwargs["analytic"] = True
+    decoding_layer = ed.ExpectationLayer(nqubits, decoding_qubits, **kwargs)
+    q_model = pt.QuantumModel(
+        layers=[
+            encoding_layer,
+            training_layer,
+            decoding_layer,
+        ],
+        differentiation=differentiation,
+    )
+    encoding = torch.nn.Linear(1, nqubits)
+    model = torch.nn.Sequential(encoding, q_model)
+    # try to fit a parabola
+    x = torch.randn(10, 1)
+    y = x**2
+
+    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
+    for input, target in zip(x, y):
+        optimizer.zero_grad()
+        output = model(input)
+        loss = (target - output) ** 2
+        params_bkp = torch.cat(tuple(p.ravel() for p in model.parameters()))
+        print(f"> loss: {loss}")
+        loss.backward()
+        optimizer.step()
+        print(
+            f"> Parameters delta: {torch.cat(tuple(p.ravel() for p in model.parameters())) - params_bkp}"
+        )
+
+    assert False
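
Note: the module-level smoke test becomes a parametrized pytest case that trains a hybrid torch.nn.Sequential (a Linear encoder followed by the quantum model) to fit a parabola with per-sample SGD; the trailing assert False makes the test fail, presumably so pytest surfaces the printed losses and parameter deltas. For reference, a classical stand-in with the same loop shape (no qibo required):

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(1, 5), torch.nn.Linear(5, 1))
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
    x = torch.randn(10, 1)
    y = x**2  # target: a parabola

    for inp, target in zip(x, y):
        optimizer.zero_grad()
        loss = (target - model(inp)) ** 2
        loss.backward()
        optimizer.step()
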
