feat: random things that don't work anyway...
1 parent bdd803d · commit f0c8455
Showing 6 changed files with 83 additions and 50 deletions.
@@ -1,42 +1,57 @@
import pytest
import torch
from qibo import hamiltonians
from qibo.backends import NumpyBackend, PyTorchBackend
from qibo.symbols import Z

from qiboml import pytorch as pt
from qiboml.backends import JaxBackend
from qiboml.models import ansatze as ans
from qiboml.models import encoding_decoding as ed

# backend = PyTorchBackend()
backend = NumpyBackend()

nqubits = 5
dim = 4
training_layer = ans.ReuploadingLayer(nqubits, backend=backend)
encoding_layer = ed.PhaseEncodingLayer(nqubits, backend=backend)
kwargs = {"backend": backend}
decoding_qubits = range(nqubits)
observable = hamiltonians.SymbolicHamiltonian(
    sum([Z(int(i)) for i in decoding_qubits]),
    nqubits=nqubits,
    backend=backend,
)
kwargs["observable"] = observable
kwargs["analytic"] = True
decoding_layer = ed.ExpectationLayer(nqubits, decoding_qubits, **kwargs)
q_model = pt.QuantumModel(
    layers=[
        encoding_layer,
        training_layer,
        decoding_layer,
    ],
    differentiation="Jax",
)
data = torch.randn(1, 5)
# data.requires_grad = True
out = q_model(data)
print(out.requires_grad)
loss = (out - 1.0) ** 2
print(loss.requires_grad)
loss.backward()
print(loss)
@pytest.mark.parametrize("backend", [JaxBackend(), PyTorchBackend()])
@pytest.mark.parametrize("differentiation", ["Jax", "PSR"])
def test_backpropagation(backend, differentiation):
    nqubits = 5
    dim = 4
    training_layer = ans.ReuploadingLayer(nqubits, backend=backend)
    encoding_layer = ed.PhaseEncodingLayer(nqubits, backend=backend)
    kwargs = {"backend": backend}
    decoding_qubits = range(nqubits)
    observable = hamiltonians.SymbolicHamiltonian(
        sum([Z(int(i)) for i in decoding_qubits]),
        nqubits=nqubits,
        backend=backend,
    )
    kwargs["observable"] = observable
    kwargs["analytic"] = True
    decoding_layer = ed.ExpectationLayer(nqubits, decoding_qubits, **kwargs)
    q_model = pt.QuantumModel(
        layers=[
            encoding_layer,
            training_layer,
            decoding_layer,
        ],
        differentiation=differentiation,
    )
    # classical pre-processing layer feeding the quantum model
    encoding = torch.nn.Linear(1, nqubits)
    model = torch.nn.Sequential(encoding, q_model)
    # try to fit a parabola
    x = torch.randn(10, 1)
    y = x**2

    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
    for input, target in zip(x, y):
        optimizer.zero_grad()
        output = model(input)
        loss = (target - output) ** 2
        # snapshot of the parameters before the update, to check that they actually move
        params_bkp = torch.cat(tuple(p.ravel() for p in model.parameters()))
        print(f"> loss: {loss}")
        loss.backward()
        optimizer.step()
        print(
            f"> Parameters delta: {torch.cat(tuple(p.ravel() for p in model.parameters())) - params_bkp}"
        )

    # left failing for now (work-in-progress commit)
    assert False
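
For reference, a minimal sketch of how the new parametrized test could be run programmatically once this commit is checked out. `pytest.main`, `-k`, and `-s` are standard pytest; where the command is launched from and how the changed file is discovered are assumptions, since the file's path is not shown in this diff.

# Minimal sketch (assumptions noted above): select the new test by name and
# keep stdout visible so the printed loss and parameter deltas can be inspected.
import pytest

exit_code = pytest.main(["-k", "test_backpropagation", "-s"])
print(exit_code)  # non-zero here, since the test currently ends with `assert False`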