Keras Interface #54
base: main
Changes from all commits
Commits: 4619103, 51d6994, 9769548, 456c3ce, bf04905, 9f5575c, b5af92d, bba581d, 05f8d48, 9879ecd, 6f2623e, 2143989, 65fb227, be90968
@@ -127,6 +127,7 @@ venv/
 ENV/
 env.bak/
 venv.bak/
+qiboml/

 # Spyder project settings
 .spyderproject
@@ -0,0 +1,21 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of the existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python Debugger: Current File",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "${file}",
+            "justMyCode": false,
+            "console": "integratedTerminal",
+            "env": {
+                "ON_HEROKU": "0",
+                "PYTEST_ADDOPTS": "-c pytest.ini",
+                "ECHO_SQL_QUERIES": "1"
+            },
+        }
+    ]
+}
Comment on lines +1 to +21:
this is probably some vscode leftover, better to remove it
@@ -0,0 +1,35 @@
+import tensorflow as tf
+import numpy as np
+from qibo import Circuit, gates
+
+
+@tf.custom_gradient
+def custom_operation():
+    # forward pass left unfinished in this draft
+    output = None
+
+    def grad_fn():
+        # backward pass left unfinished in this draft
+        pass
+
+    return output, grad_fn
+
+
+class MyLayer(tf.keras.layers.Layer):
+
+    def __init__(self):
+        super(MyLayer, self).__init__()
+        self.circuit = self.circuit()
+        self.weights = self.add_weight(name="w", shape=(4,), initializer="random_normal")
+
+    def circuit(self):
+        c = Circuit(2)
+        c.add(gates.X(0))
+        c.add(gates.RX(1, theta=0.5))
+        return c
+
+    def call(self, x):
+        self.circuit()
Comment on lines +1 to +35:
this has to be removed as well
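For reference, a minimal working example of the @tf.custom_gradient pattern the draft above was reaching for. This is generic TensorFlow, not qiboml's actual implementation: the decorated function returns the forward value together with a closure that maps the upstream gradient to gradients of the inputs.

    import tensorflow as tf

    @tf.custom_gradient
    def square(x):
        # forward pass
        y = x * x

        def grad_fn(upstream):
            # chain rule: upstream gradient times d(x^2)/dx
            return upstream * 2.0 * x

        return y, grad_fn

    x = tf.constant(3.0)
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = square(x)
    print(tape.gradient(y, x))  # tf.Tensor(6.0, shape=(), dtype=float32)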
@@ -0,0 +1,4 @@
+[pytest]
+env =
+    TESTING=true
+    ENV=local
Comment on lines +1 to +4:
even this has to go I would say
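One note on the snippet above: the env = ini key is provided by the pytest-env plugin; plain pytest silently ignores it. If the variables were genuinely needed, a plugin-free sketch in conftest.py (assuming the tests actually read TESTING and ENV) would be:

    import os

    def pytest_configure(config):
        # mirror the pytest.ini [env] section without the pytest-env plugin
        os.environ.setdefault("TESTING", "true")
        os.environ.setdefault("ENV", "local")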
@@ -2,6 +2,8 @@
 from dataclasses import dataclass

 import numpy as np
+import tensorflow as tf
+import tensorflow.experimental.numpy as tnp
 from qibo import Circuit, gates
 from qibo.config import raise_error

@@ -44,8 +46,9 @@ def __post_init__(
         self._circuit.add(gates.RY(q, theta=0.0, trainable=False))

     def _set_phases(self, x: ndarray):
-        for gate, phase in zip(self._circuit.parametrized_gates, x.ravel()):
-            gate.parameters = phase
+        phase = tf.reshape(x, [-1])

Comment on the tf.reshape line:
we cannot have …

+        for i, gate in enumerate(self._circuit.parametrized_gates):
+            gate.parameters = phase[i]

     def __call__(self, x: ndarray) -> Circuit:
         self._set_phases(x)
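The comment above is cut off, but the concern seems to be TensorFlow-specific calls (tf.reshape) living inside an otherwise framework-agnostic encoding module. A minimal sketch of a neutral alternative, assuming eager tensors (NumPy can consume them via np.asarray):

    import numpy as np

    def _set_phases(self, x):
        # flatten whatever array/tensor arrives and assign one phase per gate
        for gate, phase in zip(self._circuit.parametrized_gates, np.asarray(x).ravel()):
            gate.parameters = phase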
@@ -62,7 +65,15 @@ def __call__(self, x: ndarray) -> Circuit:
                 f"Invalid input dimension {x.shape[-1]}, but the allocated qubits are {self.qubits}.",
             )
         circuit = self.circuit.copy()
-        ones = np.flatnonzero(x.ravel() == 1)
-        for bit in ones:
-            circuit.add(gates.X(self.qubits[bit]))
+
+        def true_fn():
+            circuit.add(gates.X(q))
+
+        def false_fn():
+            tf.no_op()
+
+        for i, q in enumerate(self.qubits):
+            pred = tf.equal(x[0][i], 1)
+            tf.cond(pred, true_fn=true_fn, false_fn=false_fn)

Comment on lines +68 to +78:
Same here, the separation of true and false branches is fine, but you can't use …

         return circuit
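Again the comment is truncated, but a plausible reading is that tf.cond is problematic here: in graph mode both branch functions are traced, so the circuit.add side effect fires regardless of the predicate, and closing over the loop variable q is fragile. A hedged eager-mode sketch that sidesteps tf.cond entirely:

    # assumes eager execution, where a scalar tensor converts cleanly to a Python int
    for i, q in enumerate(self.qubits):
        if int(x[0][i]) == 1:
            circuit.add(gates.X(q))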
@@ -15,7 +15,7 @@

 BACKEND_2_DIFFERENTIATION = {
     "pytorch": "PSR",
-    "tensorflow": None,
+    "tensorflow": "PSR",

Comment on the "tensorflow" entry:
Suggested change
I would say that by default …

     "jax": "PSR",
 }
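The suggested change itself was lost in this export; from the surrounding comment it presumably keeps None as the TensorFlow default, so that native autodiff is used unless PSR is requested explicitly. A sketch of that reading (an assumption, not the reviewer's actual diff):

    BACKEND_2_DIFFERENTIATION = {
        "pytorch": "PSR",
        "tensorflow": None,  # assumed: fall back to TF's own automatic differentiation
        "jax": "PSR",
    }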
@@ -33,29 +33,42 @@ def __post_init__(self):

         params = [p for param in self.circuit.get_parameters() for p in param]
         params = tf.Variable(self.backend.to_numpy(params))
-        self.circuit_parameters = self.add_weight(shape=params.shape, trainable=True)
-        self.set_weights([params])
+        self.circuit_parameters = self.add_weight(
+            shape=params.shape, initializer="random_normal", trainable=True
+        )

Comment on lines +37 to +39:
You don't want to randomly initialize the parameters, actually, but rather use the exact same parameters of the layers you built the model with; for this reason there was a …
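The comment is cut off, but its point stands on its own: the weight should start from the circuit's current parameters rather than from "random_normal". One hedged way to do that (a sketch; the reviewer's exact suggestion is not shown) is a constant initializer:

    params = self.backend.to_numpy(
        [p for param in self.circuit.get_parameters() for p in param]
    )
    self.circuit_parameters = self.add_weight(
        shape=params.shape,
        initializer=tf.constant_initializer(params),  # seed with the existing values
        trainable=True,
    )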
     def call(self, x: tf.Tensor) -> tf.Tensor:
-        if self.backend.name != "tensorflow":
-            pass
-            # @tf.custom_gradient
-            # def custom_call(x: tf.Tensor):
-            #     x = self.backend.cast(np.array(x))
+        if self.backend.platform != "tensorflow":
+            return custom_operation(
+                self.encoding,
+                self.circuit,
+                self.decoding,
+                self.differentiation,
+                self.circuit_parameters,
+                x,
+            )

-        else:
-            self.circuit.set_parameters(self.get_weights()[0])
-            # self.circuit.set_parameters(self.circuit_parameters)
-            x = self.encoding(x) + self.circuit
-            x = self.decoding(x)
-            return x
+        weights = tf.identity(self.circuit_parameters)

Comment on the tf.identity line:
why do you need the …

+        self.circuit.set_parameters(weights)
+
+        output = self.decoding(self.encoding(x) + self.circuit)
+        output = tf.expand_dims(output, axis=0)

Comment on the expand_dims line:
are you sure you always need to expand the dims for each decoding?

+        return output
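Both inline questions above point at simplifications. A sketch combining them: pass the Keras weight straight to set_parameters (a tf.identity copy should not be needed just to read the values), and only add a batch dimension when the decoded output is actually unbatched:

    self.circuit.set_parameters(self.circuit_parameters)
    output = self.decoding(self.encoding(x) + self.circuit)
    if len(output.shape) == 1:  # hypothetical guard; depends on each decoding's output shape
        output = tf.expand_dims(output, axis=0)
    return output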
     def compute_output_shape(
         self,
     ):
         return self.output_shape

+    def draw(
+        self,
+    ):
+        breakpoint()
+        print("ciao")

Comment on lines +66 to +71:
Suggested change
…
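The suggested replacement was not captured; since the stub above only contains debugging leftovers, one hypothetical option is to delegate to qibo's own circuit drawing:

    def draw(self):
        # hypothetical: reuse qibo's Circuit.draw instead of the debug stub
        return self.circuit.draw()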
     @property
     def output_shape(
         self,
@@ -0,0 +1,29 @@
+import qibo
+from qibo import gates, Circuit
+import numpy as np
+import tensorflow as tf
+
+
+def random_subset(nqubits, k):
+    return np.random.choice(range(nqubits), size=(k,), replace=False).tolist()
+
+
+nqubits = 3
+dim = 2
+backend = "tensorflow"
+
+c = Circuit(nqubits)
+c.add(gates.X(0))
+c.add(gates.X(1))
+c.add(gates.Z(1))
+c.add(gates.CNOT(0, 1))
+c.add(gates.RX(0, theta=0.4))
+
+random_choice = random_subset(nqubits, dim)
+print(f"Random choice {random_choice}")
+result = c().probabilities()
+print(result)
+
+
+tensor = tf.random.uniform((2, nqubits), minval=0, maxval=2, dtype=tf.int32)
+print(f"Tensor: {tensor}")
Comment on lines +1 to +29:
if this test covers some case that is not tested in …
@@ -10,12 +10,12 @@
 # backends to be tested
 BACKENDS = [
     "tensorflow",
-    "pytorch",
-    "jax",
+    # "pytorch",
+    # "jax",

Comment on lines +13 to +14:
Suggested change
reactivate these to check that …

 ]

 FRONTENDS = [
-    "pytorch",
+    # "pytorch",

Comment on the line above:
Suggested change
same here

     "keras",
 ]
@@ -0,0 +1,99 @@
+import numpy as np
+import pytest
+import qibo
+import torch
+from qibo import hamiltonians
+from qibo.backends import NumpyBackend, PyTorchBackend
+
+from qibojit.backends import NumbaBackend
+
+from qiboml.models.ansatze import ReuploadingCircuit
+from qiboml.models.decoding import Expectation
+from qiboml.models.encoding import PhaseEncoding
+from qiboml.operations.differentiation import PSR
+
+# TODO: use the classical conftest mechanism or customize mechanism for this test
+EXECUTION_BACKENDS = [NumpyBackend(), PyTorchBackend()]
+
+TARGET_GRAD = np.array([0.130832955241203, 0.0, -1.806316614151001, 0.0])
+
+torch.set_default_dtype(torch.float64)
+torch.set_printoptions(precision=15, sci_mode=False)
+
+
+def construct_x(frontend):
+    if frontend.__name__ == "qiboml.interfaces.pytorch":
+        return frontend.torch.tensor([0.5, 0.8])
+    elif frontend.__name__ == "qiboml.interfaces.keras":
+        return frontend.tf.Variable([0.5, 0.8])
+
+
+def compute_gradient(frontend, model, x):
+    breakpoint()
+    if frontend.__name__ == "qiboml.models.keras":
+        breakpoint()
+        # TODO: check whether this works once the keras interface is introduced
+        with frontend.tf.GradientTape() as tape:
+            breakpoint()
+            expval = model(x)
+        return tape.gradient(expval, model.parameters)
+
+    elif frontend.__name__ == "qiboml.models.pytorch":
+        expval = model(x)
+        expval.backward()
+        # TODO: standardize this output with keras' one and use fewer convolutions
+        grad = np.array(list(model.parameters())[-1].grad)
+        return grad
+
+
+@pytest.mark.parametrize("nshots", [None, 500000])
+@pytest.mark.parametrize("backend", EXECUTION_BACKENDS)
+def test_expval_grad_PSR(frontend, backend, nshots):
+    """
+    Compute the test gradient of < 0 | model^dag observable model | 0 > w.r.t. the
+    model's parameters. In this test the system size is fixed to two qubits and all
+    the parameter/data values are fixed.
+    """
+
+    if frontend.__name__ == "qiboml.interfaces.keras":
+        from qiboml.interfaces.keras import QuantumModel
+    elif frontend.__name__ == "qiboml.interfaces.pytorch":
+        pytest.skip("torch interface not ready.")
+
+    decimals = 6 if nshots is None else 1
+
+    frontend.np.random.seed(42)
+
+    x = construct_x(frontend)
+
+    nqubits = 2
+    obs = hamiltonians.Z(nqubits=nqubits)
+
+    encoding_layer = PhaseEncoding(nqubits=nqubits)
+    training_layer = ReuploadingCircuit(nqubits=nqubits, nlayers=1)
+    decoding_layer = Expectation(
+        nqubits=nqubits,
+        backend=backend,
+        observable=obs,
+        nshots=nshots,
+    )
+
+    nparams = len(training_layer.get_parameters())
+    initial_params = np.linspace(0.0, 2 * np.pi, nparams)
+    training_layer.set_parameters(initial_params)
+
+    q_model = frontend.QuantumModel(
+        encoding=encoding_layer,
+        circuit=training_layer,
+        decoding=decoding_layer,
+        differentiation=PSR(),
+    )
+
+    grad = compute_gradient(frontend, q_model, x)
+
+    assert np.round(grad[0], decimals=decimals) == np.round(
+        TARGET_GRAD[0], decimals=decimals
+    )
+    assert np.round(grad[2], decimals=decimals) == np.round(
+        TARGET_GRAD[2], decimals=decimals
+    )
Comment:
better not to ignore this
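For readers unfamiliar with the differentiation rule under test: the parameter-shift rule (PSR) evaluates the circuit at shifted parameter values instead of differentiating through it. A generic sketch for gates generated by an operator with eigenvalues ±1/2 (e.g. RX, RY, RZ); this is the textbook rule with a placeholder expval_fn, not qiboml's implementation:

    import numpy as np

    def psr_derivative(expval_fn, params, i, shift=np.pi / 2):
        # d<O>/dtheta_i = ( <O>(theta_i + pi/2) - <O>(theta_i - pi/2) ) / 2
        plus, minus = params.copy(), params.copy()
        plus[i] += shift
        minus[i] -= shift
        return (expval_fn(plus) - expval_fn(minus)) / 2.0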