Skip to content

Commit

Permalink
Merge pull request #950 from qiboteam/fix_fused
Browse files Browse the repository at this point in the history
Fixing bug in `asmatrix_fused` with `CupyBackend`
  • Loading branch information
renatomello authored Jun 26, 2023
2 parents 33f2601 + d1559aa commit 05e2d3c
Show file tree
Hide file tree
Showing 3 changed files with 17 additions and 9 deletions.
3 changes: 2 additions & 1 deletion src/qibo/backends/numpy.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,8 @@ def asmatrix_fused(self, fgate):
for gate in fgate.gates:
# transfer gate matrix to numpy as it is more efficient for
# small tensor calculations
gmatrix = gate.asmatrix(self)
# explicit to_numpy see https://github.com/qiboteam/qibo/issues/928
gmatrix = self.to_numpy(gate.asmatrix(self))
# Kronecker product with identity is needed to make the
# original matrix have shape (2**rank x 2**rank)
eye = np.eye(2 ** (rank - len(gate.qubits)), dtype=self.dtype)
Expand Down
4 changes: 2 additions & 2 deletions tests/test_models_variational.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ def test_qaoa_execution(backend, solver, dense, accel=None):
u = expm(-1j * p * m_matrix)
else:
u = expm(-1j * p * h_matrix)
target_state = u @ target_state
target_state = backend.cast(u) @ target_state

qaoa = models.QAOA(h, mixer=m, solver=solver, accelerators=accel)
qaoa.set_parameters(params)
Expand Down Expand Up @@ -196,7 +196,7 @@ def test_qaoa_callbacks(backend, accelerators):
h_matrix = backend.to_numpy(h.matrix)
m_matrix = backend.to_numpy(qaoa.mixer.matrix)
calc_energy = lambda s: (s.conj() * h_matrix.dot(s)).sum()
target_state = np.copy(state)
target_state = backend.to_numpy(state)
target_energy = [calc_energy(target_state)]
for i, p in enumerate(params):
if i % 2:
Expand Down
19 changes: 13 additions & 6 deletions tests/test_quantum_info_superoperator_transformations.py
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,9 @@ def test_liouville_to_choi(backend, order):
choi = liouville_to_choi(test_superop, order, backend)

axes = [1, 2] if order == "row" else [0, 3]
test_choi = np.reshape(test_superop, [2] * 4).swapaxes(*axes).reshape([4, 4])
test_choi = backend.cast(
np.reshape(test_superop, [2] * 4).swapaxes(*axes).reshape([4, 4])
)

backend.assert_allclose(
backend.calculate_norm(choi - test_choi) < PRECISION_TOL, True
Expand All @@ -324,12 +326,15 @@ def test_liouville_to_choi(backend, order):
@pytest.mark.parametrize("order", ["row", "column"])
def test_choi_to_liouville(backend, order):
    """Check that ``choi_to_liouville`` inverts the Choi reshuffling.

    Builds the expected Choi matrix by hand from the module-level
    ``test_superop`` (a 4x4 Liouville superoperator — assumed defined at
    module scope; not visible in this view), converts it back with
    ``choi_to_liouville``, and verifies the round trip recovers
    ``test_superop`` up to ``PRECISION_TOL``.
    """
    # Reshuffling axes differ by vectorization convention:
    # "row" ordering swaps axes (1, 2); "column" ordering swaps (0, 3).
    axes = [1, 2] if order == "row" else [0, 3]
    # Reshape 4x4 -> 2x2x2x2, swap the convention-dependent axes, and
    # flatten back to 4x4 to obtain the reference Choi matrix.
    # Cast to the backend's native array type (e.g. cupy) so the
    # comparison below stays on a single backend — this cast is the
    # point of this commit (see qiboteam/qibo#928).
    test_choi = backend.cast(
        np.reshape(test_superop, [2] * 4).swapaxes(*axes).reshape([4, 4])
    )

    liouville = choi_to_liouville(test_choi, order=order, backend=backend)

    # test_superop is a host-side numpy array, so it too must be cast
    # before subtraction against the backend-native `liouville`.
    backend.assert_allclose(
        backend.calculate_norm(liouville - backend.cast(test_superop)) < PRECISION_TOL,
        True,
    )


Expand All @@ -343,7 +348,9 @@ def test_choi_to_kraus(
backend, order, validate_cp, test_a0, test_a1, test_kraus_left, test_kraus_right
):
axes = [1, 2] if order == "row" else [0, 3]
test_choi = np.reshape(test_superop, [2] * 4).swapaxes(*axes).reshape([4, 4])
test_choi = backend.cast(
np.reshape(test_superop, [2] * 4).swapaxes(*axes).reshape([4, 4])
)

with pytest.raises(TypeError):
choi_to_kraus(test_choi, str(PRECISION_TOL), backend=backend)
Expand Down Expand Up @@ -888,7 +895,7 @@ def test_reshuffling(backend, order):
reshuffled = _reshuffling(reshuffled, order, backend=backend)

backend.assert_allclose(
np.linalg.norm(reshuffled - test_superop) < PRECISION_TOL, True
np.linalg.norm(reshuffled - backend.cast(test_superop)) < PRECISION_TOL, True
)

axes = [1, 2] if order == "row" else [0, 3]
Expand All @@ -898,5 +905,5 @@ def test_reshuffling(backend, order):
reshuffled = _reshuffling(reshuffled, order, backend=backend)

backend.assert_allclose(
np.linalg.norm(reshuffled - test_choi) < PRECISION_TOL, True
np.linalg.norm(reshuffled - backend.cast(test_choi)) < PRECISION_TOL, True
)

0 comments on commit 05e2d3c

Please sign in to comment.