diff --git a/CHANGELOG.md b/CHANGELOG.md index 574daba4..5bb89680 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,10 +13,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Variables, functions, and classes are now named according to PEP 8. ## [0.2.16] - 2024-08-26 -This version introduces several important interface changes, aimed at secure expression and improved code maintainability. +This version introduces several important interface changes, aimed at secure expression and improved code maintainability. ### Added @@ -58,7 +59,7 @@ This version introduces several important interface changes, aimed at secure exp ### Added - Transpiled circuits can now have "measure" gates, introduced with - the `circ.m(qubit, plane, angle)` method. The measured qubit cannot + the `circ.m(qubit, plane, angle)` method. The measured qubit cannot be used in any subsequent gate. - Added `gflow.find_pauliflow`, `gflow.verify_pauliflow` and `pauliflow_from_pattern` methods (#117) - Pauli-flow finding algorithm (#117) @@ -106,12 +107,12 @@ This version introduces several important interface changes, aimed at secure exp - Renamed methods; `gflow.flow` and `gflow.gflow` are now `gflow.find_flow` and `gflow.find_gflow`, respectively. - `Pattern.seq` is renamed into a private field `Pattern.__seq` and - `Pattern.Nnode` is now a read-only property. `Pattern` constructor + `Pattern.Nnode` is now a read-only property. `Pattern` constructor now only takes an optional list of `input_nodes`, and can only be updated via `add` and `extend`. `Pattern` are now iterable and `len` is now defined for patterns: we should write `for command in pattern:` instead of `for command in pattern.seq:` and `len(pattern)` instead - of `len(pattern.seq)`. `N` commands are no longer added by `Pattern` + of `len(pattern.seq)`. `N` commands are no longer added by `Pattern` constructor and should be added explicitly after the instantiation. - Changed the behavior of visualization in the `GraphVisualizer` class. Prepared a `visualize` method that visualizes based on the graph only, @@ -128,8 +129,8 @@ This version introduces several important interface changes, aimed at secure exp - Added `rustworkx` as a backend for the graph state simulator - Only `networkx` backend was available for pattern optimization. - By setting the `use_rustworkx` option to True while using `Pattern.perform_pauli_measurements()`, - graphix will run pattern optimization using `rustworkx` (#98) + By setting the `use_rustworkx` option to True while using `Pattern.perform_pauli_measurements()`, + graphix will run pattern optimization using `rustworkx` (#98) - Added `.ccx` and `.swap` methods to `graphix.Circuit`. 
### Fixed @@ -183,7 +184,7 @@ This version introduces several important interface changes, aimed at secure exp ### Changed -- bump networkx version to 3.* (#82) +- bump networkx version to 3.\* (#82) ## [0.2.5] - 2023-08-17 @@ -270,7 +271,7 @@ This version introduces several important interface changes, aimed at secure exp ### Fixed -- nested array error in numpy 1.24 (deprecated from 1.23.*) fixed and numpy version changed in requirements.txt (#7) +- nested array error in numpy 1.24 (deprecated from 1.23.\*) fixed and numpy version changed in requirements.txt (#7) - circuit.standardize_and_transpile() error fixed (#9) ## [0.1.0] - 2022-12-15 diff --git a/benchmarks/statevec.py b/benchmarks/statevec.py index 0464941d..3015f728 100644 --- a/benchmarks/statevec.py +++ b/benchmarks/statevec.py @@ -20,8 +20,8 @@ import numpy as np from paddle import to_tensor from paddle_quantum.mbqc.qobject import Circuit as PaddleCircuit -from paddle_quantum.mbqc.simulator import MBQC as PaddleMBQC -from paddle_quantum.mbqc.transpiler import transpile as PaddleTranspile +from paddle_quantum.mbqc.simulator import MBQC as PaddleMBQC  # noqa: N811 +from paddle_quantum.mbqc.transpiler import transpile as paddle_transpile from graphix import Circuit @@ -127,7 +127,7 @@ def translate_graphix_rc_into_paddle_quantum_circuit(graphix_circuit: Circuit) - for width in test_cases_for_paddle_quantum: graphix_circuit = graphix_circuits[width] paddle_quantum_circuit = translate_graphix_rc_into_paddle_quantum_circuit(graphix_circuit) - pat = PaddleTranspile(paddle_quantum_circuit) + pat = paddle_transpile(paddle_quantum_circuit) mbqc = PaddleMBQC() mbqc.set_pattern(pat) start = perf_counter() diff --git a/docs/source/lc-mbqc.rst b/docs/source/lc-mbqc.rst index 4c10caed..624accf4 100644 --- a/docs/source/lc-mbqc.rst +++ b/docs/source/lc-mbqc.rst @@ -80,18 +80,18 @@ Furthermore, we can toggle through `equivalent graphs`, graph states with differ :align: center :alt: equivalent graphs -These graphs were generatetd using :class:`~graphix.graphsim.GraphState`, which has two methods to generate equivalent graphs, :meth:`~graphix.graphsim.GraphState.equivalent_graph_E1` and :meth:`~graphix.graphsim.GraphState.equivalent_graph_E2`, which have different conditions for applying them. For this graph, we can use :meth:`~graphix.graphsim.GraphState.equivalent_graph_E2` to any connected nodes since the graph is loopless. +These graphs were generated using :class:`~graphix.graphsim.GraphState`, which has two methods to generate equivalent graphs, :meth:`~graphix.graphsim.GraphState.equivalent_graph_e1` and :meth:`~graphix.graphsim.GraphState.equivalent_graph_e2`, which have different conditions for applying them. For this graph, we can apply :meth:`~graphix.graphsim.GraphState.equivalent_graph_e2` to any pair of connected nodes since the graph is loopless. .. code-block:: python # series of equivalent graph transformations g = GraphState(nodes=[0,1,2,3],edges=[(0,1),(1,2),(2,3),(3,1)]) # leftmost graph state1 = g.to_statevector() - g.equivalent_graph_E2(0, 1) # second graph + g.equivalent_graph_e2(0, 1) # second graph state2 = g.to_statevector() - g.equivalent_graph_E2(2, 0) # third graph + g.equivalent_graph_e2(2, 0) # third graph state3 = g.to_statevector() - g.equivalent_graph_E2(0, 3) # rightmost graph + g.equivalent_graph_e2(0, 3) # rightmost graph state4 = g.to_statevector() checking that states 1-4 all are the same up to global phase: @@ -144,5 +144,3 @@ References and notes .. 
[#graph] In fact, it is known that all stabilizer state can be represented by graph states up to local (single-qubit) Clifford operations. .. [#el] Elliot `et al`., `J. Phys. A 43, 025301 (2010) `_ and `PRA 77, 042307 (2008) `_. We note that there are numerous stabilizer simulators available, but this graph simulator formulation by Elliot `et al.` is suitable for optimizing MBQC for three reasons: 1. this is a direct simulator of graph states, 2. the local-Clifford decoration is expressed by up to one H, S and Z gates, which are easier to handle than all 24 possible single-qubit Clifford gates, and 3. this has a method to toggle through all possible equivalent graphs (LC decorated graphs representing exactly the same stabilizer state), to minimize the connectivity of the graph state (to minimize the complexity of MBQC operation and classical simulation). - - diff --git a/examples/deutsch-jozsa.py b/examples/deutsch_jozsa.py similarity index 100% rename from examples/deutsch-jozsa.py rename to examples/deutsch_jozsa.py diff --git a/examples/MBQCvqe.py b/examples/mbqc_vqe.py similarity index 96% rename from examples/MBQCvqe.py rename to examples/mbqc_vqe.py index 65b271a6..9f97b715 100644 --- a/examples/MBQCvqe.py +++ b/examples/mbqc_vqe.py @@ -25,14 +25,14 @@ from graphix import Circuit from graphix.simulator import PatternSimulator +Z = np.array([[1, 0], [0, -1]]) +X = np.array([[0, 1], [1, 0]]) + # %% # Define the Hamiltonian for the VQE problem (Example: H = Z0Z1 + X0 + X1) def create_hamiltonian(): - Z = np.array([[1, 0], [0, -1]]) - X = np.array([[0, 1], [1, 0]]) - H = np.kron(Z, Z) + np.kron(X, np.eye(2)) + np.kron(np.eye(2), X) - return H + return np.kron(Z, Z) + np.kron(X, np.eye(2)) + np.kron(np.eye(2), X) # %% diff --git a/examples/qnn.py b/examples/qnn.py index a9eab469..74a66530 100644 --- a/examples/qnn.py +++ b/examples/qnn.py @@ -25,6 +25,9 @@ np.random.seed(0) + +Z_OP = np.array([[1, 0], [0, -1]]) + # %% # Dataset # ----------------- @@ -73,7 +76,6 @@ def __init__(self, n_qubits, n_layers, n_features): assert n_features % 3 == 0, "n_features must be a multiple of 3" # Pauli Z operator on all qubits - Z_OP = np.array([[1, 0], [0, -1]]) operator = [Z_OP] * self.n_qubits self.obs = reduce(np.kron, operator) self.cost_values = [] # to store cost values during optimization @@ -154,7 +156,7 @@ def get_expectation_value(self, sv): Calculates the expectation value of an PauliZ obeservable given a state vector. Args: - sv: sSate vector represented as a numpy array. + sv: State vector represented as a numpy array. Returns: the expectation value of a quantum observable. 
diff --git a/examples/rotation.py b/examples/rotation.py index e69cd7ec..6af58644 100644 --- a/examples/rotation.py +++ b/examples/rotation.py @@ -71,8 +71,8 @@ # Let us compare with statevector simulation of the original circuit: state = Statevec(nqubit=2, data=BasicStates.ZERO) # starts with |0> states -state.evolve_single(Ops.Rx(theta[0]), 0) -state.evolve_single(Ops.Rx(theta[1]), 1) +state.evolve_single(Ops.rx(theta[0]), 0) +state.evolve_single(Ops.rx(theta[1]), 1) print("overlap of states: ", np.abs(np.dot(state.psi.flatten().conjugate(), out_state.psi.flatten()))) # %% diff --git a/graphix/channels.py b/graphix/channels.py index 80696621..374f26c3 100644 --- a/graphix/channels.py +++ b/graphix/channels.py @@ -124,10 +124,10 @@ def pauli_channel(px: float, py: float, pz: float) -> KrausChannel: """ if px + py + pz > 1: raise ValueError("The sum of probabilities must not exceed 1.") - pI = 1 - px - py - pz + p_i = 1 - px - py - pz return KrausChannel( [ - {"coef": np.sqrt(1 - pI), "operator": np.eye(2)}, + {"coef": np.sqrt(1 - p_i), "operator": np.eye(2)}, {"coef": np.sqrt(px / 3.0), "operator": Ops.x}, {"coef": np.sqrt(py / 3.0), "operator": Ops.y}, {"coef": np.sqrt(pz / 3.0), "operator": Ops.z}, diff --git a/graphix/gflow.py b/graphix/gflow.py index b0ead7a9..2b85ea18 100644 --- a/graphix/gflow.py +++ b/graphix/gflow.py @@ -329,8 +329,8 @@ def flowaux( c_prime = set() for q in v_c: - N = search_neighbor(q, edges) - p_set = N & (nodes - oset) + nb = search_neighbor(q, edges) + p_set = nb & (nodes - oset) if len(p_set) == 1: # Iterate over p_set assuming there is only one element p (p,) = p_set @@ -412,12 +412,12 @@ def find_pauliflow( check_meas_planes(meas_planes) l_k = dict() p = dict() - Lx, Ly, Lz = get_pauli_nodes(meas_planes, meas_angles) + l_x, l_y, l_z = get_pauli_nodes(meas_planes, meas_angles) for node in graph.nodes: if node in oset: l_k[node] = 0 - return pauliflowaux(graph, iset, oset, meas_planes, 0, set(), oset, l_k, p, (Lx, Ly, Lz), mode) + return pauliflowaux(graph, iset, oset, meas_planes, 0, set(), oset, l_k, p, (l_x, l_y, l_z), mode) def pauliflowaux( @@ -430,7 +430,7 @@ def pauliflowaux( solved_nodes: set[int], l_k: dict[int, int], p: dict[int, set[int]], - L: tuple[set[int], set[int], set[int]], + ls: tuple[set[int], set[int], set[int]], mode: str = "single", ): """Function to find one layer of the Pauli flow. @@ -457,8 +457,8 @@ def pauliflowaux( layers obtained by gflow algorithm. l_k[d] is a node set of depth d. p: dict Pauli flow function. p[i] is the set of qubits to be corrected for the measurement of qubit i. - L: tuple - L = (Lx, Ly, Lz) where Lx, Ly, Lz are sets of qubits whose measurement operators are X, Y, Z, respectively. + ls: tuple + ls = (l_x, l_y, l_z) where l_x, l_y, l_z are sets of qubits whose measurement operators are X, Y, Z, respectively. mode: str(optional) The Pauliflow finding algorithm can yield multiple equivalent solutions. So there are three options - "single": Returrns a single solution @@ -473,7 +473,7 @@ def pauliflowaux( l_k: dict layers obtained by Pauli flow algorithm. l_k[d] is a node set of depth d. 
""" - Lx, Ly, Lz = L + l_x, l_y, l_z = ls solved_update = set() nodes = set(graph.nodes) if oset == nodes: @@ -486,10 +486,10 @@ def pauliflowaux( node_order_row_lower = node_order_list.copy() node_order_col = node_order_list.copy() - Pbar = correction_candidate | Ly | Lz - P = nodes - Pbar - K = (correction_candidate | Lx | Ly) & (nodes - iset) - Y = Ly - correction_candidate + p_bar = correction_candidate | l_y | l_z + pset = nodes - p_bar + kset = (correction_candidate | l_x | l_y) & (nodes - iset) + yset = l_y - correction_candidate for node in unsolved_nodes: adj_mat_ = adj_mat.copy() @@ -497,13 +497,13 @@ def pauliflowaux( node_order_row_ = node_order_row.copy() node_order_row_lower_ = node_order_row_lower.copy() node_order_col_ = node_order_col.copy() - for node_ in nodes - (P | {node}): + for node_ in nodes - (pset | {node}): adj_mat_.remove_row(node_order_row_.index(node_)) node_order_row_.remove(node_) - for node_ in nodes - (Y - {node}): + for node_ in nodes - (yset - {node}): adj_mat_w_id_.remove_row(node_order_row_lower_.index(node_)) node_order_row_lower_.remove(node_) - for node_ in nodes - (K - {node}): + for node_ in nodes - (kset - {node}): adj_mat_.remove_col(node_order_col_.index(node_)) adj_mat_w_id_.remove_col(node_order_col_.index(node_)) node_order_col_.remove(node_) @@ -516,122 +516,122 @@ def pauliflowaux( p[node] = list() solved = False - if meas_planes[node] == graphix.pauli.Plane.XY or node in Lx or node in Ly: - S = MatGF2(np.zeros((len(node_order_row_), 1), dtype=int)) - S.data[node_order_row_.index(node), :] = 1 - S_lower = MatGF2(np.zeros((len(node_order_row_lower_), 1), dtype=int)) - S.concatenate(S_lower, axis=0) - adj_mat_XY, S, _, col_permutation_XY = adj_mat_.forward_eliminate(S, copy=True) - x_XY, kernels = adj_mat_XY.backward_substitute(S) - - if 0 not in x_XY.shape and x_XY[0, 0] != sp.nan: + if meas_planes[node] == graphix.pauli.Plane.XY or node in l_x or node in l_y: + mat = MatGF2(np.zeros((len(node_order_row_), 1), dtype=int)) + mat.data[node_order_row_.index(node), :] = 1 + mat_lower = MatGF2(np.zeros((len(node_order_row_lower_), 1), dtype=int)) + mat.concatenate(mat_lower, axis=0) + adj_mat_xy, mat, _, col_permutation_xy = adj_mat_.forward_eliminate(mat, copy=True) + x_xy, kernels = adj_mat_xy.backward_substitute(mat) + + if 0 not in x_xy.shape and x_xy[0, 0] != sp.nan: solved_update |= {node} - x_XY = x_XY[:, 0] + x_xy = x_xy[:, 0] l_k[node] = k if mode == "single": - sol_list = [x_XY[i].subs(zip(kernels, [sp.false] * len(kernels))) for i in range(len(x_XY))] + sol_list = [x_xy[i].subs(zip(kernels, [sp.false] * len(kernels))) for i in range(len(x_xy))] sol = np.array(sol_list) sol_index = sol.nonzero()[0] - p[node] = set(node_order_col_[col_permutation_XY.index(i)] for i in sol_index) + p[node] = set(node_order_col_[col_permutation_xy.index(i)] for i in sol_index) solved = True elif mode == "all": binary_combinations = product([0, 1], repeat=len(kernels)) for binary_combination in binary_combinations: - sol_list = [x_XY[i].subs(zip(kernels, binary_combination)) for i in range(len(x_XY))] + sol_list = [x_xy[i].subs(zip(kernels, binary_combination)) for i in range(len(x_xy))] sol = np.array(sol_list) sol_index = sol.nonzero()[0] - p_i = set(node_order_col_[col_permutation_XY.index(i)] for i in sol_index) + p_i = set(node_order_col_[col_permutation_xy.index(i)] for i in sol_index) p[node].add(frozenset(p_i)) elif mode == "abstract": p_i = dict() - for i in range(len(x_XY)): - node_temp = node_order_col_[col_permutation_XY.index(i)] - 
p_i[node_temp] = x_XY[i] + for i in range(len(x_xy)): + node_temp = node_order_col_[col_permutation_xy.index(i)] + p_i[node_temp] = x_xy[i] p[node].append(p_i) - if not solved and (meas_planes[node] == graphix.pauli.Plane.XZ or node in Lz or node in Lx): - S = MatGF2(np.zeros((len(node_order_row_), 1), dtype=int)) - S.data[node_order_row_.index(node)] = 1 + if not solved and (meas_planes[node] == graphix.pauli.Plane.XZ or node in l_z or node in l_x): + mat = MatGF2(np.zeros((len(node_order_row_), 1), dtype=int)) + mat.data[node_order_row_.index(node)] = 1 for neighbor in search_neighbor(node, graph.edges): - if neighbor in P | {node}: - S.data[node_order_row_.index(neighbor), :] = 1 - S_lower = MatGF2(np.zeros((len(node_order_row_lower_), 1), dtype=int)) + if neighbor in pset | {node}: + mat.data[node_order_row_.index(neighbor), :] = 1 + mat_lower = MatGF2(np.zeros((len(node_order_row_lower_), 1), dtype=int)) for neighbor in search_neighbor(node, graph.edges): - if neighbor in Y - {node}: - S_lower.data[node_order_row_lower_.index(neighbor), :] = 1 - S.concatenate(S_lower, axis=0) - adj_mat_XZ, S, _, col_permutation_XZ = adj_mat_.forward_eliminate(S, copy=True) - x_XZ, kernels = adj_mat_XZ.backward_substitute(S) - if 0 not in x_XZ.shape and x_XZ[0, 0] != sp.nan: + if neighbor in yset - {node}: + mat_lower.data[node_order_row_lower_.index(neighbor), :] = 1 + mat.concatenate(mat_lower, axis=0) + adj_mat_xz, mat, _, col_permutation_xz = adj_mat_.forward_eliminate(mat, copy=True) + x_xz, kernels = adj_mat_xz.backward_substitute(mat) + if 0 not in x_xz.shape and x_xz[0, 0] != sp.nan: solved_update |= {node} - x_XZ = x_XZ[:, 0] + x_xz = x_xz[:, 0] l_k[node] = k if mode == "single": - sol_list = [x_XZ[i].subs(zip(kernels, [sp.false] * len(kernels))) for i in range(len(x_XZ))] + sol_list = [x_xz[i].subs(zip(kernels, [sp.false] * len(kernels))) for i in range(len(x_xz))] sol = np.array(sol_list) sol_index = sol.nonzero()[0] - p[node] = set(node_order_col_[col_permutation_XZ.index(i)] for i in sol_index) | {node} + p[node] = set(node_order_col_[col_permutation_xz.index(i)] for i in sol_index) | {node} solved = True elif mode == "all": binary_combinations = product([0, 1], repeat=len(kernels)) for binary_combination in binary_combinations: - sol_list = [x_XZ[i].subs(zip(kernels, binary_combination)) for i in range(len(x_XZ))] + sol_list = [x_xz[i].subs(zip(kernels, binary_combination)) for i in range(len(x_xz))] sol = np.array(sol_list) sol_index = sol.nonzero()[0] - p_i = set(node_order_col_[col_permutation_XZ.index(i)] for i in sol_index) | {node} + p_i = set(node_order_col_[col_permutation_xz.index(i)] for i in sol_index) | {node} p[node].add(frozenset(p_i)) elif mode == "abstract": p_i = dict() - for i in range(len(x_XZ)): - node_temp = node_order_col_[col_permutation_XZ.index(i)] - p_i[node_temp] = x_XZ[i] + for i in range(len(x_xz)): + node_temp = node_order_col_[col_permutation_xz.index(i)] + p_i[node_temp] = x_xz[i] p_i[node] = sp.true p[node].append(p_i) - if not solved and (meas_planes[node] == graphix.pauli.Plane.YZ or node in Ly or node in Lz): - S = MatGF2(np.zeros((len(node_order_row_), 1), dtype=int)) + if not solved and (meas_planes[node] == graphix.pauli.Plane.YZ or node in l_y or node in l_z): + mat = MatGF2(np.zeros((len(node_order_row_), 1), dtype=int)) for neighbor in search_neighbor(node, graph.edges): - if neighbor in P | {node}: - S.data[node_order_row_.index(neighbor), :] = 1 - S_lower = MatGF2(np.zeros((len(node_order_row_lower_), 1), dtype=int)) + if neighbor in pset | 
{node}: + mat.data[node_order_row_.index(neighbor), :] = 1 + mat_lower = MatGF2(np.zeros((len(node_order_row_lower_), 1), dtype=int)) for neighbor in search_neighbor(node, graph.edges): - if neighbor in Y - {node}: - S_lower.data[node_order_row_lower_.index(neighbor), :] = 1 - S.concatenate(S_lower, axis=0) - adj_mat_YZ, S, _, col_permutation_YZ = adj_mat_.forward_eliminate(S, copy=True) - x_YZ, kernels = adj_mat_YZ.backward_substitute(S) - if 0 not in x_YZ.shape and x_YZ[0, 0] != sp.nan: + if neighbor in yset - {node}: + mat_lower.data[node_order_row_lower_.index(neighbor), :] = 1 + mat.concatenate(mat_lower, axis=0) + adj_mat_yz, mat, _, col_permutation_yz = adj_mat_.forward_eliminate(mat, copy=True) + x_yz, kernels = adj_mat_yz.backward_substitute(mat) + if 0 not in x_yz.shape and x_yz[0, 0] != sp.nan: solved_update |= {node} - x_YZ = x_YZ[:, 0] + x_yz = x_yz[:, 0] l_k[node] = k if mode == "single": - sol_list = [x_YZ[i].subs(zip(kernels, [sp.false] * len(kernels))) for i in range(len(x_YZ))] + sol_list = [x_yz[i].subs(zip(kernels, [sp.false] * len(kernels))) for i in range(len(x_yz))] sol = np.array(sol_list) sol_index = sol.nonzero()[0] - p[node] = set(node_order_col_[col_permutation_YZ.index(i)] for i in sol_index) | {node} + p[node] = set(node_order_col_[col_permutation_yz.index(i)] for i in sol_index) | {node} solved = True elif mode == "all": binary_combinations = product([0, 1], repeat=len(kernels)) for binary_combination in binary_combinations: - sol_list = [x_YZ[i].subs(zip(kernels, binary_combination)) for i in range(len(x_YZ))] + sol_list = [x_yz[i].subs(zip(kernels, binary_combination)) for i in range(len(x_yz))] sol = np.array(sol_list) sol_index = sol.nonzero()[0] - p_i = set(node_order_col_[col_permutation_YZ.index(i)] for i in sol_index) | {node} + p_i = set(node_order_col_[col_permutation_yz.index(i)] for i in sol_index) | {node} p[node].add(frozenset(p_i)) elif mode == "abstract": p_i = dict() - for i in range(len(x_YZ)): - node_temp = node_order_col_[col_permutation_YZ.index(i)] - p_i[node_temp] = x_YZ[i] + for i in range(len(x_yz)): + node_temp = node_order_col_[col_permutation_yz.index(i)] + p_i[node_temp] = x_yz[i] p_i[node] = sp.true p[node].append(p_i) @@ -641,8 +641,8 @@ def pauliflowaux( else: return None, None else: - B = solved_nodes | solved_update - return pauliflowaux(graph, iset, oset, meas_planes, k + 1, B, B, l_k, p, (Lx, Ly, Lz), mode) + bset = solved_nodes | solved_update + return pauliflowaux(graph, iset, oset, meas_planes, k + 1, bset, bset, l_k, p, (l_x, l_y, l_z), mode) def flow_from_pattern(pattern: Pattern) -> tuple[dict[int, set[int]], dict[int, int]]: @@ -664,10 +664,10 @@ def flow_from_pattern(pattern: Pattern) -> tuple[dict[int, set[int]], dict[int, for plane in meas_planes.values(): if plane != graphix.pauli.Plane.XY: return None, None - G = nx.Graph() + g = nx.Graph() nodes, edges = pattern.get_graph() - G.add_nodes_from(nodes) - G.add_edges_from(edges) + g.add_nodes_from(nodes) + g.add_edges_from(edges) input_nodes = pattern.input_nodes if not pattern.input_nodes else set() output_nodes = set(pattern.output_nodes) nodes = set(nodes) @@ -685,10 +685,10 @@ def flow_from_pattern(pattern: Pattern) -> tuple[dict[int, set[int]], dict[int, xflow, zflow = get_corrections_from_pattern(pattern) - if verify_flow(G, input_nodes, output_nodes, xflow): # if xflow is valid + if verify_flow(g, input_nodes, output_nodes, xflow): # if xflow is valid zflow_from_xflow = dict() for node, corrections in deepcopy(xflow).items(): - cand = find_odd_neighbor(G, 
corrections) - {node} + cand = find_odd_neighbor(g, corrections) - {node} if cand: zflow_from_xflow[node] = cand if zflow_from_xflow != zflow: # if zflow is consistent with xflow @@ -713,10 +713,10 @@ def gflow_from_pattern(pattern: Pattern) -> tuple[dict[int, set[int]], dict[int, l_k: dict layers obtained by gflow algorithm. l_k[d] is a node set of depth d. """ - G = nx.Graph() + g = nx.Graph() nodes, edges = pattern.get_graph() - G.add_nodes_from(nodes) - G.add_edges_from(edges) + g.add_nodes_from(nodes) + g.add_edges_from(edges) input_nodes = set(pattern.input_nodes) if pattern.input_nodes else set() output_nodes = set(pattern.output_nodes) meas_planes = pattern.get_meas_plane() @@ -740,10 +740,10 @@ def gflow_from_pattern(pattern: Pattern) -> tuple[dict[int, set[int]], dict[int, xflow[node] = {node} xflow[node] |= {node} - if verify_gflow(G, input_nodes, output_nodes, xflow, meas_planes): # if xflow is valid + if verify_gflow(g, input_nodes, output_nodes, xflow, meas_planes): # if xflow is valid zflow_from_xflow = dict() for node, corrections in deepcopy(xflow).items(): - cand = find_odd_neighbor(G, corrections) - {node} + cand = find_odd_neighbor(g, corrections) - {node} if cand: zflow_from_xflow[node] = cand if zflow_from_xflow != zflow: # if zflow is consistent with xflow @@ -771,11 +771,11 @@ def pauliflow_from_pattern(pattern: Pattern, mode="single") -> tuple[dict[int, s l_k: dict layers obtained by Pauli flow algorithm. l_k[d] is a node set of depth d. """ - G = nx.Graph() + g = nx.Graph() nodes, edges = pattern.get_graph() nodes = set(nodes) - G.add_nodes_from(nodes) - G.add_edges_from(edges) + g.add_nodes_from(nodes) + g.add_edges_from(edges) input_nodes = set(pattern.input_nodes) if pattern.input_nodes else set() output_nodes = set(pattern.output_nodes) non_outputs = nodes - output_nodes @@ -783,9 +783,9 @@ def pauliflow_from_pattern(pattern: Pattern, mode="single") -> tuple[dict[int, s meas_angles = pattern.get_angles() nodes = set(nodes) - Lx, Ly, Lz = get_pauli_nodes(meas_planes, meas_angles) + l_x, l_y, l_z = get_pauli_nodes(meas_planes, meas_angles) - p_all, l_k = find_pauliflow(G, input_nodes, output_nodes, meas_planes, meas_angles, mode="all") + p_all, l_k = find_pauliflow(g, input_nodes, output_nodes, meas_planes, meas_angles, mode="all") if p_all is None: return None, None @@ -802,13 +802,13 @@ def pauliflow_from_pattern(pattern: Pattern, mode="single") -> tuple[dict[int, s if xflow_node & p_i == xflow_node: ignored_nodes = p_i - xflow_node - {node} # check if nodes in ignored_nodes are measured in X or Y basis - if ignored_nodes & (Lx | Ly) != ignored_nodes: + if ignored_nodes & (l_x | l_y) != ignored_nodes: continue - odd_neighbers = find_odd_neighbor(G, p_i) + odd_neighbers = find_odd_neighbor(g, p_i) if zflow_node & odd_neighbers == zflow_node: ignored_nodes = zflow_node - odd_neighbers - {node} # check if nodes in ignored_nodes are measured in Z or Y basis - if ignored_nodes & (Ly | Lz) == ignored_nodes: + if ignored_nodes & (l_y | l_z) == ignored_nodes: valid = True if mode == "single": p[node] = set(p_i) @@ -888,13 +888,13 @@ def search_neighbor(node: int, edges: set[tuple[int, int]]) -> set[int]: N: list of ints neighboring nodes """ - N = set() + nb = set() for edge in edges: if node == edge[0]: - N = N | {edge[1]} + nb = nb | {edge[1]} elif node == edge[1]: - N = N | {edge[0]} - return N + nb = nb | {edge[0]} + return nb def get_min_depth(l_k: dict[int, int]) -> int: @@ -998,7 +998,7 @@ def get_dependence_pauliflow( inputs: set[int], flow: dict[int, 
set[int]], odd_flow: dict[int, set[int]], - L: tuple[set[int], set[int], set[int]], + ls: tuple[set[int], set[int], set[int]], ): """Get dependence flow from Pauli flow. @@ -1010,21 +1010,21 @@ def get_dependence_pauliflow( Pauli flow function. p[i] is the set of qubits to be corrected for the measurement of qubit i. odd_flow: dict[int, set[int]] odd neighbors of Pauli flow or gflow. Odd(p(i)) - L: tuple - L = (Lx, Ly, Lz) where Lx, Ly, Lz are sets of qubits whose measurement operators are X, Y, Z, respectively. + ls: tuple + ls = (l_x, l_y, l_z) where l_x, l_y, l_z are sets of qubits whose measurement operators are X, Y, Z, respectively. Returns ------- dependence_pauliflow: dict[int, set[int]] dependence flow function. dependence_pauliflow[i] is the set of qubits to be corrected for the measurement of qubit i. """ - Lx, Ly, Lz = L + l_x, l_y, l_z = ls dependence_pauliflow = {u: set() for u in inputs} # concatenate p and odd_p combined_flow = dict() for node, corrections in flow.items(): - combined_flow[node] = (corrections - (Lx | Ly)) | (odd_flow[node] - (Ly | Lz)) - for ynode in Ly: + combined_flow[node] = (corrections - (l_x | l_y)) | (odd_flow[node] - (l_y | l_z)) + for ynode in l_y: if ynode in corrections.symmetric_difference(odd_flow[node]): combined_flow[node] |= {ynode} for node, corrections in combined_flow.items(): @@ -1040,7 +1040,7 @@ def get_layers_from_flow( odd_flow: dict[int, set], inputs: set[int], outputs: set[int], - L: tuple[set[int], set[int], set[int]] | None = None, + ls: tuple[set[int], set[int], set[int]] | None = None, ) -> tuple[dict[int, set], int]: """Get layers from flow (incl. gflow, Pauli flow). @@ -1054,8 +1054,8 @@ def get_layers_from_flow( set of input nodes outputs: set set of output nodes - L: tuple - L = (Lx, Ly, Lz) where Lx, Ly, Lz are sets of qubits whose measurement operators are X, Y, Z, respectively. + ls: tuple + ls = (l_x, l_y, l_z) where l_x, l_y, l_z are sets of qubits whose measurement operators are X, Y, Z, respectively. If not None, the layers are obtained based on Pauli flow. Returns @@ -1072,10 +1072,10 @@ def get_layers_from_flow( """ layers = dict() depth = 0 - if L is None: + if ls is None: dependence_flow = get_dependence_flow(inputs, odd_flow, flow) else: - dependence_flow = get_dependence_pauliflow(inputs, flow, odd_flow, L) + dependence_flow = get_dependence_pauliflow(inputs, flow, odd_flow, ls) left_nodes = set(flow.keys()) for output in outputs: if output in left_nodes: @@ -1264,7 +1264,7 @@ def verify_pauliflow( True if the Pauliflow is valid. False otherwise. 
""" check_meas_planes(meas_planes) - Lx, Ly, Lz = get_pauli_nodes(meas_planes, meas_angles) + l_x, l_y, l_z = get_pauli_nodes(meas_planes, meas_angles) valid_pauliflow = True non_outputs = set(graph.nodes) - oset @@ -1277,7 +1277,7 @@ def verify_pauliflow( odd_flow[non_output] = find_odd_neighbor(graph, pauliflow[non_output]) try: - layers, depth = get_layers_from_flow(pauliflow, odd_flow, iset, oset, (Lx, Ly, Lz)) + layers, depth = get_layers_from_flow(pauliflow, odd_flow, iset, oset, (l_x, l_y, l_z)) except ValueError: valid_flow = False return valid_flow @@ -1286,11 +1286,11 @@ def verify_pauliflow( node_order.extend(list(layers[d])) for node, plane in meas_planes.items(): - if node in Lx: + if node in l_x: valid_pauliflow &= node in odd_flow[node] - elif node in Lz: + elif node in l_z: valid_pauliflow &= node in pauliflow[node] - elif node in Ly: + elif node in l_y: valid_pauliflow &= node in pauliflow[node].symmetric_difference(odd_flow[node]) elif plane == graphix.pauli.Plane.XY: valid_pauliflow &= (node not in pauliflow[node]) and (node in odd_flow[node]) @@ -1361,29 +1361,29 @@ def get_pauli_nodes( Returns ------- - Lx: set + l_x: set set of nodes measured in X basis. - Ly: set + l_y: set set of nodes measured in Y basis. - Lz: set + l_z: set set of nodes measured in Z basis. """ check_meas_planes(meas_planes) - Lx, Ly, Lz = set(), set(), set() + l_x, l_y, l_z = set(), set(), set() for node, plane in meas_planes.items(): if plane == graphix.pauli.Plane.XY: if is_int(meas_angles[node]): # measurement angle is integer - Lx |= {node} + l_x |= {node} elif is_int(2 * meas_angles[node]): # measurement angle is half integer - Ly |= {node} + l_y |= {node} elif plane == graphix.pauli.Plane.XZ: if is_int(meas_angles[node]): - Lz |= {node} + l_z |= {node} elif is_int(2 * meas_angles[node]): - Lx |= {node} + l_x |= {node} elif plane == graphix.pauli.Plane.YZ: if is_int(meas_angles[node]): - Ly |= {node} + l_y |= {node} elif is_int(2 * meas_angles[node]): - Lz |= {node} - return Lx, Ly, Lz + l_z |= {node} + return l_x, l_y, l_z diff --git a/graphix/graphsim/basegraphstate.py b/graphix/graphsim/basegraphstate.py index e484bbab..10ba1706 100644 --- a/graphix/graphsim/basegraphstate.py +++ b/graphix/graphsim/basegraphstate.py @@ -402,7 +402,7 @@ def z(self, node: int) -> None: else: # solid self.flip_sign(node) - def equivalent_graph_E1(self, node: int) -> None: + def equivalent_graph_e1(self, node: int) -> None: """Tranform a graph state to a different graph state representing the same stabilizer state. This rule applies only to a node with loop. @@ -427,7 +427,7 @@ def equivalent_graph_E1(self, node: int) -> None: for i in self.neighbors(node): self.flip_sign(i) - def equivalent_graph_E2(self, node1: int, node2: int) -> None: + def equivalent_graph_e2(self, node1: int, node2: int) -> None: """Tranform a graph state to a different graph state representing the same stabilizer state. This rule applies only to two connected nodes without loop. @@ -498,19 +498,19 @@ def equivalent_fill_node(self, node: int) -> int: """ if self.nodes[node]["hollow"]: if self.nodes[node]["loop"]: - self.equivalent_graph_E1(node) + self.equivalent_graph_e1(node) return 0 else: # node = hollow and loopless if len(list(self.neighbors(node))) == 0: return 1 for i in self.neighbors(node): if not self.nodes[i]["loop"]: - self.equivalent_graph_E2(node, i) + self.equivalent_graph_e2(node, i) return 0 # if all neighbor has loop, pick one and apply E1, then E1 to the node. 
i = next(self.neighbors(node)) - self.equivalent_graph_E1(i) # this gives loop to node. - self.equivalent_graph_E1(node) + self.equivalent_graph_e1(i) # this gives loop to node. + self.equivalent_graph_e1(node) return 0 else: if len(list(self.neighbors(node))) == 0: diff --git a/graphix/graphsim/graphstate.py b/graphix/graphsim/graphstate.py index 43607a72..f536b5f9 100644 --- a/graphix/graphsim/graphstate.py +++ b/graphix/graphsim/graphstate.py @@ -18,7 +18,7 @@ class GraphState: """Factory class for graph state simulator.""" - def __new__(self, nodes=None, edges=None, vops=None, use_rustworkx: bool = False) -> BaseGraphState: + def __new__(cls, nodes=None, edges=None, vops=None, use_rustworkx: bool = False) -> BaseGraphState: if use_rustworkx: if RUSTWORKX_INSTALLED: return RXGraphState(nodes=nodes, edges=edges, vops=vops) diff --git a/graphix/linalg.py b/graphix/linalg.py index 37c637bf..f30e3af3 100644 --- a/graphix/linalg.py +++ b/graphix/linalg.py @@ -200,10 +200,10 @@ def get_rank(self): rank of the matrix """ if not self.is_canonical_form(): - A = self.forward_eliminate(copy=True)[0] + mat_a = self.forward_eliminate(copy=True)[0] else: - A = self - nonzero_index = np.diag(A.data).nonzero() + mat_a = self + nonzero_index = np.diag(mat_a.data).nonzero() return len(nonzero_index[0]) def forward_eliminate(self, b=None, copy=False): @@ -222,7 +222,7 @@ def forward_eliminate(self, b=None, copy=False): Returns ------- - A: MatGF2 + mat_a: MatGF2 forward eliminated matrix b: MatGF2 forward eliminated right hand side @@ -232,26 +232,26 @@ def forward_eliminate(self, b=None, copy=False): column permutation """ if copy: - A = MatGF2(self.data) + mat_a = MatGF2(self.data) else: - A = self + mat_a = self if b is None: - b = np.zeros((A.data.shape[0], 1), dtype=int) + b = np.zeros((mat_a.data.shape[0], 1), dtype=int) b = MatGF2(b) # Remember the row and column order - row_permutation = [i for i in range(A.data.shape[0])] - col_permutation = [i for i in range(A.data.shape[1])] + row_permutation = [i for i in range(mat_a.data.shape[0])] + col_permutation = [i for i in range(mat_a.data.shape[1])] # Gauss-Jordan Elimination - max_rank = min(A.data.shape) + max_rank = min(mat_a.data.shape) for row in range(max_rank): - if A.data[row, row] == 0: - pivot = A.data[row:, row:].nonzero() + if mat_a.data[row, row] == 0: + pivot = mat_a.data[row:, row:].nonzero() if len(pivot[0]) == 0: break pivot_row = pivot[0][0] + row if pivot_row != row: - A.swap_row(row, pivot_row) + mat_a.swap_row(row, pivot_row) b.swap_row(row, pivot_row) former_row = row_permutation.index(row) former_pivot_row = row_permutation.index(pivot_row) @@ -259,17 +259,17 @@ def forward_eliminate(self, b=None, copy=False): row_permutation[former_pivot_row] = row pivot_col = pivot[1][0] + row if pivot_col != row: - A.swap_col(row, pivot_col) + mat_a.swap_col(row, pivot_col) former_col = col_permutation.index(row) former_pivot_col = col_permutation.index(pivot_col) col_permutation[former_col] = pivot_col col_permutation[former_pivot_col] = row - assert A.data[row, row] == 1 - eliminate_rows = set(A.data[:, row].nonzero()[0]) - {row} + assert mat_a.data[row, row] == 1 + eliminate_rows = set(mat_a.data[:, row].nonzero()[0]) - {row} for eliminate_row in eliminate_rows: - A.data[eliminate_row, :] += A.data[row, :] + mat_a.data[eliminate_row, :] += mat_a.data[row, :] b.data[eliminate_row, :] += b.data[row, :] - return A, b, row_permutation, col_permutation + return mat_a, b, row_permutation, col_permutation def backward_substitute(self, b): 
"""backward substitute the matrix diff --git a/graphix/ops.py b/graphix/ops.py index f8959256..fd7b3e1b 100644 --- a/graphix/ops.py +++ b/graphix/ops.py @@ -37,7 +37,7 @@ class Ops: Pauli_ops: ClassVar = [np.eye(2), x, y, z] @staticmethod - def Rx(theta): + def rx(theta): """x rotation Parameters @@ -53,7 +53,7 @@ def Rx(theta): return np.array([[np.cos(theta / 2), -1j * np.sin(theta / 2)], [-1j * np.sin(theta / 2), np.cos(theta / 2)]]) @staticmethod - def Ry(theta): + def ry(theta): """y rotation Parameters @@ -68,7 +68,7 @@ def Ry(theta): return np.array([[np.cos(theta / 2), -np.sin(theta / 2)], [np.sin(theta / 2), np.cos(theta / 2)]]) @staticmethod - def Rz(theta): + def rz(theta): """z rotation Parameters @@ -83,12 +83,12 @@ def Rz(theta): return np.array([[np.exp(-1j * theta / 2), 0], [0, np.exp(1j * theta / 2)]]) @staticmethod - def Rzz(theta): + def rzz(theta): """zz-rotation. Equivalent to the sequence - CNOT(control, target), - Rz(target, angle), - CNOT(control, target) + cnot(control, target), + rz(target, angle), + cnot(control, target) Parameters ---------- @@ -99,10 +99,10 @@ def Rzz(theta): ---------- operator : 4*4 np.array """ - return Ops.cnot @ np.kron(np.eye(2), Ops.Rz(theta)) @ Ops.cnot + return Ops.cnot @ np.kron(np.eye(2), Ops.rz(theta)) @ Ops.cnot @staticmethod - def build_tensor_Pauli_ops(n_qubits: int): + def build_tensor_pauli_ops(n_qubits: int): """Method to build all the 4^n tensor Pauli operators {I, X, Y, Z}^{\\otimes n} :param n_qubits: number of copies (qubits) to consider @@ -117,6 +117,6 @@ def build_tensor_Pauli_ops(n_qubits: int): else: raise TypeError(f"The number of qubits must be an integer and not {n_qubits}.") - tensor_Pauli_ops = [reduce(lambda x, y: np.kron(x, y), i) for i in product(Ops.Pauli_ops, repeat=n_qubits)] + tensor_pauli_ops = [reduce(lambda x, y: np.kron(x, y), i) for i in product(Ops.Pauli_ops, repeat=n_qubits)] - return np.array(tensor_Pauli_ops) + return np.array(tensor_pauli_ops) diff --git a/graphix/pattern.py b/graphix/pattern.py index d6adfc95..e38e80f6 100644 --- a/graphix/pattern.py +++ b/graphix/pattern.py @@ -22,7 +22,7 @@ from graphix.visualization import GraphVisualizer -class NodeAlreadyPrepared(Exception): +class NodeAlreadyPreparedError(Exception): def __init__(self, node: int): self.__node = node @@ -60,7 +60,7 @@ class Pattern: attr for Z: signal_domain attr for S: signal_domain attr for C: clifford_index, as defined in :py:mod:`graphix.clifford` - Nnode : int + n_node : int total number of nodes in the resource state """ @@ -72,7 +72,7 @@ def __init__(self, input_nodes: list[int] | None = None) -> None: input_nodes = [] self.results = {} # measurement results from the graph state simulator self.__input_nodes = list(input_nodes) # input nodes (list() makes our own copy of the list) - self.__Nnode = len(input_nodes) # total number of nodes in the graph state + self.__n_node = len(input_nodes) # total number of nodes in the graph state self._pauli_preprocessed = False # flag for `measure_pauli` preprocessing completion self.__seq: list[command.Command] = [] @@ -111,8 +111,8 @@ def add(self, cmd: command.Command): """ if cmd.kind == command.CommandKind.N: if cmd.node in self.__output_nodes: - raise NodeAlreadyPrepared(cmd.node) - self.__Nnode += 1 + raise NodeAlreadyPreparedError(cmd.node) + self.__n_node += 1 self.__output_nodes.append(cmd.node) elif cmd.kind == command.CommandKind.M: self.__output_nodes.remove(cmd.node) @@ -128,7 +128,7 @@ def extend(self, cmds: list[command.Command]): def clear(self): """Clear the 
sequence of pattern commands.""" - self.__Nnode = len(self.__input_nodes) + self.__n_node = len(self.__input_nodes) self.__seq = [] self.__output_nodes = list(self.__input_nodes) @@ -169,9 +169,9 @@ def __getitem__(self, index): return self.__seq[index] @property - def Nnode(self): + def n_node(self): """count of nodes that are either `input_nodes` or prepared with `N` commands""" - return self.__Nnode + return self.__n_node def reorder_output_nodes(self, output_nodes: list[int]): """arrange the order of output_nodes. @@ -276,10 +276,10 @@ def get_local_pattern(self): def fresh_node(): return { "seq": [], - "Mprop": [None, None, set(), set()], - "Xsignal": set(), - "Xsignals": [], - "Zsignal": set(), + "m_prop": [None, None, set(), set()], + "x_signal": set(), + "x_signals": [], + "z_signal": set(), "is_input": False, "is_output": False, } @@ -294,18 +294,18 @@ def fresh_node(): node_prop[cmd.nodes[1]]["seq"].append(cmd.nodes[0]) node_prop[cmd.nodes[0]]["seq"].append(cmd.nodes[1]) elif kind == command.CommandKind.M: - node_prop[cmd.node]["Mprop"] = [cmd.plane, cmd.angle, cmd.s_domain, cmd.t_domain] + node_prop[cmd.node]["m_prop"] = [cmd.plane, cmd.angle, cmd.s_domain, cmd.t_domain] node_prop[cmd.node]["seq"].append(-1) morder.append(cmd.node) elif kind == command.CommandKind.X: if standardized: - node_prop[cmd.node]["Xsignal"] ^= cmd.domain - node_prop[cmd.node]["Xsignals"] += [cmd.domain] + node_prop[cmd.node]["x_signal"] ^= cmd.domain + node_prop[cmd.node]["x_signals"] += [cmd.domain] else: - node_prop[cmd.node]["Xsignals"].append(cmd.domain) + node_prop[cmd.node]["x_signals"].append(cmd.domain) node_prop[cmd.node]["seq"].append(-2) elif kind == command.CommandKind.Z: - node_prop[cmd.node]["Zsignal"] ^= cmd.domain + node_prop[cmd.node]["z_signal"] ^= cmd.domain node_prop[cmd.node]["seq"].append(-3) elif kind == command.CommandKind.C: node_prop[cmd.node]["vop"] = cmd.cliff_index @@ -341,9 +341,9 @@ def standardize(self, method="local"): localpattern.standardize() self.__seq = localpattern.get_pattern().__seq elif method == "global": - self._move_N_to_left() + self._move_n_to_left() self._move_byproduct_to_right() - self._move_E_after_N() + self._move_e_after_n() else: raise ValueError("Invalid method") @@ -410,13 +410,13 @@ def shift_signals(self, method="local") -> dict[int, list[int]]: cmd = self.__seq[target + 1] kind = cmd.kind if kind == command.CommandKind.X: - self._commute_XS(target) + self._commute_xs(target) elif kind == command.CommandKind.Z: - self._commute_ZS(target) + self._commute_zs(target) elif kind == command.CommandKind.M: - self._commute_MS(target) + self._commute_ms(target) elif kind == command.CommandKind.S: - self._commute_SS(target) + self._commute_ss(target) else: self._commute_with_following(target) target += 1 @@ -451,7 +451,7 @@ def _find_op_to_be_moved(self, op: command.CommandKind, rev=False, skipnum=0): # If no target found return None - def _commute_EX(self, target): + def _commute_ex(self, target): """Internal method to perform the commutation of E and X. 
Parameters ---------- @@ -461,25 +461,25 @@ def _commute_EX(self, target): """ assert self.__seq[target].kind == command.CommandKind.X assert self.__seq[target + 1].kind == command.CommandKind.E - X = self.__seq[target] - E = self.__seq[target + 1] - if E.nodes[0] == X.node: - Z = command.Z(node=E.nodes[1], domain=X.domain) + x = self.__seq[target] + e = self.__seq[target + 1] + if e.nodes[0] == x.node: + z = command.Z(node=e.nodes[1], domain=x.domain) self.__seq.pop(target + 1) # del E - self.__seq.insert(target, Z) # add Z in front of X - self.__seq.insert(target, E) # add E in front of Z + self.__seq.insert(target, z) # add Z in front of X + self.__seq.insert(target, e) # add E in front of Z return True - elif E.nodes[1] == X.node: - Z = command.Z(node=E.nodes[0], domain=X.domain) + elif e.nodes[1] == x.node: + z = command.Z(node=e.nodes[0], domain=x.domain) self.__seq.pop(target + 1) # del E - self.__seq.insert(target, Z) # add Z in front of X - self.__seq.insert(target, E) # add E in front of Z + self.__seq.insert(target, z) # add Z in front of X + self.__seq.insert(target, e) # add E in front of Z return True else: self._commute_with_following(target) return False - def _commute_MX(self, target): + def _commute_mx(self, target): """Internal method to perform the commutation of M and X. Parameters @@ -490,17 +490,17 @@ def _commute_MX(self, target): """ assert self.__seq[target].kind == command.CommandKind.X assert self.__seq[target + 1].kind == command.CommandKind.M - X = self.__seq[target] - M = self.__seq[target + 1] - if X.node == M.node: - M.s_domain ^= X.domain + x = self.__seq[target] + m = self.__seq[target + 1] + if x.node == m.node: + m.s_domain ^= x.domain self.__seq.pop(target) # del X return True else: self._commute_with_following(target) return False - def _commute_MZ(self, target): + def _commute_mz(self, target): """Internal method to perform the commutation of M and Z. Parameters @@ -511,17 +511,17 @@ def _commute_MZ(self, target): """ assert self.__seq[target].kind == command.CommandKind.Z assert self.__seq[target + 1].kind == command.CommandKind.M - Z = self.__seq[target] - M = self.__seq[target + 1] - if Z.node == M.node: - M.t_domain ^= Z.domain + z = self.__seq[target] + m = self.__seq[target + 1] + if z.node == m.node: + m.t_domain ^= z.domain self.__seq.pop(target) # del Z return True else: self._commute_with_following(target) return False - def _commute_XS(self, target): + def _commute_xs(self, target): """Internal method to perform the commutation of X and S. Parameters @@ -532,13 +532,13 @@ def _commute_XS(self, target): """ assert self.__seq[target].kind == command.CommandKind.S assert self.__seq[target + 1].kind == command.CommandKind.X - S = self.__seq[target] - X = self.__seq[target + 1] - if S.node in X.domain: - X.domain ^= S.domain + s = self.__seq[target] + x = self.__seq[target + 1] + if s.node in x.domain: + x.domain ^= s.domain self._commute_with_following(target) - def _commute_ZS(self, target): + def _commute_zs(self, target): """Internal method to perform the commutation of Z and S. 
Parameters @@ -549,13 +549,13 @@ def _commute_ZS(self, target): """ assert self.__seq[target].kind == command.CommandKind.S assert self.__seq[target + 1].kind == command.CommandKind.Z - S = self.__seq[target] - Z = self.__seq[target + 1] - if S.node in Z.domain: - Z.domain ^= S.domain + s = self.__seq[target] + z = self.__seq[target + 1] + if s.node in z.domain: + z.domain ^= s.domain self._commute_with_following(target) - def _commute_MS(self, target): + def _commute_ms(self, target): """Internal method to perform the commutation of M and S. Parameters @@ -566,15 +566,15 @@ def _commute_MS(self, target): """ assert self.__seq[target].kind == command.CommandKind.S assert self.__seq[target + 1].kind == command.CommandKind.M - S = self.__seq[target] - M = self.__seq[target + 1] - if S.node in M.s_domain: - M.s_domain ^= S.domain - if S.node in M.t_domain: - M.t_domain ^= S.domain + s = self.__seq[target] + m = self.__seq[target + 1] + if s.node in m.s_domain: + m.s_domain ^= s.domain + if s.node in m.t_domain: + m.t_domain ^= s.domain self._commute_with_following(target) - def _commute_SS(self, target): + def _commute_ss(self, target): """Internal method to perform the commutation of two S commands. Parameters ---------- @@ -584,10 +584,10 @@ def _commute_SS(self, target): """ assert self.__seq[target].kind == command.CommandKind.S assert self.__seq[target + 1].kind == command.CommandKind.S - S1 = self.__seq[target] - S2 = self.__seq[target + 1] - if S1.node in S2.domain: - S2.domain ^= S1.domain + s1 = self.__seq[target] + s2 = self.__seq[target + 1] + if s1.node in s2.domain: + s2.domain ^= s1.domain self._commute_with_following(target) def _commute_with_following(self, target): @@ -600,9 +600,9 @@ def _commute_with_following(self, target): target : int target command index """ - A = self.__seq[target + 1] + a = self.__seq[target + 1] self.__seq.pop(target + 1) - self.__seq.insert(target, A) + self.__seq.insert(target, a) def _commute_with_preceding(self, target): """Internal method to perform the commutation of @@ -614,24 +614,24 @@ def _commute_with_preceding(self, target): target : int target command index """ - A = self.__seq[target - 1] + a = self.__seq[target - 1] self.__seq.pop(target - 1) - self.__seq.insert(target, A) + self.__seq.insert(target, a) - def _move_N_to_left(self): + def _move_n_to_left(self): """Internal method to move all 'N' commands to the start of the sequence. N can be moved to the start of sequence without the need of considering commutation relations. 
""" new_seq = [] - Nlist = [] + n_list = [] for cmd in self.__seq: if cmd.kind == command.CommandKind.N: - Nlist.append(cmd) + n_list.append(cmd) else: new_seq.append(cmd) - Nlist.sort(key=lambda N_cmd: N_cmd.node) - self.__seq = Nlist + new_seq + n_list.sort(key=lambda n_cmd: n_cmd.node) + self.__seq = n_list + new_seq def _move_byproduct_to_right(self): """Internal method to move the byproduct commands to the end of sequence, @@ -639,60 +639,60 @@ def _move_byproduct_to_right(self): """ # First, we move all X commands to the end of sequence index = len(self.__seq) - 1 - X_limit = len(self.__seq) - 1 + x_limit = len(self.__seq) - 1 while index > 0: if self.__seq[index].kind == command.CommandKind.X: - index_X = index - while index_X < X_limit: - cmd = self.__seq[index_X + 1] + index_x = index + while index_x < x_limit: + cmd = self.__seq[index_x + 1] kind = cmd.kind if kind == command.CommandKind.E: - move = self._commute_EX(index_X) + move = self._commute_ex(index_x) if move: - X_limit += 1 # addition of extra Z means target must be increased - index_X += 1 + x_limit += 1 # addition of extra Z means target must be increased + index_x += 1 elif kind == command.CommandKind.M: - search = self._commute_MX(index_X) + search = self._commute_mx(index_x) if search: - X_limit -= 1 # XM commutation rule removes X command + x_limit -= 1 # XM commutation rule removes X command break else: - self._commute_with_following(index_X) - index_X += 1 + self._commute_with_following(index_x) + index_x += 1 else: - X_limit -= 1 + x_limit -= 1 index -= 1 # then, move Z to the end of sequence in front of X - index = X_limit - Z_limit = X_limit + index = x_limit + z_limit = x_limit while index > 0: if self.__seq[index].kind == command.CommandKind.Z: - index_Z = index - while index_Z < Z_limit: - cmd = self.__seq[index_Z + 1] + index_z = index + while index_z < z_limit: + cmd = self.__seq[index_z + 1] if cmd.kind == command.CommandKind.M: - search = self._commute_MZ(index_Z) + search = self._commute_mz(index_z) if search: - Z_limit -= 1 # ZM commutation rule removes Z command + z_limit -= 1 # ZM commutation rule removes Z command break else: - self._commute_with_following(index_Z) - index_Z += 1 + self._commute_with_following(index_z) + index_z += 1 index -= 1 - def _move_E_after_N(self): + def _move_e_after_n(self): """Internal method to move all E commands to the start of sequence, - before all N commands. assumes that _move_N_to_left() method was called. + before all N commands. assumes that _move_n_to_left() method was called. 
""" - moved_E = 0 - target = self._find_op_to_be_moved(command.CommandKind.E, skipnum=moved_E) + moved_e = 0 + target = self._find_op_to_be_moved(command.CommandKind.E, skipnum=moved_e) while target is not None: if (target == 0) or ( self.__seq[target - 1].kind == command.CommandKind.N or self.__seq[target - 1].kind == command.CommandKind.E ): - moved_E += 1 - target = self._find_op_to_be_moved(command.CommandKind.E, skipnum=moved_E) + moved_e += 1 + target = self._find_op_to_be_moved(command.CommandKind.E, skipnum=moved_e) continue self._commute_with_preceding(target) target -= 1 @@ -868,13 +868,13 @@ def get_measurement_order_from_flow(self): """ # NOTE calling get_graph nodes, edges = self.get_graph() - G = nx.Graph() - G.add_nodes_from(nodes) - G.add_edges_from(edges) + g = nx.Graph() + g.add_nodes_from(nodes) + g.add_edges_from(edges) vin = set(self.input_nodes) if self.input_nodes is not None else set() vout = set(self.output_nodes) meas_planes = self.get_meas_plane() - f, l_k = find_flow(G, vin, vout, meas_planes=meas_planes) + f, l_k = find_flow(g, vin, vout, meas_planes=meas_planes) if f is None: return None depth, layer = get_layers(l_k) @@ -896,16 +896,16 @@ def get_measurement_order_from_gflow(self): """ # NOTE calling get_graph nodes, edges = self.get_graph() - G = nx.Graph() - G.add_nodes_from(nodes) - G.add_edges_from(edges) - isolated = list(nx.isolates(G)) + g = nx.Graph() + g.add_nodes_from(nodes) + g.add_edges_from(edges) + isolated = list(nx.isolates(g)) if isolated: raise ValueError("The input graph must be connected") vin = set(self.input_nodes) if self.input_nodes is not None else set() vout = set(self.output_nodes) meas_plane = self.get_meas_plane() - g, l_k = find_gflow(G, vin, vout, meas_plane=meas_plane) + g, l_k = find_gflow(g, vin, vout, meas_plane=meas_plane) if not g: raise ValueError("No gflow found") k, layers = get_layers(l_k) @@ -1240,15 +1240,15 @@ def space_list(self): time evolution of 'space' at each 'N' and 'M' commands of pattern. """ nodes = 0 - N_list = [] + n_list = [] for cmd in self.__seq: if cmd.kind == command.CommandKind.N: nodes += 1 - N_list.append(nodes) + n_list.append(nodes) elif cmd.kind == command.CommandKind.M: nodes -= 1 - N_list.append(nodes) - return N_list + n_list.append(nodes) + return n_list def simulate_pattern(self, backend="statevector", **kwargs): """Simulate the execution of the pattern by using @@ -1402,7 +1402,7 @@ def copy(self) -> Pattern: result.__seq = [cmd.model_copy() for cmd in self.__seq] result.__input_nodes = self.__input_nodes.copy() result.__output_nodes = self.__output_nodes.copy() - result.__Nnode = self.__Nnode + result.__n_node = self.__n_node result._pauli_preprocessed = self._pauli_preprocessed result.results = self.results.copy() return result @@ -1423,15 +1423,15 @@ class CommandNode: X: -2 Z: -3 C: -4 - Mprop : list + m_prop : list attributes for a measurement command. consists of [meas_plane, angle, s_domain, t_domain] result : int measurement result of the node - Xsignal : list + x_signal : list signal domain - Xsignals : list - signal domain. Xsignals may contains lists. For standardization, this variable is used. - Zsignal : list + x_signals : list + signal domain. x_signals may contains lists. For standardization, this variable is used. 
+ z_signal : list signal domain input : bool whether the node is an input or not @@ -1439,7 +1439,7 @@ class CommandNode: whether the node is an output or not """ - def __init__(self, node_index, seq, Mprop, Zsignal, is_input, is_output, Xsignal=None, Xsignals=None): + def __init__(self, node_index, seq, m_prop, z_signal, is_input, is_output, x_signal=None, x_signals=None): """ Parameters ---------- @@ -1449,17 +1449,17 @@ def __init__(self, node_index, seq, Mprop, Zsignal, is_input, is_output, Xsignal seq : list distributed command sequence - Mprop : list + m_prop : list attributes for measurement command - Xsignal : list + x_signal : list signal domain for X byproduct correction - Xsignals : list of list + x_signals : list of list signal domains for X byproduct correction - Xsignal or Xsignals must be specified + x_signal or x_signals must be specified - Zsignal : list + z_signal : list signal domain for Z byproduct correction is_input : bool @@ -1468,19 +1468,19 @@ def __init__(self, node_index, seq, Mprop, Zsignal, is_input, is_output, Xsignal is_output : bool whether the node is an output or not """ - if Xsignals is None: - Xsignals = [] - if Xsignal is None: - Xsignal = set() + if x_signals is None: + x_signals = [] + if x_signal is None: + x_signal = set() self.index = node_index self.seq = seq # composed of [E, M, X, Z, C] - self.Mprop = Mprop + self.m_prop = m_prop self.result = None - self.Xsignal = Xsignal - self.Xsignals = Xsignals - self.Zsignal = Zsignal # appeared at most e + 1 - self.input = is_input - self.output = is_output + self.x_signal = x_signal + self.x_signals = x_signals + self.z_signal = z_signal # appeared at most e + 1 + self.is_input = is_input + self.is_output = is_output def is_standard(self): """Check whether the local command sequence is standardized. @@ -1506,7 +1506,7 @@ def is_standard(self): cmd_ref = cmd return standardized - def commute_X(self): + def commute_x(self): """Move all X correction commands to the back. Returns @@ -1514,41 +1514,41 @@ def commute_X(self): EXcommutated_nodes : dict when X commutes with E, Z correction is added on the pair node. This dict specifies target nodes where Zs will be added. """ - EXcommutated_nodes = dict() - combined_Xsignal = set() - for Xsignal in self.Xsignals: - Xpos = self.seq.index(-2) - for i in range(Xpos, len(self.seq)): + ex_commutated_nodes = dict() + combined_xsignal = set() + for x_signal in self.x_signals: + x_pos = self.seq.index(-2) + for i in range(x_pos, len(self.seq)): if self.seq[i] >= 0: try: - EXcommutated_nodes[self.seq[i]] ^= Xsignal + ex_commutated_nodes[self.seq[i]] ^= x_signal except KeyError: - EXcommutated_nodes[self.seq[i]] = Xsignal + ex_commutated_nodes[self.seq[i]] = x_signal self.seq.remove(-2) - combined_Xsignal ^= Xsignal - if self.output: + combined_xsignal ^= x_signal + if self.is_output: self.seq.append(-2) # put X on the end of the pattern - self.Xsignal = combined_Xsignal - self.Xsignals = [combined_Xsignal] + self.x_signal = combined_xsignal + self.x_signals = [combined_xsignal] else: - self.Mprop[2] ^= combined_Xsignal - self.Xsignal = [] - self.Xsignals = [] - return EXcommutated_nodes + self.m_prop[2] ^= combined_xsignal + self.x_signal = [] + self.x_signals = [] + return ex_commutated_nodes - def commute_Z(self): + def commute_z(self): """Move all Zs to the back. 
EZ commutation produces no additional command unlike EX commutation.""" z_in_seq = False while -3 in self.seq: z_in_seq = True self.seq.remove(-3) - if self.output and z_in_seq: + if self.is_output and z_in_seq: self.seq.append(-3) else: - self.Mprop[3] ^= self.Zsignal - self.Zsignal = [] + self.m_prop[3] ^= self.z_signal + self.z_signal = [] - def _add_Z(self, pair, signal): + def _add_z(self, pair, signal): """Add Z correction into the node. Parameters @@ -1559,9 +1559,9 @@ def _add_Z(self, pair, signal): signal domain for the additional Z correction """ # caused by EX commutation. - self.Zsignal ^= signal - Epos = self.seq.index(pair) - self.seq.insert(Epos + 1, -3) + self.z_signal ^= signal + e_pos = self.seq.index(pair) + self.seq.insert(e_pos + 1, -3) def print_pattern(self): """Print the local command sequence""" @@ -1591,19 +1591,19 @@ def get_command(self, cmd): elif cmd == -1: return command.M( node=self.index, - plane=self.Mprop[0], - angle=self.Mprop[1], - s_domain=self.Mprop[2], - t_domain=self.Mprop[3], + plane=self.m_prop[0], + angle=self.m_prop[1], + s_domain=self.m_prop[2], + t_domain=self.m_prop[3], ) elif cmd == -2: if self.seq.count(-2) > 1: raise NotImplementedError("Patterns with more than one X corrections are not supported") - return command.X(node=self.index, domain=self.Xsignal) + return command.X(node=self.index, domain=self.x_signal) elif cmd == -3: if self.seq.count(-3) > 1: raise NotImplementedError("Patterns with more than one Z corrections are not supported") - return command.Z(node=self.index, domain=self.Zsignal) + return command.Z(node=self.index, domain=self.z_signal) elif cmd == -4: return command.C(node=self.index, cliff_index=self.vop) @@ -1615,7 +1615,7 @@ def get_signal_destination(self): signal_destination : set Counterpart of 'dependent nodes'. measurement results of each node propagate to the nodes specified by 'signal_distination'. """ - signal_destination = self.Mprop[2] | self.Mprop[3] | self.Xsignal | self.Zsignal + signal_destination = self.m_prop[2] | self.m_prop[3] | self.x_signal | self.z_signal return signal_destination def get_signal_destination_dict(self): @@ -1627,10 +1627,10 @@ def get_signal_destination_dict(self): Counterpart of 'dependent nodes'. Unlike 'get_signal_destination', types of domains are memorarized. measurement results of each node propagate to the nodes specified by 'signal_distination_dict'. """ dependent_nodes_dict = dict() - dependent_nodes_dict["Ms"] = self.Mprop[2] - dependent_nodes_dict["Mt"] = self.Mprop[3] - dependent_nodes_dict["X"] = self.Xsignal - dependent_nodes_dict["Z"] = self.Zsignal + dependent_nodes_dict["Ms"] = self.m_prop[2] + dependent_nodes_dict["Mt"] = self.m_prop[3] + dependent_nodes_dict["X"] = self.x_signal + dependent_nodes_dict["Z"] = self.z_signal return dependent_nodes_dict @@ -1698,22 +1698,22 @@ def is_standard(self): standardized &= node.is_standard() return standardized - def Xshift(self): + def x_shift(self): """Move X to the back of the pattern""" for index, node in self.nodes.items(): - EXcomutation = node.commute_X() - for target_index, signal in EXcomutation.items(): - self.nodes[target_index]._add_Z(index, signal) + ex_commutation = node.commute_x() + for target_index, signal in ex_commutation.items(): + self.nodes[target_index]._add_z(index, signal) - def Zshift(self): + def z_shift(self): """Move Z to the back of the pattern. 
This method can be executed separately""" for node in self.nodes.values(): - node.commute_Z() + node.commute_z() def standardize(self): """Standardize pattern. In this structure, it is enough to move all byproduct corrections to the back""" - self.Xshift() - self.Zshift() + self.x_shift() + self.z_shift() def collect_signal_destination(self): """Calculate signal destinations by considering dependencies of each node.""" @@ -1734,24 +1734,24 @@ def shift_signals(self) -> dict[int, list[int]]: signal_dict = {} for node_index in self.morder + self.output_nodes: node = self.nodes[node_index] - if node.Mprop[0] is None: + if node.m_prop[0] is None: continue - extracted_signal = extract_signal(node.Mprop[0], node.Mprop[2], node.Mprop[3]) + extracted_signal = extract_signal(node.m_prop[0], node.m_prop[2], node.m_prop[3]) signal = extracted_signal.signal signal_dict[node_index] = signal - self.nodes[node_index].Mprop[2] = extracted_signal.s_domain - self.nodes[node_index].Mprop[3] = extracted_signal.t_domain + self.nodes[node_index].m_prop[2] = extracted_signal.s_domain + self.nodes[node_index].m_prop[3] = extracted_signal.t_domain for signal_label, destinated_nodes in self.signal_destination[node_index].items(): for destinated_node in destinated_nodes: node = self.nodes[destinated_node] if signal_label == "Ms": - node.Mprop[2] ^= signal + node.m_prop[2] ^= signal elif signal_label == "Mt": - node.Mprop[3] ^= signal + node.m_prop[3] ^= signal elif signal_label == "X": - node.Xsignal ^= signal + node.x_signal ^= signal elif signal_label == "Z": - node.Zsignal ^= signal + node.z_signal ^= signal else: raise ValueError(f"Invalid signal label: {signal_label}") return signal_dict @@ -1786,31 +1786,31 @@ def get_pattern(self): """ assert self.is_standard() pattern = Pattern(input_nodes=self.input_nodes) - Nseq = [command.N(node=i) for i in self.nodes.keys() - self.input_nodes] - Eseq = [] - Mseq = [] - Xseq = [] - Zseq = [] - Cseq = [] + n_seq = [command.N(node=i) for i in self.nodes.keys() - self.input_nodes] + e_seq = [] + m_seq = [] + x_seq = [] + z_seq = [] + c_seq = [] for node_index in self.morder + self.output_nodes: node = self.nodes[node_index] for cmd in node.seq: if cmd >= 0: - Eseq.append(node.get_command(cmd)) + e_seq.append(node.get_command(cmd)) self.nodes[cmd].seq.remove(node_index) elif cmd == -1: - Mseq.append(node.get_command(cmd)) + m_seq.append(node.get_command(cmd)) elif cmd == -2: - Xseq.append(node.get_command(cmd)) + x_seq.append(node.get_command(cmd)) elif cmd == -3: - Zseq.append(node.get_command(cmd)) + z_seq.append(node.get_command(cmd)) elif cmd == -4: - Cseq.append(node.get_command(cmd)) + c_seq.append(node.get_command(cmd)) else: raise ValueError(f"command {cmd} is invalid!") if node.result is not None: pattern.results[node.index] = node.result - pattern.replace(Nseq + Eseq + Mseq + Xseq + Zseq + Cseq) + pattern.replace(n_seq + e_seq + m_seq + x_seq + z_seq + c_seq) return pattern @@ -1942,7 +1942,7 @@ def measure_pauli(pattern, leave_input, copy=False, use_rustworkx=False): output_nodes = deepcopy(pattern.output_nodes) pat.replace(new_seq, input_nodes=new_inputs) pat.reorder_output_nodes(output_nodes) - assert pat.Nnode == len(graph_state.nodes) + assert pat.n_node == len(graph_state.nodes) pat.results = results pat._pauli_preprocessed = True return pat diff --git a/graphix/random_objects.py b/graphix/random_objects.py index ca5bd62f..7f1c0adc 100644 --- a/graphix/random_objects.py +++ b/graphix/random_objects.py @@ -59,8 +59,8 @@ def rand_dm(dim: int, rank: int | None = 
None, dm_dtype=True) -> DensityMatrix | dm = np.diag(padded_evals / np.sum(padded_evals)) - randU = rand_unit(dim) - dm = randU @ dm @ randU.transpose().conj() + rand_u = rand_unit(dim) + dm = rand_u @ dm @ rand_u.transpose().conj() if dm_dtype: # will raise an error if incorrect dimension @@ -121,15 +121,15 @@ def rand_channel_kraus(dim: int, rank: int | None = None, sig: float = 1 / np.sq raise ValueError("The rank of a Kraus expansion must be greater or equal than 1.") pre_kraus_list = [rand_gauss_cpx_mat(dim=dim, sig=sig) for _ in range(rank)] - Hmat = np.sum([m.transpose().conjugate() @ m for m in pre_kraus_list], axis=0) - kraus_list = np.array(pre_kraus_list) @ scipy.linalg.inv(scipy.linalg.sqrtm(Hmat)) + h_mat = np.sum([m.transpose().conjugate() @ m for m in pre_kraus_list], axis=0) + kraus_list = np.array(pre_kraus_list) @ scipy.linalg.inv(scipy.linalg.sqrtm(h_mat)) return KrausChannel([{"coef": 1.0 + 0.0 * 1j, "operator": kraus_list[i]} for i in range(rank)]) # or merge with previous with a "pauli" kwarg? ### continue here -def rand_Pauli_channel_kraus(dim: int, rank: int | None = None) -> KrausChannel: +def rand_pauli_channel_kraus(dim: int, rank: int | None = None) -> KrausChannel: if not isinstance(dim, int): raise ValueError(f"The dimension must be an integer and not {dim}.") @@ -158,11 +158,11 @@ def rand_Pauli_channel_kraus(dim: int, rank: int | None = None) -> KrausChannel: prob_list[:rank] = tmp_list np.random.shuffle(prob_list) - tensor_Pauli_ops = Ops.build_tensor_Pauli_ops(nqb) + tensor_pauli_ops = Ops.build_tensor_pauli_ops(nqb) target_indices = np.nonzero(prob_list) params = prob_list[target_indices] - ops = tensor_Pauli_ops[target_indices] + ops = tensor_pauli_ops[target_indices] # TODO see how to use zip and dict to convert from tuple to dict # https://www.tutorialspoint.com/How-I-can-convert-a-Python-Tuple-into-Dictionary diff --git a/graphix/sim/density_matrix.py b/graphix/sim/density_matrix.py index 8d386ab6..44589227 100644 --- a/graphix/sim/density_matrix.py +++ b/graphix/sim/density_matrix.py @@ -62,7 +62,7 @@ def check_size_consistency(mat): check_size_consistency(data) # safe: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.copy.html self.rho = data.rho.copy() - self.Nqubit = data.Nqubit + self.n_qubit = data.n_qubit return if isinstance(data, Iterable): input_list = list(data) @@ -75,14 +75,14 @@ def check_size_consistency(mat): check_size_consistency(self.rho) assert check_unit_trace(self.rho) assert check_psd(self.rho) - self.Nqubit = self.rho.shape[0].bit_length() - 1 + self.n_qubit = self.rho.shape[0].bit_length() - 1 return except TypeError: pass statevec = Statevec(data, nqubit) # NOTE this works since np.outer flattens the inputs! self.rho = np.outer(statevec.psi, statevec.psi.conj()) - self.Nqubit = len(statevec.dims()) + self.n_qubit = len(statevec.dims()) def __repr__(self): return f"DensityMatrix object, with density matrix {self.rho} and shape {self.dims()}." @@ -97,14 +97,14 @@ def evolve_single(self, op, i): i : int Index of qubit to apply operator. 
""" - assert i >= 0 and i < self.Nqubit + assert i >= 0 and i < self.n_qubit if op.shape != (2, 2): raise ValueError("op must be 2*2 matrix.") - rho_tensor = self.rho.reshape((2,) * self.Nqubit * 2) - rho_tensor = np.tensordot(np.tensordot(op, rho_tensor, axes=(1, i)), op.conj().T, axes=(i + self.Nqubit, 0)) - rho_tensor = np.moveaxis(rho_tensor, (0, -1), (i, i + self.Nqubit)) - self.rho = rho_tensor.reshape((2**self.Nqubit, 2**self.Nqubit)) + rho_tensor = self.rho.reshape((2,) * self.n_qubit * 2) + rho_tensor = np.tensordot(np.tensordot(op, rho_tensor, axes=(1, i)), op.conj().T, axes=(i + self.n_qubit, 0)) + rho_tensor = np.moveaxis(rho_tensor, (0, -1), (i, i + self.n_qubit)) + self.rho = rho_tensor.reshape((2**self.n_qubit, 2**self.n_qubit)) def evolve(self, op, qargs): """Multi-qubit operation @@ -133,26 +133,26 @@ def evolve(self, op, qargs): if nqb_op != len(qargs): raise ValueError("The dimension of the operator doesn't match the number of targets.") - if not all(0 <= i < self.Nqubit for i in qargs): + if not all(0 <= i < self.n_qubit for i in qargs): raise ValueError("Incorrect target indices.") if len(set(qargs)) != nqb_op: raise ValueError("A repeated target qubit index is not possible.") op_tensor = op.reshape((2,) * 2 * nqb_op) - rho_tensor = self.rho.reshape((2,) * self.Nqubit * 2) + rho_tensor = self.rho.reshape((2,) * self.n_qubit * 2) rho_tensor = np.tensordot( np.tensordot(op_tensor, rho_tensor, axes=[tuple(nqb_op + i for i in range(len(qargs))), tuple(qargs)]), op.conj().T.reshape((2,) * 2 * nqb_op), - axes=[tuple(i + self.Nqubit for i in qargs), tuple(i for i in range(len(qargs)))], + axes=[tuple(i + self.n_qubit for i in qargs), tuple(i for i in range(len(qargs)))], ) rho_tensor = np.moveaxis( rho_tensor, [i for i in range(len(qargs))] + [-i for i in range(1, len(qargs) + 1)], - [i for i in qargs] + [i + self.Nqubit for i in reversed(list(qargs))], + [i for i in qargs] + [i + self.n_qubit for i in reversed(list(qargs))], ) - self.rho = rho_tensor.reshape((2**self.Nqubit, 2**self.Nqubit)) + self.rho = rho_tensor.reshape((2**self.n_qubit, 2**self.n_qubit)) def expectation_single(self, op, i): """Expectation value of single-qubit operator. @@ -164,8 +164,8 @@ def expectation_single(self, op, i): complex: expectation value (real for hermitian ops!). """ - if not (0 <= i < self.Nqubit): - raise ValueError(f"Wrong target qubit {i}. Must between 0 and {self.Nqubit-1}.") + if not (0 <= i < self.n_qubit): + raise ValueError(f"Wrong target qubit {i}. Must between 0 and {self.n_qubit-1}.") if op.shape != (2, 2): raise ValueError("op must be 2x2 matrix.") @@ -173,10 +173,10 @@ def expectation_single(self, op, i): st1 = deepcopy(self) st1.normalize() - rho_tensor = st1.rho.reshape((2,) * st1.Nqubit * 2) + rho_tensor = st1.rho.reshape((2,) * st1.n_qubit * 2) rho_tensor = np.tensordot(op, rho_tensor, axes=[1, i]) rho_tensor = np.moveaxis(rho_tensor, 0, i) - st1.rho = rho_tensor.reshape((2**self.Nqubit, 2**self.Nqubit)) + st1.rho = rho_tensor.reshape((2**self.n_qubit, 2**self.n_qubit)) return np.trace(st1.rho) @@ -195,7 +195,7 @@ def tensor(self, other): if not isinstance(other, DensityMatrix): other = DensityMatrix(other) self.rho = np.kron(self.rho, other.rho) - self.Nqubit += other.Nqubit + self.n_qubit += other.n_qubit def cnot(self, edge): """Apply CNOT gate to density matrix. 
@@ -259,7 +259,7 @@ def ptrace(self, qargs): ) self.rho = rho_res.reshape((2**nqubit_after, 2**nqubit_after)) - self.Nqubit = nqubit_after + self.n_qubit = nqubit_after def fidelity(self, statevec): """calculate the fidelity against reference statevector. @@ -293,7 +293,7 @@ def apply_channel(self, channel: KrausChannel, qargs): .... """ - result_array = np.zeros((2**self.Nqubit, 2**self.Nqubit), dtype=np.complex128) + result_array = np.zeros((2**self.n_qubit, 2**self.n_qubit), dtype=np.complex128) tmp_dm = deepcopy(self) if not isinstance(channel, KrausChannel): @@ -335,7 +335,7 @@ def __init__(self, pattern, max_qubit_num=12, pr_calc=True, input_state: Data = self.results = deepcopy(pattern.results) self.state = None self.node_index = [] - self.Nqubit = 0 + self.n_qubit = 0 self.max_qubit_num = max_qubit_num if pattern.max_space() > max_qubit_num: raise ValueError("Pattern.max_space is larger than max_qubit_num. Increase max_qubit_num and try again.") @@ -361,7 +361,7 @@ def add_nodes(self, nodes, input_state: Data = graphix.states.BasicStates.PLUS): dm_to_add = DensityMatrix(nqubit=n, data=input_state) self.state.tensor(dm_to_add) self.node_index.extend(nodes) - self.Nqubit += n + self.n_qubit += n def entangle_nodes(self, edge): """Apply CZ gate to the two connected nodes. diff --git a/graphix/sim/statevec.py b/graphix/sim/statevec.py index e3d8b2d6..13c266ea 100644 --- a/graphix/sim/statevec.py +++ b/graphix/sim/statevec.py @@ -51,7 +51,7 @@ def __init__( self.results = deepcopy(pattern.results) self.state = None self.node_index = [] - self.Nqubit = 0 + self.n_qubit = 0 self.to_trace = [] self.to_trace_loc = [] self.max_qubit_num = max_qubit_num @@ -86,7 +86,7 @@ def add_nodes(self, nodes: list[int], input_state=graphix.states.BasicStates.PLU sv_to_add = Statevec(nqubit=n, data=input_state) self.state.tensor(sv_to_add) self.node_index.extend(nodes) - self.Nqubit += n + self.n_qubit += n def entangle_nodes(self, edge: tuple[int]): """Apply CZ gate to two connected nodes @@ -110,7 +110,7 @@ def measure(self, cmd: command.M): """ loc = self._perform_measure(cmd) self.state.remove_qubit(loc) - self.Nqubit -= 1 + self.n_qubit -= 1 def correct_byproduct(self, cmd: list[command.X, command.Z]): """Byproduct correction @@ -419,7 +419,7 @@ def tensor(self, other): total_num = len(self.dims()) + len(other.dims()) self.psi = np.kron(psi_self, psi_other).reshape((2,) * total_num) - def CNOT(self, qubits): + def cnot(self, qubits): """apply CNOT Parameters diff --git a/graphix/sim/tensornet.py b/graphix/sim/tensornet.py index 94de6bac..45bf30ae 100644 --- a/graphix/sim/tensornet.py +++ b/graphix/sim/tensornet.py @@ -112,7 +112,7 @@ def entangle_nodes(self, edge): for i in range(2): tensors[i].retag({"Open": "Close"}, inplace=True) self.state._dangling[str(edge[i])] = new_inds[i] - CZ_tn = TensorNetwork( + cz_tn = TensorNetwork( [ qtn.Tensor( self._decomposed_cz[0], @@ -126,7 +126,7 @@ def entangle_nodes(self, edge): ), ] ) - self.state.add_tensor_network(CZ_tn) + self.state.add_tensor_network(cz_tn) elif self.graph_prep == "opt": pass @@ -740,14 +740,14 @@ def proj_basis(angle, vop, plane, choice): """ if plane == Plane.XY: vec = BasicStates.VEC[0 + choice].get_statevector() - rotU = Ops.Rz(angle) + rot_u = Ops.rz(angle) elif plane == Plane.YZ: vec = BasicStates.VEC[4 + choice].get_statevector() - rotU = Ops.Rx(angle) + rot_u = Ops.rx(angle) elif plane == Plane.XZ: vec = BasicStates.VEC[0 + choice].get_statevector() - rotU = Ops.Ry(-angle) - vec = np.matmul(rotU, vec) + rot_u = Ops.ry(-angle) + vec 
= np.matmul(rot_u, vec) vec = np.matmul(CLIFFORD[CLIFFORD_CONJ[vop]], vec) return vec diff --git a/graphix/transpiler.py b/graphix/transpiler.py index 149dd595..377bb4d5 100644 --- a/graphix/transpiler.py +++ b/graphix/transpiler.py @@ -281,21 +281,21 @@ def transpile(self, opt: bool = False) -> TranspileResult: -------- result : :class:`TranspileResult` object """ - Nnode = self.width + n_node = self.width out = [j for j in range(self.width)] pattern = Pattern(input_nodes=[j for j in range(self.width)]) classical_outputs = [] for instr in self.instruction: kind = instr.kind if kind == instruction.InstructionKind.CNOT: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] assert out[instr.control] is not None assert out[instr.target] is not None out[instr.control], out[instr.target], seq = self._cnot_command( out[instr.control], out[instr.target], ancilla ) pattern.extend(seq) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.SWAP: out[instr.targets[0]], out[instr.targets[1]] = ( out[instr.targets[1]], @@ -304,61 +304,61 @@ def transpile(self, opt: bool = False) -> TranspileResult: elif kind == instruction.InstructionKind.I: pass elif kind == instruction.InstructionKind.H: - ancilla = Nnode + ancilla = n_node out[instr.target], seq = self._h_command(out[instr.target], ancilla) pattern.extend(seq) - Nnode += 1 + n_node += 1 elif kind == instruction.InstructionKind.S: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._s_command(out[instr.target], ancilla) pattern.extend(seq) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.X: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._x_command(out[instr.target], ancilla) pattern.extend(seq) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.Y: - ancilla = [Nnode, Nnode + 1, Nnode + 2, Nnode + 3] + ancilla = [n_node, n_node + 1, n_node + 2, n_node + 3] out[instr.target], seq = self._y_command(out[instr.target], ancilla) pattern.extend(seq) - Nnode += 4 + n_node += 4 elif kind == instruction.InstructionKind.Z: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._z_command(out[instr.target], ancilla) pattern.extend(seq) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.RX: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._rx_command(out[instr.target], ancilla, instr.angle) pattern.extend(seq) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.RY: - ancilla = [Nnode, Nnode + 1, Nnode + 2, Nnode + 3] + ancilla = [n_node, n_node + 1, n_node + 2, n_node + 3] out[instr.target], seq = self._ry_command(out[instr.target], ancilla, instr.angle) pattern.extend(seq) - Nnode += 4 + n_node += 4 elif kind == instruction.InstructionKind.RZ: if opt: - ancilla = Nnode + ancilla = n_node out[instr.target], seq = self._rz_command_opt(out[instr.target], ancilla, instr.angle) pattern.extend(seq) - Nnode += 1 + n_node += 1 else: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._rz_command(out[instr.target], ancilla, instr.angle) pattern.extend(seq) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.RZZ: if opt: - ancilla = Nnode + ancilla = n_node ( out[instr.control], out[instr.target], seq, ) = self._rzz_command_opt(out[instr.control], out[instr.target], ancilla, instr.angle) pattern.extend(seq) - Nnode += 1 + n_node += 1 else: raise 
NotImplementedError( "YZ-plane measurements not accepted and Rzz gate\ @@ -366,7 +366,7 @@ def transpile(self, opt: bool = False) -> TranspileResult: ) elif kind == instruction.InstructionKind.CCX: if opt: - ancilla = [Nnode + i for i in range(11)] + ancilla = [n_node + i for i in range(11)] ( out[instr.controls[0]], out[instr.controls[1]], @@ -379,9 +379,9 @@ def transpile(self, opt: bool = False) -> TranspileResult: ancilla, ) pattern.extend(seq) - Nnode += 11 + n_node += 11 else: - ancilla = [Nnode + i for i in range(18)] + ancilla = [n_node + i for i in range(18)] ( out[instr.controls[0]], out[instr.controls[1]], @@ -394,7 +394,7 @@ def transpile(self, opt: bool = False) -> TranspileResult: ancilla, ) pattern.extend(seq) - Nnode += 18 + n_node += 18 elif kind == instruction.InstructionKind.M: node_index = out[instr.target] seq = self._m_command(instr.target, instr.plane, instr.angle) @@ -421,29 +421,29 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: -------- pattern : :class:`graphix.pattern.Pattern` object """ - self._N: list[N] = [] + self._n: list[N] = [] # for i in range(self.width): - # self._N.append(["N", i]) - self._M: list[M] = [] - self._E: list[E] = [] + # self._n.append(["N", i]) + self._m: list[M] = [] + self._e: list[E] = [] self._instr: list[instruction.Instruction] = [] - Nnode = self.width + n_node = self.width inputs = [j for j in range(self.width)] out = [j for j in range(self.width)] classical_outputs = [] for instr in self.instruction: kind = instr.kind if kind == instruction.InstructionKind.CNOT: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] assert out[instr.control] is not None assert out[instr.target] is not None out[instr.control], out[instr.target], seq = self._cnot_command( out[instr.control], out[instr.target], ancilla ) - self._N.extend(seq[0:2]) - self._E.extend(seq[2:5]) - self._M.extend(seq[5:7]) - Nnode += 2 + self._n.extend(seq[0:2]) + self._e.extend(seq[2:5]) + self._m.extend(seq[5:7]) + n_node += 2 self._instr.append(instr) self._instr.append( instruction.XC( @@ -472,11 +472,11 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: elif kind == instruction.InstructionKind.I: pass elif kind == instruction.InstructionKind.H: - ancilla = Nnode + ancilla = n_node out[instr.target], seq = self._h_command(out[instr.target], ancilla) - self._N.append(seq[0]) - self._E.append(seq[1]) - self._M.append(seq[2]) + self._n.append(seq[0]) + self._e.append(seq[1]) + self._m.append(seq[2]) self._instr.append(instr) self._instr.append( instruction.XC( @@ -484,13 +484,13 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[3].domain, ) ) - Nnode += 1 + n_node += 1 elif kind == instruction.InstructionKind.S: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._s_command(out[instr.target], ancilla) - self._N.extend(seq[0:2]) - self._E.extend(seq[2:4]) - self._M.extend(seq[4:6]) + self._n.extend(seq[0:2]) + self._e.extend(seq[2:4]) + self._m.extend(seq[4:6]) self._instr.append(instr) self._instr.append( instruction.XC( @@ -504,13 +504,13 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[7].domain, ) ) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.X: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._x_command(out[instr.target], ancilla) - self._N.extend(seq[0:2]) - self._E.extend(seq[2:4]) - self._M.extend(seq[4:6]) + 
self._n.extend(seq[0:2]) + self._e.extend(seq[2:4]) + self._m.extend(seq[4:6]) self._instr.append(instr) self._instr.append( instruction.XC( @@ -524,13 +524,13 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[7].domain, ) ) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.Y: - ancilla = [Nnode, Nnode + 1, Nnode + 2, Nnode + 3] + ancilla = [n_node, n_node + 1, n_node + 2, n_node + 3] out[instr.target], seq = self._y_command(out[instr.target], ancilla) - self._N.extend(seq[0:4]) - self._E.extend(seq[4:8]) - self._M.extend(seq[8:12]) + self._n.extend(seq[0:4]) + self._e.extend(seq[4:8]) + self._m.extend(seq[8:12]) self._instr.append(instr) self._instr.append( instruction.XC( @@ -544,13 +544,13 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[13].domain, ) ) - Nnode += 4 + n_node += 4 elif kind == instruction.InstructionKind.Z: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._z_command(out[instr.target], ancilla) - self._N.extend(seq[0:2]) - self._E.extend(seq[2:4]) - self._M.extend(seq[4:6]) + self._n.extend(seq[0:2]) + self._e.extend(seq[2:4]) + self._m.extend(seq[4:6]) self._instr.append(instr) self._instr.append( instruction.XC( @@ -564,15 +564,15 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[7].domain, ) ) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.RX: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._rx_command(out[instr.target], ancilla, instr.angle) - self._N.extend(seq[0:2]) - self._E.extend(seq[2:4]) - self._M.extend(seq[4:6]) + self._n.extend(seq[0:2]) + self._e.extend(seq[2:4]) + self._m.extend(seq[4:6]) instr_ = deepcopy(instr) - instr_.meas_index = len(self._M) - 1 # index of arb angle measurement command + instr_.meas_index = len(self._m) - 1 # index of arb angle measurement command self._instr.append(instr_) self._instr.append( instruction.XC( @@ -586,15 +586,15 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[7].domain, ) ) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.RY: - ancilla = [Nnode, Nnode + 1, Nnode + 2, Nnode + 3] + ancilla = [n_node, n_node + 1, n_node + 2, n_node + 3] out[instr.target], seq = self._ry_command(out[instr.target], ancilla, instr.angle) - self._N.extend(seq[0:4]) - self._E.extend(seq[4:8]) - self._M.extend(seq[8:12]) + self._n.extend(seq[0:4]) + self._e.extend(seq[4:8]) + self._m.extend(seq[8:12]) instr_ = deepcopy(instr) - instr_.meas_index = len(self._M) - 3 # index of arb angle measurement command + instr_.meas_index = len(self._m) - 3 # index of arb angle measurement command self._instr.append(instr_) self._instr.append( instruction.XC( @@ -608,16 +608,16 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[13].domain, ) ) - Nnode += 4 + n_node += 4 elif kind == instruction.InstructionKind.RZ: if opt: - ancilla = Nnode + ancilla = n_node out[instr.target], seq = self._rz_command_opt(out[instr.target], ancilla, instr.angle) - self._N.append(seq[0]) - self._E.append(seq[1]) - self._M.append(seq[2]) + self._n.append(seq[0]) + self._e.append(seq[1]) + self._m.append(seq[2]) instr_ = deepcopy(instr) - instr_.meas_index = len(self._M) - 1 # index of arb angle measurement command + instr_.meas_index = len(self._m) - 1 # index of arb angle measurement command self._instr.append(instr_) self._instr.append( 
instruction.ZC( @@ -625,15 +625,15 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[3].domain, ) ) - Nnode += 1 + n_node += 1 else: - ancilla = [Nnode, Nnode + 1] + ancilla = [n_node, n_node + 1] out[instr.target], seq = self._rz_command(out[instr.target], ancilla, instr.angle) - self._N.extend(seq[0:2]) - self._E.extend(seq[2:4]) - self._M.extend(seq[4:6]) + self._n.extend(seq[0:2]) + self._e.extend(seq[2:4]) + self._m.extend(seq[4:6]) instr_ = deepcopy(instr) - instr_.meas_index = len(self._M) - 2 # index of arb angle measurement command + instr_.meas_index = len(self._m) - 2 # index of arb angle measurement command self._instr.append(instr_) self._instr.append( instruction.XC( @@ -647,18 +647,18 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: domain=seq[7].domain, ) ) - Nnode += 2 + n_node += 2 elif kind == instruction.InstructionKind.RZZ: - ancilla = Nnode + ancilla = n_node out[instr.control], out[instr.target], seq = self._rzz_command_opt( out[instr.control], out[instr.target], ancilla, instr.angle ) - self._N.append(seq[0]) - self._E.extend(seq[1:3]) - self._M.append(seq[3]) - Nnode += 1 + self._n.append(seq[0]) + self._e.extend(seq[1:3]) + self._m.append(seq[3]) + n_node += 1 instr_ = deepcopy(instr) - instr_.meas_index = len(self._M) - 1 # index of arb angle measurement command + instr_.meas_index = len(self._m) - 1 # index of arb angle measurement command self._instr.append(instr_) self._instr.append( instruction.ZC( @@ -679,7 +679,7 @@ def standardize_and_transpile(self, opt: bool = True) -> TranspileResult: self._move_byproduct_to_right() # create command sequence - command_seq = [*self._N, *reversed(self._E), *self._M] + command_seq = [*self._n, *reversed(self._e), *self._m] bpx_added = dict() bpz_added = dict() # byproduct command buffer @@ -758,7 +758,7 @@ def _commute_with_cnot(self, target: int): self._commute_with_following(target) return target - def _commute_with_H(self, target: int): + def _commute_with_h(self, target: int): correction_instr = self._instr[target] h_instr = self._instr[target + 1] assert ( @@ -780,7 +780,7 @@ def _commute_with_H(self, target: int): else: self._commute_with_following(target) - def _commute_with_S(self, target: int): + def _commute_with_s(self, target: int): correction_instr = self._instr[target] s_instr = self._instr[target + 1] assert ( @@ -803,7 +803,7 @@ def _commute_with_S(self, target: int): self._commute_with_following(target) return target - def _commute_with_Rx(self, target: int): + def _commute_with_rx(self, target: int): correction_instr = self._instr[target] rx_instr = self._instr[target + 1] assert ( @@ -814,14 +814,14 @@ def _commute_with_Rx(self, target: int): if correction_instr.target == rx_instr.target: if correction_instr.kind == instruction.InstructionKind.ZC: # add to the s-domain - extend_domain(self._M[rx_instr.meas_index], correction_instr.domain) + extend_domain(self._m[rx_instr.meas_index], correction_instr.domain) self._commute_with_following(target) else: self._commute_with_following(target) else: self._commute_with_following(target) - def _commute_with_Ry(self, target: int): + def _commute_with_ry(self, target: int): correction_instr = self._instr[target] ry_instr = self._instr[target + 1] assert ( @@ -831,12 +831,12 @@ def _commute_with_Ry(self, target: int): assert ry_instr.kind == instruction.InstructionKind.RY if correction_instr.target == ry_instr.target: # add to the s-domain - extend_domain(self._M[ry_instr.meas_index], 
correction_instr.domain) + extend_domain(self._m[ry_instr.meas_index], correction_instr.domain) self._commute_with_following(target) else: self._commute_with_following(target) - def _commute_with_Rz(self, target: int): + def _commute_with_rz(self, target: int): correction_instr = self._instr[target] rz_instr = self._instr[target + 1] assert ( @@ -847,14 +847,14 @@ def _commute_with_Rz(self, target: int): if correction_instr.target == rz_instr.target: if correction_instr.kind == instruction.InstructionKind.XC: # add to the s-domain - extend_domain(self._M[rz_instr.meas_index], correction_instr.domain) + extend_domain(self._m[rz_instr.meas_index], correction_instr.domain) self._commute_with_following(target) else: self._commute_with_following(target) else: self._commute_with_following(target) - def _commute_with_Rzz(self, target: int): + def _commute_with_rzz(self, target: int): correction_instr = self._instr[target] rzz_instr = self._instr[target + 1] assert ( @@ -867,7 +867,7 @@ def _commute_with_Rzz(self, target: int): cond2 = correction_instr.target == rzz_instr.target if cond or cond2: # add to the s-domain - extend_domain(self._M[rzz_instr.meas_index], correction_instr.domain) + extend_domain(self._m[rzz_instr.meas_index], correction_instr.domain) self._commute_with_following(target) def _commute_with_following(self, target: int): @@ -880,9 +880,9 @@ def _commute_with_following(self, target: int): target : int target command index """ - A = self._instr[target + 1] + a = self._instr[target + 1] self._instr.pop(target + 1) - self._instr.insert(target, A) + self._instr.insert(target, a) def _find_byproduct_to_move(self, rev: bool = False, skipnum: int = 0): """Internal method for reordering commands @@ -933,17 +933,17 @@ def _move_byproduct_to_right(self): elif kind == instruction.InstructionKind.SWAP: target = self._commute_with_swap(target) elif kind == instruction.InstructionKind.H: - self._commute_with_H(target) + self._commute_with_h(target) elif kind == instruction.InstructionKind.S: - target = self._commute_with_S(target) + target = self._commute_with_s(target) elif kind == instruction.InstructionKind.RX: - self._commute_with_Rx(target) + self._commute_with_rx(target) elif kind == instruction.InstructionKind.RY: - self._commute_with_Ry(target) + self._commute_with_ry(target) elif kind == instruction.InstructionKind.RZ: - self._commute_with_Rz(target) + self._commute_with_rz(target) elif kind == instruction.InstructionKind.RZZ: - self._commute_with_Rzz(target) + self._commute_with_rzz(target) else: # Pauli gates commute up to global phase. 
self._commute_with_following(target) @@ -951,7 +951,7 @@ def _move_byproduct_to_right(self): @classmethod def _cnot_command( - self, control_node: int, target_node: int, ancilla: Sequence[int] + cls, control_node: int, target_node: int, ancilla: Sequence[int] ) -> tuple[int, int, list[command.Command]]: """MBQC commands for CNOT gate @@ -986,7 +986,7 @@ def _cnot_command( return control_node, ancilla[1], seq @classmethod - def _m_command(self, input_node: int, plane: Plane, angle: float): + def _m_command(cls, input_node: int, plane: Plane, angle: float): """MBQC commands for measuring qubit Parameters @@ -1007,7 +1007,7 @@ def _m_command(self, input_node: int, plane: Plane, angle: float): return seq @classmethod - def _h_command(self, input_node: int, ancilla: int): + def _h_command(cls, input_node: int, ancilla: int): """MBQC commands for Hadamard gate Parameters @@ -1031,7 +1031,7 @@ def _h_command(self, input_node: int, ancilla: int): return ancilla, seq @classmethod - def _s_command(self, input_node: int, ancilla: Sequence[int]) -> tuple[int, list[command.Command]]: + def _s_command(cls, input_node: int, ancilla: Sequence[int]) -> tuple[int, list[command.Command]]: """MBQC commands for S gate Parameters @@ -1059,7 +1059,7 @@ def _s_command(self, input_node: int, ancilla: Sequence[int]) -> tuple[int, list return ancilla[1], seq @classmethod - def _x_command(self, input_node: int, ancilla: Sequence[int]) -> tuple[int, list[command.Command]]: + def _x_command(cls, input_node: int, ancilla: Sequence[int]) -> tuple[int, list[command.Command]]: """MBQC commands for Pauli X gate Parameters @@ -1087,7 +1087,7 @@ def _x_command(self, input_node: int, ancilla: Sequence[int]) -> tuple[int, list return ancilla[1], seq @classmethod - def _y_command(self, input_node: int, ancilla: Sequence[int]) -> tuple[int, list[command.Command]]: + def _y_command(cls, input_node: int, ancilla: Sequence[int]) -> tuple[int, list[command.Command]]: """MBQC commands for Pauli Y gate Parameters @@ -1120,7 +1120,7 @@ def _y_command(self, input_node: int, ancilla: Sequence[int]) -> tuple[int, list return ancilla[3], seq @classmethod - def _z_command(self, input_node: int, ancilla: Sequence[int]) -> tuple[int, list[command.Command]]: + def _z_command(cls, input_node: int, ancilla: Sequence[int]) -> tuple[int, list[command.Command]]: """MBQC commands for Pauli Z gate Parameters @@ -1148,7 +1148,7 @@ def _z_command(self, input_node: int, ancilla: Sequence[int]) -> tuple[int, list return ancilla[1], seq @classmethod - def _rx_command(self, input_node: int, ancilla: Sequence[int], angle: float) -> tuple[int, list[command.Command]]: + def _rx_command(cls, input_node: int, ancilla: Sequence[int], angle: float) -> tuple[int, list[command.Command]]: """MBQC commands for X rotation gate Parameters @@ -1178,7 +1178,7 @@ def _rx_command(self, input_node: int, ancilla: Sequence[int], angle: float) -> return ancilla[1], seq @classmethod - def _ry_command(self, input_node: int, ancilla: Sequence[int], angle: float) -> tuple[int, list[command.Command]]: + def _ry_command(cls, input_node: int, ancilla: Sequence[int], angle: float) -> tuple[int, list[command.Command]]: """MBQC commands for Y rotation gate Parameters @@ -1213,7 +1213,7 @@ def _ry_command(self, input_node: int, ancilla: Sequence[int], angle: float) -> return ancilla[3], seq @classmethod - def _rz_command(self, input_node: int, ancilla: Sequence[int], angle: float) -> tuple[int, list[command.Command]]: + def _rz_command(cls, input_node: int, ancilla: Sequence[int], angle: 
float) -> tuple[int, list[command.Command]]: """MBQC commands for Z rotation gate Parameters @@ -1243,7 +1243,7 @@ def _rz_command(self, input_node: int, ancilla: Sequence[int], angle: float) -> return ancilla[1], seq @classmethod - def _rz_command_opt(self, input_node: int, ancilla: int, angle: float) -> tuple[int, list[command.Command]]: + def _rz_command_opt(cls, input_node: int, ancilla: int, angle: float) -> tuple[int, list[command.Command]]: """optimized MBQC commands for Z rotation gate Parameters @@ -1270,7 +1270,7 @@ def _rz_command_opt(self, input_node: int, ancilla: int, angle: float) -> tuple[ @classmethod def _rzz_command_opt( - self, control_node: int, target_node: int, ancilla: int, angle: float + cls, control_node: int, target_node: int, ancilla: int, angle: float ) -> tuple[int, int, list[command.Command]]: """Optimized MBQC commands for ZZ-rotation gate @@ -1302,7 +1302,7 @@ def _rzz_command_opt( @classmethod def _ccx_command( - self, + cls, control_node1: int, control_node2: int, target_node: int, @@ -1460,7 +1460,7 @@ def _ccx_command( @classmethod def _ccx_command_opt( - self, + cls, control_node1: int, control_node2: int, target_node: int, @@ -1544,7 +1544,7 @@ def _ccx_command_opt( return ancilla[10], ancilla[9], ancilla[7], seq @classmethod - def _sort_outputs(self, pattern: Pattern, output_nodes: Sequence[int]): + def _sort_outputs(cls, pattern: Pattern, output_nodes: Sequence[int]): """Sort the node indices of ouput qubits. Parameters @@ -1599,7 +1599,7 @@ def simulate_statevector(self, input_state: graphix.sim.statevec.Data | None = N instr = self.instruction[i] kind = instr.kind if kind == instruction.InstructionKind.CNOT: - state.CNOT((instr.control, instr.target)) + state.cnot((instr.control, instr.target)) elif kind == instruction.InstructionKind.SWAP: state.swap(instr.targets) elif kind == instruction.InstructionKind.I: @@ -1615,13 +1615,13 @@ def simulate_statevector(self, input_state: graphix.sim.statevec.Data | None = N elif kind == instruction.InstructionKind.Z: state.evolve_single(Ops.z, instr.target) elif kind == instruction.InstructionKind.RX: - state.evolve_single(Ops.Rx(instr.angle), instr.target) + state.evolve_single(Ops.rx(instr.angle), instr.target) elif kind == instruction.InstructionKind.RY: - state.evolve_single(Ops.Ry(instr.angle), instr.target) + state.evolve_single(Ops.ry(instr.angle), instr.target) elif kind == instruction.InstructionKind.RZ: - state.evolve_single(Ops.Rz(instr.angle), instr.target) + state.evolve_single(Ops.rz(instr.angle), instr.target) elif kind == instruction.InstructionKind.RZZ: - state.evolve(Ops.Rzz(instr.angle), [instr.control, instr.target]) + state.evolve(Ops.rzz(instr.angle), [instr.control, instr.target]) elif kind == instruction.InstructionKind.CCX: state.evolve(Ops.ccx, [instr.controls[0], instr.controls[1], instr.target]) elif kind == instruction.InstructionKind.M: diff --git a/graphix/visualization.py b/graphix/visualization.py index 4322d20f..934315e1 100644 --- a/graphix/visualization.py +++ b/graphix/visualization.py @@ -39,7 +39,7 @@ class GraphVisualizer: def __init__( self, - G: nx.Graph, + g: nx.Graph, v_in: list[int], v_out: list[int], meas_plane: dict[int, str] | None = None, @@ -49,7 +49,7 @@ def __init__( """ Parameters ---------- - G : :class:`networkx.graph.Graph` object + g : :class:`networkx.graph.Graph` object networkx graph v_in : list list of input nodes @@ -63,11 +63,11 @@ def __init__( local_clifford : dict dict specifying the local clifford for each node. 
""" - self.G = G + self.graph = g self.v_in = v_in self.v_out = v_out if meas_plane is None: - self.meas_planes = {i: Plane.XY for i in iter(G.nodes)} + self.meas_planes = {i: Plane.XY for i in iter(g.nodes)} else: self.meas_planes = meas_plane self.meas_angles = meas_angles @@ -111,7 +111,7 @@ def visualize( Filename of the saved plot. """ - f, l_k = gflow.find_flow(self.G, set(self.v_in), set(self.v_out), meas_planes=self.meas_planes) # try flow + f, l_k = gflow.find_flow(self.graph, set(self.v_in), set(self.v_out), meas_planes=self.meas_planes) # try flow if f: print("Flow detected in the graph.") self.visualize_w_flow( @@ -126,7 +126,7 @@ def visualize( filename, ) else: - g, l_k = gflow.find_gflow(self.G, set(self.v_in), set(self.v_out), self.meas_planes) # try gflow + g, l_k = gflow.find_gflow(self.graph, set(self.v_in), set(self.v_out), self.meas_planes) # try gflow if g: print("Gflow detected in the graph. (flow not detected)") self.visualize_w_gflow( @@ -292,7 +292,7 @@ def visualize_w_flow( for edge in edge_path.keys(): if len(edge_path[edge]) == 2: - nx.draw_networkx_edges(self.G, pos, edgelist=[edge], style="dashed", alpha=0.7) + nx.draw_networkx_edges(self.graph, pos, edgelist=[edge], style="dashed", alpha=0.7) else: t = np.linspace(0, 1, 100) curve = self._bezier_curve(edge_path[edge], t) @@ -300,7 +300,9 @@ def visualize_w_flow( for arrow in arrow_path.keys(): if len(arrow_path[arrow]) == 2: - nx.draw_networkx_edges(self.G, pos, edgelist=[arrow], edge_color="black", arrowstyle="->", arrows=True) + nx.draw_networkx_edges( + self.graph, pos, edgelist=[arrow], edge_color="black", arrowstyle="->", arrows=True + ) else: path = arrow_path[arrow] last = np.array(path[-1]) @@ -320,7 +322,7 @@ def visualize_w_flow( ) # Draw the nodes with different colors based on their role (input, output, or other) - for node in self.G.nodes(): + for node in self.graph.nodes(): color = "black" # default color for 'other' nodes inner_color = "white" if node in self.v_in: @@ -340,25 +342,25 @@ def visualize_w_flow( ) # Draw the nodes manually with scatter() if show_local_clifford and self.local_clifford is not None: - for node in self.G.nodes(): + for node in self.graph.nodes(): if node in self.local_clifford.keys(): plt.text(*pos[node] + np.array([0.2, 0.2]), f"{self.local_clifford[node]}", fontsize=10, zorder=3) if show_measurement_planes: - for node in self.G.nodes(): + for node in self.graph.nodes(): if node in self.meas_planes.keys(): plt.text(*pos[node] + np.array([0.22, -0.2]), f"{self.meas_planes[node]}", fontsize=9, zorder=3) # Draw the labels fontsize = 12 - if max(self.G.nodes()) >= 100: - fontsize = fontsize * 2 / len(str(max(self.G.nodes()))) - nx.draw_networkx_labels(self.G, pos, font_size=fontsize) + if max(self.graph.nodes()) >= 100: + fontsize = fontsize * 2 / len(str(max(self.graph.nodes()))) + nx.draw_networkx_labels(self.graph, pos, font_size=fontsize) - x_min = min([pos[node][0] for node in self.G.nodes()]) # Get the minimum x coordinate - x_max = max([pos[node][0] for node in self.G.nodes()]) # Get the maximum x coordinate - y_min = min([pos[node][1] for node in self.G.nodes()]) # Get the minimum y coordinate - y_max = max([pos[node][1] for node in self.G.nodes()]) # Get the maximum y coordinate + x_min = min([pos[node][0] for node in self.graph.nodes()]) # Get the minimum x coordinate + x_max = max([pos[node][0] for node in self.graph.nodes()]) # Get the maximum x coordinate + y_min = min([pos[node][1] for node in self.graph.nodes()]) # Get the minimum y coordinate + y_max = 
max([pos[node][1] for node in self.graph.nodes()]) # Get the maximum y coordinate # Draw the vertical lines to separate different layers for layer in range(min(l_k.values()), max(l_k.values())): @@ -434,7 +436,7 @@ def visualize_w_gflow( for edge in edge_path.keys(): if len(edge_path[edge]) == 2: - nx.draw_networkx_edges(self.G, pos, edgelist=[edge], style="dashed", alpha=0.7) + nx.draw_networkx_edges(self.graph, pos, edgelist=[edge], style="dashed", alpha=0.7) else: t = np.linspace(0, 1, 100) curve = self._bezier_curve(edge_path[edge], t) @@ -453,7 +455,9 @@ def visualize_w_gflow( arrowprops=dict(arrowstyle="->", color="k", lw=1), ) elif len(arrow_path[arrow]) == 2: # straight line - nx.draw_networkx_edges(self.G, pos, edgelist=[arrow], edge_color="black", arrowstyle="->", arrows=True) + nx.draw_networkx_edges( + self.graph, pos, edgelist=[arrow], edge_color="black", arrowstyle="->", arrows=True + ) else: path = arrow_path[arrow] last = np.array(path[-1]) @@ -473,7 +477,7 @@ def visualize_w_gflow( ) # Draw the nodes with different colors based on their role (input, output, or other) - for node in self.G.nodes(): + for node in self.graph.nodes(): color = "black" # default color for 'other' nodes inner_color = "white" if node in self.v_in: @@ -493,25 +497,25 @@ def visualize_w_gflow( ) # Draw the nodes manually with scatter() if show_local_clifford and self.local_clifford is not None: - for node in self.G.nodes(): + for node in self.graph.nodes(): if node in self.local_clifford.keys(): plt.text(*pos[node] + np.array([0.2, 0.2]), f"{self.local_clifford[node]}", fontsize=10, zorder=3) if show_measurement_planes: - for node in self.G.nodes(): + for node in self.graph.nodes(): if node in self.meas_planes.keys(): plt.text(*pos[node] + np.array([0.22, -0.2]), f"{self.meas_planes[node]}", fontsize=9, zorder=3) # Draw the labels fontsize = 12 - if max(self.G.nodes()) >= 100: - fontsize = fontsize * 2 / len(str(max(self.G.nodes()))) - nx.draw_networkx_labels(self.G, pos, font_size=fontsize) + if max(self.graph.nodes()) >= 100: + fontsize = fontsize * 2 / len(str(max(self.graph.nodes()))) + nx.draw_networkx_labels(self.graph, pos, font_size=fontsize) - x_min = min([pos[node][0] for node in self.G.nodes()]) # Get the minimum x coordinate - x_max = max([pos[node][0] for node in self.G.nodes()]) # Get the maximum x coordinate - y_min = min([pos[node][1] for node in self.G.nodes()]) # Get the minimum y coordinate - y_max = max([pos[node][1] for node in self.G.nodes()]) # Get the maximum y coordinate + x_min = min([pos[node][0] for node in self.graph.nodes()]) # Get the minimum x coordinate + x_max = max([pos[node][0] for node in self.graph.nodes()]) # Get the maximum x coordinate + y_min = min([pos[node][1] for node in self.graph.nodes()]) # Get the minimum y coordinate + y_max = max([pos[node][1] for node in self.graph.nodes()]) # Get the maximum y coordinate # Draw the vertical lines to separate different layers for layer in range(min(l_k.values()), max(l_k.values())): @@ -577,14 +581,14 @@ def visualize_wo_structure( for edge in edge_path.keys(): if len(edge_path[edge]) == 2: - nx.draw_networkx_edges(self.G, pos, edgelist=[edge], style="dashed", alpha=0.7) + nx.draw_networkx_edges(self.graph, pos, edgelist=[edge], style="dashed", alpha=0.7) else: t = np.linspace(0, 1, 100) curve = self._bezier_curve(edge_path[edge], t) plt.plot(curve[:, 0], curve[:, 1], "k--", linewidth=1, alpha=0.7) # Draw the nodes with different colors based on their role (input, output, or other) - for node in self.G.nodes(): + 
for node in self.graph.nodes(): color = "black" # default color for 'other' nodes inner_color = "white" if node in self.v_in: @@ -604,25 +608,25 @@ def visualize_wo_structure( ) # Draw the nodes manually with scatter() if show_local_clifford and self.local_clifford is not None: - for node in self.G.nodes(): + for node in self.graph.nodes(): if node in self.local_clifford.keys(): plt.text(*pos[node] + np.array([0.2, 0.2]), f"{self.local_clifford[node]}", fontsize=10, zorder=3) if show_measurement_planes: - for node in self.G.nodes(): + for node in self.graph.nodes(): if node in self.meas_planes.keys(): plt.text(*pos[node] + np.array([0.22, -0.2]), f"{self.meas_planes[node]}", fontsize=9, zorder=3) # Draw the labels fontsize = 12 - if max(self.G.nodes()) >= 100: - fontsize = fontsize * 2 / len(str(max(self.G.nodes()))) - nx.draw_networkx_labels(self.G, pos, font_size=fontsize) + if max(self.graph.nodes()) >= 100: + fontsize = fontsize * 2 / len(str(max(self.graph.nodes()))) + nx.draw_networkx_labels(self.graph, pos, font_size=fontsize) - x_min = min([pos[node][0] for node in self.G.nodes()]) # Get the minimum x coordinate - x_max = max([pos[node][0] for node in self.G.nodes()]) # Get the maximum x coordinate - y_min = min([pos[node][1] for node in self.G.nodes()]) # Get the minimum y coordinate - y_max = max([pos[node][1] for node in self.G.nodes()]) # Get the maximum y coordinate + x_min = min([pos[node][0] for node in self.graph.nodes()]) # Get the minimum x coordinate + x_max = max([pos[node][0] for node in self.graph.nodes()]) # Get the maximum x coordinate + y_min = min([pos[node][1] for node in self.graph.nodes()]) # Get the minimum y coordinate + y_max = max([pos[node][1] for node in self.graph.nodes()]) # Get the maximum y coordinate plt.xlim( x_min - 0.5 * node_distance[0], x_max + 0.5 * node_distance[0] @@ -699,7 +703,7 @@ def visualize_all_correction( for edge in edge_path.keys(): if len(edge_path[edge]) == 2: - nx.draw_networkx_edges(self.G, pos, edgelist=[edge], style="dashed", alpha=0.7) + nx.draw_networkx_edges(self.graph, pos, edgelist=[edge], style="dashed", alpha=0.7) else: t = np.linspace(0, 1, 100) curve = self._bezier_curve(edge_path[edge], t) @@ -712,7 +716,9 @@ def visualize_all_correction( else: color = "tab:brown" if len(arrow_path[arrow]) == 2: # straight line - nx.draw_networkx_edges(self.G, pos, edgelist=[arrow], edge_color=color, arrowstyle="->", arrows=True) + nx.draw_networkx_edges( + self.graph, pos, edgelist=[arrow], edge_color=color, arrowstyle="->", arrows=True + ) else: path = arrow_path[arrow] last = np.array(path[-1]) @@ -733,7 +739,7 @@ def visualize_all_correction( ) # Draw the nodes with different colors based on their role (input, output, or other) - for node in self.G.nodes(): + for node in self.graph.nodes(): color = "black" inner_color = "white" if node in self.v_in: @@ -751,20 +757,20 @@ def visualize_all_correction( plt.scatter(*pos[node], edgecolor=color, facecolor=inner_color, s=350, zorder=2) if show_local_clifford and self.local_clifford is not None: - for node in self.G.nodes(): + for node in self.graph.nodes(): if node in self.local_clifford.keys(): plt.text(*pos[node] + np.array([0.2, 0.2]), f"{self.local_clifford[node]}", fontsize=10, zorder=3) if show_measurement_planes: - for node in self.G.nodes(): + for node in self.graph.nodes(): if node in self.meas_planes.keys(): plt.text(*pos[node] + np.array([0.22, -0.2]), f"{self.meas_planes[node]}", fontsize=9, zorder=3) # Draw the labels fontsize = 12 - if max(self.G.nodes()) >= 100: - 
fontsize = fontsize * 2 / len(str(max(self.G.nodes()))) - nx.draw_networkx_labels(self.G, pos, font_size=fontsize) + if max(self.graph.nodes()) >= 100: + fontsize = fontsize * 2 / len(str(max(self.graph.nodes()))) + nx.draw_networkx_labels(self.graph, pos, font_size=fontsize) # legend for arrow colors plt.plot([], [], "k--", alpha=0.7, label="graph edge") @@ -772,10 +778,10 @@ def visualize_all_correction( plt.plot([], [], color="tab:green", label="zflow") plt.plot([], [], color="tab:brown", label="xflow and zflow") - x_min = min([pos[node][0] for node in self.G.nodes()]) # Get the minimum x coordinate - x_max = max([pos[node][0] for node in self.G.nodes()]) - y_min = min([pos[node][1] for node in self.G.nodes()]) - y_max = max([pos[node][1] for node in self.G.nodes()]) + x_min = min([pos[node][0] for node in self.graph.nodes()]) # Get the minimum x coordinate + x_max = max([pos[node][0] for node in self.graph.nodes()]) + y_min = min([pos[node][1] for node in self.graph.nodes()]) + y_max = max([pos[node][1] for node in self.graph.nodes()]) plt.xlim( x_min - 0.5 * node_distance[0], x_max + 3.5 * node_distance[0] @@ -812,11 +818,11 @@ def get_figsize( figure size of the graph. """ if l_k is None: - width = len(set([pos[node][0] for node in self.G.nodes()])) * 0.8 + width = len(set([pos[node][0] for node in self.graph.nodes()])) * 0.8 else: width = (max(l_k.values()) + 1) * 0.8 if pos is not None: - height = len(set([pos[node][1] for node in self.G.nodes()])) + height = len(set([pos[node][1] for node in self.graph.nodes()])) else: height = len(self.v_out) figsize = (width * node_distance[0], height * node_distance[1]) @@ -844,14 +850,14 @@ def get_edge_path(self, flow: dict[int, int | set[int]], pos: dict[int, tuple[fl max_iter = 5 edge_path = {} arrow_path = {} - edge_set = set(self.G.edges()) + edge_set = set(self.graph.edges()) flow_arrows = {(k, v) for k, values in flow.items() for v in values} # set of mid-points of the edges - # mid_points = {(0.5 * (pos[k][0] + pos[v][0]), 0.5 * (pos[k][1] + pos[v][1])) for k, v in edge_set} - set(pos[node] for node in self.G.nodes()) + # mid_points = {(0.5 * (pos[k][0] + pos[v][0]), 0.5 * (pos[k][1] + pos[v][1])) for k, v in edge_set} - set(pos[node] for node in self.g.nodes()) for edge in edge_set: iteration = 0 - nodes = self.G.nodes() + nodes = self.graph.nodes() bezier_path = [pos[edge[0]], pos[edge[1]]] while True: iteration += 1 @@ -900,7 +906,7 @@ def _point_from_node(pos, dist, angle): ] else: iteration = 0 - nodes = self.G.nodes() + nodes = self.graph.nodes() bezier_path = [pos[arrow[0]], pos[arrow[1]]] if arrow in edge_set or (arrow[1], arrow[0]) in edge_set: mid_point = ( @@ -958,10 +964,10 @@ def get_edge_path_wo_structure(self, pos: dict[int, tuple[float, float]]) -> dic """ max_iter = 5 edge_path = {} - edge_set = set(self.G.edges()) + edge_set = set(self.graph.edges()) for edge in edge_set: iteration = 0 - nodes = self.G.nodes() + nodes = self.graph.nodes() bezier_path = [pos[edge[0]], pos[edge[1]]] while True: iteration += 1 @@ -1010,8 +1016,8 @@ def get_pos_from_flow(self, f: dict[int, int], l_k: dict[int, int]) -> dict[int, dictionary of node positions. 
""" values_union = set().union(*f.values()) - start_nodes = self.G.nodes() - values_union - pos = {node: [0, 0] for node in self.G.nodes()} + start_nodes = self.graph.nodes() - values_union + pos = {node: [0, 0] for node in self.graph.nodes()} for i, k in enumerate(start_nodes): pos[k][1] = i node = k @@ -1048,23 +1054,23 @@ def get_pos_from_gflow(self, g: dict[int, set[int]], l_k: dict[int, int]) -> dic for node, node_list in g.items(): g_edges.extend((node, n) for n in node_list) - G_prime = self.G.copy() - G_prime.add_nodes_from(self.G.nodes()) - G_prime.add_edges_from(g_edges) + g_prime = self.graph.copy() + g_prime.add_nodes_from(self.graph.nodes()) + g_prime.add_edges_from(g_edges) l_max = max(l_k.values()) l_reverse = {v: l_max - l for v, l in l_k.items()} - nx.set_node_attributes(G_prime, l_reverse, "subset") + nx.set_node_attributes(g_prime, l_reverse, "subset") - pos = nx.multipartite_layout(G_prime) + pos = nx.multipartite_layout(g_prime) for node, layer in l_k.items(): pos[node][0] = l_max - layer - vert = list(set([pos[node][1] for node in self.G.nodes()])) + vert = list(set([pos[node][1] for node in self.graph.nodes()])) vert.sort() - for node in self.G.nodes(): + for node in self.graph.nodes(): pos[node][1] = vert.index(pos[node][1]) return pos @@ -1085,10 +1091,10 @@ def get_pos_wo_structure(self) -> dict[int, tuple[float, float]]: """ layers = dict() - connected_components = list(nx.connected_components(self.G)) + connected_components = list(nx.connected_components(self.graph)) for component in connected_components: - subgraph = self.G.subgraph(component) + subgraph = self.graph.subgraph(component) initial_pos = {node: (0, 0) for node in component} if len(set(self.v_out) & set(component)) == 0 and len(set(self.v_in) & set(component)) == 0: @@ -1106,9 +1112,9 @@ def get_pos_wo_structure(self) -> dict[int, tuple[float, float]]: # order the nodes based on the x-coordinate order = sorted(pos, key=lambda x: pos[x][0]) order = [node for node in order if node not in fixed_nodes] - Nv = len(self.v_out) + nv = len(self.v_out) for i, node in enumerate(order[::-1]): - k = i // Nv + 1 + k = i // nv + 1 layers[node] = k elif len(set(self.v_out) & set(component)) == 0 and len(set(self.v_in) & set(component)) > 0: @@ -1119,9 +1125,9 @@ def get_pos_wo_structure(self) -> dict[int, tuple[float, float]]: # order the nodes based on the x-coordinate order = sorted(pos, key=lambda x: pos[x][0]) order = [node for node in order if node not in fixed_nodes] - Nv = len(self.v_in) + nv = len(self.v_in) for i, node in enumerate(order[::-1]): - k = i // Nv + k = i // nv layers[node] = k if layers == dict(): layer_input = 0 @@ -1141,26 +1147,26 @@ def get_pos_wo_structure(self) -> dict[int, tuple[float, float]]: # order the nodes based on the x-coordinate order = sorted(pos, key=lambda x: pos[x][0]) order = [node for node in order if node not in fixed_nodes] - Nv = len(self.v_out) + nv = len(self.v_out) for i, node in enumerate(order[::-1]): - k = i // Nv + 1 + k = i // nv + 1 layers[node] = k layer_input = max(layers.values()) + 1 for node in set(self.v_in) & set(component) - set(self.v_out): layers[node] = layer_input - G_prime = self.G.copy() - G_prime.add_nodes_from(self.G.nodes()) - G_prime.add_edges_from(self.G.edges()) + g_prime = self.graph.copy() + g_prime.add_nodes_from(self.graph.nodes()) + g_prime.add_edges_from(self.graph.edges()) l_max = max(layers.values()) l_reverse = {v: l_max - l for v, l in layers.items()} - nx.set_node_attributes(G_prime, l_reverse, "subset") - pos = 
diff --git a/pyproject.toml b/pyproject.toml
index 815af2b7..a00cd1b9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -47,23 +47,24 @@ extend-exclude = ["docs"]
 
 [tool.ruff.lint]
 extend-select = [
-    "UP",
-    "NPY",
     "A",
     "B",
-    "W",
+    "FA",
+    "I",
+    "NPY",
+    "N",
+    "PERF",
     "PLE",
     "PLW",
-    "FA",
     "RUF",
-    "PERF",
     "TCH",
-    "I",
+    "UP",
+    "W",
 ]
 ignore = [
+    "E74",  # Ambiguous name
     # TODO: Resolve this immediately
     "NPY002",  # Use np.random.Generator
-    "E74",  # Ambiguous name
 ]
 
 [tool.ruff.format]
@@ -74,8 +75,8 @@ docstring-code-format = true
     "F401",  # Unused import
 ]
 "examples/*.py" = [
-    "E402",  # Import not at top of file
     "B018",  # Useless expression
+    "E402",  # Import not at top of file
 ]
 
 [tool.ruff.lint.pydocstyle]
diff --git a/tests/test_density_matrix.py b/tests/test_density_matrix.py
index 1ed06a11..55a04d75 100644
--- a/tests/test_density_matrix.py
+++ b/tests/test_density_matrix.py
@@ -79,14 +79,14 @@ def test_init_with_invalid_data_fail(self, fx_rng: Generator) -> None:
     def test_init_without_data_success(self, n: int) -> None:
         dm = DensityMatrix(nqubit=n)
         expected_density_matrix = np.outer(np.ones((2,) * n), np.ones((2,) * n)) / 2**n
-        assert dm.Nqubit == n
+        assert dm.n_qubit == n
         assert dm.rho.shape == (2**n, 2**n)
         assert np.allclose(dm.rho, expected_density_matrix)
 
         dm = DensityMatrix(data=graphix.states.BasicStates.ZERO, nqubit=n)
         expected_density_matrix = np.zeros((2**n, 2**n))
         expected_density_matrix[0, 0] = 1
-        assert dm.Nqubit == n
+        assert dm.n_qubit == n
         assert dm.rho.shape == (2**n, 2**n)
         assert np.allclose(dm.rho, expected_density_matrix)
@@ -261,7 +261,7 @@ def test_tensor_without_data_success(self, n: int) -> None:
         dm_a = DensityMatrix(nqubit=n)
         dm_b = DensityMatrix(nqubit=n + 1)
         dm_a.tensor(dm_b)
-        assert dm_a.Nqubit == 2 * n + 1
+        assert dm_a.n_qubit == 2 * n + 1
         assert dm_a.rho.shape == (2 ** (2 * n + 1), 2 ** (2 * n + 1))
 
     # TODO: Use pytest.mark.parametrize after refactoring randobj.rand_dm
@@ -273,7 +273,7 @@ def test_tensor_with_data_success(self) -> None:
         data_b = randobj.rand_dm(2 ** (n + 1), dm_dtype=False)
         dm_b = DensityMatrix(data=data_b)
         dm_a.tensor(dm_b)
-        assert dm_a.Nqubit == 2 * n + 1
+        assert dm_a.n_qubit == 2 * n + 1
         assert dm_a.rho.shape == (2 ** (2 * n + 1), 2 ** (2 * n + 1))
         assert np.allclose(dm_a.rho, np.kron(data_a, data_b))
@@ -892,7 +892,7 @@ def test_init_success(self, fx_rng: Generator, hadamardpattern, randpattern, nqb
         assert dm.dims() == (2**nqb, 2**nqb)
         assert np.allclose(dm.rho, expected_dm)
 
-        assert backend.Nqubit == nqb
+        assert backend.n_qubit == nqb
 
     def test_init_fail(self, fx_rng: Generator, nqb, randpattern) -> None:
         rand_angles = fx_rng.random(nqb + 1) * 2 * np.pi
@@ -915,7 +915,7 @@ def test_init_success_2(self) -> None:
         assert backend.pattern == pattern
         assert backend.results == pattern.results
         assert backend.node_index == [0]
-        assert backend.Nqubit == 1
+        assert backend.n_qubit == 1
         assert backend.max_qubit_num == 12
 
     def test_add_nodes(self) -> None:
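
The `Nqubit` attribute checked in these tests is now `n_qubit`, on both `DensityMatrix` and the density-matrix backend. A small usage sketch against the renamed attribute (the `graphix.sim.density_matrix` import path is assumed here):

    import numpy as np

    from graphix.sim.density_matrix import DensityMatrix  # import path assumed

    dm = DensityMatrix(nqubit=2)   # defaults to |+>|+>, i.e. every entry of rho equals 1/4
    assert dm.n_qubit == 2         # renamed from dm.Nqubit
    assert dm.rho.shape == (4, 4)
    assert np.isclose(dm.rho.trace(), 1.0)
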
diff --git a/tests/test_graphsim.py b/tests/test_graphsim.py
index 8c11c504..028d3899 100644
--- a/tests/test_graphsim.py
+++ b/tests/test_graphsim.py
@@ -85,23 +85,23 @@ def test_e2(self, use_rustworkx: bool) -> None:
         g.h(3)
         gstate = get_state(g)
-        g.equivalent_graph_E2(3, 4)
+        g.equivalent_graph_e2(3, 4)
         gstate2 = get_state(g)
         assert np.abs(np.dot(gstate.flatten().conjugate(), gstate2.flatten())) == pytest.approx(1)
 
-        g.equivalent_graph_E2(4, 0)
+        g.equivalent_graph_e2(4, 0)
         gstate3 = get_state(g)
         assert np.abs(np.dot(gstate.flatten().conjugate(), gstate3.flatten())) == pytest.approx(1)
 
-        g.equivalent_graph_E2(4, 5)
+        g.equivalent_graph_e2(4, 5)
         gstate4 = get_state(g)
         assert np.abs(np.dot(gstate.flatten().conjugate(), gstate4.flatten())) == pytest.approx(1)
 
-        g.equivalent_graph_E2(0, 3)
+        g.equivalent_graph_e2(0, 3)
         gstate5 = get_state(g)
         assert np.abs(np.dot(gstate.flatten().conjugate(), gstate5.flatten())) == pytest.approx(1)
 
-        g.equivalent_graph_E2(0, 3)
+        g.equivalent_graph_e2(0, 3)
         gstate6 = get_state(g)
         assert np.abs(np.dot(gstate.flatten().conjugate(), gstate6.flatten())) == pytest.approx(1)
@@ -111,16 +111,16 @@ def test_e1(self, use_rustworkx: bool) -> None:
         g = GraphState(nodes=np.arange(nqubit), edges=edges, use_rustworkx=use_rustworkx)
         g.nodes[3]["loop"] = True
         gstate = get_state(g)
-        g.equivalent_graph_E1(3)
+        g.equivalent_graph_e1(3)
         gstate2 = get_state(g)
         assert np.abs(np.dot(gstate.flatten().conjugate(), gstate2.flatten())) == pytest.approx(1)
 
         g.z(4)
         gstate = get_state(g)
-        g.equivalent_graph_E1(4)
+        g.equivalent_graph_e1(4)
         gstate2 = get_state(g)
         assert np.abs(np.dot(gstate.flatten().conjugate(), gstate2.flatten())) == pytest.approx(1)
-        g.equivalent_graph_E1(4)
+        g.equivalent_graph_e1(4)
         gstate3 = get_state(g)
         assert np.abs(np.dot(gstate.flatten().conjugate(), gstate3.flatten())) == pytest.approx(1)
diff --git a/tests/test_pyzx.py b/tests/test_pyzx.py
index 53c67446..55091bb2 100644
--- a/tests/test_pyzx.py
+++ b/tests/test_pyzx.py
@@ -8,7 +8,9 @@
 
 try:
     import pyzx as zx
-    from pyzx.generate import cliffordT
+
+    # MEMO: PEP8 violation in pyzx
+    from pyzx.generate import cliffordT as clifford_t  # noqa: N813
 
     from graphix.pyzx import from_pyzx_graph, to_pyzx_graph
 except ModuleNotFoundError:
@@ -20,7 +22,7 @@
 @pytest.mark.skipif(sys.modules.get("pyzx") is None, reason="pyzx not installed")
 def test_graph_equality() -> None:
     random.seed(SEED)
-    g = cliffordT(4, 10, 0.1)
+    g = clifford_t(4, 10, 0.1)
 
     og1 = from_pyzx_graph(g)
 
@@ -55,7 +57,7 @@ def assert_reconstructed_pyzx_graph_equal(g: zx.Graph) -> None:
 # graph. Only works with small circuits up to 4 qubits since PyZX's `tensorfy`
 # function seems to consume huge amount of memory for larger qubit
 @pytest.mark.skipif(sys.modules.get("pyzx") is None, reason="pyzx not installed")
-def test_random_cliffordT() -> None:
+def test_random_clifford_t() -> None:
     for _ in range(15):
-        g = cliffordT(4, 10, 0.1)
+        g = clifford_t(4, 10, 0.1)
         assert_reconstructed_pyzx_graph_equal(g)
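
Importing the non-PEP 8 name under a snake_case alias, as in the hunk above, keeps local code consistent without touching pyzx itself. A short sketch of the conversion these tests exercise (seed and circuit parameters are arbitrary):

    import random

    # pyzx keeps the camelCase name; only the local alias is renamed.
    from pyzx.generate import cliffordT as clifford_t  # noqa: N813

    from graphix.pyzx import from_pyzx_graph

    random.seed(42)             # arbitrary seed, for reproducibility only
    g = clifford_t(4, 10, 0.1)  # small random Clifford+T ZX-diagram
    og = from_pyzx_graph(g)     # convert it to a graphix open graph
    print(type(og))
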
diff --git a/tests/test_random_utilities.py b/tests/test_random_utilities.py
index fab85f94..e820c222 100644
--- a/tests/test_random_utilities.py
+++ b/tests/test_random_utilities.py
@@ -147,7 +147,7 @@ def test_rand_dm_rank(self, fx_rng: Generator) -> None:
     # TODO move that somewhere else?
     def test_pauli_tensor_ops(self, fx_rng: Generator) -> None:
         nqb = int(fx_rng.integers(2, 6))
-        pauli_tensor_ops = Ops.build_tensor_Pauli_ops(nqb)
+        pauli_tensor_ops = Ops.build_tensor_pauli_ops(nqb)
 
         assert len(pauli_tensor_ops) == 4**nqb
 
@@ -157,15 +157,15 @@ def test_pauli_tensor_ops(self, fx_rng: Generator) -> None:
     def test_pauli_tensor_ops_fail(self, fx_rng: Generator) -> None:
         with pytest.raises(TypeError):
-            _ = Ops.build_tensor_Pauli_ops(fx_rng.integers(2, 6) + 0.5)
+            _ = Ops.build_tensor_pauli_ops(fx_rng.integers(2, 6) + 0.5)
 
         with pytest.raises(ValueError):
-            _ = Ops.build_tensor_Pauli_ops(0)
+            _ = Ops.build_tensor_pauli_ops(0)
 
     def test_random_pauli_channel_success(self, fx_rng: Generator) -> None:
         nqb = int(fx_rng.integers(2, 6))
         rk = int(fx_rng.integers(1, 2**nqb + 1))
-        pauli_channel = randobj.rand_Pauli_channel_kraus(dim=2**nqb, rank=rk)  # default is full rank
+        pauli_channel = randobj.rand_pauli_channel_kraus(dim=2**nqb, rank=rk)  # default is full rank
 
         assert isinstance(pauli_channel, KrausChannel)
         assert pauli_channel.nqubit == nqb
@@ -176,13 +176,13 @@ def test_random_pauli_channel_fail(self) -> None:
         nqb = 3
         rk = 2
         with pytest.raises(TypeError):
-            randobj.rand_Pauli_channel_kraus(dim=2**nqb, rank=rk + 0.5)
+            randobj.rand_pauli_channel_kraus(dim=2**nqb, rank=rk + 0.5)
 
         with pytest.raises(ValueError):
-            randobj.rand_Pauli_channel_kraus(dim=2**nqb + 0.5, rank=rk)
+            randobj.rand_pauli_channel_kraus(dim=2**nqb + 0.5, rank=rk)
 
         with pytest.raises(ValueError):
-            randobj.rand_Pauli_channel_kraus(dim=2**nqb, rank=-3)
+            randobj.rand_pauli_channel_kraus(dim=2**nqb, rank=-3)
 
         with pytest.raises(ValueError):
-            randobj.rand_Pauli_channel_kraus(dim=2**nqb + 1, rank=rk)
+            randobj.rand_pauli_channel_kraus(dim=2**nqb + 1, rank=rk)
diff --git a/tests/test_statevec_backend.py b/tests/test_statevec_backend.py
index f49fbc44..464867e8 100644
--- a/tests/test_statevec_backend.py
+++ b/tests/test_statevec_backend.py
@@ -35,7 +35,7 @@ def test_remove_one_qubit(self) -> None:
     @pytest.mark.parametrize(
         "state", [BasicStates.PLUS, BasicStates.ZERO, BasicStates.ONE, BasicStates.PLUS_I, BasicStates.MINUS_I]
     )
-    def test_measurement_into_each_XYZ_basis(self, state: BasicStates) -> None:
+    def test_measurement_into_each_xyz_basis(self, state: BasicStates) -> None:
         n = 3
         k = 0
         # for measurement into |-> returns [[0, 0], ..., [0, 0]] (whose norm is zero)