diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst
index 38aa1e11e5..3457ad0c0c 100644
--- a/doc/source/api-reference/qibo.rst
+++ b/doc/source/api-reference/qibo.rst
@@ -1616,22 +1616,10 @@ The destabilizers can be extracted analogously with :meth:`qibo.quantum_info.cli
     :member-order: bysource
-Metrics
-^^^^^^^
-
-Set of functions that are used to calculate metrics of states, (pseudo-)distance measures
-between states, and distance measures between quantum channels.
-
-Purity
-""""""
-
-.. autofunction:: qibo.quantum_info.purity
-
-
-Impurity
-""""""""
+Entanglement measures
+^^^^^^^^^^^^^^^^^^^^^
-.. autofunction:: qibo.quantum_info.impurity
+Set of functions to calculate entanglement measures.
 Concurrence
@@ -1646,45 +1634,149 @@ Entanglement of formation
 .. autofunction:: qibo.quantum_info.entanglement_of_formation
-Entropy
-"""""""
+Entanglement fidelity
+"""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.entanglement_fidelity
+
+
+Meyer-Wallach entanglement
+""""""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.meyer_wallach_entanglement
+
-.. autofunction:: qibo.quantum_info.entropy
+Entanglement capability
+"""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.entangling_capability
+
+
+Entropy measures
+^^^^^^^^^^^^^^^^
+
+Set of functions to calculate entropy measures.
+
+
+Shannon entropy
+"""""""""""""""
+
+.. autofunction:: qibo.quantum_info.shannon_entropy
+
+
+Classical relative entropy
+""""""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.classical_relative_entropy
+
+
+Classical Rényi entropy
+"""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.classical_renyi_entropy
+
+
+Classical Rényi relative entropy
+""""""""""""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.classical_relative_renyi_entropy
+
+
+Classical Tsallis entropy
+"""""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.classical_tsallis_entropy
+
+
+von Neumann entropy
+"""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.von_neumann_entropy
 .. note::
-    ``validate`` flag allows the user to choose if the function will check if input
-    ``state`` is Hermitian or not. Default option is ``validate=False``, i.e. the
-    assumption of Hermiticity, because it is faster and, more importantly,
-    the functions are intended to be used on Hermitian inputs. When ``validate=True``
+    ``check_hermitian`` flag allows the user to choose if the function will check if input
+    ``state`` is Hermitian or not. Default option is ``check_hermitian=False``, i.e. the
+    assumption of Hermiticity. This is faster and, more importantly,
+    this function is intended to be used on Hermitian inputs. When ``check_hermitian=True``
     and ``state`` is non-Hermitian, an error will be raised when using `cupy` backend.
+Relative von Neumann entropy
+""""""""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.relative_von_neumann_entropy
+
+.. note::
+    ``check_hermitian`` flag allows the user to choose if the function will check if input
+    ``state`` is Hermitian or not. Default option is ``check_hermitian=False``, i.e. the
+    assumption of Hermiticity. This is faster and, more importantly,
+    this function is intended to be used on Hermitian inputs. When ``check_hermitian=True``
+    and either ``state`` or ``target`` is non-Hermitian,
+    an error will be raised when using `cupy` backend.
+
+
+Rényi entropy
+"""""""""""""
+
+.. autofunction:: qibo.quantum_info.renyi_entropy
+
+
+Relative Rényi entropy
+""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.relative_renyi_entropy
+
+
+Tsallis entropy
+"""""""""""""""
+
+.. autofunction:: qibo.quantum_info.tsallis_entropy
+
+
 Entanglement entropy
 """"""""""""""""""""
 .. autofunction:: qibo.quantum_info.entanglement_entropy
 .. note::
-    ``validate`` flag allows the user to choose if the function will check if
+    ``check_hermitian`` flag allows the user to choose if the function will check if
     the reduced density matrix resulting from tracing out ``bipartition`` from input
-    ``state`` is Hermitian or not. Default option is ``validate=False``, i.e. the
-    assumption of Hermiticity, because it is faster and, more importantly,
-    the functions are intended to be used on Hermitian inputs. When ``validate=True``
+    ``state`` is Hermitian or not. Default option is ``check_hermitian=False``, i.e. the
+    assumption of Hermiticity. This is faster and, more importantly,
+    this function is intended to be used on Hermitian inputs. When ``check_hermitian=True``
    and the reduced density matrix is non-Hermitian, an error will be raised when using `cupy` backend.
+Metrics
+^^^^^^^
+
+Set of functions that are used to calculate metrics of states, (pseudo-)distance measures
+between states, and distance measures between quantum channels.
+
+Purity
+""""""
+
+.. autofunction:: qibo.quantum_info.purity
+
+
+Impurity
+""""""""
+
+.. autofunction:: qibo.quantum_info.impurity
+
+
 Trace distance
 """"""""""""""
 .. autofunction:: qibo.quantum_info.trace_distance
 .. note::
-    ``validate`` flag allows the user to choose if the function will check if difference
+    ``check_hermitian`` flag allows the user to choose if the function will check if difference
     between inputs, ``state - target``, is Hermitian or not. Default option is
-    ``validate=False``, i.e. the assumption of Hermiticity, because it is faster and,
+    ``check_hermitian=False``, i.e. the assumption of Hermiticity, because it is faster and,
     more importantly, the functions are intended to be used on Hermitian inputs.
-    When ``validate=True`` and ``state - target`` is non-Hermitian, an error will be
+    When ``check_hermitian=True`` and ``state - target`` is non-Hermitian, an error will be
     raised when using `cupy` backend.
@@ -1718,12 +1810,6 @@ Bures distance
 .. autofunction:: qibo.quantum_info.bures_distance
-Entanglement fidelity
-"""""""""""""""""""""
-
-.. autofunction:: qibo.quantum_info.entanglement_fidelity
-
-
 Process fidelity
 """"""""""""""""
@@ -1754,18 +1840,6 @@ Diamond Norm
 .. autofunction:: qibo.quantum_info.diamond_norm
-Meyer-Wallach entanglement
-""""""""""""""""""""""""""
-
-.. autofunction:: qibo.quantum_info.meyer_wallach_entanglement
-
-
-Entanglement capability
-"""""""""""""""""""""""
-
-.. autofunction:: qibo.quantum_info.entangling_capability
-
-
 Expressibility of parameterized quantum circuits
 """"""""""""""""""""""""""""""""""""""""""""""""
@@ -2183,18 +2257,6 @@ Hadamard Transform
 .. autofunction:: qibo.quantum_info.hadamard_transform
-Shannon entropy
-"""""""""""""""
-
-.. autofunction:: qibo.quantum_info.shannon_entropy
-
-
-Total Variation distance
-""""""""""""""""""""""""
-
-..
autofunction:: qibo.quantum_info.total_variation_distance - - Hellinger distance """""""""""""""""" diff --git a/src/qibo/callbacks.py b/src/qibo/callbacks.py index 16d88d712a..77114ed42a 100644 --- a/src/qibo/callbacks.py +++ b/src/qibo/callbacks.py @@ -125,7 +125,7 @@ def nqubits(self, n: int): ] def apply(self, backend, state): - from qibo.quantum_info.metrics import entanglement_entropy + from qibo.quantum_info.entropies import entanglement_entropy entropy, spectrum = entanglement_entropy( state, diff --git a/src/qibo/quantum_info/__init__.py b/src/qibo/quantum_info/__init__.py index 27e3edb046..8d9400104e 100644 --- a/src/qibo/quantum_info/__init__.py +++ b/src/qibo/quantum_info/__init__.py @@ -1,5 +1,7 @@ from qibo.quantum_info.basis import * from qibo.quantum_info.clifford import * +from qibo.quantum_info.entanglement import * +from qibo.quantum_info.entropies import * from qibo.quantum_info.metrics import * from qibo.quantum_info.quantum_networks import * from qibo.quantum_info.random_ensembles import * diff --git a/src/qibo/quantum_info/entanglement.py b/src/qibo/quantum_info/entanglement.py new file mode 100644 index 0000000000..28cbb70305 --- /dev/null +++ b/src/qibo/quantum_info/entanglement.py @@ -0,0 +1,293 @@ +"""Submodules with entanglement measures.""" + +import numpy as np + +from qibo.backends import _check_backend +from qibo.config import PRECISION_TOL, raise_error +from qibo.quantum_info.metrics import fidelity, purity + + +def concurrence(state, bipartition, check_purity: bool = True, backend=None): + """Calculates concurrence of a pure bipartite quantum state + :math:`\\rho \\in \\mathcal{H}_{A} \\otimes \\mathcal{H}_{B}` as + + .. math:: + C(\\rho) = \\sqrt{2 \\, (\\text{tr}^{2}(\\rho) - \\text{tr}(\\rho_{A}^{2}))} \\, , + + where :math:`\\rho_{A} = \\text{tr}_{B}(\\rho)` is the reduced density operator + obtained by tracing out the qubits in the ``bipartition`` :math:`B`. + + Args: + state (ndarray): statevector or density matrix. + bipartition (list or tuple or ndarray): qubits in the subsystem to be traced out. + check_purity (bool, optional): if ``True``, checks if ``state`` is pure. If ``False``, + it assumes ``state`` is pure . Defaults to ``True``. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. + Defaults to ``None``. + + Returns: + float: Concurrence of :math:`\\rho`. 
+ """ + backend = _check_backend(backend) + + if ( + (len(state.shape) not in [1, 2]) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + if not isinstance(check_purity, bool): + raise_error( + TypeError, + f"check_purity must be type bool, but it is type {type(check_purity)}.", + ) + + nqubits = int(np.log2(state.shape[0])) + + if check_purity is True: + purity_total_system = purity(state) + + mixed = bool(abs(purity_total_system - 1.0) > PRECISION_TOL) + if mixed is True: + raise_error( + NotImplementedError, + "concurrence only implemented for pure quantum states.", + ) + + reduced_density_matrix = ( + backend.partial_trace(state, bipartition, nqubits) + if len(state.shape) == 1 + else backend.partial_trace_density_matrix(state, bipartition, nqubits) + ) + + purity_reduced = purity(reduced_density_matrix) + if purity_reduced - 1.0 > 0.0: + purity_reduced = round(purity_reduced, 7) + + concur = np.sqrt(2 * (1 - purity_reduced)) + + return concur + + +def entanglement_of_formation( + state, bipartition, base: float = 2, check_purity: bool = True, backend=None +): + """Calculates the entanglement of formation :math:`E_{f}` of a pure bipartite + quantum state :math:`\\rho`, which is given by + + .. math:: + E_{f} = H([1 - x, x]) \\, , + + where + + .. math:: + x = \\frac{1 + \\sqrt{1 - C^{2}(\\rho)}}{2} \\, , + + :math:`C(\\rho)` is the :func:`qibo.quantum_info.concurrence` of :math:`\\rho`, + and :math:`H` is the :func:`qibo.quantum_info.entropies.shannon_entropy`. + + Args: + state (ndarray): statevector or density matrix. + bipartition (list or tuple or ndarray): qubits in the subsystem to be traced out. + base (float): the base of the log in :func:`qibo.quantum_info.entropies.shannon_entropy`. + Defaults to :math:`2`. + check_purity (bool, optional): if ``True``, checks if ``state`` is pure. If ``False``, + it assumes ``state`` is pure . Default: ``True``. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. + Defaults to ``None``. + + + Returns: + float: entanglement of formation of state :math:`\\rho`. + """ + from qibo.quantum_info.entropies import shannon_entropy # pylint: disable=C0415 + + backend = _check_backend(backend) + + concur = concurrence( + state, bipartition=bipartition, check_purity=check_purity, backend=backend + ) + concur = (1 + np.sqrt(1 - concur**2)) / 2 + probabilities = [1 - concur, concur] + + ent_of_form = shannon_entropy(probabilities, base=base, backend=backend) + + return ent_of_form + + +def entanglement_fidelity( + channel, nqubits: int, state=None, check_hermitian: bool = False, backend=None +): + """Entanglement fidelity :math:`F_{\\mathcal{E}}` of a ``channel`` :math:`\\mathcal{E}` + on ``state`` :math:`\\rho` is given by + + .. math:: + F_{\\mathcal{E}}(\\rho) = F(\\rho_{f}, \\rho) + + where :math:`F` is the :func:`qibo.quantum_info.fidelity` function for states, + and :math:`\\rho_{f} = \\mathcal{E}_{A} \\otimes I_{B}(\\rho)` + is the state after the channel :math:`\\mathcal{E}` was applied to + partition :math:`A`. + + Args: + channel (:class:`qibo.gates.channels.Channel`): quantum channel + acting on partition :math:`A`. + nqubits (int): total number of qubits in ``state``. + state (ndarray, optional): statevector or density matrix to be evolved + by ``channel``. 
If ``None``, defaults to the maximally entangled state + :math:`\\frac{1}{2^{n}} \\, \\sum_{k} \\, \\ket{k}\\ket{k}`, where + :math:`n` is ``nqubits``. Defaults to ``None``. + check_hermitian (bool, optional): if ``True``, checks if the final state + :math:`\\rho_{f}` is Hermitian. If ``False``, it assumes it is Hermitian. + Defaults to ``False``. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. + Defaults to ``None``. + + Returns: + float: Entanglement fidelity :math:`F_{\\mathcal{E}}`. + """ + if not isinstance(nqubits, int): + raise_error( + TypeError, f"nqubits must be type int, but it is type {type(nqubits)}." + ) + + if nqubits <= 0: + raise_error( + ValueError, f"nqubits must be a positive integer, but it is {nqubits}." + ) + + if state is not None and ( + (len(state.shape) not in [1, 2]) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + if not isinstance(check_hermitian, bool): + raise_error( + TypeError, + f"check_hermitian must be type bool, but it is type {type(check_hermitian)}.", + ) + + backend = _check_backend(backend) + + if state is None: + state = backend.plus_density_matrix(nqubits) + + # necessary because this function do support repeated execution, + # so it has to default to density matrices + if len(state.shape) == 1: + state = np.outer(state, np.conj(state)) + + state_final = backend.apply_channel_density_matrix(channel, state, nqubits) + + entang_fidelity = fidelity( + state_final, state, check_hermitian=check_hermitian, backend=backend + ) + + return entang_fidelity + + +def meyer_wallach_entanglement(circuit, backend=None): + """Computes the Meyer-Wallach entanglement Q of the `circuit`, + + .. math:: + Q(\\theta) = 1 - \\frac{1}{N} \\, \\sum_{k} \\, + \\text{tr}\\left(\\rho_{k^{2}}(\\theta)\\right) \\, . + + Args: + circuit (:class:`qibo.models.Circuit`): Parametrized circuit. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. + Defaults to ``None``. + + Returns: + float: Meyer-Wallach entanglement. + """ + + backend = _check_backend(backend) + + circuit.density_matrix = True + nqubits = circuit.nqubits + + rho = backend.execute_circuit(circuit).state() + + ent = 0 + for j in range(nqubits): + trace_q = list(range(nqubits)) + trace_q.pop(j) + + rho_r = backend.partial_trace_density_matrix(rho, trace_q, nqubits) + + trace = purity(rho_r) + + ent += trace + + entanglement = 1 - ent / nqubits + + return entanglement + + +def entangling_capability(circuit, samples: int, seed=None, backend=None): + """Returns the entangling capability :math:`\\text{Ent}` of a parametrized + circuit, which is average Meyer-Wallach entanglement Q of the circuit, i.e. + + .. math:: + \\text{Ent} = \\frac{2}{S}\\sum_{k}Q_k \\, , + + where :math:`S` is the number of samples. + + Args: + circuit (:class:`qibo.models.Circuit`): Parametrized circuit. + samples (int): number of samples to estimate the integral. + seed (int or :class:`numpy.random.Generator`, optional): Either a generator of random + numbers or a fixed seed to initialize a generator. If ``None``, initializes + a generator with a random seed. Default: ``None``. 
+ backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. + Defaults to ``None``. + + Returns: + float: Entangling capability. + """ + + if not isinstance(samples, int): + raise_error( + TypeError, f"samples must be type int, but it is type {type(samples)}." + ) + + if ( + seed is not None + and not isinstance(seed, int) + and not isinstance(seed, np.random.Generator) + ): + raise_error( + TypeError, "seed must be either type int or numpy.random.Generator." + ) + + backend = _check_backend(backend) + + local_state = ( + np.random.default_rng(seed) if seed is None or isinstance(seed, int) else seed + ) + + res = [] + for _ in range(samples): + params = local_state.uniform(-np.pi, np.pi, circuit.trainable_gates.nparams) + circuit.set_parameters(params) + entanglement = meyer_wallach_entanglement(circuit, backend=backend) + res.append(entanglement) + + capability = 2 * np.real(np.sum(res)) / samples + + return capability diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py new file mode 100644 index 0000000000..716bdda3fd --- /dev/null +++ b/src/qibo/quantum_info/entropies.py @@ -0,0 +1,876 @@ +"""Submodule with entropy measures.""" + +from typing import Union + +import numpy as np +from scipy.linalg import fractional_matrix_power + +from qibo.backends import _check_backend +from qibo.config import PRECISION_TOL, raise_error +from qibo.quantum_info.metrics import _check_hermitian_or_not_gpu, purity + + +def shannon_entropy(prob_dist, base: float = 2, backend=None): + """Calculate the Shannon entropy of a probability array :math:`\\mathbf{p}`, which is given by + + .. math:: + H(\\mathbf{p}) = - \\sum_{k = 0}^{d^{2} - 1} \\, p_{k} \\, \\log_{b}(p_{k}) \\, , + + where :math:`d = \\text{dim}(\\mathcal{H})` is the dimension of the + Hilbert space :math:`\\mathcal{H}`, :math:`b` is the log base (default 2), + and :math:`0 \\log_{b}(0) \\equiv 0`. + + Args: + prob_dist (ndarray or list): a probability array :math:`\\mathbf{p}`. + base (float): the base of the log. Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. + Defaults to ``None``. + + Returns: + (float): Shannon entropy :math:`H(\\mathcal{p})`. + """ + backend = _check_backend(backend) + + if isinstance(prob_dist, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist = backend.cast(prob_dist, dtype=np.float64) + + if base <= 0: + raise_error(ValueError, "log base must be non-negative.") + + if len(prob_dist.shape) != 1: + raise_error( + TypeError, + f"Probability array must have dims (k,) but it has {prob_dist.shape}.", + ) + + if len(prob_dist) == 0: + raise_error(TypeError, "Empty array.") + + if any(prob_dist < 0) or any(prob_dist > 1.0): + raise_error( + ValueError, + "All elements of the probability array must be between 0. 
and 1..", + ) + + if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Probability array must sum to 1.") + + log_prob = np.where(prob_dist != 0, np.log2(prob_dist) / np.log2(base), 0.0) + + shan_entropy = -np.sum(prob_dist * log_prob) + + # absolute value if entropy == 0.0 to avoid returning -0.0 + shan_entropy = np.abs(shan_entropy) if shan_entropy == 0.0 else shan_entropy + + return complex(shan_entropy).real + + +def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backend=None): + """Calculates the relative entropy between two discrete probability distributions. + + For probabilities :math:`\\mathbf{p}` and :math:`\\mathbf{q}`, it is defined as + + ..math:: + D(\\mathbf{p} \\, \\| \\, \\mathbf{q}) = \\sum_{x} \\, \\mathbf{p}(x) \\, + \\log\\left( \\frac{\\mathbf{p}(x)}{\\mathbf{q}(x)} \\right) \\, . + + The classical relative entropy is also known as the + `Kullback-Leibler (KL) divergence `_. + + Args: + prob_dist_p (ndarray or list): discrete probability distribution :math:`p`. + prob_dist_q (ndarray or list): discrete probability distribution :math:`q`. + base (float): the base of the log. Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be + used in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Classical relative entropy between :math:`\\mathbf{p}` and :math:`\\mathbf{q}`. + """ + backend = _check_backend(backend) + + if isinstance(prob_dist_p, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist_p = backend.cast(prob_dist_p, dtype=np.float64) + if isinstance(prob_dist_q, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist_q = backend.cast(prob_dist_q, dtype=np.float64) + + if (len(prob_dist_p.shape) != 1) or (len(prob_dist_q.shape) != 1): + raise_error( + TypeError, + "Probability arrays must have dims (k,) but have " + + f"dims {prob_dist_p.shape} and {prob_dist_q.shape}.", + ) + + if (len(prob_dist_p) == 0) or (len(prob_dist_q) == 0): + raise_error(TypeError, "At least one of the arrays is empty.") + + if base <= 0: + raise_error(ValueError, "log base must be non-negative.") + + if (any(prob_dist_p < 0) or any(prob_dist_p > 1.0)) or ( + any(prob_dist_q < 0) or any(prob_dist_q > 1.0) + ): + raise_error( + ValueError, + "All elements of the probability array must be between 0. and 1..", + ) + if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "First probability array must sum to 1.") + + if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Second probability array must sum to 1.") + + entropy_p = -1 * shannon_entropy(prob_dist_p, base=base, backend=backend) + + log_prob_q = np.where( + prob_dist_q != 0.0, np.log2(prob_dist_q) / np.log2(base), -np.inf + ) + + log_prob = np.where(prob_dist_p != 0.0, log_prob_q, 0.0) + + relative = np.sum(prob_dist_p * log_prob) + + return entropy_p - relative + + +def classical_renyi_entropy( + prob_dist, alpha: Union[float, int], base: float = 2, backend=None +): + """Calculates the classical Rényi entropy :math:`H_{\\alpha}` of a discrete probability distribution. + + For :math:`\\alpha \\in (0, \\, 1) \\cup (1, \\, \\infty)` and probability distribution + :math:`\\mathbf{p}`, the classical Rényi entropy is defined as + + .. 
math:: + H_{\\alpha}(\\mathbf{p}) = \\frac{1}{1 - \\alpha} \\, \\log\\left( \\sum_{x} + \\, \\mathbf{p}^{\\alpha}(x) \\right) \\, . + + A special case is the limit :math:`\\alpha \\to 1`, in which the classical Rényi entropy + coincides with the :func:`qibo.quantum_info.entropies.shannon_entropy`. + + Another special case is the limit :math:`\\alpha \\to 0`, where the function is + reduced to :math:`\\log\\left(|\\mathbf{p}|\\right)`, with :math:`|\\mathbf{p}|` + being the support of :math:`\\mathbf{p}`. + This is known as the `Hartley entropy `_ + (also known as *Hartley function* or *max-entropy*). + + In the limit :math:`\\alpha \\to \\infty`, the function reduces to + :math:`-\\log(\\max_{x}(\\mathbf{p}(x)))`, which is called the + `min-entropy `_. + + Args: + prob_dist (ndarray): discrete probability distribution. + alpha (float or int): order of the Rényi entropy. + base (float): the base of the log. Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be + used in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Classical Rényi entropy :math:`H_{\\alpha}`. + """ + backend = _check_backend(backend) + + if isinstance(prob_dist, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist = backend.cast(prob_dist, dtype=np.float64) + + if not isinstance(alpha, (float, int)): + raise_error( + TypeError, f"alpha must be type float, but it is type {type(alpha)}." + ) + + if alpha < 0.0: + raise_error(ValueError, "alpha must a non-negative float.") + + if base <= 0: + raise_error(ValueError, "log base must be non-negative.") + + if len(prob_dist.shape) != 1: + raise_error( + TypeError, + f"Probability array must have dims (k,) but it has {prob_dist.shape}.", + ) + + if len(prob_dist) == 0: + raise_error(TypeError, "Empty array.") + + if any(prob_dist < 0) or any(prob_dist > 1.0): + raise_error( + ValueError, + "All elements of the probability array must be between 0. and 1..", + ) + + if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Probability array must sum to 1.") + + if alpha == 0.0: + return np.log2(len(prob_dist)) / np.log2(base) + + if alpha == 1.0: + return shannon_entropy(prob_dist, base=base, backend=backend) + + if alpha == np.inf: + return -1 * np.log2(max(prob_dist)) / np.log2(base) + + renyi_ent = (1 / (1 - alpha)) * np.log2(np.sum(prob_dist**alpha)) / np.log2(base) + + return renyi_ent + + +def classical_relative_renyi_entropy( + prob_dist_p, prob_dist_q, alpha: Union[float, int], base: float = 2, backend=None +): + """Calculates the classical relative Rényi entropy between two discrete probability distributions. + + This function is also known as + `Rényi divergence `_. + + For :math:`\\alpha \\in (0, \\, 1) \\cup (1, \\, \\infty)` and probability distributions + :math:`\\mathbf{p}` and :math:`\\mathbf{q}`, the classical relative Rényi entropy is defined as + + .. math:: + H_{\\alpha}(\\mathbf{p} \\, \\| \\, \\mathbf{q}) = \\frac{1}{\\alpha - 1} \\, + \\log\\left( \\sum_{x} \\, \\frac{\\mathbf{p}^{\\alpha}(x)} + {\\mathbf{q}^{\\alpha - 1}(x)} \\right) \\, . + + A special case is the limit :math:`\\alpha \\to 1`, in which the classical Rényi divergence + coincides with the :func:`qibo.quantum_info.entropies.classical_relative_entropy`. 
+ + Another special case is the limit :math:`\\alpha \\to 1/2`, where the function is + reduced to :math:`-2 \\log\\left(\\sum_{x} \\, \\sqrt{\\mathbf{p}(x) \\, \\mathbf{q}(x)} \\right)`. + The sum inside the :math:`\\log` is known as the + `Bhattacharyya coefficient `_. + + In the limit :math:`\\alpha \\to \\infty`, the function reduces to + :math:`\\log(\\max_{x}(\\mathbf{p}(x) \\, \\mathbf{q}(x))`. + + Args: + prob_dist_p (ndarray or list): discrete probability distribution :math:`p`. + prob_dist_q (ndarray or list): discrete probability distribution :math:`q`. + alpha (float or int): order of the Rényi entropy. + base (float): the base of the log. Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be + used in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Classical relative Rényi entropy :math:`H_{\\alpha}(\\mathbf{p} \\, \\| \\, \\mathbf{q})`. + """ + backend = _check_backend(backend) + + if isinstance(prob_dist_p, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist_p = backend.cast(prob_dist_p, dtype=np.float64) + if isinstance(prob_dist_q, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist_q = backend.cast(prob_dist_q, dtype=np.float64) + + if (len(prob_dist_p.shape) != 1) or (len(prob_dist_q.shape) != 1): + raise_error( + TypeError, + "Probability arrays must have dims (k,) but have " + + f"dims {prob_dist_p.shape} and {prob_dist_q.shape}.", + ) + + if (len(prob_dist_p) == 0) or (len(prob_dist_q) == 0): + raise_error(TypeError, "At least one of the arrays is empty.") + + if not isinstance(alpha, (float, int)): + raise_error( + TypeError, f"alpha must be type float, but it is type {type(alpha)}." + ) + + if alpha < 0.0: + raise_error(ValueError, "alpha must a non-negative float.") + + if base <= 0: + raise_error(ValueError, "log base must be non-negative.") + + if (any(prob_dist_p < 0) or any(prob_dist_p > 1.0)) or ( + any(prob_dist_q < 0) or any(prob_dist_q > 1.0) + ): + raise_error( + ValueError, + "All elements of the probability array must be between 0. and 1..", + ) + if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "First probability array must sum to 1.") + + if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Second probability array must sum to 1.") + + if alpha == 0.5: + return -2 * np.log2(np.sum(np.sqrt(prob_dist_p * prob_dist_q))) / np.log2(base) + + if alpha == 1.0: + return classical_relative_entropy( + prob_dist_p, prob_dist_q, base=base, backend=backend + ) + + if alpha == np.inf: + return np.log2(max(prob_dist_p / prob_dist_q)) / np.log2(base) + + prob_p = prob_dist_p**alpha + prob_q = prob_dist_q ** (1 - alpha) + + return (1 / (alpha - 1)) * np.log2(np.sum(prob_p * prob_q)) / np.log2(base) + + +def classical_tsallis_entropy(prob_dist, alpha: float, base: float = 2, backend=None): + """Calculates the classical Tsallis entropy for a discrete probability distribution. + + This is defined as + + .. math:: + S_{\\alpha}(\\mathbf{p}) = \\frac{1}{\\alpha - 1} \\, + \\left(1 - \\sum_{x} \\, \\mathbf{p}^{\\alpha}(x) \\right) + + Args: + prob_dist (ndarray): discrete probability distribution. + alpha (float or int): entropic index. + base (float): the base of the log. Used when ``alpha=1.0``. + Defaults to :math:`2`. 
+ backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be + used in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Classical Tsallis entropy :math:`S_{\\alpha}(\\mathbf{p})`. + """ + backend = _check_backend(backend) + + if isinstance(prob_dist, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist = backend.cast(prob_dist, dtype=np.float64) + + if not isinstance(alpha, (float, int)): + raise_error( + TypeError, f"alpha must be type float, but it is type {type(alpha)}." + ) + + if alpha < 0.0: + raise_error(ValueError, "alpha must a non-negative float.") + + if base <= 0: + raise_error(ValueError, "log base must be non-negative.") + + if len(prob_dist.shape) != 1: + raise_error( + TypeError, + f"Probability array must have dims (k,) but it has {prob_dist.shape}.", + ) + + if len(prob_dist) == 0: + raise_error(TypeError, "Empty array.") + + if any(prob_dist < 0) or any(prob_dist > 1.0): + raise_error( + ValueError, + "All elements of the probability array must be between 0. and 1..", + ) + + if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Probability array must sum to 1.") + + if alpha == 1.0: + return shannon_entropy(prob_dist, base=base, backend=backend) + + return (1 / (1 - alpha)) * (np.sum(prob_dist**alpha) - 1) + + +def von_neumann_entropy( + state, + base: float = 2, + check_hermitian: bool = False, + return_spectrum: bool = False, + backend=None, +): + """Calculates the von-Neumann entropy :math:`S(\\rho)` of a quantum ``state`` :math:`\\rho`. + + It is given by + + .. math:: + S(\\rho) = - \\text{tr}\\left[\\rho \\, \\log(\\rho)\\right] + + Args: + state (ndarray): statevector or density matrix. + base (float, optional): the base of the log. Defaults to :math:`2`. + check_hermitian (bool, optional): if ``True``, checks if ``state`` is Hermitian. + If ``False``, it assumes ``state`` is Hermitian . + Defaults to ``False``. + return_spectrum: if ``True``, returns ``entropy`` and + :math:`-\\log_{\\textup{b}}(\\textup{eigenvalues})`, where :math:`b` is ``base``. + If ``False``, returns only ``entropy``. Default is ``False``. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: The von-Neumann entropy :math:`S` of ``state`` :math:`\\rho`. 
+ """ + backend = _check_backend(backend) + + if ( + (len(state.shape) >= 3) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + if base <= 0.0: + raise_error(ValueError, "log base must be non-negative.") + + if not isinstance(check_hermitian, bool): + raise_error( + TypeError, + f"check_hermitian must be type bool, but it is type {type(check_hermitian)}.", + ) + + if purity(state) == 1.0: + if return_spectrum: + return 0.0, backend.cast([1.0], dtype=float) + + return 0.0 + + if not check_hermitian or _check_hermitian_or_not_gpu(state, backend=backend): + eigenvalues = np.linalg.eigvalsh(state) + else: + eigenvalues = np.linalg.eigvals(state) + + log_prob = np.where(eigenvalues > 0, np.log2(eigenvalues) / np.log2(base), 0.0) + + ent = -np.sum(eigenvalues * log_prob) + # absolute value if entropy == 0.0 to avoid returning -0.0 + ent = np.abs(ent) if ent == 0.0 else ent + + ent = float(ent) + + if return_spectrum: + log_prob = backend.cast(log_prob, dtype=log_prob.dtype) + return ent, -log_prob + + return ent + + +def relative_von_neumann_entropy( + state, target, base: float = 2, check_hermitian: bool = False, backend=None +): + """Calculates the relative entropy :math:`S(\\rho \\, \\| \\, \\sigma)` between ``state`` :math:`\\rho` and ``target`` :math:`\\sigma`. + + It is given by + + .. math:: + S(\\rho \\, \\| \\, \\sigma) = \\text{tr}\\left[\\rho \\, \\log(\\rho)\\right] + - \\text{tr}\\left[\\rho \\, \\log(\\sigma)\\right] + + Args: + state (ndarray): statevector or density matrix :math:`\\rho`. + target (ndarray): statevector or density matrix :math:`\\sigma`. + base (float, optional): the base of the log. Defaults to :math:`2`. + check_hermitian (bool, optional): If ``True``, checks if ``state`` is Hermitian. + If ``False``, it assumes ``state`` is Hermitian . + Defaults to ``False``. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Relative (von-Neumann) entropy :math:`S(\\rho \\, \\| \\, \\sigma)`. 
+ """ + backend = _check_backend(backend) + + if ( + (len(state.shape) >= 3) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + if ( + (len(target.shape) >= 3) + or (len(target) == 0) + or (len(target.shape) == 2 and target.shape[0] != target.shape[1]) + ): + raise_error( + TypeError, + f"target must have dims either (k,) or (k,k), but have dims {target.shape}.", + ) + + if base <= 0.0: + raise_error(ValueError, "log base must be non-negative.") + + if not isinstance(check_hermitian, bool): + raise_error( + TypeError, + f"check_hermitian must be type bool, but it is type {type(check_hermitian)}.", + ) + + if purity(state) == 1.0 and purity(target) == 1.0: + return 0.0 + + if len(state.shape) == 1: + state = np.outer(state, np.conj(state)) + + if len(target.shape) == 1: + target = np.outer(target, np.conj(target)) + + if not check_hermitian or _check_hermitian_or_not_gpu(state, backend=backend): + eigenvalues_state = np.linalg.eigvalsh(state) + else: + eigenvalues_state = np.linalg.eigvals(state) + + if not check_hermitian or _check_hermitian_or_not_gpu(target, backend=backend): + eigenvalues_target = np.linalg.eigvalsh(target) + else: + eigenvalues_target = np.linalg.eigvals(target) + + log_state = np.where( + eigenvalues_state > 0, np.log2(eigenvalues_state) / np.log2(base), 0.0 + ) + log_target = np.where( + eigenvalues_target > 0, np.log2(eigenvalues_target) / np.log2(base), -np.inf + ) + + log_target = np.where(eigenvalues_state != 0.0, log_target, 0.0) + + entropy_state = np.sum(eigenvalues_state * log_state) + + relative = np.sum(eigenvalues_state * log_target) + + return float(entropy_state - relative) + + +def renyi_entropy(state, alpha: Union[float, int], base: float = 2, backend=None): + """Calculates the Rényi entropy :math:`H_{\\alpha}` of a quantum state :math:`\\rho`. + + For :math:`\\alpha \\in (0, \\, 1) \\cup (1, \\, \\infty)`, the Rényi entropy is defined as + + .. math:: + H_{\\alpha}(\\rho) = \\frac{1}{1 - \\alpha} \\, \\log\\left( \\rho^{\\alpha} \\right) \\, . + + A special case is the limit :math:`\\alpha \\to 1`, in which the Rényi entropy + coincides with the :func:`qibo.quantum_info.entropies.entropy`. + + Another special case is the limit :math:`\\alpha \\to 0`, where the function is + reduced to :math:`\\log\\left(d\\right)`, with :math:`d = 2^{n}` + being the dimension of the Hilbert space in which ``state`` :math:`\\rho` lives in. + This is known as the `Hartley entropy `_ + (also known as *Hartley function* or *max-entropy*). + + In the limit :math:`\\alpha \\to \\infty`, the function reduces to + :math:`-\\log(\\|\\rho\\|_{\\infty})`, with :math:`\\|\\cdot\\|_{\\infty}` + being the `spectral norm `_. + This is known as the `min-entropy `_. + + Args: + state (ndarray): statevector or density matrix. + alpha (float or int): order of the Rényi entropy. + base (float): the base of the log. Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be + used in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Rényi entropy :math:`H_{\\alpha}`. 
+ """ + backend = _check_backend(backend) + + if ( + (len(state.shape) >= 3) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + if not isinstance(alpha, (float, int)): + raise_error( + TypeError, f"alpha must be type float, but it is type {type(alpha)}." + ) + + if alpha < 0.0: + raise_error(ValueError, "alpha must a non-negative float.") + + if base <= 0.0: + raise_error(ValueError, "log base must be non-negative.") + + if abs(purity(state) - 1.0) < PRECISION_TOL: + return 0.0 + + if alpha == 0.0: + return np.log2(len(state)) / np.log2(base) + + if alpha == 1.0: + return von_neumann_entropy(state, base=base, backend=backend) + + if alpha == np.inf: + return ( + -1 + * np.log2(backend.calculate_norm_density_matrix(state, order=2)) + / np.log2(base) + ) + + log = np.log2(np.trace(_matrix_power(state, alpha, backend))) + + return (1 / (1 - alpha)) * log / np.log2(base) + + +def relative_renyi_entropy( + state, target, alpha: Union[float, int], base: float = 2, backend=None +): + """Calculates the relative Rényi entropy between two quantum states. + + For :math:`\\alpha \\in (0, \\, 1) \\cup (1, \\, \\infty)` and quantum states + :math:`\\rho` and :math:`\\sigma`, the relative Rényi entropy is defined as + + .. math:: + H_{\\alpha}(\\rho \\, \\| \\, \\sigma) = \\frac{1}{\\alpha - 1} \\, + \\log\\left( \\textup{tr}\\left( \\rho^{\\alpha} \\, + \\sigma^{1 - \\alpha} \\right) \\right) \\, . + + A special case is the limit :math:`\\alpha \\to 1`, in which the Rényi entropy + coincides with the :func:`qibo.quantum_info.entropies.relative_entropy`. + + In the limit :math:`\\alpha \\to \\infty`, the function reduces to + :math:`-2 \\, \\log(\\|\\sqrt{\\rho} \\, \\sqrt{\\sigma}\\|_{1})`, + with :math:`\\|\\cdot\\|_{1}` being the + `Schatten 1-norm `_. + This is known as the `min-relative entropy `_. + + .. note:: + Function raises ``NotImplementedError`` when ``target`` :math:`sigma` + is a pure state and :math:`\\alpha > 1`. This is due to the fact that + it is not possible to calculate :math:`\\sigma^{1 - \\alpha}` when + :math:`\\alpha > 1` and :math:`\\sigma` is a projector, i.e. a singular matrix. + + Args: + state (ndarray): statevector or density matrix :math:`\\rho`. + target (ndarray): statevector or density matrix :math:`\\sigma`. + alpha (float or int): order of the Rényi entropy. + base (float): the base of the log. Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be + used in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Relative Rényi entropy :math:`H_{\\alpha}(\\rho \\, \\| \\, \\sigma)`. + """ + backend = _check_backend(backend) + + if ( + (len(state.shape) >= 3) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + if ( + (len(target.shape) >= 3) + or (len(target) == 0) + or (len(target.shape) == 2 and target.shape[0] != target.shape[1]) + ): + raise_error( + TypeError, + f"target must have dims either (k,) or (k,k), but have dims {target.shape}.", + ) + + if not isinstance(alpha, (float, int)): + raise_error( + TypeError, f"alpha must be type float, but it is type {type(alpha)}." 
+ ) + + if alpha < 0.0: + raise_error(ValueError, "alpha must a non-negative float.") + + if base <= 0.0: + raise_error(ValueError, "log base must be non-negative.") + + purity_target = purity(target) + if ( + abs(purity(state) - 1.0) < PRECISION_TOL + and abs(purity_target - 1) < PRECISION_TOL + ): + return 0.0 + + if alpha > 1.0 and abs(purity_target - 1) < PRECISION_TOL: + raise_error( + NotImplementedError, + "It is not possible to invert a singular matrix. ``target`` is a pure state and alpha > 1.", + ) + + if len(state.shape) == 1: + state = np.outer(state, np.conj(state)) + + if alpha == 1.0: + return relative_von_neumann_entropy(state, target, base, backend=backend) + + if alpha == np.inf: + new_state = _matrix_power(state, 0.5, backend) + new_target = _matrix_power(target, 0.5, backend) + + log = np.log2( + backend.calculate_norm_density_matrix(new_state @ new_target, order=1) + ) + + return -2 * log / np.log2(base) + + log = _matrix_power(state, alpha, backend) + log = log @ _matrix_power(target, 1 - alpha, backend) + log = np.log2(np.trace(log)) + + return (1 / (alpha - 1)) * log / np.log2(base) + + +def tsallis_entropy(state, alpha: float, base: float = 2, backend=None): + """Calculates the Tsallis entropy of a quantum state. + + .. math:: + S_{\\alpha}(\\rho) = \\frac{1}{1 - \\alpha} \\, + \\left( \\text{tr}(\\rho^{\\alpha}) - 1 \\right) + + When :math:`\\alpha = 1`, the functions defaults to + :func:`qibo.quantum_info.entropies.entropy`. + + Args: + state (ndarray): statevector or density matrix. + alpha (float or int): entropic index. + base (float, optional): the base of the log. Used when ``alpha=1.0``. + Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Tsallis entropy :math:`S_{\\alpha}(\\rho)`. + """ + backend = _check_backend(backend) + + if ( + (len(state.shape) >= 3) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + if not isinstance(alpha, (float, int)): + raise_error( + TypeError, f"alpha must be type float, but it is type {type(alpha)}." + ) + + if alpha < 0.0: + raise_error(ValueError, "alpha must a non-negative float.") + + if base <= 0.0: + raise_error(ValueError, "log base must be non-negative.") + + if abs(purity(state) - 1.0) < PRECISION_TOL: + return 0.0 + + if alpha == 1.0: + return von_neumann_entropy(state, base=base, backend=backend) + + return (1 / (1 - alpha)) * (np.trace(_matrix_power(state, alpha, backend)) - 1) + + +def entanglement_entropy( + state, + bipartition, + base: float = 2, + check_hermitian: bool = False, + return_spectrum: bool = False, + backend=None, +): + """Calculates the entanglement entropy :math:`S` of bipartition :math:`A` + of ``state`` :math:`\\rho`. This is given by + + .. math:: + S(\\rho_{A}) = -\\text{tr}(\\rho_{A} \\, \\log(\\rho_{A})) \\, , + + where :math:`\\rho_{A} = \\text{tr}_{B}(\\rho)` is the reduced density matrix calculated + by tracing out the ``bipartition`` :math:`B`. + + Args: + state (ndarray): statevector or density matrix. + bipartition (list or tuple or ndarray): qubits in the subsystem to be traced out. + base (float, optional): the base of the log. Defaults to :math: `2`. + check_hermitian (bool, optional): if ``True``, checks if :math:`\\rho_{A}` is Hermitian. 
+ If ``False``, it assumes ``state`` is Hermitian . Default: ``False``. + return_spectrum: if ``True``, returns ``entropy`` and eigenvalues of ``state``. + If ``False``, returns only ``entropy``. Default is ``False``. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Entanglement entropy :math:`S` of ``state`` :math:`\\rho`. + """ + backend = _check_backend(backend) + + if base <= 0.0: + raise_error(ValueError, "log base must be non-negative.") + + if ( + (len(state.shape) not in [1, 2]) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + nqubits = int(np.log2(state.shape[0])) + + reduced_density_matrix = ( + backend.partial_trace(state, bipartition, nqubits) + if len(state.shape) == 1 + else backend.partial_trace_density_matrix(state, bipartition, nqubits) + ) + + entropy_entanglement = von_neumann_entropy( + reduced_density_matrix, + base=base, + check_hermitian=check_hermitian, + return_spectrum=return_spectrum, + backend=backend, + ) + + return entropy_entanglement + + +def _matrix_power(matrix, alpha, backend): + """Calculates ``matrix ** alpha`` according to backend.""" + if backend.__class__.__name__ in [ + "CupyBackend", + "CuQuantumBackend", + ]: # pragma: no cover + new_matrix = backend.to_numpy(matrix) + else: + new_matrix = np.copy(matrix) + + if len(new_matrix.shape) == 1: + new_matrix = np.outer(new_matrix, np.conj(new_matrix)) + + new_matrix = fractional_matrix_power(new_matrix, alpha) + + return backend.cast(new_matrix, dtype=new_matrix.dtype) diff --git a/src/qibo/quantum_info/metrics.py b/src/qibo/quantum_info/metrics.py index 41f4740f8f..08dbf777af 100644 --- a/src/qibo/quantum_info/metrics.py +++ b/src/qibo/quantum_info/metrics.py @@ -57,269 +57,6 @@ def impurity(state): return 1 - purity(state) -def concurrence(state, bipartition, check_purity: bool = True, backend=None): - """Calculates concurrence of a pure bipartite quantum state - :math:`\\rho \\in \\mathcal{H}_{A} \\otimes \\mathcal{H}_{B}` as - - .. math:: - C(\\rho) = \\sqrt{2 \\, (\\text{tr}^{2}(\\rho) - \\text{tr}(\\rho_{A}^{2}))} \\, , - - where :math:`\\rho_{A} = \\text{tr}_{B}(\\rho)` is the reduced density operator - obtained by tracing out the qubits in the ``bipartition`` :math:`B`. - - Args: - state (ndarray): statevector or density matrix. - bipartition (list or tuple or ndarray): qubits in the subsystem to be traced out. - check_purity (bool, optional): if ``True``, checks if ``state`` is pure. If ``False``, - it assumes ``state`` is pure . Defaults to ``True``. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used - in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. - Defaults to ``None``. - - Returns: - float: Concurrence of :math:`\\rho`. 
- """ - backend = _check_backend(backend) - - if ( - (len(state.shape) not in [1, 2]) - or (len(state) == 0) - or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) - ): - raise_error( - TypeError, - f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", - ) - - if isinstance(check_purity, bool) is False: - raise_error( - TypeError, - f"check_purity must be type bool, but it is type {type(check_purity)}.", - ) - - nqubits = int(np.log2(state.shape[0])) - - if check_purity is True: - purity_total_system = purity(state) - - mixed = bool(abs(purity_total_system - 1.0) > PRECISION_TOL) - if mixed is True: - raise_error( - NotImplementedError, - "concurrence only implemented for pure quantum states.", - ) - - reduced_density_matrix = ( - backend.partial_trace(state, bipartition, nqubits) - if len(state.shape) == 1 - else backend.partial_trace_density_matrix(state, bipartition, nqubits) - ) - - purity_reduced = purity(reduced_density_matrix) - if purity_reduced - 1.0 > 0.0: - purity_reduced = round(purity_reduced, 7) - - concur = np.sqrt(2 * (1 - purity_reduced)) - - return concur - - -def entanglement_of_formation( - state, bipartition, base: float = 2, check_purity: bool = True, backend=None -): - """Calculates the entanglement of formation :math:`E_{f}` of a pure bipartite - quantum state :math:`\\rho`, which is given by - - .. math:: - E_{f} = H([1 - x, x]) \\, , - - where - - .. math:: - x = \\frac{1 + \\sqrt{1 - C^{2}(\\rho)}}{2} \\, , - - :math:`C(\\rho)` is the :func:`qibo.quantum_info.concurrence` of :math:`\\rho`, - and :math:`H` is the :func:`qibo.quantum_info.shannon_entropy`. - - Args: - state (ndarray): statevector or density matrix. - bipartition (list or tuple or ndarray): qubits in the subsystem to be traced out. - base (float): the base of the log in :func:`qibo.quantum_info.shannon_entropy`. - Defaults to :math:`2`. - check_purity (bool, optional): if ``True``, checks if ``state`` is pure. If ``False``, - it assumes ``state`` is pure . Default: ``True``. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used - in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. - Defaults to ``None``. - - - Returns: - float: entanglement of formation of state :math:`\\rho`. - """ - backend = _check_backend(backend) - - from qibo.quantum_info.utils import shannon_entropy # pylint: disable=C0415 - - concur = concurrence( - state, bipartition=bipartition, check_purity=check_purity, backend=backend - ) - concur = (1 + np.sqrt(1 - concur**2)) / 2 - probabilities = [1 - concur, concur] - - ent_of_form = shannon_entropy(probabilities, base=base, backend=backend) - - return ent_of_form - - -def entropy( - state, - base: float = 2, - check_hermitian: bool = False, - return_spectrum: bool = False, - backend=None, -): - """The von-Neumann entropy :math:`S(\\rho)` of a quantum ``state`` :math:`\\rho`, which - is given by - - .. math:: - S(\\rho) = - \\text{tr}\\left[\\rho \\, \\log(\\rho)\\right] - - Args: - state (ndarray): statevector or density matrix. - base (float, optional): the base of the log. Defaults to :math:`2`. - check_hermitian (bool, optional): if ``True``, checks if ``state`` is Hermitian. - If ``False``, it assumes ``state`` is Hermitian . - Defaults to ``False``. - return_spectrum: if ``True``, returns ``entropy`` and - :math:`-\\log_{\\textup{b}}(\\textup{eigenvalues})`, where :math:`b` is ``base``. - If ``False``, returns only ``entropy``. Default is ``False``. 
- backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used - in the execution. If ``None``, it uses - :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. - - Returns: - float: The von-Neumann entropy :math:`S` of ``state`` :math:`\\rho`. - """ - backend = _check_backend(backend) - - if ( - (len(state.shape) >= 3) - or (len(state) == 0) - or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) - ): - raise_error( - TypeError, - f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", - ) - - if base <= 0.0: - raise_error(ValueError, "log base must be non-negative.") - - if isinstance(check_hermitian, bool) is False: - raise_error( - TypeError, - f"check_hermitian must be type bool, but it is type {type(check_hermitian)}.", - ) - - if purity(state) == 1.0: - if return_spectrum: - return 0.0, backend.cast([1.0], dtype=float) - - return 0.0 - - if check_hermitian is False or _check_hermitian_or_not_gpu(state, backend=backend): - eigenvalues = np.linalg.eigvalsh(state) - else: - eigenvalues = np.linalg.eigvals(state) - - if base == 2: - log_prob = np.where(eigenvalues > 0, np.log2(eigenvalues), 0.0) - elif base == 10: - log_prob = np.where(eigenvalues > 0, np.log10(eigenvalues), 0.0) - elif base == np.e: - log_prob = np.where(eigenvalues > 0, np.log(eigenvalues), 0.0) - else: - log_prob = np.where(eigenvalues > 0, np.log(eigenvalues) / np.log(base), 0.0) - - ent = -np.sum(eigenvalues * log_prob) - # absolute value if entropy == 0.0 to avoid returning -0.0 - ent = np.abs(ent) if ent == 0.0 else ent - - ent = float(ent) - - if return_spectrum: - log_prob = backend.cast(log_prob, dtype=log_prob.dtype) - return ent, -log_prob - - return ent - - -def entanglement_entropy( - state, - bipartition, - base: float = 2, - check_hermitian: bool = False, - return_spectrum: bool = False, - backend=None, -): - """Calculates the entanglement entropy :math:`S` of bipartition :math:`A` - of ``state`` :math:`\\rho`. This is given by - - .. math:: - S(\\rho_{A}) = -\\text{tr}(\\rho_{A} \\, \\log(\\rho_{A})) \\, , - - where :math:`\\rho_{A} = \\text{tr}_{B}(\\rho)` is the reduced density matrix calculated - by tracing out the ``bipartition`` :math:`B`. - - Args: - state (ndarray): statevector or density matrix. - bipartition (list or tuple or ndarray): qubits in the subsystem to be traced out. - base (float, optional): the base of the log. Defaults to :math: `2`. - check_hermitian (bool, optional): if ``True``, checks if :math:`\\rho_{A}` is Hermitian. - If ``False``, it assumes ``state`` is Hermitian . Default: ``False``. - return_spectrum: if ``True``, returns ``entropy`` and eigenvalues of ``state``. - If ``False``, returns only ``entropy``. Default is ``False``. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used - in the execution. If ``None``, it uses - :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. - - Returns: - float: Entanglement entropy :math:`S` of ``state`` :math:`\\rho`. 
- """ - backend = _check_backend(backend) - - if base <= 0.0: - raise_error(ValueError, "log base must be non-negative.") - - if ( - (len(state.shape) not in [1, 2]) - or (len(state) == 0) - or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) - ): - raise_error( - TypeError, - f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", - ) - - nqubits = int(np.log2(state.shape[0])) - - reduced_density_matrix = ( - backend.partial_trace(state, bipartition, nqubits) - if len(state.shape) == 1 - else backend.partial_trace_density_matrix(state, bipartition, nqubits) - ) - - entropy_entanglement = entropy( - reduced_density_matrix, - base=base, - check_hermitian=check_hermitian, - return_spectrum=return_spectrum, - backend=backend, - ) - - return entropy_entanglement - - def trace_distance(state, target, check_hermitian: bool = False, backend=None): """Trace distance between two quantum states, :math:`\\rho` and :math:`\\sigma`: @@ -628,83 +365,6 @@ def bures_distance(state, target, check_hermitian: bool = False, backend=None): return distance -def entanglement_fidelity( - channel, nqubits: int, state=None, check_hermitian: bool = False, backend=None -): - """Entanglement fidelity :math:`F_{\\mathcal{E}}` of a ``channel`` :math:`\\mathcal{E}` - on ``state`` :math:`\\rho` is given by - - .. math:: - F_{\\mathcal{E}}(\\rho) = F(\\rho_{f}, \\rho) - - where :math:`F` is the :func:`qibo.quantum_info.fidelity` function for states, - and :math:`\\rho_{f} = \\mathcal{E}_{A} \\otimes I_{B}(\\rho)` - is the state after the channel :math:`\\mathcal{E}` was applied to - partition :math:`A`. - - Args: - channel (:class:`qibo.gates.channels.Channel`): quantum channel - acting on partition :math:`A`. - nqubits (int): total number of qubits in ``state``. - state (ndarray, optional): statevector or density matrix to be evolved - by ``channel``. If ``None``, defaults to the maximally entangled state - :math:`\\frac{1}{2^{n}} \\, \\sum_{k} \\, \\ket{k}\\ket{k}`, where - :math:`n` is ``nqubits``. Defaults to ``None``. - check_hermitian (bool, optional): if ``True``, checks if the final state - :math:`\\rho_{f}` is Hermitian. If ``False``, it assumes it is Hermitian. - Defaults to ``False``. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used - in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. - Defaults to ``None``. - - Returns: - float: Entanglement fidelity :math:`F_{\\mathcal{E}}`. - """ - backend = _check_backend(backend) - - if isinstance(nqubits, int) is False: - raise_error( - TypeError, f"nqubits must be type int, but it is type {type(nqubits)}." - ) - - if nqubits <= 0: - raise_error( - ValueError, f"nqubits must be a positive integer, but it is {nqubits}." 
- ) - - if state is not None and ( - (len(state.shape) not in [1, 2]) - or (len(state) == 0) - or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) - ): - raise_error( - TypeError, - f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", - ) - - if isinstance(check_hermitian, bool) is False: - raise_error( - TypeError, - f"check_hermitian must be type bool, but it is type {type(check_hermitian)}.", - ) - - if state is None: - state = backend.plus_density_matrix(nqubits) - - # necessary because this function do support repeated execution, - # so it has to default to density matrices - if len(state.shape) == 1: - state = np.outer(state, np.conj(state)) - - state_final = backend.apply_channel_density_matrix(channel, state, nqubits) - - entang_fidelity = fidelity( - state_final, state, check_hermitian=check_hermitian, backend=backend - ) - - return entang_fidelity - - def process_fidelity(channel, target=None, check_unitary: bool = False, backend=None): """Process fidelity between a quantum ``channel`` :math:`\\mathcal{E}` and a ``target`` unitary channel :math:`U`. The process fidelity is defined as @@ -1000,101 +660,6 @@ def diamond_norm(channel, target=None, backend=None, **kwargs): return solution -def meyer_wallach_entanglement(circuit, backend=None): - """Computes the Meyer-Wallach entanglement Q of the `circuit`, - - .. math:: - Q(\\theta) = 1 - \\frac{1}{N} \\, \\sum_{k} \\, - \\text{tr}\\left(\\rho_{k^{2}}(\\theta)\\right) \\, . - - Args: - circuit (:class:`qibo.models.Circuit`): Parametrized circuit. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used - in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. - Defaults to ``None``. - - Returns: - float: Meyer-Wallach entanglement. - """ - - backend = _check_backend(backend) - - circuit.density_matrix = True - nqubits = circuit.nqubits - - rho = backend.execute_circuit(circuit).state() - - ent = 0 - for j in range(nqubits): - trace_q = list(range(nqubits)) - trace_q.pop(j) - - rho_r = backend.partial_trace_density_matrix(rho, trace_q, nqubits) - - trace = purity(rho_r) - - ent += trace - - entanglement = 1 - ent / nqubits - - return entanglement - - -def entangling_capability(circuit, samples: int, seed=None, backend=None): - """Returns the entangling capability :math:`\\text{Ent}` of a parametrized - circuit, which is average Meyer-Wallach entanglement Q of the circuit, i.e. - - .. math:: - \\text{Ent} = \\frac{2}{S}\\sum_{k}Q_k \\, , - - where :math:`S` is the number of samples. - - Args: - circuit (:class:`qibo.models.Circuit`): Parametrized circuit. - samples (int): number of samples to estimate the integral. - seed (int or :class:`numpy.random.Generator`, optional): Either a generator of random - numbers or a fixed seed to initialize a generator. If ``None``, initializes - a generator with a random seed. Default: ``None``. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used - in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. - Defaults to ``None``. - - Returns: - float: Entangling capability. - """ - - if isinstance(samples, int) is False: - raise_error( - TypeError, f"samples must be type int, but it is type {type(samples)}." - ) - - if ( - seed is not None - and not isinstance(seed, int) - and not isinstance(seed, np.random.Generator) - ): - raise_error( - TypeError, "seed must be either type int or numpy.random.Generator." 
- ) - - backend = _check_backend(backend) - - local_state = ( - np.random.default_rng(seed) if seed is None or isinstance(seed, int) else seed - ) - - res = [] - for _ in range(samples): - params = local_state.uniform(-np.pi, np.pi, circuit.trainable_gates.nparams) - circuit.set_parameters(params) - entanglement = meyer_wallach_entanglement(circuit, backend=backend) - res.append(entanglement) - - capability = 2 * np.real(np.sum(res)) / samples - - return capability - - def expressibility( circuit, power_t: int, diff --git a/src/qibo/quantum_info/utils.py b/src/qibo/quantum_info/utils.py index e099c96f8e..ec772168e0 100644 --- a/src/qibo/quantum_info/utils.py +++ b/src/qibo/quantum_info/utils.py @@ -189,132 +189,6 @@ def hadamard_transform(array, implementation: str = "fast", backend=None): return array -def shannon_entropy(probability_array, base: float = 2, backend=None): - """Calculates the Shannon entropy of a probability array :math:`\\mathbf{p}`, which is given by - - .. math:: - H(\\mathbf{p}) = - \\sum_{k = 0}^{d^{2} - 1} \\, p_{k} \\, \\log_{b}(p_{k}) \\, , - - where :math:`d = \\text{dim}(\\mathcal{H})` is the dimension of the - Hilbert space :math:`\\mathcal{H}`, :math:`b` is the log base (default 2), - and :math:`0 \\log_{b}(0) \\equiv 0`. - - Args: - probability_array (ndarray or list): a probability array :math:`\\mathbf{p}`. - base (float): the base of the log. Defaults to :math:`2`. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used - in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. - Defaults to ``None``. - - Returns: - (float): The Shannon entropy :math:`H(\\mathcal{p})`. - """ - backend = _check_backend(backend) - - if isinstance(probability_array, list): - probability_array = backend.cast(probability_array, dtype=np.float64) - - if base <= 0: - raise_error(ValueError, "log base must be non-negative.") - - if len(probability_array.shape) != 1: - raise_error( - TypeError, - f"Probability array must have dims (k,) but it has {probability_array.shape}.", - ) - - if len(probability_array) == 0: - raise_error(TypeError, "Empty array.") - - if any(probability_array < 0) or any(probability_array > 1.0): - raise_error( - ValueError, - "All elements of the probability array must be between 0. and 1..", - ) - - if np.abs(np.sum(probability_array) - 1.0) > PRECISION_TOL: - raise_error(ValueError, "Probability array must sum to 1.") - - if base == 2: - log_prob = np.where(probability_array != 0.0, np.log2(probability_array), 0.0) - elif base == 10: - log_prob = np.where(probability_array != 0, np.log10(probability_array), 0.0) - elif base == np.e: - log_prob = np.where(probability_array != 0, np.log(probability_array), 0.0) - else: - log_prob = np.where( - probability_array != 0, np.log(probability_array) / np.log(base), 0.0 - ) - - entropy = -np.sum(probability_array * log_prob) - - # absolute value if entropy == 0.0 to avoid returning -0.0 - entropy = np.abs(entropy) if entropy == 0.0 else entropy - - return complex(entropy).real - - -def total_variation_distance( - prob_dist_p, prob_dist_q, validate: bool = False, backend=None -): - """Calculates the Total Variation (TV) distance between two discrete probability distributions. - - For probabilities :math:`\\mathbf{p}` and :math:`\\mathbf{q}`, it is defined as - - .. math:: - d_{\\text{TV}}(\\mathbf{p} \\, , \\, \\mathbf{q}) = \\frac{1}{2} - \\, \\| \\mathbf{p} - \\mathbf{q} \\|_{1} \\, , - - where :math:`\\| \\cdot \\|_{1}` is the :math:`\\mathcal{l}_{1}`-norm. 
- - Args: - prob_dist_p (ndarray or list): discrete probability distribution :math:`p`. - prob_dist_q (ndarray or list): discrete probability distribution :math:`q`. - validate (bool, optional): If ``True``, checks if :math:`p` and :math:`q` are proper - probability distributions. Defaults to ``False``. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be - used in the execution. If ``None``, it uses - :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. - - Returns: - float: Total variation distance between :math:`\\mathbf{p}` and :math:`\\mathbf{q}`. - """ - backend = _check_backend(backend) - - if isinstance(prob_dist_p, list): - prob_dist_p = backend.cast(prob_dist_p, dtype=np.float64) - if isinstance(prob_dist_q, list): - prob_dist_q = backend.cast(prob_dist_q, dtype=np.float64) - - if (len(prob_dist_p.shape) != 1) or (len(prob_dist_q.shape) != 1): - raise_error( - TypeError, - "Probability arrays must have dims (k,) but have " - + f"dims {prob_dist_p.shape} and {prob_dist_q.shape}.", - ) - - if (len(prob_dist_p) == 0) or (len(prob_dist_q) == 0): - raise_error(TypeError, "At least one of the arrays is empty.") - - if validate: - if (any(prob_dist_p < 0) or any(prob_dist_p > 1.0)) or ( - any(prob_dist_q < 0) or any(prob_dist_q > 1.0) - ): - raise_error( - ValueError, - "All elements of the probability array must be between 0. and 1..", - ) - if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: - raise_error(ValueError, "First probability array must sum to 1.") - - if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: - raise_error(ValueError, "Second probability array must sum to 1.") - - total_variation = 0.5 * np.sum(np.abs(prob_dist_p - prob_dist_q)) - - return total_variation - - def hellinger_distance(prob_dist_p, prob_dist_q, validate: bool = False, backend=None): """Calculates the Hellinger distance :math:`H` between two discrete probability distributions. 
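For orientation, a minimal usage sketch of the relocated functions, assuming the qibo.quantum_info.entropies and qibo.quantum_info.entanglement module layout exercised by the new tests below; the expected values are the ones asserted in those tests.

import numpy as np

from qibo import Circuit, gates
from qibo.quantum_info.entanglement import meyer_wallach_entanglement
from qibo.quantum_info.entropies import shannon_entropy

# A uniform two-outcome distribution carries exactly one bit of entropy in base 2,
# while a deterministic one carries none (cf. test_shannon_entropy).
assert np.isclose(shannon_entropy([0.5, 0.5], 2), 1.0)
assert np.isclose(shannon_entropy([1.0, 0.0], 2), 0.0)

# A circuit preparing a maximally entangled two-qubit state yields Q = 0.5 under the
# normalization asserted in test_meyer_wallach_entanglement.
bell = Circuit(2)
bell.add(gates.H(0))
bell.add(gates.CNOT(0, 1))
print(meyer_wallach_entanglement(bell))  # expected ~0.5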
diff --git a/tests/test_quantum_info_entanglement.py b/tests/test_quantum_info_entanglement.py new file mode 100644 index 0000000000..a98cf1909b --- /dev/null +++ b/tests/test_quantum_info_entanglement.py @@ -0,0 +1,175 @@ +import numpy as np +import pytest + +from qibo import Circuit, gates +from qibo.config import PRECISION_TOL +from qibo.quantum_info.entanglement import ( + concurrence, + entanglement_fidelity, + entanglement_of_formation, + entangling_capability, + meyer_wallach_entanglement, +) +from qibo.quantum_info.random_ensembles import random_density_matrix, random_statevector + + +@pytest.mark.parametrize("check_purity", [True, False]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("bipartition", [[0], [1]]) +def test_concurrence_and_formation(backend, bipartition, base, check_purity): + with pytest.raises(TypeError): + state = np.random.rand(2, 3) + state = backend.cast(state, dtype=state.dtype) + test = concurrence( + state, bipartition=bipartition, check_purity=check_purity, backend=backend + ) + with pytest.raises(TypeError): + state = random_statevector(4, backend=backend) + test = concurrence( + state, bipartition=bipartition, check_purity="True", backend=backend + ) + + if check_purity is True: + with pytest.raises(NotImplementedError): + state = backend.identity_density_matrix(2, normalize=False) + test = concurrence(state, bipartition=bipartition, backend=backend) + + nqubits = 2 + dim = 2**nqubits + state = random_statevector(dim, backend=backend) + concur = concurrence( + state, bipartition=bipartition, check_purity=check_purity, backend=backend + ) + ent_form = entanglement_of_formation( + state, + bipartition=bipartition, + base=base, + check_purity=check_purity, + backend=backend, + ) + backend.assert_allclose(0.0 <= concur <= np.sqrt(2), True) + backend.assert_allclose(0.0 <= ent_form <= 1.0, True) + + state = np.kron( + random_density_matrix(2, pure=True, backend=backend), + random_density_matrix(2, pure=True, backend=backend), + ) + concur = concurrence(state, bipartition, check_purity=check_purity, backend=backend) + ent_form = entanglement_of_formation( + state, + bipartition=bipartition, + base=base, + check_purity=check_purity, + backend=backend, + ) + backend.assert_allclose(concur, 0.0, atol=10 * PRECISION_TOL) + backend.assert_allclose(ent_form, 0.0, atol=PRECISION_TOL) + + +@pytest.mark.parametrize("check_hermitian", [False, True]) +@pytest.mark.parametrize("nqubits", [4, 6]) +@pytest.mark.parametrize("channel", [gates.DepolarizingChannel]) +def test_entanglement_fidelity(backend, channel, nqubits, check_hermitian): + with pytest.raises(TypeError): + test = entanglement_fidelity( + channel, nqubits=[0], check_hermitian=check_hermitian, backend=backend + ) + with pytest.raises(ValueError): + test = entanglement_fidelity( + channel, nqubits=0, check_hermitian=check_hermitian, backend=backend + ) + with pytest.raises(TypeError): + state = np.random.rand(2, 3, 2) + state = backend.cast(state, dtype=state.dtype) + test = entanglement_fidelity( + channel, + nqubits, + state=state, + check_hermitian=check_hermitian, + backend=backend, + ) + with pytest.raises(TypeError): + state = random_statevector(2, backend=backend) + test = entanglement_fidelity( + channel, nqubits, state=state, check_hermitian="False", backend=backend + ) + + channel = channel([0, 1], 0.5) + + # test on maximally entangled state + ent_fid = entanglement_fidelity( + channel, nqubits=nqubits, check_hermitian=check_hermitian, backend=backend + ) + 
backend.assert_allclose(ent_fid, 0.625, atol=PRECISION_TOL) + + # test with a state vector + state = backend.plus_state(nqubits) + ent_fid = entanglement_fidelity( + channel, + nqubits=nqubits, + state=state, + check_hermitian=check_hermitian, + backend=backend, + ) + backend.assert_allclose(ent_fid, 0.625, atol=PRECISION_TOL) + + # test on maximally mixed state + state = backend.identity_density_matrix(nqubits) + ent_fid = entanglement_fidelity( + channel, + nqubits=nqubits, + state=state, + check_hermitian=check_hermitian, + backend=backend, + ) + backend.assert_allclose(ent_fid, 1.0, atol=PRECISION_TOL) + + +def test_meyer_wallach_entanglement(backend): + nqubits = 2 + + circuit1 = Circuit(nqubits) + circuit1.add([gates.RX(0, np.pi / 4)] for _ in range(nqubits)) + + circuit2 = Circuit(nqubits) + circuit2.add([gates.RX(0, np.pi / 4)] for _ in range(nqubits)) + circuit2.add(gates.CNOT(0, 1)) + + backend.assert_allclose( + meyer_wallach_entanglement(circuit1, backend=backend), 0.0, atol=PRECISION_TOL + ) + + backend.assert_allclose( + meyer_wallach_entanglement(circuit2, backend=backend), 0.5, atol=PRECISION_TOL + ) + + +@pytest.mark.parametrize("seed", [None, 10, np.random.default_rng(10)]) +def test_entangling_capability(backend, seed): + with pytest.raises(TypeError): + circuit = Circuit(1) + samples = 0.5 + entangling_capability(circuit, samples, seed=seed, backend=backend) + with pytest.raises(TypeError): + circuit = Circuit(1) + entangling_capability(circuit, samples=10, seed="10", backend=backend) + + nqubits = 2 + samples = 100 + + c1 = Circuit(nqubits) + c1.add([gates.RX(q, 0, trainable=True) for q in range(nqubits)]) + c1.add(gates.CNOT(0, 1)) + c1.add([gates.RX(q, 0, trainable=True) for q in range(nqubits)]) + ent_mw1 = entangling_capability(c1, samples, seed=seed, backend=backend) + + c2 = Circuit(nqubits) + c2.add(gates.H(0)) + c2.add(gates.CNOT(0, 1)) + c2.add(gates.RX(0, 0, trainable=True)) + ent_mw2 = entangling_capability(c2, samples, seed=seed, backend=backend) + + c3 = Circuit(nqubits) + ent_mw3 = entangling_capability(c3, samples, seed=seed, backend=backend) + + backend.assert_allclose(ent_mw3 < ent_mw1 < ent_mw2, True) diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py new file mode 100644 index 0000000000..3cf8b2d721 --- /dev/null +++ b/tests/test_quantum_info_entropies.py @@ -0,0 +1,736 @@ +import numpy as np +import pytest +from scipy.linalg import sqrtm + +from qibo.config import PRECISION_TOL +from qibo.quantum_info.entropies import ( + _matrix_power, + classical_relative_entropy, + classical_relative_renyi_entropy, + classical_renyi_entropy, + classical_tsallis_entropy, + entanglement_entropy, + relative_renyi_entropy, + relative_von_neumann_entropy, + renyi_entropy, + shannon_entropy, + tsallis_entropy, + von_neumann_entropy, +) +from qibo.quantum_info.random_ensembles import ( + random_density_matrix, + random_statevector, + random_unitary, +) + + +def test_shannon_entropy_errors(backend): + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = shannon_entropy(prob, -2, backend=backend) + with pytest.raises(TypeError): + prob = np.array([[1.0], [0.0]]) + prob = backend.cast(prob, dtype=prob.dtype) + test = shannon_entropy(prob, backend=backend) + with pytest.raises(TypeError): + prob = np.array([]) + prob = backend.cast(prob, dtype=prob.dtype) + test = shannon_entropy(prob, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, -1.0]) + prob 
= backend.cast(prob, dtype=prob.dtype) + test = shannon_entropy(prob, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.1, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = shannon_entropy(prob, backend=backend) + with pytest.raises(ValueError): + prob = np.array([0.5, 0.4999999]) + prob = backend.cast(prob, dtype=prob.dtype) + test = shannon_entropy(prob, backend=backend) + + +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +def test_shannon_entropy(backend, base): + prob_array = [1.0, 0.0] + result = shannon_entropy(prob_array, base, backend=backend) + backend.assert_allclose(result, 0.0) + + if base == 2: + prob_array = np.array([0.5, 0.5]) + prob_array = backend.cast(prob_array, dtype=prob_array.dtype) + result = shannon_entropy(prob_array, base, backend=backend) + backend.assert_allclose(result, 1.0) + + +@pytest.mark.parametrize("kind", [None, list]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +def test_classical_relative_entropy(backend, base, kind): + with pytest.raises(TypeError): + prob = np.random.rand(1, 2) + prob_q = np.random.rand(1, 5) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, backend=backend) + with pytest.raises(TypeError): + prob = np.random.rand(1, 2)[0] + prob_q = np.array([]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, backend=backend) + with pytest.raises(ValueError): + prob = np.array([-1, 2.0]) + prob_q = np.random.rand(1, 5)[0] + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, backend=backend) + with pytest.raises(ValueError): + prob = np.random.rand(1, 2)[0] + prob_q = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob_q = np.random.rand(1, 2)[0] + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob_q = np.array([0.0, 1.0]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, base=-2, backend=backend) + + prob_p = np.random.rand(10) + prob_q = np.random.rand(10) + prob_p /= np.sum(prob_p) + prob_q /= np.sum(prob_q) + + target = np.sum(prob_p * np.log(prob_p) / np.log(base)) - np.sum( + prob_p * np.log(prob_q) / np.log(base) + ) + + if kind is not None: + prob_p, prob_q = kind(prob_p), kind(prob_q) + + divergence = classical_relative_entropy(prob_p, prob_q, base=base, backend=backend) + + backend.assert_allclose(divergence, target, atol=1e-5) + + +@pytest.mark.parametrize("kind", [None, list]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf]) +def test_classical_renyi_entropy(backend, alpha, base, kind): + with pytest.raises(TypeError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha="2", backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + 
test = classical_renyi_entropy(prob, alpha=-2, backend=backend) + with pytest.raises(TypeError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha, base="2", backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha, base=-2, backend=backend) + with pytest.raises(TypeError): + prob = np.array([[1.0], [0.0]]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha, backend=backend) + with pytest.raises(TypeError): + prob = np.array([]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, -1.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.1, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha, backend=backend) + with pytest.raises(ValueError): + prob = np.array([0.5, 0.4999999]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha, backend=backend) + + prob_dist = np.random.rand(10) + prob_dist /= np.sum(prob_dist) + + if alpha == 0.0: + target = np.log2(len(prob_dist)) / np.log2(base) + elif alpha == 1: + target = shannon_entropy(prob_dist, base=base, backend=backend) + elif alpha == 2: + target = -1 * np.log2(np.sum(prob_dist**2)) / np.log2(base) + elif alpha == np.inf: + target = -1 * np.log2(max(prob_dist)) / np.log2(base) + else: + target = (1 / (1 - alpha)) * np.log2(np.sum(prob_dist**alpha)) / np.log2(base) + + if kind is not None: + prob_dist = kind(prob_dist) + + renyi_ent = classical_renyi_entropy(prob_dist, alpha, base=base, backend=backend) + + backend.assert_allclose(renyi_ent, target, atol=1e-5) + + +@pytest.mark.parametrize("kind", [None, list]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("alpha", [0, 1 / 2, 1, 2, 3, np.inf]) +def test_classical_relative_renyi_entropy(backend, alpha, base, kind): + with pytest.raises(TypeError): + prob = np.random.rand(1, 2) + prob_q = np.random.rand(1, 5) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_renyi_entropy( + prob, prob_q, alpha, base, backend=backend + ) + with pytest.raises(TypeError): + prob = np.random.rand(1, 2)[0] + prob_q = np.array([]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_renyi_entropy( + prob, prob_q, alpha, base, backend=backend + ) + with pytest.raises(ValueError): + prob = np.array([-1, 2.0]) + prob_q = np.random.rand(1, 5)[0] + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_renyi_entropy( + prob, prob_q, alpha, base, backend=backend + ) + with pytest.raises(ValueError): + prob = np.random.rand(1, 2)[0] + prob_q = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_renyi_entropy( + prob, prob_q, alpha, base, backend=backend + ) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob_q = np.random.rand(1, 2)[0] + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test 
= classical_relative_renyi_entropy( + prob, prob_q, alpha, base, backend=backend + ) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob_q = np.array([0.0, 1.0]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_renyi_entropy( + prob, prob_q, alpha, base=-2, backend=backend + ) + with pytest.raises(TypeError): + prob = np.array([1.0, 0.0]) + prob_q = np.array([0.0, 1.0]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_renyi_entropy( + prob, prob_q, alpha="1", base=base, backend=backend + ) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob_q = np.array([0.0, 1.0]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_renyi_entropy( + prob, prob_q, alpha=-2, base=base, backend=backend + ) + + prob_p = np.random.rand(10) + prob_q = np.random.rand(10) + prob_p /= np.sum(prob_p) + prob_q /= np.sum(prob_q) + + if alpha == 0.5: + target = -2 * np.log2(np.sum(np.sqrt(prob_p * prob_q))) / np.log2(base) + elif alpha == 1.0: + target = classical_relative_entropy(prob_p, prob_q, base=base, backend=backend) + elif alpha == np.inf: + target = np.log2(max(prob_p / prob_q)) / np.log2(base) + else: + target = ( + (1 / (alpha - 1)) + * np.log2(np.sum(prob_p**alpha * prob_q ** (1 - alpha))) + / np.log2(base) + ) + + if kind is not None: + prob_p, prob_q = kind(prob_p), kind(prob_q) + + divergence = classical_relative_renyi_entropy( + prob_p, prob_q, alpha=alpha, base=base, backend=backend + ) + + backend.assert_allclose(divergence, target, atol=1e-5) + + +@pytest.mark.parametrize("kind", [None, list]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("alpha", [0, 1, 2, 3]) +def test_classical_tsallis_entropy(backend, alpha, base, kind): + with pytest.raises(TypeError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha="2", backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha=-2, backend=backend) + with pytest.raises(TypeError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha, base="2", backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha, base=-2, backend=backend) + with pytest.raises(TypeError): + prob = np.array([[1.0], [0.0]]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha, backend=backend) + with pytest.raises(TypeError): + prob = np.array([]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, -1.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.1, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha, backend=backend) + with pytest.raises(ValueError): + prob = np.array([0.5, 0.4999999]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_tsallis_entropy(prob, alpha, backend=backend) + + prob_dist 
= np.random.rand(10) + prob_dist /= np.sum(prob_dist) + + if alpha == 1.0: + target = shannon_entropy(prob_dist, base=base, backend=backend) + else: + target = (1 / (1 - alpha)) * (np.sum(prob_dist**alpha) - 1) + + if kind is not None: + prob_dist = kind(prob_dist) + + backend.assert_allclose( + classical_tsallis_entropy(prob_dist, alpha=alpha, base=base, backend=backend), + target, + atol=1e-5, + ) + + +@pytest.mark.parametrize("check_hermitian", [False, True]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +def test_von_neumann_entropy(backend, base, check_hermitian): + with pytest.raises(TypeError): + state = np.random.rand(2, 3) + state = backend.cast(state, dtype=state.dtype) + test = von_neumann_entropy( + state, base=base, check_hermitian=check_hermitian, backend=backend + ) + with pytest.raises(ValueError): + state = np.array([1.0, 0.0]) + state = backend.cast(state, dtype=state.dtype) + test = von_neumann_entropy( + state, base=0, check_hermitian=check_hermitian, backend=backend + ) + with pytest.raises(TypeError): + state = np.array([1.0, 0.0]) + state = backend.cast(state, dtype=state.dtype) + test = von_neumann_entropy( + state, base=base, check_hermitian="False", backend=backend + ) + + if backend.__class__.__name__ in ["CupyBackend", "CuQuantumBackend"]: + with pytest.raises(NotImplementedError): + state = random_unitary(4, backend=backend) + test = von_neumann_entropy( + state, base=base, check_hermitian=True, backend=backend + ) + else: + state = random_unitary(4, backend=backend) + test = von_neumann_entropy( + state, base=base, check_hermitian=True, backend=backend + ) + + state = np.array([1.0, 0.0]) + state = backend.cast(state, dtype=state.dtype) + backend.assert_allclose( + von_neumann_entropy(state, backend=backend), 0.0, atol=PRECISION_TOL + ) + + state = np.array([1.0, 0.0, 0.0, 0.0]) + state = np.outer(state, state) + state = backend.cast(state, dtype=state.dtype) + + nqubits = 2 + state = backend.identity_density_matrix(nqubits) + if base == 2: + test = 2.0 + elif base == 10: + test = 0.6020599913279624 + elif base == np.e: + test = 1.3862943611198906 + else: + test = 0.8613531161467861 + + backend.assert_allclose( + von_neumann_entropy( + state, base, check_hermitian=check_hermitian, backend=backend + ), + test, + ) + + +@pytest.mark.parametrize("check_hermitian", [False, True]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +def test_relative_entropy(backend, base, check_hermitian): + with pytest.raises(TypeError): + state = np.random.rand(2, 3) + state = backend.cast(state, dtype=state.dtype) + target = random_density_matrix(2, pure=True, backend=backend) + test = relative_von_neumann_entropy( + state, target, base=base, check_hermitian=check_hermitian, backend=backend + ) + with pytest.raises(TypeError): + target = np.random.rand(2, 3) + target = backend.cast(target, dtype=target.dtype) + state = random_density_matrix(2, pure=True, backend=backend) + test = relative_von_neumann_entropy( + state, target, base=base, check_hermitian=check_hermitian, backend=backend + ) + with pytest.raises(ValueError): + state = np.array([1.0, 0.0]) + state = backend.cast(state, dtype=state.dtype) + target = np.array([0.0, 1.0]) + target = backend.cast(target, dtype=target.dtype) + test = relative_von_neumann_entropy( + state, target, base=0, check_hermitian=check_hermitian, backend=backend + ) + with pytest.raises(TypeError): + state = np.array([1.0, 0.0]) + state = backend.cast(state, dtype=state.dtype) + target = np.array([0.0, 1.0]) + target = backend.cast(target, 
dtype=target.dtype) + test = relative_von_neumann_entropy( + state, target, base=base, check_hermitian="False", backend=backend + ) + + nqubits = 2 + dims = 2**nqubits + + state = random_density_matrix(dims, backend=backend) + target = backend.identity_density_matrix(nqubits, normalize=True) + + backend.assert_allclose( + relative_von_neumann_entropy(state, target, base, check_hermitian, backend), + np.log(dims) / np.log(base) + - von_neumann_entropy( + state, base=base, check_hermitian=check_hermitian, backend=backend + ), + atol=1e-5, + ) + + state = backend.cast([1.0, 0.0], dtype=np.float64) + target = backend.cast([0.0, 1.0], dtype=np.float64) + + assert relative_von_neumann_entropy(state, target, backend=backend) == 0.0 + + # for coverage when GPUs are present + if backend.__class__.__name__ in ["CupyBackend", "CuQuantumBackend"]: + with pytest.raises(NotImplementedError): + state = random_unitary(4, backend=backend) + target = random_density_matrix(4, backend=backend) + test = relative_von_neumann_entropy( + state, target, base=base, check_hermitian=True, backend=backend + ) + with pytest.raises(NotImplementedError): + target = random_unitary(4, backend=backend) + state = random_density_matrix(4, backend=backend) + test = relative_von_neumann_entropy( + state, target, base=base, check_hermitian=True, backend=backend + ) + else: + state = random_unitary(4, backend=backend) + target = random_unitary(4, backend=backend) + test = relative_von_neumann_entropy( + state, target, base=base, check_hermitian=True, backend=backend + ) + + +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf]) +def test_renyi_entropy(backend, alpha, base): + with pytest.raises(TypeError): + state = np.random.rand(2, 3) + state = backend.cast(state, dtype=state.dtype) + test = renyi_entropy(state, alpha=alpha, base=base, backend=backend) + with pytest.raises(TypeError): + state = random_statevector(4, backend=backend) + test = renyi_entropy(state, alpha="2", base=base, backend=backend) + with pytest.raises(ValueError): + state = random_statevector(4, backend=backend) + test = renyi_entropy(state, alpha=-1, base=base, backend=backend) + with pytest.raises(ValueError): + state = random_statevector(4, backend=backend) + test = renyi_entropy(state, alpha=alpha, base=0, backend=backend) + + state = random_density_matrix(4, backend=backend) + + if alpha == 0.0: + target = np.log2(len(state)) / np.log2(base) + elif alpha == 1.0: + target = von_neumann_entropy(state, base=base, backend=backend) + elif alpha == np.inf: + target = backend.calculate_norm_density_matrix(state, order=2) + target = -1 * np.log2(target) / np.log2(base) + else: + target = np.log2(np.trace(np.linalg.matrix_power(state, alpha))) + target = (1 / (1 - alpha)) * target / np.log2(base) + + backend.assert_allclose( + renyi_entropy(state, alpha=alpha, base=base, backend=backend), target, atol=1e-5 + ) + + # test pure state + state = random_density_matrix(4, pure=True, backend=backend) + backend.assert_allclose( + renyi_entropy(state, alpha=alpha, base=base, backend=backend), 0.0, atol=1e-8 + ) + + +@pytest.mark.parametrize( + ["state_flag", "target_flag"], [[True, True], [False, True], [True, False]] +) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("alpha", [0, 1, 2, 3, 5.4, np.inf]) +def test_relative_renyi_entropy(backend, alpha, base, state_flag, target_flag): + with pytest.raises(TypeError): + state = np.random.rand(2, 3) + state = backend.cast(state, 
dtype=state.dtype) + target = random_density_matrix(4, backend=backend) + test = relative_renyi_entropy( + state, target, alpha=alpha, base=base, backend=backend + ) + with pytest.raises(TypeError): + target = np.random.rand(2, 3) + target = backend.cast(target, dtype=target.dtype) + state = random_density_matrix(4, backend=backend) + test = relative_renyi_entropy( + state, target, alpha=alpha, base=base, backend=backend + ) + with pytest.raises(TypeError): + state = random_statevector(4, backend=backend) + target = random_statevector(4, backend=backend) + test = relative_renyi_entropy( + state, target, alpha="2", base=base, backend=backend + ) + with pytest.raises(ValueError): + state = random_statevector(4, backend=backend) + target = random_statevector(4, backend=backend) + test = relative_renyi_entropy( + state, target, alpha=-1, base=base, backend=backend + ) + with pytest.raises(ValueError): + state = random_statevector(4, backend=backend) + target = random_statevector(4, backend=backend) + test = relative_renyi_entropy( + state, target, alpha=alpha, base=0, backend=backend + ) + + state = ( + random_statevector(4, backend=backend) + if state_flag + else random_density_matrix(4, backend=backend) + ) + target = ( + random_statevector(4, backend=backend) + if target_flag + else random_density_matrix(4, backend=backend) + ) + + if state_flag and target_flag: + backend.assert_allclose( + relative_renyi_entropy(state, target, alpha, base, backend), 0.0, atol=1e-5 + ) + else: + if target_flag and alpha > 1: + with pytest.raises(NotImplementedError): + relative_renyi_entropy(state, target, alpha, base, backend) + else: + if alpha == 1.0: + log = relative_von_neumann_entropy(state, target, base, backend=backend) + elif alpha == np.inf: + new_state = _matrix_power(state, 0.5, backend) + new_target = _matrix_power(target, 0.5, backend) + + log = np.log2( + backend.calculate_norm_density_matrix( + new_state @ new_target, order=1 + ) + ) + + log = -2 * log / np.log2(base) + else: + if len(state.shape) == 1: + state = np.outer(state, np.conj(state)) + + if len(target.shape) == 1: + target = np.outer(target, np.conj(target)) + + log = _matrix_power(state, alpha, backend) + log = log @ _matrix_power(target, 1 - alpha, backend) + log = np.log2(np.trace(log)) + + log = (1 / (alpha - 1)) * log / np.log2(base) + + backend.assert_allclose( + relative_renyi_entropy( + state, target, alpha=alpha, base=base, backend=backend + ), + log, + atol=1e-5, + ) + + # test pure states + state = random_density_matrix(4, pure=True, backend=backend) + target = random_density_matrix(4, pure=True, backend=backend) + backend.assert_allclose( + relative_renyi_entropy(state, target, alpha=alpha, base=base, backend=backend), + 0.0, + atol=1e-8, + ) + + +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("alpha", [0, 1, 2, 3, 5.4]) +def test_tsallis_entropy(backend, alpha, base): + with pytest.raises(TypeError): + state = np.random.rand(2, 3) + state = backend.cast(state, dtype=state.dtype) + test = tsallis_entropy(state, alpha=alpha, base=base, backend=backend) + with pytest.raises(TypeError): + state = random_statevector(4, backend=backend) + test = tsallis_entropy(state, alpha="2", base=base, backend=backend) + with pytest.raises(ValueError): + state = random_statevector(4, backend=backend) + test = tsallis_entropy(state, alpha=-1, base=base, backend=backend) + with pytest.raises(ValueError): + state = random_statevector(4, backend=backend) + test = tsallis_entropy(state, alpha=alpha, base=0, 
backend=backend) + + state = random_density_matrix(4, backend=backend) + + if alpha == 1.0: + target = von_neumann_entropy(state, base=base, backend=backend) + else: + target = (1 / (1 - alpha)) * ( + np.trace(_matrix_power(state, alpha, backend)) - 1 + ) + + backend.assert_allclose( + tsallis_entropy(state, alpha=alpha, base=base, backend=backend), + target, + atol=1e-5, + ) + + # test pure state + state = random_density_matrix(4, pure=True, backend=backend) + backend.assert_allclose( + tsallis_entropy(state, alpha=alpha, base=base, backend=backend), 0.0, atol=1e-5 + ) + + +@pytest.mark.parametrize("check_hermitian", [False, True]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("bipartition", [[0], [1]]) +def test_entanglement_entropy(backend, bipartition, base, check_hermitian): + with pytest.raises(TypeError): + state = np.random.rand(2, 3) + state = backend.cast(state, dtype=state.dtype) + test = entanglement_entropy( + state, + bipartition=bipartition, + base=base, + check_hermitian=check_hermitian, + backend=backend, + ) + with pytest.raises(ValueError): + state = np.array([1.0, 0.0]) + state = backend.cast(state, dtype=state.dtype) + test = entanglement_entropy( + state, + bipartition=bipartition, + base=0, + check_hermitian=check_hermitian, + backend=backend, + ) + if backend.__class__.__name__ == "CupyBackend": + with pytest.raises(NotImplementedError): + state = random_unitary(4, backend=backend) + test = entanglement_entropy( + state, + bipartition=bipartition, + base=base, + check_hermitian=True, + backend=backend, + ) + + # Bell state + state = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2) + state = backend.cast(state, dtype=state.dtype) + + entang_entrop = entanglement_entropy( + state, + bipartition=bipartition, + base=base, + check_hermitian=check_hermitian, + backend=backend, + ) + + if base == 2: + test = 1.0 + elif base == 10: + test = 0.30102999566398125 + elif base == np.e: + test = 0.6931471805599454 + else: + test = 0.4306765580733931 + + backend.assert_allclose(entang_entrop, test, atol=PRECISION_TOL) + + # Product state + state = np.kron( + random_statevector(2, backend=backend), random_statevector(2, backend=backend) + ) + + entang_entrop = entanglement_entropy( + state, + bipartition=bipartition, + base=base, + check_hermitian=check_hermitian, + backend=backend, + ) + + backend.assert_allclose(entang_entrop, 0.0, atol=PRECISION_TOL) diff --git a/tests/test_quantum_info_metrics.py b/tests/test_quantum_info_metrics.py index 0e7b86f21b..793bb510a5 100644 --- a/tests/test_quantum_info_metrics.py +++ b/tests/test_quantum_info_metrics.py @@ -7,13 +7,7 @@ average_gate_fidelity, bures_angle, bures_distance, - concurrence, diamond_norm, - entanglement_entropy, - entanglement_fidelity, - entanglement_of_formation, - entangling_capability, - entropy, expressibility, fidelity, frame_potential, @@ -21,7 +15,6 @@ hilbert_schmidt_distance, impurity, infidelity, - meyer_wallach_entanglement, process_fidelity, process_infidelity, purity, @@ -30,7 +23,6 @@ from qibo.quantum_info.random_ensembles import ( random_density_matrix, random_hermitian, - random_statevector, random_unitary, ) from qibo.quantum_info.superoperator_transformations import to_choi @@ -59,183 +51,6 @@ def test_purity_and_impurity(backend): backend.assert_allclose(impurity(state), 1.0 - 1.0 / dim, atol=PRECISION_TOL) -@pytest.mark.parametrize("check_purity", [True, False]) -@pytest.mark.parametrize("base", [2, 10, np.e, 5]) -@pytest.mark.parametrize("bipartition", [[0], [1]]) -def 
test_concurrence_and_formation(backend, bipartition, base, check_purity): - with pytest.raises(TypeError): - state = np.random.rand(2, 3) - state = backend.cast(state, dtype=state.dtype) - test = concurrence( - state, bipartition=bipartition, check_purity=check_purity, backend=backend - ) - with pytest.raises(TypeError): - state = random_statevector(4, backend=backend) - test = concurrence( - state, bipartition=bipartition, check_purity="True", backend=backend - ) - - if check_purity is True: - with pytest.raises(NotImplementedError): - state = backend.identity_density_matrix(2, normalize=False) - test = concurrence(state, bipartition=bipartition, backend=backend) - - nqubits = 2 - dim = 2**nqubits - state = random_statevector(dim, backend=backend) - concur = concurrence( - state, bipartition=bipartition, check_purity=check_purity, backend=backend - ) - ent_form = entanglement_of_formation( - state, - bipartition=bipartition, - base=base, - check_purity=check_purity, - backend=backend, - ) - backend.assert_allclose(0.0 <= concur <= np.sqrt(2), True) - backend.assert_allclose(0.0 <= ent_form <= 1.0, True) - - state = np.kron( - random_density_matrix(2, pure=True, backend=backend), - random_density_matrix(2, pure=True, backend=backend), - ) - concur = concurrence(state, bipartition, check_purity=check_purity, backend=backend) - ent_form = entanglement_of_formation( - state, - bipartition=bipartition, - base=base, - check_purity=check_purity, - backend=backend, - ) - backend.assert_allclose(concur, 0.0, atol=10 * PRECISION_TOL) - backend.assert_allclose(ent_form, 0.0, atol=PRECISION_TOL) - - -@pytest.mark.parametrize("check_hermitian", [False, True]) -@pytest.mark.parametrize("base", [2, 10, np.e, 5]) -def test_entropy(backend, base, check_hermitian): - with pytest.raises(TypeError): - state = np.random.rand(2, 3) - state = backend.cast(state, dtype=state.dtype) - test = entropy( - state, base=base, check_hermitian=check_hermitian, backend=backend - ) - with pytest.raises(ValueError): - state = np.array([1.0, 0.0]) - state = backend.cast(state, dtype=state.dtype) - test = entropy(state, base=0, check_hermitian=check_hermitian, backend=backend) - with pytest.raises(TypeError): - state = np.array([1.0, 0.0]) - state = backend.cast(state, dtype=state.dtype) - test = entropy(state, base=base, check_hermitian="False", backend=backend) - - if backend.__class__.__name__ in ["CupyBackend", "CuQuantumBackend"]: - with pytest.raises(NotImplementedError): - state = random_unitary(4, backend=backend) - test = entropy(state, base=base, check_hermitian=True, backend=backend) - else: - state = random_unitary(4, backend=backend) - test = entropy(state, base=base, check_hermitian=True, backend=backend) - - state = np.array([1.0, 0.0]) - state = backend.cast(state, dtype=state.dtype) - backend.assert_allclose(entropy(state, backend=backend), 0.0, atol=PRECISION_TOL) - - state = np.array([1.0, 0.0, 0.0, 0.0]) - state = np.outer(state, state) - state = backend.cast(state, dtype=state.dtype) - - nqubits = 2 - state = backend.identity_density_matrix(nqubits) - if base == 2: - test = 2.0 - elif base == 10: - test = 0.6020599913279624 - elif base == np.e: - test = 1.3862943611198906 - else: - test = 0.8613531161467861 - - backend.assert_allclose( - entropy(state, base, check_hermitian=check_hermitian, backend=backend), test - ) - - -@pytest.mark.parametrize("check_hermitian", [False, True]) -@pytest.mark.parametrize("base", [2, 10, np.e, 5]) -@pytest.mark.parametrize("bipartition", [[0], [1]]) -def 
test_entanglement_entropy(backend, bipartition, base, check_hermitian): - with pytest.raises(TypeError): - state = np.random.rand(2, 3) - state = backend.cast(state, dtype=state.dtype) - test = entanglement_entropy( - state, - bipartition=bipartition, - base=base, - check_hermitian=check_hermitian, - backend=backend, - ) - with pytest.raises(ValueError): - state = np.array([1.0, 0.0]) - state = backend.cast(state, dtype=state.dtype) - test = entanglement_entropy( - state, - bipartition=bipartition, - base=0, - check_hermitian=check_hermitian, - backend=backend, - ) - if backend.__class__.__name__ == "CupyBackend": - with pytest.raises(NotImplementedError): - state = random_unitary(4, backend=backend) - test = entanglement_entropy( - state, - bipartition=bipartition, - base=base, - check_hermitian=True, - backend=backend, - ) - - # Bell state - state = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2) - state = backend.cast(state, dtype=state.dtype) - - entang_entrop = entanglement_entropy( - state, - bipartition=bipartition, - base=base, - check_hermitian=check_hermitian, - backend=backend, - ) - - if base == 2: - test = 1.0 - elif base == 10: - test = 0.30102999566398125 - elif base == np.e: - test = 0.6931471805599454 - else: - test = 0.4306765580733931 - - backend.assert_allclose(entang_entrop, test, atol=PRECISION_TOL) - - # Product state - state = np.kron( - random_statevector(2, backend=backend), random_statevector(2, backend=backend) - ) - - entang_entrop = entanglement_entropy( - state, - bipartition=bipartition, - base=base, - check_hermitian=check_hermitian, - backend=backend, - ) - - backend.assert_allclose(entang_entrop, 0.0, atol=PRECISION_TOL) - - @pytest.mark.parametrize("check_hermitian", [False, True]) def test_trace_distance(backend, check_hermitian): with pytest.raises(TypeError): @@ -445,65 +260,6 @@ def test_fidelity_and_infidelity_and_bures(backend, check_hermitian): test = fidelity(state, target, check_hermitian=True, backend=backend) -@pytest.mark.parametrize("check_hermitian", [False, True]) -@pytest.mark.parametrize("nqubits", [4, 6]) -@pytest.mark.parametrize("channel", [gates.DepolarizingChannel]) -def test_entanglement_fidelity(backend, channel, nqubits, check_hermitian): - with pytest.raises(TypeError): - test = entanglement_fidelity( - channel, nqubits=[0], check_hermitian=check_hermitian, backend=backend - ) - with pytest.raises(ValueError): - test = entanglement_fidelity( - channel, nqubits=0, check_hermitian=check_hermitian, backend=backend - ) - with pytest.raises(TypeError): - state = np.random.rand(2, 3, 2) - state = backend.cast(state, dtype=state.dtype) - test = entanglement_fidelity( - channel, - nqubits, - state=state, - check_hermitian=check_hermitian, - backend=backend, - ) - with pytest.raises(TypeError): - state = random_statevector(2, backend=backend) - test = entanglement_fidelity( - channel, nqubits, state=state, check_hermitian="False", backend=backend - ) - - channel = channel([0, 1], 0.5) - - # test on maximally entangled state - ent_fid = entanglement_fidelity( - channel, nqubits=nqubits, check_hermitian=check_hermitian, backend=backend - ) - backend.assert_allclose(ent_fid, 0.625, atol=PRECISION_TOL) - - # test with a state vector - state = backend.plus_state(nqubits) - ent_fid = entanglement_fidelity( - channel, - nqubits=nqubits, - state=state, - check_hermitian=check_hermitian, - backend=backend, - ) - backend.assert_allclose(ent_fid, 0.625, atol=PRECISION_TOL) - - # test on maximally mixed state - state = 
backend.identity_density_matrix(nqubits) - ent_fid = entanglement_fidelity( - channel, - nqubits=nqubits, - state=state, - check_hermitian=check_hermitian, - backend=backend, - ) - backend.assert_allclose(ent_fid, 1.0, atol=PRECISION_TOL) - - def test_process_fidelity_and_infidelity(backend): d = 2 with pytest.raises(TypeError): @@ -572,56 +328,6 @@ def test_diamond_norm(backend, nqubits): backend.assert_allclose(dnorm, 0.0, atol=PRECISION_TOL) -def test_meyer_wallach_entanglement(backend): - nqubits = 2 - - circuit1 = Circuit(nqubits) - circuit1.add([gates.RX(0, np.pi / 4)] for _ in range(nqubits)) - - circuit2 = Circuit(nqubits) - circuit2.add([gates.RX(0, np.pi / 4)] for _ in range(nqubits)) - circuit2.add(gates.CNOT(0, 1)) - - backend.assert_allclose( - meyer_wallach_entanglement(circuit1, backend=backend), 0.0, atol=PRECISION_TOL - ) - - backend.assert_allclose( - meyer_wallach_entanglement(circuit2, backend=backend), 0.5, atol=PRECISION_TOL - ) - - -@pytest.mark.parametrize("seed", [None, 10, np.random.default_rng(10)]) -def test_entangling_capability(backend, seed): - with pytest.raises(TypeError): - circuit = Circuit(1) - samples = 0.5 - entangling_capability(circuit, samples, seed=seed, backend=backend) - with pytest.raises(TypeError): - circuit = Circuit(1) - entangling_capability(circuit, samples=10, seed="10", backend=backend) - - nqubits = 2 - samples = 100 - - c1 = Circuit(nqubits) - c1.add([gates.RX(q, 0, trainable=True) for q in range(nqubits)]) - c1.add(gates.CNOT(0, 1)) - c1.add([gates.RX(q, 0, trainable=True) for q in range(nqubits)]) - ent_mw1 = entangling_capability(c1, samples, seed=seed, backend=backend) - - c2 = Circuit(nqubits) - c2.add(gates.H(0)) - c2.add(gates.CNOT(0, 1)) - c2.add(gates.RX(0, 0, trainable=True)) - ent_mw2 = entangling_capability(c2, samples, seed=seed, backend=backend) - - c3 = Circuit(nqubits) - ent_mw3 = entangling_capability(c3, samples, seed=seed, backend=backend) - - backend.assert_allclose(ent_mw3 < ent_mw1 < ent_mw2, True) - - def test_expressibility(backend): with pytest.raises(TypeError): circuit = Circuit(1) diff --git a/tests/test_quantum_info_utils.py b/tests/test_quantum_info_utils.py index fa121d1eb5..049f800275 100644 --- a/tests/test_quantum_info_utils.py +++ b/tests/test_quantum_info_utils.py @@ -15,8 +15,6 @@ hellinger_distance, hellinger_fidelity, pqc_integral, - shannon_entropy, - total_variation_distance, ) @@ -124,95 +122,6 @@ def test_hadamard_transform(backend, nqubits, implementation, is_matrix): backend.assert_allclose(transformed, test_transformed, atol=PRECISION_TOL) -def test_shannon_entropy_errors(backend): - with pytest.raises(ValueError): - prob = np.array([1.0, 0.0]) - prob = backend.cast(prob, dtype=prob.dtype) - test = shannon_entropy(prob, -2, backend=backend) - with pytest.raises(TypeError): - prob = np.array([[1.0], [0.0]]) - prob = backend.cast(prob, dtype=prob.dtype) - test = shannon_entropy(prob, backend=backend) - with pytest.raises(TypeError): - prob = np.array([]) - prob = backend.cast(prob, dtype=prob.dtype) - test = shannon_entropy(prob, backend=backend) - with pytest.raises(ValueError): - prob = np.array([1.0, -1.0]) - prob = backend.cast(prob, dtype=prob.dtype) - test = shannon_entropy(prob, backend=backend) - with pytest.raises(ValueError): - prob = np.array([1.1, 0.0]) - prob = backend.cast(prob, dtype=prob.dtype) - test = shannon_entropy(prob, backend=backend) - with pytest.raises(ValueError): - prob = np.array([0.5, 0.4999999]) - prob = backend.cast(prob, dtype=prob.dtype) - test = 
shannon_entropy(prob, backend=backend) - - -@pytest.mark.parametrize("base", [2, 10, np.e, 5]) -def test_shannon_entropy(backend, base): - prob_array = [1.0, 0.0] - result = shannon_entropy(prob_array, base, backend=backend) - backend.assert_allclose(result, 0.0) - - if base == 2: - prob_array = np.array([0.5, 0.5]) - prob_array = backend.cast(prob_array, dtype=prob_array.dtype) - result = shannon_entropy(prob_array, base, backend=backend) - backend.assert_allclose(result, 1.0) - - -@pytest.mark.parametrize("kind", [None, list]) -@pytest.mark.parametrize("validate", [False, True]) -def test_total_variation_distance(backend, validate, kind): - with pytest.raises(TypeError): - prob = np.random.rand(1, 2) - prob_q = np.random.rand(1, 5) - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = total_variation_distance(prob, prob_q, backend=backend) - with pytest.raises(TypeError): - prob = np.random.rand(1, 2)[0] - prob_q = np.array([]) - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = total_variation_distance(prob, prob_q, backend=backend) - with pytest.raises(ValueError): - prob = np.array([-1, 2.0]) - prob_q = np.random.rand(1, 5)[0] - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = total_variation_distance(prob, prob_q, validate=True, backend=backend) - with pytest.raises(ValueError): - prob = np.random.rand(1, 2)[0] - prob_q = np.array([1.0, 0.0]) - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = total_variation_distance(prob, prob_q, validate=True, backend=backend) - with pytest.raises(ValueError): - prob = np.array([1.0, 0.0]) - prob_q = np.random.rand(1, 2)[0] - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = total_variation_distance(prob, prob_q, validate=True, backend=backend) - - prob_p = np.random.rand(10) - prob_q = np.random.rand(10) - prob_p /= np.sum(prob_p) - prob_q /= np.sum(prob_q) - - target = 0.5 * np.sum(np.abs(prob_p - prob_q)) - - if kind is not None: - prob_p, prob_q = kind(prob_p), kind(prob_q) - - distance = total_variation_distance(prob_p, prob_q, validate, backend=backend) - - backend.assert_allclose(distance, target, atol=1e-5) - - @pytest.mark.parametrize("kind", [None, list]) @pytest.mark.parametrize("validate", [False, True]) def test_hellinger(backend, validate, kind):