diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst
index 964267ce4c..936ff6ba90 100644
--- a/doc/source/api-reference/qibo.rst
+++ b/doc/source/api-reference/qibo.rst
@@ -1271,7 +1271,7 @@ Callbacks
 Callbacks provide a way to calculate quantities on the state vector as it
 propagates through the circuit. Example of such quantity is the entanglement
 entropy, which is currently the only callback implemented in
-:class:`qibo.callbacks.Entanglemententropy`.
+:class:`qibo.callbacks.EntanglementEntropy`.
 The user can create custom callbacks by inheriting the
 :class:`qibo.callbacks.Callback` class. The point each callback is
 calculated inside the circuit is defined by adding a :class:`qibo.gates.CallbackGate`.
@@ -1284,7 +1284,7 @@ This can be added similarly to a standard gate and does not affect the state vec
 Entanglement entropy
 ^^^^^^^^^^^^^^^^^^^^
 
-.. autoclass:: qibo.callbacks.Entanglemententropy
+.. autoclass:: qibo.callbacks.EntanglementEntropy
     :members:
     :member-order: bysource
 
@@ -1555,6 +1555,12 @@ Classical Rényi relative entropy
 .. autofunction:: qibo.quantum_info.classical_relative_renyi_entropy
 
 
+Classical Tsallis entropy
+"""""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.classical_tsallis_entropy
+
+
 Entropy
 """""""
 
@@ -1594,6 +1600,12 @@ Rényi relative entropy
 .. autofunction:: qibo.quantum_info.renyi_relative_entropy
 
 
+Tsallis entropy
+"""""""""""""""
+
+.. autofunction:: qibo.quantum_info.tsallis_entropy
+
+
 Entanglement entropy
 """"""""""""""""""""
 
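A note before the implementation hunks: the docstring of `classical_tsallis_entropy` below states the Tsallis entropy with a :math:`1/(\alpha - 1)` prefactor, while the `return` statement evaluates the sign-flipped form. The two are algebraically identical,

```latex
\frac{1}{\alpha - 1} \left( 1 - \sum_{x} \mathbf{p}^{\alpha}(x) \right)
    = \frac{1}{1 - \alpha} \left( \sum_{x} \mathbf{p}^{\alpha}(x) - 1 \right) ,
```

so the code agrees with the documented definition.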
and 1..", + ) + + if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Probability array must sum to 1.") + + if alpha == 1.0: + return shannon_entropy(prob_dist, base=base, backend=backend) + + if isinstance(prob_dist, list): + prob_dist = backend.cast(prob_dist, dtype=np.float64) + + return (1 / (1 - alpha)) * (np.sum(prob_dist**alpha) - 1) + + def entropy( state, base: float = 2, @@ -738,6 +804,61 @@ def relative_renyi_entropy( return (1 / (alpha - 1)) * log / np.log2(base) +def tsallis_entropy(state, alpha: float, base: float = 2, backend=None): + """Calculates the Tsallis entropy of a quantum state. + + .. math:: + S_{\\alpha}(\\rho) = \\frac{1}{1 - \\alpha} \\, + \\left( \\text{tr}(\\rho^{\\alpha}) - 1 \\right) + + When :math:`\\alpha = 1`, the functions defaults to + :func:`qibo.quantum_info.entropies.entropy`. + + Args: + state (ndarray): statevector or density matrix. + alpha (float or int): entropic index. + base (float, optional): the base of the log. Used when ``alpha=1.0``. + Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used + in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Tsallis entropy :math:`S_{\\alpha}(\\rho)`. + """ + if backend is None: # pragma: no cover + backend = GlobalBackend() + + if ( + (len(state.shape) >= 3) + or (len(state) == 0) + or (len(state.shape) == 2 and state.shape[0] != state.shape[1]) + ): + raise_error( + TypeError, + f"state must have dims either (k,) or (k,k), but have dims {state.shape}.", + ) + + if not isinstance(alpha, (float, int)): + raise_error( + TypeError, f"alpha must be type float, but it is type {type(alpha)}." + ) + + if alpha < 0.0: + raise_error(ValueError, "alpha must a non-negative float.") + + if base <= 0.0: + raise_error(ValueError, "log base must be non-negative.") + + if abs(purity(state) - 1.0) < PRECISION_TOL: + return 0.0 + + if alpha == 1.0: + return entropy(state, base=base, backend=backend) + + return (1 / (1 - alpha)) * (np.trace(np.linalg.matrix_power(state, alpha)) - 1) + + def entanglement_entropy( state, bipartition, diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py index bc2ec4abab..0bcca2e087 100644 --- a/tests/test_quantum_info_entropies.py +++ b/tests/test_quantum_info_entropies.py @@ -7,12 +7,14 @@ classical_relative_entropy, classical_relative_renyi_entropy, classical_renyi_entropy, + classical_tsallis_entropy, entanglement_entropy, entropy, relative_entropy, relative_renyi_entropy, renyi_entropy, shannon_entropy, + tsallis_entropy, ) from qibo.quantum_info.random_ensembles import ( random_density_matrix, @@ -118,6 +120,69 @@ def test_classical_relative_entropy(backend, base, kind): backend.assert_allclose(divergence, target, atol=1e-5) +@pytest.mark.parametrize("kind", [None, list]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf]) +def test_classical_renyi_entropy(backend, alpha, base, kind): + with pytest.raises(TypeError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha="2", backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = classical_renyi_entropy(prob, alpha=-2, backend=backend) + with pytest.raises(TypeError): + prob = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + test = 
diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py
index bc2ec4abab..0bcca2e087 100644
--- a/tests/test_quantum_info_entropies.py
+++ b/tests/test_quantum_info_entropies.py
@@ -7,12 +7,14 @@
     classical_relative_entropy,
     classical_relative_renyi_entropy,
     classical_renyi_entropy,
+    classical_tsallis_entropy,
     entanglement_entropy,
     entropy,
     relative_entropy,
     relative_renyi_entropy,
     renyi_entropy,
     shannon_entropy,
+    tsallis_entropy,
 )
 from qibo.quantum_info.random_ensembles import (
     random_density_matrix,
@@ -118,6 +120,69 @@ def test_classical_relative_entropy(backend, base, kind):
     backend.assert_allclose(divergence, target, atol=1e-5)
 
 
+@pytest.mark.parametrize("kind", [None, list])
+@pytest.mark.parametrize("base", [2, 10, np.e, 5])
+@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf])
+def test_classical_renyi_entropy(backend, alpha, base, kind):
+    with pytest.raises(TypeError):
+        prob = np.array([1.0, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha="2", backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([1.0, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha=-2, backend=backend)
+    with pytest.raises(TypeError):
+        prob = np.array([1.0, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, base="2", backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([1.0, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, base=-2, backend=backend)
+    with pytest.raises(TypeError):
+        prob = np.array([[1.0], [0.0]])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+    with pytest.raises(TypeError):
+        prob = np.array([])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([1.0, -1.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([1.1, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([0.5, 0.4999999])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+
+    prob_dist = np.random.rand(10)
+    prob_dist /= np.sum(prob_dist)
+
+    if alpha == 0.0:
+        target = np.log2(len(prob_dist)) / np.log2(base)
+    elif alpha == 1:
+        target = shannon_entropy(prob_dist, base=base, backend=backend)
+    elif alpha == 2:
+        target = -1 * np.log2(np.sum(prob_dist**2)) / np.log2(base)
+    elif alpha == np.inf:
+        target = -1 * np.log2(max(prob_dist)) / np.log2(base)
+    else:
+        target = (1 / (1 - alpha)) * np.log2(np.sum(prob_dist**alpha)) / np.log2(base)
+
+    if kind is not None:
+        prob_dist = kind(prob_dist)
+
+    renyi_ent = classical_renyi_entropy(prob_dist, alpha, base=base, backend=backend)
+
+    backend.assert_allclose(renyi_ent, target, atol=1e-5)
+
+
 @pytest.mark.parametrize("kind", [None, list])
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
 @pytest.mark.parametrize("alpha", [0, 1 / 2, 1, 2, 3, np.inf])
@@ -217,65 +282,61 @@ def test_classical_relative_renyi_entropy(backend, alpha, base, kind):
 
 @pytest.mark.parametrize("kind", [None, list])
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
-@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf])
-def test_classical_renyi_entropy(backend, alpha, base, kind):
+@pytest.mark.parametrize("alpha", [0, 1, 2, 3])
+def test_classical_tsallis_entropy(backend, alpha, base, kind):
     with pytest.raises(TypeError):
         prob = np.array([1.0, 0.0])
         prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha="2", backend=backend)
+        test = classical_tsallis_entropy(prob, alpha="2", backend=backend)
     with pytest.raises(ValueError):
         prob = np.array([1.0, 0.0])
         prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha=-2, backend=backend)
+        test = classical_tsallis_entropy(prob, alpha=-2, backend=backend)
     with pytest.raises(TypeError):
         prob = np.array([1.0, 0.0])
        prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha, base="2", backend=backend)
+        test = classical_tsallis_entropy(prob, alpha, base="2", backend=backend)
     with pytest.raises(ValueError):
         prob = np.array([1.0, 0.0])
         prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha, base=-2, backend=backend)
+        test = classical_tsallis_entropy(prob, alpha, base=-2, backend=backend)
     with pytest.raises(TypeError):
         prob = np.array([[1.0], [0.0]])
         prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha, backend=backend)
+        test = classical_tsallis_entropy(prob, alpha, backend=backend)
     with pytest.raises(TypeError):
         prob = np.array([])
         prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha, backend=backend)
+        test = classical_tsallis_entropy(prob, alpha, backend=backend)
     with pytest.raises(ValueError):
         prob = np.array([1.0, -1.0])
         prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha, backend=backend)
+        test = classical_tsallis_entropy(prob, alpha, backend=backend)
     with pytest.raises(ValueError):
         prob = np.array([1.1, 0.0])
         prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha, backend=backend)
+        test = classical_tsallis_entropy(prob, alpha, backend=backend)
     with pytest.raises(ValueError):
         prob = np.array([0.5, 0.4999999])
         prob = backend.cast(prob, dtype=prob.dtype)
-        test = classical_renyi_entropy(prob, alpha, backend=backend)
+        test = classical_tsallis_entropy(prob, alpha, backend=backend)
 
     prob_dist = np.random.rand(10)
     prob_dist /= np.sum(prob_dist)
 
-    if alpha == 0.0:
-        target = np.log2(len(prob_dist)) / np.log2(base)
-    elif alpha == 1:
+    if alpha == 1.0:
         target = shannon_entropy(prob_dist, base=base, backend=backend)
-    elif alpha == 2:
-        target = -1 * np.log2(np.sum(prob_dist**2)) / np.log2(base)
-    elif alpha == np.inf:
-        target = -1 * np.log2(max(prob_dist)) / np.log2(base)
     else:
-        target = (1 / (1 - alpha)) * np.log2(np.sum(prob_dist**alpha)) / np.log2(base)
+        target = (1 / (1 - alpha)) * (np.sum(prob_dist**alpha) - 1)
 
     if kind is not None:
         prob_dist = kind(prob_dist)
 
-    renyi_ent = classical_renyi_entropy(prob_dist, alpha, base=base, backend=backend)
-
-    backend.assert_allclose(renyi_ent, target, atol=1e-5)
+    backend.assert_allclose(
+        classical_tsallis_entropy(prob_dist, alpha=alpha, base=base, backend=backend),
+        target,
+        atol=1e-5,
+    )
 
 
 @pytest.mark.parametrize("check_hermitian", [False, True])
@@ -487,7 +548,11 @@ def test_relative_renyi_entropy(backend, alpha, base, state_flag, target_flag):
         if state_flag
         else random_density_matrix(4, backend=backend)
     )
-    target = backend.identity_density_matrix(2, normalize=True)
+    target = (
+        random_statevector(4, backend=backend)
+        if target_flag
+        else random_density_matrix(4, backend=backend)
+    )
 
     if alpha == 1.0:
         log = relative_entropy(state, target, base, backend=backend)
@@ -560,6 +625,39 @@ def test_relative_renyi_entropy(backend, alpha, base, state_flag, target_flag):
     )
 
 
+@pytest.mark.parametrize("base", [2, 10, np.e, 5])
+@pytest.mark.parametrize("alpha", [0, 1, 2, 3])
+def test_tsallis_entropy(backend, alpha, base):
+    with pytest.raises(TypeError):
+        state = np.random.rand(2, 3)
+        state = backend.cast(state, dtype=state.dtype)
+        test = tsallis_entropy(state, alpha=alpha, base=base, backend=backend)
+    with pytest.raises(TypeError):
+        state = random_statevector(4, backend=backend)
+        test = tsallis_entropy(state, alpha="2", base=base, backend=backend)
+    with pytest.raises(ValueError):
+        state = random_statevector(4, backend=backend)
+        test = tsallis_entropy(state, alpha=-1, base=base, backend=backend)
+    with pytest.raises(ValueError):
+        state = random_statevector(4, backend=backend)
+        test = tsallis_entropy(state, alpha=alpha, base=0, backend=backend)
+
+    state = random_density_matrix(4, backend=backend)
+
+    if alpha == 1.0:
+        target = entropy(state, base=base, backend=backend)
+    else:
+        target = (1 / (1 - alpha)) * (
+            np.trace(np.linalg.matrix_power(state, alpha)) - 1
+        )
+
+    backend.assert_allclose(
+        tsallis_entropy(state, alpha=alpha, base=base, backend=backend),
+        target,
+        atol=1e-5,
+    )
+
+
 @pytest.mark.parametrize("check_hermitian", [False, True])
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
 @pytest.mark.parametrize("bipartition", [[0], [1]])
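One caveat on `tsallis_entropy` as implemented above: `np.linalg.matrix_power` accepts only integer exponents, so non-integer `alpha` raises a `TypeError` even though the signature advertises `float` (this is also why the test parametrization sticks to integer `alpha`). If fractional indices are wanted later, one option, sketched here with hypothetical helper names that are not part of this patch, is to take the matrix power through the eigendecomposition, which is valid because a density matrix is Hermitian and positive semidefinite:

```python
import numpy as np


def _fractional_matrix_power(rho, alpha: float):
    """Hypothetical helper: rho**alpha via eigendecomposition (rho Hermitian PSD)."""
    eigenvalues, eigenvectors = np.linalg.eigh(rho)
    # Clip tiny negative eigenvalues introduced by floating-point noise.
    eigenvalues = np.clip(eigenvalues, 0.0, None)
    return eigenvectors @ np.diag(eigenvalues**alpha) @ eigenvectors.conj().T


def _tsallis_entropy_fractional(rho, alpha: float):
    """Hypothetical fractional-alpha Tsallis entropy, for alpha >= 0, alpha != 1."""
    return (np.trace(_fractional_matrix_power(rho, alpha)).real - 1) / (1 - alpha)
```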