From 1815adc31c1a9163f360f6146b9d87cb6cb95a4e Mon Sep 17 00:00:00 2001
From: Renato Mello
Date: Tue, 30 Jan 2024 15:14:34 +0400
Subject: [PATCH 01/10] reorder functions

---
 doc/source/api-reference/qibo.rst    |  12 +-
 src/qibo/quantum_info/entropies.py   | 166 +++++++++++++--------------
 tests/test_quantum_info_entropies.py | 120 +++++++++----------
 3 files changed, 149 insertions(+), 149 deletions(-)

diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst
index 73b3fec67b..fb1c5df5dd 100644
--- a/doc/source/api-reference/qibo.rst
+++ b/doc/source/api-reference/qibo.rst
@@ -1537,6 +1537,12 @@ Shannon entropy
 .. autofunction:: qibo.quantum_info.shannon_entropy
 
 
+Classical Relative Entropy
+""""""""""""""""""""""""""
+
+.. autofunction:: qibo.quantum_info.classical_relative_entropy
+
+
 Entropy
 """""""
 
@@ -1564,12 +1570,6 @@ Relative Entropy
     an error will be raised when using `cupy` backend.
 
 
-Classical Relative Entropy
-""""""""""""""""""""""""""
-
-.. autofunction:: qibo.quantum_info.classical_relative_entropy
-
-
 Entanglement Entropy
 """"""""""""""""""""
 
diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py
index 2f9e0a03a5..058b7e4a15 100644
--- a/src/qibo/quantum_info/entropies.py
+++ b/src/qibo/quantum_info/entropies.py
@@ -74,6 +74,89 @@ def shannon_entropy(probability_array, base: float = 2, backend=None):
     return complex(shan_entropy).real
 
 
+def classical_relative_entropy(
+    prob_dist_p, prob_dist_q, base: float = 2, validate: bool = False, backend=None
+):
+    """Calculates the relative entropy between two discrete probability distributions.
+
+    For probabilities :math:`\\mathbf{p}` and :math:`\\mathbf{q}`, it is defined as
+
+    .. math::
+        D(\\mathbf{p} \\, \\| \\, \\mathbf{q}) = \\sum_{x} \\, \\mathbf{p}(x) \\,
+            \\log\\left( \\frac{\\mathbf{p}(x)}{\\mathbf{q}(x)} \\right) \\, .
+
+    The classical relative entropy is also known as the
+    `Kullback-Leibler (KL) divergence `_.
+
+    Args:
+        prob_dist_p (ndarray or list): discrete probability distribution :math:`p`.
+        prob_dist_q (ndarray or list): discrete probability distribution :math:`q`.
+        base (float): the base of the log. Defaults to :math:`2`.
+        validate (bool, optional): If ``True``, checks if :math:`p` and :math:`q` are proper
+            probability distributions. Defaults to ``False``.
+        backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be
+            used in the execution. If ``None``, it uses
+            :class:`qibo.backends.GlobalBackend`. Defaults to ``None``.
+    Returns:
+        float: Classical relative entropy between :math:`\\mathbf{p}` and :math:`\\mathbf{q}`.
+ """ + if backend is None: # pragma: no cover + backend = GlobalBackend() + + if isinstance(prob_dist_p, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist_p = backend.cast(prob_dist_p, dtype=np.float64) + if isinstance(prob_dist_q, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist_q = backend.cast(prob_dist_q, dtype=np.float64) + + if (len(prob_dist_p.shape) != 1) or (len(prob_dist_q.shape) != 1): + raise_error( + TypeError, + "Probability arrays must have dims (k,) but have " + + f"dims {prob_dist_p.shape} and {prob_dist_q.shape}.", + ) + + if (len(prob_dist_p) == 0) or (len(prob_dist_q) == 0): + raise_error(TypeError, "At least one of the arrays is empty.") + + if base <= 0: + raise_error(ValueError, "log base must be non-negative.") + + if validate: + if (any(prob_dist_p < 0) or any(prob_dist_p > 1.0)) or ( + any(prob_dist_q < 0) or any(prob_dist_q > 1.0) + ): + raise_error( + ValueError, + "All elements of the probability array must be between 0. and 1..", + ) + if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "First probability array must sum to 1.") + + if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Second probability array must sum to 1.") + + entropy_p = -1 * shannon_entropy(prob_dist_p, base=base, backend=backend) + + if base == 2: + log_prob_q = np.where(prob_dist_q != 0.0, np.log2(prob_dist_q), -np.inf) + elif base == 10: + log_prob_q = np.where(prob_dist_q != 0.0, np.log10(prob_dist_q), -np.inf) + elif base == np.e: + log_prob_q = np.where(prob_dist_q != 0.0, np.log(prob_dist_q), -np.inf) + else: + log_prob_q = np.where( + prob_dist_q != 0.0, np.log(prob_dist_q) / np.log(base), -np.inf + ) + + log_prob = np.where(prob_dist_p != 0.0, log_prob_q, 0.0) + + relative = np.sum(prob_dist_p * log_prob) + + return entropy_p - relative + + def entropy( state, base: float = 2, @@ -261,89 +344,6 @@ def relative_entropy( return float(entropy_state - relative) -def classical_relative_entropy( - prob_dist_p, prob_dist_q, base: float = 2, validate: bool = False, backend=None -): - """Calculates the relative entropy between two discrete probability distributions. - - For probabilities :math:`\\mathbf{p}` and :math:`\\mathbf{q}`, it is defined as - - ..math:: - D(\\mathbf{p} \\, \\| \\, \\mathbf{q}) = \\sum_{x} \\, \\mathbf{p}(x) \\, - \\log\\left( \\frac{\\mathbf{p}(x)}{\\mathbf{q}(x)} \\right) \\, . - - The classical relative entropy is also known as the - `Kullback-Leibler (KL) divergence `_. - - Args: - prob_dist_p (ndarray or list): discrete probability distribution :math:`p`. - prob_dist_q (ndarray or list): discrete probability distribution :math:`q`. - base (float): the base of the log. Defaults to :math:`2`. - validate (bool, optional): If ``True``, checks if :math:`p` and :math:`q` are proper - probability distributions. Defaults to ``False``. - backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be - used in the execution. If ``None``, it uses - :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. - Returns: - float: Classical relative entropy between :math:`\\mathbf{p}` and :math:`\\mathbf{q}`. 
- """ - if backend is None: # pragma: no cover - backend = GlobalBackend() - - if isinstance(prob_dist_p, list): - # np.float64 is necessary instead of native float because of tensorflow - prob_dist_p = backend.cast(prob_dist_p, dtype=np.float64) - if isinstance(prob_dist_q, list): - # np.float64 is necessary instead of native float because of tensorflow - prob_dist_q = backend.cast(prob_dist_q, dtype=np.float64) - - if (len(prob_dist_p.shape) != 1) or (len(prob_dist_q.shape) != 1): - raise_error( - TypeError, - "Probability arrays must have dims (k,) but have " - + f"dims {prob_dist_p.shape} and {prob_dist_q.shape}.", - ) - - if (len(prob_dist_p) == 0) or (len(prob_dist_q) == 0): - raise_error(TypeError, "At least one of the arrays is empty.") - - if base <= 0: - raise_error(ValueError, "log base must be non-negative.") - - if validate: - if (any(prob_dist_p < 0) or any(prob_dist_p > 1.0)) or ( - any(prob_dist_q < 0) or any(prob_dist_q > 1.0) - ): - raise_error( - ValueError, - "All elements of the probability array must be between 0. and 1..", - ) - if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: - raise_error(ValueError, "First probability array must sum to 1.") - - if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: - raise_error(ValueError, "Second probability array must sum to 1.") - - entropy_p = -1 * shannon_entropy(prob_dist_p, base=base, backend=backend) - - if base == 2: - log_prob_q = np.where(prob_dist_q != 0.0, np.log2(prob_dist_q), -np.inf) - elif base == 10: - log_prob_q = np.where(prob_dist_q != 0.0, np.log10(prob_dist_q), -np.inf) - elif base == np.e: - log_prob_q = np.where(prob_dist_q != 0.0, np.log(prob_dist_q), -np.inf) - else: - log_prob_q = np.where( - prob_dist_q != 0.0, np.log(prob_dist_q) / np.log(base), -np.inf - ) - - log_prob = np.where(prob_dist_p != 0.0, log_prob_q, 0.0) - - relative = np.sum(prob_dist_p * log_prob) - - return entropy_p - relative - - def entanglement_entropy( state, bipartition, diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py index cd5cce3362..fc6d56ace6 100644 --- a/tests/test_quantum_info_entropies.py +++ b/tests/test_quantum_info_entropies.py @@ -56,6 +56,66 @@ def test_shannon_entropy(backend, base): backend.assert_allclose(result, 1.0) +@pytest.mark.parametrize("kind", [None, list]) +@pytest.mark.parametrize("validate", [False, True]) +@pytest.mark.parametrize("base", [2, 10, np.e, 5]) +def test_classical_relative_entropy(backend, base, validate, kind): + with pytest.raises(TypeError): + prob = np.random.rand(1, 2) + prob_q = np.random.rand(1, 5) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, backend=backend) + with pytest.raises(TypeError): + prob = np.random.rand(1, 2)[0] + prob_q = np.array([]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, backend=backend) + with pytest.raises(ValueError): + prob = np.array([-1, 2.0]) + prob_q = np.random.rand(1, 5)[0] + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend) + with pytest.raises(ValueError): + prob = np.random.rand(1, 2)[0] + prob_q = np.array([1.0, 0.0]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, validate=True, 
backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob_q = np.random.rand(1, 2)[0] + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend) + with pytest.raises(ValueError): + prob = np.array([1.0, 0.0]) + prob_q = np.array([0.0, 1.0]) + prob = backend.cast(prob, dtype=prob.dtype) + prob_q = backend.cast(prob_q, dtype=prob_q.dtype) + test = classical_relative_entropy(prob, prob_q, base=-2, backend=backend) + + prob_p = np.random.rand(10) + prob_q = np.random.rand(10) + prob_p /= np.sum(prob_p) + prob_q /= np.sum(prob_q) + + target = np.sum(prob_p * np.log(prob_p) / np.log(base)) - np.sum( + prob_p * np.log(prob_q) / np.log(base) + ) + + if kind is not None: + prob_p, prob_q = kind(prob_p), kind(prob_q) + + divergence = classical_relative_entropy( + prob_p, prob_q, base=base, validate=validate, backend=backend + ) + + backend.assert_allclose(divergence, target, atol=1e-5) + + @pytest.mark.parametrize("check_hermitian", [False, True]) @pytest.mark.parametrize("base", [2, 10, np.e, 5]) def test_entropy(backend, base, check_hermitian): @@ -180,66 +240,6 @@ def test_relative_entropy(backend, base, check_hermitian): ) -@pytest.mark.parametrize("kind", [None, list]) -@pytest.mark.parametrize("validate", [False, True]) -@pytest.mark.parametrize("base", [2, 10, np.e, 5]) -def test_classical_relative_entropy(backend, base, validate, kind): - with pytest.raises(TypeError): - prob = np.random.rand(1, 2) - prob_q = np.random.rand(1, 5) - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, backend=backend) - with pytest.raises(TypeError): - prob = np.random.rand(1, 2)[0] - prob_q = np.array([]) - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, backend=backend) - with pytest.raises(ValueError): - prob = np.array([-1, 2.0]) - prob_q = np.random.rand(1, 5)[0] - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend) - with pytest.raises(ValueError): - prob = np.random.rand(1, 2)[0] - prob_q = np.array([1.0, 0.0]) - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend) - with pytest.raises(ValueError): - prob = np.array([1.0, 0.0]) - prob_q = np.random.rand(1, 2)[0] - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend) - with pytest.raises(ValueError): - prob = np.array([1.0, 0.0]) - prob_q = np.array([0.0, 1.0]) - prob = backend.cast(prob, dtype=prob.dtype) - prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, base=-2, backend=backend) - - prob_p = np.random.rand(10) - prob_q = np.random.rand(10) - prob_p /= np.sum(prob_p) - prob_q /= np.sum(prob_q) - - target = np.sum(prob_p * np.log(prob_p) / np.log(base)) - np.sum( - prob_p * np.log(prob_q) / np.log(base) - ) - - if kind is not None: - prob_p, prob_q = kind(prob_p), kind(prob_q) - - divergence = classical_relative_entropy( - prob_p, prob_q, base=base, validate=validate, backend=backend - ) - - 
backend.assert_allclose(divergence, target, atol=1e-5) - - @pytest.mark.parametrize("check_hermitian", [False, True]) @pytest.mark.parametrize("base", [2, 10, np.e, 5]) @pytest.mark.parametrize("bipartition", [[0], [1]]) From d7f885cc5a5dbeabdd05c6e7e61a1fae44728f08 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 30 Jan 2024 15:59:28 +0400 Subject: [PATCH 02/10] api ref --- doc/source/api-reference/qibo.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst index fb1c5df5dd..5f80e763c1 100644 --- a/doc/source/api-reference/qibo.rst +++ b/doc/source/api-reference/qibo.rst @@ -1543,6 +1543,12 @@ Classical Relative Entropy .. autofunction:: qibo.quantum_info.classical_relative_entropy +Classical Rényi Entropy +""""""""""""""""""""""" + +.. autofunction:: qibo.quantum_info.classical_renyi_entropy + + Entropy """"""" @@ -1570,6 +1576,12 @@ Relative Entropy an error will be raised when using `cupy` backend. +Rényi Entropy +""""""""""""" + +.. autofunction:: qibo.quantum_info.renyi_entropy + + Entanglement Entropy """""""""""""""""""" From 935d87fd172d27d41a102bfbcf9e4ca712ac9e09 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 30 Jan 2024 15:59:38 +0400 Subject: [PATCH 03/10] classical renyi entropy --- src/qibo/quantum_info/entropies.py | 139 ++++++++++++++++++++++------- 1 file changed, 106 insertions(+), 33 deletions(-) diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py index 058b7e4a15..6d3492d45d 100644 --- a/src/qibo/quantum_info/entropies.py +++ b/src/qibo/quantum_info/entropies.py @@ -7,7 +7,7 @@ from qibo.quantum_info.metrics import _check_hermitian_or_not_gpu, purity -def shannon_entropy(probability_array, base: float = 2, backend=None): +def shannon_entropy(prob_dist, base: float = 2, backend=None): """Calculate the Shannon entropy of a probability array :math:`\\mathbf{p}`, which is given by .. math:: @@ -18,7 +18,7 @@ def shannon_entropy(probability_array, base: float = 2, backend=None): and :math:`0 \\log_{b}(0) \\equiv 0`. Args: - probability_array (ndarray or list): a probability array :math:`\\mathbf{p}`. + prob_dist (ndarray or list): a probability array :math:`\\mathbf{p}`. base (float): the base of the log. Defaults to :math:`2`. backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. @@ -30,43 +30,41 @@ def shannon_entropy(probability_array, base: float = 2, backend=None): if backend is None: # pragma: no cover backend = GlobalBackend() - if isinstance(probability_array, list): + if isinstance(prob_dist, list): # np.float64 is necessary instead of native float because of tensorflow - probability_array = backend.cast(probability_array, dtype=np.float64) + prob_dist = backend.cast(prob_dist, dtype=np.float64) if base <= 0: raise_error(ValueError, "log base must be non-negative.") - if len(probability_array.shape) != 1: + if len(prob_dist.shape) != 1: raise_error( TypeError, - f"Probability array must have dims (k,) but it has {probability_array.shape}.", + f"Probability array must have dims (k,) but it has {prob_dist.shape}.", ) - if len(probability_array) == 0: + if len(prob_dist) == 0: raise_error(TypeError, "Empty array.") - if any(probability_array < 0) or any(probability_array > 1.0): + if any(prob_dist < 0) or any(prob_dist > 1.0): raise_error( ValueError, "All elements of the probability array must be between 0. 
and 1..", ) - if np.abs(np.sum(probability_array) - 1.0) > PRECISION_TOL: + if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") if base == 2: - log_prob = np.where(probability_array != 0.0, np.log2(probability_array), 0.0) + log_prob = np.where(prob_dist != 0.0, np.log2(prob_dist), 0.0) elif base == 10: - log_prob = np.where(probability_array != 0, np.log10(probability_array), 0.0) + log_prob = np.where(prob_dist != 0, np.log10(prob_dist), 0.0) elif base == np.e: - log_prob = np.where(probability_array != 0, np.log(probability_array), 0.0) + log_prob = np.where(prob_dist != 0, np.log(prob_dist), 0.0) else: - log_prob = np.where( - probability_array != 0, np.log(probability_array) / np.log(base), 0.0 - ) + log_prob = np.where(prob_dist != 0, np.log(prob_dist) / np.log(base), 0.0) - shan_entropy = -np.sum(probability_array * log_prob) + shan_entropy = -np.sum(prob_dist * log_prob) # absolute value if entropy == 0.0 to avoid returning -0.0 shan_entropy = np.abs(shan_entropy) if shan_entropy == 0.0 else shan_entropy @@ -74,9 +72,7 @@ def shannon_entropy(probability_array, base: float = 2, backend=None): return complex(shan_entropy).real -def classical_relative_entropy( - prob_dist_p, prob_dist_q, base: float = 2, validate: bool = False, backend=None -): +def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backend=None): """Calculates the relative entropy between two discrete probability distributions. For probabilities :math:`\\mathbf{p}` and :math:`\\mathbf{q}`, it is defined as @@ -92,11 +88,10 @@ def classical_relative_entropy( prob_dist_p (ndarray or list): discrete probability distribution :math:`p`. prob_dist_q (ndarray or list): discrete probability distribution :math:`q`. base (float): the base of the log. Defaults to :math:`2`. - validate (bool, optional): If ``True``, checks if :math:`p` and :math:`q` are proper - probability distributions. Defaults to ``False``. backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be used in the execution. If ``None``, it uses :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + Returns: float: Classical relative entropy between :math:`\\mathbf{p}` and :math:`\\mathbf{q}`. """ @@ -123,19 +118,18 @@ def classical_relative_entropy( if base <= 0: raise_error(ValueError, "log base must be non-negative.") - if validate: - if (any(prob_dist_p < 0) or any(prob_dist_p > 1.0)) or ( - any(prob_dist_q < 0) or any(prob_dist_q > 1.0) - ): - raise_error( - ValueError, - "All elements of the probability array must be between 0. and 1..", - ) - if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: - raise_error(ValueError, "First probability array must sum to 1.") + if (any(prob_dist_p < 0) or any(prob_dist_p > 1.0)) or ( + any(prob_dist_q < 0) or any(prob_dist_q > 1.0) + ): + raise_error( + ValueError, + "All elements of the probability array must be between 0. 
and 1..", + ) + if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "First probability array must sum to 1.") - if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: - raise_error(ValueError, "Second probability array must sum to 1.") + if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Second probability array must sum to 1.") entropy_p = -1 * shannon_entropy(prob_dist_p, base=base, backend=backend) @@ -157,6 +151,85 @@ def classical_relative_entropy( return entropy_p - relative +def classical_renyi_entropy(prob_dist, alpha: float, base: float = 2, backend=None): + """Calculates the classical Rényi entropy :math:`H_{\\alpha}` of a discrete probability distribution. + + For :math:`\\alpha \\in (0, \\, 1) \\cup (1, \\, \\infty)` and probability distribution + :math:`\\mathbf{p}`, the Rényi entropy is defined as + + .. math:: + H_{\\alpha}(\\mathbf{p}) = \\frac{1}{1 - \\alpha} \\, \\log\\left( \\sum_{x} + \\, \\mathbf{p}^{\\alpha}(x) \\right) \\, . + + For :math:`\\alpha \\in \\{0, 1, \\infty \\}`, it is further defined that + + .. math:: + H_{\\alpha}(\\mathbf{p}) = \\lim_{\\beta \\to \\alpha} \\, H_{\\beta}(\\mathbf{p}) \\, . + + A special case is the limit :math:`\\alpha \\to 1`, in which the classical Rényi entropy + coincides with the :func:`qibo.quantum_info.entropies.shannon_entropy`. + + Args: + prob_dist (ndarray): discrete probability distribution. + alpha (float): order of the Rényi entropy. + If :math:`\\alpha = 1`, defaults to :func:`qibo.quantum_info.entropies.shannon_entropy`. + If :math:`\\alpha = \\infty`, defaults to the + `min-entropy `_. + base (float): the base of the log. Defaults to :math:`2`. + backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be + used in the execution. If ``None``, it uses + :class:`qibo.backends.GlobalBackend`. Defaults to ``None``. + + Returns: + float: Rényi entropy :math:`H_{\\alpha}`. + """ + if backend is None: # pragma: no cover + backend = GlobalBackend() + + if isinstance(prob_dist, list): + # np.float64 is necessary instead of native float because of tensorflow + prob_dist = backend.cast(prob_dist, dtype=np.float64) + + if not isinstance(alpha, float): + raise_error( + TypeError, f"alpha must be type float, but it is type {type(alpha)}." + ) + + if alpha < 0.0: + raise_error(ValueError, "alpha must a non-negative float.") + + if base <= 0: + raise_error(ValueError, "log base must be non-negative.") + + if len(prob_dist.shape) != 1: + raise_error( + TypeError, + f"Probability array must have dims (k,) but it has {prob_dist.shape}.", + ) + + if len(prob_dist) == 0: + raise_error(TypeError, "Empty array.") + + if any(prob_dist < 0) or any(prob_dist > 1.0): + raise_error( + ValueError, + "All elements of the probability array must be between 0. 
and 1..", + ) + + if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + raise_error(ValueError, "Probability array must sum to 1.") + + if alpha == 1.0: + return shannon_entropy(prob_dist, base=base, backend=backend) + + if alpha == np.inf: + return -1 * np.log2(max(prob_dist)) / np.log2(base) + + renyi_ent = (1 / (1 - alpha)) * np.log2(np.sum(prob_dist**alpha)) / np.log2(base) + + return renyi_ent + + def entropy( state, base: float = 2, From cb799bd281230f5f5da805e7d5d18587d1bf2634 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 30 Jan 2024 15:59:48 +0400 Subject: [PATCH 04/10] remove variable --- tests/test_quantum_info_entropies.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py index fc6d56ace6..392d4b108b 100644 --- a/tests/test_quantum_info_entropies.py +++ b/tests/test_quantum_info_entropies.py @@ -57,9 +57,8 @@ def test_shannon_entropy(backend, base): @pytest.mark.parametrize("kind", [None, list]) -@pytest.mark.parametrize("validate", [False, True]) @pytest.mark.parametrize("base", [2, 10, np.e, 5]) -def test_classical_relative_entropy(backend, base, validate, kind): +def test_classical_relative_entropy(backend, base, kind): with pytest.raises(TypeError): prob = np.random.rand(1, 2) prob_q = np.random.rand(1, 5) @@ -77,19 +76,19 @@ def test_classical_relative_entropy(backend, base, validate, kind): prob_q = np.random.rand(1, 5)[0] prob = backend.cast(prob, dtype=prob.dtype) prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend) + test = classical_relative_entropy(prob, prob_q, backend=backend) with pytest.raises(ValueError): prob = np.random.rand(1, 2)[0] prob_q = np.array([1.0, 0.0]) prob = backend.cast(prob, dtype=prob.dtype) prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend) + test = classical_relative_entropy(prob, prob_q, backend=backend) with pytest.raises(ValueError): prob = np.array([1.0, 0.0]) prob_q = np.random.rand(1, 2)[0] prob = backend.cast(prob, dtype=prob.dtype) prob_q = backend.cast(prob_q, dtype=prob_q.dtype) - test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend) + test = classical_relative_entropy(prob, prob_q, backend=backend) with pytest.raises(ValueError): prob = np.array([1.0, 0.0]) prob_q = np.array([0.0, 1.0]) @@ -109,9 +108,7 @@ def test_classical_relative_entropy(backend, base, validate, kind): if kind is not None: prob_p, prob_q = kind(prob_p), kind(prob_q) - divergence = classical_relative_entropy( - prob_p, prob_q, base=base, validate=validate, backend=backend - ) + divergence = classical_relative_entropy(prob_p, prob_q, base=base, backend=backend) backend.assert_allclose(divergence, target, atol=1e-5) From e3474901f669a8bd17a04af0a7fe16972f1ac8e4 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 30 Jan 2024 17:23:09 +0400 Subject: [PATCH 05/10] test errors --- src/qibo/quantum_info/entropies.py | 9 ++++-- tests/test_quantum_info_entropies.py | 43 ++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 3 deletions(-) diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py index 6d3492d45d..ec869ba9c9 100644 --- a/src/qibo/quantum_info/entropies.py +++ b/src/qibo/quantum_info/entropies.py @@ -1,4 +1,5 @@ """Submodule with entropy measures.""" +from typing import Union import numpy as np @@ -151,7 +152,9 @@ def 
classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backen
     return entropy_p - relative
 
 
-def classical_renyi_entropy(prob_dist, alpha: float, base: float = 2, backend=None):
+def classical_renyi_entropy(
+    prob_dist, alpha: Union[float, int], base: float = 2, backend=None
+):
     """Calculates the classical Rényi entropy :math:`H_{\\alpha}` of a discrete probability distribution.
 
     For :math:`\\alpha \\in (0, \\, 1) \\cup (1, \\, \\infty)` and probability distribution
@@ -171,7 +174,7 @@
 
     Args:
         prob_dist (ndarray): discrete probability distribution.
-        alpha (float): order of the Rényi entropy.
+        alpha (float or int): order of the Rényi entropy.
             If :math:`\\alpha = 1`, defaults to :func:`qibo.quantum_info.entropies.shannon_entropy`.
             If :math:`\\alpha = \\infty`, defaults to the
             `min-entropy `_.
@@ -190,7 +193,7 @@
         # np.float64 is necessary instead of native float because of tensorflow
         prob_dist = backend.cast(prob_dist, dtype=np.float64)
 
-    if not isinstance(alpha, float):
+    if not isinstance(alpha, (float, int)):
         raise_error(
             TypeError, f"alpha must be type float, but it is type {type(alpha)}."
         )
diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py
index 392d4b108b..e7d19a4ef4 100644
--- a/tests/test_quantum_info_entropies.py
+++ b/tests/test_quantum_info_entropies.py
@@ -4,6 +4,7 @@
 from qibo.config import PRECISION_TOL
 from qibo.quantum_info.entropies import (
     classical_relative_entropy,
+    classical_renyi_entropy,
     entanglement_entropy,
     entropy,
     relative_entropy,
@@ -113,6 +114,48 @@ def test_classical_relative_entropy(backend, base, kind):
     backend.assert_allclose(divergence, target, atol=1e-5)
 
 
+@pytest.mark.parametrize("kind", [None, list])
+@pytest.mark.parametrize("base", [2, 10, np.e, 5])
+@pytest.mark.parametrize("alpha", [1, 2, 3, np.inf])
+def test_classical_renyi_entropy(backend, alpha, base, kind):
+    with pytest.raises(TypeError):
+        prob = np.array([1.0, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha="2", backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([1.0, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha=-2, backend=backend)
+    with pytest.raises(TypeError):
+        prob = np.array([1.0, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, base="2", backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([1.0, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, base=-2, backend=backend)
+    with pytest.raises(TypeError):
+        prob = np.array([[1.0], [0.0]])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+    with pytest.raises(TypeError):
+        prob = np.array([])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([1.0, -1.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([1.1, 0.0])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+    with pytest.raises(ValueError):
+        prob = np.array([0.5, 0.4999999])
+        prob = backend.cast(prob, dtype=prob.dtype)
+        test = classical_renyi_entropy(prob, alpha, backend=backend)
+
+
 @pytest.mark.parametrize("check_hermitian", [False, True])
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
 def test_entropy(backend, base, check_hermitian):
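The limiting cases exercised by these tests can also be checked by hand. Below is a minimal sanity-check sketch, assuming a qibo installation with the patches up to this point applied; it is not itself part of the patch series:

# Sketch: numerically verify the alpha -> 1 and alpha -> infinity limits of
# classical_renyi_entropy. Assumes qibo with the patches above applied.
import numpy as np

from qibo.quantum_info.entropies import classical_renyi_entropy, shannon_entropy

prob = np.random.rand(8)
prob /= np.sum(prob)  # normalize into a valid probability distribution

# alpha = 1 falls back to the Shannon entropy
assert np.isclose(classical_renyi_entropy(prob, alpha=1.0), shannon_entropy(prob))

# alpha = infinity gives the min-entropy -log2(max_x p(x))
assert np.isclose(classical_renyi_entropy(prob, alpha=np.inf), -np.log2(np.max(prob)))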
From f4a6b6aa0ee56e6aeaf25b37ab8bda65a5a8fa9c Mon Sep 17 00:00:00 2001
From: Renato Mello
Date: Tue, 30 Jan 2024 17:33:36 +0400
Subject: [PATCH 06/10] test function

---
 tests/test_quantum_info_entropies.py | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py
index e7d19a4ef4..4853c2eef1 100644
--- a/tests/test_quantum_info_entropies.py
+++ b/tests/test_quantum_info_entropies.py
@@ -155,6 +155,25 @@ def test_classical_renyi_entropy(backend, alpha, base, kind):
         prob = backend.cast(prob, dtype=prob.dtype)
         test = classical_renyi_entropy(prob, alpha, backend=backend)
 
+    prob_dist = np.random.rand(10)
+    prob_dist /= np.sum(prob_dist)
+
+    if alpha == 1:
+        target = shannon_entropy(prob_dist, base=base, backend=backend)
+    elif alpha == 2:
+        target = -1 * np.log2(np.sum(prob_dist**2)) / np.log2(base)
+    elif alpha == np.inf:
+        target = -1 * np.log2(max(prob_dist)) / np.log2(base)
+    else:
+        target = (1 / (1 - alpha)) * np.log2(np.sum(prob_dist**alpha)) / np.log2(base)
+
+    if kind is not None:
+        prob_dist = kind(prob_dist)
+
+    renyi_ent = classical_renyi_entropy(prob_dist, alpha, base=base, backend=backend)
+
+    backend.assert_allclose(renyi_ent, target, atol=1e-5)
+
 
 @pytest.mark.parametrize("check_hermitian", [False, True])
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
 def test_entropy(backend, base, check_hermitian):

From aa23dbc38d6cd204f78574247f7b48fbfa131429 Mon Sep 17 00:00:00 2001
From: Renato Mello
Date: Tue, 30 Jan 2024 17:48:29 +0400
Subject: [PATCH 07/10] add `alpha = 0`

---
 src/qibo/quantum_info/entropies.py   | 19 +++++++++++--------
 tests/test_quantum_info_entropies.py |  6 ++++--
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py
index ec869ba9c9..b22246cf5f 100644
--- a/src/qibo/quantum_info/entropies.py
+++ b/src/qibo/quantum_info/entropies.py
@@ -164,20 +164,20 @@ def classical_renyi_entropy(
         H_{\\alpha}(\\mathbf{p}) = \\frac{1}{1 - \\alpha} \\, \\log\\left( \\sum_{x}
             \\, \\mathbf{p}^{\\alpha}(x) \\right) \\, .
 
-    For :math:`\\alpha \\in \\{0, 1, \\infty \\}`, it is further defined that
-
-    .. math::
-        H_{\\alpha}(\\mathbf{p}) = \\lim_{\\beta \\to \\alpha} \\, H_{\\beta}(\\mathbf{p}) \\, .
-
     A special case is the limit :math:`\\alpha \\to 1`, in which the classical Rényi entropy
     coincides with the :func:`qibo.quantum_info.entropies.shannon_entropy`.
 
+    Another special case is the limit :math:`\\alpha \\to 0`, where the function is
+    reduced to the :math:`\\log\\left(|\\mathbf{p}|\\right)`, with :math:`|\\mathbf{p}|`
+    being the support of :math:`\\mathbf{p}`.
+    This is known as the `Hartley entropy `_
+    (also known as *Hartley function* or *max-entropy*).
+
+    In the limit :math:`\\alpha \\to \\infty`, the function reduces to
+    :math:`-\\log(\\max_{x}(\\mathbf{p}(x)))`, which is called the
+    `min-entropy `_.
+
     Args:
         prob_dist (ndarray): discrete probability distribution.
         alpha (float or int): order of the Rényi entropy.
-            If :math:`\\alpha = 1`, defaults to :func:`qibo.quantum_info.entropies.shannon_entropy`.
-            If :math:`\\alpha = \\infty`, defaults to the
-            `min-entropy `_.
         base (float): the base of the log. Defaults to :math:`2`.
         backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be
             used in the execution. 
If ``None``, it uses
            :class:`qibo.backends.GlobalBackend`. Defaults to ``None``.
 
@@ -222,6 +222,9 @@ def classical_renyi_entropy(
     if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL:
         raise_error(ValueError, "Probability array must sum to 1.")
 
+    if alpha == 0.0:
+        return np.log2(len(prob_dist)) / np.log2(base)
+
     if alpha == 1.0:
         return shannon_entropy(prob_dist, base=base, backend=backend)
 
diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py
index 4853c2eef1..0153a3b778 100644
--- a/tests/test_quantum_info_entropies.py
+++ b/tests/test_quantum_info_entropies.py
@@ -116,7 +116,7 @@ def test_classical_relative_entropy(backend, base, kind):
 
 @pytest.mark.parametrize("kind", [None, list])
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
-@pytest.mark.parametrize("alpha", [1, 2, 3, np.inf])
+@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf])
 def test_classical_renyi_entropy(backend, alpha, base, kind):
     with pytest.raises(TypeError):
         prob = np.array([1.0, 0.0])
@@ -158,7 +158,9 @@ def test_classical_renyi_entropy(backend, alpha, base, kind):
     prob_dist = np.random.rand(10)
     prob_dist /= np.sum(prob_dist)
 
-    if alpha == 1:
+    if alpha == 0.0:
+        target = np.log2(len(prob_dist)) / np.log2(base)
+    elif alpha == 1:
         target = shannon_entropy(prob_dist, base=base, backend=backend)
     elif alpha == 2:
         target = -1 * np.log2(np.sum(prob_dist**2)) / np.log2(base)

From 36f91984803601ff477f215b71b1a3c925547d5a Mon Sep 17 00:00:00 2001
From: Renato Mello
Date: Wed, 31 Jan 2024 09:07:58 +0400
Subject: [PATCH 08/10] docstring

---
 src/qibo/quantum_info/entropies.py | 86 ++++++++++++++++++++++++++++--
 1 file changed, 83 insertions(+), 3 deletions(-)

diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py
index b22246cf5f..0da8fe2fe2 100644
--- a/src/qibo/quantum_info/entropies.py
+++ b/src/qibo/quantum_info/entropies.py
@@ -158,7 +158,7 @@ def classical_renyi_entropy(
     """Calculates the classical Rényi entropy :math:`H_{\\alpha}` of a discrete probability distribution.
 
     For :math:`\\alpha \\in (0, \\, 1) \\cup (1, \\, \\infty)` and probability distribution
-    :math:`\\mathbf{p}`, the Rényi entropy is defined as
+    :math:`\\mathbf{p}`, the classical Rényi entropy is defined as
 
     .. math::
         H_{\\alpha}(\\mathbf{p}) = \\frac{1}{1 - \\alpha} \\, \\log\\left( \\sum_{x}
             \\, \\mathbf{p}^{\\alpha}(x) \\right) \\, .
@@ -168,8 +168,8 @@ def classical_renyi_entropy(
     A special case is the limit :math:`\\alpha \\to 1`, in which the classical Rényi entropy
     coincides with the :func:`qibo.quantum_info.entropies.shannon_entropy`.
 
     Another special case is the limit :math:`\\alpha \\to 0`, where the function is
-    reduced to the :math:`\\log\\left(|\\mathbf{p}|\\right)`, with :math:`|\\mathbf{p}|`
+    reduced to :math:`\\log\\left(|\\mathbf{p}|\\right)`, with :math:`|\\mathbf{p}|`
     being the support of :math:`\\mathbf{p}`.
     This is known as the `Hartley entropy `_
     (also known as *Hartley function* or *max-entropy*).
@@ -186,7 +186,7 @@ def classical_renyi_entropy(
             :class:`qibo.backends.GlobalBackend`. Defaults to ``None``.
 
     Returns:
-        float: Rényi entropy :math:`H_{\\alpha}`.
+        float: Classical Rényi entropy :math:`H_{\\alpha}`.
     """
     if backend is None:  # pragma: no cover
         backend = GlobalBackend()
@@ -423,6 +425,84 @@ def relative_entropy(
     return float(entropy_state - relative)
 
 
+def renyi_entropy(state, alpha: Union[float, int], base: float = 2, backend=None):
+    """Calculates the Rényi entropy :math:`H_{\\alpha}` of a quantum state :math:`\\rho`.
+
+    For :math:`\\alpha \\in (0, \\, 1) \\cup (1, \\, \\infty)`, the Rényi entropy is defined as
+
+    .. math::
+        H_{\\alpha}(\\rho) = \\frac{1}{1 - \\alpha} \\, \\log\\left( \\text{tr}(\\rho^{\\alpha}) \\right) \\, .
+
+    A special case is the limit :math:`\\alpha \\to 1`, in which the Rényi entropy
+    coincides with the :func:`qibo.quantum_info.entropies.entropy`.
+
+    Another special case is the limit :math:`\\alpha \\to 0`, where the function is
+    reduced to :math:`\\log\\left(d\\right)`, with :math:`d = 2^{n}`
+    being the dimension of the Hilbert space in which ``state`` :math:`\\rho` lives.
+    This is known as the `Hartley entropy `_
+    (also known as *Hartley function* or *max-entropy*).
+
+    In the limit :math:`\\alpha \\to \\infty`, the function reduces to
+    :math:`-\\log(\\|\\rho\\|_{\\infty})`, with :math:`\\|\\cdot\\|_{\\infty}`
+    being the `spectral norm `_.
+    This is known as the `min-entropy `_.
+
+    Args:
+        state (ndarray): statevector or density matrix.
+        alpha (float or int): order of the Rényi entropy.
+        base (float): the base of the log. Defaults to :math:`2`.
+        backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be
+            used in the execution. If ``None``, it uses
+            :class:`qibo.backends.GlobalBackend`. Defaults to ``None``.
+
+    Returns:
+        float: Rényi entropy :math:`H_{\\alpha}`.
+    """
+    if backend is None:  # pragma: no cover
+        backend = GlobalBackend()
+
+    if (
+        (len(state.shape) >= 3)
+        or (len(state) == 0)
+        or (len(state.shape) == 2 and state.shape[0] != state.shape[1])
+    ):
+        raise_error(
+            TypeError,
+            f"state must have dims either (k,) or (k,k), but have dims {state.shape}.",
+        )
+
+    if not isinstance(alpha, (float, int)):
+        raise_error(
+            TypeError, f"alpha must be type float, but it is type {type(alpha)}."
+        )
+
+    if alpha < 0.0:
+        raise_error(ValueError, "alpha must be a non-negative float.")
+
+    if base <= 0.0:
+        raise_error(ValueError, "log base must be non-negative.")
+
+    if purity(state) == 1.0:
+        return 0.0
+
+    if alpha == 0.0:
+        return np.log2(len(state)) / np.log2(base)
+
+    if alpha == 1.0:
+        return entropy(state, base=base, backend=backend)
+
+    if alpha == np.inf:
+        return (
+            -1
+            * np.log2(backend.calculate_norm_density_matrix(state, order=2))
+            / np.log2(base)
+        )
+
+    log = np.log2(np.trace(np.linalg.matrix_power(state, alpha)))
+
+    return (1 / (1 - alpha)) * log / np.log2(base)
+
+
 def entanglement_entropy(
     state,
     bipartition,

From 8e61e250047cf2709a530a5df722bafb64791e13 Mon Sep 17 00:00:00 2001
From: Renato Mello
Date: Wed, 31 Jan 2024 09:28:43 +0400
Subject: [PATCH 09/10] fix bug

---
 src/qibo/quantum_info/entropies.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py
index 0da8fe2fe2..8c674b37a8 100644
--- a/src/qibo/quantum_info/entropies.py
+++ b/src/qibo/quantum_info/entropies.py
@@ -482,7 +482,7 @@ def renyi_entropy(state, alpha: Union[float, int], base: float = 2, backend=None
     if base <= 0.0:
         raise_error(ValueError, "log base must be non-negative.")
 
-    if purity(state) == 1.0:
+    if abs(purity(state) - 1.0) < PRECISION_TOL:
         return 0.0
 
     if alpha == 0.0:
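A corresponding numerical check for the quantum Rényi entropy added above; again a sketch assuming a qibo installation with these patches applied, not part of the patch series itself:

# Sketch: check two limits of renyi_entropy against independent formulas.
# Assumes qibo with the patches above applied.
import numpy as np

from qibo.quantum_info.entropies import entropy, renyi_entropy
from qibo.quantum_info.random_ensembles import random_density_matrix

state = random_density_matrix(4)  # random mixed state on two qubits

# alpha = 1 falls back to the von Neumann entropy
assert np.isclose(renyi_entropy(state, alpha=1.0), entropy(state))

# alpha = 2 evaluates (1 / (1 - 2)) * log2(tr(rho^2)) = -log2(tr(rho^2))
target = -np.log2(np.trace(state @ state).real)
assert np.isclose(renyi_entropy(state, alpha=2), target)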
From 6b7b1af87aec223e97fa68c0981722035641b6a4 Mon Sep 17 00:00:00 2001
From: Renato Mello
Date: Wed, 31 Jan 2024 09:28:48 +0400
Subject: [PATCH 10/10] tests

---
 tests/test_quantum_info_entropies.py | 42 ++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py
index 0153a3b778..702ce8db2e 100644
--- a/tests/test_quantum_info_entropies.py
+++ b/tests/test_quantum_info_entropies.py
@@ -8,6 +8,7 @@
     entanglement_entropy,
     entropy,
     relative_entropy,
+    renyi_entropy,
     shannon_entropy,
 )
 from qibo.quantum_info.random_ensembles import (
@@ -301,6 +302,47 @@ def test_relative_entropy(backend, base, check_hermitian):
     )
 
 
+@pytest.mark.parametrize("base", [2, 10, np.e, 5])
+@pytest.mark.parametrize("alpha", [0, 1, 2, 3, np.inf])
+def test_renyi_entropy(backend, alpha, base):
+    with pytest.raises(TypeError):
+        state = np.random.rand(2, 3)
+        state = backend.cast(state, dtype=state.dtype)
+        test = renyi_entropy(state, alpha=alpha, base=base, backend=backend)
+    with pytest.raises(TypeError):
+        state = random_statevector(4, backend=backend)
+        test = renyi_entropy(state, alpha="2", base=base, backend=backend)
+    with pytest.raises(ValueError):
+        state = random_statevector(4, backend=backend)
+        test = renyi_entropy(state, alpha=-1, base=base, backend=backend)
+    with pytest.raises(ValueError):
+        state = random_statevector(4, backend=backend)
+        test = renyi_entropy(state, alpha=alpha, base=0, backend=backend)
+
+    state = random_density_matrix(4, backend=backend)
+
+    if alpha == 0.0:
+        target = np.log2(len(state)) / np.log2(base)
+    elif alpha == 1.0:
+        target = entropy(state, base=base, backend=backend)
+    elif alpha == np.inf:
+        target = backend.calculate_norm_density_matrix(state, order=2)
+        target = -1 * np.log2(target) / np.log2(base)
+    else:
+        target = np.log2(np.trace(np.linalg.matrix_power(state, alpha)))
+        target = (1 / (1 - alpha)) * target / np.log2(base)
+
+    backend.assert_allclose(
+        renyi_entropy(state, alpha=alpha, base=base, backend=backend), target, atol=1e-5
+    )
+
+    # test pure state
+    state = random_density_matrix(4, pure=True, backend=backend)
+    backend.assert_allclose(
+        renyi_entropy(state, alpha=alpha, base=base, backend=backend), 0.0, atol=1e-8
+    )
+
+
 @pytest.mark.parametrize("check_hermitian", [False, True])
 @pytest.mark.parametrize("base", [2, 10, np.e, 5])
 @pytest.mark.parametrize("bipartition", [[0], [1]])
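As a closing cross-check of the docstring formulas, the decomposition used by classical_relative_entropy — D(p || q) = -H(p) - sum_x p(x) log q(x) — can be verified with plain NumPy, independently of qibo; a minimal sketch:

# Sketch: verify D(p || q) = -H(p) - sum_x p(x) log2 q(x), the decomposition
# used internally by classical_relative_entropy. Plain NumPy, no qibo needed.
import numpy as np

rng = np.random.default_rng(42)

prob_p = rng.random(10)
prob_q = rng.random(10)
prob_p /= prob_p.sum()
prob_q /= prob_q.sum()

# direct evaluation of the definition in base 2
kl_direct = np.sum(prob_p * np.log2(prob_p / prob_q))

# decomposition: negative Shannon entropy of p minus the cross term
shannon_p = -np.sum(prob_p * np.log2(prob_p))
cross_term = np.sum(prob_p * np.log2(prob_q))
kl_decomposed = -shannon_p - cross_term

assert np.isclose(kl_direct, kl_decomposed)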