Skip to content

Commit

Permalink
Merge pull request #1469 from qiboteam/tsallis
Browse files Browse the repository at this point in the history
Add `classical_relative_tsallis_entropy` to `quantum_info.entropies`
  • Loading branch information
renatomello authored Oct 4, 2024
2 parents 677584c + 823f13e commit f8768fe
Show file tree
Hide file tree
Showing 3 changed files with 93 additions and 1 deletion.
5 changes: 5 additions & 0 deletions doc/source/api-reference/qibo.rst
Original file line number Diff line number Diff line change
Expand Up @@ -1763,6 +1763,11 @@ Classical Tsallis entropy

.. autofunction:: qibo.quantum_info.classical_tsallis_entropy

Classical Tsallis relative entropy
""""""""""""""""""""""""""""""""""

.. autofunction:: qibo.quantum_info.classical_relative_tsallis_entropy


von Neumann entropy
"""""""""""""""""""
Expand Down
58 changes: 57 additions & 1 deletion src/qibo/quantum_info/entropies.py
Original file line number Diff line number Diff line change
Expand Up @@ -428,7 +428,57 @@ def classical_tsallis_entropy(prob_dist, alpha: float, base: float = 2, backend=
total_sum = prob_dist**alpha
total_sum = backend.np.sum(total_sum)

return (1 / (1 - alpha)) * (total_sum - 1)
return (1 / (alpha - 1)) * (1 - total_sum)


def classical_relative_tsallis_entropy(
    prob_dist_p, prob_dist_q, alpha: float, base: float = 2, backend=None
):
    """Calculate the classical relative Tsallis entropy between two discrete probability distributions.

    Given a discrete random variable :math:`\\chi` that has values :math:`x` in the set
    :math:`\\mathcal{X}` with probability :math:`\\mathrm{p}(x)` and a discrete random variable
    :math:`\\upsilon` that has the values :math:`x` in the same set :math:`\\mathcal{X}` with
    probability :math:`\\mathrm{q}(x)`, their relative Tsallis entropy is given by

    .. math::
        D_{\\alpha}^{\\text{ts}}(\\chi \\, \\| \\, \\upsilon) = \\sum_{x \\in \\mathcal{X}} \\,
            \\mathrm{p}^{\\alpha}(x) \\, \\ln_{\\alpha}
            \\left( \\frac{\\mathrm{p}(x)}{\\mathrm{q}(x)} \\right) \\, ,

    where :math:`\\ln_{\\alpha}(x) \\equiv \\frac{x^{1 - \\alpha} - 1}{1 - \\alpha}`
    is the so-called :math:`\\alpha`-logarithm. When :math:`\\alpha = 1`, it reduces to
    :class:`qibo.quantum_info.entropies.classical_relative_entropy`.

    Args:
        prob_dist_p (ndarray or list): discrete probability distribution :math:`p`.
        prob_dist_q (ndarray or list): discrete probability distribution :math:`q`.
        alpha (float): entropic index.
        base (float): the base of the log used when :math:`\\alpha = 1`. Defaults to :math:`2`.
        backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be
            used in the execution. If ``None``, it uses
            :class:`qibo.backends.GlobalBackend`. Defaults to ``None``.

    Returns:
        float: Tsallis relative entropy :math:`D_{\\alpha}^{\\text{ts}}`.
    """
    # The alpha = 1 limit is exactly the classical relative (Kullback-Leibler) entropy.
    if alpha == 1.0:
        return classical_relative_entropy(prob_dist_p, prob_dist_q, base, backend)

    backend = _check_backend(backend)

    # np.float64 is necessary instead of native float because of tensorflow
    prob_dist_p, prob_dist_q = (
        backend.cast(dist, dtype=np.float64) if isinstance(dist, list) else dist
        for dist in (prob_dist_p, prob_dist_q)
    )

    # Element-wise p^alpha * ln_alpha(p / q), summed over the sample space.
    summand = prob_dist_p**alpha * _q_logarithm(prob_dist_p / prob_dist_q, alpha)

    return backend.np.sum(summand)


def von_neumann_entropy(
Expand Down Expand Up @@ -951,3 +1001,9 @@ def entanglement_entropy(
)

return entropy_entanglement


def _q_logarithm(x, q: float):
"""Generalization of logarithm function necessary for classical (relative) Tsallis entropy."""
factor = 1 - q
return (x**factor - 1) / factor
31 changes: 31 additions & 0 deletions tests/test_quantum_info_entropies.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
classical_mutual_information,
classical_relative_entropy,
classical_relative_renyi_entropy,
classical_relative_tsallis_entropy,
classical_renyi_entropy,
classical_tsallis_entropy,
entanglement_entropy,
Expand Down Expand Up @@ -382,6 +383,36 @@ def test_classical_tsallis_entropy(backend, alpha, base, kind):
)


@pytest.mark.parametrize("kind", [None, list])
@pytest.mark.parametrize("base", [2, 10, np.e, 5])
@pytest.mark.parametrize("alpha", [0, 1, 2, 3])
def test_classical_relative_tsallis_entropy(backend, alpha, base, kind):
    # Two random normalized distributions over a 10-element sample space.
    probs_p = np.random.rand(10)
    probs_q = np.random.rand(10)
    probs_p = backend.cast(probs_p / np.sum(probs_p), dtype=np.float64)
    probs_q = backend.cast(probs_q / np.sum(probs_q), dtype=np.float64)

    if alpha == 1.0:
        # alpha = 1 must reduce to the classical relative entropy.
        target = classical_relative_entropy(probs_p, probs_q, base, backend)
    else:
        # Direct evaluation of sum_x p^alpha(x) * ln_alpha(p(x) / q(x)).
        q_log = ((probs_p / probs_q) ** (1 - alpha) - 1) / (1 - alpha)
        target = backend.np.sum(probs_p**alpha * q_log)

    if kind is not None:
        probs_p = kind(probs_p)
        probs_q = kind(probs_q)

    value = classical_relative_tsallis_entropy(probs_p, probs_q, alpha, base, backend)

    backend.assert_allclose(value, target)


@pytest.mark.parametrize("check_hermitian", [False, True])
@pytest.mark.parametrize("base", [2, 10, np.e, 5])
def test_von_neumann_entropy(backend, base, check_hermitian):
Expand Down

0 comments on commit f8768fe

Please sign in to comment.