Merge pull request #1176 from qiboteam/relative_entropy
Add `classical_relative_entropy` to `quantum_info.entropies`
renatomello authored Jan 30, 2024
2 parents cbeb4e6 + 75ec076 commit e9ccdc2
Showing 2 changed files with 154 additions and 5 deletions.
92 changes: 88 additions & 4 deletions src/qibo/quantum_info/entropies.py
@@ -31,7 +31,8 @@ def shannon_entropy(probability_array, base: float = 2, backend=None):
        backend = GlobalBackend()

    if isinstance(probability_array, list):
-        probability_array = backend.cast(probability_array, dtype=float)
+        # np.float64 is necessary instead of native float because of tensorflow
+        probability_array = backend.cast(probability_array, dtype=np.float64)

    if base <= 0:
        raise_error(ValueError, "log base must be positive.")
@@ -65,12 +66,12 @@ def shannon_entropy(probability_array, base: float = 2, backend=None):
            probability_array != 0, np.log(probability_array) / np.log(base), 0.0
        )

-    entropy = -np.sum(probability_array * log_prob)
+    shan_entropy = -np.sum(probability_array * log_prob)

    # absolute value if entropy == 0.0 to avoid returning -0.0
-    entropy = np.abs(entropy) if entropy == 0.0 else entropy
+    shan_entropy = np.abs(shan_entropy) if shan_entropy == 0.0 else shan_entropy

-    return complex(entropy).real
+    return complex(shan_entropy).real


@@ -222,3 +223,86 @@ def entanglement_entropy(
        )

    return entropy_entanglement


def classical_relative_entropy(
    prob_dist_p, prob_dist_q, base: float = 2, validate: bool = False, backend=None
):
    """Calculates the relative entropy between two discrete probability distributions.

    For probabilities :math:`\\mathbf{p}` and :math:`\\mathbf{q}`, it is defined as

    .. math::
        D(\\mathbf{p} \\, \\| \\, \\mathbf{q}) = \\sum_{x} \\, \\mathbf{p}(x) \\,
            \\log\\left( \\frac{\\mathbf{p}(x)}{\\mathbf{q}(x)} \\right) \\, .

    The classical relative entropy is also known as the
    `Kullback-Leibler (KL) divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_.

    Args:
        prob_dist_p (ndarray or list): discrete probability distribution :math:`p`.
        prob_dist_q (ndarray or list): discrete probability distribution :math:`q`.
        base (float): the base of the log. Defaults to :math:`2`.
        validate (bool, optional): If ``True``, checks if :math:`p` and :math:`q` are proper
            probability distributions. Defaults to ``False``.
        backend (:class:`qibo.backends.abstract.Backend`, optional): backend to be
            used in the execution. If ``None``, it uses
            :class:`qibo.backends.GlobalBackend`. Defaults to ``None``.

    Returns:
        float: Classical relative entropy between :math:`\\mathbf{p}` and :math:`\\mathbf{q}`.
    """
    if backend is None:  # pragma: no cover
        backend = GlobalBackend()

    if isinstance(prob_dist_p, list):
        # np.float64 is necessary instead of native float because of tensorflow
        prob_dist_p = backend.cast(prob_dist_p, dtype=np.float64)
    if isinstance(prob_dist_q, list):
        # np.float64 is necessary instead of native float because of tensorflow
        prob_dist_q = backend.cast(prob_dist_q, dtype=np.float64)

    if (len(prob_dist_p.shape) != 1) or (len(prob_dist_q.shape) != 1):
        raise_error(
            TypeError,
            "Probability arrays must have dims (k,) but have "
            + f"dims {prob_dist_p.shape} and {prob_dist_q.shape}.",
        )

    if (len(prob_dist_p) == 0) or (len(prob_dist_q) == 0):
        raise_error(TypeError, "At least one of the arrays is empty.")
    if base <= 0:
        raise_error(ValueError, "log base must be positive.")

    if validate:
        if (any(prob_dist_p < 0) or any(prob_dist_p > 1.0)) or (
            any(prob_dist_q < 0) or any(prob_dist_q > 1.0)
        ):
            raise_error(
                ValueError,
                "All elements of the probability array must be between 0 and 1.",
            )
        if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL:
            raise_error(ValueError, "First probability array must sum to 1.")

        if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL:
            raise_error(ValueError, "Second probability array must sum to 1.")

    # D(p || q) = -H(p) - sum_x p(x) log q(x); shannon_entropy returns H(p)
    entropy_p = -1 * shannon_entropy(prob_dist_p, base=base, backend=backend)

    if base == 2:
        log_prob_q = np.where(prob_dist_q != 0.0, np.log2(prob_dist_q), -np.inf)
    elif base == 10:
        log_prob_q = np.where(prob_dist_q != 0.0, np.log10(prob_dist_q), -np.inf)
    elif base == np.e:
        log_prob_q = np.where(prob_dist_q != 0.0, np.log(prob_dist_q), -np.inf)
    else:
        log_prob_q = np.where(
            prob_dist_q != 0.0, np.log(prob_dist_q) / np.log(base), -np.inf
        )

    # terms with p(x) == 0 contribute 0 by convention, even where q(x) == 0
    log_prob = np.where(prob_dist_p != 0.0, log_prob_q, 0.0)

    relative = np.sum(prob_dist_p * log_prob)

    return entropy_p - relative
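
For reference, a minimal usage sketch of the new function (not part of the commit; the import path mirrors the test file below, and the NumPy cross-check merely restates the definition):

import numpy as np

from qibo.quantum_info.entropies import classical_relative_entropy

# two normalized discrete distributions
prob_p = np.array([0.5, 0.5])
prob_q = np.array([0.9, 0.1])

# KL divergence in base 2, with validation of both inputs
divergence = classical_relative_entropy(prob_p, prob_q, base=2, validate=True)

# cross-check against the definition D(p || q) = sum_x p(x) log2(p(x) / q(x))
target = np.sum(prob_p * np.log2(prob_p / prob_q))
assert np.isclose(divergence, target)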
67 changes: 66 additions & 1 deletion tests/test_quantum_info_entropies.py
Expand Up @@ -2,7 +2,12 @@
import pytest

from qibo.config import PRECISION_TOL
-from qibo.quantum_info.entropies import entanglement_entropy, entropy, shannon_entropy
+from qibo.quantum_info.entropies import (
+    classical_relative_entropy,
+    entanglement_entropy,
+    entropy,
+    shannon_entropy,
+)
from qibo.quantum_info.random_ensembles import random_statevector, random_unitary


@@ -168,3 +173,63 @@ def test_entanglement_entropy(backend, bipartition, base, check_hermitian):
        )

    backend.assert_allclose(entang_entrop, 0.0, atol=PRECISION_TOL)


@pytest.mark.parametrize("kind", [None, list])
@pytest.mark.parametrize("validate", [False, True])
@pytest.mark.parametrize("base", [2, 10, np.e, 5])
def test_classical_relative_entropy(backend, base, validate, kind):
with pytest.raises(TypeError):
prob = np.random.rand(1, 2)
prob_q = np.random.rand(1, 5)
prob = backend.cast(prob, dtype=prob.dtype)
prob_q = backend.cast(prob_q, dtype=prob_q.dtype)
test = classical_relative_entropy(prob, prob_q, backend=backend)
with pytest.raises(TypeError):
prob = np.random.rand(1, 2)[0]
prob_q = np.array([])
prob = backend.cast(prob, dtype=prob.dtype)
prob_q = backend.cast(prob_q, dtype=prob_q.dtype)
test = classical_relative_entropy(prob, prob_q, backend=backend)
with pytest.raises(ValueError):
prob = np.array([-1, 2.0])
prob_q = np.random.rand(1, 5)[0]
prob = backend.cast(prob, dtype=prob.dtype)
prob_q = backend.cast(prob_q, dtype=prob_q.dtype)
test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend)
with pytest.raises(ValueError):
prob = np.random.rand(1, 2)[0]
prob_q = np.array([1.0, 0.0])
prob = backend.cast(prob, dtype=prob.dtype)
prob_q = backend.cast(prob_q, dtype=prob_q.dtype)
test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend)
with pytest.raises(ValueError):
prob = np.array([1.0, 0.0])
prob_q = np.random.rand(1, 2)[0]
prob = backend.cast(prob, dtype=prob.dtype)
prob_q = backend.cast(prob_q, dtype=prob_q.dtype)
test = classical_relative_entropy(prob, prob_q, validate=True, backend=backend)
with pytest.raises(ValueError):
prob = np.array([1.0, 0.0])
prob_q = np.array([0.0, 1.0])
prob = backend.cast(prob, dtype=prob.dtype)
prob_q = backend.cast(prob_q, dtype=prob_q.dtype)
test = classical_relative_entropy(prob, prob_q, base=-2, backend=backend)

prob_p = np.random.rand(10)
prob_q = np.random.rand(10)
prob_p /= np.sum(prob_p)
prob_q /= np.sum(prob_q)

target = np.sum(prob_p * np.log(prob_p) / np.log(base)) - np.sum(
prob_p * np.log(prob_q) / np.log(base)
)

if kind is not None:
prob_p, prob_q = kind(prob_p), kind(prob_q)

divergence = classical_relative_entropy(
prob_p, prob_q, base=base, validate=validate, backend=backend
)

backend.assert_allclose(divergence, target, atol=1e-5)
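
As an aside (also not part of the commit), the zero-handling in the implementation follows the usual conventions for the KL divergence; a small sketch of both edge cases, assuming the default backend:

import numpy as np

from qibo.quantum_info.entropies import classical_relative_entropy

# terms with p(x) = 0 contribute nothing, even where q(x) = 0
assert np.isclose(
    classical_relative_entropy(np.array([0.0, 1.0]), np.array([0.0, 1.0])), 0.0
)

# p(x) > 0 where q(x) = 0 makes the divergence infinite
assert np.isinf(classical_relative_entropy(np.array([1.0, 0.0]), np.array([0.0, 1.0])))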
