diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst index c6b44d2645..0f55b2004d 100644 --- a/doc/source/api-reference/qibo.rst +++ b/doc/source/api-reference/qibo.rst @@ -930,6 +930,13 @@ Toffoli :members: :member-order: bysource +CCZ +""" + +.. autoclass:: qibo.gates.CCZ + :members: + :member-order: bysource + Deutsch """"""" diff --git a/doc/source/code-examples/applications-by-algorithm.rst b/doc/source/code-examples/applications-by-algorithm.rst index 3819ba99b3..4dfb64a6a2 100644 --- a/doc/source/code-examples/applications-by-algorithm.rst +++ b/doc/source/code-examples/applications-by-algorithm.rst @@ -76,4 +76,13 @@ Diagonalization Algorithms .. toctree:: :maxdepth: 1 - tutorials/dbi/dbi.ipynb + tutorials/dbi/README.md + + tutorials/dbi/dbi_cost_functions.ipynb + tutorials/dbi/dbi_gradient_descent_strategies.ipynb + tutorials/dbi/dbi_group_commutator_tests.ipynb + tutorials/dbi/dbi_scheduling.ipynb + tutorials/dbi/dbi_strategies_compare.ipynb + tutorials/dbi/dbi_strategy_Ising_model.ipynb + tutorials/dbi/dbi_strategy_Pauli-Z.ipynb + tutorials/dbi/dbi_tutorial_basic_intro.ipynb diff --git a/doc/source/code-examples/applications-by-topic.rst b/doc/source/code-examples/applications-by-topic.rst index 8d44557d48..500300e9b4 100644 --- a/doc/source/code-examples/applications-by-topic.rst +++ b/doc/source/code-examples/applications-by-topic.rst @@ -61,7 +61,6 @@ Quantum Physics tutorials/bell-variational/README.md tutorials/falqon/README.md tutorials/grover/README.md - tutorials/dbi/dbi.ipynb Quantum Machine Learning ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/code-examples/applications.rst b/doc/source/code-examples/applications.rst index 8787659178..9fa6976b54 100644 --- a/doc/source/code-examples/applications.rst +++ b/doc/source/code-examples/applications.rst @@ -69,8 +69,6 @@ Quantum Physics tutorials/bell-variational/README.md tutorials/falqon/README.md tutorials/grover/README.md - tutorials/dbi/dbi.ipynb - Quantum Machine Learning ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/code-examples/tutorials/dbi/README.md b/doc/source/code-examples/tutorials/dbi/README.md new file mode 120000 index 0000000000..50b9e9eaec --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/README.md @@ -0,0 +1 @@ +../../../../../examples/dbi/README.md \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi.ipynb b/doc/source/code-examples/tutorials/dbi/dbi.ipynb deleted file mode 120000 index 7deb426d68..0000000000 --- a/doc/source/code-examples/tutorials/dbi/dbi.ipynb +++ /dev/null @@ -1 +0,0 @@ -../../../../../examples/dbi/dbi.ipynb \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi_cost_functions.ipynb b/doc/source/code-examples/tutorials/dbi/dbi_cost_functions.ipynb new file mode 120000 index 0000000000..1c36a4d760 --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/dbi_cost_functions.ipynb @@ -0,0 +1 @@ +../../../../../examples/dbi/dbi_cost_functions.ipynb \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi_gradient_descent_strategies.ipynb b/doc/source/code-examples/tutorials/dbi/dbi_gradient_descent_strategies.ipynb new file mode 120000 index 0000000000..e3fbf989a9 --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/dbi_gradient_descent_strategies.ipynb @@ -0,0 +1 @@ +../../../../../examples/dbi/dbi_gradient_descent_strategies.ipynb \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi_group_commutator_tests.ipynb 
b/doc/source/code-examples/tutorials/dbi/dbi_group_commutator_tests.ipynb new file mode 120000 index 0000000000..174deb7e46 --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/dbi_group_commutator_tests.ipynb @@ -0,0 +1 @@ +../../../../../examples/dbi/dbi_group_commutator_tests.ipynb \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi_scheduling.ipynb b/doc/source/code-examples/tutorials/dbi/dbi_scheduling.ipynb new file mode 120000 index 0000000000..79275e8268 --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/dbi_scheduling.ipynb @@ -0,0 +1 @@ +../../../../../examples/dbi/dbi_scheduling.ipynb \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi_strategies_compare.ipynb b/doc/source/code-examples/tutorials/dbi/dbi_strategies_compare.ipynb new file mode 120000 index 0000000000..78936e1a6b --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/dbi_strategies_compare.ipynb @@ -0,0 +1 @@ +../../../../../examples/dbi/dbi_strategies_compare.ipynb \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi_strategy_Ising_model.ipynb b/doc/source/code-examples/tutorials/dbi/dbi_strategy_Ising_model.ipynb new file mode 120000 index 0000000000..f5ee663fd1 --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/dbi_strategy_Ising_model.ipynb @@ -0,0 +1 @@ +../../../../../examples/dbi/dbi_strategy_Ising_model.ipynb \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi_strategy_Pauli-Z.ipynb b/doc/source/code-examples/tutorials/dbi/dbi_strategy_Pauli-Z.ipynb new file mode 120000 index 0000000000..23a3561519 --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/dbi_strategy_Pauli-Z.ipynb @@ -0,0 +1 @@ +../../../../../examples/dbi/dbi_strategy_Pauli-Z.ipynb \ No newline at end of file diff --git a/doc/source/code-examples/tutorials/dbi/dbi_tutorial_basic_intro.ipynb b/doc/source/code-examples/tutorials/dbi/dbi_tutorial_basic_intro.ipynb new file mode 120000 index 0000000000..79ea4d0ea8 --- /dev/null +++ b/doc/source/code-examples/tutorials/dbi/dbi_tutorial_basic_intro.ipynb @@ -0,0 +1 @@ +../../../../../examples/dbi/dbi_tutorial_basic_intro.ipynb \ No newline at end of file diff --git a/examples/dbi/README.md b/examples/dbi/README.md new file mode 100644 index 0000000000..76640ff99f --- /dev/null +++ b/examples/dbi/README.md @@ -0,0 +1,58 @@ +# Double-bracket quantum algorithms + +Qibo features a model implementing double-bracket quantum algorithms (DBQAs), which are helpful for approximating eigenstates and rely only on the ability to run the evolution under the input Hamiltonian. + +More specifically, given a Hamiltonian $H_0$, how can we find a circuit which, after being applied to the reference state (usually $|0\rangle^{\otimes L}$ for $L$ qubits), will approximate an eigenstate? + +A standard way is to run variational quantum circuits. For example, Qibo already features the `VQE` model [2], which provides the implementation of the variational quantum eigensolver framework. +DBQAs go beyond VQE in that they take a different approach to compiling the quantum circuit approximating the eigenstate. + +## What is the unitary of DBQA? + +Given $H_0$, we begin by assuming that we are given a diagonal Hermitian operator $D_0$ and a time $s_0$. +The `dbi` module provides numerical strategies for selecting them.
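+For orientation, here is a minimal sketch of a single double-bracket rotation using the same `dbi` API as the notebooks in this directory; the diagonal operator `d` and the duration `step` below are illustrative hand-picked values, not optimized choices.
+
+```python
+from copy import deepcopy
+
+import numpy as np
+
+from qibo import hamiltonians, set_backend
+from qibo.models.dbi.double_bracket import (
+    DoubleBracketGeneratorType,
+    DoubleBracketIteration,
+)
+
+set_backend("numpy")
+
+nqubits = 5
+# input Hamiltonian: transverse-field Ising model
+H_0 = hamiltonians.TFIM(nqubits=nqubits, h=3.0)
+
+dbi = DoubleBracketIteration(
+    deepcopy(H_0), mode=DoubleBracketGeneratorType.single_commutator
+)
+print("Initial off-diagonal norm:", dbi.off_diagonal_norm)
+
+# hand-picked diagonal operator D and DBR duration s (illustrative values)
+d = np.diag(np.linspace(1, 2**nqubits, 2**nqubits))
+dbi(step=0.01, d=d)
+print("Off-diagonal norm after one DBR:", dbi.off_diagonal_norm)
+```
+
+The strategies described below replace the hand-picked `step` and `d` with optimized choices.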
+For any such choice we define the bracket +$$ W_0 = [D_0, H_0]$$ +and the double-bracket rotation (DBR) of the input Hamiltonian to time $s$ +$$H_0(s) = e^{sW_0} H_0 e^{-sW_0}$$ + +### Why are double-bracket rotations useful? +We can show that the magnitude of the off-diagonal terms will decrease. +For this let us set the notation that $\sigma(A)$ is the restriction to the off-diagonal part of the matrix $A$. +In `numpy` this can be implemented as `sigma(A) = A - np.diag(np.diag(A))`. In Qibo we implement this as +https://github.com/qiboteam/qibo/blob/8c9c610f5f2190b243dc9120a518a7612709bdbc/src/qibo/models/dbi/double_bracket.py#L145-L147 +which is part of the basic `DoubleBracketIteration` class in the `dbi` module. + +With this notation we next use the Hilbert-Schmidt scalar product and norm to measure the progress of diagonalization + $$||\sigma(H_0(s))||^2 - ||\sigma(H_0)||^2 = -2s \langle W, [H,\sigma(H)]\rangle + O(s^2)$$ +This equation tells us that as long as the scalar product $\langle W, [H,\sigma(H)]\rangle$ is positive, then after the DBR the magnitude of the off-diagonal couplings in $H_0(s)$ is smaller than in $H_0$. + +For the implementation of the DBR unitary $U_0(s) = e^{-s W_0}$ see +https://github.com/qiboteam/qibo/blob/363a6e5e689e5b907a7602bd1cc8d9811c60ee69/src/qibo/models/dbi/double_bracket.py#L68 + +### How to choose $D$? + +For theoretical considerations the canonical bracket is useful. +For this we need the dephasing channel $\Delta(H)$, which keeps only the diagonal of $H$ (in `numpy`: `np.diag(np.diag(H))`), and we set + $$M = [\Delta(H),\sigma(H)] = [H,\sigma(H)] = [\Delta(H),H]$$ + The canonical bracket appears on its own in the monotonicity relation above and gives an unconditional reduction of the magnitude of the off-diagonal terms + $$||\sigma(H_0(s))||^2 - ||\sigma(H_0)||^2 = -2s ||M||^2 + O(s^2)$$ +Beyond the canonical generator, the `dbi` module implements several parameterized families of $D$: +- the multi-qubit Pauli-Z generator $Z(\mu) = (Z_1)^{\mu_1}\ldots (Z_L)^{\mu_L}$, where we optimize over all binary strings $\mu\in \{0,1\}^L$ +- the magnetic field $D = \sum_i B_i Z_i$ +- the two-qubit Ising model $D = \sum_i B_i Z_i + \sum_{i,j} J_{i,j} Z_i Z_j$; please follow the tutorial by Matteo and use the Qibo Ising model for that with $h=0$ + + +### How to choose $s$? + +The theory above shows that in generic cases the DBR will have a linear diagonalization effect for small $s$ (as quantified by $||\sigma(H_0(s))||$). +This can be refined with a Taylor expansion, and the Qibo implementation comes with methods for fitting the first local minimum. +A grid search for the optimal step is provided for an exhaustive evaluation, hyperopt can be used for a more efficient 'unstructured' optimization, and simulated annealing is also available, which sometimes outperforms hyperopt (and grid search); see the example notebooks. +The latter methods may output DBR durations $s_k$ which correspond to secondary local minima. + + + + + +[1] https://arxiv.org/abs/2206.11772 + +[2] https://github.com/qiboteam/vqe-sun diff --git a/examples/dbi/dbi_cost_functions.ipynb b/examples/dbi/dbi_cost_functions.ipynb new file mode 100644 index 0000000000..015a441a94 --- /dev/null +++ b/examples/dbi/dbi_cost_functions.ipynb @@ -0,0 +1,349 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Double-bracket Iteration: other cost functions\n", + "\n", + "This notebook presents two additional cost functions for the double-bracket flow: least-squares and energy fluctuation, together with their respective scheduling methods."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from copy import deepcopy\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "from qibo import hamiltonians, set_backend\n", + "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketIteration, DoubleBracketCostFunction" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Least-squares\n", + "\n", + "The cost function is defined as $\frac{1}{2}||D-H_k||^2 =\frac{1}{2}(||D||^2+||H_k||^2) -\mathrm{Tr}(D H_k)$, i.e. the negative of the cost in https://epubs.siam.org/doi/abs/10.1137/S0036141092229732?journalCode=sjmael. We seek to minimize this function at each DBI iteration. For numerical optimization we also drop the $||H_k||^2$ term, since for a given Hamiltonian it is fixed throughout the flow.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Hamiltonian\n", + "set_backend(\"numpy\")\n", + "\n", + "# hamiltonian parameters\n", + "nqubits = 5\n", + "h = 3.0\n", + "\n", + "# define the hamiltonian\n", + "H_TFIM = hamiltonians.TFIM(nqubits=nqubits, h=h)\n", + "\n", + "# define the least-squares cost function\n", + "cost = DoubleBracketCostFunction.least_squares\n", + "# initialize class\n", + "dbi = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.single_commutator,cost=cost)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# generate data for plotting sigma decrease of the first step\n", + "d = np.diag(np.linspace(1,2**nqubits,2**nqubits))/2**nqubits\n", + "s_space = np.linspace(1e-5, 1.0, 500)\n", + "off_diagonal_norm_diff = []\n", + "potential = []\n", + "for s in s_space:\n", + "    dbi_eval = deepcopy(dbi)\n", + "    dbi_eval(s,d=d)\n", + "    off_diagonal_norm_diff.append(dbi_eval.off_diagonal_norm - dbi.off_diagonal_norm)\n", + "    potential.append(dbi_eval.least_squares(d=d))\n", + "\n", + "# grid_search\n", + "step_grid = dbi.choose_step(scheduling=DoubleBracketScheduling.grid_search,d=d)\n", + "print('grid_search step:', step_grid)\n", + "# hyperopt\n", + "step_hyperopt = dbi.choose_step(scheduling=DoubleBracketScheduling.hyperopt,d=d, max_evals=100, step_max=0.6)\n", + "print('hyperopt_search step:', step_hyperopt)\n", + "# polynomial\n", + "step_poly = dbi.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation,d=d, n=3)\n", + "print('polynomial_approximation step:', step_poly)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Plot the results\n", + "plt.figure()\n", + "plt.plot(s_space, potential)\n", + "plt.xlabel('s')\n", + "plt.axvline(x=step_grid, color='r', linestyle='-',label='grid_search')\n", + "plt.axvline(x=step_hyperopt, color='g', linestyle='--',label='hyperopt')\n", + "plt.axvline(x=step_poly, color='m', linestyle='-.',label='polynomial')\n", + "plt.title('First DBI step')\n", + "plt.ylabel('Least squares cost function')\n", + "plt.legend()\n", + "plt.figure()\n", + "plt.plot(s_space, off_diagonal_norm_diff)\n", + "plt.axvline(x=step_grid, color='r', linestyle='-',label='grid_search')\n", + "plt.axvline(x=step_hyperopt, color='g', linestyle='--',label='hyperopt')\n", + "plt.axvline(x=step_poly, color='m', linestyle='-.',label='polynomial')\n", + "plt.ylabel(r'$||\sigma(H_0)||-||\sigma(H_k)||$')\n", + "plt.xlabel('s')\n", +
"plt.title('First DBI step')\n", + "plt.legend()\n", + "print('The minimum for cost function in the tested range is:', step_grid)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Comparison of the least-squares cost function with the original cost function using the polynomial scheduling method" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d = np.diag(np.linspace(1,2**nqubits,2**nqubits))\n", + "off_diagonal_norm_diff = [dbi.off_diagonal_norm]\n", + "off_diagonal_norm_diff_least_squares = [dbi.off_diagonal_norm]\n", + "iters = 100\n", + "dbi_ls = deepcopy(dbi)\n", + "cost = DoubleBracketCostFunction.off_diagonal_norm\n", + "dbi_od = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.single_commutator,cost=cost)\n", + "for _ in range(iters):\n", + " step_poly = dbi_od.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation, d=d, n=3)\n", + " dbi_od(step_poly,d=d)\n", + " step_poly = dbi_ls.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation, d=d, n=3)\n", + " dbi_ls(step_poly,d=d)\n", + " off_diagonal_norm_diff.append(dbi_od.off_diagonal_norm)\n", + " off_diagonal_norm_diff_least_squares.append(dbi_ls.off_diagonal_norm)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure()\n", + "plt.plot(range(iters+1), off_diagonal_norm_diff, label=r'Off-diagonal norm')\n", + "plt.plot(range(iters+1), off_diagonal_norm_diff_least_squares, label=r'Least squares')\n", + "plt.xlabel('Iterations')\n", + "plt.ylabel(r'$||\\sigma(H_k)||$')\n", + "plt.legend()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Energy fluctuation\n", + "\n", + "This cost function is defined as: $\\Xi_k^2 (\\mu) = \\langle \\mu | H_k^2| \\mu \\rangle - \\langle \\mu | H_k| \\mu \\rangle^2$. We must specify the state $| \\mu \\rangle$ for which we want to minimize the fluctuation. 
Note that the overall diagonalization is not guaranteed by this cost function.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Hamiltonian\n", + "set_backend(\"numpy\")\n", + "\n", + "# hamiltonian parameters\n", + "nqubits = 3\n", + "h = 3.0\n", + "\n", + "# define the hamiltonian\n", + "H_TFIM = hamiltonians.TFIM(nqubits=nqubits, h=h)\n", + "\n", + "# define the energy fluctuation cost function\n", + "cost = DoubleBracketCostFunction.energy_fluctuation\n", + "# define the state\n", + "state = np.zeros(2**nqubits)\n", + "state[3] = 1\n", + "# initialize class\n", + "dbi = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.single_commutator,cost=cost, ref_state=state)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# generate data for plotting sigma decrease of the first step\n", + "d = np.diag(np.linspace(2**nqubits,1,2**nqubits))/2**nqubits\n", + "s_space = np.linspace(-1, 1, 1000)\n", + "off_diagonal_norm_diff = []\n", + "fluctuation = []\n", + "for s in s_space:\n", + "    dbi_eval = deepcopy(dbi)\n", + "    dbi_eval(s,d=d)\n", + "    off_diagonal_norm_diff.append(dbi_eval.off_diagonal_norm - dbi.off_diagonal_norm)\n", + "    fluctuation.append(dbi_eval.energy_fluctuation(state=state))\n", + "\n", + "# grid_search\n", + "step_grid = dbi.choose_step(scheduling=DoubleBracketScheduling.grid_search,d=d)\n", + "print('grid_search step:', step_grid)\n", + "# hyperopt\n", + "step_hyperopt = dbi.choose_step(scheduling=DoubleBracketScheduling.hyperopt,d=d, max_evals=100, step_max=0.6)\n", + "print('hyperopt_search step:', step_hyperopt)\n", + "# polynomial\n", + "step_poly = dbi.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation,d=d, n=3)\n", + "print('polynomial_approximation step:', step_poly)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Plot the results\n", + "plt.figure()\n", + "plt.plot(s_space, fluctuation)\n", + "plt.xlabel('s')\n", + "plt.axvline(x=step_grid, color='r', linestyle='-',label='grid_search')\n", + "plt.axvline(x=step_hyperopt, color='g', linestyle='--',label='hyperopt')\n", + "plt.axvline(x=step_poly, color='m', linestyle='-.',label='polynomial')\n", + "plt.title('First DBI step')\n", + "plt.ylabel('Energy fluctuation')\n", + "plt.legend()\n", + "plt.figure()\n", + "plt.plot(s_space, off_diagonal_norm_diff)\n", + "plt.axvline(x=step_grid, color='r', linestyle='-',label='grid_search')\n", + "plt.axvline(x=step_hyperopt, color='g', linestyle='--',label='hyperopt')\n", + "plt.axvline(x=step_poly, color='m', linestyle='-.',label='polynomial')\n", + "plt.ylabel(r'$||\sigma(H_0)||-||\sigma(H_k)||$')\n", + "plt.xlabel('s')\n", + "plt.title('First DBI step')\n", + "plt.legend()\n", + "print('The minimum for the cost function in the tested range is:', step_grid)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d = np.diag(np.linspace(1,2**nqubits,2**nqubits))\n", + "off_diagonal_norm_diff = [dbi.off_diagonal_norm]\n", + "energy_fluc = [dbi.energy_fluctuation(state=state)]\n", + "iters = 10\n", + "dbi_ = deepcopy(dbi)\n", + "for _ in range(iters):\n", + "    step_poly = dbi_.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation, d=d, n=3)\n", + "    dbi_(step_poly,d=d)\n", + "    off_diagonal_norm_diff.append(dbi_.off_diagonal_norm)\n", + "    energy_fluc.append(dbi_.energy_fluctuation(state=state))" + ]
}, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure()\n", + "plt.plot(range(iters+1), off_diagonal_norm_diff)\n", + "plt.xlabel('Iterations')\n", + "plt.ylabel(r'$||\sigma(H_k)||$')\n", + "\n", + "plt.figure()\n", + "plt.plot(range(iters+1), energy_fluc)\n", + "plt.xlabel('Iterations')\n", + "plt.ylabel(r'Energy fluctuation')\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "iters = 30\n", + "states = [0,1,2,3,4,5,6,7]\n", + "energy = np.empty((len(states),iters))\n", + "\n", + "\n", + "d = (np.diag(np.linspace(1,2**nqubits,2**nqubits)))\n", + "for i in range(len(states)):\n", + "    dbi_ = deepcopy(dbi)\n", + "    dbi_.state = states[i]\n", + "    for j in range(iters):\n", + "        step_poly = dbi_.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation, d=d, n=3)\n", + "        if step_poly is not None:\n", + "            dbi_(step_poly, d=d)\n", + "        energy[i,j] = np.real(dbi_.h.matrix[states[i],states[i]])\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "eigvals = np.linalg.eigh(dbi_.h.matrix)[0]\n", + "print('Eigenvalues:', eigvals)\n", + "plt.figure()\n", + "for i in range(len(states)):\n", + "    plt.plot(range(iters), energy[i,:],'.', label='State ' + str(states[i]))\n", + "# mark each exact eigenvalue with a horizontal line\n", + "for eigval in eigvals:\n", + "    plt.axhline(y=eigval, color='r', linestyle='--')\n", + "plt.xlabel('Iterations')\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "test_qibo", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/dbi/dbi_gradient_descent_strategies.ipynb b/examples/dbi/dbi_gradient_descent_strategies.ipynb new file mode 100644 index 0000000000..58b6ec470b --- /dev/null +++ b/examples/dbi/dbi_gradient_descent_strategies.ipynb @@ -0,0 +1,343 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Double-bracket Gradient Descent Strategies\n", + "This notebook demonstrates the gradient descent strategies for double-bracket rotations. The method uses numerical differentiation to find the gradient of the cost function with respect to the diagonal operator, and thereby varies the diagonal operator of the rotation.\n", + "\n", + "Finding the gradient requires the parameterization of the diagonal operator, and there are two ways of doing so:\n", + "\n", + "1. Pauli-basis: $D(B,J)= \sum B_i Z_i + \sum J_{ij}Z_iZ_j + ...$\n", + "2. 
Computational-basis: $D(A)=\sum A_i|i\rangle\langle i|$" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from qibo.models.dbi.double_bracket import DoubleBracketIteration, DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketCostFunction\n", + "from qibo.models.dbi.utils import generate_pauli_operator_dict, decompose_into_pauli_basis, params_to_diagonal_operator, ParameterizationTypes\n", + "from copy import deepcopy\n", + "from qibo.models.dbi.utils_dbr_strategies import gradient_descent\n", + "import numpy as np\n", + "from qibo import set_backend, hamiltonians\n", + "from qibo.hamiltonians import Hamiltonian\n", + "from qibo.quantum_info import random_hermitian\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def visualize_matrix(matrix, title=\"\"):\n", + "    \"\"\"Visualize hamiltonian in a heatmap form.\"\"\"\n", + "    fig, ax = plt.subplots(figsize=(5,5))\n", + "    ax.set_title(title)\n", + "    try:\n", + "        im = ax.imshow(np.absolute(matrix), cmap=\"inferno\")\n", + "    except TypeError:\n", + "        im = ax.imshow(np.absolute(matrix.get()), cmap=\"inferno\")\n", + "    fig.colorbar(im, ax=ax)\n", + "\n", + "def s_hist_to_plot(s_hist):\n", + "    # convert the list of step durations taken into a plottable cumulative sum\n", + "    s_plot = [0] * len(s_hist)\n", + "    for i in range(len(s_hist)):\n", + "        if i != 0:\n", + "            s_plot[i] = s_plot[i-1] + s_hist[i]\n", + "    return s_plot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Random Hamiltonian" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# set the qibo backend (we suggest qibojit if N >= 20)\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", + "\n", + "# hamiltonian parameters\n", + "nqubits = 5\n", + "seed = 10\n", + "\n", + "# define the hamiltonian\n", + "h0 = random_hermitian(2**nqubits, seed=seed)\n", + "dbi = DoubleBracketIteration(\n", + "    Hamiltonian(nqubits, h0),\n", + "    mode=DoubleBracketGeneratorType.single_commutator,\n", + "    scheduling=DoubleBracketScheduling.hyperopt,\n", + "    cost=DoubleBracketCostFunction.off_diagonal_norm\n", + ")\n", + "# visualize the matrix\n", + "visualize_matrix(dbi.h.matrix, title=\"Target hamiltonian\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then we set up the required parameters for gradient descent."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Pauli-basis\n", + "pauli_operator_dict = generate_pauli_operator_dict(nqubits)\n", + "pauli_operators = list(pauli_operator_dict.values())\n", + "# let the initial d be an approximation of $\Delta(H)$\n", + "d_coef_pauli = decompose_into_pauli_basis(dbi.diagonal_h_matrix, pauli_operators=pauli_operators)\n", + "d_pauli = sum([d_coef_pauli[i]*pauli_operators[i] for i in range(nqubits)])\n", + "\n", + "# Computational basis\n", + "d_coef_computational_partial = d_pauli.diagonal()\n", + "d_coef_computational_full = dbi.diagonal_h_matrix.diagonal()\n", + "d_computational_partial = params_to_diagonal_operator(d_coef_computational_partial, nqubits, ParameterizationTypes.computational, normalize=False)\n", + "d_computational_full = params_to_diagonal_operator(d_coef_computational_full, nqubits, ParameterizationTypes.computational, normalize=False)\n", + "\n", + "plt.plot(d_coef_computational_partial, label=\"computational basis partial\")\n", + "plt.plot(d_coef_computational_full, label=r\"computational basis full = $\Delta(H)$\")\n", + "plt.legend()\n", + "plt.title(r\"Diagonal entries of $D$\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we want to compare three scenarios:\n", + "\n", + "1. Pauli-basis: an approximation to the diagonal of $H$\n", + "2. Computational-partial: the same operator as 1., parameterized in the computational basis.\n", + "3. Computational-full: a full parameterization of the diagonal of $H$ in the computational basis." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 1. Pauli-basis\n", + "NSTEPS = 5\n", + "dbi_pauli = deepcopy(dbi)\n", + "loss_hist_pauli, d_params_hist_pauli, s_hist_pauli = gradient_descent(dbi_pauli, NSTEPS, d_coef_pauli, ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 2. Computational_partial\n", + "dbi_computational_partial = deepcopy(dbi)\n", + "loss_hist_computational_partial, d_params_hist_computational_partial, s_computational_partial = gradient_descent(dbi_computational_partial, NSTEPS, d_coef_computational_partial, ParameterizationTypes.computational)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 3. 
Computational_full\n", + "dbi_computational_full = deepcopy(dbi)\n", + "loss_hist_computational_full, d_params_hist_computational_full, s_computational_full = gradient_descent(dbi_computational_full, NSTEPS, d_coef_computational_full, ParameterizationTypes.computational)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "s_plot_pauli = s_hist_to_plot(s_hist_pauli)\n", + "s_plot_computational_partial = s_hist_to_plot(s_computational_partial)\n", + "s_plot_computational_full = s_hist_to_plot(s_computational_full)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.plot(s_plot_pauli, loss_hist_pauli, label=\"pauli basis\", marker=\"o\")\n", + "plt.plot(s_plot_computational_partial, loss_hist_computational_partial, label=\"computational partial\", marker=\"o\")\n", + "plt.plot(s_plot_computational_full, loss_hist_computational_full, label=\"computational full\", marker=\"o\")\n", + "plt.legend()\n", + "plt.title(\"Off-diagonal norm\")\n", + "plt.ylabel(r\"$||\\sigma(H)||_{HS}$\")\n", + "plt.xlabel(\"s\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# TFIM" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# hamiltonian parameters\n", + "nqubits = 5\n", + "h = 3\n", + "\n", + "# define the hamiltonian\n", + "h = hamiltonians.TFIM(nqubits=nqubits, h=h)\n", + "dbi = DoubleBracketIteration(\n", + " h,\n", + " mode=DoubleBracketGeneratorType.single_commutator,\n", + " scheduling=DoubleBracketScheduling.hyperopt\n", + ")\n", + "# vosualize the matrix\n", + "visualize_matrix(dbi.h.matrix, title=\"Target hamiltonian\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Pauli-basis\n", + "pauli_operator_dict = generate_pauli_operator_dict(nqubits)\n", + "pauli_operators = list(pauli_operator_dict.values())\n", + "# let initial d be approximation of $\\Delta(H)\n", + "d_coef_pauli = decompose_into_pauli_basis(dbi.diagonal_h_matrix, pauli_operators=pauli_operators)\n", + "d_pauli = sum([d_coef_pauli[i]*pauli_operators[i] for i in range(nqubits)])\n", + "\n", + "# Computational basis\n", + "d_coef_computational_partial = d_pauli.diagonal()\n", + "d_coef_computational_full = dbi.diagonal_h_matrix.diagonal()\n", + "d_computational_partial = params_to_diagonal_operator(d_coef_computational_partial, nqubits, ParameterizationTypes.computational, normalize=False)\n", + "d_computational_full = params_to_diagonal_operator(d_coef_computational_full, nqubits, ParameterizationTypes.computational, normalize=False)\n", + "\n", + "plt.plot(d_coef_computational_partial, label=\"computational basis partial\")\n", + "plt.plot(d_coef_computational_full, label=r\"computational basis full = $\\Delta(H)$\")\n", + "plt.legend()\n", + "plt.title(r\"Diagonal entries of $D$\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 1. Pauli-basis\n", + "NSTEPS = 3\n", + "dbi_pauli = deepcopy(dbi)\n", + "loss_hist_pauli, d_params_hist_pauli, s_hist_pauli = gradient_descent(dbi_pauli, NSTEPS, d_coef_pauli, ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 2. 
Computational_partial\n", + "dbi_computational_partial = deepcopy(dbi)\n", + "loss_hist_computational_partial, d_params_hist_computational_partiali, s_computational_partial = gradient_descent(dbi_computational_partial, NSTEPS, d_coef_computational_partial, ParameterizationTypes.computational)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 3. Computational_full\n", + "dbi_computational_full = deepcopy(dbi)\n", + "loss_hist_computational_full, d_params_hist_computational_full, s_computational_full = gradient_descent(dbi_computational_full, NSTEPS, d_coef_computational_full, ParameterizationTypes.computational)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "s_plot_pauli = s_hist_to_plot(s_hist_pauli)\n", + "s_plot_computational_partial = s_hist_to_plot(s_computational_partial)\n", + "s_plot_computational_full = s_hist_to_plot(s_computational_full)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.plot(s_plot_pauli, loss_hist_pauli, label=\"pauli basis\", marker=\"o\")\n", + "plt.plot(s_plot_computational_partial, loss_hist_computational_partial, label=\"computational partial\", marker=\"o\")\n", + "plt.plot(s_plot_computational_full, loss_hist_computational_full, label=\"computational full\", marker=\"o\")\n", + "plt.legend()\n", + "plt.title(\"Off-diagonal norm\")\n", + "plt.ylabel(r\"$||\\sigma(H)||_{HS}$\")\n", + "plt.xlabel(\"s\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After changing the cost function and scheduling method, we notice that quite consistently, the Pauli-based parameterization diagonalizes the hamiltonian the best, and for the first few iterations, the Computational-based partial (same initial operator as Pauli) performs very similarly, and diverges later on." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "nqubits = 3\n", + "pauli_operator_dict = generate_pauli_operator_dict(\n", + " nqubits, parameterization_order=1\n", + ")\n", + "params = [1, 2, 3]\n", + "operator_pauli = sum([\n", + " params[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)\n", + "])\n", + "assert (\n", + " operator_pauli\n", + " == params_to_diagonal_operator(\n", + " params, nqubits=nqubits, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict\n", + " )\n", + ").all()\n", + "operator_element = params_to_diagonal_operator(\n", + " params, nqubits=nqubits, parameterization=ParameterizationTypes.computational\n", + ")\n", + "assert (operator_element.diagonal() == params).all()" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/dbi/dbi_group_commutator_tests.ipynb b/examples/dbi/dbi_group_commutator_tests.ipynb new file mode 100644 index 0000000000..3d1d615626 --- /dev/null +++ b/examples/dbi/dbi_group_commutator_tests.ipynb @@ -0,0 +1,122 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from copy import deepcopy\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "from qibo import hamiltonians, set_backend\n", + "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketIteration, DoubleBracketCostFunction" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Hamiltonian\n", + "set_backend(\"numpy\")\n", + "\n", + "# hamiltonian parameters\n", + "nqubits = 5\n", + "h = 3.0\n", + "\n", + "# define the hamiltonian\n", + "H_TFIM = hamiltonians.TFIM(nqubits=nqubits, h=h)\n", + "\n", + "# define the least-squares cost function\n", + "cost = DoubleBracketCostFunction.least_squares\n", + "# initialize class\n", + "dbi = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.group_commutator,cost=cost)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d = np.diag(np.linspace(1,2**nqubits,2**nqubits))\n", + "s_space = np.linspace(1e-5, 0.3, 500)\n", + "r = np.array([1,2,4,8])\n", + "off_diagonal_norm_diff = np.empty((500,len(r)+1))\n", + "\n", + "for s in range(len(s_space)):\n", + " for i in range(len(r)):\n", + " dbi_eval = deepcopy(dbi)\n", + " dbi_eval.mode = DoubleBracketGeneratorType.group_commutator\n", + " for j in range(r[i]):\n", + " dbi_eval(np.sqrt(s_space[s]/r[i]),d=d)\n", + " off_diagonal_norm_diff[s,i+1] = dbi_eval.off_diagonal_norm\n", + " dbi_eval = deepcopy(dbi)\n", + " dbi_eval.mode = DoubleBracketGeneratorType.single_commutator\n", + " dbi_eval(s_space[s],d=d)\n", + " off_diagonal_norm_diff[s,0] = dbi_eval.off_diagonal_norm\n", + "\n", + "\n", + "\n", + "plt.figure()\n", + "plt.plot(s_space, off_diagonal_norm_diff[:,0],label=r'$e^{sW}$')\n", + "for i in range(len(r)):\n", + " plt.plot(s_space, off_diagonal_norm_diff[:,i+1],label=r'$V_{GC}, r = $' + str(r[i]))\n", + "plt.xlabel('s')\n", + "plt.ylabel('off-diagonal norm')\n", + "plt.legend()\n", + "\n", + "plt.figure()\n", + "for i in range(len(r)):\n", + " plt.plot(s_space, off_diagonal_norm_diff[:,i+1]-off_diagonal_norm_diff[:,0],label=r'$V_{GC}, r = $' + str(r[i]))\n", + "plt.xlabel('s')\n", + "plt.ylabel('Difference of the off diagonal norm between 
$V_{GC}$ and $e^{sW}$')\n", + "plt.legend()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d = np.diag(np.linspace(1,2**nqubits,2**nqubits))\n", + "flows = 30\n", + "r = np.array([1,2,4,8])\n", + "off_diagonal_norm_diff = np.empty((1+flows,len(r)+1))\n", + "s = np.empty(flows)\n", + "dbi_eval = deepcopy(dbi)\n", + "off_diagonal_norm_diff[0,:] = dbi_eval.off_diagonal_norm\n", + "for i in range(flows):\n", + "    dbi_eval.mode = DoubleBracketGeneratorType.single_commutator\n", + "    s[i] = dbi_eval.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation,d=d, n=3)\n", + "    dbi_eval(s[i],d=d)\n", + "    off_diagonal_norm_diff[i+1,0] = dbi_eval.off_diagonal_norm\n", + "\n", + "for j in range(len(r)):\n", + "    dbi_eval = deepcopy(dbi)\n", + "    dbi_eval.mode = DoubleBracketGeneratorType.group_commutator\n", + "    for i in range(flows):\n", + "        for k in range(r[j]):\n", + "            dbi_eval(np.sqrt(s[i]/r[j]),d=d)\n", + "        off_diagonal_norm_diff[i+1,j+1] = dbi_eval.off_diagonal_norm\n", + "\n", + "plt.figure()\n", + "plt.plot(off_diagonal_norm_diff[:,0],label=r'$e^{sW}$')\n", + "for i in range(len(r)):\n", + "    plt.plot(off_diagonal_norm_diff[:,i+1],label=r'$V_{GC}, r = $' + str(r[i]))\n", + "plt.xlabel('flow iteration')\n", + "plt.ylabel('off-diagonal norm')\n", + "plt.legend()" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/dbi/dbi_scheduling.ipynb b/examples/dbi/dbi_scheduling.ipynb new file mode 100644 index 0000000000..fb9d349ebf --- /dev/null +++ b/examples/dbi/dbi_scheduling.ipynb @@ -0,0 +1,455 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Double-bracket Iteration Scheduling Strategies\n", + "\n", + "This notebook presents the different strategies for scheduling the step durations for the double-bracket iteration algorithm and their respective accuracies." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Import the dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from copy import deepcopy\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "from qibo import hamiltonians, set_backend\n", + "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketIteration\n", + "from qibo.hamiltonians import SymbolicHamiltonian\n", + "from qibo.models.dbi.utils import str_to_symbolic, generate_Z_operators\n", + "from qibo.models.dbi.utils_scheduling import polynomial_step\n", + "from qibo.models.dbi.utils_dbr_strategies import select_best_dbr_generator" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Canonical\n", + "Set up the basic test case with the transverse-field Ising model Hamiltonian and the canonical bracket as the generator."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Hamiltonian\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", + "\n", + "# hamiltonian parameters\n", + "nqubits = 5\n", + "h = 3\n", + "\n", + "# define the hamiltonian\n", + "H_TFIM = hamiltonians.TFIM(nqubits=nqubits, h=h)\n", + "\n", + "# initialize class\n", + "dbi = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.canonical)\n", + "print(\"Initial off-diagonal norm\", dbi.off_diagonal_norm)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We first run a sweep over step durations to map the off-diagonal norm in this range." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# generate data for plotting sigma decrease of the first step\n", + "s_space = np.linspace(1e-5, 0.6, 100)\n", + "off_diagonal_norm_diff = []\n", + "for s in s_space:\n", + "    dbi_eval = deepcopy(dbi)\n", + "    dbi_eval(s)\n", + "    off_diagonal_norm_diff.append(dbi_eval.off_diagonal_norm - dbi.off_diagonal_norm)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The default scheduling strategy is grid search: `DoubleBracketScheduling.grid_search`. This strategy specifies a list of step durations to test one by one and finds the one that minimizes the cost function (the off-diagonal norm of the Hamiltonian)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# grid_search\n", + "step_grid = dbi.choose_step(scheduling=DoubleBracketScheduling.grid_search)\n", + "print('grid_search step:', step_grid)\n", + "# hyperopt\n", + "step_hyperopt = dbi.choose_step(scheduling=DoubleBracketScheduling.hyperopt, max_evals=100, step_max=0.6)\n", + "print('hyperopt_search step:', step_hyperopt)\n", + "step_poly = dbi.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation, n=5)\n", + "print('polynomial_approximation step:', step_poly)\n", + "step_sa = dbi.choose_step(scheduling=DoubleBracketScheduling.simulated_annealing)\n", + "print('simulated_annealing step:', step_sa)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Plot the results\n", + "plt.plot(s_space, off_diagonal_norm_diff)\n", + "plt.axvline(x=step_grid, color='r', linestyle='-',label='grid_search')\n", + "plt.axvline(x=step_hyperopt, color='g', linestyle='--',label='hyperopt')\n", + "plt.axvline(x=step_poly, color='m', linestyle='-.',label='polynomial')\n", + "plt.axvline(x=step_sa, color='b', linestyle=':',label='simulated annealing')\n", + "plt.ylabel(r'$||\sigma(H_0)||-||\sigma(H_k)||$')\n", + "plt.xlabel('s')\n", + "plt.title('First DBI step')\n", + "plt.legend()\n", + "print('The minimum for the cost function in the tested range is:', step_grid)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Specified diagonal operator\n", + "\n", + "While for the canonical case all the scheduling methods are accurate, it is important to realize that the global minimum of the loss function is not always so obvious. It is thus necessary to check whether the three methods converge to an agreeable step duration for different iteration generators, such as the Pauli 'ZZ..Z' and 'ZZ..I' operators."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Generate the diagonal operators\n", + "Z_str = \"Z\"*nqubits\n", + "ZI_str = \"Z\"*(nqubits-1)+\"I\"\n", + "Z_op = SymbolicHamiltonian(str_to_symbolic(Z_str)).dense.matrix\n", + "ZI_op = SymbolicHamiltonian(str_to_symbolic(ZI_str)).dense.matrix\n", + "op_dict = {Z_str:Z_op, ZI_str: ZI_op}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dbi = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.single_commutator)\n", + "d_str = ZI_str\n", + "d = op_dict[d_str]\n", + "# generate data for plotting sigma decrease of the first step\n", + "s_space = np.linspace(1e-5, 0.6, 100)\n", + "off_diagonal_norm_diff = []\n", + "for s in s_space:\n", + "    dbi_eval = deepcopy(dbi)\n", + "    dbi_eval(s,d=d)\n", + "    off_diagonal_norm_diff.append(dbi_eval.off_diagonal_norm - dbi.off_diagonal_norm)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# grid_search\n", + "step_grid = dbi.choose_step(scheduling=DoubleBracketScheduling.grid_search, step_max=0.6, d=d)\n", + "grid_min = dbi.loss(step=step_grid, d=d)-dbi.off_diagonal_norm\n", + "print('grid_search step:', step_grid, 'loss', grid_min)\n", + "# hyperopt\n", + "step_hyperopt = dbi.choose_step(scheduling=DoubleBracketScheduling.hyperopt, d=d, max_evals=100, step_max=0.6)\n", + "hyperopt_min = dbi.loss(step=step_hyperopt, d=d)-dbi.off_diagonal_norm\n", + "print('hyperopt_search step:', step_hyperopt, 'loss', hyperopt_min)\n", + "# polynomial expansion\n", + "step_poly = dbi.choose_step(scheduling=DoubleBracketScheduling.polynomial_approximation, d=d, n=5)\n", + "poly_min = dbi.loss(step=step_poly, d=d)-dbi.off_diagonal_norm\n", + "print('polynomial_approximation step:', step_poly, 'loss', poly_min)\n", + "# simulated annealing\n", + "step_sa = dbi.choose_step(scheduling=DoubleBracketScheduling.simulated_annealing, d=d)\n", + "sa_min = dbi.loss(step=step_sa, d=d)-dbi.off_diagonal_norm\n", + "print('simulated_annealing step:', step_sa, 'loss', sa_min)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Plot the results\n", + "plt.plot(s_space, off_diagonal_norm_diff)\n", + "plt.axvline(x=step_grid, color='r', linestyle='-',label='grid_search')\n", + "plt.text(x=step_grid, y=grid_min, s=f'grid min \\n{round(grid_min,3)}')\n", + "plt.text(x=step_poly, y=poly_min, s=f'poly min \\n{round(poly_min,3)}')\n", + "plt.text(x=step_sa, y=sa_min, s=f'sa min \\n{round(sa_min,3)}')\n", + "plt.axvline(x=step_hyperopt, color='g', linestyle='--',label='hyperopt')\n", + "plt.axvline(x=step_poly, color='m', linestyle='-.',label='polynomial')\n", + "plt.axvline(x=step_sa, color='b', linestyle=':',label='simulated annealing')\n", + "plt.ylabel(r'$||\sigma(H_0)||-||\sigma(H_k)||$')\n", + "plt.xlabel('s')\n", + "plt.title(f'First DBI step with D={d_str}')\n", + "plt.legend()\n", + "print('The minimum for the cost function in the tested range is:', step_grid)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We see that there are two similar \"minimal points\" at 0.03 and 0.22, with the latter being the absolute minimum by an insignificant margin. However, for practical reasons, we prefer taking the first, nearby minimum calculated by the polynomial approximation. 
Hence, we can use the polynomial approximation to restrict the search area and obtain better results. For example, we define a search range of 0.1 around the polynomial step." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Use polynomial expansion as a restriction for the hyperopt/grid range" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "search_range = 0.1\n", + "if step_poly < search_range/2:\n", + "    step_min = 0\n", + "    step_max = search_range\n", + "else:\n", + "    step_min = step_poly - search_range/2\n", + "    step_max = step_poly + search_range/2\n", + "# grid_search\n", + "step_grid = dbi.choose_step(scheduling=DoubleBracketScheduling.grid_search, step_min=step_min, step_max=step_max, d=d)\n", + "print('grid_search step:', step_grid)\n", + "# hyperopt\n", + "step_hyperopt = dbi.choose_step(scheduling=DoubleBracketScheduling.hyperopt, step_min=step_min, step_max=step_max, max_evals=100, d=d,)\n", + "print('hyperopt_search step:', step_hyperopt)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Plot the results\n", + "plt.plot(s_space, off_diagonal_norm_diff)\n", + "plt.axvline(x=step_grid, color='r', linestyle='-',label='grid_search')\n", + "plt.axvline(x=step_hyperopt, color='g', linestyle='--',label='hyperopt')\n", + "plt.axvline(x=step_poly, color='m', linestyle='-.',label='polynomial')\n", + "plt.ylabel(r'$||\sigma(H_0)||-||\sigma(H_k)||$')\n", + "plt.xlabel('s')\n", + "plt.title(r'Restrict $s$ with polynomial')\n", + "plt.legend()\n", + "print('The minimum for the cost function in the tested range is:', step_grid)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We see that this strategy is indeed effective for finding the first minimum of the loss function, for both the Z operator and the ZI operator."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Comparison within the Pauli-Z strategy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from qibo.quantum_info import random_hermitian\n", + "from qibo.hamiltonians import Hamiltonian" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Hamiltonian\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", + "nqubits = 4\n", + "h0 = random_hermitian(2**nqubits)\n", + "\n", + "# initialize class\n", + "dbi = DoubleBracketIteration(deepcopy(Hamiltonian(nqubits=nqubits, matrix=h0)),mode=DoubleBracketGeneratorType.single_commutator)\n", + "print(\"Initial off-diagonal norm\", dbi.off_diagonal_norm)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "generate_local_Z = generate_Z_operators(nqubits)\n", + "Z_ops = list(generate_local_Z.values())\n", + "Z_names = list(generate_local_Z.keys())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "NSTEPS = 3\n", + "scheduling_list = [DoubleBracketScheduling.grid_search,\n", + "                   DoubleBracketScheduling.hyperopt,\n", + "                   DoubleBracketScheduling.polynomial_approximation,\n", + "                   DoubleBracketScheduling.simulated_annealing,]\n", + "scheduling_labels = ['grid search',\n", + "                     'hyperopt',\n", + "                     'polynomial',\n", + "                     'simulated_annealing']\n", + "Z_optimal_scheduling = []\n", + "s_scheduling = []\n", + "off_norm_scheduling = []\n", + "for i,scheduling in enumerate(scheduling_list):\n", + "    # reinitialize\n", + "    dbi = DoubleBracketIteration(Hamiltonian(nqubits=nqubits, matrix=deepcopy(h0)), mode=DoubleBracketGeneratorType.single_commutator)\n", + "    Z_optimal = []\n", + "    # add in initial values for plotting\n", + "    off_diagonal_norm_history = [dbi.off_diagonal_norm]\n", + "    steps = [0]\n", + "    print(f'----------Scheduling {scheduling_labels[i]}----------')\n", + "    for _ in range(NSTEPS):\n", + "        dbi, idx, step, flip_sign = select_best_dbr_generator(dbi, Z_ops, scheduling=scheduling, compare_canonical=False)\n", + "        off_diagonal_norm_history.append(dbi.off_diagonal_norm)\n", + "        steps.append(steps[-1]+step)\n", + "        if flip_sign < 0:\n", + "            Z_optimal.append('-' + Z_names[idx])\n", + "        else:\n", + "            Z_optimal.append(Z_names[idx])\n", + "        print(f\"New optimized step at iteration {_+1}/{NSTEPS}: {step} with operator {Z_optimal[-1]}, loss {dbi.off_diagonal_norm}\")\n", + "    Z_optimal_scheduling.append(Z_optimal)\n", + "    s_scheduling.append(steps)\n", + "    off_norm_scheduling.append(off_diagonal_norm_history)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure()\n", + "for i, scheduling in enumerate(scheduling_labels):\n", + "    plt.plot(s_scheduling[i], off_norm_scheduling[i], '-o', label=scheduling)\n", + "plt.xlabel(\"Step durations\")\n", + "plt.ylabel(\"Off-diagonal norm\")\n", + "plt.title(\"Variational Pauli-Z with different scheduling strategies\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## When polynomial approximation has no solution\n", + "\n", + "In some cases, the prescribed Taylor expansion order `n` may not be sufficient to produce a meaningful step duration (real and positive). In these cases, we rely on a backup scheduling method in `choose_step`."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Hamiltonian\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", + "\n", + "# hamiltonian parameters\n", + "nqubits = 5\n", + "h = 3\n", + "\n", + "# define the hamiltonian\n", + "H_TFIM = hamiltonians.TFIM(nqubits=nqubits, h=h)\n", + "\n", + "# initialize class\n", + "dbi = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.canonical)\n", + "dbi.scheduling = DoubleBracketScheduling.polynomial_approximation\n", + "print(\"Initial off-diagonal norm\", dbi.off_diagonal_norm)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For demonstration purposes, we let `n=1`, which is a linear fit to the loss function. This results in no valid solution, and the function `polynomial_step` returns `None`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for n in range(5):\n", + "    step = polynomial_step(dbi, n=n)\n", + "    print(n, step)\n", + "print(dbi.choose_step(n=1))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/dbi/dbi_strategies_compare.ipynb b/examples/dbi/dbi_strategies_compare.ipynb new file mode 100644 index 0000000000..4a60ea3034 --- /dev/null +++ b/examples/dbi/dbi_strategies_compare.ipynb @@ -0,0 +1,464 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# DBI strategies comparison\n", + "\n", + "This notebook is a comparison of the diagonalization strategies developed so far for DBI, including the canonical, Pauli-Z, and magnetic field strategies. On top of these, we also showcase the use of invariant DBI generators such as 'BHMM'."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "from copy import deepcopy\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from qibo import symbols\n", + "from qibo import hamiltonians, set_backend\n", + "from qibo.hamiltonians import Hamiltonian, SymbolicHamiltonian\n", + "from qibo.quantum_info import random_hermitian\n", + "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketIteration, DoubleBracketCostFunction\n", + "from qibo.models.dbi.utils import generate_Z_operators, generate_pauli_operator_dict, decompose_into_pauli_basis, ParameterizationTypes\n", + "from qibo.models.dbi.utils_dbr_strategies import select_best_dbr_generator, gradient_descent, polynomial_step" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def visualize_matrix(matrix, title=\"\"):\n", + "    \"\"\"Visualize hamiltonian in a heatmap form.\"\"\"\n", + "    fig, ax = plt.subplots(figsize=(5,5))\n", + "    ax.set_title(title)\n", + "    try:\n", + "        im = ax.imshow(np.absolute(matrix), cmap=\"inferno\")\n", + "    except TypeError:\n", + "        im = ax.imshow(np.absolute(matrix.get()), cmap=\"inferno\")\n", + "    fig.colorbar(im, ax=ax)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test on random Hamiltonian\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# backend\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", + "# initialize dbi object\n", + "nqubits = 5\n", + "h0 = random_hermitian(2**nqubits, seed=2)\n", + "dbi = DoubleBracketIteration(Hamiltonian(nqubits=nqubits, matrix=h0))\n", + "cost = DoubleBracketCostFunction.off_diagonal_norm\n", + "print(\"Initial loss\", dbi.least_squares(d=dbi.diagonal_h_matrix))\n", + "visualize_matrix(dbi.h.matrix, title=f'Random Hamiltonian with L={nqubits}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# number of iteration steps\n", + "NSTEPS = 5\n", + "# choose the scheduling method\n", + "scheduling = DoubleBracketScheduling.simulated_annealing" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Canonical" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize DBI class for the canonical case\n", + "dbi_canonical = DoubleBracketIteration(Hamiltonian(nqubits=nqubits, matrix=h0), mode=DoubleBracketGeneratorType.canonical, scheduling=scheduling, cost=cost)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Canonical\n", + "loss_history_canonical = [dbi_canonical.off_diagonal_norm]\n", + "steps_canonical_plot = [0]\n", + "for s in range(NSTEPS):\n", + "    # same settings as iteration from list\n", + "    d = dbi.diagonal_h_matrix\n", + "    step = dbi_canonical.choose_step(d=d)\n", + "    dbi_canonical(step=step)\n", + "    print(f\"New optimized step at iteration {s+1}/{NSTEPS}: {step}, loss {dbi_canonical.off_diagonal_norm}\")\n", + "    loss_history_canonical.append(dbi_canonical.off_diagonal_norm)\n", + "    steps_canonical_plot.append(steps_canonical_plot[-1]+step)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pauli-Z" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], +
"source": [ + "# initialize DBI class for the Pauli-Z strategy\n", + "dbi_pauli = DoubleBracketIteration(Hamiltonian(nqubits=nqubits, matrix=h0), mode=DoubleBracketGeneratorType.single_commutator, scheduling=scheduling, cost=cost)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "generate_local_Z = generate_Z_operators(nqubits)\n", + "Z_ops = list(generate_local_Z.values())\n", + "Z_names = list(generate_local_Z.keys())\n", + "Z_optimal = []\n", + "# add in initial values for plotting\n", + "loss_history_pauli = [dbi_pauli.off_diagonal_norm]\n", + "steps_pauli_plot = [0]\n", + "scheduling = DoubleBracketScheduling.simulated_annealing\n", + "for _ in range(NSTEPS):\n", + " dbi_pauli, idx, step, flip_sign = select_best_dbr_generator(dbi_pauli, Z_ops, scheduling=scheduling, compare_canonical=False)\n", + " d = Z_ops[idx]\n", + " loss_history_pauli.append(dbi_pauli.off_diagonal_norm)\n", + " steps_pauli_plot.append(steps_pauli_plot[-1]+step)\n", + " if flip_sign < 0:\n", + " Z_optimal.append('-' + Z_names[idx])\n", + " else:\n", + " Z_optimal.append(Z_names[idx])\n", + " print(f\"New optimized step at iteration {_+1}/{NSTEPS}: {step} with operator {Z_optimal[-1]}, loss {dbi_pauli.off_diagonal_norm}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Magnetic field" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize DBI class for the canonical case\n", + "dbi_gradient = DoubleBracketIteration(Hamiltonian(nqubits=nqubits, matrix=h0), mode=DoubleBracketGeneratorType.single_commutator, scheduling=scheduling, cost=cost)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=2)\n", + "d_coef = decompose_into_pauli_basis(dbi.h.matrix, list(pauli_operator_dict.values()))\n", + "d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def s_hist_to_plot(s_hist):\n", + " # convert list of step durations taken to plotable\n", + " s_plot = [0] * len(s_hist)\n", + " for i in range(len(s_hist)):\n", + " if i != 0:\n", + " s_plot[i] = s_plot[i-1] + s_hist[i]\n", + " return s_plot" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "loss_history_gradient, d_params_hist, s_hist = gradient_descent(dbi_gradient, NSTEPS, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n", + "steps_gradient_plot = s_hist_to_plot(s_hist)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.title(str(nqubits) + ' random Hamiltonian diagonalization')\n", + "plt.plot(loss_history_canonical, label='canonical')\n", + "plt.plot(loss_history_pauli, label='Pauli-Z')\n", + "plt.plot(loss_history_gradient, label='gradient')\n", + "plt.legend()\n", + "plt.xlabel('Iteration')\n", + "plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.title(str(nqubits) + ' random Hamiltonian diagonalization')\n", + "plt.plot(steps_canonical_plot, loss_history_canonical, marker='o', label='canonical')\n", + 
"plt.plot(steps_pauli_plot, loss_history_pauli, marker='o', label='Pauli-Z')\n", + "plt.plot(steps_gradient_plot,loss_history_gradient, marker='o', label='gradient')\n", + "plt.legend()\n", + "plt.xlabel('Iteration')\n", + "plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test on TFIM\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# backend\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", + "# initialize dbi object\n", + "# hamiltonian parameters\n", + "# define the hamiltonian\n", + "nqubits = 5\n", + "h = 1\n", + "H_TFIM = SymbolicHamiltonian( - h*symbols.Z(nqubits-1), nqubits=nqubits)\n", + "# add linear interaction terms\n", + "for i in range(nqubits-1):\n", + " H_TFIM -= SymbolicHamiltonian(symbols.X(i)*symbols.X(i+1) + h*symbols.Z(i), nqubits=nqubits)\n", + "H_TFIM = H_TFIM.dense\n", + "\n", + "# initialize class\n", + "dbi = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.canonical)\n", + "print(\"Initial off diagonal norm\", dbi.off_diagonal_norm)\n", + "visualize_matrix(dbi.h.matrix, title=f'Random hamiltonian with L={nqubits}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# iterations steps\n", + "NSTEPS = 5\n", + "# choose polynomial scheduling\n", + "scheduling = DoubleBracketScheduling.simulated_annealing" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Canonical" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize DBI class for the canonical case\n", + "dbi_canonical = DoubleBracketIteration(deepcopy(H_TFIM), mode=DoubleBracketGeneratorType.canonical, scheduling=scheduling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Canonical\n", + "off_diagonal_norm_history_canonical = [dbi_canonical.off_diagonal_norm]\n", + "steps_canonical_plot = [0]\n", + "for s in range(NSTEPS):\n", + " # same settings as iteration from list\n", + " step = dbi_canonical.choose_step(d=dbi.diagonal_h_matrix)\n", + " dbi_canonical(step=step)\n", + " print(f\"New optimized step at iteration {s+1}/{NSTEPS}: {step}, loss {dbi_canonical.off_diagonal_norm}\")\n", + " off_diagonal_norm_history_canonical.append(dbi_canonical.off_diagonal_norm)\n", + " steps_canonical_plot.append(steps_canonical_plot[-1]+step)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pauli-Z" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize DBI class for the Pauli-Z strategy\n", + "dbi_pauli = DoubleBracketIteration(deepcopy(H_TFIM), mode=DoubleBracketGeneratorType.single_commutator, scheduling=scheduling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "generate_local_Z = generate_Z_operators(nqubits)\n", + "Z_ops = list(generate_local_Z.values())\n", + "Z_names = list(generate_local_Z.keys())\n", + "Z_optimal = []\n", + "# add in initial values for plotting\n", + "off_diagonal_norm_history_pauli = [dbi_pauli.off_diagonal_norm]\n", + "steps_pauli_plot = [0]\n", + "scheduling = DoubleBracketScheduling.simulated_annealing\n", + "for _ in range(NSTEPS):\n", + " dbi_pauli, idx, step, flip_sign = select_best_dbr_generator(dbi_pauli, Z_ops, 
+ "    dbi_pauli, idx, step, flip_sign = select_best_dbr_generator(dbi_pauli, Z_ops, scheduling=scheduling, compare_canonical=False)\n",
+ "    off_diagonal_norm_history_pauli.append(dbi_pauli.off_diagonal_norm)\n",
+ "    steps_pauli_plot.append(steps_pauli_plot[-1]+step)\n",
+ "    if flip_sign < 0:\n",
+ "        Z_optimal.append('-' + Z_names[idx])\n",
+ "    else:\n",
+ "        Z_optimal.append(Z_names[idx])\n",
+ "    print(f\"New optimized step at iteration {_+1}/{NSTEPS}: {step} with operator {Z_optimal[-1]}, loss {dbi_pauli.off_diagonal_norm}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Magnetic field"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# initialize DBI class for the magnetic-field (gradient) strategy\n",
+ "dbi_gradient = DoubleBracketIteration(deepcopy(H_TFIM), mode=DoubleBracketGeneratorType.single_commutator, scheduling=scheduling)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=2)\n",
+ "d_coef = decompose_into_pauli_basis(dbi.h.matrix, list(pauli_operator_dict.values()))\n",
+ "d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "initial_s = polynomial_step(dbi_object=dbi, d=d, n=4)\n",
+ "print(initial_s)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "off_diagonal_norm_history_gradient, d_params_hist, s_hist = gradient_descent(dbi_gradient, NSTEPS, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
+ "steps_gradient_plot = s_hist_to_plot(s_hist)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.title(str(nqubits) + ' qubit TFIM diagonalization')\n",
+ "plt.plot(off_diagonal_norm_history_canonical, label='canonical', marker='o')\n",
+ "plt.plot(off_diagonal_norm_history_pauli, label='Pauli-Z', marker='o')\n",
+ "plt.plot(off_diagonal_norm_history_gradient, label='gradient', marker='o')\n",
+ "plt.legend()\n",
+ "plt.xlabel('Iteration')\n",
+ "plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.title(str(nqubits) + ' qubit TFIM diagonalization')\n",
+ "plt.plot(steps_canonical_plot, off_diagonal_norm_history_canonical, marker='o', label='canonical')\n",
+ "plt.plot(steps_pauli_plot, off_diagonal_norm_history_pauli, marker='o', label='Pauli-Z')\n",
+ "plt.plot(steps_gradient_plot, off_diagonal_norm_history_gradient, marker='o', label='gradient')\n",
+ "plt.legend()\n",
+ "plt.xlabel('Duration')\n",
+ "plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')"
+ ]
+ }
+ ],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/examples/dbi/dbi_strategy_Ising_model.ipynb b/examples/dbi/dbi_strategy_Ising_model.ipynb
new file mode 100644
index 0000000000..6c0d53209a
--- /dev/null
+++ b/examples/dbi/dbi_strategy_Ising_model.ipynb
@@ -0,0 +1,328 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Double-Bracket Iteration Strategy: magnetic field (Ising model)\n",
+ "This notebook shows the diagonalization process of DBI using the magnetic-field strategy, which varies the diagonal operator $D$ by gradient descent. To find the gradient with respect to $D$, a parameterization of $D$ is required. For the purposes of this notebook, we parameterize it in Ising form, i.e.\n",
+ "\n",
+ "$$ D = \\sum \\alpha_i Z_i + \\sum \\beta_{ij} Z_i Z_j $$\n",
+ "\n",
+ "The gradients are calculated under the premise that the diagonalization gain curve can be fitted by a polynomial, and the iteration step duration is taken at the first dip of that curve."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from copy import deepcopy\n",
+ "\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "from qibo import hamiltonians, set_backend, symbols\n",
+ "from qibo.hamiltonians import Hamiltonian, SymbolicHamiltonian\n",
+ "from qibo.quantum_info import random_hermitian\n",
+ "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketIteration\n",
+ "from qibo.models.dbi.utils import generate_pauli_operator_dict, decompose_into_pauli_basis, ParameterizationTypes\n",
+ "from qibo.models.dbi.utils_dbr_strategies import gradient_numerical, gradient_descent"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def visualize_matrix(matrix, title=\"\"):\n",
+ "    \"\"\"Visualize a Hamiltonian as a heatmap.\"\"\"\n",
+ "    fig, ax = plt.subplots(figsize=(5,5))\n",
+ "    ax.set_title(title)\n",
+ "    try:\n",
+ "        im = ax.imshow(np.absolute(matrix), cmap=\"inferno\")\n",
+ "    except TypeError:\n",
+ "        im = ax.imshow(np.absolute(matrix.get()), cmap=\"inferno\")\n",
+ "    fig.colorbar(im, ax=ax)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Test on random Hamiltonian"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# backend\n",
+ "set_backend(\"numpy\")\n",
+ "# initialize dbi object\n",
+ "nqubits = 5\n",
+ "h0 = random_hermitian(2**nqubits, seed=2)\n",
+ "scheduling = DoubleBracketScheduling.hyperopt\n",
+ "mode = DoubleBracketGeneratorType.single_commutator\n",
+ "n_taylor = 5\n",
+ "dbi = DoubleBracketIteration(Hamiltonian(nqubits=nqubits, matrix=h0), scheduling=scheduling, mode=mode)\n",
+ "print(\"Initial off diagonal norm\", dbi.off_diagonal_norm)\n",
+ "visualize_matrix(dbi.h.matrix, title=f'Random Hamiltonian with L={nqubits}')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Order 1: $D=\\sum \\alpha_iZ_i$"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# generate pauli_operator_dict\n",
+ "pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=1)\n",
+ "d_coef = decompose_into_pauli_basis(dbi.h.matrix, list(pauli_operator_dict.values()))\n",
+ "d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])\n",
+ "grad = gradient_numerical(dbi, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
+ "print('The initial D coefficients:', d_coef)\n",
+ "print('Gradient:', grad)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "iters = 15\n",
+ "off_diagonal_norm_1, d_params_hist, s_step = gradient_descent(dbi, iters, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.title(str(nqubits) + '-spin random Hamiltonian')\n",
+ "plt.plot(off_diagonal_norm_1)\n",
+ "plt.xlabel('Iteration')\n",
+ "plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Order 2: $D=\\sum \\alpha_iZ_i + \\beta_{ij}Z_iZ_j$"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dbi = DoubleBracketIteration(Hamiltonian(nqubits=nqubits, matrix=h0), scheduling=scheduling, mode=mode)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# generate pauli_operator_dict\n",
+ "pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=2)\n",
+ "d_coef = decompose_into_pauli_basis(dbi.h.matrix, list(pauli_operator_dict.values()))\n",
+ "d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])\n",
+ "grad = gradient_numerical(dbi, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
+ "print('The initial D coefficients:', d_coef)\n",
+ "print('Gradient:', grad)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "iters = 15\n",
+ "off_diagonal_norm_2, d_params_hist, s_step = gradient_descent(dbi, iters, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.title(str(nqubits) + '-spin random Hamiltonian')\n",
+ "plt.plot(off_diagonal_norm_1, label='order 1')\n",
+ "plt.plot(off_diagonal_norm_2, label='order 2')\n",
+ "plt.legend()\n",
+ "plt.xlabel('Iteration')\n",
+ "plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Test on TFIM\n",
+ "Here we build the TFIM with couplings along the X axis explicitly using `SymbolicHamiltonian`. It is also possible to obtain it by rotating the TFIM built into `qibo` with a layer of Hadamard gates, as sketched below:\n",
+ "\n",
+ "$$ H = -\\left(\\sum_i X_i X_{i+1} + h \\sum_i Z_i\\right) $$"
+ ]
+ },
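+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal sketch of that rotation (an assumption for illustration: qibo's built-in `hamiltonians.TFIM` implements $-(\\sum Z_iZ_{i+1} + h\\sum X_i)$ with periodic boundary conditions, so the rotated operator matches the open-chain $H$ above only up to the boundary term):\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch: conjugating by a Hadamard on every qubit swaps X and Z\n",
+ "from qibo import matrices\n",
+ "\n",
+ "nqubits, h = 5, 1\n",
+ "H_builtin = hamiltonians.TFIM(nqubits=nqubits, h=h)\n",
+ "# build U = H x H x ... x H (one Hadamard per qubit)\n",
+ "U = matrices.H\n",
+ "for _ in range(nqubits - 1):\n",
+ "    U = np.kron(U, matrices.H)\n",
+ "# the Hadamard is self-inverse, so U @ M @ U implements U M U^dagger\n",
+ "H_rotated = U @ H_builtin.matrix @ U"
+ ]
+ },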
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# generate the Hamiltonian\n",
+ "nqubits = 5\n",
+ "h = 1\n",
+ "H_TFIM = SymbolicHamiltonian(-h*symbols.Z(nqubits-1), nqubits=nqubits)\n",
+ "# add the interaction and transverse-field terms\n",
+ "for i in range(nqubits-1):\n",
+ "    H_TFIM -= SymbolicHamiltonian(symbols.X(i)*symbols.X(i+1) + h*symbols.Z(i), nqubits=nqubits)\n",
+ "H_TFIM = H_TFIM.dense\n",
+ "visualize_matrix(H_TFIM.matrix, title=f'TFIM with L={nqubits} h={h}')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# backend\n",
+ "set_backend(\"numpy\")\n",
+ "# initialize dbi object\n",
+ "dbi_TFIM = DoubleBracketIteration(deepcopy(H_TFIM), scheduling=scheduling, mode=mode)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Order 1: $D=\\sum \\alpha_iZ_i$"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dbi_TFIM_1 = DoubleBracketIteration(deepcopy(H_TFIM), scheduling=scheduling, mode=mode)\n",
+ "# generate pauli_operator_dict\n",
+ "pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=1)\n",
+ "d_coef = decompose_into_pauli_basis(dbi_TFIM_1.h.matrix, list(pauli_operator_dict.values()))\n",
+ "d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])\n",
+ "grad = gradient_numerical(dbi_TFIM_1, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
+ "print('The initial D coefficients:', d_coef)\n",
+ "print('Gradient:', grad)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "NSTEPS = 3\n",
+ "off_diagonal_norm_1, d_params_hist, s_step = gradient_descent(dbi_TFIM_1, NSTEPS, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.title(f'n={nqubits} h={h} TFIM, order=1')\n",
+ "plt.plot(off_diagonal_norm_1)\n",
+ "plt.xlabel('Iteration')\n",
+ "plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# the final matrix after order-1 gradient descent\n",
+ "visualize_matrix(dbi_TFIM_1.h.matrix)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Order 2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dbi_TFIM_2 = DoubleBracketIteration(deepcopy(H_TFIM), scheduling=scheduling, mode=mode)\n",
+ "# generate pauli_operator_dict\n",
+ "pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=2)\n",
+ "d_coef = decompose_into_pauli_basis(dbi_TFIM_2.h.matrix, list(pauli_operator_dict.values()))\n",
+ "d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])\n",
+ "grad = gradient_numerical(dbi_TFIM_2, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
+ "print('The initial D coefficients:', d_coef)\n",
+ "print('Gradient:', grad)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
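+ "# rerun gradient descent with the order-2 parameterization (same NSTEPS)\n",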
+ "off_diagonal_norm_2, d_params_hist, s_step = gradient_descent(dbi_TFIM_2, NSTEPS, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.title(f'n={nqubits} h={h} TFIM')\n", + "plt.plot(off_diagonal_norm_1, label='order 1')\n", + "plt.plot(off_diagonal_norm_2, label='order 2')\n", + "plt.legend()\n", + "plt.xlabel('Iteration')\n", + "plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In conclusion, we see that the parameterization order or locality of the Pauli based parameterization for gradient descent does not affect significantly the effectiveness of double bracket diagonalization." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/dbi/DBI_strategy_Pauli-Z_products.ipynb b/examples/dbi/dbi_strategy_Pauli-Z.ipynb similarity index 87% rename from examples/dbi/DBI_strategy_Pauli-Z_products.ipynb rename to examples/dbi/dbi_strategy_Pauli-Z.ipynb index 0f76a36245..aabee6c172 100644 --- a/examples/dbi/DBI_strategy_Pauli-Z_products.ipynb +++ b/examples/dbi/dbi_strategy_Pauli-Z.ipynb @@ -22,13 +22,12 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install seaborn # plotting library\n", - "!python -m pip install hyperopt # required to optimize the DBF step" + "# !python -m pip install seaborn # plotting library" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -39,8 +38,9 @@ "import seaborn as sns\n", "\n", "from qibo import hamiltonians, set_backend\n", - "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketIteration\n", - "from qibo.models.dbi.utils import *" + "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketIteration, DoubleBracketScheduling\n", + "from qibo.models.dbi.utils import generate_Z_operators\n", + "from qibo.models.dbi.utils_dbr_strategies import select_best_dbr_generator" ] }, { @@ -122,31 +122,16 @@ }, { "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[Qibo 0.2.4|INFO|2024-01-24 19:59:31]: Using qibojit (numba) backend on /CPU:0\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Initial off diagonal norm 8.48528137423857\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# set the qibo backend (we suggest qibojit if N >= 20)\n", "# alternatives: tensorflow (not optimized), numpy (when CPU not supported by jit)\n", - "set_backend(\"qibojit\", \"numba\")\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", "\n", "# hamiltonian parameters\n", - "nqubits = 2\n", + "nqubits = 5\n", "h = 3\n", "\n", "# define the hamiltonian\n", @@ -160,20 +145,9 @@ }, { "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[-2.-0.j -0.-0.j -0.-0.j -0.-0.j]\n", - " [-0.-0.j 2.-0.j -0.-0.j -0.-0.j]\n", - " [-0.-0.j -0.-0.j 2.-0.j -0.-0.j]\n", - " [-0.-0.j -0.-0.j -0.-0.j -2.-0.j]]\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(H_TFIM.matrix)" ] @@ -212,15 +186,16 @@ "metadata": {}, "outputs": [], "source": [ - "NSTEPS = 15\n", - "max_evals = 100\n", + "NSTEPS = 2\n", 
+ "max_evals = 10\n", "step_max = 1\n", "Z_optimal = []\n", "# add in initial values for plotting\n", "off_diagonal_norm_history = [dbi.off_diagonal_norm]\n", "steps = [0]\n", + "scheduling = DoubleBracketScheduling.hyperopt\n", "for _ in range(NSTEPS):\n", - " dbi, idx, step, flip_sign = select_best_dbr_generator(dbi, Z_ops, compare_canonical=False, max_evals=max_evals, step_max=step_max)\n", + " dbi, idx, step, flip_sign = select_best_dbr_generator(dbi, Z_ops, scheduling=scheduling, compare_canonical=False, max_evals=max_evals, step_max=step_max)\n", " off_diagonal_norm_history.append(dbi.off_diagonal_norm)\n", " steps.append(steps[-1]+step)\n", " if flip_sign < 0:\n", @@ -269,7 +244,7 @@ "source": [ "# set the qibo backend (we suggest qibojit if N >= 20)\n", "# alternatives: tensorflow (not optimized), numpy (when CPU not supported by jit)\n", - "set_backend(\"qibojit\", \"numba\")\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", "\n", "\n", "# initialize class|\n", @@ -289,13 +264,7 @@ "steps_canonical_plot = [0]\n", "for s in range(NSTEPS):\n", " # same settings as iteration from list\n", - " step = dbi_canonical.hyperopt_step(\n", - " step_min = 1e-5,\n", - " step_max = 1,\n", - " space = hp.uniform,\n", - " optimizer = tpe,\n", - " max_evals = max_evals,\n", - " )\n", + " step = dbi_canonical.choose_step(scheduling=DoubleBracketScheduling.hyperopt)\n", " dbi_canonical(step=step)\n", " print(f\"New optimized step at iteration {s+1}/{NSTEPS}: {step}, loss {dbi_canonical.off_diagonal_norm}\")\n", " off_diagonal_norm_history_canonical.append(dbi_canonical.off_diagonal_norm)\n", @@ -355,7 +324,7 @@ "metadata": {}, "outputs": [], "source": [ - "dbi_mixed = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.single_commutator)\n", + "dbi_mixed = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.single_commutator, scheduling=DoubleBracketScheduling.hyperopt)\n", "print(\"Initial off diagonal norm\", dbi_mixed.off_diagonal_norm)" ] }, @@ -368,12 +337,7 @@ "dbi_eval = deepcopy(dbi_mixed)\n", "dbi_eval.mode = DoubleBracketGeneratorType.canonical\n", "if step is None:\n", - " step = dbi_eval.hyperopt_step(\n", - " step_max=step_max,\n", - " space=hp.uniform,\n", - " optimizer=tpe,\n", - " max_evals=max_evals,\n", - " )\n", + " step = dbi_eval.choose_step()\n", "dbi_eval(step=step)\n", "print('canonical norm', dbi_eval.off_diagonal_norm, 'step', step)" ] @@ -389,7 +353,7 @@ "off_diagonal_norm_history_mixed = [dbi_mixed.off_diagonal_norm]\n", "steps = [0]\n", "for _ in range(NSTEPS):\n", - " dbi_mixed, idx, step, flip_sign = select_best_dbr_generator(dbi_mixed, Z_ops, compare_canonical=True, max_evals=max_evals)\n", + " dbi_mixed, idx, step, flip_sign = select_best_dbr_generator(dbi_mixed, Z_ops, scheduling=scheduling, compare_canonical=True, max_evals=max_evals, step_max=step_max)\n", " off_diagonal_norm_history_mixed.append(dbi_mixed.off_diagonal_norm)\n", " steps.append(steps[-1]+step)\n", " if idx == len(Z_ops):\n", @@ -444,7 +408,7 @@ "metadata": {}, "outputs": [], "source": [ - "dbi_mixed_can= DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.canonical)\n", + "dbi_mixed_can= DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.canonical, scheduling=DoubleBracketScheduling.hyperopt)\n", "print(\"Initial off diagonal norm\", dbi_mixed_can.off_diagonal_norm)" ] }, @@ -463,7 +427,7 @@ " dbi_mixed_can(step=step)\n", " off_diagonal_norm_history_mixed_can.append(dbi_mixed_can.off_diagonal_norm)\n", " 
steps_mixed_can.append(step)\n", - " \n", + "\n", "print(\"After 2 steps, off diagonal norm:\", dbi_mixed_can.off_diagonal_norm)\n", "print(\"By comparison, the Pauli-Z:\", off_diagonal_norm_history[2])" ] @@ -479,7 +443,7 @@ "remaining_NSTEPS = NSTEPS - cannonical_NSTEPS\n", "dbi_mixed_can.mode = DoubleBracketGeneratorType.single_commutator\n", "for _ in range(remaining_NSTEPS):\n", - " dbi_mixed_can, idx, step, flip_sign = select_best_dbr_generator(dbi_mixed_can, Z_ops, compare_canonical=False)\n", + " dbi_mixed_can, idx, step, flip_sign = select_best_dbr_generator(dbi_mixed_can, Z_ops, scheduling=scheduling, compare_canonical=False, max_evals=max_evals, step_max=step_max)\n", " off_diagonal_norm_history_mixed_can.append(dbi_mixed_can.off_diagonal_norm)\n", " steps_mixed_can.append(step)\n", " if idx == len(Z_ops):\n", @@ -516,27 +480,14 @@ "source": [ "This example also shows that the canonical generator is more likely to drive the model into a local minimum than variationally assigned diagonal operator, and that it is hard to get it unstuck even with the Pauli-Z operators." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] } ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.7" - } - }, + "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/examples/dbi/dbi_tutorial_basic_intro.ipynb b/examples/dbi/dbi_tutorial_basic_intro.ipynb index ecb28bb4d7..bcd0d65c67 100644 --- a/examples/dbi/dbi_tutorial_basic_intro.ipynb +++ b/examples/dbi/dbi_tutorial_basic_intro.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "cb748c1a-2ecd-44a2-91d8-c1255a00615b", + "id": "2a33581d", "metadata": {}, "source": [ "## Double-Bracket Iteration diagonalization algorithm\n", @@ -17,18 +17,18 @@ { "cell_type": "code", "execution_count": null, - "id": "e1f362b8-eb73-456e-ae48-94c5f2a12649", + "id": "62d9723f", "metadata": {}, "outputs": [], "source": [ - "!python -m pip install seaborn # plotting library\n", - "!python -m pip install hyperopt # required to optimize the DBF step" + "# uncomment this line if seaborn is not installed\n", + "# !python -m pip install seaborn" ] }, { "cell_type": "code", "execution_count": null, - "id": "f270b1ea-ee6a-4eac-a0ff-3d7dae296cf0", + "id": "b80b4738", "metadata": {}, "outputs": [], "source": [ @@ -41,12 +41,12 @@ "from hyperopt import hp, tpe\n", "\n", "from qibo import hamiltonians, set_backend\n", - "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketIteration" + "from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketIteration, DoubleBracketScheduling" ] }, { "cell_type": "markdown", - "id": "ba6e5402-ea34-4979-bb79-fd395567f77d", + "id": "a5e25f51", "metadata": {}, "source": [ "Here we define a simple plotting function useful to keep track of the diagonalization process." 
@@ -55,7 +55,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4aec7b46-19b9-4004-93c0-a90255e58fd9", + "id": "933d9a00", "metadata": {}, "outputs": [], "source": [ @@ -96,7 +96,7 @@ }, { "cell_type": "markdown", - "id": "9f4cd7cc-9952-4da4-baef-e916300a9365", + "id": "4efd4a97", "metadata": {}, "source": [ "We need to define a target hamiltonian which we aim to diagonalize. As an example, we consider the Transverse Field Ising Model (TFIM):\n", @@ -107,12 +107,12 @@ { "cell_type": "code", "execution_count": null, - "id": "2c4ed408-68ed-4054-825c-2a7df0979a4f", + "id": "7125940f", "metadata": {}, "outputs": [], "source": [ "# set the qibo backend (we suggest qibojit if N >= 20)\n", - "set_backend(\"qibojit\", \"numba\")\n", + "set_backend(\"qibojit\", platform=\"numba\")\n", "\n", "# hamiltonian parameters\n", "nqubits = 5\n", @@ -127,7 +127,7 @@ }, { "cell_type": "markdown", - "id": "4794e779-bf2d-4ab5-97ce-f876d9522a35", + "id": "c2ca8392", "metadata": {}, "source": [ "#### The generator of the evolution\n", @@ -148,7 +148,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26a487e9-366b-4203-b660-e3d4af2bcb68", + "id": "1adafc19", "metadata": {}, "outputs": [], "source": [ @@ -160,7 +160,7 @@ { "cell_type": "code", "execution_count": null, - "id": "da8dce89-27f6-403d-982a-58d531fade48", + "id": "8a4d0e9d", "metadata": {}, "outputs": [], "source": [ @@ -170,7 +170,7 @@ }, { "cell_type": "markdown", - "id": "fc4f9f75-0548-4533-a13c-3aba3191e608", + "id": "a5527622", "metadata": {}, "source": [ "#### The `DoubleBracketIteration` class\n", @@ -181,7 +181,7 @@ { "cell_type": "code", "execution_count": null, - "id": "055870ec-55f2-4b99-a622-e3aa4c7dd0e9", + "id": "9521c464", "metadata": {}, "outputs": [], "source": [ @@ -190,7 +190,7 @@ }, { "cell_type": "markdown", - "id": "b38cf803-60b4-467a-be8e-cbad5d81f14a", + "id": "a262c69f", "metadata": {}, "source": [ "#### `DoubleBracketIteration` features" @@ -199,7 +199,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9e278c3d-9f34-4a40-b453-4e030c751ef5", + "id": "290e5828", "metadata": {}, "outputs": [], "source": [ @@ -210,7 +210,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5b8e142b-a0a2-41bd-a16a-265a420b7360", + "id": "3e2b9950", "metadata": {}, "outputs": [], "source": [ @@ -222,7 +222,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4f9d1d41-3df7-49cf-96ca-fa1019c00c33", + "id": "638ba4b5", "metadata": {}, "outputs": [], "source": [ @@ -233,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7b864712-219c-44b6-8337-19ef0100e318", + "id": "08f0c466", "metadata": {}, "outputs": [], "source": [ @@ -243,7 +243,7 @@ }, { "cell_type": "markdown", - "id": "5e576bc4-4e79-4c71-9ea0-b3012e9f2ba1", + "id": "bb5f10da", "metadata": {}, "source": [ "which shows $\\hat{H}$ is now identical to $\\hat{H}_0$ since no evolution step has been performed yet." 
@@ -252,7 +252,7 @@ { "cell_type": "code", "execution_count": null, - "id": "da3d3aaa-17e1-492e-bcd3-b510f44a5391", + "id": "90e6fdff", "metadata": {}, "outputs": [], "source": [ @@ -262,7 +262,7 @@ }, { "cell_type": "markdown", - "id": "ca0ce252", + "id": "a0101ae0", "metadata": {}, "source": [ "The Hilbert-Schmidt norm of a Hamiltonian is defined as:\n", @@ -273,7 +273,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24d0dfa1-7039-4d7d-8aa3-5a937b9ab0b8", + "id": "0d90c8b5", "metadata": {}, "outputs": [], "source": [ @@ -284,7 +284,7 @@ }, { "cell_type": "markdown", - "id": "d75e35ab-66f4-49f9-af19-679c20065a11", + "id": "a1d1eb77", "metadata": {}, "source": [ "Finally, the energy fluctuation of the system at step $k$ over a given state $\\mu$\n", @@ -297,7 +297,7 @@ { "cell_type": "code", "execution_count": null, - "id": "95f8d86f-07d4-498c-acb1-f6f6a4614c24", + "id": "13710cc2", "metadata": {}, "outputs": [], "source": [ @@ -312,7 +312,7 @@ }, { "cell_type": "markdown", - "id": "3d5b37f3-2477-49a0-9f80-7da5ddda1fff", + "id": "4d34e1e3", "metadata": {}, "source": [ "#### Call the `DoubleBracketIteration` to perform a DBF iteration\n", @@ -323,7 +323,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9a886261-8aa6-4df0-a31b-9c39847db124", + "id": "a7749a96", "metadata": {}, "outputs": [], "source": [ @@ -340,7 +340,7 @@ }, { "cell_type": "markdown", - "id": "b78dd05d-ffe3-435a-b5ec-2a42f28066b2", + "id": "dab441bb", "metadata": {}, "source": [ "We can check now if something happened by plotting the drift:" @@ -349,7 +349,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cc74812d-7c2c-44e4-afc2-e235968801b4", + "id": "fc01baa4", "metadata": {}, "outputs": [], "source": [ @@ -358,7 +358,7 @@ }, { "cell_type": "markdown", - "id": "3465a422-eebf-4e80-ae96-bba894132330", + "id": "9223433b", "metadata": {}, "source": [ "The set step can be good, but maybe not the best one. In order to do this choice in a wiser way, we can call the DBF hyperoptimization routine to search for a better initial step. The `dbf.hyperopt_step` method is built on top of the [`hyperopt`](https://hyperopt.github.io/hyperopt/) package. Any algorithm or sampling space provided by the official package can be used. We are going to use the default options (we sample new steps from a uniform space following a _Tree of Parzen estimators algorithm_)." 
@@ -367,7 +367,7 @@ { "cell_type": "code", "execution_count": null, - "id": "aad79966-7a11-4a45-aba5-4a4bb8315c50", + "id": "0d7b86d3", "metadata": {}, "outputs": [], "source": [ @@ -375,20 +375,20 @@ "dbf.h = dbf.h0\n", "\n", "# optimization of the step, we allow to search in [1e-5, 1]\n", - "step = dbf.hyperopt_step(\n", + "step = dbf.choose_step(\n", + " scheduling=DoubleBracketScheduling.hyperopt,\n", " step_min = 1e-5,\n", " step_max = 1,\n", " space = hp.uniform,\n", " optimizer = tpe,\n", " max_evals = 1000,\n", - " verbose = True\n", ")" ] }, { "cell_type": "code", "execution_count": null, - "id": "49483a47-d29d-440e-a4bc-143bfe6bb3cf", + "id": "1b9b1431", "metadata": {}, "outputs": [], "source": [ @@ -398,7 +398,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6bdaf7f9-7e49-4a16-8b29-ae1f9746cd9b", + "id": "52fa3599", "metadata": {}, "outputs": [], "source": [ @@ -407,7 +407,7 @@ }, { "cell_type": "markdown", - "id": "b5f1d00e-e763-40d9-822f-e0e8d4c57d9a", + "id": "084c3bcb", "metadata": {}, "source": [ "#### Let's evolve the model for `NSTEPS`\n", @@ -420,7 +420,7 @@ { "cell_type": "code", "execution_count": null, - "id": "59a6a485-a714-4e14-b27a-1df2930068ee", + "id": "d1f197b1", "metadata": {}, "outputs": [], "source": [ @@ -443,7 +443,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7e0b2f18-ca53-4f34-9fcf-0052dcc31dc5", + "id": "c115c222", "metadata": {}, "outputs": [], "source": [ @@ -452,7 +452,7 @@ }, { "cell_type": "markdown", - "id": "eb797d6c-0eba-4da4-b492-8b5d70f9123f", + "id": "233ba431", "metadata": {}, "source": [ "#### Method 2: optimizing the step" @@ -461,30 +461,29 @@ { "cell_type": "code", "execution_count": null, - "id": "a6fd1e33-3620-4f3b-b705-a120f6da0027", + "id": "4e0fc1c2", "metadata": {}, "outputs": [], "source": [ "# restart\n", - "dbf_2 = DoubleBracketIteration(hamiltonian=deepcopy(h), mode=iterationtype)\n", + "dbf_2 = DoubleBracketIteration(hamiltonian=deepcopy(h), mode=iterationtype, scheduling=DoubleBracketScheduling.hyperopt)\n", "off_diagonal_norm_history = [dbf_2.off_diagonal_norm]\n", "\n", "# set the number of evolution steps\n", "NSTEPS = 20\n", "\n", "# optimize first step\n", - "step = dbf_2.hyperopt_step(\n", + "step = dbf_2.choose_step(\n", " step_min = 1e-5,\n", " step_max = 1,\n", " space = hp.uniform,\n", " optimizer = tpe,\n", " max_evals = 500,\n", - " verbose = True\n", ")\n", "\n", "for s in range(NSTEPS):\n", " if s != 0:\n", - " step = dbf_2.hyperopt_step(\n", + " step = dbf_2.choose_step(\n", " step_min = 1e-5,\n", " step_max = 1,\n", " space = hp.uniform,\n", @@ -502,7 +501,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0f0212bf-b642-4fea-9203-037876e0b266", + "id": "40e31e97", "metadata": {}, "outputs": [], "source": [ @@ -511,7 +510,7 @@ }, { "cell_type": "markdown", - "id": "32341937-4178-41d2-a10e-5e4d2634098e", + "id": "0de78acd", "metadata": {}, "source": [ "The hyperoptimization can lead to a faster convergence of the algorithm." 
@@ -520,7 +519,7 @@ { "cell_type": "code", "execution_count": null, - "id": "82b89092-07e5-4788-9ae0-8907df2428eb", + "id": "baab0ab5", "metadata": {}, "outputs": [], "source": [ @@ -530,33 +529,23 @@ { "cell_type": "code", "execution_count": null, - "id": "ac8ed320-04a8-42af-a980-48ab4f1fff7c", + "id": "2bc9ac69", "metadata": {}, "outputs": [], "source": [ "visualize_matrix(dbf_2.h.matrix)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0bed191d", + "metadata": {}, + "outputs": [], + "source": [] } ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.7" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 5 } diff --git a/poetry.lock b/poetry.lock index 6f043417f8..791f0a3cc9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -35,13 +35,13 @@ files = [ [[package]] name = "anyio" -version = "4.3.0" +version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, ] [package.dependencies] @@ -55,6 +55,17 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "appnope" +version = "0.1.4" +description = "Disable App Nap on macOS >= 10.9" +optional = false +python-versions = ">=3.6" +files = [ + {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, + {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, +] + [[package]] name = "astroid" version = "3.1.0" @@ -678,63 +689,63 @@ test = ["altair", "baytune", "chocolate", "dask", "distributed", "kahypar", "mat [[package]] name = "coverage" -version = "7.5.1" +version = "7.5.3" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0884920835a033b78d1c73b6d3bbcda8161a900f38a488829a83982925f6c2e"}, - {file = "coverage-7.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:39afcd3d4339329c5f58de48a52f6e4e50f6578dd6099961cf22228feb25f38f"}, - {file = "coverage-7.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b0ceee8147444347da6a66be737c9d78f3353b0681715b668b72e79203e4a"}, - {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a9ca3f2fae0088c3c71d743d85404cec8df9be818a005ea065495bedc33da35"}, - {file = 
"coverage-7.5.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd215c0c7d7aab005221608a3c2b46f58c0285a819565887ee0b718c052aa4e"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4bf0655ab60d754491004a5efd7f9cccefcc1081a74c9ef2da4735d6ee4a6223"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:61c4bf1ba021817de12b813338c9be9f0ad5b1e781b9b340a6d29fc13e7c1b5e"}, - {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db66fc317a046556a96b453a58eced5024af4582a8dbdc0c23ca4dbc0d5b3146"}, - {file = "coverage-7.5.1-cp310-cp310-win32.whl", hash = "sha256:b016ea6b959d3b9556cb401c55a37547135a587db0115635a443b2ce8f1c7228"}, - {file = "coverage-7.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:df4e745a81c110e7446b1cc8131bf986157770fa405fe90e15e850aaf7619bc8"}, - {file = "coverage-7.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:796a79f63eca8814ca3317a1ea443645c9ff0d18b188de470ed7ccd45ae79428"}, - {file = "coverage-7.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fc84a37bfd98db31beae3c2748811a3fa72bf2007ff7902f68746d9757f3746"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6175d1a0559986c6ee3f7fccfc4a90ecd12ba0a383dcc2da30c2b9918d67d8a3"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fc81d5878cd6274ce971e0a3a18a8803c3fe25457165314271cf78e3aae3aa2"}, - {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:556cf1a7cbc8028cb60e1ff0be806be2eded2daf8129b8811c63e2b9a6c43bca"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9981706d300c18d8b220995ad22627647be11a4276721c10911e0e9fa44c83e8"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d7fed867ee50edf1a0b4a11e8e5d0895150e572af1cd6d315d557758bfa9c057"}, - {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef48e2707fb320c8f139424a596f5b69955a85b178f15af261bab871873bb987"}, - {file = "coverage-7.5.1-cp311-cp311-win32.whl", hash = "sha256:9314d5678dcc665330df5b69c1e726a0e49b27df0461c08ca12674bcc19ef136"}, - {file = "coverage-7.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fa567e99765fe98f4e7d7394ce623e794d7cabb170f2ca2ac5a4174437e90dd"}, - {file = "coverage-7.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b6cf3764c030e5338e7f61f95bd21147963cf6aa16e09d2f74f1fa52013c1206"}, - {file = "coverage-7.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ec92012fefebee89a6b9c79bc39051a6cb3891d562b9270ab10ecfdadbc0c34"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16db7f26000a07efcf6aea00316f6ac57e7d9a96501e990a36f40c965ec7a95d"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beccf7b8a10b09c4ae543582c1319c6df47d78fd732f854ac68d518ee1fb97fa"}, - {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8748731ad392d736cc9ccac03c9845b13bb07d020a33423fa5b3a36521ac6e4e"}, - {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7352b9161b33fd0b643ccd1f21f3a3908daaddf414f1c6cb9d3a2fd618bf2572"}, - {file = 
"coverage-7.5.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7a588d39e0925f6a2bff87154752481273cdb1736270642aeb3635cb9b4cad07"}, - {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:68f962d9b72ce69ea8621f57551b2fa9c70509af757ee3b8105d4f51b92b41a7"}, - {file = "coverage-7.5.1-cp312-cp312-win32.whl", hash = "sha256:f152cbf5b88aaeb836127d920dd0f5e7edff5a66f10c079157306c4343d86c19"}, - {file = "coverage-7.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:5a5740d1fb60ddf268a3811bcd353de34eb56dc24e8f52a7f05ee513b2d4f596"}, - {file = "coverage-7.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2213def81a50519d7cc56ed643c9e93e0247f5bbe0d1247d15fa520814a7cd7"}, - {file = "coverage-7.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5037f8fcc2a95b1f0e80585bd9d1ec31068a9bcb157d9750a172836e98bc7a90"}, - {file = "coverage-7.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3721c2c9e4c4953a41a26c14f4cef64330392a6d2d675c8b1db3b645e31f0e"}, - {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca498687ca46a62ae590253fba634a1fe9836bc56f626852fb2720f334c9e4e5"}, - {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cdcbc320b14c3e5877ee79e649677cb7d89ef588852e9583e6b24c2e5072661"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:57e0204b5b745594e5bc14b9b50006da722827f0b8c776949f1135677e88d0b8"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fe7502616b67b234482c3ce276ff26f39ffe88adca2acf0261df4b8454668b4"}, - {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9e78295f4144f9dacfed4f92935fbe1780021247c2fabf73a819b17f0ccfff8d"}, - {file = "coverage-7.5.1-cp38-cp38-win32.whl", hash = "sha256:1434e088b41594baa71188a17533083eabf5609e8e72f16ce8c186001e6b8c41"}, - {file = "coverage-7.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:0646599e9b139988b63704d704af8e8df7fa4cbc4a1f33df69d97f36cb0a38de"}, - {file = "coverage-7.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4cc37def103a2725bc672f84bd939a6fe4522310503207aae4d56351644682f1"}, - {file = "coverage-7.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc0b4d8bfeabd25ea75e94632f5b6e047eef8adaed0c2161ada1e922e7f7cece"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d0a0f5e06881ecedfe6f3dd2f56dcb057b6dbeb3327fd32d4b12854df36bf26"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9735317685ba6ec7e3754798c8871c2f49aa5e687cc794a0b1d284b2389d1bd5"}, - {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d21918e9ef11edf36764b93101e2ae8cc82aa5efdc7c5a4e9c6c35a48496d601"}, - {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3e757949f268364b96ca894b4c342b41dc6f8f8b66c37878aacef5930db61be"}, - {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:79afb6197e2f7f60c4824dd4b2d4c2ec5801ceb6ba9ce5d2c3080e5660d51a4f"}, - {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d0d98d95dd18fe29dc66808e1accf59f037d5716f86a501fc0256455219668"}, - {file = "coverage-7.5.1-cp39-cp39-win32.whl", hash = "sha256:1cc0fe9b0b3a8364093c53b0b4c0c2dd4bb23acbec4c9240b5f284095ccf7981"}, - 
{file = "coverage-7.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:dde0070c40ea8bb3641e811c1cfbf18e265d024deff6de52c5950677a8fb1e0f"}, - {file = "coverage-7.5.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:6537e7c10cc47c595828b8a8be04c72144725c383c4702703ff4e42e44577312"}, - {file = "coverage-7.5.1.tar.gz", hash = "sha256:54de9ef3a9da981f7af93eafde4ede199e0846cd819eb27c88e2b712aae9708c"}, + {file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"}, + {file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c"}, + {file = "coverage-7.5.3-cp310-cp310-win32.whl", hash = "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84"}, + {file = "coverage-7.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac"}, + {file = "coverage-7.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974"}, + {file = "coverage-7.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614"}, + {file = "coverage-7.5.3-cp311-cp311-win32.whl", hash = "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9"}, + {file = "coverage-7.5.3-cp311-cp311-win_amd64.whl", hash = 
"sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a"}, + {file = "coverage-7.5.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8"}, + {file = "coverage-7.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84"}, + {file = "coverage-7.5.3-cp312-cp312-win32.whl", hash = "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08"}, + {file = "coverage-7.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb"}, + {file = "coverage-7.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb"}, + {file = "coverage-7.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0"}, + {file = "coverage-7.5.3-cp38-cp38-win32.whl", hash = "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485"}, + {file = "coverage-7.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56"}, + {file = "coverage-7.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85"}, + {file = "coverage-7.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31"}, + {file = 
"coverage-7.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd"}, + {file = "coverage-7.5.3-cp39-cp39-win32.whl", hash = "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d"}, + {file = "coverage-7.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0"}, + {file = "coverage-7.5.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884"}, + {file = "coverage-7.5.3.tar.gz", hash = "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f"}, ] [package.dependencies] @@ -1058,6 +1069,37 @@ toolz = ">=0.8.0" [package.extras] cython = ["cython"] +[[package]] +name = "debugpy" +version = "1.8.1" +description = "An implementation of the Debug Adapter Protocol for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "debugpy-1.8.1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:3bda0f1e943d386cc7a0e71bfa59f4137909e2ed947fb3946c506e113000f741"}, + {file = "debugpy-1.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dda73bf69ea479c8577a0448f8c707691152e6c4de7f0c4dec5a4bc11dee516e"}, + {file = "debugpy-1.8.1-cp310-cp310-win32.whl", hash = "sha256:3a79c6f62adef994b2dbe9fc2cc9cc3864a23575b6e387339ab739873bea53d0"}, + {file = "debugpy-1.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:7eb7bd2b56ea3bedb009616d9e2f64aab8fc7000d481faec3cd26c98a964bcdd"}, + {file = "debugpy-1.8.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:016a9fcfc2c6b57f939673c874310d8581d51a0fe0858e7fac4e240c5eb743cb"}, + {file = "debugpy-1.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd97ed11a4c7f6d042d320ce03d83b20c3fb40da892f994bc041bbc415d7a099"}, + {file = "debugpy-1.8.1-cp311-cp311-win32.whl", hash = "sha256:0de56aba8249c28a300bdb0672a9b94785074eb82eb672db66c8144fff673146"}, + {file = "debugpy-1.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:1a9fe0829c2b854757b4fd0a338d93bc17249a3bf69ecf765c61d4c522bb92a8"}, + {file = "debugpy-1.8.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3ebb70ba1a6524d19fa7bb122f44b74170c447d5746a503e36adc244a20ac539"}, + {file = "debugpy-1.8.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2e658a9630f27534e63922ebf655a6ab60c370f4d2fc5c02a5b19baf4410ace"}, + {file = "debugpy-1.8.1-cp312-cp312-win32.whl", hash = "sha256:caad2846e21188797a1f17fc09c31b84c7c3c23baf2516fed5b40b378515bbf0"}, + {file = "debugpy-1.8.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:edcc9f58ec0fd121a25bc950d4578df47428d72e1a0d66c07403b04eb93bcf98"}, + {file = "debugpy-1.8.1-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:7a3afa222f6fd3d9dfecd52729bc2e12c93e22a7491405a0ecbf9e1d32d45b39"}, + {file = "debugpy-1.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d915a18f0597ef685e88bb35e5d7ab968964b7befefe1aaea1eb5b2640b586c7"}, + {file = "debugpy-1.8.1-cp38-cp38-win32.whl", hash = "sha256:92116039b5500633cc8d44ecc187abe2dfa9b90f7a82bbf81d079fcdd506bae9"}, + {file = "debugpy-1.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:e38beb7992b5afd9d5244e96ad5fa9135e94993b0c551ceebf3fe1a5d9beb234"}, + {file = "debugpy-1.8.1-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:bfb20cb57486c8e4793d41996652e5a6a885b4d9175dd369045dad59eaacea42"}, + {file = "debugpy-1.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efd3fdd3f67a7e576dd869c184c5dd71d9aaa36ded271939da352880c012e703"}, + {file = "debugpy-1.8.1-cp39-cp39-win32.whl", hash = "sha256:58911e8521ca0c785ac7a0539f1e77e0ce2df753f786188f382229278b4cdf23"}, + {file = "debugpy-1.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:6df9aa9599eb05ca179fb0b810282255202a66835c6efb1d112d21ecb830ddd3"}, + {file = "debugpy-1.8.1-py2.py3-none-any.whl", hash = "sha256:28acbe2241222b87e255260c76741e1fbf04fdc3b6d094fcf57b6c6f75ce1242"}, + {file = "debugpy-1.8.1.zip", hash = "sha256:f696d6be15be87aef621917585f9bb94b1dc9e8aced570db1b8a6fc14e8f9b42"}, +] + [[package]] name = "decorator" version = "5.1.1" @@ -1307,53 +1349,53 @@ files = [ [[package]] name = "fonttools" -version = "4.51.0" +version = "4.53.0" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.51.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:84d7751f4468dd8cdd03ddada18b8b0857a5beec80bce9f435742abc9a851a74"}, - {file = "fonttools-4.51.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8b4850fa2ef2cfbc1d1f689bc159ef0f45d8d83298c1425838095bf53ef46308"}, - {file = "fonttools-4.51.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5b48a1121117047d82695d276c2af2ee3a24ffe0f502ed581acc2673ecf1037"}, - {file = "fonttools-4.51.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:180194c7fe60c989bb627d7ed5011f2bef1c4d36ecf3ec64daec8302f1ae0716"}, - {file = "fonttools-4.51.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:96a48e137c36be55e68845fc4284533bda2980f8d6f835e26bca79d7e2006438"}, - {file = "fonttools-4.51.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:806e7912c32a657fa39d2d6eb1d3012d35f841387c8fc6cf349ed70b7c340039"}, - {file = "fonttools-4.51.0-cp310-cp310-win32.whl", hash = "sha256:32b17504696f605e9e960647c5f64b35704782a502cc26a37b800b4d69ff3c77"}, - {file = "fonttools-4.51.0-cp310-cp310-win_amd64.whl", hash = "sha256:c7e91abdfae1b5c9e3a543f48ce96013f9a08c6c9668f1e6be0beabf0a569c1b"}, - {file = "fonttools-4.51.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a8feca65bab31479d795b0d16c9a9852902e3a3c0630678efb0b2b7941ea9c74"}, - {file = "fonttools-4.51.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ac27f436e8af7779f0bb4d5425aa3535270494d3bc5459ed27de3f03151e4c2"}, - {file = "fonttools-4.51.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e19bd9e9964a09cd2433a4b100ca7f34e34731e0758e13ba9a1ed6e5468cc0f"}, - {file = "fonttools-4.51.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b2b92381f37b39ba2fc98c3a45a9d6383bfc9916a87d66ccb6553f7bdd129097"}, - {file = "fonttools-4.51.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5f6bc991d1610f5c3bbe997b0233cbc234b8e82fa99fc0b2932dc1ca5e5afec0"}, - {file = "fonttools-4.51.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9696fe9f3f0c32e9a321d5268208a7cc9205a52f99b89479d1b035ed54c923f1"}, - {file = "fonttools-4.51.0-cp311-cp311-win32.whl", hash = "sha256:3bee3f3bd9fa1d5ee616ccfd13b27ca605c2b4270e45715bd2883e9504735034"}, - {file = "fonttools-4.51.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f08c901d3866a8905363619e3741c33f0a83a680d92a9f0e575985c2634fcc1"}, - {file = "fonttools-4.51.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4060acc2bfa2d8e98117828a238889f13b6f69d59f4f2d5857eece5277b829ba"}, - {file = "fonttools-4.51.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1250e818b5f8a679ad79660855528120a8f0288f8f30ec88b83db51515411fcc"}, - {file = "fonttools-4.51.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76f1777d8b3386479ffb4a282e74318e730014d86ce60f016908d9801af9ca2a"}, - {file = "fonttools-4.51.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b5ad456813d93b9c4b7ee55302208db2b45324315129d85275c01f5cb7e61a2"}, - {file = "fonttools-4.51.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:68b3fb7775a923be73e739f92f7e8a72725fd333eab24834041365d2278c3671"}, - {file = "fonttools-4.51.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8e2f1a4499e3b5ee82c19b5ee57f0294673125c65b0a1ff3764ea1f9db2f9ef5"}, - {file = "fonttools-4.51.0-cp312-cp312-win32.whl", hash = "sha256:278e50f6b003c6aed19bae2242b364e575bcb16304b53f2b64f6551b9c000e15"}, - {file = "fonttools-4.51.0-cp312-cp312-win_amd64.whl", hash = "sha256:b3c61423f22165541b9403ee39874dcae84cd57a9078b82e1dce8cb06b07fa2e"}, - {file = "fonttools-4.51.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1621ee57da887c17312acc4b0e7ac30d3a4fb0fec6174b2e3754a74c26bbed1e"}, - {file = "fonttools-4.51.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9d9298be7a05bb4801f558522adbe2feea1b0b103d5294ebf24a92dd49b78e5"}, - {file = "fonttools-4.51.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee1af4be1c5afe4c96ca23badd368d8dc75f611887fb0c0dac9f71ee5d6f110e"}, - {file = "fonttools-4.51.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c18b49adc721a7d0b8dfe7c3130c89b8704baf599fb396396d07d4aa69b824a1"}, - {file = "fonttools-4.51.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de7c29bdbdd35811f14493ffd2534b88f0ce1b9065316433b22d63ca1cd21f14"}, - {file = "fonttools-4.51.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cadf4e12a608ef1d13e039864f484c8a968840afa0258b0b843a0556497ea9ed"}, - {file = "fonttools-4.51.0-cp38-cp38-win32.whl", hash = "sha256:aefa011207ed36cd280babfaa8510b8176f1a77261833e895a9d96e57e44802f"}, - {file = "fonttools-4.51.0-cp38-cp38-win_amd64.whl", hash = "sha256:865a58b6e60b0938874af0968cd0553bcd88e0b2cb6e588727117bd099eef836"}, - {file = "fonttools-4.51.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:60a3409c9112aec02d5fb546f557bca6efa773dcb32ac147c6baf5f742e6258b"}, - {file = "fonttools-4.51.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f7e89853d8bea103c8e3514b9f9dc86b5b4120afb4583b57eb10dfa5afbe0936"}, - {file = "fonttools-4.51.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:56fc244f2585d6c00b9bcc59e6593e646cf095a96fe68d62cd4da53dd1287b55"}, - {file = "fonttools-4.51.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d145976194a5242fdd22df18a1b451481a88071feadf251221af110ca8f00ce"}, - {file = "fonttools-4.51.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5b8cab0c137ca229433570151b5c1fc6af212680b58b15abd797dcdd9dd5051"}, - {file = "fonttools-4.51.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:54dcf21a2f2d06ded676e3c3f9f74b2bafded3a8ff12f0983160b13e9f2fb4a7"}, - {file = "fonttools-4.51.0-cp39-cp39-win32.whl", hash = "sha256:0118ef998a0699a96c7b28457f15546815015a2710a1b23a7bf6c1be60c01636"}, - {file = "fonttools-4.51.0-cp39-cp39-win_amd64.whl", hash = "sha256:599bdb75e220241cedc6faebfafedd7670335d2e29620d207dd0378a4e9ccc5a"}, - {file = "fonttools-4.51.0-py3-none-any.whl", hash = "sha256:15c94eeef6b095831067f72c825eb0e2d48bb4cea0647c1b05c981ecba2bf39f"}, - {file = "fonttools-4.51.0.tar.gz", hash = "sha256:dc0673361331566d7a663d7ce0f6fdcbfbdc1f59c6e3ed1165ad7202ca183c68"}, + {file = "fonttools-4.53.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:52a6e0a7a0bf611c19bc8ec8f7592bdae79c8296c70eb05917fd831354699b20"}, + {file = "fonttools-4.53.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:099634631b9dd271d4a835d2b2a9e042ccc94ecdf7e2dd9f7f34f7daf333358d"}, + {file = "fonttools-4.53.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e40013572bfb843d6794a3ce076c29ef4efd15937ab833f520117f8eccc84fd6"}, + {file = "fonttools-4.53.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:715b41c3e231f7334cbe79dfc698213dcb7211520ec7a3bc2ba20c8515e8a3b5"}, + {file = "fonttools-4.53.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74ae2441731a05b44d5988d3ac2cf784d3ee0a535dbed257cbfff4be8bb49eb9"}, + {file = "fonttools-4.53.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:95db0c6581a54b47c30860d013977b8a14febc206c8b5ff562f9fe32738a8aca"}, + {file = "fonttools-4.53.0-cp310-cp310-win32.whl", hash = "sha256:9cd7a6beec6495d1dffb1033d50a3f82dfece23e9eb3c20cd3c2444d27514068"}, + {file = "fonttools-4.53.0-cp310-cp310-win_amd64.whl", hash = "sha256:daaef7390e632283051e3cf3e16aff2b68b247e99aea916f64e578c0449c9c68"}, + {file = "fonttools-4.53.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a209d2e624ba492df4f3bfad5996d1f76f03069c6133c60cd04f9a9e715595ec"}, + {file = "fonttools-4.53.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f520d9ac5b938e6494f58a25c77564beca7d0199ecf726e1bd3d56872c59749"}, + {file = "fonttools-4.53.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eceef49f457253000e6a2d0f7bd08ff4e9fe96ec4ffce2dbcb32e34d9c1b8161"}, + {file = "fonttools-4.53.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1f3e34373aa16045484b4d9d352d4c6b5f9f77ac77a178252ccbc851e8b2ee"}, + {file = "fonttools-4.53.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:28d072169fe8275fb1a0d35e3233f6df36a7e8474e56cb790a7258ad822b6fd6"}, + {file = "fonttools-4.53.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4a2a6ba400d386e904fd05db81f73bee0008af37799a7586deaa4aef8cd5971e"}, + {file = "fonttools-4.53.0-cp311-cp311-win32.whl", hash = "sha256:bb7273789f69b565d88e97e9e1da602b4ee7ba733caf35a6c2affd4334d4f005"}, + {file = "fonttools-4.53.0-cp311-cp311-win_amd64.whl", hash = "sha256:9fe9096a60113e1d755e9e6bda15ef7e03391ee0554d22829aa506cdf946f796"}, + {file = 
"fonttools-4.53.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d8f191a17369bd53a5557a5ee4bab91d5330ca3aefcdf17fab9a497b0e7cff7a"}, + {file = "fonttools-4.53.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:93156dd7f90ae0a1b0e8871032a07ef3178f553f0c70c386025a808f3a63b1f4"}, + {file = "fonttools-4.53.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bff98816cb144fb7b85e4b5ba3888a33b56ecef075b0e95b95bcd0a5fbf20f06"}, + {file = "fonttools-4.53.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:973d030180eca8255b1bce6ffc09ef38a05dcec0e8320cc9b7bcaa65346f341d"}, + {file = "fonttools-4.53.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c4ee5a24e281fbd8261c6ab29faa7fd9a87a12e8c0eed485b705236c65999109"}, + {file = "fonttools-4.53.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bd5bc124fae781a4422f61b98d1d7faa47985f663a64770b78f13d2c072410c2"}, + {file = "fonttools-4.53.0-cp312-cp312-win32.whl", hash = "sha256:a239afa1126b6a619130909c8404070e2b473dd2b7fc4aacacd2e763f8597fea"}, + {file = "fonttools-4.53.0-cp312-cp312-win_amd64.whl", hash = "sha256:45b4afb069039f0366a43a5d454bc54eea942bfb66b3fc3e9a2c07ef4d617380"}, + {file = "fonttools-4.53.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:93bc9e5aaa06ff928d751dc6be889ff3e7d2aa393ab873bc7f6396a99f6fbb12"}, + {file = "fonttools-4.53.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2367d47816cc9783a28645bc1dac07f8ffc93e0f015e8c9fc674a5b76a6da6e4"}, + {file = "fonttools-4.53.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:907fa0b662dd8fc1d7c661b90782ce81afb510fc4b7aa6ae7304d6c094b27bce"}, + {file = "fonttools-4.53.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e0ad3c6ea4bd6a289d958a1eb922767233f00982cf0fe42b177657c86c80a8f"}, + {file = "fonttools-4.53.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:73121a9b7ff93ada888aaee3985a88495489cc027894458cb1a736660bdfb206"}, + {file = "fonttools-4.53.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ee595d7ba9bba130b2bec555a40aafa60c26ce68ed0cf509983e0f12d88674fd"}, + {file = "fonttools-4.53.0-cp38-cp38-win32.whl", hash = "sha256:fca66d9ff2ac89b03f5aa17e0b21a97c21f3491c46b583bb131eb32c7bab33af"}, + {file = "fonttools-4.53.0-cp38-cp38-win_amd64.whl", hash = "sha256:31f0e3147375002aae30696dd1dc596636abbd22fca09d2e730ecde0baad1d6b"}, + {file = "fonttools-4.53.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7d6166192dcd925c78a91d599b48960e0a46fe565391c79fe6de481ac44d20ac"}, + {file = "fonttools-4.53.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef50ec31649fbc3acf6afd261ed89d09eb909b97cc289d80476166df8438524d"}, + {file = "fonttools-4.53.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f193f060391a455920d61684a70017ef5284ccbe6023bb056e15e5ac3de11d1"}, + {file = "fonttools-4.53.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba9f09ff17f947392a855e3455a846f9855f6cf6bec33e9a427d3c1d254c712f"}, + {file = "fonttools-4.53.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0c555e039d268445172b909b1b6bdcba42ada1cf4a60e367d68702e3f87e5f64"}, + {file = "fonttools-4.53.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a4788036201c908079e89ae3f5399b33bf45b9ea4514913f4dbbe4fac08efe0"}, + {file = "fonttools-4.53.0-cp39-cp39-win32.whl", hash = "sha256:d1a24f51a3305362b94681120c508758a88f207fa0a681c16b5a4172e9e6c7a9"}, + {file = 
"fonttools-4.53.0-cp39-cp39-win_amd64.whl", hash = "sha256:1e677bfb2b4bd0e5e99e0f7283e65e47a9814b0486cb64a41adf9ef110e078f2"}, + {file = "fonttools-4.53.0-py3-none-any.whl", hash = "sha256:6b4f04b1fbc01a3569d63359f2227c89ab294550de277fd09d8fca6185669fa4"}, + {file = "fonttools-4.53.0.tar.gz", hash = "sha256:c93ed66d32de1559b6fc348838c7572d5c0ac1e4a258e76763a5caddd8944002"}, ] [package.extras] @@ -1806,6 +1848,39 @@ files = [ {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"}, ] +[[package]] +name = "ipykernel" +version = "6.29.4" +description = "IPython Kernel for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ipykernel-6.29.4-py3-none-any.whl", hash = "sha256:1181e653d95c6808039c509ef8e67c4126b3b3af7781496c7cbfb5ed938a27da"}, + {file = "ipykernel-6.29.4.tar.gz", hash = "sha256:3d44070060f9475ac2092b760123fadf105d2e2493c24848b6691a7c4f42af5c"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" +debugpy = ">=1.6.5" +ipython = ">=7.23.1" +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +matplotlib-inline = ">=0.1" +nest-asyncio = "*" +packaging = "*" +psutil = "*" +pyzmq = ">=24" +tornado = ">=6.1" +traitlets = ">=5.4.0" + +[package.extras] +cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] +pyqt5 = ["pyqt5"] +pyside6 = ["pyside6"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] + [[package]] name = "ipython" version = "8.18.1" @@ -1845,21 +1920,21 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pa [[package]] name = "ipywidgets" -version = "8.1.2" +version = "8.1.3" description = "Jupyter interactive widgets" optional = false python-versions = ">=3.7" files = [ - {file = "ipywidgets-8.1.2-py3-none-any.whl", hash = "sha256:bbe43850d79fb5e906b14801d6c01402857996864d1e5b6fa62dd2ee35559f60"}, - {file = "ipywidgets-8.1.2.tar.gz", hash = "sha256:d0b9b41e49bae926a866e613a39b0f0097745d2b9f1f3dd406641b4a57ec42c9"}, + {file = "ipywidgets-8.1.3-py3-none-any.whl", hash = "sha256:efafd18f7a142248f7cb0ba890a68b96abd4d6e88ddbda483c9130d12667eaf2"}, + {file = "ipywidgets-8.1.3.tar.gz", hash = "sha256:f5f9eeaae082b1823ce9eac2575272952f40d748893972956dc09700a6392d9c"}, ] [package.dependencies] comm = ">=0.1.3" ipython = ">=6.1.0" -jupyterlab-widgets = ">=3.0.10,<3.1.0" +jupyterlab-widgets = ">=3.0.11,<3.1.0" traitlets = ">=4.3.1" -widgetsnbextension = ">=4.0.10,<4.1.0" +widgetsnbextension = ">=4.0.11,<4.1.0" [package.extras] test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] @@ -2011,13 +2086,13 @@ files = [ [[package]] name = "jupyterlab-widgets" -version = "3.0.10" +version = "3.0.11" description = "Jupyter interactive widgets for JupyterLab" optional = false python-versions = ">=3.7" files = [ - {file = "jupyterlab_widgets-3.0.10-py3-none-any.whl", hash = "sha256:dd61f3ae7a5a7f80299e14585ce6cf3d6925a96c9103c978eda293197730cb64"}, - {file = "jupyterlab_widgets-3.0.10.tar.gz", hash = "sha256:04f2ac04976727e4f9d0fa91cdc2f1ab860f965e504c29dbd6a65c882c9d04c0"}, + {file = "jupyterlab_widgets-3.0.11-py3-none-any.whl", hash = "sha256:78287fd86d20744ace330a61625024cf5521e1c012a352ddc0a3cdc2348becd0"}, + 
{file = "jupyterlab_widgets-3.0.11.tar.gz", hash = "sha256:dd5ac679593c969af29c9bed054c24f26842baa51352114736756bc035deee27"}, ] [[package]] @@ -2686,6 +2761,17 @@ nbformat = "*" sphinx = ">=1.8" traitlets = ">=5" +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + [[package]] name = "networkx" version = "3.2.1" @@ -3297,13 +3383,13 @@ files = [ [[package]] name = "prompt-toolkit" -version = "3.0.43" +version = "3.0.45" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, - {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, + {file = "prompt_toolkit-3.0.45-py3-none-any.whl", hash = "sha256:a29b89160e494e3ea8622b09fa5897610b437884dcdcd054fdc1308883326c2a"}, + {file = "prompt_toolkit-3.0.45.tar.gz", hash = "sha256:07c60ee4ab7b7e90824b61afa840c8f5aad2d46b3e2e10acc33d8ecc94a49089"}, ] [package.dependencies] @@ -3878,6 +3964,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3885,8 +3972,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = 
"PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3903,6 +3997,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3910,6 +4005,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -4039,7 +4135,7 @@ toml = ">=0.10.2,<0.11.0" [[package]] name = "qibojit" -version = "0.1.4" +version = "0.1.5" 
description = "Simulation tools based on numba and cupy." optional = false python-versions = "^3.9,<3.13" @@ -4049,7 +4145,7 @@ develop = false [package.dependencies] numba = ">=0.59.0" psutil = "^5.9.5" -qibo = ">=0.2.8" +qibo = "^0.2.8" scipy = "^1.10.1" [package.extras] @@ -4060,7 +4156,7 @@ cuquantum = ["cuquantum-python-cu12 (>=23.10.0,<24.0.0)"] type = "git" url = "https://github.com/qiboteam/qibojit.git" reference = "HEAD" -resolved_reference = "a671c5eb16487349eff5f9c27f464f7d955661f0" +resolved_reference = "683a19467797c8a4d7deaf1a3a1f10a1baca66ca" [[package]] name = "qibotn" @@ -4072,7 +4168,7 @@ files = [] develop = false [package.dependencies] -qibo = {git = "https://github.com/qiboteam/qibo.git"} +qibo = "^0.2.8" quimb = {version = "^1.6.0", extras = ["tensor"]} [package.extras] @@ -4082,7 +4178,7 @@ cuda = ["cupy-cuda11x (>=11.6.0,<12.0.0)", "cuquantum-python-cu11 (>=23.3.0,<24. type = "git" url = "https://github.com/qiboteam/qibotn.git" reference = "HEAD" -resolved_reference = "6016829e0ebd482c01fabbaff5ca57c8603bb545" +resolved_reference = "abb0e363cedfcc2c3f522d360e04ac6f2b5a24f1" [[package]] name = "quimb" @@ -4131,13 +4227,13 @@ sphinx = ">=1.3.1" [[package]] name = "requests" -version = "2.32.2" +version = "2.32.3" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" files = [ - {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, - {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -4268,30 +4364,50 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, {file = 
"ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win_amd64.whl", hash = "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b"}, {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win32.whl", hash = "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = 
"sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win32.whl", hash = "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win32.whl", hash = "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win_amd64.whl", hash = "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15"}, {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, @@ -4384,6 +4500,27 @@ dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pyde doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +[[package]] +name = "seaborn" +version = "0.13.2" +description = "Statistical data visualization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "seaborn-0.13.2-py3-none-any.whl", hash = "sha256:636f8336facf092165e27924f223d3c62ca560b1f2bb5dff7ab7fad265361987"}, + {file = "seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7"}, +] + +[package.dependencies] 
+matplotlib = ">=3.4,<3.6.1 || >3.6.1" +numpy = ">=1.20,<1.24.0 || >1.24.0" +pandas = ">=1.2" + +[package.extras] +dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"] +docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx (<6.0.0)", "sphinx-copybutton", "sphinx-design", "sphinx-issues"] +stats = ["scipy (>=1.7)", "statsmodels (>=0.12)"] + [[package]] name = "setuptools" version = "69.5.1" @@ -4715,17 +4852,17 @@ numpy = "*" [[package]] name = "sympy" -version = "1.12" +version = "1.12.1" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" files = [ - {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, - {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, + {file = "sympy-1.12.1-py3-none-any.whl", hash = "sha256:9b2cbc7f1a640289430e13d2a56f02f867a1da0190f2f99d8968c2f74da0e515"}, + {file = "sympy-1.12.1.tar.gz", hash = "sha256:2877b03f998cd8c08f07cd0de5b767119cd3ef40d09f41c30d722f6686b0fb88"}, ] [package.dependencies] -mpmath = ">=0.19" +mpmath = ">=1.1.0,<1.4.0" [[package]] name = "tabulate" @@ -5147,13 +5284,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.0-py3-none-any.whl", hash = "sha256:b349c66bea9016ac22978d800cfff206d5f9816951f12a7d0ec5578b0a819594"}, + {file = "typing_extensions-4.12.0.tar.gz", hash = "sha256:8cbcdc8606ebcb0d95453ad7dc5065e6237b6aa230a31e81d0f440c30fed5fd8"}, ] [[package]] @@ -5239,13 +5376,13 @@ test = ["pytest (>=6.0.0)", "setuptools (>=65)"] [[package]] name = "widgetsnbextension" -version = "4.0.10" +version = "4.0.11" description = "Jupyter interactive widgets for Jupyter Notebook" optional = false python-versions = ">=3.7" files = [ - {file = "widgetsnbextension-4.0.10-py3-none-any.whl", hash = "sha256:d37c3724ec32d8c48400a435ecfa7d3e259995201fbefa37163124a9fcb393cc"}, - {file = "widgetsnbextension-4.0.10.tar.gz", hash = "sha256:64196c5ff3b9a9183a8e699a4227fb0b7002f252c814098e66c4d1cd0644688f"}, + {file = "widgetsnbextension-4.0.11-py3-none-any.whl", hash = "sha256:55d4d6949d100e0d08b94948a42efc3ed6dfdc0e9468b2c4b128c9a2ce3a7a36"}, + {file = "widgetsnbextension-4.0.11.tar.gz", hash = "sha256:8b22a8f1910bfd188e596fe7fc05dcbd87e810c8a4ba010bdb3da86637398474"}, ] [[package]] @@ -5346,18 +5483,18 @@ files = [ [[package]] name = "zipp" -version = "3.18.2" +version = "3.19.1" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.18.2-py3-none-any.whl", hash = "sha256:dce197b859eb796242b0622af1b8beb0a722d52aa2f57133ead08edd5bf5374e"}, - {file = "zipp-3.18.2.tar.gz", hash = "sha256:6278d9ddbcfb1f1089a88fde84481528b07b0e10474e09dcfe53dad4069fa059"}, + {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"}, + {file = "zipp-3.19.1.tar.gz", hash = 
"sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [extras] qinfo = [] @@ -5367,4 +5504,4 @@ torch = ["torch"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "5bdaa2f0b5d72541884ea18fb07f539cb8f07a9fc6ab81bd2b39bccaea5d8131" +content-hash = "8e08af9af1dbb26d97fcb75be806be99ea4cc2619a6c986c48f627d3b7d9541a" diff --git a/pyproject.toml b/pyproject.toml index 38cd627478..881b2f8b4c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,6 +55,8 @@ sphinx-markdown-tables = "^0.0.17" sphinx-copybutton = "^0.5.2" nbsphinx = "^0.8.12" ipython = "^8.10.0" +seaborn = "^0.13.2" +ipykernel = "^6.29.4" [tool.poetry.group.tests] optional = true diff --git a/src/qibo/backends/__init__.py b/src/qibo/backends/__init__.py index a3076a5de4..a3dd66c2c6 100644 --- a/src/qibo/backends/__init__.py +++ b/src/qibo/backends/__init__.py @@ -133,6 +133,7 @@ def create(self, dtype): self.ECR = self.matrices.ECR self.SYC = self.matrices.SYC self.TOFFOLI = self.matrices.TOFFOLI + self.CCZ = self.matrices.CCZ matrices = QiboMatrices() diff --git a/src/qibo/backends/npmatrices.py b/src/qibo/backends/npmatrices.py index 63fc1840fb..168e78e09f 100644 --- a/src/qibo/backends/npmatrices.py +++ b/src/qibo/backends/npmatrices.py @@ -485,6 +485,22 @@ def TOFFOLI(self): dtype=self.dtype, ) + @cached_property + def CCZ(self): + return self._cast( + [ + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, -1], + ], + dtype=self.dtype, + ) + def DEUTSCH(self, theta): theta = self._cast_parameter(theta) sin = self.np.sin(theta) + 0j # 0j necessary for right tensorflow dtype diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index eabda0e07d..8174234631 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -2,6 +2,7 @@ import math import numpy as np +from scipy import sparse from qibo import __version__ from qibo.backends import einsum_utils @@ -119,7 +120,8 @@ def matrix_parametrized(self, gate): def matrix_fused(self, fgate): rank = len(fgate.target_qubits) - matrix = np.eye(2**rank) + matrix = sparse.eye(2**rank) + for gate in fgate.gates: # transfer gate matrix to numpy as it is more efficient for # small tensor calculations @@ -141,8 +143,10 @@ def matrix_fused(self, fgate): gmatrix = np.transpose(gmatrix, transpose_indices) gmatrix = np.reshape(gmatrix, original_shape) # fuse the individual gate matrix to the total ``FusedGate`` matrix - matrix = gmatrix @ matrix - return self.cast(matrix) + # we are using sparse matrices to improve perfomances + matrix = sparse.csr_matrix(gmatrix).dot(matrix) + + return 
self.cast(matrix.toarray())

    def control_matrix(self, gate):
        if len(gate.control_qubits) > 1:
diff --git a/src/qibo/gates/gates.py b/src/qibo/gates/gates.py
index da6518be6f..831e8dfe35 100644
--- a/src/qibo/gates/gates.py
+++ b/src/qibo/gates/gates.py
@@ -2308,6 +2308,56 @@ def congruent(self, use_toffolis: bool = True) -> List[Gate]:
        ]

+class CCZ(Gate):
+    """The controlled-CZ gate.
+
+    Corresponds to the following unitary matrix
+
+    .. math::
+        \\begin{pmatrix}
+            1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\\
+            0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\\\
+            0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\\\
+            0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\\
+            0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\\\
+            0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\\\
+            0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\\\
+            0 & 0 & 0 & 0 & 0 & 0 & 0 & -1 \\\\
+        \\end{pmatrix}
+
+    Args:
+        q0 (int): the first control qubit id number.
+        q1 (int): the second control qubit id number.
+        q2 (int): the target qubit id number.
+    """
+
+    def __init__(self, q0, q1, q2):
+        super().__init__()
+        self.name = "ccz"
+        self.draw_label = "Z"
+        self.control_qubits = (q0, q1)
+        self.target_qubits = (q2,)
+        self.init_args = [q0, q1, q2]
+        self.unitary = True
+
+    @property
+    def qasm_label(self):
+        return "ccz"
+
+    def decompose(self) -> List[Gate]:
+        """Decomposition of :math:`\\text{CCZ}` gate.
+
+        Decomposes the :math:`\\text{CCZ}` gate into a :class:`qibo.gates.H` gate
+        on the target qubit, followed by a :class:`qibo.gates.TOFFOLI` gate,
+        followed by another :class:`qibo.gates.H` on the target qubit.
+        """
+        from qibo.transpiler.decompositions import (  # pylint: disable=C0415
+            standard_decompositions,
+        )
+
+        return standard_decompositions(self)
+
+
class DEUTSCH(ParametrizedGate):
    """The Deutsch gate.
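
The decomposition documented in `CCZ.decompose` can be verified numerically; a minimal sketch, assuming a qibo build that includes this patch (not an official test of the PR):

    import numpy as np
    from qibo import Circuit, gates

    # CCZ should equal: H on the target, then TOFFOLI, then H on the target again
    ccz = Circuit(3)
    ccz.add(gates.CCZ(0, 1, 2))

    hth = Circuit(3)
    hth.add([gates.H(2), gates.TOFFOLI(0, 1, 2), gates.H(2)])

    assert np.allclose(ccz.unitary(), hth.unitary())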
diff --git a/src/qibo/models/dbi/double_bracket.py b/src/qibo/models/dbi/double_bracket.py
index c20b8e96e1..4f8ad712f5 100644
--- a/src/qibo/models/dbi/double_bracket.py
+++ b/src/qibo/models/dbi/double_bracket.py
@@ -1,11 +1,17 @@
from copy import copy
from enum import Enum, auto
-from functools import partial
+from typing import Optional

-import hyperopt
import numpy as np

from qibo.hamiltonians import Hamiltonian
+from qibo.models.dbi.utils import *
+from qibo.models.dbi.utils_scheduling import (
+    grid_search_step,
+    hyperopt_step,
+    polynomial_step,
+    simulated_annealing_step,
+)

class DoubleBracketGeneratorType(Enum):
@@ -20,6 +26,30 @@ class DoubleBracketGeneratorType(Enum):
    # TODO: add double commutator (does it converge?)

+class DoubleBracketCostFunction(str, Enum):
+    """Define the DBI cost function."""
+
+    off_diagonal_norm = "off_diagonal_norm"
+    """Use off-diagonal norm as cost function."""
+    least_squares = "least_squares"
+    """Use least squares as cost function."""
+    energy_fluctuation = "energy_fluctuation"
+    """Use energy fluctuation as cost function."""
+
+
+class DoubleBracketScheduling(Enum):
+    """Define the DBI scheduling strategies."""
+
+    hyperopt = hyperopt_step
+    """Use hyperopt package."""
+    grid_search = grid_search_step
+    """Use greedy grid search."""
+    polynomial_approximation = polynomial_step
+    """Use polynomial expansion (analytical) of the loss function."""
+    simulated_annealing = simulated_annealing_step
+    """Use simulated annealing algorithm."""
+
+
class DoubleBracketIteration:
    """
    Class implementing the Double Bracket iteration algorithm.
@@ -48,63 +78,58 @@ def __init__(
        self,
        hamiltonian: Hamiltonian,
        mode: DoubleBracketGeneratorType = DoubleBracketGeneratorType.canonical,
+        scheduling: DoubleBracketScheduling = DoubleBracketScheduling.grid_search,
+        cost: DoubleBracketCostFunction = DoubleBracketCostFunction.off_diagonal_norm,
+        ref_state: np.array = None,
    ):
        self.h = hamiltonian
        self.h0 = copy(self.h)
        self.mode = mode
+        self.scheduling = scheduling
+        self.cost = cost
+        self.ref_state = ref_state
+        """
+        Args:
+            hamiltonian (Hamiltonian): starting Hamiltonian.
+            mode (DoubleBracketGeneratorType): type of generator of the evolution.
+            scheduling (DoubleBracketScheduling): type of scheduling strategy.
+            cost (DoubleBracketCostFunction): type of cost function.
+            ref_state (np.array): reference state for computing the energy fluctuation.
+        """

    def __call__(
        self, step: float, mode: DoubleBracketGeneratorType = None, d: np.array = None
    ):
-        r"""We use convention that $H' = U^\dagger H U$ where $U=e^{-sW}$ with $W=[D,H]$ (or depending on `mode` an approximation, see `eval_dbr_unitary`). If $s>0$ then for $D = \Delta(H)$ the GWW DBR will give a $\sigma$-decrease, see https://arxiv.org/abs/2206.11772."""
-
-        operator = self.eval_dbr_unitary(step, mode, d)
-        operator_dagger = self.backend.cast(
-            np.matrix(self.backend.to_numpy(operator)).getH()
-        )
-        self.h.matrix = operator_dagger @ self.h.matrix @ operator
-        return operator
-
-    def eval_dbr_unitary(
-        self, step: float, mode: DoubleBracketGeneratorType = None, d=None
-    ):
-        """In call we will are working in the convention that $H' = U^\\dagger H U$ where $U=e^{-sW}$ with $W=[D,H]$ or an approximation of that by a group commutator. That is handy because if we switch from the DBI in the Heisenberg picture for the Hamiltonian, we get that the transformation of the state is $|\\psi'\rangle = U |\\psi\rangle$ so that $\\langle H\rangle_{\\psi'} = \\langle H' \rangle_\\psi$ (i.e. when writing the unitary acting on the state dagger notation is avoided).
-
-        The group commutator must approximate $U=e^{-s[D,H]}$. This is achieved by setting $r = \\sqrt{s}$ so that
-        $$V = e^{-irH}e^{irD}e^{irH}e^{-irD}$$
-        because
-        $$e^{-irH}De^{irH} = D+ir[D,H]+O(r^2)$$
-        so
-        $$V\approx e^{irD +i^2 r^2[D,H] + O(r^2) -irD} \approx U\\ .$$
-        See the app in https://arxiv.org/abs/2206.11772 for a derivation.
- """ + """Performs one double bracket rotation.""" if mode is None: mode = self.mode if mode is DoubleBracketGeneratorType.canonical: operator = self.backend.calculate_matrix_exp( - -1.0j * step, + 1.0j * step, self.commutator(self.diagonal_h_matrix, self.h.matrix), ) elif mode is DoubleBracketGeneratorType.single_commutator: if d is None: d = self.diagonal_h_matrix operator = self.backend.calculate_matrix_exp( - -1.0j * step, - self.commutator(d, self.h.matrix), + 1.0j * step, + self.commutator(self.backend.cast(d), self.h.matrix), ) elif mode is DoubleBracketGeneratorType.group_commutator: if d is None: d = self.diagonal_h_matrix - - sqrt_step = np.sqrt(step) operator = ( - self.h.exp(sqrt_step) - @ self.backend.calculate_matrix_exp(-sqrt_step, d) - @ self.h.exp(-sqrt_step) - @ self.backend.calculate_matrix_exp(sqrt_step, d) + self.h.exp(-step) + @ self.backend.calculate_matrix_exp(-step, d) + @ self.h.exp(step) + @ self.backend.calculate_matrix_exp(step, d) ) - return operator + operator_dagger = self.backend.cast( + np.array(np.matrix(self.backend.to_numpy(operator)).getH()) + ) + + self.h.matrix = operator @ self.h.matrix @ operator_dagger @staticmethod def commutator(a, b): @@ -118,11 +143,12 @@ def diagonal_h_matrix(self): @property def off_diag_h(self): + """Off-diagonal H matrix.""" return self.h.matrix - self.diagonal_h_matrix @property def off_diagonal_norm(self): - r"""Hilbert Schmidt norm of off-diagonal part of H matrix, namely :math:`\\text{Tr}(\\sqrt{A^{\\dagger} A})`.""" + """Hilbert Schmidt norm of off-diagonal part of H matrix, namely :math:`\\text{Tr}(\\sqrt{A^{\\dagger} A})`.""" off_diag_h_dag = self.backend.cast( np.matrix(self.backend.to_numpy(self.off_diag_h)).getH() ) @@ -135,56 +161,49 @@ def backend(self): """Get Hamiltonian's backend.""" return self.h0.backend - def hyperopt_step( + @property + def nqubits(self): + """Number of qubits.""" + return self.h.nqubits + + def least_squares(self, d: np.array): + """Least squares cost function.""" + d = self.backend.to_numpy(d) + return np.real( + 0.5 * np.linalg.norm(d) ** 2 + - np.trace(self.backend.to_numpy(self.h.matrix) @ d) + ) + + def choose_step( self, - step_min: float = 1e-5, - step_max: float = 1, - max_evals: int = 1000, - space: callable = None, - optimizer: callable = None, - look_ahead: int = 1, - verbose: bool = False, - d: np.array = None, + d: Optional[np.array] = None, + scheduling: Optional[DoubleBracketScheduling] = None, + **kwargs, ): - """ - Optimize iteration step. - - Args: - step_min: lower bound of the search grid; - step_max: upper bound of the search grid; - max_evals: maximum number of iterations done by the hyperoptimizer; - space: see hyperopt.hp possibilities; - optimizer: see hyperopt algorithms; - look_ahead: number of iteration steps to compute the loss function; - verbose: level of verbosity; - d: diagonal operator for generating double-bracket iterations. - - Returns: - (float): optimized best iteration step. 
- """ - if space is None: - space = hyperopt.hp.uniform - if optimizer is None: - optimizer = hyperopt.tpe - - space = space("step", step_min, step_max) - best = hyperopt.fmin( - fn=partial(self.loss, d=d, look_ahead=look_ahead), - space=space, - algo=optimizer.suggest, - max_evals=max_evals, - verbose=verbose, - ) - return best["step"] + """Calculate the optimal step using respective the `scheduling` methods.""" + if scheduling is None: + scheduling = self.scheduling + step = scheduling(self, d=d, **kwargs) + # TODO: write test for this case + if ( + step is None + and scheduling is DoubleBracketScheduling.polynomial_approximation + ): # pragma: no cover + kwargs["n"] = kwargs.get("n", 3) + kwargs["n"] += 1 + # if n==n_max, return None + step = scheduling(self, d=d, **kwargs) + # if for a given polynomial order n, no solution is found, we increase the order of the polynomial by 1 + return step def loss(self, step: float, d: np.array = None, look_ahead: int = 1): """ Compute loss function distance between `look_ahead` steps. Args: - step: iteration step. - d: diagonal operator, use canonical by default. - look_ahead: number of iteration steps to compute the loss function; + step (float): iteration step. + d (np.array): diagonal operator, use canonical by default. + look_ahead (int): number of iteration steps to compute the loss function; """ # copy initial hamiltonian h_copy = copy(self.h) @@ -192,8 +211,13 @@ def loss(self, step: float, d: np.array = None, look_ahead: int = 1): for _ in range(look_ahead): self.__call__(mode=self.mode, step=step, d=d) - # off_diagonal_norm's value after the steps - loss = self.off_diagonal_norm + # loss values depending on the cost function + if self.cost is DoubleBracketCostFunction.off_diagonal_norm: + loss = self.off_diagonal_norm + elif self.cost is DoubleBracketCostFunction.least_squares: + loss = self.least_squares(d) + elif self.cost == DoubleBracketCostFunction.energy_fluctuation: + loss = self.energy_fluctuation(self.ref_state) # set back the initial configuration self.h = h_copy @@ -202,10 +226,10 @@ def loss(self, step: float, d: np.array = None, look_ahead: int = 1): def energy_fluctuation(self, state): """ - Evaluate energy fluctuation + Evaluate energy fluctuation. .. math:: - \\Xi_{k}(\\mu) = \\sqrt{\\langle\\mu|\\hat{H}^2|\\mu\\rangle - \\langle\\mu|\\hat{H}|\\mu\\rangle^2} \\, + \\Xi(\\mu) = \\sqrt{\\langle\\mu|\\hat{H}^2|\\mu\\rangle - \\langle\\mu|\\hat{H}|\\mu\\rangle^2} \\, for a given state :math:`|\\mu\\rangle`. @@ -213,3 +237,32 @@ def energy_fluctuation(self, state): state (np.ndarray): quantum state to be used to compute the energy fluctuation with H. 
""" return self.h.energy_fluctuation(state) + + def sigma(self, h: np.array): + """Returns the off-diagonal restriction of matrix `h`.""" + return self.backend.cast(h) - self.backend.cast( + np.diag(np.diag(self.backend.to_numpy(h))) + ) + + def generate_gamma_list(self, n: int, d: np.array): + r"""Computes the n-nested Gamma functions, where $\Gamma_k=[W,...,[W,[W,H]]...]$, where we take k nested commutators with $W = [D, H]$""" + W = self.commutator(self.backend.cast(d), self.sigma(self.h.matrix)) + gamma_list = [self.h.matrix] + for _ in range(n - 1): + gamma_list.append(self.commutator(W, gamma_list[-1])) + return gamma_list + + def cost_expansion(self, d, n): + d = self.backend.cast(d) + + if self.cost is DoubleBracketCostFunction.off_diagonal_norm: + coef = off_diagonal_norm_polynomial_expansion_coef(self, d, n) + elif self.cost is DoubleBracketCostFunction.least_squares: + coef = least_squares_polynomial_expansion_coef(self, d, n) + elif self.cost is DoubleBracketCostFunction.energy_fluctuation: + coef = energy_fluctuation_polynomial_expansion_coef( + self, d, n, self.ref_state + ) + else: # pragma: no cover + raise ValueError(f"Cost function {self.cost} not recognized.") + return coef diff --git a/src/qibo/models/dbi/utils.py b/src/qibo/models/dbi/utils.py index 004dc8f130..1380b0ce88 100644 --- a/src/qibo/models/dbi/utils.py +++ b/src/qibo/models/dbi/utils.py @@ -1,17 +1,12 @@ -from copy import copy -from itertools import product -from typing import Optional +import math +from enum import Enum, auto +from itertools import combinations, product import numpy as np -from hyperopt import hp, tpe from qibo import symbols from qibo.backends import _check_backend from qibo.hamiltonians import SymbolicHamiltonian -from qibo.models.dbi.double_bracket import ( - DoubleBracketGeneratorType, - DoubleBracketIteration, -) def generate_Z_operators(nqubits: int, backend=None): @@ -71,97 +66,201 @@ def str_to_symbolic(name: str): return tensor_op -def select_best_dbr_generator( - dbi_object: DoubleBracketIteration, - d_list: list, - step: Optional[float] = None, - step_min: float = 1e-5, - step_max: float = 1, - max_evals: int = 200, - compare_canonical: bool = True, +def cs_angle_sgn(dbi_object, d): + """Calculates the sign of Cauchy-Schwarz Angle :math:`\\langle W(Z), W({\\rm canonical}) \\rangle_{\\rm HS}`.""" + backend = dbi_object.backend + d = dbi_object.backend.cast(d) + norm = backend.np.trace( + backend.np.dot( + backend.np.conjugate( + dbi_object.commutator(dbi_object.diagonal_h_matrix, dbi_object.h.matrix) + ).T, + dbi_object.commutator(d, dbi_object.h.matrix), + ) + ) + return backend.np.real(backend.np.sign(norm)) + + +def decompose_into_pauli_basis(h_matrix: np.array, pauli_operators: list): + """finds the decomposition of hamiltonian `h_matrix` into Pauli-Z operators""" + nqubits = int(np.log2(h_matrix.shape[0])) + + decomposition = [] + for Z_i in pauli_operators: + expect = np.trace(h_matrix @ Z_i) / 2**nqubits + decomposition.append(expect) + return decomposition + + +def generate_pauli_index(nqubits, order): + """ + Generate all possible combinations of qubits for a given order of Pauli operators. 
+ """ + if order == 1: + return list(range(nqubits)) + else: + indices = list(range(nqubits)) + return indices + [ + comb for i in range(2, order + 1) for comb in combinations(indices, i) + ] + + +def generate_pauli_operator_dict( + nqubits: int, + parameterization_order: int = 1, + symbols_pauli=symbols.Z, + backend=None, ): - """Selects the best double bracket rotation generator from a list and execute the rotation. + """Generates a dictionary containing Pauli `symbols_pauli` operators of locality `parameterization_order` for `nqubits` qubits. Args: - dbi_object (`DoubleBracketIteration`): the target DoubleBracketIteration object. - d_list (list): list of diagonal operators (np.array) to run from. - step (float): fixed iteration duration. - Defaults to ``None``, uses hyperopt. - step_min (float): minimally allowed iteration duration. - step_max (float): maximally allowed iteration duration. - max_evals (int): maximally allowed number of evaluation in hyperopt. - compare_canonical (bool): if `True`, the optimal diagonal operator chosen from "d_list" is compared with the canonical bracket. + nqubits (int): number of qubits in the system. + parameterization_order (int, optional): the locality of the operators generated. Defaults to 1. + symbols_pauli (qibo.symbols, optional): the symbol of the intended Pauli operator. Defaults to symbols.Z. Returns: - The updated dbi_object, index of the optimal diagonal operator, respective step duration, and evolution direction. + pauli_operator_dict (dictionary): dictionary with structure {"operator_name": operator} + + Example: + pauli_operator_dict = generate_pauli_operator_dict) """ - norms_off_diagonal_restriction = [ - dbi_object.off_diagonal_norm for _ in range(len(d_list)) + backend = _check_backend(backend) + pauli_index = generate_pauli_index(nqubits, order=parameterization_order) + pauli_operators = [ + generate_pauli_operators(nqubits, symbols_pauli, index, backend=backend) + for index in pauli_index ] - optimal_steps, flip_list = [], [] - for i, d in enumerate(d_list): - # prescribed step durations - dbi_eval = copy(dbi_object) - flip_list.append(cs_angle_sgn(dbi_eval, d)) - if flip_list[i] != 0: - if step is None: - step_best = dbi_eval.hyperopt_step( - d=flip_list[i] * d, - step_min=step_min, - step_max=step_max, - space=hp.uniform, - optimizer=tpe, - max_evals=max_evals, - ) - else: - step_best = step - dbi_eval(step=step_best, d=flip_list[i] * d) - optimal_steps.append(step_best) - norms_off_diagonal_restriction[i] = dbi_eval.off_diagonal_norm - # canonical - if compare_canonical is True: - flip_list.append(1) - dbi_eval = copy(dbi_object) - dbi_eval.mode = DoubleBracketGeneratorType.canonical - if step is None: - step_best = dbi_eval.hyperopt_step( - step_min=step_min, - step_max=step_max, - space=hp.uniform, - optimizer=tpe, - max_evals=max_evals, - ) - else: - step_best = step - dbi_object(step=step) - optimal_steps.append(step) - norms_off_diagonal_restriction.append(dbi_object.off_diagonal_norm) - # find best d - idx_max_loss = norms_off_diagonal_restriction.index( - min(norms_off_diagonal_restriction) - ) - flip = flip_list[idx_max_loss] - step_optimal = optimal_steps[idx_max_loss] - dbi_eval = copy(dbi_object) - if idx_max_loss == len(d_list) and compare_canonical is True: - # canonical - dbi_eval(step=step_optimal, mode=DoubleBracketGeneratorType.canonical) + return {index: operator for index, operator in zip(pauli_index, pauli_operators)} + +def generate_pauli_operators(nqubits, symbols_pauli, positions, backend=None): + # 
generates the matrix of an `nqubits`-qubit Pauli operator with `symbols_pauli` at `positions` + if isinstance(positions, int): + return SymbolicHamiltonian( + symbols_pauli(positions), + nqubits=nqubits, + backend=backend, + ).dense.matrix else: - d_optimal = flip * d_list[idx_max_loss] - dbi_eval(step=step_optimal, d=d_optimal) - return dbi_eval, idx_max_loss, step_optimal, flip + terms = [symbols_pauli(pos) for pos in positions] + return SymbolicHamiltonian( + math.prod(terms), nqubits=nqubits, backend=backend + ).dense.matrix -def cs_angle_sgn(dbi_object, d): - """Calculates the sign of Cauchy-Schwarz Angle :math:`\\langle W(Z), W({\\rm canonical}) \\rangle_{\\rm HS}`.""" - backend = dbi_object.backend - norm = backend.np.trace( - backend.np.matmul( - backend.np.conj( - dbi_object.commutator(dbi_object.diagonal_h_matrix, dbi_object.h.matrix) - ).T, - dbi_object.commutator(d, dbi_object.h.matrix), +class ParameterizationTypes(Enum): + """Define types of parameterization for the diagonal operator.""" + + pauli = auto() + """Uses Pauli-Z operators (magnetic field).""" + computational = auto() + """Uses computational basis.""" + + +def params_to_diagonal_operator( + params: np.array, + nqubits: int, + parameterization: ParameterizationTypes = ParameterizationTypes.pauli, + pauli_parameterization_order: int = 1, + normalize: bool = False, + pauli_operator_dict: dict = None, + backend=None, +): + r"""Creates the $D$ operator for the double-bracket iteration ansatz depending on the parameterization type.""" + backend = _check_backend(backend) + if parameterization is ParameterizationTypes.pauli: + # TODO: raise error if dimension mismatch + d = sum( + [ + backend.to_numpy(params[i]) + * backend.to_numpy(list(pauli_operator_dict.values())[i]) + for i in range(nqubits) + ] + ) + elif parameterization is ParameterizationTypes.computational: + d = np.zeros((len(params), len(params))) + for i in range(len(params)): + d[i, i] = backend.to_numpy(params[i]) + + # TODO: write proper tests for normalize=True + if normalize: # pragma: no cover + d = d / np.linalg.norm(d) + return d + + +def off_diagonal_norm_polynomial_expansion_coef(dbi_object, d, n): + # generate the Gamma's, where $\Gamma_{k+1}=[W, \Gamma_{k}]$ and $\Gamma_0=H$ + gamma_list = dbi_object.generate_gamma_list(n + 2, d) + sigma_gamma_list = list(map(dbi_object.sigma, gamma_list)) + gamma_list_np = list(map(dbi_object.backend.to_numpy, sigma_gamma_list)) + exp_list = np.array([1 / math.factorial(k) for k in range(n + 1)]) + # coefficients for rotation with [W,H] and H + c1 = exp_list.reshape((-1, 1, 1)) * gamma_list_np[1:] + c2 = exp_list.reshape((-1, 1, 1)) * gamma_list_np[:-1] + # product coefficient + trace_coefficients = [0] * (2 * n + 1) + for k in range(n + 1): + for j in range(n + 1): + power = k + j + product_matrix = c1[k] @ c2[j] + trace_coefficients[power] += 2 * np.trace(product_matrix) + # coefficients from high to low (n:0) + coef = list(reversed(trace_coefficients[: n + 1])) + return coef + + +def least_squares_polynomial_expansion_coef(dbi_object, d, n: int = 3): + """Return the Taylor expansion coefficients of the least-squares cost between `dbi_object.h` and the diagonal operator `d` with respect to the double bracket rotation duration `s`.""" + # generate the Gamma's, where $\Gamma_{k+1}=[W, \Gamma_{k}]$ and $\Gamma_0=H$ + Gamma_list = dbi_object.generate_gamma_list(n + 1, d) + exp_list = np.array([1 / math.factorial(k) for k in range(n + 1)]) + # coefficients + coef = np.empty(n) +
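# each coef[i] stores Re Tr(d @ Gamma_{i+1}) / i!, i.e. the s^i coefficient of d/ds Tr(d H(s)); differentiating the Taylor series in s shifts the nested-commutator index by one +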
for i in range(n): + coef[i] = np.real( + exp_list[i] * np.trace(dbi_object.backend.cast(d) @ Gamma_list[i + 1]) + ) + coef = list(reversed(coef)) + return coef + + +def energy_fluctuation_polynomial_expansion_coef( + dbi_object, d: np.array, n: int = 3, state=0 +): + """Return the Taylor expansion coefficients of the energy fluctuation of `dbi_object.h` in the state `state` with respect to the double bracket rotation duration `s`.""" + # generate the Gamma's, where $\Gamma_{k+1}=[W, \Gamma_{k}]$ and $\Gamma_0=H$ + Gamma_list = dbi_object.generate_gamma_list(n + 1, d) + # coefficients + coef = np.empty(3) + state_cast = dbi_object.backend.cast(state) + state_dag = dbi_object.backend.cast(state.conj().T) + + def variance(a): + """Calculates the variance of a matrix A with respect to a state: + Var($A$) = $\\langle\\mu|A^2|\\mu\\rangle-\\langle\\mu|A|\\mu\\rangle^2$""" + b = a @ a + return state_dag @ b @ state_cast - (state_dag @ a @ state_cast) ** 2 + + def covariance(a, b): + """This is a generalization of the notion of covariance, needed for the polynomial expansion of the energy fluctuation, + applied to two operators A and B with respect to a state: + Cov($A,B$) = $\\langle\\mu|AB+BA|\\mu\\rangle-2\\langle\\mu|A|\\mu\\rangle\\langle\\mu|B|\\mu\\rangle$ + """ + + c = a @ b + b @ a + return ( + state_dag @ c @ state_cast + - 2 * state_dag @ a @ state_cast * state_dag @ b @ state_cast + ) + + coef[0] = np.real(2 * covariance(Gamma_list[0], Gamma_list[1])) + coef[1] = np.real(2 * variance(Gamma_list[1])) + coef[2] = np.real( + covariance(Gamma_list[0], Gamma_list[3]) + + 3 * covariance(Gamma_list[1], Gamma_list[2]) ) - return np.sign(backend.to_numpy(norm)) + coef = list(reversed(coef)) + return coef diff --git a/src/qibo/models/dbi/utils_dbr_strategies.py b/src/qibo/models/dbi/utils_dbr_strategies.py new file mode 100644 index 0000000000..5aae761fde --- /dev/null +++ b/src/qibo/models/dbi/utils_dbr_strategies.py @@ -0,0 +1,299 @@ +import hyperopt + +from qibo.backends import _check_backend +from qibo.models.dbi.double_bracket import * +from qibo.models.dbi.utils import * + + +def select_best_dbr_generator( + dbi_object: DoubleBracketIteration, + d_list: list, + step: Optional[float] = None, + compare_canonical: bool = True, + scheduling: DoubleBracketScheduling = None, + **kwargs, +): + """Selects the best double bracket rotation generator from a list and executes the rotation. + + Args: + dbi_object (`DoubleBracketIteration`): the target DoubleBracketIteration object. + d_list (list): list of diagonal operators (np.array) to select from. + step (float): fixed iteration duration. + Defaults to ``None``; in that case the step is optimized with the `scheduling` method via the `choose_step` function. + compare_canonical (bool): if `True`, the diagonalization effect with operators from `d_list` is compared with the canonical bracket. + scheduling (`DoubleBracketScheduling`): scheduling method for finding the optimal step. + + Returns: + The updated dbi_object (`DoubleBracketIteration`), index of the optimal diagonal operator (int), respective step duration (float), and sign (int).
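+ + The returned sign multiplies the selected diagonal operator: the rotation finally applied uses sign * d_list[index], and a sign of 0 means the corresponding operator was skipped.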
+ + Example: + from qibo.hamiltonians import Hamiltonian + from qibo.models.dbi.double_bracket import * + from qibo.models.dbi.utils_dbr_strategies import select_best_dbr_generator + from qibo.quantum_info import random_hermitian + + nqubits = 3 + NSTEPS = 3 + h0 = random_hermitian(2**nqubits) + dbi = DoubleBracketIteration( + Hamiltonian(nqubits, h0), + mode=DoubleBracketGeneratorType.single_commutator, + ) + initial_off_diagonal_norm = dbi.off_diagonal_norm + generate_local_Z = generate_Z_operators(nqubits) + Z_ops = list(generate_local_Z.values()) + for _ in range(NSTEPS): + dbi, idx, step, flip_sign = select_best_dbr_generator( + dbi, Z_ops, compare_canonical=True + ) + """ + if scheduling is None: + scheduling = dbi_object.scheduling + + if compare_canonical: + norms_off_diagonal_restriction = [dbi_object.off_diagonal_norm] * ( + len(d_list) + 1 + ) + optimal_steps = np.zeros(len(d_list) + 1) + flip_list = np.ones(len(d_list) + 1) + else: + norms_off_diagonal_restriction = [dbi_object.off_diagonal_norm] * (len(d_list)) + optimal_steps = np.zeros(len(d_list)) + flip_list = np.ones(len(d_list)) + + for i, d in enumerate(d_list): + # prescribed step durations + dbi_eval = deepcopy(dbi_object) + d = dbi_eval.backend.cast(d) + flip_list[i] = cs_angle_sgn(dbi_eval, d) + if flip_list[i] != 0: + if step is None: + step_best = dbi_eval.choose_step( + d=flip_list[i] * d, scheduling=scheduling, **kwargs + ) + else: + step_best = step + dbi_eval(step=step_best, d=flip_list[i] * d) + optimal_steps[i] = step_best + norms_off_diagonal_restriction[i] = dbi_eval.off_diagonal_norm + # canonical + if compare_canonical is True: + dbi_eval = deepcopy(dbi_object) + dbi_eval.mode = DoubleBracketGeneratorType.canonical + if step is None: + step_best = dbi_eval.choose_step(scheduling=scheduling, **kwargs) + else: + step_best = step + dbi_eval(step=step_best) + optimal_steps[-1] = step_best + norms_off_diagonal_restriction[-1] = dbi_eval.off_diagonal_norm + # find best d + idx_max_loss = np.argmin(norms_off_diagonal_restriction) + flip = flip_list[idx_max_loss] + step_optimal = optimal_steps[idx_max_loss] + dbi_eval = deepcopy(dbi_object) + if idx_max_loss == len(d_list) and compare_canonical is True: + # canonical + dbi_eval(step=step_optimal, mode=DoubleBracketGeneratorType.canonical) + + else: + d_optimal = flip * d_list[idx_max_loss] + dbi_eval(step=step_optimal, d=d_optimal) + return dbi_eval, idx_max_loss, step_optimal, flip + + +def gradient_numerical( + dbi_object: DoubleBracketIteration, + d_params: list, + parameterization: ParameterizationTypes, + s: float = 1e-2, + delta: float = 1e-3, + backend=None, + **kwargs, +): + r""" + Gradient of the DBI loss with respect to the parametrization of D. A simple finite difference scheme is used to calculate the gradient. + + Args: + dbi_object (DoubleBracketIteration): DoubleBracketIteration object. + d_params (np.array): Parameters for the ansatz (note that the dimension must be 2**nqubits for the full ansatz and nqubits for the Pauli ansatz). + parameterization (ParameterizationTypes): the parameterization method for the diagonal operator. + s (float): A short flow duration for finding the numerical gradient. + delta (float): Step size for the numerical gradient. + Returns: + grad (np.array): numerical gradient of the loss with respect to the parameters of D.
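+ + Each component is the forward finite difference (loss(s, D(params + delta * e_i)) - loss(s, D(params))) / delta, with e_i the i-th unit vector.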
+ """ + backend = _check_backend(backend) + nqubits = dbi_object.nqubits + grad = np.zeros(len(d_params)) + d = params_to_diagonal_operator( + d_params, nqubits, parameterization=parameterization, **kwargs + ) + for i in range(len(d_params)): + params_new = backend.cast(d_params, copy=True) + params_new[i] += delta + d_new = params_to_diagonal_operator( + params_new, nqubits, parameterization=parameterization, **kwargs + ) + # find the increment of a very small step + grad[i] = (dbi_object.loss(s, d_new) - dbi_object.loss(s, d)) / delta + return grad + + +def gradient_descent( + dbi_object: DoubleBracketIteration, + iterations: int, + d_params: list, + parameterization: ParameterizationTypes, + pauli_operator_dict: dict = None, + pauli_parameterization_order: int = 1, + normalize: bool = False, + lr_min: float = 1e-5, + lr_max: float = 1, + max_evals: int = 100, + space: callable = None, + optimizer: callable = hyperopt.tpe, + verbose: bool = False, + backend=None, +): + r"""Numerical gradient descent method for variating diagonal operator in each double bracket rotation. + + Args: + dbi_object (DoubleBracketIteration): the target double bracket object. + iterations (int): number of double bracket rotations. + d_params (list): the parameters for the initial diagonal operator. + parameterization (ParameterizationTypes): the parameterization method for diagonal operator. + Options include pauli and computational. + pauli_operator_dict (dictionary, optional): dictionary of "name": Pauli-operator for Pauli-based parameterization type. + Defaults to None. + pauli_parameterization_order (int, optional): the order of parameterization or locality in Pauli basis. Defaults to 1. + normalize (bool, optional): option to normalize the diagonal operator. Defaults to False. + lr_min (float, optional): the minimal gradient step. Defaults to 1e-5. + lr_max (float, optional): the maximal gradient step. Defaults to 1. + max_evals (int, optional): maximum number of evaluations for `lr` using `hyperopt`. Defaults to 100. + space (callable, optional): evalutation space for `hyperopt`. Defaults to None. + optimizer (callable, optional): optimizer option for `hyperopt`. Defaults to `hyperopt.tpe`. + verbose (bool, optional): option for printing `hyperopt` process. Defaults to False. + + Returns: + loss_hist (list): list of history losses of `dbi_object` throughout the double bracket rotations. + d_params_hist (list): list of history of `d` parameters after gradient descent. + s_hist (list): list of history of optimal `s` found. 
+ Example: + from qibo import set_backend + from qibo.hamiltonians import Hamiltonian + from qibo.models.dbi.double_bracket import * + from qibo.models.dbi.utils import * + from qibo.models.dbi.utils_dbr_strategies import gradient_descent + from qibo.quantum_info import random_hermitian + + nqubits = 3 + NSTEPS = 5 + set_backend("numpy") + h0 = random_hermitian(2**nqubits) + dbi = DoubleBracketIteration( + Hamiltonian(nqubits, h0), + mode=DoubleBracketGeneratorType.single_commutator, + scheduling=DoubleBracketScheduling.hyperopt, + cost=DoubleBracketCostFunction.off_diagonal_norm, + ) + initial_off_diagonal_norm = dbi.off_diagonal_norm + pauli_operator_dict = generate_pauli_operator_dict( + nqubits, parameterization_order=1 + ) + pauli_operators = list(pauli_operator_dict.values()) + # let initial d be an approximation of $\Delta(H)$ + d_coef_pauli = decompose_into_pauli_basis( + dbi.diagonal_h_matrix, pauli_operators=pauli_operators + ) + d_pauli = sum([d_coef_pauli[i] * pauli_operators[i] for i in range(nqubits)]) + loss_hist_pauli, d_params_hist_pauli, s_hist_pauli = gradient_descent( + dbi, + NSTEPS, + d_coef_pauli, + ParameterizationTypes.pauli, + pauli_operator_dict=pauli_operator_dict, + ) + """ + backend = _check_backend(backend) + + nqubits = dbi_object.nqubits + # TODO: write tests where this condition applies + if ( + parameterization is ParameterizationTypes.pauli and pauli_operator_dict is None + ): # pragma: no cover + pauli_operator_dict = generate_pauli_operator_dict( + nqubits=nqubits, + parameterization_order=pauli_parameterization_order, + backend=backend, + ) + d = params_to_diagonal_operator( + d_params, + nqubits, + parameterization=parameterization, + pauli_operator_dict=pauli_operator_dict, + normalize=normalize, + ) + loss_hist = [dbi_object.loss(0.0, d=d)] + d_params_hist = [d_params] + s_hist = [0] + # first step + s = dbi_object.choose_step(d=d) + dbi_object(step=s, d=d) + for _ in range(iterations): + grad = gradient_numerical( + dbi_object, + d_params, + parameterization, + pauli_operator_dict=pauli_operator_dict, + pauli_parameterization_order=pauli_parameterization_order, + normalize=normalize, + backend=backend, + ) + + # set up hyperopt to find optimal lr + def func_loss_to_lr(lr): + d_params_eval = [d_params[j] - grad[j] * lr for j in range(len(grad))] + d_eval = params_to_diagonal_operator( + d_params_eval, + nqubits, + parameterization=parameterization, + pauli_operator_dict=pauli_operator_dict, + normalize=normalize, + ) + return dbi_object.loss(step=s, d=d_eval) + + if space is None: + space = hyperopt.hp.loguniform("lr", np.log(lr_min), np.log(lr_max)) + + best = hyperopt.fmin( + fn=func_loss_to_lr, + space=space, + algo=optimizer.suggest, + max_evals=max_evals, + verbose=verbose, + ) + lr = best["lr"] + + d_params = [d_params[j] - grad[j] * lr for j in range(len(grad))] + d = params_to_diagonal_operator( + d_params, + nqubits, + parameterization=parameterization, + pauli_operator_dict=pauli_operator_dict, + normalize=normalize, + ) + s = dbi_object.choose_step(d=d) + dbi_object(step=s, d=d) + + # record history + loss_hist.append(dbi_object.loss(0.0, d=d)) + d_params_hist.append(d_params) + s_hist.append(s) + return loss_hist, d_params_hist, s_hist diff --git 
a/src/qibo/models/dbi/utils_scheduling.py b/src/qibo/models/dbi/utils_scheduling.py new file mode 100644 index 0000000000..130cd88f30 --- /dev/null +++ b/src/qibo/models/dbi/utils_scheduling.py @@ -0,0 +1,212 @@ +import math +from functools import partial +from typing import Optional + +import hyperopt +import numpy as np + +error = 1e-3 + + +def grid_search_step( + dbi_object, + step_min: float = 1e-5, + step_max: float = 1, + num_evals: int = 100, + space: Optional[np.array] = None, + d: Optional[np.array] = None, +): + """ + Grid-search optimization of the iteration step. + + Args: + step_min: lower bound of the search grid; + step_max: upper bound of the search grid; + num_evals: number of evaluation points between step_min and step_max; + space: optional array of candidate steps, replacing the grid built from step_min, step_max and num_evals; + d: diagonal operator for generating double-bracket iterations. + + Returns: + (float): optimized best iteration step (minimizing off-diagonal norm). + """ + if space is None: + space = np.linspace(step_min, step_max, num_evals) + + if d is None: + d = dbi_object.diagonal_h_matrix + + loss_list = [dbi_object.loss(step, d=d) for step in space] + + idx_min_loss = np.argmin(loss_list) + return space[idx_min_loss] + + +def hyperopt_step( + dbi_object, + step_min: float = 1e-5, + step_max: float = 1, + max_evals: int = 100, + space: callable = None, + optimizer: callable = None, + look_ahead: int = 1, + d: Optional[np.array] = None, +): + """ + Optimize iteration step using hyperopt. + + Args: + step_min: lower bound of the search grid; + step_max: upper bound of the search grid; + max_evals: maximum number of iterations done by the hyperoptimizer; + space: see hyperopt.hp possibilities; + optimizer: see hyperopt algorithms; + look_ahead: number of iteration steps to compute the loss function; + d: diagonal operator for generating double-bracket iterations. + + Returns: + (float): optimized best iteration step (minimizing loss function). + """ + if space is None: + space = hyperopt.hp.uniform + if optimizer is None: + optimizer = hyperopt.tpe + if d is None: + d = dbi_object.diagonal_h_matrix + + space = space("step", step_min, step_max) + + best = hyperopt.fmin( + fn=partial(dbi_object.loss, d=d, look_ahead=look_ahead), + space=space, + algo=optimizer.suggest, + max_evals=max_evals, + show_progressbar=False, + ) + return best["step"] + + +def polynomial_step( + dbi_object, + n: int = 2, + n_max: int = 5, + d: np.array = None, + coef: Optional[list] = None, + cost: Optional[str] = None, +): + r""" + Optimizes the iteration step by solving the n-th order polynomial expansion of the loss function, + e.g. for $n=2$: $2\,\mathrm{Tr}(\sigma(\Gamma_1 + s\Gamma_2 + \frac{s^2}{2}\Gamma_3)\sigma(\Gamma_0 + s\Gamma_1 + \frac{s^2}{2}\Gamma_2))$ + Args: + n (int, optional): the order to which the loss function is expanded. Defaults to 2. + n_max (int, optional): maximum order allowed for recurring calls of `polynomial_step`. Defaults to 5. + d (np.array, optional): diagonal operator, default as $\delta(H)$. + coef (list, optional): precomputed expansion coefficients; computed via `dbi_object.cost_expansion` when ``None``. + cost (optional): cost function to expand; defaults to `dbi_object.cost`. + """ + if cost is None: + cost = dbi_object.cost + + if d is None: + d = dbi_object.diagonal_h_matrix + + if n > n_max: + raise ValueError( + "No solution can be found with polynomial approximation. Increase `n_max` or use other scheduling methods."
+ ) + if coef is None: + coef = dbi_object.cost_expansion(d=d, n=n) + roots = np.roots(coef) + real_positive_roots = [ + np.real(root) for root in roots if abs(np.imag(root)) < 1e-3 and np.real(root) > 0 + ] + # solution exists, return minimum s + if len(real_positive_roots) > 0: + losses = [dbi_object.loss(step=root, d=d) for root in real_positive_roots] + return real_positive_roots[losses.index(min(losses))] + # solution does not exist, return None + else: + return None + + +def simulated_annealing_step( + dbi_object, + d: Optional[np.array] = None, + initial_s=None, + step_min=1e-5, + step_max=1, + s_jump_range=None, + s_jump_range_divident=5, + initial_temp=1, + cooling_rate=0.85, + min_temp=1e-5, + max_iter=200, +): + """ + Perform a single step of simulated annealing optimization. + + Parameters: + dbi_object: DBI object + The object representing the problem to be optimized. + d: Optional[np.array], optional + The diagonal matrix 'd' used in optimization. If None, it uses the diagonal + matrix 'diagonal_h_matrix' from dbi_object. + initial_s: float or None, optional + Initial value for 's', the step size. If None, it is initialized using + polynomial_step function with 'n=4'. If 'polynomial_step' returns None, + 'initial_s' is set to 'step_min'. + step_min: float, optional + Minimum value for the step size 's'. + step_max: float, optional + Maximum value for the step size 's'. + s_jump_range: float or None, optional + Range for the random jump in step size. If None, it's calculated based on + 'step_min', 'step_max', and 's_jump_range_divident'. + s_jump_range_divident: int, optional + Divisor used to set the jump range to (step_max - step_min) / s_jump_range_divident. + initial_temp: float, optional + Initial temperature for simulated annealing. + cooling_rate: float, optional + Rate at which temperature decreases in simulated annealing. + min_temp: float, optional + Minimum temperature threshold for termination of simulated annealing. + max_iter: int, optional + Maximum number of iterations for simulated annealing. + + Returns: + float: + The optimized step size 's'. + """ + + if d is None: + d = dbi_object.diagonal_h_matrix + if initial_s is None: + initial_s = polynomial_step(dbi_object=dbi_object, d=d, n=4) + # TODO: implement test to catch this if statement + if initial_s is None: # pragma: no cover + initial_s = step_min + if s_jump_range is None: + s_jump_range = (step_max - step_min) / s_jump_range_divident + current_s = initial_s + current_loss = dbi_object.loss(d=d, step=current_s) + temp = initial_temp + + for _ in range(max_iter): + candidate_s = max( + step_min, + min( + current_s + np.random.uniform(-1 * s_jump_range, s_jump_range), step_max + ), + ) + candidate_loss = dbi_object.loss(d=d, step=candidate_s) + + # Calculate change in loss + delta_loss = candidate_loss - current_loss + + # Determine if the candidate solution is an improvement + if delta_loss < 0 or np.random.rand() < math.exp(-delta_loss / temp): + current_s = candidate_s + current_loss = candidate_loss + # Cool down + temp *= cooling_rate + if temp < min_temp or current_s > step_max or current_s < step_min: + break + + return current_s diff --git a/src/qibo/optimizers.py b/src/qibo/optimizers.py index 10e58d9205..a425ad7a1f 100644 --- a/src/qibo/optimizers.py +++ b/src/qibo/optimizers.py @@ -84,13 +84,13 @@ def myloss(parameters, circuit): RuntimeError, "The keyword 'bounds' cannot be used with the cma optimizer. Please use 'options' instead as defined by the cma documentation: ex. 
options['bounds'] = [0.0, 1.0].", ) - return cmaes(loss, initial_parameters, args, options) + return cmaes(loss, initial_parameters, args, callback, options) elif method == "sgd": from qibo.backends import _check_backend backend = _check_backend(backend) - return sgd(loss, initial_parameters, args, options, compile, backend) + return sgd(loss, initial_parameters, args, callback, options, compile, backend) else: from qibo.backends import _check_backend @@ -114,7 +114,7 @@ def myloss(parameters, circuit): ) -def cmaes(loss, initial_parameters, args=(), options=None): +def cmaes(loss, initial_parameters, args=(), callback=None, options=None): """Genetic optimizer based on `pycma `_. Args: @@ -123,14 +123,30 @@ def cmaes(loss, initial_parameters, args=(), options=None): initial_parameters (np.ndarray): Initial guess for the variational parameters. args (tuple): optional arguments for the loss function. + callback (callable): function called on every candidate solution (the parameter + vector proposed by ``ask``) at each iteration of the evolution strategy. + See: https://cma-es.github.io/apidocs-pycma/cma.evolution_strategy.CMAEvolutionStrategy.html. options (dict): Dictionary with options accepted by the ``cma`` optimizer. The user can use ``import cma; cma.CMAOptions()`` to view the available options. """ import cma - r = cma.fmin2(loss, initial_parameters, 1.7, options=options, args=args) - return r[1].result.fbest, r[1].result.xbest, r + es = cma.CMAEvolutionStrategy(initial_parameters, sigma0=1.7, inopts=options) + + if callback is not None: + while not es.stop(): + solutions = es.ask() + objective_values = [loss(x, *args) for x in solutions] + for solution in solutions: + callback(solution) + es.tell(solutions, objective_values) + es.logger.add() + else: + es.optimize(loss, args=args) + + return es.result.fbest, es.result.xbest, es.result def newtonian( @@ -213,8 +229,16 @@ def newtonian( return m.fun, m.x, m -def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend=None): - """Stochastic Gradient Descent (SGD) optimizer using Tensorflow or PyTorch backpropagation. +def sgd( + loss, + initial_parameters, + args=(), + callback=None, + options=None, + compile=False, + backend=None, +): + """Stochastic Gradient Descent (SGD) optimizer using Tensorflow backpropagation. See `tf.keras.Optimizers `_ for a list of the available optimizers for Tensorflow. @@ -227,6 +251,7 @@ def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend= initial_parameters (np.ndarray): Initial guess for the variational parameters. args (tuple): optional arguments for the loss function. + callback (callable): called after each epoch with the current parameters. options (dict): Dictionary with options for the SGD optimizer. Supports the following keys: @@ -237,6 +262,9 @@ def sgd(loss, initial_parameters, args=(), options=None, compile=False, backend= a message of the loss function. 
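+ + Example (illustrative, using only the keys shown here): options = {"nepochs": 10000, "nmessage": 1000}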
""" + if not backend.name == "tensorflow": + raise_error(RuntimeError, "SGD optimizer requires Tensorflow backend.") + sgd_options = { "nepochs": 1000000, "nmessage": 1000, @@ -299,6 +327,8 @@ def opt_step(): for e in range(sgd_options["nepochs"]): l = opt_step() + if callback is not None: + callback(vparams) if e % sgd_options["nmessage"] == 1: log.info("ite %d : loss %f", e, l.numpy()) diff --git a/src/qibo/transpiler/decompositions.py b/src/qibo/transpiler/decompositions.py index f559f0088a..33ab35b56b 100644 --- a/src/qibo/transpiler/decompositions.py +++ b/src/qibo/transpiler/decompositions.py @@ -465,3 +465,4 @@ def _u3_to_gpi2(t, p, l): standard_decompositions.add( gates.ECR, [gates.S(0), gates.SX(1), gates.CNOT(0, 1), gates.X(0)] ) +standard_decompositions.add(gates.CCZ, [gates.H(2), gates.TOFFOLI(0, 1, 2), gates.H(2)]) diff --git a/tests/test_gates_gates.py b/tests/test_gates_gates.py index 3fa2b192cc..6dd7fe279b 100644 --- a/tests/test_gates_gates.py +++ b/tests/test_gates_gates.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from qibo import gates +from qibo import Circuit, gates, matrices from qibo.parameter import Parameter from qibo.quantum_info import random_hermitian, random_statevector, random_unitary @@ -1206,6 +1206,46 @@ def test_toffoli(backend, applyx): assert gates.TOFFOLI(0, 1, 2).unitary +def test_ccz(backend): + nqubits = 3 + initial_state = random_statevector(2**nqubits, backend=backend) + final_state = apply_gates( + backend, + [gates.CCZ(0, 1, 2)], + nqubits=nqubits, + initial_state=initial_state, + ) + + matrix = np.array( + [ + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, -1], + ], + dtype=np.complex128, + ) + matrix = backend.cast(matrix, dtype=matrix.dtype) + + target_state = matrix @ initial_state + backend.assert_allclose(final_state, target_state) + + assert gates.CCZ(0, 1, 2).qasm_label == "ccz" + assert not gates.CCZ(0, 1, 2).clifford + assert gates.CCZ(0, 1, 2).unitary + + # test decomposition + decomposition = Circuit(3) + decomposition.add(gates.CCZ(0, 1, 2).decompose()) + decomposition = decomposition.unitary(backend) + + backend.assert_allclose(decomposition, backend.cast(matrices.CCZ), atol=1e-10) + + def test_deutsch(backend): theta = 0.1234 nqubits = 3 diff --git a/tests/test_models_dbi.py b/tests/test_models_dbi.py index bbfd42160d..c20465e5a9 100644 --- a/tests/test_models_dbi.py +++ b/tests/test_models_dbi.py @@ -3,21 +3,31 @@ import numpy as np import pytest +from qibo import hamiltonians, set_backend from qibo.hamiltonians import Hamiltonian from qibo.models.dbi.double_bracket import ( + DoubleBracketCostFunction, DoubleBracketGeneratorType, DoubleBracketIteration, + DoubleBracketScheduling, ) +from qibo.models.dbi.utils import * +from qibo.models.dbi.utils_dbr_strategies import ( + gradient_descent, + select_best_dbr_generator, +) +from qibo.models.dbi.utils_scheduling import polynomial_step from qibo.quantum_info import random_hermitian -NSTEPS = 50 -SEED = 10 +NSTEPS = 3 +seed = 10 """Number of steps for evolution.""" @pytest.mark.parametrize("nqubits", [1, 2]) def test_double_bracket_iteration_canonical(backend, nqubits): - h0 = random_hermitian(2**nqubits, backend=backend) + """Check default (canonical) mode.""" + h0 = random_hermitian(2**nqubits, backend=backend, seed=seed) dbi = DoubleBracketIteration( Hamiltonian(nqubits, h0, backend=backend), 
mode=DoubleBracketGeneratorType.canonical, @@ -31,7 +41,8 @@ def test_double_bracket_iteration_canonical(backend, nqubits): @pytest.mark.parametrize("nqubits", [1, 2]) def test_double_bracket_iteration_group_commutator(backend, nqubits): - h0 = random_hermitian(2**nqubits, backend=backend, seed=SEED) + """Check group commutator mode.""" + h0 = random_hermitian(2**nqubits, backend=backend, seed=seed) d = backend.cast(np.diag(np.diag(backend.to_numpy(h0)))) dbi = DoubleBracketIteration( Hamiltonian(nqubits, h0, backend=backend), @@ -41,38 +52,16 @@ def test_double_bracket_iteration_group_commutator(backend, nqubits): # test first iteration with default d dbi(mode=DoubleBracketGeneratorType.group_commutator, step=0.01) - for _ in range(NSTEPS): dbi(step=0.01, d=d) assert initial_off_diagonal_norm > dbi.off_diagonal_norm -@pytest.mark.parametrize("nqubits", [3]) -def test_double_bracket_iteration_eval_dbr_unitary(backend, nqubits): - r"""The bound is $$||e^{-[D,H]}-GC||\le s^{3/2}(||[H,[D,H]||+||[D,[D,H]]||$$""" - h0 = random_hermitian(2**nqubits, backend=backend) - d = backend.cast(np.diag(np.diag(backend.to_numpy(h0)))) - dbi = DoubleBracketIteration( - Hamiltonian(nqubits, h0, backend=backend), - mode=DoubleBracketGeneratorType.group_commutator, - ) - - for s in np.linspace(0.001, 0.01, NSTEPS): - u = dbi.eval_dbr_unitary( - s, d=d, mode=DoubleBracketGeneratorType.single_commutator - ) - v = dbi.eval_dbr_unitary( - s, d=d, mode=DoubleBracketGeneratorType.group_commutator - ) - assert np.linalg.norm(backend.to_numpy(u - v)) < 10 * s**1.49 * ( - np.linalg.norm(backend.to_numpy(h0)) + np.linalg.norm(backend.to_numpy(d)) - ) * np.linalg.norm(backend.to_numpy(h0)) * np.linalg.norm(backend.to_numpy(d)) - - @pytest.mark.parametrize("nqubits", [1, 2]) def test_double_bracket_iteration_single_commutator(backend, nqubits): - h0 = random_hermitian(2**nqubits, backend=backend) + """Check single commutator mode.""" + h0 = random_hermitian(2**nqubits, backend=backend, seed=seed) d = backend.cast(np.diag(np.diag(backend.to_numpy(h0)))) dbi = DoubleBracketIteration( Hamiltonian(nqubits, h0, backend=backend), @@ -89,49 +78,190 @@ def test_double_bracket_iteration_single_commutator(backend, nqubits): assert initial_off_diagonal_norm > dbi.off_diagonal_norm -@pytest.mark.parametrize("nqubits", [3, 4]) -def test_hyperopt_step(backend, nqubits): - h0 = random_hermitian(2**nqubits, backend=backend) - d = backend.cast(np.diag(np.diag(backend.to_numpy(h0)))) - dbi = DoubleBracketIteration(Hamiltonian(nqubits, h0, backend=backend)) +@pytest.mark.parametrize("nqubits", [2, 3]) +@pytest.mark.parametrize( + "scheduling", + [ + DoubleBracketScheduling.grid_search, + DoubleBracketScheduling.hyperopt, + DoubleBracketScheduling.simulated_annealing, + ], +) +def test_variational_scheduling(backend, nqubits, scheduling): + """Check scheduling options.""" + h = 2 + # define the Hamiltonian + h0 = hamiltonians.TFIM(nqubits=nqubits, h=h) + dbi = DoubleBracketIteration(h0, scheduling=scheduling) # find initial best step with look_ahead = 1 - initial_step = 0.01 - delta = 0.02 + initial_off_diagonal_norm = dbi.off_diagonal_norm + for _ in range(NSTEPS): + step = dbi.choose_step() + dbi(step=step) + assert initial_off_diagonal_norm > dbi.off_diagonal_norm + - step = dbi.hyperopt_step( - step_min=initial_step - delta, step_max=initial_step + delta, max_evals=10 +@pytest.mark.parametrize( + "cost", + [ + DoubleBracketCostFunction.off_diagonal_norm, + DoubleBracketCostFunction.least_squares, + ], +) +def 
test_polynomial_cost_function(backend, cost): + nqubits = 2 + h0 = random_hermitian(2**nqubits, backend=backend, seed=seed) + dbi = DoubleBracketIteration( + Hamiltonian(nqubits, h0, backend=backend), + mode=DoubleBracketGeneratorType.single_commutator, + cost=cost, + scheduling=DoubleBracketScheduling.polynomial_approximation, + ) + initial_off_diagonal_norm = dbi.off_diagonal_norm + for _ in range(NSTEPS): + s = dbi.choose_step(d=dbi.diagonal_h_matrix, n=5) + dbi(step=s, d=dbi.off_diag_h) + assert initial_off_diagonal_norm > dbi.off_diagonal_norm - assert step != initial_step - # evolve following the optimized first step - for generator in DoubleBracketGeneratorType: - dbi(mode=generator, step=step, d=d) +def test_polynomial_energy_fluctuation(backend): + nqubits = 4 + h0 = random_hermitian(2**nqubits, seed=seed, backend=backend) + state = np.zeros(2**nqubits) + state[0] = 1 + dbi = DoubleBracketIteration( + Hamiltonian(nqubits, h0, backend=backend), + mode=DoubleBracketGeneratorType.single_commutator, + cost=DoubleBracketCostFunction.energy_fluctuation, + scheduling=DoubleBracketScheduling.polynomial_approximation, + ref_state=state, + ) + for _ in range(NSTEPS): + s = dbi.choose_step(d=dbi.diagonal_h_matrix, n=5) + dbi(step=s, d=dbi.diagonal_h_matrix) + assert dbi.energy_fluctuation(state=state) < dbi.h0.energy_fluctuation(state=state) - # find the following step size with look_ahead - look_ahead = 3 - step = dbi.hyperopt_step( - step_min=initial_step - delta, - step_max=initial_step + delta, - max_evals=10, - look_ahead=look_ahead, +@pytest.mark.parametrize("nqubits", [5, 6]) +def test_polynomial_fail_cases(backend, nqubits): + h0 = random_hermitian(2**nqubits, backend=backend, seed=seed) + dbi = DoubleBracketIteration( + Hamiltonian(nqubits, h0, backend=backend), + mode=DoubleBracketGeneratorType.single_commutator, + scheduling=DoubleBracketScheduling.polynomial_approximation, + ) + with pytest.raises(ValueError): + polynomial_step(dbi, n=2, n_max=1) + assert polynomial_step(dbi, n=1) is None - # evolve following the optimized first step - for gentype in range(look_ahead): - dbi(mode=DoubleBracketGeneratorType(gentype + 1), step=step, d=d) +def test_least_squares(backend): + """Check least squares cost function.""" + nqubits = 4 + h0 = random_hermitian(2**nqubits, backend=backend, seed=seed) + dbi = DoubleBracketIteration( + Hamiltonian(nqubits, h0, backend=backend), + cost=DoubleBracketCostFunction.least_squares, + ) + d = np.diag(np.linspace(1, 2**nqubits, 2**nqubits)) / 2**nqubits + initial_potential = dbi.least_squares(d=d) + step = dbi.choose_step(d=d) + dbi(d=d, step=step) + assert dbi.least_squares(d=d) < initial_potential + + +@pytest.mark.parametrize("compare_canonical", [True, False]) +@pytest.mark.parametrize("step", [None, 1e-3]) +@pytest.mark.parametrize("nqubits", [2, 3]) +def test_select_best_dbr_generator(backend, nqubits, step, compare_canonical): + h0 = random_hermitian(2**nqubits, backend=backend, seed=seed) + dbi = DoubleBracketIteration( + Hamiltonian(nqubits, h0, backend=backend), + mode=DoubleBracketGeneratorType.single_commutator, + ) + initial_off_diagonal_norm = dbi.off_diagonal_norm + generate_local_Z = generate_Z_operators(nqubits, backend=backend) + Z_ops = list(generate_local_Z.values()) + for _ in range(NSTEPS): + dbi, idx, step, flip_sign = select_best_dbr_generator( + dbi, + Z_ops, + compare_canonical=compare_canonical, + step=step, + ) + assert dbi.off_diagonal_norm < initial_off_diagonal_norm -def test_energy_fluctuations(backend): - h0 = np.array([[1, 
0], [0, -1]]) - h0 = backend.cast(h0, dtype=backend.dtype) - state = np.array([1, 0]) - state = backend.cast(state, dtype=backend.dtype) +@pytest.mark.parametrize("step", [None, 1e-3]) +def test_params_to_diagonal_operator(backend, step): + nqubits = 2 + pauli_operator_dict = generate_pauli_operator_dict( + nqubits, parameterization_order=1, backend=backend + ) + params = [1, 2, 3] + operator_pauli = sum( + [params[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)] + ) + backend.assert_allclose( + operator_pauli, + params_to_diagonal_operator( + params, + nqubits=nqubits, + parameterization=ParameterizationTypes.pauli, + pauli_operator_dict=pauli_operator_dict, + ), + ) + operator_element = params_to_diagonal_operator( + params, + nqubits=nqubits, + parameterization=ParameterizationTypes.computational, + ) + for i in range(len(params)): + backend.assert_allclose( + backend.cast(backend.to_numpy(operator_element).diagonal())[i], params[i] + ) - dbi = DoubleBracketIteration(Hamiltonian(1, matrix=h0, backend=backend)) - energy_fluctuation = dbi.energy_fluctuation(state=state) - assert energy_fluctuation == 0 +@pytest.mark.parametrize("order", [1, 2]) +def test_gradient_descent(backend, order): + nqubits = 2 + h0 = random_hermitian(2**nqubits, seed=seed, backend=backend) + dbi = DoubleBracketIteration( + Hamiltonian(nqubits, h0, backend=backend), + mode=DoubleBracketGeneratorType.single_commutator, + scheduling=DoubleBracketScheduling.hyperopt, + cost=DoubleBracketCostFunction.off_diagonal_norm, + ) + initial_off_diagonal_norm = dbi.off_diagonal_norm + pauli_operator_dict = generate_pauli_operator_dict( + nqubits, + parameterization_order=order, + backend=backend, + ) + pauli_operators = list(pauli_operator_dict.values()) + # let initial d be an approximation of $\Delta(H)$ + d_coef_pauli = decompose_into_pauli_basis( + dbi.diagonal_h_matrix, pauli_operators=pauli_operators + ) + d_pauli = sum([d_coef_pauli[i] * pauli_operators[i] for i in range(nqubits)]) + loss_hist_pauli, d_params_hist_pauli, s_hist_pauli = gradient_descent( + dbi, + NSTEPS, + d_coef_pauli, + ParameterizationTypes.pauli, + pauli_operator_dict=pauli_operator_dict, + pauli_parameterization_order=order, + ) + assert loss_hist_pauli[-1] < initial_off_diagonal_norm + + # computational basis + d_coef_computational_partial = backend.cast(backend.to_numpy(d_pauli).diagonal()) + ( + loss_hist_computational_partial, + _, + _, + ) = gradient_descent( + dbi, NSTEPS, d_coef_computational_partial, ParameterizationTypes.computational + ) + assert loss_hist_computational_partial[-1] < initial_off_diagonal_norm diff --git a/tests/test_models_variational.py b/tests/test_models_variational.py index c28e1c1439..b0ecadaca1 100644 --- a/tests/test_models_variational.py +++ b/tests/test_models_variational.py @@ -92,7 +92,7 @@ def myloss(parameters, circuit, target): ("BFGS", {"maxiter": 1}, False, "vqe_bfgs.out"), ("parallel_L-BFGS-B", {"maxiter": 1}, True, None), ("parallel_L-BFGS-B", {"maxiter": 1}, False, None), - ("cma", {"maxfevals": 2}, False, None), + ("cma", {"maxiter": 1}, False, None), ("sgd", {"nepochs": 5}, False, None), ("sgd", {"nepochs": 5}, True, None), ] @@ -128,8 +128,19 @@ def test_vqe(backend, method, options, compile, filename): if backend.name == "pytorch": initial_parameters = backend.np.tensor(initial_parameters, requires_grad=True) v = models.VQE(circuit, hamiltonian) + + loss_values = [] + + def callback(parameters, loss_values=loss_values, vqe=v): + vqe.circuit.set_parameters(parameters) + 
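# record the energy expectation at the candidate parameters +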
loss_values.append(vqe.hamiltonian.expectation(vqe.circuit().state())) + best, params, _ = v.minimize( - initial_parameters, method=method, options=options, compile=compile + initial_parameters, + method=method, + options=options, + compile=compile, + callback=callback, ) if method == "cma": # remove `outcmaes` folder @@ -138,6 +149,7 @@ def test_vqe(backend, method, options, compile, filename): shutil.rmtree("outcmaes") if filename is not None: assert_regression_fixture(backend, params, filename) + assert best == min(loss_values) # test energy fluctuation state = backend.np.ones(2**nqubits) / np.sqrt(2**nqubits) @@ -302,7 +314,7 @@ def __call__(self, x): test_names = "method,options,compile,filename" test_values = [ ("BFGS", {"maxiter": 1}, False, "aavqe_bfgs.out"), - ("cma", {"maxfevals": 2}, False, None), + ("cma", {"maxiter": 1}, False, None), ("parallel_L-BFGS-B", {"maxiter": 1}, False, None), ]