Skip to content

Commit

Permalink
Notebooks running
Browse files Browse the repository at this point in the history
  • Loading branch information
Sam-XiaoyueLi committed May 30, 2024
1 parent fa1d2c4 commit b3706a2
Show file tree
Hide file tree
Showing 4 changed files with 50 additions and 81 deletions.
3 changes: 1 addition & 2 deletions examples/dbi/dbi_scheduling.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,7 @@
"from qibo import hamiltonians, set_backend\n",
"from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketIteration\n",
"from qibo.models.dbi.utils import *\n",
"from qibo.models.dbi.utils_scheduling import *\n",
"from qibo.models.dbi.utils_strategies import *"
"from qibo.models.dbi.utils_scheduling import *"
]
},
{
Expand Down
38 changes: 20 additions & 18 deletions examples/dbi/dbi_strategies_compare.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
"from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketIteration, DoubleBracketCostFunction\n",
"from qibo.models.dbi.utils import *\n",
"from qibo.models.dbi.utils_scheduling import *\n",
"from qibo.models.dbi.utils_strategies import *"
"from qibo.models.dbi.utils_dbr_strategies import *"
]
},
{
Expand Down Expand Up @@ -133,7 +133,6 @@
"outputs": [],
"source": [
"# initialize DBI class for the Pauli-Z strategy\n",
"set_backend(\"pytorch\", platform=\"numba\")\n",
"dbi_pauli = DoubleBracketIteration(Hamiltonian(nqubits=nqubits, matrix=h0), mode=DoubleBracketGeneratorType.single_commutator, scheduling=scheduling, cost=cost)"
]
},
Expand Down Expand Up @@ -197,14 +196,23 @@
"metadata": {},
"outputs": [],
"source": [
"loss_history_gradient = [dbi_gradient.off_diagonal_norm]\n",
"steps_gradient_plot= [0]\n",
"for _ in range(NSTEPS):\n",
" step, d_coef, d = gradient_descent_pauli(dbi_gradient, d_coef, d, pauli_operator_dict=pauli_operator_dict)\n",
" dbi_gradient(d=d,step=step)\n",
" loss_history_gradient.append(dbi_gradient.off_diagonal_norm)\n",
" print(f\"New optimized step at iteration {_+1}/{NSTEPS}: {step} with d_coef {d_coef}, loss {dbi_gradient.off_diagonal_norm}\")\n",
" steps_gradient_plot.append(steps_gradient_plot[-1]+step)"
"def s_hist_to_plot(s_hist):\n",
"    # convert list of step durations taken to plottable\n",
" s_plot = [0] * len(s_hist)\n",
" for i in range(len(s_hist)):\n",
" if i != 0:\n",
" s_plot[i] = s_plot[i-1] + s_hist[i]\n",
" return s_plot"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"loss_history_gradient, d_params_hist, s_hist = gradient_descent(dbi_gradient, NSTEPS, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
"steps_gradient_plot = s_hist_to_plot(s_hist)"
]
},
{
Expand Down Expand Up @@ -402,14 +410,8 @@
"metadata": {},
"outputs": [],
"source": [
"off_diagonal_norm_history_gradient = [dbi_gradient.off_diagonal_norm]\n",
"steps_gradient_plot= [0]\n",
"for _ in range(NSTEPS):\n",
" step, d_coef, d = gradient_descent_pauli(dbi_gradient, d_coef, d, pauli_operator_dict=pauli_operator_dict)\n",
" dbi_gradient(d=d,step=step)\n",
" off_diagonal_norm_history_gradient.append(dbi_gradient.off_diagonal_norm)\n",
" print(f\"New optimized step at iteration {_+1}/{NSTEPS}: {step} with d_coef {d_coef}, loss {dbi_gradient.off_diagonal_norm}\")\n",
" steps_gradient_plot.append(steps_gradient_plot[-1]+step)"
"off_diagonal_norm_history_gradient, d_params_hist, s_hist = gradient_descent(dbi_gradient, NSTEPS, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
"steps_gradient_plot = s_hist_to_plot(s_hist)"
]
},
{
Expand Down
62 changes: 20 additions & 42 deletions examples/dbi/dbi_strategy_Ising_model.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
"from qibo.quantum_info import random_hermitian\n",
"from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketScheduling, DoubleBracketIteration\n",
"from qibo.models.dbi.utils import *\n",
"from qibo.models.dbi.utils_strategies import *"
"from qibo.models.dbi.utils_dbr_strategies import *"
]
},
{
Expand Down Expand Up @@ -92,10 +92,9 @@
"pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=1)\n",
"d_coef = decompose_into_Pauli_basis(dbi.h.matrix, list(pauli_operator_dict.values()))\n",
"d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])\n",
"grad, s = gradient_Pauli(dbi, d=d, pauli_operator_dict=pauli_operator_dict)\n",
"grad = gradient_numerical(dbi, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
"print('The initial D coefficients:', d_coef)\n",
"print('Gradient:', grad)\n",
"print('s:', s)"
"print('Gradient:', grad)"
]
},
{
Expand All @@ -105,13 +104,7 @@
"outputs": [],
"source": [
"iters = 15\n",
"off_diagonal_norm_1 = [dbi.off_diagonal_norm]\n",
"s_step = [0]\n",
"for i in range(iters):\n",
" s, d_coef, d = gradient_descent_pauli(dbi, d_coef=d_coef, d=d, pauli_operator_dict=pauli_operator_dict, max_evals=100)\n",
" dbi(step=s, d=d)\n",
" off_diagonal_norm_1.append(dbi.off_diagonal_norm)\n",
" s_step.append(s)"
"off_diagonal_norm_1, d_params_hist, s_step = gradient_descent(dbi, iters, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)"
]
},
{
Expand Down Expand Up @@ -152,10 +145,9 @@
"pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=2)\n",
"d_coef = decompose_into_Pauli_basis(dbi.h.matrix, list(pauli_operator_dict.values()))\n",
"d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])\n",
"grad, s = gradient_Pauli(dbi, d=d, pauli_operator_dict=pauli_operator_dict)\n",
"grad = gradient_numerical(dbi, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
"print('The initial D coefficients:', d_coef)\n",
"print('Gradient:', grad)\n",
"print('s:', s)"
"print('Gradient:', grad)"
]
},
{
Expand All @@ -165,13 +157,7 @@
"outputs": [],
"source": [
"iters = 15\n",
"off_diagonal_norm_2 = [dbi.off_diagonal_norm]\n",
"s_step = [0]\n",
"for i in range(iters):\n",
" s, d_coef, d = gradient_descent_pauli(dbi, d_coef=d_coef, d=d, pauli_operator_dict=pauli_operator_dict, max_evals=100)\n",
" dbi(step=s, d=d)\n",
" off_diagonal_norm_2.append(dbi.off_diagonal_norm)\n",
" s_step.append(s)"
"off_diagonal_norm_2, d_params_hist, s_step = gradient_descent(dbi, iters, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)"
]
},
{
Expand Down Expand Up @@ -245,10 +231,9 @@
"pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=1)\n",
"d_coef = decompose_into_Pauli_basis(dbi_TFIM_1.h.matrix, list(pauli_operator_dict.values()))\n",
"d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])\n",
"grad, s = gradient_Pauli(dbi_TFIM_1, d=d, pauli_operator_dict=pauli_operator_dict)\n",
"grad = gradient_numerical(dbi_TFIM_1, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
"print('The initial D coefficients:', d_coef)\n",
"print('Gradient:', grad)\n",
"print('s:', s)"
"print('Gradient:', grad)"
]
},
{
Expand All @@ -258,13 +243,7 @@
"outputs": [],
"source": [
"NSTEPS = 15\n",
"off_diagonal_norm_1 = [dbi_TFIM_1.off_diagonal_norm]\n",
"s_step = [0]\n",
"for i in range(NSTEPS):\n",
" s, d_coef, d = gradient_descent_pauli(dbi_TFIM_1, d_coef=d_coef, d=d, pauli_operator_dict=pauli_operator_dict, max_evals=100)\n",
" dbi_TFIM_1(step=s, d=d)\n",
" off_diagonal_norm_1.append(dbi_TFIM_1.off_diagonal_norm)\n",
" s_step.append(s)"
"off_diagonal_norm_1, d_params_hist, s_step = gradient_descent(dbi_TFIM_1, NSTEPS, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)"
]
},
{
Expand Down Expand Up @@ -307,10 +286,9 @@
"pauli_operator_dict = generate_pauli_operator_dict(nqubits=nqubits, parameterization_order=2)\n",
"d_coef = decompose_into_Pauli_basis(dbi_TFIM_2.h.matrix, list(pauli_operator_dict.values()))\n",
"d = sum([d_coef[i] * list(pauli_operator_dict.values())[i] for i in range(nqubits)])\n",
"grad, s = gradient_Pauli(dbi_TFIM_2, d=d, pauli_operator_dict=pauli_operator_dict)\n",
"grad = gradient_numerical(dbi_TFIM_2, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)\n",
"print('The initial D coefficients:', d_coef)\n",
"print('Gradient:', grad)\n",
"print('s:', s)"
"print('Gradient:', grad)"
]
},
{
Expand All @@ -319,14 +297,7 @@
"metadata": {},
"outputs": [],
"source": [
"NSTEPS = 15\n",
"off_diagonal_norm_2 = [dbi_TFIM_2.off_diagonal_norm]\n",
"s_step = [0]\n",
"for i in range(NSTEPS):\n",
" s, d_coef, d = gradient_descent_pauli(dbi_TFIM_2, d_coef=d_coef, d=d, pauli_operator_dict=pauli_operator_dict, max_evals=100)\n",
" dbi_TFIM_2(step=s, d=d)\n",
" off_diagonal_norm_2.append(dbi_TFIM_2.off_diagonal_norm)\n",
" s_step.append(s)"
"off_diagonal_norm_2, d_params_hist, s_step = gradient_descent(dbi_TFIM_2, NSTEPS, d_coef, parameterization=ParameterizationTypes.pauli, pauli_operator_dict=pauli_operator_dict)"
]
},
{
Expand All @@ -342,6 +313,13 @@
"plt.xlabel('Iteration')\n",
"plt.ylabel(r'$|| \\sigma(e^{sW}He^{-sW}) || $')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In conclusion, we see that the parameterization order, or locality, of the Pauli-based parameterization for gradient descent does not significantly affect the effectiveness of double-bracket diagonalization."
]
}
],
"metadata": {
Expand Down
28 changes: 9 additions & 19 deletions examples/dbi/dbi_strategy_Pauli-Z.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@
"from qibo.models.dbi.double_bracket import DoubleBracketGeneratorType, DoubleBracketIteration\n",
"from qibo.models.dbi.utils import *\n",
"from qibo.models.dbi.utils_scheduling import *\n",
"from qibo.models.dbi.utils_strategies import *"
"from qibo.models.dbi.utils_dbr_strategies import *"
]
},
{
Expand Down Expand Up @@ -130,7 +130,7 @@
"source": [
"# set the qibo backend (we suggest qibojit if N >= 20)\n",
"# alternatives: tensorflow (not optimized), numpy (when CPU not supported by jit)\n",
"set_backend(\"qibojit\", \"numba\")\n",
"set_backend(\"qibojit\", platform=\"numba\")\n",
"\n",
"# hamiltonian parameters\n",
"nqubits = 5\n",
Expand Down Expand Up @@ -188,14 +188,14 @@
"metadata": {},
"outputs": [],
"source": [
"NSTEPS = 15\n",
"NSTEPS = 8\n",
"max_evals = 100\n",
"step_max = 1\n",
"Z_optimal = []\n",
"# add in initial values for plotting\n",
"off_diagonal_norm_history = [dbi.off_diagonal_norm]\n",
"steps = [0]\n",
"scheduling = DoubleBracketScheduling.use_hyperopt\n",
"scheduling = DoubleBracketScheduling.hyperopt\n",
"for _ in range(NSTEPS):\n",
" dbi, idx, step, flip_sign = select_best_dbr_generator(dbi, Z_ops, scheduling=scheduling, compare_canonical=False, max_evals=max_evals, step_max=step_max)\n",
" off_diagonal_norm_history.append(dbi.off_diagonal_norm)\n",
Expand Down Expand Up @@ -246,7 +246,7 @@
"source": [
"# set the qibo backend (we suggest qibojit if N >= 20)\n",
"# alternatives: tensorflow (not optimized), numpy (when CPU not supported by jit)\n",
"set_backend(\"qibojit\", \"numba\")\n",
"set_backend(\"qibojit\", platform=\"numba\")\n",
"\n",
"\n",
"# initialize class\n",
Expand All @@ -266,12 +266,7 @@
"steps_canonical_plot = [0]\n",
"for s in range(NSTEPS):\n",
" # same settings as iteration from list\n",
" step = dbi_canonical.hyperopt_step(\n",
" step_min = 1e-5,\n",
" step_max = 1,\n",
" space = hp.uniform,\n",
" optimizer = tpe,\n",
" )\n",
" step = dbi_canonical.choose_step(scheduling=DoubleBracketScheduling.hyperopt)\n",
" dbi_canonical(step=step)\n",
" print(f\"New optimized step at iteration {s+1}/{NSTEPS}: {step}, loss {dbi_canonical.off_diagonal_norm}\")\n",
" off_diagonal_norm_history_canonical.append(dbi_canonical.off_diagonal_norm)\n",
Expand Down Expand Up @@ -331,7 +326,7 @@
"metadata": {},
"outputs": [],
"source": [
"dbi_mixed = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.single_commutator)\n",
"dbi_mixed = DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.single_commutator, scheduling=DoubleBracketScheduling.hyperopt)\n",
"print(\"Initial off diagonal norm\", dbi_mixed.off_diagonal_norm)"
]
},
Expand All @@ -344,12 +339,7 @@
"dbi_eval = deepcopy(dbi_mixed)\n",
"dbi_eval.mode = DoubleBracketGeneratorType.canonical\n",
"if step is None:\n",
" step = dbi_eval.hyperopt_step(\n",
" step_max=step_max,\n",
" space=hp.uniform,\n",
" optimizer=tpe,\n",
" max_evals=max_evals,\n",
" )\n",
" step = dbi_eval.choose_step()\n",
"dbi_eval(step=step)\n",
"print('canonical norm', dbi_eval.off_diagonal_norm, 'step', step)"
]
Expand Down Expand Up @@ -420,7 +410,7 @@
"metadata": {},
"outputs": [],
"source": [
"dbi_mixed_can= DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.canonical)\n",
"dbi_mixed_can= DoubleBracketIteration(deepcopy(H_TFIM),mode=DoubleBracketGeneratorType.canonical, scheduling=DoubleBracketScheduling.hyperopt)\n",
"print(\"Initial off diagonal norm\", dbi_mixed_can.off_diagonal_norm)"
]
},
Expand Down

0 comments on commit b3706a2

Please sign in to comment.