diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 19d98ebca..4786784f7 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -6,6 +6,11 @@
"context": "..",
"dockerfile": "Dockerfile"
},
+ "features": {
+ "ghcr.io/julialang/devcontainer-features/julia:1": {
+ "channel": "1.9.3"
+ }
+ },
"postCreateCommand": "/bin/zsh ./.devcontainer/post-install.sh",
"customizations": {
"vscode": {
diff --git a/.devcontainer/post-install.sh b/.devcontainer/post-install.sh
index 53bc587fa..2873ef825 100755
--- a/.devcontainer/post-install.sh
+++ b/.devcontainer/post-install.sh
@@ -1,3 +1,4 @@
#! /bin/zsh
poetry install --all-extras --with dev,doc
+julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"
diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md
index 19fb64e39..d3f312e76 100644
--- a/.github/CHANGELOG.md
+++ b/.github/CHANGELOG.md
@@ -1,24 +1,123 @@
-# Release 0.6.1-post1 (current release)
+# Release 0.7.0 (current release)
### New features
+* Added a new interface for backends, as well as a `numpy` backend (which is now default). Users can run
+ all the functions in the `utils`, `math`, `physics`, and `lab` with both backends, while `training`
+ requires using `tensorflow`. The `numpy` backend provides significant improvements both in import
+ time and runtime. [(#301)](https://github.com/XanaduAI/MrMustard/pull/301)
+* Added the classes and methods to create, contract, and draw tensor networks with `mrmustard.math`.
+ [(#284)](https://github.com/XanaduAI/MrMustard/pull/284)
+
+* Added functions in physics.bargmann to join and contract (A,b,c) triples.
+ [(#295)](https://github.com/XanaduAI/MrMustard/pull/295)
+
+* Added an Ansatz abstract class and PolyExpAnsatz concrete implementation. This is used in the Bargmann representation.
+ [(#295)](https://github.com/XanaduAI/MrMustard/pull/295)
+
+* Added `complex_gaussian_integral` method.
+ [(#295)](https://github.com/XanaduAI/MrMustard/pull/295)
+
+* Added `Bargmann` representation (parametrized by Abc). Supports all algebraic operations and CV (exact) inner product.
+ [(#296)](https://github.com/XanaduAI/MrMustard/pull/296)
### Breaking changes
+* Removed circular dependencies by:
+ * Removing `graphics.py`--moved `ProgressBar` to `training` and `mikkel_plot` to `lab`.
+ * Moving `circuit_drawer` and `wigner` to `physics`.
+ * Moving `xptensor` to `math`.
+ [(#289)](https://github.com/XanaduAI/MrMustard/pull/289)
+
+* Created `settings.py` file to host `Settings`.
+ [(#289)](https://github.com/XanaduAI/MrMustard/pull/289)
+
+* Moved `settings.py`, `logger.py`, and `typing.py` to `utils`.
+ [(#289)](https://github.com/XanaduAI/MrMustard/pull/289)
+* Removed the `Math` class. To use the mathematical backend, replace
+ `from mrmustard.math import Math ; math = Math()` with `import mrmustard.math as math`
+ in your scripts.
+ [(#301)](https://github.com/XanaduAI/MrMustard/pull/301)
+
+* The `numpy` backend is now default. To switch to the `tensorflow`
+ backend, add the line `math.change_backend("tensorflow")` to your scripts.
+ [(#301)](https://github.com/XanaduAI/MrMustard/pull/301)
### Improvements
-* Relaxes dependency versions in pyproject.toml. More specifically, this is to unpin scipy.
- [(#300)](https://github.com/XanaduAI/MrMustard/pull/300)
+* Calculating Fock representations and their gradients is now more numerically stable (i.e. numerical blowups that
+result from repeatedly applying the recurrence relation are postponed to higher cutoff values).
+This holds for both the "vanilla strategy" [(#274)](https://github.com/XanaduAI/MrMustard/pull/274) and for the
+"diagonal strategy" and "single leftover mode strategy" [(#288)](https://github.com/XanaduAI/MrMustard/pull/288/).
+This is done by representing Fock amplitudes with a higher precision than complex128 (countering floating-point errors).
+We run Julia code via PyJulia (where Numba was used before) to keep the code fast.
+The precision is controlled by the setting `settings.PRECISION_BITS_HERMITE_POLY`. The default value is ``128``,
+which uses the old Numba code. When set to a higher value, the new Julia code is run.
+
+* Replaced parameters in `training` with `Constant` and `Variable` classes.
+ [(#298)](https://github.com/XanaduAI/MrMustard/pull/298)
+
+* Improved how states, transformations, and detectors deal with parameters by replacing the `Parametrized` class with `ParameterSet`.
+ [(#298)](https://github.com/XanaduAI/MrMustard/pull/298)
+
+* Includes julia dependencies into the python packaging for downstream installation reproducibility.
+ Removes dependency on tomli to load pyproject.toml for version info, uses importlib.metadata instead.
+ [(#303)](https://github.com/XanaduAI/MrMustard/pull/303)
+ [(#304)](https://github.com/XanaduAI/MrMustard/pull/304)
+
+* Improves the algorithms implemented in `vanilla` and `vanilla_vjp` to achieve a speedup.
+  Specifically, the improved algorithms work on flattened arrays (which are reshaped before being returned) as opposed to multi-dimensional arrays.
+ [(#312)](https://github.com/XanaduAI/MrMustard/pull/312)
+ [(#318)](https://github.com/XanaduAI/MrMustard/pull/318)
+
+* Adds functions `hermite_renormalized_batch` and `hermite_renormalized_diagonal_batch` to speed up calculating
+ Hermite polynomials over a batch of B vectors.
+ [(#308)](https://github.com/XanaduAI/MrMustard/pull/308)
+
+* Added a suite to filter undesired warnings, and used it to filter tensorflow's ``ComplexWarning``s.
+ [(#332)](https://github.com/XanaduAI/MrMustard/pull/332)
+
### Bug fixes
+* Added the missing `shape` input parameters to all methods `U` in the `gates.py` file.
+[(#291)](https://github.com/XanaduAI/MrMustard/pull/291)
+* Fixed inconsistent use of `atol` in purity evaluation for Gaussian states.
+[(#294)](https://github.com/XanaduAI/MrMustard/pull/294)
+* Fixed the documentations for loss_XYd and amp_XYd functions for Gaussian channels.
+[(#305)](https://github.com/XanaduAI/MrMustard/pull/305)
+* Replaced all instances of `np.empty` with `np.zeros` to fix instabilities.
+[(#309)](https://github.com/XanaduAI/MrMustard/pull/309)
+* Fixing a bug where `scipy.linalg.sqrtm` returns an unsupported type.
+[(#337)](https://github.com/XanaduAI/MrMustard/pull/337)
### Documentation
+### Tests
+* Added tests for calculating Fock amplitudes with a higher precision than `complex128`.
+
+### Contributors
+[Eli Bourassa](https://github.com/elib20),
+[Robbe De Prins](https://github.com/rdprins),
+[Samuele Ferracin](https://github.com/SamFerracin),
+[Jan Provaznik](https://github.com/jan-provaznik),
+[Yuan Yao](https://github.com/sylviemonet),
+[Filippo Miatto](https://github.com/ziofil)
+
+
+---
+
+# Release 0.6.1-post1
+
+### Improvements
+
+* Relaxes dependency versions in pyproject.toml. More specifically, this is to unpin scipy.
+ [(#300)](https://github.com/XanaduAI/MrMustard/pull/300)
+
### Contributors
[Filippo Miatto](https://github.com/ziofil), [Samuele Ferracin](https://github.com/SamFerracin), [Yuan Yao](https://github.com/sylviemonet), [Zeyue Niu](https://github.com/zeyueN)
+
---
# Release 0.6.0
@@ -30,25 +129,36 @@ can select their preferred methods by setting the value of `Settings.DISCRETIZAT
`clenshaw`.
[(#280)](https://github.com/XanaduAI/MrMustard/pull/280)
-* Added the `PhaseNoise(phase_stdev)` gate (non-Gaussian). Output is a mixed state in Fock representation.
- It is not based on a choi operator, but on a nonlinear transformation of the density matrix.
+* Added the `PhaseNoise(phase_stdev)` gate (non-Gaussian). Output is a mixed state in Fock representation. It is not based on a choi operator, but on a nonlinear transformation of the density matrix.
[(#275)](https://github.com/XanaduAI/MrMustard/pull/275)
### Breaking changes
* The value of `hbar` can no longer be specified outside of `Settings`. All the classes and
methods that allowed specifying its value as an input now retrieve it directly from `Settings`.
- [(#278)](https://github.com/XanaduAI/MrMustard/pull/278)
+ [(#273)](https://github.com/XanaduAI/MrMustard/pull/273)
-* Certain attributes of `Settings` can no longer be changed after their value is queried for the
- first time.
- [(#278)](https://github.com/XanaduAI/MrMustard/pull/278)
+* Certain attributes of `Settings` can no longer be changed after their value is queried for the first time.
+ [(#273)](https://github.com/XanaduAI/MrMustard/pull/273)
### Improvements
+* Calculating Fock representations using the "vanilla strategy" is now more numerically stable (i.e. numerical blowups
+that result from repeatedly applying the recurrence relation are now postponed to higher cutoff values).
+This is done by representing Fock amplitudes with a higher precision than complex128
+(which counters the accumulation of floating-point errors).
+We run Julia code via PyJulia (where Numba was used before) to keep the code fast.
+[(#274)](https://github.com/XanaduAI/MrMustard/pull/274)
+
* Tensorflow bumped to v2.14 with poetry installation working out of the box on Linux and Mac.
[(#281)](https://github.com/XanaduAI/MrMustard/pull/281)
+* Incorporated `Tensor` into `Transformation` in order to deal with modes more robustly.
+ [(#287)](https://github.com/XanaduAI/MrMustard/pull/287)
+
+* Created the classes `Unitary` and `Channel` to simplify the logic in `Transformation`.
+ [(#287)](https://github.com/XanaduAI/MrMustard/pull/287)
+
### Bug fixes
* Fixed a bug about the variable names in functions (apply_kraus_to_ket, apply_kraus_to_dm, apply_choi_to_ket, apply_choi_to_dm).
@@ -57,17 +167,19 @@ can select their preferred methods by setting the value of `Settings.DISCRETIZAT
* Fixed a bug that was leading to an error when computing the Choi representation of a unitary transformation.
[(#283)](https://github.com/XanaduAI/MrMustard/pull/283)
-* Fixed the internal function to calculate ABC of Bargmann representation (now corresponds to the literature) and other fixes to get the correct Fock tensor.
- [(#255)](https://github.com/XanaduAI/MrMustard/pull/255)
-
### Documentation
### Contributors
-[Filippo Miatto](https://github.com/ziofil), [Samuele Ferracin](https://github.com/SamFerracin), [Yuan Yao](https://github.com/sylviemonet), [Zeyue Niu](https://github.com/zeyueN)
+[Filippo Miatto](https://github.com/ziofil),
+[Yuan Yao](https://github.com/sylviemonet),
+[Robbe De Prins](https://github.com/rdprins),
+[Samuele Ferracin](https://github.com/SamFerracin),
+[Zeyue Niu](https://github.com/zeyueN)
+
---
-# Release 0.5.0
+# Release 0.5.0
### New features
@@ -88,7 +200,6 @@ can select their preferred methods by setting the value of `Settings.DISCRETIZAT
def cost_fn():
...
-
def as_dB(cost):
delta = np.sqrt(np.log(1 / (abs(cost) ** 2)) / (2 * np.pi))
cost_dB = -10 * np.log10(delta**2)
@@ -151,7 +262,6 @@ can select their preferred methods by setting the value of `Settings.DISCRETIZAT
[(#239)](https://github.com/XanaduAI/MrMustard/pull/239)
* More robust implementation of cutoffs for States.
-
[(#239)](https://github.com/XanaduAI/MrMustard/pull/239)
* Dependencies and versioning are now managed using Poetry.
@@ -178,6 +288,7 @@ cutoff of the first detector is equal to 1, the resulting density matrix is now
[Robbe De Prins](https://github.com/rdprins), [Gabriele Gullì](https://github.com/ggulli),
[Richard A. Wolf](https://github.com/ryk-wolf)
+
---
# Release 0.4.1
@@ -207,7 +318,7 @@ cutoff of the first detector is equal to 1, the resulting density matrix is now
---
-# Release 0.4.0 (current release)
+# Release 0.4.0
### New features
@@ -450,6 +561,7 @@ This release contains contributions from (in alphabetical order):
[Filippo Miatto](https://github.com/ziofil), [Zeyue Niu](https://github.com/zeyueN),
[Yuan Yao](https://github.com/sylviemonet)
+
---
# Release 0.3.0
@@ -605,7 +717,6 @@ This release contains contributions from (in alphabetical order):
[Mikhail Andrenkov](https://github.com/Mandrenkov), [Sebastian Duque Mesa](https://github.com/sduquemesa), [Filippo Miatto](https://github.com/ziofil), [Yuan Yao](https://github.com/sylviemonet)
-
---
# Release 0.2.0
@@ -656,6 +767,8 @@ This release contains contributions from (in alphabetical order):
[Filippo Miatto](https://github.com/ziofil)
+---
+
# Release 0.1.1
### New features since last release
@@ -704,6 +817,8 @@ This release contains contributions from (in alphabetical order):
[Sebastián Duque](https://github.com/sduquemesa), [Filippo Miatto](https://github.com/ziofil)
+---
+
# Release 0.1.0
### New features since last release
@@ -717,3 +832,4 @@ This release contains contributions from (in alphabetical order):
[Sebastián Duque](https://github.com/sduquemesa), [Zhi Han](https://github.com/hanzhihua1),
[Theodor Isacsson](https://github.com/thisac/), [Josh Izaac](https://github.com/josh146),
[Filippo Miatto](https://github.com/ziofil), [Nicolas Quesada](https://github.com/nquesada)
+
diff --git a/.github/workflows/builds.yml b/.github/workflows/builds.yml
index 90887d6ab..6462073d7 100644
--- a/.github/workflows/builds.yml
+++ b/.github/workflows/builds.yml
@@ -44,5 +44,13 @@ jobs:
- name: Install only test dependencies
run: poetry install --no-root --extras "ray" --with dev
+ - name: Setup Julia
+ uses: julia-actions/setup-julia@v1
+ with:
+ version: 1.9.3
+
+ - name: Setup Julia part 2
+ run: julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"
+
- name: Run tests
run: python -m pytest tests -p no:warnings --tb=native
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests_numpy.yml
similarity index 76%
rename from .github/workflows/tests.yml
rename to .github/workflows/tests_numpy.yml
index ad0ff8caf..399f3c233 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests_numpy.yml
@@ -1,4 +1,4 @@
-name: Tests
+name: Numpy tests
on:
push:
branches:
@@ -30,18 +30,18 @@ jobs:
with:
python-version: '3.9'
+ - name: Setup Julia
+ uses: julia-actions/setup-julia@v1
+ with:
+ version: 1.9.3
+
- name: Install dependencies
run: |
python -m pip install --no-cache-dir --upgrade pip
pip install --no-cache-dir poetry==1.4.0
poetry config virtualenvs.create false
poetry install --extras "ray" --with dev
+ julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"
- name: Run tests
- run: python -m pytest tests --cov=mrmustard --cov-report=term-missing --cov-report=xml -p no:warnings --tb=native
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v3
- with:
- files: ./coverage.xml
- fail_ci_if_error: true
+ run: python -m pytest tests --backend=numpy -p no:warnings --tb=native
diff --git a/.github/workflows/tests_tensorflow.yml b/.github/workflows/tests_tensorflow.yml
new file mode 100644
index 000000000..34ec43d5e
--- /dev/null
+++ b/.github/workflows/tests_tensorflow.yml
@@ -0,0 +1,53 @@
+name: Tensorflow tests
+on:
+ push:
+ branches:
+ - develop
+ pull_request:
+ paths:
+      - '.github/workflows/tests_tensorflow.yml'
+ - 'mrmustard/**'
+ - 'tests/**'
+ - 'pyproject.toml'
+ - 'poetry.lock'
+ - 'pytest.ini'
+
+jobs:
+ pytest:
+ runs-on: ubuntu-latest
+ concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+ env:
+ HYPOTHESIS_PROFILE: ci
+
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v3
+
+ - name: Setup python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+
+ - name: Setup Julia
+ uses: julia-actions/setup-julia@v1
+ with:
+ version: 1.9.3
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --no-cache-dir --upgrade pip
+ pip install --no-cache-dir poetry==1.4.0
+ poetry config virtualenvs.create false
+ poetry install --extras "ray" --with dev
+ julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"
+
+ - name: Run tests
+ run: python -m pytest tests --backend=tensorflow --cov=mrmustard --cov-report=term-missing --cov-report=xml -p no:warnings --tb=native
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v3
+ with:
+ files: ./coverage.xml
+ fail_ci_if_error: true
diff --git a/.pylintrc b/.pylintrc
index 0aba4296b..896a6536e 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -28,4 +28,4 @@ ignored-classes=numpy,tensorflow,scipy,networkx,strawberryfields,thewalrus
# can either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once).
-disable=no-member,line-too-long,invalid-name,too-many-lines,redefined-builtin,too-many-locals,duplicate-code,too-many-arguments,too-few-public-methods,no-else-return,isinstance-second-argument-not-valid-type
+disable=no-member,line-too-long,invalid-name,too-many-lines,redefined-builtin,too-many-locals,duplicate-code,too-many-arguments,too-few-public-methods,no-else-return,isinstance-second-argument-not-valid-type,no-self-argument
diff --git a/.readthedocs.yml b/.readthedocs.yml
index a7dbfa275..dbf504ae7 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -8,13 +8,16 @@ build:
python: "3.9"
jobs:
post_install:
+ - "curl -fsSL https://install.julialang.org | sh -s -- --yes"
+ - "ln -s /home/docs/.juliaup/bin/julia /home/docs/.asdf/bin/julia"
+ - 'julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"'
- pip install --no-cache-dir poetry==1.4.0
- poetry config virtualenvs.create false
- poetry install --with doc
# Build documentation in the docs/ directory with Sphinx
sphinx:
- configuration: doc/conf.py
+ configuration: doc/conf.py
# If using Sphinx, optionally build your docs in additional formats such as PDF
# formats:
diff --git a/Makefile b/Makefile
index fcd55052a..866fcf179 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,5 @@
-PYTHON := $(shell which python3 2>/dev/null)
+PYTHON3 := $(shell which python3 2>/dev/null)
+JULIA := $(shell which julia 2>/dev/null)
TESTRUNNER := -m pytest tests -p no:warnings
COVERAGE := --cov=mrmustard --cov-report=html:coverage_html_report --cov-append
@@ -19,14 +20,27 @@ install:
ifndef PYTHON3
@echo "To install Mr Mustard you need to have Python 3 installed"
endif
+ifndef JULIA
+ @echo "To use Mr Mustard with higher precision than complex128, it is required to have Julia installed"
poetry install
+else
+ julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"
+ poetry install
+endif
+
.PHONY: install-all
install-all:
ifndef PYTHON3
@echo "To install Mr Mustard you need to have Python 3 installed"
endif
+ifndef JULIA
+ @echo "To use Mr Mustard with higher precision than complex128, it is required to have Julia installed"
poetry install --all-extras --with dev,doc
+else
+ julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"
+ poetry install --all-extras --with dev,doc
+endif
.PHONY: dist
dist:
diff --git a/README.md b/README.md
index 4979ff5df..ba67b5006 100644
--- a/README.md
+++ b/README.md
@@ -31,6 +31,22 @@ Mr Mustard supports:
- Plug-and-play backends (TensorFlow as default)
- An abstraction layer `XPTensor` for seamless symplectic algebra (experimental)
+# Increased numerical stability using Julia [optional]
+
+Converting phase space objects to Fock space can be numerically unstable due to accumulating floating point errors.
+To resolve this, the conversion can be performed with extended-precision arithmetic. To use this feature,
+an installation of [Julia](https://github.com/JuliaLang/juliaup#installation) is required (version 1.9.3 recommended).
+
+* When installing MrMustard via devcontainer, Julia and its required packages are automatically installed.
+
+* When installing MrMustard via `poetry install` or `pip install`, the required Julia packages have to be installed manually as follows:
+
+```
+julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"
+```
+
+* When installing MrMustard via the `Makefile`, the required Julia packages are automatically installed _only_ if Julia was previously installed by the user.
+
# The lab module
The lab module contains things you'd find in a lab: states, transformations, measurements, circuits. States can be used at the beginning of a circuit as well as at the end, in which case a state is interpreted as a measurement (a projection onto that state). Transformations are usually parametrized and map states to states. The action on states is differentiable with respect to the state and to the gate parameters.
@@ -81,7 +97,6 @@ cat >> Sgate(0.5) # squeezed cat
-
## 2. Gates and the right shift operator `>>`
Applying gates to states looks natural, thanks to python's right-shift operator `>>`:
@@ -217,17 +232,14 @@ The physics module contains a growing number of functions that we can apply to s
# The math module
-The math module is the backbone of Mr Mustard, which consists in the [`Math`](https://github.com/XanaduAI/MrMustard/blob/main/mrmustard/math/math_interface.py) interface. Mr Mustard comes with a plug-and-play backends through a math interface. You can use it as a drop-in replacement for tensorflow or pytorch and your code will be plug-and-play too!
+The math module is the backbone of Mr Mustard. Mr Mustard comes with plug-and-play backends through a math interface. You can use it as a drop-in replacement for tensorflow or numpy and your code will be plug-and-play too!
```python
-from mrmustard import settings
-from mrmustard.math import Math
-math = Math()
+import mrmustard.math as math
-math.cos(0.1) # tensorflow
-
-settings.BACKEND = 'torch'
+math.cos(0.1) # numpy
-math.cos(0.1) # pytorch (upcoming)
+math.change_backend("tensorflow")
+math.cos(0.1) # tensorflow
```
### Optimization
diff --git a/doc/code/math.rst b/doc/code/math.rst
index 3335bbb04..d27d6af05 100644
--- a/doc/code/math.rst
+++ b/doc/code/math.rst
@@ -3,13 +3,15 @@ mrmustard.math
.. currentmodule:: mrmustard.math
-Backends
----------
+The `math` module
+-----------------
.. toctree::
:maxdepth: 1
- math/tensorflow
+ math/parameter_set
+ math/parameter
+ math/backend_manager
.. automodapi:: mrmustard.math
:no-heading:
diff --git a/doc/code/math/backend_manager.rst b/doc/code/math/backend_manager.rst
new file mode 100644
index 000000000..80c7534aa
--- /dev/null
+++ b/doc/code/math/backend_manager.rst
@@ -0,0 +1,8 @@
+Backend Manager
+===============
+
+.. currentmodule:: mrmustard.math.backend_manager
+
+.. automodapi:: mrmustard.math.backend_manager
+ :no-heading:
+ :include-all-objects:
\ No newline at end of file
diff --git a/doc/code/math/parameter.rst b/doc/code/math/parameter.rst
new file mode 100644
index 000000000..06ebb7e40
--- /dev/null
+++ b/doc/code/math/parameter.rst
@@ -0,0 +1,7 @@
+Parameters
+==========
+
+.. currentmodule:: mrmustard.math.parameters
+
+.. automodapi:: mrmustard.math.parameters
+ :no-heading:
diff --git a/doc/code/math/parameter_set.rst b/doc/code/math/parameter_set.rst
new file mode 100644
index 000000000..a0a727c2c
--- /dev/null
+++ b/doc/code/math/parameter_set.rst
@@ -0,0 +1,7 @@
+ParameterSet
+============
+
+.. currentmodule:: mrmustard.math.parameter_set
+
+.. automodapi:: mrmustard.math.parameter_set
+ :no-heading:
diff --git a/doc/code/math/tensorflow.rst b/doc/code/math/tensorflow.rst
deleted file mode 100644
index 28013cd02..000000000
--- a/doc/code/math/tensorflow.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-Tensorflow
-==========
-
-.. currentmodule:: mrmustard.math.tensorflow
-
-.. automodapi:: mrmustard.math.tensorflow
- :no-heading:
- :include-all-objects:
\ No newline at end of file
diff --git a/doc/code/physics.rst b/doc/code/physics.rst
index a4bb0f23f..17f17587f 100644
--- a/doc/code/physics.rst
+++ b/doc/code/physics.rst
@@ -4,8 +4,11 @@ mrmustard.physics
.. toctree::
:maxdepth: 1
+ physics/ansatze
+ physics/bargmann
physics/fock
physics/gaussian
+ physics/representations
.. currentmodule:: mrmustard.physics
diff --git a/doc/code/physics/ansatze.rst b/doc/code/physics/ansatze.rst
new file mode 100644
index 000000000..1e2202c72
--- /dev/null
+++ b/doc/code/physics/ansatze.rst
@@ -0,0 +1,8 @@
+Ansatze
+=======
+
+.. currentmodule:: mrmustard.physics.ansatze
+
+.. automodapi:: mrmustard.physics.ansatze
+ :no-heading:
+ :include-all-objects:
\ No newline at end of file
diff --git a/doc/code/physics/bargmann.rst b/doc/code/physics/bargmann.rst
new file mode 100644
index 000000000..2b926d9ca
--- /dev/null
+++ b/doc/code/physics/bargmann.rst
@@ -0,0 +1,8 @@
+Bargmann
+========
+
+.. currentmodule:: mrmustard.physics.bargmann
+
+.. automodapi:: mrmustard.physics.bargmann
+ :no-heading:
+ :include-all-objects:
\ No newline at end of file
diff --git a/doc/code/physics/fock.rst b/doc/code/physics/fock.rst
index 6bc724ed1..c8c1f58af 100644
--- a/doc/code/physics/fock.rst
+++ b/doc/code/physics/fock.rst
@@ -1,5 +1,5 @@
Fock
-=======
+====
.. currentmodule:: mrmustard.physics.fock
diff --git a/doc/code/physics/representations.rst b/doc/code/physics/representations.rst
new file mode 100644
index 000000000..a908020fd
--- /dev/null
+++ b/doc/code/physics/representations.rst
@@ -0,0 +1,8 @@
+Representations
+===============
+
+.. currentmodule:: mrmustard.physics.representations
+
+.. automodapi:: mrmustard.physics.representations
+ :no-heading:
+ :include-all-objects:
\ No newline at end of file
diff --git a/doc/code/training.rst b/doc/code/training.rst
index d65fb613a..0ab069232 100644
--- a/doc/code/training.rst
+++ b/doc/code/training.rst
@@ -5,8 +5,6 @@ mrmustard.training
:maxdepth: 1
training/optimizer
- training/parameter
- training/parametrized
training/trainer
training/callbacks
diff --git a/doc/code/training/parameter.rst b/doc/code/training/parameter.rst
deleted file mode 100644
index 514127396..000000000
--- a/doc/code/training/parameter.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Parameter
-=========
-
-.. currentmodule:: mrmustard.training.parameter
-
-.. automodapi:: mrmustard.training.parameter
- :no-heading:
diff --git a/doc/code/training/parametrized.rst b/doc/code/training/parametrized.rst
deleted file mode 100644
index b52e47628..000000000
--- a/doc/code/training/parametrized.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Parametrized
-============
-
-.. currentmodule:: mrmustard.training.parametrized
-
-.. automodapi:: mrmustard.training.parametrized
- :no-heading:
diff --git a/doc/code/utils.rst b/doc/code/utils.rst
new file mode 100644
index 000000000..68da5cfc0
--- /dev/null
+++ b/doc/code/utils.rst
@@ -0,0 +1,15 @@
+mrmustard.utils
+===============
+
+.. toctree::
+ :maxdepth: 1
+
+ utils/settings
+ utils/typing
+
+
+.. currentmodule:: mrmustard.utils
+
+.. automodapi:: mrmustard.utils
+ :no-heading:
+ :include-all-objects:
\ No newline at end of file
diff --git a/doc/code/utils/settings.rst b/doc/code/utils/settings.rst
new file mode 100644
index 000000000..72bbdf67f
--- /dev/null
+++ b/doc/code/utils/settings.rst
@@ -0,0 +1,8 @@
+Settings
+========
+
+.. currentmodule:: mrmustard.utils.settings
+
+.. automodapi:: mrmustard.utils.settings
+ :no-heading:
+ :include-all-objects:
\ No newline at end of file
diff --git a/doc/code/utils/typing.rst b/doc/code/utils/typing.rst
new file mode 100644
index 000000000..07effc04f
--- /dev/null
+++ b/doc/code/utils/typing.rst
@@ -0,0 +1,8 @@
+Typing
+======
+
+.. currentmodule:: mrmustard.utils.typing
+
+.. automodapi:: mrmustard.utils.typing
+ :no-heading:
+ :include-all-objects:
\ No newline at end of file
diff --git a/doc/conf.py b/doc/conf.py
index adf2c0036..bf366a96a 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -123,9 +123,6 @@
# the order in which autodoc lists the documented members
autodoc_member_order = "bysource"
-# mock non-installed imports
-autodoc_mock_imports = ["torch"]
-
# inheritance_diagram graphviz attributes
inheritance_node_attrs = dict(color="lightskyblue1", style="filled")
diff --git a/doc/development/development_guide.rst b/doc/development/development_guide.rst
index 9711859c9..3be0fdd16 100644
--- a/doc/development/development_guide.rst
+++ b/doc/development/development_guide.rst
@@ -18,7 +18,6 @@ as well as the following Python packages:
* `Rich `_
* `tqdm `_
-
If you currently do not have Python 3 installed, we recommend
`Anaconda for Python 3 `_, a distributed version
of Python packaged for scientific computation.
@@ -38,29 +37,21 @@ using development mode:
The ``-e`` flag ensures that edits to the source code will be reflected when
importing Mr Mustard in Python.
-
-PyTorch support
+Increased numerical stability using Julia [optional]
------------------
+Converting phase space objects to Fock space can be numerically unstable due to accumulating floating point errors.
+To resolve this, the conversion can be performed with extended-precision arithmetic. To use this feature,
+an installation of `Julia `_ is required (version 1.9.3 recommended).
-To use Mr Mustard with PyTorch using CPU, install it as follows:
-
-.. code-block:: console
-
- pip install torch
+* When installing MrMustard via devcontainer, Julia and its required packages are automatically installed.
-To install PyTorch with GPU and CUDA 10.2 support:
+* When installing MrMustard via ``poetry install`` or ``pip install``, the required Julia packages have to be installed manually as follows:
.. code-block:: console
- pip install torch==1.10.0+cu102
+ julia --project="julia_pkg" -e "using Pkg; Pkg.instantiate()"
-To instead use CUDA 11.3:
-
-.. code-block:: console
-
- pip install torch==1.10.0+cu113
-
-Refer to `PyTorch `_ project webpage for more details.
+* When installing MrMustard via the `Makefile`, the required Julia packages are automatically installed only if Julia was previously installed by the user.
Development environment
-----------------------
@@ -92,6 +83,10 @@ Individual test modules are run by invoking pytest directly from the command lin
pytest tests/test_fidelity.py
+The ``--backend`` flag allows specifying the backend used when running the tests. To
+use the numpy backend, type ``pytest tests/test_fidelity.py`` or ``pytest tests/test_fidelity.py --backend=numpy``.
+To use the tensorflow backend, run the command ``pytest tests/test_fidelity.py --backend=tensorflow``.
+
.. note:: **Run options for Mr Mustard tests**
When running tests, it can be useful to examine a single failing test.
diff --git a/doc/index.rst b/doc/index.rst
index 94b92d97c..53934e9b1 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -131,3 +131,4 @@ Mr Mustard supports the following in a fully differentiable way:
code/physics
code/math
code/training
+ code/utils
diff --git a/doc/introduction/basic_reference.md b/doc/introduction/basic_reference.md
index ff3c2c00b..9cffe1aa3 100644
--- a/doc/introduction/basic_reference.md
+++ b/doc/introduction/basic_reference.md
@@ -179,17 +179,14 @@ The physics module contains a growing number of functions that we can apply to s
### The math module
The math module is the backbone of Mr Mustard, which consists in the [Math](https://github.com/XanaduAI/MrMustard/blob/main/mrmustard/math/math_interface.py) interface
-Mr Mustard comes with a plug-and-play backends through a math interface. You can use it as a drop-in replacement for tensorflow or pytorch and your code will be plug-and-play too!
+Mr Mustard comes with plug-and-play backends through a math interface. You can use it as a drop-in replacement for tensorflow or numpy and your code will be plug-and-play too!
```python
-from mrmustard import settings
-from mrmustard.math import Math
-math = Math()
+from mrmustard import math
-math.cos(0.1) # tensorflow
-
-settings.BACKEND = 'torch'
+math.cos(0.1) # numpy
-math.cos(0.1) # pytorch (upcoming)
+math.change_backend("numpy")
+math.cos(0.1) # tensorflow
```
### Optimization
diff --git a/julia_pkg/Manifest.toml b/julia_pkg/Manifest.toml
new file mode 100644
index 000000000..00ba6902d
--- /dev/null
+++ b/julia_pkg/Manifest.toml
@@ -0,0 +1,176 @@
+# This file is machine-generated - editing it directly is not advised
+
+julia_version = "1.9.3"
+manifest_format = "2.0"
+project_hash = "8d403123f4c94161c2abeebdda89172661c5feb5"
+
+[[deps.ArgTools]]
+uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
+version = "1.1.1"
+
+[[deps.Artifacts]]
+uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
+
+[[deps.Base64]]
+uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
+
+[[deps.CompilerSupportLibraries_jll]]
+deps = ["Artifacts", "Libdl"]
+uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
+version = "1.0.5+0"
+
+[[deps.Conda]]
+deps = ["Downloads", "JSON", "VersionParsing"]
+git-tree-sha1 = "8c86e48c0db1564a1d49548d3515ced5d604c408"
+uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d"
+version = "1.9.1"
+
+[[deps.Dates]]
+deps = ["Printf"]
+uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
+
+[[deps.Downloads]]
+deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
+uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
+version = "1.6.0"
+
+[[deps.FileWatching]]
+uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
+
+[[deps.JSON]]
+deps = ["Dates", "Mmap", "Parsers", "Unicode"]
+git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a"
+uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
+version = "0.21.4"
+
+[[deps.LibCURL]]
+deps = ["LibCURL_jll", "MozillaCACerts_jll"]
+uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
+version = "0.6.3"
+
+[[deps.LibCURL_jll]]
+deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
+uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
+version = "7.84.0+0"
+
+[[deps.LibSSH2_jll]]
+deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
+uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
+version = "1.10.2+0"
+
+[[deps.Libdl]]
+uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
+
+[[deps.LinearAlgebra]]
+deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
+uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+
+[[deps.MacroTools]]
+deps = ["Markdown", "Random"]
+git-tree-sha1 = "9ee1618cbf5240e6d4e0371d6f24065083f60c48"
+uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
+version = "0.5.11"
+
+[[deps.Markdown]]
+deps = ["Base64"]
+uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
+
+[[deps.MbedTLS_jll]]
+deps = ["Artifacts", "Libdl"]
+uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
+version = "2.28.2+0"
+
+[[deps.Mmap]]
+uuid = "a63ad114-7e13-5084-954f-fe012c677804"
+
+[[deps.MozillaCACerts_jll]]
+uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
+version = "2022.10.11"
+
+[[deps.MultiFloats]]
+deps = ["LinearAlgebra", "Printf", "Random"]
+git-tree-sha1 = "4d047875ab0571dbec7e7348ff3d2b09edace3aa"
+pinned = true
+uuid = "bdf0d083-296b-4888-a5b6-7498122e68a5"
+version = "1.0.4"
+
+[[deps.NetworkOptions]]
+uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
+version = "1.2.0"
+
+[[deps.OpenBLAS_jll]]
+deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
+uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
+version = "0.3.21+4"
+
+[[deps.Parsers]]
+deps = ["Dates", "PrecompileTools", "UUIDs"]
+git-tree-sha1 = "716e24b21538abc91f6205fd1d8363f39b442851"
+uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
+version = "2.7.2"
+
+[[deps.PrecompileTools]]
+deps = ["Preferences"]
+git-tree-sha1 = "03b4c25b43cb84cee5c90aa9b5ea0a78fd848d2f"
+uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
+version = "1.2.0"
+
+[[deps.Preferences]]
+deps = ["TOML"]
+git-tree-sha1 = "00805cd429dcb4870060ff49ef443486c262e38e"
+uuid = "21216c6a-2e73-6563-6e65-726566657250"
+version = "1.4.1"
+
+[[deps.Printf]]
+deps = ["Unicode"]
+uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
+
+[[deps.PyCall]]
+deps = ["Conda", "Dates", "Libdl", "LinearAlgebra", "MacroTools", "Serialization", "VersionParsing"]
+git-tree-sha1 = "43d304ac6f0354755f1d60730ece8c499980f7ba"
+pinned = true
+uuid = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0"
+version = "1.96.1"
+
+[[deps.Random]]
+deps = ["SHA", "Serialization"]
+uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+
+[[deps.SHA]]
+uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
+version = "0.7.0"
+
+[[deps.Serialization]]
+uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
+
+[[deps.TOML]]
+deps = ["Dates"]
+uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
+version = "1.0.3"
+
+[[deps.UUIDs]]
+deps = ["Random", "SHA"]
+uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
+
+[[deps.Unicode]]
+uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
+
+[[deps.VersionParsing]]
+git-tree-sha1 = "58d6e80b4ee071f5efd07fda82cb9fbe17200868"
+uuid = "81def892-9a0e-5fdd-b105-ffc91e053289"
+version = "1.3.0"
+
+[[deps.Zlib_jll]]
+deps = ["Libdl"]
+uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
+version = "1.2.13+0"
+
+[[deps.libblastrampoline_jll]]
+deps = ["Artifacts", "Libdl"]
+uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
+version = "5.8.0+0"
+
+[[deps.nghttp2_jll]]
+deps = ["Artifacts", "Libdl"]
+uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
+version = "1.48.0+0"
diff --git a/julia_pkg/Project.toml b/julia_pkg/Project.toml
new file mode 100644
index 000000000..4d863683b
--- /dev/null
+++ b/julia_pkg/Project.toml
@@ -0,0 +1,8 @@
+name = "mrmustard"
+uuid = "5db76431-4455-430e-9a17-a2380d73a721"
+authors = ["Xanadu "]
+version = "0.1.0"
+
+[deps]
+MultiFloats = "bdf0d083-296b-4888-a5b6-7498122e68a5"
+PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0"
diff --git a/mrmustard/README.md b/mrmustard/README.md
index 0d0724a33..23033f6a2 100644
--- a/mrmustard/README.md
+++ b/mrmustard/README.md
@@ -15,4 +15,4 @@ The lab module contains states, gates and detectors. The code in these objects u
The physics module contains the implementation of all of the functionality used in the lab module. The physics module talks to an interface for a math backend, which is defined in [`math_interface.py`](https://github.com/XanaduAI/MrMustard/blob/main/mrmustard/physics/math_interface.py).
### 3. [math](https://github.com/XanaduAI/MrMustard/blob/main/mrmustard/math)
-The math module contains the concrete implementations of the math interface. At the moment we have [Tensorflow](https://github.com/XanaduAI/MrMustard/blob/main/mrmustard/math/tensorflow.py) and [Pytorch](https://github.com/XanaduAI/MrMustard/blob/main/mrmustard/math/torch.py).
+The math module contains the concrete implementations of the math interface. At the moment we have [Numpy](https://github.com/XanaduAI/MrMustard/blob/main/mrmustard/math/backend_numpy.py) and [Tensorflow](https://github.com/XanaduAI/MrMustard/blob/main/mrmustard/math/backend_tensorflow.py).
diff --git a/mrmustard/__init__.py b/mrmustard/__init__.py
index 5c9afcacb..dd85b8681 100644
--- a/mrmustard/__init__.py
+++ b/mrmustard/__init__.py
@@ -14,11 +14,11 @@
"""This is the top-most `__init__.py` file of MrMustard package."""
-
from rich import print
from ._version import __version__
-from .settings import *
+from .utils.settings import *
+from .utils.filters import add_complex_warning_filter
def version():
@@ -80,10 +80,6 @@ def about():
print("The Walrus version: {}".format(thewalrus.__version__))
print("TensorFlow version: {}".format(tensorflow.__version__))
- try: # pragma: no cover
- import torch
- torch_version = torch.__version__
- print("Torch version: {}".format(torch_version))
- except ImportError:
- torch_version = None
+# filter tensorflow cast warnings
+add_complex_warning_filter()
diff --git a/mrmustard/_version.py b/mrmustard/_version.py
index c9955a3aa..fec2a6687 100644
--- a/mrmustard/_version.py
+++ b/mrmustard/_version.py
@@ -16,19 +16,7 @@
Version number retrieved from pyproject.toml file
"""
-from pathlib import Path
-import tomli
+from importlib.metadata import version
-def _get_project_root():
- """Compute and return root dir"""
- return Path(__file__).parent.parent
-
-
-def _get_project_version():
- """Parse 'pyproject.toml' and return current version"""
- with open(f"{_get_project_root()}/pyproject.toml", mode="rb") as pyproject:
- return tomli.load(pyproject)["tool"]["poetry"]["version"]
-
-
-__version__ = str(_get_project_version())
+__version__ = version("mrmustard")
diff --git a/mrmustard/lab/abstract/__init__.py b/mrmustard/lab/abstract/__init__.py
index bdc730ea3..13a199b2c 100644
--- a/mrmustard/lab/abstract/__init__.py
+++ b/mrmustard/lab/abstract/__init__.py
@@ -17,4 +17,4 @@
from .state import State
from .measurement import FockMeasurement, Measurement
-from .transformation import Transformation
+from .transformation import Transformation, Unitary, Channel
diff --git a/mrmustard/lab/abstract/measurement.py b/mrmustard/lab/abstract/measurement.py
index 60a66471c..5f3d87bfd 100644
--- a/mrmustard/lab/abstract/measurement.py
+++ b/mrmustard/lab/abstract/measurement.py
@@ -19,14 +19,12 @@
from abc import ABC, abstractmethod
from typing import Iterable, Sequence, Union
-from mrmustard import settings
-from mrmustard.math import Math
-from mrmustard.typing import Tensor
-
+from mrmustard import math, settings
+from mrmustard.math.parameter_set import ParameterSet
+from mrmustard.math.parameters import Constant, Variable
+from mrmustard.utils.typing import Tensor
from .state import State
-math = Math()
-
class Measurement(ABC):
"""this is an abstract class holding the common methods and properties that any measurement should
@@ -48,6 +46,25 @@ def __init__(self, outcome: Tensor, modes: Sequence[int]) -> None:
"""used to evaluate if the measurement outcome should be
sampled or is already defined by the user (postselection)"""
+ self._parameter_set = ParameterSet()
+
+ def _add_parameter(self, parameter: Union[Constant, Variable]):
+ r"""
+ Adds a parameter to a transformation.
+
+ Args:
+ parameter: The parameter to add.
+ """
+ self.parameter_set.add_parameter(parameter)
+ self.__dict__[parameter.name] = parameter
+
+ @property
+ def parameter_set(self):
+ r"""
+ The set of parameters for this transformation.
+ """
+ return self._parameter_set
+
@property
def modes(self):
r"""returns the modes being measured"""
@@ -70,12 +87,10 @@ def outcome(self):
...
@abstractmethod
- def _measure_fock(self, other: State) -> Union[State, float]:
- ...
+ def _measure_fock(self, other: State) -> Union[State, float]: ...
@abstractmethod
- def _measure_gaussian(self, other: State) -> Union[State, float]:
- ...
+ def _measure_gaussian(self, other: State) -> Union[State, float]: ...
def primal(self, other: State) -> Union[State, float]:
"""performs the measurement procedure according to the representation of the incoming state"""
diff --git a/mrmustard/lab/abstract/state.py b/mrmustard/lab/abstract/state.py
index 1ca13207e..f914fdbb6 100644
--- a/mrmustard/lab/abstract/state.py
+++ b/mrmustard/lab/abstract/state.py
@@ -27,12 +27,15 @@
Union,
)
+import matplotlib.pyplot as plt
import numpy as np
+from matplotlib import cm
-from mrmustard import settings
-from mrmustard.math import Math
+from mrmustard import math, settings
+from mrmustard.math.parameters import Constant, Variable
from mrmustard.physics import bargmann, fock, gaussian
-from mrmustard.typing import (
+from mrmustard.physics.wigner import wigner_discretized
+from mrmustard.utils.typing import (
ComplexMatrix,
ComplexTensor,
ComplexVector,
@@ -40,13 +43,10 @@
RealTensor,
RealVector,
)
-from mrmustard.utils import graphics
if TYPE_CHECKING:
from .transformation import Transformation
-math = Math()
-
# pylint: disable=too-many-instance-attributes
class State: # pylint: disable=too-many-public-methods
@@ -94,7 +94,7 @@ def __init__(
self._norm = _norm
if cov is not None and means is not None:
self.is_gaussian = True
- self.is_hilbert_vector = np.allclose(gaussian.purity(self.cov), 1.0)
+ self.is_hilbert_vector = np.allclose(gaussian.purity(self.cov), 1.0, atol=1e-6)
self.num_modes = cov.shape[-1] // 2
elif eigenvalues is not None and symplectic is not None:
self.is_gaussian = True
@@ -115,6 +115,26 @@ def __init__(
len(modes) == self.num_modes
), f"Number of modes supplied ({len(modes)}) must match the representation dimension {self.num_modes}"
+ def _add_parameter(self, parameter: Union[Constant, Variable]):
+ r"""
+ Adds a parameter to a state.
+
+ Args:
+ parameter: The parameter to add.
+ """
+ if not getattr(self, "_parameter_set", None):
+ msg = "Cannot add a parameter to a state with no parameter set."
+ raise ValueError(msg)
+ self.parameter_set.add_parameter(parameter)
+ self.__dict__[parameter.name] = parameter
+
+ @property
+ def parameter_set(self):
+ r"""
+ The set of parameters for this state.
+ """
+ return getattr(self, "_parameter_set", None)
+
@property
def modes(self):
r"""Returns the modes of the state."""
@@ -672,10 +692,119 @@ def _repr_markdown_(self):
)
if self.num_modes == 1:
- graphics.mikkel_plot(math.asnumpy(self.dm(cutoffs=self.cutoffs)))
+ mikkel_plot(math.asnumpy(self.dm(cutoffs=self.cutoffs)))
if settings.DEBUG:
detailed_info = f"\ncov={repr(self.cov)}\n" + f"means={repr(self.means)}\n"
return f"{table}\n{detailed_info}"
return table
+
+
+def mikkel_plot(
+ rho: np.ndarray,
+ xbounds: Tuple[int] = (-6, 6),
+ ybounds: Tuple[int] = (-6, 6),
+ **kwargs,
+): # pylint: disable=too-many-statements
+ """Plots the Wigner function of a state given its density matrix.
+
+ Args:
+ rho (np.ndarray): density matrix of the state
+ xbounds (Tuple[int]): range of the x axis
+ ybounds (Tuple[int]): range of the y axis
+
+ Keyword args:
+ resolution (int): number of points used to calculate the wigner function
+ xticks (Tuple[int]): ticks of the x axis
+ xtick_labels (Optional[Tuple[str]]): labels of the x axis; if None uses default formatter
+ yticks (Tuple[int]): ticks of the y axis
+ ytick_labels (Optional[Tuple[str]]): labels of the y axis; if None uses default formatter
+ grid (bool): whether to display the grid
+ cmap (matplotlib.colormap): colormap of the figure
+
+ Returns:
+ tuple: figure and axes
+ """
+
+ plot_args = {
+ "resolution": 200,
+ "xticks": (-5, 0, 5),
+ "xtick_labels": None,
+ "yticks": (-5, 0, 5),
+ "ytick_labels": None,
+ "grid": False,
+ "cmap": cm.RdBu,
+ }
+ plot_args.update(kwargs)
+
+ if plot_args["xtick_labels"] is None:
+ plot_args["xtick_labels"] = plot_args["xticks"]
+ if plot_args["ytick_labels"] is None:
+ plot_args["ytick_labels"] = plot_args["yticks"]
+
+ q, ProbX = fock.quadrature_distribution(rho)
+ p, ProbP = fock.quadrature_distribution(rho, np.pi / 2)
+
+ xvec = np.linspace(*xbounds, plot_args["resolution"])
+ pvec = np.linspace(*ybounds, plot_args["resolution"])
+ W, X, P = wigner_discretized(rho, xvec, pvec)
+
+ ### PLOTTING ###
+
+ fig, ax = plt.subplots(
+ 2,
+ 2,
+ figsize=(6, 6),
+ gridspec_kw={"width_ratios": [2, 1], "height_ratios": [1, 2]},
+ )
+ plt.subplots_adjust(wspace=0.05, hspace=0.05)
+
+ # Wigner function
+ ax[1][0].contourf(X, P, W, 120, cmap=plot_args["cmap"], vmin=-abs(W).max(), vmax=abs(W).max())
+ ax[1][0].set_xlabel("$x$", fontsize=12)
+ ax[1][0].set_ylabel("$p$", fontsize=12)
+ ax[1][0].get_xaxis().set_ticks(plot_args["xticks"])
+ ax[1][0].xaxis.set_ticklabels(plot_args["xtick_labels"])
+ ax[1][0].get_yaxis().set_ticks(plot_args["yticks"])
+ ax[1][0].yaxis.set_ticklabels(plot_args["ytick_labels"], rotation="vertical", va="center")
+ ax[1][0].tick_params(direction="in")
+ ax[1][0].set_xlim(xbounds)
+ ax[1][0].set_ylim(ybounds)
+ ax[1][0].grid(plot_args["grid"])
+
+ # X quadrature probability distribution
+ ax[0][0].fill(q, ProbX, color=plot_args["cmap"](0.5))
+ ax[0][0].plot(q, ProbX, color=plot_args["cmap"](0.8))
+ ax[0][0].get_xaxis().set_ticks(plot_args["xticks"])
+ ax[0][0].xaxis.set_ticklabels([])
+ ax[0][0].get_yaxis().set_ticks([])
+ ax[0][0].tick_params(direction="in")
+ ax[0][0].set_ylabel("Prob($x$)", fontsize=12)
+ ax[0][0].set_xlim(xbounds)
+ ax[0][0].set_ylim([0, 1.1 * max(ProbX)])
+ ax[0][0].grid(plot_args["grid"])
+
+ # P quadrature probability distribution
+ ax[1][1].fill(ProbP, p, color=plot_args["cmap"](0.5))
+ ax[1][1].plot(ProbP, p, color=plot_args["cmap"](0.8))
+ ax[1][1].get_xaxis().set_ticks([])
+ ax[1][1].get_yaxis().set_ticks(plot_args["yticks"])
+ ax[1][1].yaxis.set_ticklabels([])
+ ax[1][1].tick_params(direction="in")
+ ax[1][1].set_xlabel("Prob($p$)", fontsize=12)
+ ax[1][1].set_xlim([0, 1.1 * max(ProbP)])
+ ax[1][1].set_ylim(ybounds)
+ ax[1][1].grid(plot_args["grid"])
+
+ # Density matrix
+ ax[0][1].matshow(abs(rho), cmap=plot_args["cmap"], vmin=-abs(rho).max(), vmax=abs(rho).max())
+ ax[0][1].set_title(r"abs($\rho$)", fontsize=12)
+ ax[0][1].tick_params(direction="in")
+ ax[0][1].get_xaxis().set_ticks([])
+ ax[0][1].get_yaxis().set_ticks([])
+ ax[0][1].set_aspect("auto")
+ ax[0][1].set_ylabel(f"cutoff = {len(rho)}", fontsize=12)
+ ax[0][1].yaxis.set_label_position("right")
+
+ return fig, ax
diff --git a/mrmustard/lab/abstract/transformation.py b/mrmustard/lab/abstract/transformation.py
index 5a8323174..2a9b22340 100644
--- a/mrmustard/lab/abstract/transformation.py
+++ b/mrmustard/lab/abstract/transformation.py
@@ -19,44 +19,61 @@
from __future__ import annotations
-from typing import Callable, Iterable, List, Optional, Sequence, Tuple, Union
+from typing import Callable, Iterable, Optional, Sequence, Tuple, Union
import numpy as np
-from mrmustard import settings
-from mrmustard.math import Math
+from mrmustard import math, settings
+from mrmustard.math.parameter_set import ParameterSet
+from mrmustard.math.parameters import Constant, Variable
+from mrmustard.math.tensor_networks import Tensor
from mrmustard.physics import bargmann, fock, gaussian
-from mrmustard.training.parameter import Parameter
-from mrmustard.typing import RealMatrix, RealVector
+from mrmustard.utils.typing import RealMatrix, RealVector
from .state import State
-math = Math()
+class Transformation(Tensor):
+ r"""
+ Base class for all Transformations.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ modes_in_ket: Optional[list[int]] = None,
+ modes_out_ket: Optional[list[int]] = None,
+ modes_in_bra: Optional[list[int]] = None,
+ modes_out_bra: Optional[list[int]] = None,
+ ):
+ super().__init__(
+ name=name,
+ modes_in_ket=modes_in_ket,
+ modes_out_ket=modes_out_ket,
+ modes_in_bra=modes_in_bra,
+ modes_out_bra=modes_out_bra,
+ )
+ self._parameter_set = ParameterSet()
-class Transformation:
- r"""Base class for all Transformations."""
- is_unitary = True # whether the transformation is unitary (True by default)
+ def _add_parameter(self, parameter: Union[Constant, Variable]):
+ r"""
+ Adds a parameter to a transformation.
- def bargmann(self, numpy=False):
- X, Y, d = self.XYd(allow_none=False)
- if self.is_unitary:
- A, B, C = bargmann.wigner_to_bargmann_U(
- X if X is not None else math.identity(d.shape[-1], dtype=d.dtype),
- d if d is not None else math.zeros(X.shape[-1], dtype=X.dtype),
- )
- else:
- A, B, C = bargmann.wigner_to_bargmann_Choi(
- X if X is not None else math.identity(d.shape[-1], dtype=d.dtype),
- Y if Y is not None else math.zeros((d.shape[-1], d.shape[-1]), dtype=d.dtype),
- d if d is not None else math.zeros(X.shape[-1], dtype=X.dtype),
- )
- if numpy:
- return math.asnumpy(A), math.asnumpy(B), math.asnumpy(C)
- return A, B, C
+ Args:
+ parameter: The parameter to add.
+ """
+ self.parameter_set.add_parameter(parameter)
+ self.__dict__[parameter.name] = parameter
+
+ @property
+ def parameter_set(self):
+ r"""
+ The set of parameters for this transformation.
+ """
+ return self._parameter_set
def primal(self, state: State) -> State:
- r"""Applies ``self`` (a ``Transformation``) to other (a ``State``) and returns the transformed state.
+ r"""Applies this transformation to the given ``state`` and returns the transformed state.
Args:
state (State): the state to transform
@@ -65,13 +82,13 @@ def primal(self, state: State) -> State:
State: the transformed state
"""
if state.is_gaussian:
- new_state = self.transform_gaussian(state, dual=False)
+ new_state = self._transform_gaussian(state, dual=False)
else:
- new_state = self.transform_fock(state, dual=False)
+ new_state = self._transform_fock(state, dual=False)
return new_state
def dual(self, state: State) -> State:
- r"""Applies the dual of self (dual of a ``Transformation``) to other (a ``State``) and returns the transformed state.
+ r"""Applies the dual of this transformation to the given ``state`` and returns the transformed state.
Args:
state (State): the state to transform
@@ -80,12 +97,12 @@ def dual(self, state: State) -> State:
State: the transformed state
"""
if state.is_gaussian:
- new_state = self.transform_gaussian(state, dual=True)
+ new_state = self._transform_gaussian(state, dual=True)
else:
- new_state = self.transform_fock(state, dual=True)
+ new_state = self._transform_fock(state, dual=True)
return new_state
- def transform_gaussian(self, state: State, dual: bool) -> State:
+ def _transform_gaussian(self, state: State, dual: bool) -> State:
r"""Transforms a Gaussian state into a Gaussian state.
Args:
@@ -102,57 +119,6 @@ def transform_gaussian(self, state: State, dual: bool) -> State:
) # NOTE: assumes modes don't change
return new_state
- def transform_fock(self, state: State, dual: bool) -> State:
- r"""Transforms a state in Fock representation.
-
- Args:
- state (State): the state to transform
- dual (bool): whether to apply the dual channel
-
- Returns:
- State: the transformed state
- """
- op_idx = [state.modes.index(m) for m in self.modes]
- if self.is_unitary:
- # until we have output autocutoff we use the same input cutoff list
- U = self.U(cutoffs=[state.cutoffs[i] for i in op_idx] * 2)
- U = math.dagger(U) if dual else U
- if state.is_pure:
- return State(ket=fock.apply_kraus_to_ket(U, state.ket(), op_idx), modes=state.modes)
- return State(dm=fock.apply_kraus_to_dm(U, state.dm(), op_idx), modes=state.modes)
- else:
- # until we have output autocutoff we use the same input cutoff list
- choi = self.choi(cutoffs=[state.cutoffs[i] for i in op_idx] * 4)
- n = state.num_modes
- N0 = list(range(0, n))
- N1 = list(range(n, 2 * n))
- N2 = list(range(2 * n, 3 * n))
- N3 = list(range(3 * n, 4 * n))
- if dual:
- choi = math.transpose(choi, N1 + N0 + N3 + N2) # we flip out-in
-
- if state.is_pure:
- return State(
- dm=fock.apply_choi_to_ket(choi, state.ket(), op_idx), modes=state.modes
- )
- return State(dm=fock.apply_choi_to_dm(choi, state.dm(), op_idx), modes=state.modes)
-
- @property
- def modes(self) -> Sequence[int]:
- """Returns the list of modes on which the transformation acts on."""
- if self._modes in (None, []):
- for elem in self.XYd(allow_none=True):
- if elem is not None:
- self._modes = list(range(elem.shape[-1] // 2))
- break
- return self._modes
-
- @modes.setter
- def modes(self, modes: List[int]):
- r"""Sets the modes on which the transformation acts."""
- self._validate_modes(modes)
- self._modes = modes
-
@property
def num_modes(self) -> int:
r"""The number of modes on which the transformation acts."""
@@ -195,6 +161,59 @@ def d_vector_dual(self) -> Optional[RealVector]:
return -d
return -math.matmul(Xdual, d)
+ def bargmann(self, numpy=False):
+ X, Y, d = self.XYd(allow_none=False)
+ if self.is_unitary:
+ A, B, C = bargmann.wigner_to_bargmann_U(X, d)
+ else:
+ A, B, C = bargmann.wigner_to_bargmann_Choi(X, Y, d)
+ if numpy:
+ return math.asnumpy(A), math.asnumpy(B), math.asnumpy(C)
+ return A, B, C
+
+ def choi(
+ self,
+ cutoffs: Optional[Sequence[int]] = None,
+ shape: Optional[Sequence[int]] = None,
+ dual: bool = False,
+ ):
+ r"""Returns the Choi representation of the transformation.
+
+ If specified, ``shape`` takes precedence over ``cutoffs``.
+ The ``shape`` is in the order ``(out_L, in_L, out_R, in_R)``.
+
+ Args:
+ cutoffs: the cutoffs of the input and output modes
+ shape: the shape of the Choi matrix
+ dual: whether to return the dual Choi
+ """
+ N = self.num_modes
+ if cutoffs is None:
+ pass
+ elif len(cutoffs) != N:
+ raise ValueError(f"len(cutoffs) must be {self.num_modes} (got {len(cutoffs)})")
+
+ shape = shape or tuple(cutoffs) * 4
+
+ if self.is_unitary:
+ shape = shape[: 2 * self.num_modes]
+ U = self.U(shape[: self.num_modes])
+ Udual = self.U(shape[self.num_modes :])
+ if dual:
+ return fock.U_to_choi(U=Udual, Udual=U)
+ return fock.U_to_choi(U=U, Udual=Udual)
+
+ X, Y, d = self.XYd(allow_none=False)
+ choi = fock.wigner_to_fock_Choi(X, Y, d, shape=shape)
+ if dual:
+ n = len(shape) // 4
+ N0 = list(range(0, n))
+ N1 = list(range(n, 2 * n))
+ N2 = list(range(2 * n, 3 * n))
+ N3 = list(range(3 * n, 4 * n))
+ choi = math.conj(math.transpose(choi, N1 + N0 + N3 + N2)) # if dual we flip out-in
+ return choi
+
def XYd(
self, allow_none: bool = True
) -> Tuple[Optional[RealMatrix], Optional[RealMatrix], Optional[RealVector]]:
@@ -223,41 +242,6 @@ def XYd_dual(
ddual = math.zeros_like(Xdual[:, 0]) if self.d_vector_dual is None else self.d_vector_dual
return Xdual, Ydual, ddual
- def U(self, cutoffs: Sequence[int]):
- r"""Returns the unitary representation of the transformation."""
- if not self.is_unitary:
- return None
- X, _, d = self.XYd(allow_none=False)
- if len(cutoffs) == self.num_modes:
- shape = tuple(cutoffs) * 2
- elif len(cutoffs) == 2 * self.num_modes:
- shape = tuple(cutoffs)
-
- else:
- raise ValueError(
- f"Invalid number of cutoffs: {len(cutoffs)} (expected {self.num_modes} or {2*self.num_modes})"
- )
- return fock.wigner_to_fock_U(X, d, shape=shape)
-
- def choi(self, cutoffs: Sequence[int]):
- r"""Returns the Choi representation of the transformation."""
- if len(cutoffs) == self.num_modes:
- shape = tuple(cutoffs) * 4
- elif len(cutoffs) == 4 * self.num_modes:
- shape = tuple(cutoffs)
- else:
- raise ValueError(
- f"Invalid number of cutoffs: {len(cutoffs)} (expected {self.num_modes} or {4*self.num_modes})"
- )
- if self.is_unitary:
- shape = shape[: 2 * self.num_modes]
- U = self.U(shape[: self.num_modes])
- Udual = self.U(shape[self.num_modes :])
- return fock.U_to_choi(U, Udual)
- X, Y, d = self.XYd(allow_none=False)
-
- return fock.wigner_to_fock_Choi(X, Y, d, shape=shape)
-
def __getitem__(self, items) -> Callable:
r"""Sets the modes on which the transformation acts.
@@ -274,12 +258,12 @@ def __getitem__(self, items) -> Callable:
modes = list(items)
else:
raise ValueError(f"{items} is not a valid slice or list of modes.")
- self.modes = modes
+ if self.is_unitary:
+ self.change_modes(modes, modes)
+ else:
+ self.change_modes(modes, modes, modes, modes)
return self
- # TODO: use __class_getitem__ for compiler stuff
-
- # pylint: disable=import-outside-toplevel,cyclic-import
def __rshift__(self, other: Transformation):
r"""Concatenates self with other (other after self).
@@ -292,9 +276,7 @@ def __rshift__(self, other: Transformation):
Returns:
Circuit: A circuit that concatenates self with other
"""
- from ..circuit import (
- Circuit,
- )
+ from mrmustard.lab.circuit import Circuit # pylint: disable=import-outside-toplevel
ops1 = self._ops if isinstance(self, Circuit) else [self]
ops2 = other._ops if isinstance(other, Circuit) else [other]
@@ -327,27 +309,11 @@ def __lshift__(self, other: Union[State, Transformation]):
f"{other} of type {other.__class__} is not a valid state or transformation."
)
- # pylint: disable=too-many-branches,too-many-return-statements
- def __eq__(self, other):
- r"""Returns ``True`` if the two transformations are equal."""
- if not isinstance(other, Transformation):
- return False
- if not (self.is_gaussian and other.is_gaussian):
- return np.allclose(
- self.choi(cutoffs=[settings.EQ_TRANSFORMATION_CUTOFF] * 4 * self.num_modes),
- other.choi(cutoffs=[settings.EQ_TRANSFORMATION_CUTOFF] * 4 * self.num_modes),
- rtol=settings.EQ_TRANSFORMATION_RTOL_FOCK,
- )
-
- sX, sY, sd = self.XYd(allow_none=False)
- oX, oY, od = other.XYd(allow_none=False)
- return np.allclose(sX, oX) and np.allclose(sY, oY) and np.allclose(sd, od)
-
def __repr__(self):
- class_name = self.__class__.__name__
+ class_name = self.name
modes = self.modes
- parameters = {k: v for k, v in self.__dict__.items() if isinstance(v, Parameter)}
+ parameters = {k: v for k, v in self.__dict__.items() if isinstance(v, (Constant, Variable))}
param_str_rep = [
f"{name}={repr(math.asnumpy(par.value))}" for name, par in parameters.items()
]
@@ -357,7 +323,7 @@ def __repr__(self):
return f"{class_name}({params_str}, modes = {modes})".replace("\n", "")
def __str__(self):
- class_name = self.__class__.__name__
+ class_name = self.name
modes = self.modes
return f"<{class_name} object at {hex(id(self))} acting on modes {modes}>"
@@ -370,7 +336,9 @@ def _repr_markdown_(self):
body = ""
with np.printoptions(precision=6, suppress=True):
- parameters = {k: v for k, v in self.__dict__.items() if isinstance(v, Parameter)}
+ parameters = {
+ k: v for k, v in self.__dict__.items() if isinstance(v, (Constant, Variable))
+ }
for name, par in parameters.items():
par_value = repr(math.asnumpy(par.value)).replace("\n", "<br>")
body += (
@@ -384,3 +352,115 @@ def _repr_markdown_(self):
)
return header + body
+
+
+class Unitary(Transformation):
+ r"""
+ A unitary transformation.
+
+ Args:
+ name: The name of this unitary.
+ modes: The modes that this unitary acts on.
+ """
+
+ def __init__(self, name: str, modes: list[int]):
+ super().__init__(name=name, modes_in_ket=modes, modes_out_ket=modes)
+ self.is_unitary = True
+
+ def value(self, shape: Tuple[int]):
+ return self.U(shape=shape)
+
+ def _transform_fock(self, state: State, dual=False) -> State:
+ op_idx = [state.modes.index(m) for m in self.modes]
+ U = self.U(cutoffs=[state.cutoffs[i] for i in op_idx])
+ if state.is_hilbert_vector:
+ return State(ket=fock.apply_kraus_to_ket(U, state.ket(), op_idx), modes=state.modes)
+ return State(dm=fock.apply_kraus_to_dm(U, state.dm(), op_idx), modes=state.modes)
+
+ def U(
+ self,
+ cutoffs: Optional[Sequence[int]] = None,
+ shape: Optional[Sequence[int]] = None,
+ ):
+ r"""Returns the unitary representation of the transformation.
+
+ If specified, ``shape`` takes precedence over ``cutoffs``.
+ ``shape`` is in the order ``(out, in)``.
+
+ Note that for a unitary transformation on N modes, ``len(cutoffs)`` is ``N``
+ and ``len(shape)`` is ``2N``.
+
+ Args:
+ cutoffs: the cutoffs of the input and output modes
+ shape: the shape of the unitary matrix
+
+ Returns:
+ ComplexTensor: the unitary matrix in Fock representation
+ """
+ if cutoffs is None:
+ pass
+ elif len(cutoffs) != self.num_modes:
+ raise ValueError(f"len(cutoffs) must be {self.num_modes} (got {len(cutoffs)})")
+ shape = shape or tuple(cutoffs) * 2
+ X, _, d = self.XYd(allow_none=False)
+ return fock.wigner_to_fock_U(X, d, shape=shape)
+
+ def __eq__(self, other):
+ r"""Returns ``True`` if the two transformations are equal."""
+ if not isinstance(other, Unitary):
+ return False
+ if not (self.is_gaussian and other.is_gaussian):
+ return np.allclose(
+ self.U(cutoffs=[settings.EQ_TRANSFORMATION_CUTOFF] * self.num_modes),
+ other.U(cutoffs=[settings.EQ_TRANSFORMATION_CUTOFF] * self.num_modes),
+ rtol=settings.EQ_TRANSFORMATION_RTOL_FOCK,
+ )
+ sX, sY, sd = self.XYd(allow_none=False)
+ oX, oY, od = other.XYd(allow_none=False)
+ assert np.isclose(np.linalg.norm(sY), 0)
+ assert np.isclose(np.linalg.norm(oY), 0)
+ return np.allclose(sX, oX) and np.allclose(sd, od)
+
+
+class Channel(Transformation):
+ r"""
+ A quantum channel.
+
+ Args:
+ name: The name of this channel.
+ modes: The modes that this channel acts on.
+ """
+
+ def __init__(self, name: str, modes: list[int]):
+ super().__init__(
+ name=name,
+ modes_in_ket=modes,
+ modes_out_ket=modes,
+ modes_in_bra=modes,
+ modes_out_bra=modes,
+ )
+ self.is_unitary = False
+
+ def _transform_fock(self, state: State, dual: bool = False) -> State:
+ op_idx = [state.modes.index(m) for m in self.modes]
+ choi = self.choi(cutoffs=[state.cutoffs[i] for i in op_idx], dual=dual)
+ if state.is_hilbert_vector:
+ return State(dm=fock.apply_choi_to_ket(choi, state.ket(), op_idx), modes=state.modes)
+ return State(dm=fock.apply_choi_to_dm(choi, state.dm(), op_idx), modes=state.modes)
+
+ def value(self, shape: Tuple[int]):
+ return self.choi(shape=shape)
+
+ def __eq__(self, other):
+ r"""Returns ``True`` if the two transformations are equal."""
+ if not isinstance(other, Channel):
+ return False
+ if not (self.is_gaussian and other.is_gaussian):
+ return np.allclose(
+ self.choi(cutoffs=[settings.EQ_TRANSFORMATION_CUTOFF] * self.num_modes),
+ other.choi(cutoffs=[settings.EQ_TRANSFORMATION_CUTOFF] * self.num_modes),
+ rtol=settings.EQ_TRANSFORMATION_RTOL_FOCK,
+ )
+ sX, sY, sd = self.XYd(allow_none=False)
+ oX, oY, od = other.XYd(allow_none=False)
+ return np.allclose(sX, oX) and np.allclose(sY, oY) and np.allclose(sd, od)
diff --git a/mrmustard/lab/circuit.py b/mrmustard/lab/circuit.py
index 3b1c2040b..ae3a89f1b 100644
--- a/mrmustard/lab/circuit.py
+++ b/mrmustard/lab/circuit.py
@@ -24,13 +24,14 @@
from mrmustard import settings
from mrmustard.lab.abstract import State, Transformation
-from mrmustard.training import Parametrized
-from mrmustard.typing import RealMatrix, RealVector
-from mrmustard.utils.circdrawer import circuit_text
-from mrmustard.utils.xptensor import XPMatrix, XPVector
+from mrmustard.utils.typing import RealMatrix, RealVector
+from mrmustard.lab.circuit_drawer import circuit_text
+from mrmustard.math.tensor_wrappers import XPMatrix, XPVector
+import numpy as np
-class Circuit(Transformation, Parametrized):
+
+class Circuit(Transformation):
"""Represents a quantum circuit: a set of operations to be applied on quantum states.
Args:
@@ -39,9 +40,16 @@ class Circuit(Transformation, Parametrized):
def __init__(self, ops: Optional[List] = None):
self._ops = list(ops) if ops is not None else []
- super().__init__()
+ super().__init__(name="Circuit")
self.reset()
+ @property
+ def ops(self) -> Optional[List]:
+ r"""
+ The list of operations comprising the circuit.
+ """
+ return self._ops
+
def reset(self):
"""Resets the state of the circuit clearing the list of modes and setting the compiled flag to false."""
self._compiled: bool = False
@@ -97,6 +105,9 @@ def is_unitary(self):
"""Returns `true` if all operations in the circuit are unitary."""
return all(op.is_unitary for op in self._ops)
+ def value(self, shape: Tuple[int]):
+ raise NotImplementedError
+
def __len__(self):
return len(self._ops)
@@ -112,3 +123,19 @@ def __str__(self):
"""String representation of the circuit."""
ops_repr = [repr(op) for op in self._ops]
return " >> ".join(ops_repr)
+
+ # pylint: disable=too-many-branches,too-many-return-statements
+ def __eq__(self, other):
+ r"""Returns ``True`` if the two transformations are equal."""
+ if not isinstance(other, Circuit):
+ return False
+ if not (self.is_gaussian and other.is_gaussian):
+ return np.allclose(
+ self.choi(cutoffs=[settings.EQ_TRANSFORMATION_CUTOFF] * 4 * self.num_modes),
+ other.choi(cutoffs=[settings.EQ_TRANSFORMATION_CUTOFF] * 4 * self.num_modes),
+ rtol=settings.EQ_TRANSFORMATION_RTOL_FOCK,
+ )
+
+ sX, sY, sd = self.XYd(allow_none=False)
+ oX, oY, od = other.XYd(allow_none=False)
+ return np.allclose(sX, oX) and np.allclose(sY, oY) and np.allclose(sd, od)
diff --git a/mrmustard/utils/circdrawer.py b/mrmustard/lab/circuit_drawer.py
similarity index 95%
rename from mrmustard/utils/circdrawer.py
rename to mrmustard/lab/circuit_drawer.py
index 56455556b..9c73b2f7d 100644
--- a/mrmustard/utils/circdrawer.py
+++ b/mrmustard/lab/circuit_drawer.py
@@ -16,6 +16,10 @@
"""
from collections import defaultdict
+__all__ = [
+ "circuit_text",
+]
+
def mode_set(op):
"includes modes in between min and max of op.modes"
@@ -24,7 +28,7 @@ def mode_set(op):
def drawable_layers(ops):
r"""Determine non-overlapping yet dense placement of ops into layers for drawing.
- Arguments:
+ Args:
ops Iterable[op]: a list of operations
Returns:
@@ -59,7 +63,7 @@ def _add_op(op, layer_str, decimals):
control = [op.modes[0]]
label = op.short_name
if decimals is not None:
- param_string = op.param_string(decimals)
+ param_string = op.parameter_set.to_string(decimals)
if param_string == "":
param_string = str(len(op.modes))
label += "(" + param_string + ")"
@@ -74,8 +78,8 @@ def circuit_text(
ops,
decimals=None,
):
- r"""Text based diagram for a Quantum circuit.
- Arguments:
+ r"""Text based diagram for a quantum circuit.
+ Args:
ops (List[Transformation]): the operations and measurements to draw as a list of MrMustard operations
decimals (optional(int)): How many decimal points to include when formatting operation parameters.
Default ``None`` will omit parameters from operation labels.
diff --git a/mrmustard/lab/detectors.py b/mrmustard/lab/detectors.py
index 578f30d1d..7310d2884 100644
--- a/mrmustard/lab/detectors.py
+++ b/mrmustard/lab/detectors.py
@@ -19,22 +19,20 @@
from typing import Iterable, List, Optional, Tuple, Union
from mrmustard import settings
-from mrmustard.math import Math
from mrmustard.physics import fock, gaussian
-from mrmustard.training import Parametrized
-from mrmustard.typing import RealMatrix, RealVector
+from mrmustard.utils.typing import RealMatrix, RealVector
+from mrmustard import math
from .abstract import FockMeasurement, Measurement, State
from .gates import Rgate
from .states import Coherent, DisplacedSqueezed
-
-math = Math()
+from .utils import make_parameter
__all__ = ["PNRDetector", "ThresholdDetector", "Generaldyne", "Homodyne", "Heterodyne"]
# pylint: disable=no-member
-class PNRDetector(Parametrized, FockMeasurement):
+class PNRDetector(FockMeasurement):
r"""Photon Number Resolving detector.
If ``len(modes) > 1`` the detector is applied in parallel to all of the modes provided.
@@ -72,29 +70,29 @@ def __init__(
modes: List[int] = None,
cutoffs: Union[int, List[int]] = None,
):
- Parametrized.__init__(
- self,
- efficiency=math.atleast_1d(efficiency),
- dark_counts=math.atleast_1d(dark_counts),
- efficiency_trainable=efficiency_trainable,
- dark_counts_trainable=dark_counts_trainable,
- efficiency_bounds=efficiency_bounds,
- dark_counts_bounds=dark_counts_bounds,
- )
-
self._stochastic_channel = stochastic_channel
self._should_recompute_stochastic_channel = efficiency_trainable or dark_counts_trainable
+ eff = math.atleast_1d(efficiency)
+ dk = math.atleast_1d(dark_counts)
+
if modes is not None:
num_modes = len(modes)
elif cutoffs is not None:
num_modes = len(cutoffs)
else:
- num_modes = max(len(math.atleast_1d(efficiency)), len(math.atleast_1d(dark_counts)))
+ num_modes = max(len(eff), len(dk))
modes = modes or list(range(num_modes))
outcome = None
- FockMeasurement.__init__(self, outcome, modes, cutoffs)
+ super().__init__(outcome, modes, cutoffs)
+
+ self._add_parameter(
+ make_parameter(efficiency_trainable, eff, "efficiency", efficiency_bounds)
+ )
+ self._add_parameter(
+ make_parameter(dark_counts_trainable, dk, "dark_counts", dark_counts_bounds)
+ )
self.recompute_stochastic_channel()
@@ -132,7 +130,7 @@ def recompute_stochastic_channel(self, cutoffs: List[int] = None):
# pylint: disable: no-member
-class ThresholdDetector(Parametrized, FockMeasurement):
+class ThresholdDetector(FockMeasurement):
r"""Threshold detector: any Fock component other than vacuum counts toward a click in the detector.
If ``len(modes) > 1`` the detector is applied in parallel to all of the modes provided.
@@ -178,16 +176,6 @@ def __init__(
modes = modes or list(range(num_modes))
- Parametrized.__init__(
- self,
- efficiency=efficiency,
- dark_count_prob=dark_count_prob,
- efficiency_trainable=efficiency_trainable,
- dark_count_prob_trainable=dark_count_prob_trainable,
- efficiency_bounds=efficiency_bounds,
- dark_count_prob_bounds=dark_count_prob_bounds,
- )
-
self._stochastic_channel = stochastic_channel
cutoffs = [2] * num_modes
@@ -196,7 +184,19 @@ def __init__(
)
outcome = None
- FockMeasurement.__init__(self, outcome, modes, cutoffs)
+ super().__init__(outcome, modes, cutoffs)
+
+ self._add_parameter(
+ make_parameter(efficiency_trainable, efficiency, "efficiency", efficiency_bounds)
+ )
+ self._add_parameter(
+ make_parameter(
+ dark_count_prob_trainable,
+ dark_count_prob,
+ "dark_count_prob",
+ dark_count_prob_bounds,
+ )
+ )
self.recompute_stochastic_channel()
diff --git a/mrmustard/lab/gates.py b/mrmustard/lab/gates.py
index 2da05f71b..374cf39df 100644
--- a/mrmustard/lab/gates.py
+++ b/mrmustard/lab/gates.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# pylint: disable=no-member
+# pylint: disable=no-member, import-outside-toplevel
"""
This module defines gates and operations that can be applied to quantum modes to construct a quantum circuit.
@@ -20,14 +20,14 @@
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
-from mrmustard import settings
-from mrmustard.lab.abstract import State, Transformation
-from mrmustard.math import Math
-from mrmustard.physics import fock, gaussian
-from mrmustard.training import Parametrized
-from mrmustard.typing import ComplexMatrix, RealMatrix
-math = Math()
+from mrmustard import settings
+from mrmustard.physics import gaussian, fock
+from mrmustard.utils.typing import ComplexMatrix, RealMatrix
+from mrmustard import math
+from mrmustard.math.parameters import update_orthogonal, update_symplectic, update_unitary
+from .abstract import Channel, Unitary, State
+from .utils import make_parameter
__all__ = [
"Dgate",
@@ -49,7 +49,7 @@
]
-class Dgate(Parametrized, Transformation):
+class Dgate(Unitary):
r"""Displacement gate.
If ``len(modes) > 1`` the gate is applied in parallel to all of the modes provided.
@@ -69,55 +69,67 @@ class Dgate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "D"
+ parallelizable = True
+
def __init__(
self,
- x: Union[Optional[float], Optional[List[float]]] = 0.0,
- y: Union[Optional[float], Optional[List[float]]] = 0.0,
+ x: Union[float, List[float]] = 0.0,
+ y: Union[float, List[float]] = 0.0,
x_trainable: bool = False,
y_trainable: bool = False,
x_bounds: Tuple[Optional[float], Optional[float]] = (None, None),
y_bounds: Tuple[Optional[float], Optional[float]] = (None, None),
modes: Optional[List[int]] = None,
):
+ m = max(len(math.atleast_1d(x)), len(math.atleast_1d(y)))
super().__init__(
- x=x,
- y=y,
- x_trainable=x_trainable,
- y_trainable=y_trainable,
- x_bounds=x_bounds,
- y_bounds=y_bounds,
+ modes=modes or list(range(m)),
+ name="Dgate",
)
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "D"
+ self._add_parameter(make_parameter(x_trainable, x, "x", x_bounds))
+ self._add_parameter(make_parameter(y_trainable, y, "y", y_bounds))
@property
def d_vector(self):
return gaussian.displacement(self.x.value, self.y.value)
- def U(self, cutoffs: Sequence[int]):
+ def U(self, cutoffs: Optional[Sequence[int]] = None, shape: Optional[Sequence[int]] = None):
r"""Returns the unitary representation of the Displacement gate using
the Laguerre polynomials.
+ If specified, ``shape`` takes precedence over ``cutoffs``.
+ ``shape`` is in the order ``(out, in)``.
+
+ Note that for a unitary transformation on N modes, ``len(cutoffs)`` is ``N``
+ and ``len(shape)`` is ``2N``.
+
Arguments:
- cutoffs (Sequence[int]): the Hilbert space dimension cutoff for each mode
+ cutoffs: the Hilbert space dimension cutoff for each mode.
+ shape: the shape of the unitary matrix.
Returns:
Raises:
ValueError: if the length of the cutoffs array is different from N and 2N
"""
-
N = self.num_modes
- x = self.x.value * math.ones(N, dtype=self.x.value.dtype)
- y = self.y.value * math.ones(N, dtype=self.y.value.dtype)
- if len(cutoffs) == N:
- shape = tuple(cutoffs) * 2
+ if cutoffs is None:
+ pass
+ elif len(cutoffs) == N:
+ cutoffs = tuple(cutoffs) * 2
elif len(cutoffs) == 2 * N:
- shape = tuple(cutoffs)
+ cutoffs = tuple(cutoffs)
else:
raise ValueError(
"len(cutoffs) should be either equal to the number of modes or twice the number of modes (for output-input)."
)
+ shape = shape or cutoffs
+ if shape is None:
+ raise ValueError("One of `cutoffs` or `shape` must be specified.")
+
+ x = self.x.value * math.ones(N, dtype=self.x.value.dtype)
+ y = self.y.value * math.ones(N, dtype=self.y.value.dtype)
if N > 1:
# calculate displacement unitary for each mode and concatenate with outer product
@@ -137,7 +149,7 @@ def U(self, cutoffs: Sequence[int]):
return fock.displacement(x[0], y[0], shape=shape)
-class Sgate(Parametrized, Transformation):
+class Sgate(Unitary):
r"""Squeezing gate.
If ``len(modes) > 1`` the gate is applied in parallel to all of the modes provided.
@@ -157,45 +169,58 @@ class Sgate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "S"
+ parallelizable = True
+
def __init__(
self,
- r: Union[Optional[float], Optional[List[float]]] = 0.0,
- phi: Union[Optional[float], Optional[List[float]]] = 0.0,
+ r: Union[float, list[float]] = 0.0,
+ phi: Union[float, list[float]] = 0.0,
r_trainable: bool = False,
phi_trainable: bool = False,
r_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None),
phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None),
- modes: Optional[List[int]] = None,
+ modes: Optional[list[int]] = None,
):
super().__init__(
- r=r,
- phi=phi,
- r_trainable=r_trainable,
- phi_trainable=phi_trainable,
- r_bounds=r_bounds,
- phi_bounds=phi_bounds,
+ modes=modes or list(range(len(math.atleast_1d(r)))), # type: ignore
+ name="Sgate",
)
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "S"
+ self._add_parameter(make_parameter(r_trainable, r, "r", r_bounds))
+ self._add_parameter(make_parameter(phi_trainable, phi, "phi", phi_bounds))
- def U(self, cutoffs: Sequence[int]):
+ def U(self, cutoffs: Optional[Sequence[int]] = None, shape: Optional[Sequence[int]] = None):
r"""Returns the unitary representation of the Squeezing gate.
+
+ If specified, ``shape`` takes precedence over ``cutoffs``.
+ ``shape`` is in the order ``(out, in)``.
+
+ Note that for a unitary transformation on N modes, ``len(cutoffs)`` is ``N``
+ and ``len(shape)`` is ``2N``.
+
Args:
- cutoffs (Sequence[int]): the Hilbert space dimension cutoff for each mode
+ cutoffs: the Hilbert space dimension cutoff for each mode.
+ shape: the shape of the unitary matrix.
Returns:
array[complex]: the unitary matrix
"""
N = self.num_modes
- if len(cutoffs) == N:
- shape = tuple(cutoffs) * 2
+ if cutoffs is None:
+ pass
+ elif len(cutoffs) == N:
+ cutoffs = tuple(cutoffs) * 2
elif len(cutoffs) == 2 * N:
- shape = tuple(cutoffs)
+ cutoffs = tuple(cutoffs)
else:
raise ValueError(
"len(cutoffs) should be either equal to the number of modes or twice the number of modes (for output-input)."
)
+ shape = shape or cutoffs
+ if shape is None:
+ raise ValueError("One of `cutoffs` or `shape` must be specified.")
+
# this works both or scalar r/phi and vector r/phi:
r = self.r.value * math.ones(N, dtype=self.r.value.dtype)
phi = self.phi.value * math.ones(N, dtype=self.phi.value.dtype)
@@ -221,7 +246,7 @@ def X_matrix(self):
return gaussian.squeezing_symplectic(self.r.value, self.phi.value)
-class Rgate(Parametrized, Transformation):
+class Rgate(Unitary):
r"""Rotation gate.
If ``len(modes) > 1`` the gate is applied in parallel to all of the modes provided.
@@ -239,47 +264,66 @@ class Rgate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "R"
+ parallelizable = True
+
def __init__(
self,
- angle: Union[Optional[float], Optional[List[float]]] = 0.0,
+ angle: Union[float, list[float]] = 0.0,
angle_trainable: bool = False,
angle_bounds: Tuple[Optional[float], Optional[float]] = (None, None),
- modes: Optional[List[int]] = None,
+ modes: Optional[list[int]] = None,
):
super().__init__(
- angle=angle,
- angle_trainable=angle_trainable,
- angle_bounds=angle_bounds,
+ modes=modes or list(range(len(math.atleast_1d(angle)))), # type: ignore
+ name="Rgate",
)
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "R"
+ self._add_parameter(make_parameter(angle_trainable, angle, "angle", angle_bounds))
@property
def X_matrix(self):
return gaussian.rotation_symplectic(self.angle.value)
- def U(self, cutoffs: Sequence[int], diag_only=False):
+ def U(
+ self,
+ cutoffs: Optional[Sequence[int]] = None,
+ shape: Optional[Sequence[int]] = None,
+ diag_only=False,
+ ):
r"""Returns the unitary representation of the Rotation gate.
+ If specified, ``shape`` takes precedence over ``cutoffs``.
+ ``shape`` is in the order ``(out, in)``.
+
+ Note that for a unitary transformation on N modes, ``len(cutoffs)`` is ``N``
+ and ``len(shape)`` is ``2N``.
+
Args:
- cutoffs (Sequence[int]): cutoff dimension for each mode.
- diag_only (bool): if True, only return the diagonal of the unitary matrix.
+ cutoffs: cutoff dimension for each mode.
+ shape: the shape of the unitary matrix
+ diag_only: if True, only return the diagonal of the unitary matrix.
Returns:
array[complex]: the unitary matrix
"""
+ N = self.num_modes
if diag_only:
raise NotImplementedError("Rgate does not support diag_only=True yet")
- N = self.num_modes
- if len(cutoffs) == N:
- shape = tuple(cutoffs) * 2
+ if cutoffs is None:
+ pass
+ elif len(cutoffs) == N:
+ cutoffs = tuple(cutoffs) * 2
elif len(cutoffs) == 2 * N:
- shape = tuple(cutoffs)
+ cutoffs = tuple(cutoffs)
else:
raise ValueError(
"len(cutoffs) should be either equal to the number of modes or twice the number of modes (for output-input)."
)
+ shape = shape or cutoffs
+ if shape is None:
+ raise ValueError("One of `cutoffs` or `shape` must be specified.")
+
angles = self.angle.value * math.ones(self.num_modes, dtype=self.angle.value.dtype)
# calculate rotation unitary for each mode and concatenate with outer product
@@ -299,7 +343,7 @@ def U(self, cutoffs: Sequence[int], diag_only=False):
)
-class Pgate(Parametrized, Transformation):
+class Pgate(Unitary):
r"""Quadratic phase gate.
If len(modes) > 1 the gate is applied in parallel to all of the modes provided. If a parameter
@@ -315,28 +359,31 @@ class Pgate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "P"
+ parallelizable = True
+
def __init__(
self,
- shearing: Union[Optional[float], Optional[List[float]]] = 0.0,
+ shearing: Union[Optional[float], Optional[list[float]]] = 0.0,
shearing_trainable: bool = False,
shearing_bounds: Tuple[Optional[float], Optional[float]] = (None, None),
- modes: Optional[List[int]] = None,
+ modes: Optional[list[int]] = None,
):
super().__init__(
- shearing=shearing,
- shearing_trainable=shearing_trainable,
- shearing_bounds=shearing_bounds,
+ modes=modes or list(range(len(math.atleast_1d(shearing)))),
+ name="Pgate",
+ )
+ self._add_parameter(
+ make_parameter(shearing_trainable, shearing, "shearing", shearing_bounds)
)
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "P"
@property
def X_matrix(self):
return gaussian.quadratic_phase(self.shearing.value)
-class CXgate(Parametrized, Transformation):
+class CXgate(Unitary):
r"""Controlled X gate.
It applies to a single pair of modes. One can optionally set bounds for each parameter, which
@@ -349,6 +396,10 @@ class CXgate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "CX"
+ parallelizable = False
+
def __init__(
self,
s: Optional[float] = 0.0,
@@ -357,20 +408,17 @@ def __init__(
modes: Optional[List[int]] = None,
):
super().__init__(
- s=s,
- s_trainable=s_trainable,
- s_bounds=s_bounds,
+ modes=modes or [0, 1],
+ name="CXgate",
)
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "CX"
+ self._add_parameter(make_parameter(s_trainable, s, "s", s_bounds))
@property
def X_matrix(self):
return gaussian.controlled_X(self.s.value)
-class CZgate(Parametrized, Transformation):
+class CZgate(Unitary):
r"""Controlled Z gate.
It applies to a single pair of modes. One can optionally set bounds for each parameter, which
@@ -383,6 +431,10 @@ class CZgate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "CZ"
+ parallelizable = False
+
def __init__(
self,
s: Optional[float] = 0.0,
@@ -391,20 +443,17 @@ def __init__(
modes: Optional[List[int]] = None,
):
super().__init__(
- s=s,
- s_trainable=s_trainable,
- s_bounds=s_bounds,
+ modes=modes or [0, 1],
+ name="CZgate",
)
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "CZ"
+ self._add_parameter(make_parameter(s_trainable, s, "s", s_bounds))
@property
def X_matrix(self):
return gaussian.controlled_Z(self.s.value)
-class BSgate(Parametrized, Transformation):
+class BSgate(Unitary):
r"""Beam splitter gate.
It applies to a single pair of modes.
@@ -420,35 +469,46 @@ class BSgate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "BS"
+ parallelizable = False
+
def __init__(
self,
- theta: Optional[float] = 0.0,
- phi: Optional[float] = 0.0,
+ theta: float = 0.0,
+ phi: float = 0.0,
theta_trainable: bool = False,
phi_trainable: bool = False,
theta_bounds: Tuple[Optional[float], Optional[float]] = (None, None),
phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None),
- modes: Optional[List[int]] = None,
+ modes: Optional[list[int]] = None,
):
super().__init__(
- theta=theta,
- phi=phi,
- theta_trainable=theta_trainable,
- phi_trainable=phi_trainable,
- theta_bounds=theta_bounds,
- phi_bounds=phi_bounds,
+ modes=modes or [0, 1], # type: ignore
+ name="BSgate",
)
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "BS"
+ self._add_parameter(make_parameter(theta_trainable, theta, "theta", theta_bounds))
+ self._add_parameter(make_parameter(phi_trainable, phi, "phi", phi_bounds))
+
+ def U(
+ self,
+ cutoffs: Optional[List[int]] = None,
+ shape: Optional[Sequence[int]] = None,
+ method=None,
+ ):
+ r"""Returns the unitary representation of the beam splitter.
+
+ If specified, ``shape`` takes precedence over ``cutoffs``.
+ ``shape`` is in the order ``(out, in)``.
- def U(self, cutoffs: Optional[List[int]], method=None):
- r"""Returns the symplectic transformation matrix for the beam splitter.
+ Note that for a unitary transformation on N modes, ``len(cutoffs)`` is ``N``
+ and ``len(shape)`` is ``2N``.
Args:
- cutoffs (List[int]): the list of cutoff dimensions for each mode
+ cutoffs: the list of cutoff dimensions for each mode
in the order (out_0, out_1, in_0, in_1).
- method (str): the method used to compute the unitary matrix. Options are:
+ shape: the shape of the unitary matrix
+ method: the method used to compute the unitary matrix. Options are:
* 'vanilla': uses the standard method
* 'schwinger': slower, but numerically stable
default is set in settings.DEFAULT_BS_METHOD (with 'vanilla' by default)
@@ -456,12 +516,17 @@ def U(self, cutoffs: Optional[List[int]], method=None):
Returns:
array[complex]: the unitary tensor of the beamsplitter
"""
- if len(cutoffs) == 4:
+ if cutoffs is None:
+ pass
+ elif len(cutoffs) == 4:
shape = tuple(cutoffs)
elif len(cutoffs) == 2:
shape = tuple(cutoffs) + tuple(cutoffs)
else:
raise ValueError(f"Invalid len(cutoffs): {len(cutoffs)} (should be 2 or 4).")
+
+ shape = shape or cutoffs
+
return fock.beamsplitter(
self.theta.value,
self.phi.value,
@@ -480,7 +545,7 @@ def _validate_modes(self, modes):
)
-class MZgate(Parametrized, Transformation):
+class MZgate(Unitary):
r"""Mach-Zehnder gate.
It supports two conventions:
@@ -500,10 +565,14 @@ class MZgate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "MZ"
+ parallelizable = False
+
def __init__(
self,
- phi_a: Optional[float] = 0.0,
- phi_b: Optional[float] = 0.0,
+ phi_a: float = 0.0,
+ phi_b: float = 0.0,
phi_a_trainable: bool = False,
phi_b_trainable: bool = False,
phi_a_bounds: Tuple[Optional[float], Optional[float]] = (None, None),
@@ -512,17 +581,12 @@ def __init__(
modes: Optional[List[int]] = None,
):
super().__init__(
- phi_a=phi_a,
- phi_b=phi_b,
- phi_a_trainable=phi_a_trainable,
- phi_b_trainable=phi_b_trainable,
- phi_a_bounds=phi_a_bounds,
- phi_b_bounds=phi_b_bounds,
+ modes=modes or [0, 1],
+ name="MZgate",
)
+ self._add_parameter(make_parameter(phi_a_trainable, phi_a, "phi_a", phi_a_bounds))
+ self._add_parameter(make_parameter(phi_b_trainable, phi_b, "phi_b", phi_b_bounds))
self._internal = internal
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "MZ"
@property
def X_matrix(self):
@@ -535,7 +599,7 @@ def _validate_modes(self, modes):
)
-class S2gate(Parametrized, Transformation):
+class S2gate(Unitary):
r"""Two-mode squeezing gate.
It applies to a single pair of modes. One can optionally set bounds for each parameter, which the optimizer will respect.
@@ -550,10 +614,14 @@ class S2gate(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "S2"
+ parallelizable = False
+
def __init__(
self,
- r: Optional[float] = 0.0,
- phi: Optional[float] = 0.0,
+ r: float = 0.0,
+ phi: float = 0.0,
r_trainable: bool = False,
phi_trainable: bool = False,
r_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None),
@@ -561,16 +629,11 @@ def __init__(
modes: Optional[List[int]] = None,
):
super().__init__(
- r=r,
- phi=phi,
- r_trainable=r_trainable,
- phi_trainable=phi_trainable,
- r_bounds=r_bounds,
- phi_bounds=phi_bounds,
+ modes=modes or [0, 1],
+ name="S2gate",
)
- self._modes = modes
- self.is_gaussian = True
- self.short_name = "S2"
+ self._add_parameter(make_parameter(r_trainable, r, "r", r_bounds))
+ self._add_parameter(make_parameter(phi_trainable, phi, "phi", phi_bounds))
@property
def X_matrix(self):
@@ -581,7 +644,7 @@ def _validate_modes(self, modes):
raise ValueError(f"Invalid number of modes: {len(modes)} (should be 2")
-class Interferometer(Parametrized, Transformation):
+class Interferometer(Unitary):
r"""N-mode interferometer.
It corresponds to a Ggate with zero mean and a ``2N x 2N`` unitary symplectic matrix.
@@ -593,24 +656,28 @@ class Interferometer(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "I"
+ parallelizable = False
+
def __init__(
self,
num_modes: int,
unitary: Optional[ComplexMatrix] = None,
unitary_trainable: bool = False,
- modes: Optional[List[int]] = None,
+ modes: Optional[list[int]] = None,
):
if modes is not None and num_modes != len(modes):
raise ValueError(f"Invalid number of modes: got {len(modes)}, should be {num_modes}")
if unitary is None:
unitary = math.random_unitary(num_modes)
super().__init__(
- unitary=unitary,
- unitary_trainable=unitary_trainable,
+ modes=modes or list(range(num_modes)),
+ name="Interferometer",
+ )
+ self._add_parameter(
+ make_parameter(unitary_trainable, unitary, "unitary", (None, None), update_unitary)
)
- self._modes = modes or list(range(num_modes))
- self.is_gaussian = True
- self.short_name = "I"
@property
def X_matrix(self):
@@ -633,7 +700,7 @@ def __repr__(self):
return f"Interferometer(num_modes = {len(modes)}, unitary = {unitary}){modes}"
-class RealInterferometer(Parametrized, Transformation):
+class RealInterferometer(Unitary):
r"""N-mode interferometer parametrized by an NxN orthogonal matrix (or 2N x 2N block-diagonal orthogonal matrix). This interferometer does not mix q and p.
Does not mix q's and p's.
@@ -643,6 +710,10 @@ class RealInterferometer(Parametrized, Transformation):
orthogonal_trainable (bool): whether orthogonal is a trainable variable
"""
+ is_gaussian = True
+ short_name = "RI"
+ parallelizable = False
+
def __init__(
self,
num_modes: int,
@@ -654,10 +725,16 @@ def __init__(
raise ValueError(f"Invalid number of modes: got {len(modes)}, should be {num_modes}")
if orthogonal is None:
orthogonal = math.random_orthogonal(num_modes)
- super().__init__(orthogonal=orthogonal, orthogonal_trainable=orthogonal_trainable)
- self._modes = modes or list(range(num_modes))
- self._is_gaussian = True
- self.short_name = "RI"
+
+ super().__init__(
+ modes=modes or list(range(num_modes)),
+ name="RealInterferometer",
+ )
+ self._add_parameter(
+ make_parameter(
+ orthogonal_trainable, orthogonal, "orthogonal", (None, None), update_orthogonal
+ )
+ )
@property
def X_matrix(self):
@@ -680,7 +757,7 @@ def __repr__(self):
return f"RealInterferometer(num_modes = {len(modes)}, orthogonal = {orthogonal}){modes}"
-class Ggate(Parametrized, Transformation):
+class Ggate(Unitary):
r"""A generic N-mode Gaussian unitary transformation with zero displacement.
If a symplectic matrix is not provided, one will be picked at random with effective squeezing
@@ -692,24 +769,31 @@ class Ggate(Parametrized, Transformation):
symplectic_trainable (bool): whether symplectic is a trainable variable.
"""
+ is_gaussian = True
+ short_name = "G"
+ parallelizable = False
+
def __init__(
self,
num_modes: int,
symplectic: Optional[RealMatrix] = None,
symplectic_trainable: bool = False,
- modes: Optional[List[int]] = None,
+ modes: Optional[list[int]] = None,
):
if modes is not None and (num_modes != len(modes)):
raise ValueError(f"Invalid number of modes: got {len(modes)}, should be {num_modes}")
if symplectic is None:
symplectic = math.random_symplectic(num_modes)
+
super().__init__(
- symplectic=symplectic,
- symplectic_trainable=symplectic_trainable,
+ modes=modes or list(range(num_modes)),
+ name="Ggate",
+ )
+ self._add_parameter(
+ make_parameter(
+ symplectic_trainable, symplectic, "symplectic", (None, None), update_symplectic
+ )
)
- self._modes = modes or list(range(num_modes))
- self.is_gaussian = True
- self.short_name = "G"
@property
def X_matrix(self):
@@ -733,7 +817,7 @@ def __repr__(self):
# pylint: disable=no-member
-class Attenuator(Parametrized, Transformation):
+class Attenuator(Channel):
r"""The noisy attenuator channel.
It corresponds to mixing with a thermal environment and applying the pure loss channel. The pure
@@ -763,6 +847,10 @@ class Attenuator(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "Att"
+ parallelizable = True
+
def __init__(
self,
transmissivity: Union[Optional[float], Optional[List[float]]] = 1.0,
@@ -774,17 +862,19 @@ def __init__(
modes: Optional[List[int]] = None,
):
super().__init__(
- transmissivity=transmissivity,
- nbar=nbar,
- transmissivity_trainable=transmissivity_trainable,
- nbar_trainable=nbar_trainable,
- transmissivity_bounds=transmissivity_bounds,
- nbar_bounds=nbar_bounds,
+ modes=modes or list(range(len(math.atleast_1d(transmissivity)))),
+ name="Attenuator",
)
- self._modes = modes
- self.is_unitary = False
- self.is_gaussian = True
- self.short_name = "Att"
+ self._add_parameter(
+ make_parameter(
+ transmissivity_trainable,
+ transmissivity,
+ "transmissivity",
+ transmissivity_bounds,
+ None,
+ )
+ )
+ self._add_parameter(make_parameter(nbar_trainable, nbar, "nbar", nbar_bounds))
@property
def X_matrix(self):
@@ -795,7 +885,7 @@ def Y_matrix(self):
return gaussian.loss_XYd(self.transmissivity.value, self.nbar.value)[1]
-class Amplifier(Parametrized, Transformation):
+class Amplifier(Channel):
r"""The noisy amplifier channel.
It corresponds to mixing with a thermal environment and applying a two-mode squeezing gate.
@@ -820,6 +910,10 @@ class Amplifier(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "Amp"
+ parallelizable = True
+
def __init__(
self,
gain: Union[Optional[float], Optional[List[float]]] = 1.0,
@@ -828,20 +922,14 @@ def __init__(
nbar_trainable: bool = False,
gain_bounds: Tuple[Optional[float], Optional[float]] = (1.0, None),
nbar_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None),
- modes: Optional[List[int]] = None,
+ modes: Optional[list[int]] = None,
):
super().__init__(
- gain=gain,
- gain_trainable=gain_trainable,
- gain_bounds=gain_bounds,
- nbar=nbar,
- nbar_trainable=nbar_trainable,
- nbar_bounds=nbar_bounds,
+ modes=modes or list(range(len(math.atleast_1d(gain)))),
+ name="Amplifier",
)
- self._modes = modes
- self.is_unitary = False
- self.is_gaussian = True
- self.short_name = "Amp"
+ self._add_parameter(make_parameter(gain_trainable, gain, "gain", gain_bounds))
+ self._add_parameter(make_parameter(nbar_trainable, nbar, "nbar", nbar_bounds))
@property
def X_matrix(self):
@@ -853,7 +941,7 @@ def Y_matrix(self):
# pylint: disable=no-member
-class AdditiveNoise(Parametrized, Transformation):
+class AdditiveNoise(Channel):
r"""The additive noise channel.
Equivalent to an amplifier followed by an attenuator. E.g.,
@@ -879,29 +967,29 @@ class AdditiveNoise(Parametrized, Transformation):
modes (optional, List[int]): the list of modes this gate is applied to
"""
+ is_gaussian = True
+ short_name = "Add"
+ parallelizable = True
+
def __init__(
self,
- noise: Union[Optional[float], Optional[List[float]]] = 0.0,
+ noise: Union[float, list[float]] = 0.0,
noise_trainable: bool = False,
noise_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None),
- modes: Optional[List[int]] = None,
+ modes: Optional[list[int]] = None,
):
super().__init__(
- noise=noise,
- noise_trainable=noise_trainable,
- noise_bounds=noise_bounds,
+ modes=modes or list(range(len(math.atleast_1d(noise)))),
+ name="AddNoise",
)
- self._modes = modes
- self.is_unitary = False
- self.is_gaussian = True
- self.short_name = "Add"
+ self._add_parameter(make_parameter(noise_trainable, noise, "noise", noise_bounds))
@property
def Y_matrix(self):
return gaussian.noise_Y(self.noise.value)
-class PhaseNoise(Parametrized, Transformation):
+class PhaseNoise(Channel):
r"""The phase noise channel.
The phase noise channel is a non-Gaussian transformation that is equivalent to
@@ -921,10 +1009,13 @@ def __init__(
modes: Optional[List[int]] = None,
):
super().__init__(
- phase_stdev=phase_stdev,
- phase_stdev_trainable=phase_stdev_trainable,
- phase_stdev_bounds=phase_stdev_bounds,
+ modes=modes or [0],
+            name="PhaseNoise",
)
+ self._add_parameter(
+ make_parameter(phase_stdev_trainable, phase_stdev, "phase_stdev", phase_stdev_bounds)
+ )
+
self._modes = modes or [0]
self.is_unitary = False
self.is_gaussian = False
@@ -950,9 +1041,7 @@ def primal(self, state):
coeff = math.cast(
math.exp(
- -0.5
- * self.phase_stdev.value**2
- * math.arange(-dm.shape[-2] + 1, dm.shape[-1]) ** 2
+ -0.5 * self.phase_stdev.value**2 * math.arange(-dm.shape[-2] + 1, dm.shape[-1]) ** 2
),
dm.dtype,
)
diff --git a/mrmustard/lab/states.py b/mrmustard/lab/states.py
index e7545c2bd..e8c6d0412 100644
--- a/mrmustard/lab/states.py
+++ b/mrmustard/lab/states.py
@@ -18,16 +18,14 @@
from typing import List, Optional, Sequence, Tuple, Union
-from mrmustard import settings
-from mrmustard.math import Math
+from mrmustard import math, settings
+from mrmustard.math.parameter_set import ParameterSet
+from mrmustard.math.parameters import update_symplectic
from mrmustard.physics import fock, gaussian
-from mrmustard.training import Parametrized
-from mrmustard.typing import RealMatrix, Scalar, Vector
+from mrmustard.utils.typing import RealMatrix, Scalar, Vector
from .abstract import State
-
-math = Math()
-
+from .utils import make_parameter
__all__ = [
"Vacuum",
@@ -47,17 +45,17 @@ class Vacuum(State):
def __init__(self, num_modes: int):
cov = gaussian.vacuum_cov(num_modes)
means = gaussian.vacuum_means(num_modes)
- State.__init__(self, cov=cov, means=means)
+ super().__init__(cov=cov, means=means)
-class Coherent(Parametrized, State):
+class Coherent(State):
r"""The N-mode coherent state.
Equivalent to applying a displacement to the vacuum state:
.. code-block::
- Coherent(x=0.5, y=0.2) == Vacuum(1) >> Dgate(x=0.5, y=0.3) # True
+ Coherent(x=0.5, y=0.2) == Vacuum(1) >> Dgate(x=0.5, y=0.2) # True
Parallelizable over x and y:
@@ -97,28 +95,22 @@ def __init__(
cutoffs: Optional[Sequence[int]] = None,
normalize: bool = False,
):
- Parametrized.__init__(
- self,
- x=x,
- y=y,
- x_trainable=x_trainable,
- y_trainable=y_trainable,
- x_bounds=x_bounds,
- y_bounds=y_bounds,
- )
- self._modes = modes
self._normalize = normalize
- means = gaussian.displacement(self.x.value, self.y.value)
+ self._parameter_set = ParameterSet()
+ self._add_parameter(make_parameter(x_trainable, x, "x", x_bounds))
+ self._add_parameter(make_parameter(y_trainable, y, "y", y_bounds))
+
+ means = gaussian.displacement(x, y)
cov = gaussian.vacuum_cov(means.shape[-1] // 2)
- State.__init__(self, cov=cov, means=means, cutoffs=cutoffs, modes=modes)
+ super().__init__(cov=cov, means=means, cutoffs=cutoffs, modes=modes)
@property
def means(self):
return gaussian.displacement(self.x.value, self.y.value)
-class SqueezedVacuum(Parametrized, State):
+class SqueezedVacuum(State):
r"""The N-mode squeezed vacuum state.
Equivalent to applying a squeezing gate to the vacuum state:
@@ -164,30 +156,25 @@ def __init__(
cutoffs: Optional[Sequence[int]] = None,
normalize: bool = False,
):
- Parametrized.__init__(
- self,
- r=r,
- phi=phi,
- r_trainable=r_trainable,
- phi_trainable=phi_trainable,
- r_bounds=r_bounds,
- phi_bounds=phi_bounds,
- )
self._modes = modes
self._normalize = normalize
- cov = gaussian.squeezed_vacuum_cov(self.r.value, self.phi.value)
+ self._parameter_set = ParameterSet()
+ self._add_parameter(make_parameter(r_trainable, r, "r", r_bounds))
+ self._add_parameter(make_parameter(phi_trainable, phi, "phi", phi_bounds))
+
+ cov = gaussian.squeezed_vacuum_cov(r, phi)
means = gaussian.vacuum_means(
cov.shape[-1] // 2,
)
- State.__init__(self, cov=cov, means=means, cutoffs=cutoffs)
+ super().__init__(cov=cov, means=means, cutoffs=cutoffs)
@property
def cov(self):
return gaussian.squeezed_vacuum_cov(self.r.value, self.phi.value)
-class TMSV(Parametrized, State):
+class TMSV(State):
r"""The 2-mode squeezed vacuum state.
Equivalent to applying a 50/50 beam splitter to a pair of squeezed vacuum states:
@@ -221,31 +208,22 @@ def __init__(
cutoffs: Optional[Sequence[int]] = None,
normalize: bool = False,
):
- Parametrized.__init__(
- self,
- r=r,
- phi=phi,
- r_trainable=r_trainable,
- phi_trainable=phi_trainable,
- r_bounds=r_bounds,
- phi_bounds=phi_bounds,
- )
- self._modes = modes
self._normalize = normalize
- cov = gaussian.two_mode_squeezed_vacuum_cov(
- self.r.value,
- self.phi.value,
- )
+ self._parameter_set = ParameterSet()
+ self._add_parameter(make_parameter(r_trainable, r, "r", r_bounds))
+ self._add_parameter(make_parameter(phi_trainable, phi, "phi", phi_bounds))
+
+ cov = gaussian.two_mode_squeezed_vacuum_cov(r, phi)
means = gaussian.vacuum_means(2)
- State.__init__(self, cov=cov, means=means, cutoffs=cutoffs)
+ super().__init__(cov=cov, means=means, cutoffs=cutoffs)
@property
def cov(self):
return gaussian.two_mode_squeezed_vacuum_cov(self.r.value, self.phi.value)
-class Thermal(Parametrized, State):
+class Thermal(State):
r"""The N-mode thermal state.
Equivalent to applying additive noise to the vacuum:
@@ -280,25 +258,22 @@ def __init__(
cutoffs: Optional[Sequence[int]] = None,
normalize: bool = False,
):
- Parametrized.__init__(
- self,
- nbar=nbar,
- nbar_trainable=nbar_trainable,
- nbar_bounds=nbar_bounds,
- )
self._modes = modes
self._normalize = normalize
+ self._parameter_set = ParameterSet()
+ self._add_parameter(make_parameter(nbar_trainable, nbar, "nbar", nbar_bounds))
+
cov = gaussian.thermal_cov(self.nbar.value)
means = gaussian.vacuum_means(cov.shape[-1] // 2)
- State.__init__(self, cov=cov, means=means, cutoffs=cutoffs)
+ super().__init__(cov=cov, means=means, cutoffs=cutoffs)
@property
def cov(self):
return gaussian.thermal_cov(self.nbar.value)
-class DisplacedSqueezed(Parametrized, State):
+class DisplacedSqueezed(State):
r"""The N-mode displaced squeezed state.
Equivalent to applying a displacement to the squeezed vacuum state:
@@ -358,27 +333,18 @@ def __init__(
cutoffs: Optional[Sequence[int]] = None,
normalize: bool = False,
):
- Parametrized.__init__(
- self,
- r=r,
- phi=phi,
- x=x,
- y=y,
- r_trainable=r_trainable,
- phi_trainable=phi_trainable,
- x_trainable=x_trainable,
- y_trainable=y_trainable,
- r_bounds=r_bounds,
- phi_bounds=phi_bounds,
- x_bounds=x_bounds,
- y_bounds=y_bounds,
- )
self._modes = modes
self._normalize = normalize
- cov = gaussian.squeezed_vacuum_cov(self.r.value, self.phi.value)
- means = gaussian.displacement(self.x.value, self.y.value)
- State.__init__(self, cov=cov, means=means, cutoffs=cutoffs, modes=modes)
+ self._parameter_set = ParameterSet()
+ self._add_parameter(make_parameter(x_trainable, x, "x", x_bounds))
+ self._add_parameter(make_parameter(y_trainable, y, "y", y_bounds))
+ self._add_parameter(make_parameter(r_trainable, r, "r", r_bounds))
+ self._add_parameter(make_parameter(phi_trainable, phi, "phi", phi_bounds))
+
+ cov = gaussian.squeezed_vacuum_cov(r, phi)
+ means = gaussian.displacement(x, y)
+ super().__init__(cov=cov, means=means, cutoffs=cutoffs, modes=modes)
@property
def cov(self):
@@ -389,7 +355,7 @@ def means(self):
return gaussian.displacement(self.x.value, self.y.value)
-class Gaussian(Parametrized, State):
+class Gaussian(State):
r"""The N-mode Gaussian state parametrized by a symplectic matrix and N symplectic eigenvalues.
The (mixed) Gaussian state is equivalent to applying a Gaussian symplectic transformation to a Thermal state:
@@ -439,23 +405,21 @@ def __init__(
raise ValueError(
f"Eigenvalues cannot be smaller than hbar/2 = {settings.HBAR}/2 = {settings.HBAR/2}"
)
- Parametrized.__init__(
- self,
- symplectic=symplectic,
- eigenvalues=eigenvalues,
- eigenvalues_trainable=eigenvalues_trainable,
- symplectic_trainable=symplectic_trainable,
- eigenvalues_bounds=(settings.HBAR / 2, None)
- if eigenvalues_bounds == (None, None)
- else eigenvalues_bounds,
- symplectic_bounds=(None, None),
- )
self._modes = modes
self._normalize = normalize
- cov = gaussian.gaussian_cov(self.symplectic.value, self.eigenvalues.value)
+ self._parameter_set = ParameterSet()
+ eb = (settings.HBAR / 2, None) if eigenvalues_bounds == (None, None) else eigenvalues_bounds
+ self._add_parameter(make_parameter(eigenvalues_trainable, eigenvalues, "eigenvalues", eb))
+ self._add_parameter(
+ make_parameter(
+ symplectic_trainable, symplectic, "symplectic", (None, None), update_symplectic
+ )
+ )
+
+ cov = gaussian.gaussian_cov(symplectic, eigenvalues)
means = gaussian.vacuum_means(cov.shape[-1] // 2)
- State.__init__(self, cov=cov, means=means, cutoffs=cutoffs)
+ super().__init__(cov=cov, means=means, cutoffs=cutoffs)
@property
def cov(self):
@@ -466,7 +430,7 @@ def is_mixed(self):
return any(self.eigenvalues.value > settings.HBAR / 2)
-class Fock(Parametrized, State):
+class Fock(State):
r"""The N-mode Fock state.
Args:
@@ -483,8 +447,7 @@ def __init__(
cutoffs: Sequence[int] = None,
normalize: bool = False,
):
- State.__init__(self, ket=fock.fock_state(n), cutoffs=cutoffs)
- Parametrized.__init__(self)
+ super().__init__(ket=fock.fock_state(n), cutoffs=cutoffs)
self._n = [n] if isinstance(n, int) else n
self._modes = modes
diff --git a/mrmustard/lab/utils.py b/mrmustard/lab/utils.py
new file mode 100644
index 000000000..9a7eb3f53
--- /dev/null
+++ b/mrmustard/lab/utils.py
@@ -0,0 +1,46 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=no-member
+
+"""
+This module contains the utility functions used by the classes in ``mrmustard.lab``.
+"""
+from typing import Callable, Optional, Tuple
+
+from mrmustard.math.parameters import update_euclidean, Constant, Variable
+
+
+def make_parameter(
+ is_trainable: bool,
+ value: any,
+ name: str,
+ bounds: Tuple[Optional[float], Optional[float]],
+ update_fn: Callable = update_euclidean,
+):
+ r"""
+ Returns a constant or variable parameter with given name, value, bounds, and update function.
+
+ Args:
+ is_trainable: Whether to return a variable (``True``) or constant (``False``) parameter.
+ value: The value of the returned parameter.
+ name: The name of the returned parameter.
+ bounds: The bounds of the returned parameter (ignored if ``is_trainable`` is ``False``).
+ update_fn: The update_fn of the returned parameter (ignored if ``is_trainable`` is ``False``).
+ """
+ if isinstance(value, (Constant, Variable)):
+ return value
+ if not is_trainable:
+ return Constant(value=value, name=name)
+ return Variable(value=value, name=name, bounds=bounds, update_fn=update_fn)
diff --git a/mrmustard/math/__init__.py b/mrmustard/math/__init__.py
index 1ff9415ca..d2709df9b 100644
--- a/mrmustard/math/__init__.py
+++ b/mrmustard/math/__init__.py
@@ -13,49 +13,19 @@
# limitations under the License.
r"""
-The ``math`` module contains low-level functions for performing mathematical operations.
-
-It is recommended that users access the backends using the an instance of the :class:`Math` class rather than the backends themselves.
-
-The Math class is a wrapper that passes the calls to the currently active backend, which is determined by
-the ``BACKEND`` parameter in ``mrmustard.settings`` (the default is ``tensorflow``).
-
-The advantage of using the Math class is that the same code can run on different backends, allowing for a
-greater degree of flexibility and code reuse.
-
-.. code-block::
-
- from mrmustard.math import Math
- math = Math()
- math.cos(x) # tensorflow backend
-
- from mrmustard import settings
- settings.BACKEND = 'torch'
-
- math.cos(x) # torch backend
+The point of entry for the backend.
"""
-
-
-import importlib
-from mrmustard import settings
-
-if importlib.util.find_spec("tensorflow"):
- from mrmustard.math.tensorflow import TFMath
-if importlib.util.find_spec("torch"):
- from mrmustard.math.torch import TorchMath
-
-
-class Math:
- r"""
- This class is a switcher for performing math operations on the currently active backend.
- """
-
- def __getattribute__(self, name):
- if settings.BACKEND == "tensorflow":
- return object.__getattribute__(TFMath(), name)
- elif settings.BACKEND == "torch":
- return object.__getattribute__(TorchMath(), name)
- else:
- raise ValueError(
- f"No `{settings.BACKEND}` backend found. Ensure your backend is either ``'tensorflow'`` or ``'torch'``"
- )
+import sys
+
+from .autocast import *
+from .caching import *
+from .backend_base import *
+from .backend_manager import BackendManager
+from .backend_numpy import *
+from .lattice import *
+from .parameters import *
+from .parameter_set import *
+from .tensor_networks import *
+from .tensor_wrappers import *
+
+sys.modules[__name__] = BackendManager()
diff --git a/tests/test_utils/test_graphics.py b/mrmustard/math/backend_base.py
similarity index 57%
rename from tests/test_utils/test_graphics.py
rename to mrmustard/math/backend_base.py
index 5cfa84a90..4be7585a2 100644
--- a/tests/test_utils/test_graphics.py
+++ b/mrmustard/math/backend_base.py
@@ -1,4 +1,4 @@
-# Copyright 2022 Xanadu Quantum Technologies Inc.
+# Copyright 2023 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,16 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Test related to visualization on MrMustard."""
-from mrmustard.lab import Coherent
-from mrmustard.utils.graphics import mikkel_plot
+class BackendBase:
+ r"""
+ A base class for backends.
+ """
+ def __init__(self, name):
+ self._name = name
-def test_mikkel_plot():
- """Tests that mikkel plot returns figure and axes."""
- dm = Coherent().dm(cutoffs=[10])
- fig, axs = mikkel_plot(dm.numpy())
-
- assert fig is not None
- assert axs is not None
+ @property
+ def name(self):
+ r"""
+ The name of this backend.
+ """
+ return self._name
diff --git a/mrmustard/math/backend_manager.py b/mrmustard/math/backend_manager.py
new file mode 100644
index 000000000..be58ebd31
--- /dev/null
+++ b/mrmustard/math/backend_manager.py
@@ -0,0 +1,1521 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains the backend manager."""
+
+
+import importlib.util
+import sys
+from functools import lru_cache
+from itertools import product
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
+
+import numpy as np
+from scipy.special import binom
+from scipy.stats import ortho_group, unitary_group
+
+from ..utils.settings import settings
+from ..utils.typing import (
+ Batch,
+ Matrix,
+ Tensor,
+ Trainable,
+ Vector,
+)
+from .backend_base import BackendBase
+from .backend_numpy import BackendNumpy
+
+__all__ = [
+ "BackendManager",
+]
+
+# ~~~~~~~
+# Helpers
+# ~~~~~~~
+
+
+def lazy_import(module_name: str):
+ r"""
+ Returns module and loader for lazy import.
+
+ Args:
+ module_name: The name of the module to import.
+ """
+ try:
+ return sys.modules[module_name], None
+ except KeyError:
+ spec = importlib.util.find_spec(module_name)
+ module = importlib.util.module_from_spec(spec)
+ loader = importlib.util.LazyLoader(spec.loader)
+ return module, loader
+
+
+# lazy import for numpy
+module_name_np = "mrmustard.math.backend_numpy"
+module_np, loader_np = lazy_import(module_name_np)
+
+# lazy import for tensorflow
+module_name_tf = "mrmustard.math.backend_tensorflow"
+module_tf, loader_tf = lazy_import(module_name_tf)
+
+all_modules = {
+ "numpy": {"module": module_np, "loader": loader_np, "object": "BackendNumpy"},
+ "tensorflow": {
+ "module": module_tf,
+ "loader": loader_tf,
+ "object": "BackendTensorflow",
+ },
+}
+
+
+class BackendManager: # pylint: disable=too-many-public-methods, fixme
+ r"""
+ A class to manage the different backends supported by Mr Mustard.
+ """
+
+ # the backend in use, which is numpy by default
+ _backend = BackendNumpy()
+
+ # the configured Euclidean optimizer.
+ _euclidean_opt: Optional[type] = None
+
+ # whether or not the backend can be changed
+ _is_immutable = False
+
+ def __init__(self) -> None:
+ # binding types and decorators of numpy backend
+ self._bind()
+
+ def _apply(self, fn: str, args: Optional[Sequence[Any]] = ()) -> Any:
+ r"""
+ Applies a function ``fn`` from the backend in use to the given ``args``.
+ """
+ try:
+ attr = getattr(self.backend, fn)
+ except AttributeError:
+ msg = f"Function ``{fn}`` not implemented for backend ``{self.backend_name}``."
+ # pylint: disable=raise-missing-from
+ raise NotImplementedError(msg)
+ return attr(*args)
+
+ def _bind(self) -> None:
+ r"""
+ Binds the types and decorators of this backend manager to those of the given ``self._backend``.
+ """
+ for name in [
+ "int32",
+ "float32",
+ "float64",
+ "complex64",
+ "complex128",
+ "hermite_renormalized",
+ "hermite_renormalized_binomial",
+ "hermite_renormalized_diagonal_reorderedAB",
+ "hermite_renormalized_1leftoverMode_reorderedAB",
+ ]:
+ setattr(self, name, getattr(self._backend, name))
+
+ def __new__(cls):
+ # singleton
+ try:
+ return cls.instance
+ except AttributeError:
+ cls.instance = super(BackendManager, cls).__new__(cls)
+ return cls.instance
+
+ def __repr__(self) -> str:
+ return f"Backend({self.backend_name})"
+
+ @property
+ def backend(self) -> BackendBase:
+ r"""
+ The backend that is being used.
+ """
+ self._is_immutable = True
+ return self._backend
+
+ @property
+ def backend_name(self) -> str:
+ r"""
+ The name of the backend in use.
+ """
+ return self._backend.name
+
+ def change_backend(self, name: str) -> None:
+ r"""
+ Changes the backend to a different one.
+
+ Args:
+ name: The name of the new backend.
+ """
+ if name not in ["numpy", "tensorflow"]:
+ msg = "Backend must be either ``numpy`` or ``tensorflow``"
+ raise ValueError(msg)
+
+ if self.backend_name != name:
+ if self._is_immutable:
+ msg = "Can no longer change the backend in this session."
+ raise ValueError(msg)
+
+ module = all_modules[name]["module"]
+ object = all_modules[name]["object"]
+ try:
+ backend = getattr(module, object)()
+ except AttributeError:
+ # lazy import
+ loader = all_modules[name]["loader"]
+ loader.exec_module(module)
+ backend = getattr(module, object)()
+
+ # switch backend
+ self._backend = backend
+
+ # bind
+ self._bind()
+
+ # ~~~~~~~
+ # Methods
+ # ~~~~~~~
+ # Below are the methods supported by the various backends.
+
+ def abs(self, array: Tensor) -> Tensor:
+ r"""The absolute value of array.
+
+ Args:
+ array: The array to take the absolute value of.
+
+ Returns:
+ The absolute value of the given ``array``.
+ """
+ return self._apply("abs", (array,))
+
+ def any(self, array: Tensor) -> bool:
+ r"""Returns ``True`` if any element of array is ``True``, ``False`` otherwise.
+
+ Args:
+ array: The array to check.
+
+ Returns:
+ ``True`` if any element of array is ``True``, ``False`` otherwise.
+ """
+ return self._apply("any", (array,))
+
+ def arange(self, start: int, limit: int = None, delta: int = 1, dtype: Any = None) -> Tensor:
+ r"""Returns an array of evenly spaced values within a given interval.
+
+ Args:
+ start: The start of the interval.
+ limit: The end of the interval.
+ delta: The step size.
+ dtype: The dtype of the returned array.
+
+ Returns:
+ The array of evenly spaced values.
+ """
+ # NOTE: is float64 by default
+ return self._apply("arange", (start, limit, delta, dtype))
+
+ def asnumpy(self, tensor: Tensor) -> Tensor:
+ r"""Converts an array to a numpy array.
+
+ Args:
+ tensor: The tensor to convert.
+
+ Returns:
+            The corresponidng numpy array.
+ """
+ return self._apply("asnumpy", (tensor,))
+
+ def assign(self, tensor: Tensor, value: Tensor) -> Tensor:
+ r"""Assigns value to tensor.
+
+ Args:
+ tensor: The tensor to assign to.
+ value: The value to assign.
+
+ Returns:
+ The tensor with value assigned
+ """
+ return self._apply("assign", (tensor, value))
+
+ def astensor(self, array: Tensor, dtype=None):
+ r"""Converts a numpy array to a tensor.
+
+ Args:
+ array: The numpy array to convert.
+ dtype: The dtype of the tensor. If ``None``, the returned tensor
+ is of type ``float``.
+
+ Returns:
+ The tensor with dtype.
+ """
+ return self._apply("astensor", (array, dtype))
+
+ def atleast_1d(self, array: Tensor, dtype=None) -> Tensor:
+ r"""Returns an array with at least one dimension.
+
+ Args:
+ array: The array to convert.
+ dtype: The data type of the array. If ``None``, the returned array
+ is of the same type as the given one.
+
+ Returns:
+ The array with at least one dimension.
+ """
+ return self._apply("atleast_1d", (array, dtype))
+
+ def atleast_2d(self, array: Tensor, dtype=None) -> Tensor:
+ r"""Returns an array with at least two dimensions.
+
+ Args:
+ array: The array to convert.
+ dtype: The data type of the array. If ``None``, the returned array
+ is of the same type as the given one.
+
+ Returns:
+ The array with at least two dimensions.
+ """
+ return self._apply("atleast_2d", (array, dtype))
+
+ def atleast_3d(self, array: Tensor, dtype=None) -> Tensor:
+ r"""Returns an array with at least three dimensions by eventually inserting
+ new axes at the beginning. Note this is not the way atleast_3d works in numpy
+ and tensorflow, where it adds at the beginning and/or end.
+
+ Args:
+ array: The array to convert.
+ dtype: The data type of the array. If ``None``, the returned array
+ is of the same type as the given one.
+
+ Returns:
+ The array with at least three dimensions.
+ """
+ return self._apply("atleast_3d", (array, dtype))
+
+ def block_diag(self, mat1: Matrix, mat2: Matrix) -> Matrix:
+ r"""Returns a block diagonal matrix from the given matrices.
+
+ Args:
+ mat1: A matrix.
+ mat2: A matrix.
+
+ Returns:
+ A block diagonal matrix from the given matrices.
+ """
+ return self._apply("block_diag", (mat1, mat2))
+
+ def boolean_mask(self, tensor: Tensor, mask: Tensor) -> Tensor:
+ """
+ Returns a tensor based on the truth value of the boolean mask.
+
+ Args:
+ tensor: A tensor.
+ mask: A boolean mask.
+
+ Returns:
+ A tensor based on the truth value of the boolean mask.
+ """
+ return self._apply("boolean_mask", (tensor, mask))
+
+ def block(self, blocks: List[List[Tensor]], axes=(-2, -1)) -> Tensor:
+ r"""Returns a matrix made from the given blocks.
+
+ Args:
+ blocks: A list of lists of compatible blocks.
+ axes: The axes to stack the blocks along.
+
+ Returns:
+ The matrix made of blocks.
+ """
+ return self._apply("block", (blocks, axes))
+
+ def cast(self, array: Tensor, dtype=None) -> Tensor:
+ r"""Casts ``array`` to ``dtype``.
+
+ Args:
+ array: The array to cast.
+ dtype: The data type to cast to. If ``None``, the returned array
+ is the same as the given one.
+
+ Returns:
+ The array cast to dtype.
+ """
+ return self._apply("cast", (array, dtype))
+
+ def clip(self, array: Tensor, a_min: float, a_max: float) -> Tensor:
+ r"""Clips array to the interval ``[a_min, a_max]``.
+
+ Args:
+ array: The array to clip.
+ a_min: The minimum value.
+ a_max: The maximum value.
+
+ Returns:
+ The clipped array.
+ """
+ return self._apply("clip", (array, a_min, a_max))
+
+ def concat(self, values: Sequence[Tensor], axis: int) -> Tensor:
+ r"""Concatenates values along the given axis.
+
+ Args:
+ values: The values to concatenate.
+ axis: The axis along which to concatenate.
+
+ Returns:
+ The concatenated values.
+ """
+ return self._apply("concat", (values, axis))
+
+ def conj(self, array: Tensor) -> Tensor:
+ r"""The complex conjugate of array.
+
+ Args:
+ array: The array to take the complex conjugate of.
+
+ Returns:
+ The complex conjugate of the given ``array``.
+ """
+ return self._apply("conj", (array,))
+
+ def constraint_func(
+ self, bounds: Tuple[Optional[float], Optional[float]]
+ ) -> Optional[Callable]:
+ r"""Returns a constraint function for the given bounds.
+
+ A constraint function will clip the value to the interval given by the bounds.
+
+ .. note::
+
+ The upper and/or lower bounds can be ``None``, in which case the constraint
+ function will not clip the value.
+
+ Args:
+ bounds: The bounds of the constraint.
+
+ Returns:
+ The constraint function.
+ """
+        return self._apply("constraint_func", (bounds,))
+
+ def convolution(
+ self,
+ array: Tensor,
+ filters: Tensor,
+ padding: Optional[str] = None,
+ data_format="NWC",
+ ) -> Tensor: # TODO: remove strides and data_format?
+ r"""Performs a convolution on array with filters.
+
+ Args:
+ array: The array to convolve.
+ filters: The filters to convolve with.
+ padding: The padding mode.
+ data_format: The data format of the array.
+
+ Returns:
+ The convolved array.
+ """
+ return self._apply("convolution", (array, filters, padding, data_format))
+
+ def cos(self, array: Tensor) -> Tensor:
+ r"""The cosine of an array.
+
+ Args:
+ array: The array to take the cosine of.
+
+ Returns:
+ The cosine of ``array``.
+ """
+ return self._apply("cos", (array,))
+
+ def cosh(self, array: Tensor) -> Tensor:
+ r"""The hyperbolic cosine of array.
+
+ Args:
+ array: The array to take the hyperbolic cosine of.
+
+ Returns:
+ The hyperbolic cosine of ``array``.
+ """
+ return self._apply("cosh", (array,))
+
+ def det(self, matrix: Tensor) -> Tensor:
+ r"""The determinant of matrix.
+
+ Args:
+ matrix: The matrix to take the determinant of
+
+ Returns:
+ The determinant of ``matrix``.
+ """
+ return self._apply("det", (matrix,))
+
+ def diag(self, array: Tensor, k: int = 0) -> Tensor:
+ r"""The array made by inserting the given array along the :math:`k`-th diagonal.
+
+ Args:
+ array: The array to insert.
+ k: The ``k``-th diagonal to insert array into.
+
+ Returns:
+ The array with ``array`` inserted into the ``k``-th diagonal.
+ """
+ return self._apply("diag", (array, k))
+
+ def diag_part(self, array: Tensor, k: int = 0) -> Tensor:
+ r"""The array of the main diagonal of array.
+
+ Args:
+ array: The array to extract the main diagonal of.
+ k: The diagonal to extract.
+
+ Returns:
+ The array of the main diagonal of ``array``.
+ """
+ return self._apply("diag_part", (array, k))
+
+ def eigvals(self, tensor: Tensor) -> Tensor:
+ r"""The eigenvalues of a tensor.
+
+ Args:
+ tensor: The tensor to calculate the eigenvalues of.
+
+ Returns:
+ The eigenvalues of ``tensor``.
+ """
+ return self._apply("eigvals", (tensor,))
+
+ def eigh(self, tensor: Tensor) -> Tensor:
+ """
+ The eigenvalues and eigenvectors of a matrix.
+
+ Args:
+ tensor: The tensor to calculate the eigenvalues and eigenvectors of.
+
+ Returns:
+ The eigenvalues and eigenvectors of ``tensor``.
+ """
+ return self._apply("eigh", (tensor,))
+
+ def einsum(self, string: str, *tensors) -> Tensor:
+ r"""The result of the Einstein summation convention on the tensors.
+
+ Args:
+ string: The string of the Einstein summation convention.
+ tensors: The tensors to perform the Einstein summation on.
+
+ Returns:
+ The result of the Einstein summation convention.
+ """
+ return self._apply("einsum", (string, *tensors))
+
+ def exp(self, array: Tensor) -> Tensor:
+ r"""The exponential of array element-wise.
+
+ Args:
+ array: The array to take the exponential of.
+
+ Returns:
+ The exponential of array.
+ """
+ return self._apply("exp", (array,))
+
+ def expand_dims(self, array: Tensor, axis: int) -> Tensor:
+ r"""The array with an additional dimension inserted at the given axis.
+
+ Args:
+ array: The array to expand.
+ axis: The axis to insert the new dimension.
+
+ Returns:
+ The array with an additional dimension inserted at the given axis.
+ """
+ return self._apply("expand_dims", (array, axis))
+
+ def expm(self, matrix: Tensor) -> Tensor:
+ r"""The matrix exponential of matrix.
+
+ Args:
+ matrix: The matrix to take the exponential of.
+
+ Returns:
+ The exponential of ``matrix``.
+ """
+ return self._apply("expm", (matrix,))
+
+ def eye(self, size: int, dtype=None) -> Tensor:
+ r"""The identity matrix of size.
+
+ Args:
+ size: The size of the identity matrix
+ dtype: The data type of the identity matrix. If ``None``,
+ the returned matrix is of type ``float``.
+
+ Returns:
+ The identity matrix.
+ """
+ return self._apply("eye", (size, dtype))
+
+ def eye_like(self, array: Tensor) -> Tensor:
+ r"""The identity matrix of the same shape and dtype as array.
+
+ Args:
+ array: The array to create the identity matrix of.
+
+ Returns:
+ The identity matrix.
+ """
+ return self._apply("eye_like", (array,))
+
+ def from_backend(self, value: Any) -> bool:
+ r"""Whether the given tensor is a tensor of the concrete backend.
+
+ Args:
+ value: A value.
+
+ Returns:
+ Whether given ``value`` is a tensor of the concrete backend.
+ """
+ return self._apply("from_backend", (value,))
+
+ def gather(self, array: Tensor, indices: Batch[int], axis: Optional[int] = None) -> Tensor:
+ r"""The values of the array at the given indices.
+
+ Args:
+ array: The array to gather values from.
+ indices: The indices to gather values from.
+ axis: The axis to gather values from.
+
+ Returns:
+ The values of the array at the given indices.
+ """
+ return self._apply(
+ "gather",
+ (
+ array,
+ indices,
+ axis,
+ ),
+ )
+
+ def hermite_renormalized_batch(
+ self, A: Tensor, B: Tensor, C: Tensor, shape: Tuple[int]
+ ) -> Tensor:
+ r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
+ series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)`
+ at the denominator rather than :math:`n!`. It computes all the amplitudes within the
+        tensor of given shape in the case where B is a batched vector with a batch dimension on the
+        last index.
+
+ Args:
+ A: The A matrix.
+ B: The batched B vector with its batch dimension on the last index.
+ C: The C scalar.
+ shape: The shape of the final tensor.
+
+ Returns:
+ The batched Hermite polynomial of given shape.
+ """
+ return self._apply("hermite_renormalized_batch", (A, B, C, shape))
+
    def hermite_renormalized_diagonal(
        self, A: Tensor, B: Tensor, C: Tensor, cutoffs: Tuple[int]
    ) -> Tensor:
        r"""First, reorders the A and B parameters of the Bargmann representation to match the
        conventions in ``mrmustard.math.compactFock``. Then, calculates the required
        renormalized multidimensional Hermite polynomial.

        Args:
            A: The A matrix.
            B: The B vector.
            C: The C scalar.
            cutoffs: The cutoffs of the final tensor.

        Returns:
            The renormalized Hermite polynomial.
        """
        return self._apply("hermite_renormalized_diagonal", (A, B, C, cutoffs))
+
    def hermite_renormalized_diagonal_batch(
        self, A: Tensor, B: Tensor, C: Tensor, cutoffs: Tuple[int]
    ) -> Tensor:
        r"""First, reorders the A and B parameters of the Bargmann representation to match the
        conventions in ``mrmustard.math.compactFock``. Then, calculates the required
        renormalized multidimensional Hermite polynomial.

        Same as :meth:`hermite_renormalized_diagonal`, but works with a batch of different B's.

        Args:
            A: The A matrix.
            B: The batched B vector.
            C: The C scalar.
            cutoffs: The cutoffs of the final tensor.

        Returns:
            The renormalized Hermite polynomial for each B in the batch.
        """
        return self._apply("hermite_renormalized_diagonal_batch", (A, B, C, cutoffs))
+
    def hermite_renormalized_1leftoverMode(
        self, A: Tensor, B: Tensor, C: Tensor, cutoffs: Tuple[int]
    ) -> Tensor:
        r"""First, reorders the A and B parameters of the Bargmann representation to match the
        conventions in ``mrmustard.math.compactFock``. Then, calculates the required
        renormalized multidimensional Hermite polynomial.

        Args:
            A: The A matrix.
            B: The B vector.
            C: The C scalar.
            cutoffs: The cutoffs of the final tensor.

        Returns:
            The renormalized Hermite polynomial.
        """
        return self._apply("hermite_renormalized_1leftoverMode", (A, B, C, cutoffs))
+
+ def imag(self, array: Tensor) -> Tensor:
+ r"""The imaginary part of array.
+
+ Args:
+ array: The array to take the imaginary part of
+
+ Returns:
+ The imaginary part of array
+ """
+ return self._apply("imag", (array,))
+
+ def inv(self, tensor: Tensor) -> Tensor:
+ r"""The inverse of tensor.
+
+ Args:
+ tensor: The tensor to take the inverse of
+
+ Returns:
+ The inverse of tensor
+ """
+ return self._apply("inv", (tensor,))
+
+ def is_trainable(self, tensor: Tensor) -> bool:
+ r"""Whether the given tensor is trainable.
+
+ Args:
+ tensor: The tensor to train.
+
+ Returns:
+ Whether the given tensor can be trained.
+ """
+ return self._apply("is_trainable", (tensor,))
+
+ def lgamma(self, x: Tensor) -> Tensor:
+ r"""The natural logarithm of the gamma function of ``x``.
+
+ Args:
+ x: The array to take the natural logarithm of the gamma function of
+
+ Returns:
+ The natural logarithm of the gamma function of ``x``
+ """
+ return self._apply("lgamma", (x,))
+
+ def log(self, x: Tensor) -> Tensor:
+ r"""The natural logarithm of ``x``.
+
+ Args:
+ x: The array to take the natural logarithm of
+
+ Returns:
+ The natural logarithm of ``x``
+ """
+ return self._apply("log", (x,))
+
+ def make_complex(self, real: Tensor, imag: Tensor) -> Tensor:
+ """Given two real tensors representing the real and imaginary part of a complex number,
+ this operation returns a complex tensor. The input tensors must have the same shape.
+
+ Args:
+ real: The real part of the complex number.
+ imag: The imaginary part of the complex number.
+
+ Returns:
+ The complex array ``real + 1j * imag``.
+ """
+ return self._apply("make_complex", (real, imag))
+
+ def matmul(self, *matrices: Matrix) -> Tensor:
+ r"""The matrix product of the given matrices.
+
+ Args:
+ matrices: The matrices to multiply.
+
+ Returns:
+ The matrix product
+ """
+ return self._apply("matmul", matrices)
+
+ def matvec(self, a: Matrix, b: Vector) -> Tensor:
+ r"""The matrix vector product of ``a`` (matrix) and ``b`` (vector).
+
+ Args:
+ a: The matrix to multiply
+ b: The vector to multiply
+
+ Returns:
+ The matrix vector product of ``a`` and ``b``
+ """
+ return self._apply("matvec", (a, b))
+
+ def maximum(self, a: Tensor, b: Tensor) -> Tensor:
+ r"""The element-wise maximum of ``a`` and ``b``.
+
+ Args:
+ a: The first array to take the maximum of
+ b: The second array to take the maximum of
+
+ Returns:
+ The element-wise maximum of ``a`` and ``b``
+ """
+ return self._apply(
+ "maximum",
+ (
+ a,
+ b,
+ ),
+ )
+
+ def minimum(self, a: Tensor, b: Tensor) -> Tensor:
+ r"""The element-wise minimum of ``a`` and ``b``.
+
+ Args:
+ a: The first array to take the minimum of
+ b: The second array to take the minimum of
+
+ Returns:
+ The element-wise minimum of ``a`` and ``b``
+ """
+ return self._apply(
+ "minimum",
+ (
+ a,
+ b,
+ ),
+ )
+
+ def new_variable(
+ self,
+ value: Tensor,
+ bounds: Tuple[Optional[float], Optional[float]],
+ name: str,
+ dtype=None,
+ ) -> Tensor:
+ r"""Returns a new variable with the given value and bounds.
+
+ Args:
+ value: The value of the new variable.
+ bounds: The bounds of the new variable.
+ name: The name of the new variable.
+ dtype: dtype of the new variable. If ``None``, casts it to float.
+ Returns:
+ The new variable.
+ """
+ return self._apply("new_variable", (value, bounds, name, dtype))
+
+ def new_constant(self, value: Tensor, name: str, dtype=None) -> Tensor:
+ r"""Returns a new constant with the given value.
+
+ Args:
+ value: The value of the new constant
+ name (str): name of the new constant
+ dtype (type): dtype of the array
+
+ Returns:
+ The new constant
+ """
+ return self._apply("new_constant", (value, name, dtype))
+
+ def norm(self, array: Tensor) -> Tensor:
+ r"""The norm of array.
+
+ Args:
+ array: The array to take the norm of
+
+ Returns:
+ The norm of array
+ """
+ return self._apply("norm", (array,))
+
+ def ones(self, shape: Sequence[int], dtype=None) -> Tensor:
+ r"""Returns an array of ones with the given ``shape`` and ``dtype``.
+
+ Args:
+ shape (tuple): shape of the array
+ dtype (type): dtype of the array. If ``None``, the returned array is
+ of type ``float``.
+
+ Returns:
+ The array of ones
+ """
+ # NOTE : should be float64 by default
+ return self._apply("ones", (shape, dtype))
+
+ def ones_like(self, array: Tensor) -> Tensor:
+ r"""Returns an array of ones with the same shape and ``dtype`` as ``array``.
+
+ Args:
+ array: The array to take the shape and dtype of
+
+ Returns:
+ The array of ones
+ """
+ return self._apply("ones_like", (array,))
+
+ def outer(self, array1: Tensor, array2: Tensor) -> Tensor:
+ r"""The outer product of ``array1`` and ``array2``.
+
+ Args:
+ array1: The first array to take the outer product of
+ array2: The second array to take the outer product of
+
+ Returns:
+ The outer product of array1 and array2
+ """
+ return self._apply("outer", (array1, array2))
+
+ def pad(
+ self,
+ array: Tensor,
+ paddings: Sequence[Tuple[int, int]],
+ mode="CONSTANT",
+ constant_values=0,
+ ) -> Tensor:
+ r"""The padded array.
+
+ Args:
+ array: The array to pad
+ paddings (tuple): paddings to apply
+ mode (str): mode to apply the padding
+ constant_values (int): constant values to use for padding
+
+ Returns:
+ The padded array
+ """
+ return self._apply("pad", (array, paddings, mode, constant_values))
+
+ def pinv(self, matrix: Tensor) -> Tensor:
+ r"""The pseudo-inverse of matrix.
+
+ Args:
+ matrix: The matrix to take the pseudo-inverse of
+
+ Returns:
+ The pseudo-inverse of matrix
+ """
+ return self._apply("pinv", (matrix,))
+
+ def pow(self, x: Tensor, y: Tensor) -> Tensor:
+ r"""Returns :math:`x^y`. Broadcasts ``x`` and ``y`` if necessary.
+ Args:
+ x: The base
+ y: The exponent
+
+ Returns:
+ The :math:`x^y`
+ """
+ return self._apply("pow", (x, y))
+
+ def real(self, array: Tensor) -> Tensor:
+ r"""The real part of ``array``.
+
+ Args:
+ array: The array to take the real part of
+
+ Returns:
+ The real part of ``array``
+ """
+ return self._apply("real", (array,))
+
+ def reshape(self, array: Tensor, shape: Sequence[int]) -> Tensor:
+ r"""The reshaped array.
+
+ Args:
+ array: The array to reshape
+ shape (tuple): shape to reshape the array to
+
+ Returns:
+ The reshaped array
+ """
+ return self._apply("reshape", (array, shape))
+
+ def round(self, array: Tensor, decimals: int) -> Tensor:
+ r"""The array rounded to the nearest integer.
+
+ Args:
+ array: The array to round
+ decimals: number of decimals to round to
+
+ Returns:
+ The array rounded to the nearest integer
+ """
+ return self._apply("round", (array, decimals))
+
+ def set_diag(self, array: Tensor, diag: Tensor, k: int) -> Tensor:
+ r"""The array with the diagonal set to ``diag``.
+
+ Args:
+ array: The array to set the diagonal of
+ diag: The diagonal to set
+ k (int): diagonal to set
+
+ Returns:
+ The array with the diagonal set to ``diag``
+ """
+ return self._apply("set_diag", (array, diag, k))
+
+ def sin(self, array: Tensor) -> Tensor:
+ r"""The sine of ``array``.
+
+ Args:
+ array: The array to take the sine of
+
+ Returns:
+ The sine of ``array``
+ """
+ return self._apply("sin", (array,))
+
+ def sinh(self, array: Tensor) -> Tensor:
+ r"""The hyperbolic sine of ``array``.
+
+ Args:
+ array: The array to take the hyperbolic sine of
+
+ Returns:
+ The hyperbolic sine of ``array``
+ """
+ return self._apply("sinh", (array,))
+
+ def solve(self, matrix: Tensor, rhs: Tensor) -> Tensor:
+ r"""The solution of the linear system :math:`Ax = b`.
+
+ Args:
+ matrix: The matrix :math:`A`
+ rhs: The vector :math:`b`
+
+ Returns:
+ The solution :math:`x`
+ """
+ return self._apply("solve", (matrix, rhs))
+
+ def sqrt(self, x: Tensor, dtype=None) -> Tensor:
+ r"""The square root of ``x``.
+
+ Args:
+ x: The array to take the square root of
+ dtype: ``dtype`` of the output array.
+
+ Returns:
+ The square root of ``x``
+ """
+ return self._apply("sqrt", (x, dtype))
+
+ def sqrtm(self, tensor: Tensor, dtype=None) -> Tensor:
+ r"""The matrix square root.
+
+ Args:
+ tensor: The tensor to take the matrix square root of.
+ dtype: The ``dtype`` of the output tensor. If ``None``, the output
+ is of type ``math.complex128``.
+
+ Returns:
+ The square root of ``x``"""
+ return self._apply("sqrtm", (tensor, dtype))
+
    def sum(self, array: Tensor, axes: Optional[Sequence[int]] = None):
        r"""The sum of array.

        Args:
            array: The array to take the sum of
            axes (tuple): axes to sum over

        Returns:
            The sum of array
        """
        if axes is not None:
            # Normalize the axis order: negative axes ascending, positive axes
            # descending. NOTE(review): presumably so a backend that reduces one
            # axis at a time never invalidates the remaining (positive) indices
            # as dimensions disappear — confirm against the backend implementations.
            neg = [a for a in axes if a < 0]
            pos = [a for a in axes if a >= 0]
            axes = sorted(neg) + sorted(pos)[::-1]
        return self._apply("sum", (array, axes))
+
+ def tensordot(self, a: Tensor, b: Tensor, axes: Sequence[int]) -> Tensor:
+ r"""The tensordot product of ``a`` and ``b``.
+
+ Args:
+ a: The first array to take the tensordot product of
+ b: The second array to take the tensordot product of
+ axes: The axes to take the tensordot product over
+
+ Returns:
+ The tensordot product of ``a`` and ``b``
+ """
+ return self._apply("tensordot", (a, b, axes))
+
+ def tile(self, array: Tensor, repeats: Sequence[int]) -> Tensor:
+ r"""The tiled array.
+
+ Args:
+ array: The array to tile
+ repeats (tuple): number of times to tile the array along each axis
+
+ Returns:
+ The tiled array
+ """
+ return self._apply("tile", (array, repeats))
+
+ def trace(self, array: Tensor, dtype=None) -> Tensor:
+ r"""The trace of array.
+
+ Args:
+ array: The array to take the trace of
+ dtype (type): ``dtype`` of the output array
+
+ Returns:
+ The trace of array
+ """
+ return self._apply("trace", (array, dtype))
+
+ def transpose(self, a: Tensor, perm: Sequence[int] = None):
+ r"""The transposed arrays.
+
+ Args:
+ a: The array to transpose
+ perm (tuple): permutation to apply to the array
+
+ Returns:
+ The transposed array
+ """
+ return self._apply("transpose", (a, perm))
+
+ def update_tensor(self, tensor: Tensor, indices: Tensor, values: Tensor) -> Tensor:
+ r"""Updates a tensor in place with the given values.
+
+ Args:
+ tensor: The tensor to update
+ indices: The indices to update
+ values: The values to update
+
+ Returns:
+ The updated tensor
+ """
+ return self._apply("update_tensor", (tensor, indices, values))
+
+ def update_add_tensor(self, tensor: Tensor, indices: Tensor, values: Tensor) -> Tensor:
+ r"""Updates a tensor in place by adding the given values.
+
+ Args:
+ tensor: The tensor to update
+ indices: The indices to update
+ values: The values to add
+
+ Returns:
+ The updated tensor
+ """
+ return self._apply("update_add_tensor", (tensor, indices, values))
+
+ def value_and_gradients(
+ self, cost_fn: Callable, parameters: Dict[str, List[Trainable]]
+ ) -> Tuple[Tensor, Dict[str, List[Tensor]]]:
+ r"""The loss and gradients of the given cost function.
+
+ Args:
+ cost_fn (callable): cost function to compute the loss and gradients of
+ parameters (dict): parameters to compute the loss and gradients of
+
+ Returns:
+ tuple: loss and gradients (dict) of the given cost function
+ """
+ return self._apply("value_and_gradients", (cost_fn, parameters))
+
+ def xlogy(self, x: Tensor, y: Tensor) -> Tensor:
+ """
+ Returns ``0`` if ``x == 0`` elementwise and ``x * log(y)`` otherwise.
+ """
+ return self._apply("xlogy", (x, y))
+
+ def zeros(self, shape: Sequence[int], dtype=None) -> Tensor:
+ r"""Returns an array of zeros with the given shape and ``dtype``.
+
+ Args:
+ shape: The shape of the array.
+ dtype: The dtype of the array. If ``None``, the returned array is
+ of type ``float``.
+
+ Returns:
+ The array of zeros.
+ """
+ return self._apply("zeros", (shape, dtype))
+
+ def zeros_like(self, array: Tensor) -> Tensor:
+ r"""Returns an array of zeros with the same shape and ``dtype`` as ``array``.
+
+ Args:
+ array: The array to take the shape and ``dtype`` of.
+
+ Returns:
+ The array of zeros.
+ """
+ return self._apply("zeros_like", (array,))
+
+ def map_fn(self, fn: Callable, elements: Tensor) -> Tensor:
+ """Transforms elems by applying fn to each element unstacked on axis 0.
+
+ Args:
+ fn (func): The callable to be performed. It accepts one argument,
+ which will have the same (possibly nested) structure as elems.
+ elements (Tensor): A tensor or (possibly nested) sequence of tensors,
+ each of which will be unstacked along their first dimension.
+ ``func`` will be applied to the nested sequence of the resulting slices.
+
+ Returns:
+ Tensor: applied ``func`` on ``elements``
+ """
+ return self._apply("map_fn", (fn, elements))
+
+ def squeeze(self, tensor: Tensor, axis: Optional[List[int]]) -> Tensor:
+ """Removes dimensions of size 1 from the shape of a tensor.
+
+ Args:
+ tensor (Tensor): the tensor to squeeze
+ axis (Optional[List[int]]): if specified, only squeezes the
+ dimensions listed, defaults to []
+
+ Returns:
+ Tensor: tensor with one or more dimensions of size 1 removed
+ """
+ return self._apply("squeeze", (tensor, axis))
+
+ def cholesky(self, input: Tensor) -> Tensor:
+ """Computes the Cholesky decomposition of square matrices.
+
+ Args:
+ input (Tensor)
+
+ Returns:
+ Tensor: tensor with the same type as input
+ """
+ return self._apply("cholesky", (input,))
+
+ def Categorical(self, probs: Tensor, name: str):
+ """Categorical distribution over integers.
+
+ Args:
+ probs (Tensor): tensor representing the probabilities of a set of Categorical
+ distributions.
+ name (str): name prefixed to operations created by this class
+
+ Returns:
+ tfp.distributions.Categorical: instance of ``tfp.distributions.Categorical`` class
+ """
+ return self._apply("Categorical", (probs, name))
+
+ def MultivariateNormalTriL(self, loc: Tensor, scale_tril: Tensor):
+ """Multivariate normal distribution on `R^k` and parameterized by a (batch of) length-k loc
+ vector (aka "mu") and a (batch of) k x k scale matrix; covariance = scale @ scale.T
+ where @ denotes matrix-multiplication.
+
+ Args:
+ loc (Tensor): if this is set to None, loc is implicitly 0
+ scale_tril: lower-triangular Tensor with non-zero diagonal elements
+
+ Returns:
+ tfp.distributions.MultivariateNormalTriL: instance of ``tfp.distributions.MultivariateNormalTriL``
+ """
+ return self._apply("MultivariateNormalTriL", (loc, scale_tril))
+
    def custom_gradient(self, func):
        r"""
        A decorator to define a function with a custom gradient.

        With the numpy backend (which computes no gradients) the function is
        returned unchanged; on other backends it is wrapped with
        ``tf.custom_gradient``.
        """

        def wrapper(*args, **kwargs):
            if self.backend_name == "numpy":
                return func(*args, **kwargs)
            else:
                # Imported lazily so numpy-only users never pay the tensorflow import cost.
                from tensorflow import custom_gradient  # pylint: disable=import-outside-toplevel

                return custom_gradient(func)(*args, **kwargs)

        return wrapper
+
+ def DefaultEuclideanOptimizer(self):
+ r"""Default optimizer for the Euclidean parameters."""
+ return self._apply("DefaultEuclideanOptimizer")
+
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # Methods that build on the basic ops and don't need to be overridden in the backend implementation
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ @property
+ def euclidean_opt(self):
+ r"""The configured Euclidean optimizer."""
+ if not self._euclidean_opt:
+ self._euclidean_opt = self.DefaultEuclideanOptimizer()
+ return self._euclidean_opt
+
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # Methods that build on the basic ops and don't need to be overridden in the backend implementation
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ def dagger(self, array: Tensor) -> Tensor:
+ """The adjoint of ``array``. This operation swaps the first
+ and second half of the indexes and then conjugates the matrix.
+
+ Args:
+ array: The array to take the adjoint of
+
+ Returns:
+ The adjoint of ``array``
+ """
+ N = len(array.shape) // 2
+ perm = list(range(N, 2 * N)) + list(range(0, N))
+ return self.conj(self.transpose(array, perm=perm))
+
+ def unitary_to_orthogonal(self, U):
+ r"""Unitary to orthogonal mapping.
+
+ Args:
+ U: The unitary matrix in ``U(n)``
+
+ Returns:
+ The orthogonal matrix in :math:`O(2n)`
+ """
+ X = self.real(U)
+ Y = self.imag(U)
+ return self.block([[X, -Y], [Y, X]])
+
+ def random_symplectic(self, num_modes: int, max_r: float = 1.0) -> Tensor:
+ r"""A random symplectic matrix in ``Sp(2*num_modes)``.
+
+ Squeezing is sampled uniformly from 0.0 to ``max_r`` (1.0 by default).
+ """
+ if num_modes == 1:
+ W = np.exp(1j * settings.rng.uniform(size=(1, 1)))
+ V = np.exp(1j * settings.rng.uniform(size=(1, 1)))
+ else:
+ W = unitary_group.rvs(dim=num_modes, random_state=settings.rng)
+ V = unitary_group.rvs(dim=num_modes, random_state=settings.rng)
+ r = settings.rng.uniform(low=0.0, high=max_r, size=num_modes)
+ OW = self.unitary_to_orthogonal(W)
+ OV = self.unitary_to_orthogonal(V)
+ dd = self.diag(self.concat([self.exp(-r), np.exp(r)], axis=0), k=0)
+ return OW @ dd @ OV
+
+ @staticmethod
+ def random_orthogonal(N: int) -> Tensor:
+ """A random orthogonal matrix in :math:`O(N)`."""
+ if N == 1:
+ return np.array([[1.0]])
+ return ortho_group.rvs(dim=N, random_state=settings.rng)
+
+ def random_unitary(self, N: int) -> Tensor:
+ """a random unitary matrix in :math:`U(N)`"""
+ if N == 1:
+ return self.exp(1j * settings.rng.uniform(size=(1, 1)))
+ return unitary_group.rvs(dim=N, random_state=settings.rng)
+
+ def single_mode_to_multimode_vec(self, vec, num_modes: int):
+ r"""Apply the same 2-vector (i.e. single-mode) to a larger number of modes."""
+ if vec.shape[-1] != 2:
+ raise ValueError("vec must be 2-dimensional (i.e. single-mode)")
+ x, y = vec[..., -2], vec[..., -1]
+ vec = self.concat([self.tile([x], [num_modes]), self.tile([y], [num_modes])], axis=-1)
+ return vec
+
+ def single_mode_to_multimode_mat(self, mat: Tensor, num_modes: int):
+ r"""Apply the same :math:`2\times 2` matrix (i.e. single-mode) to a larger number of modes."""
+ if mat.shape[-2:] != (2, 2):
+ raise ValueError("mat must be a single-mode (2x2) matrix")
+ mat = self.diag(
+ self.tile(self.expand_dims(mat, axis=-1), (1, 1, num_modes)), k=0
+ ) # shape [2,2,N,N]
+ mat = self.reshape(self.transpose(mat, (0, 2, 1, 3)), [2 * num_modes, 2 * num_modes])
+ return mat
+
+ @staticmethod
+ @lru_cache()
+ def Xmat(num_modes: int):
+ r"""The matrix :math:`X_n = \begin{bmatrix}0 & I_n\\ I_n & 0\end{bmatrix}.`
+
+ Args:
+ num_modes (int): positive integer
+
+ Returns:
+ The :math:`2N\times 2N` array
+ """
+ I = np.identity(num_modes)
+ O = np.zeros((num_modes, num_modes))
+ return np.block([[O, I], [I, O]])
+
+ @staticmethod
+ @lru_cache()
+ def rotmat(num_modes: int):
+ "Rotation matrix from quadratures to complex amplitudes."
+ I = np.identity(num_modes)
+ return np.sqrt(0.5) * np.block([[I, 1j * I], [I, -1j * I]])
+
+ @staticmethod
+ @lru_cache()
+ def J(num_modes: int):
+ """Symplectic form."""
+ I = np.identity(num_modes)
+ O = np.zeros_like(I)
+ return np.block([[O, I], [-I, O]])
+
+ def add_at_modes(
+ self, old: Tensor, new: Optional[Tensor], modes: Sequence[int]
+ ) -> Tensor: # NOTE: To be deprecated (XPTensor)
+ """Adds two phase-space tensors (cov matrices, displacement vectors, etc..) on the specified modes."""
+ if new is None:
+ return old
+ shape = getattr(old, "shape", ())
+ N = (shape[-1] if shape != () else 0) // 2
+ indices = modes + [m + N for m in modes]
+ return self.update_add_tensor(
+ old, list(product(*[indices] * len(new.shape))), self.reshape(new, -1)
+ )
+
    def left_matmul_at_modes(
        self, a_partial: Tensor, b_full: Tensor, modes: Sequence[int]
    ) -> Tensor:  # NOTE: To be deprecated (XPTensor)
        r"""Left matrix multiplication of a partial matrix and a full matrix.

        It assumes that ``a_partial`` is a matrix operating on M modes and that ``modes`` is a
        list of ``M`` integers, i.e., it will apply ``a_partial`` on the corresponding ``M`` modes
        of ``b_full`` from the left.

        Args:
            a_partial: The :math:`2M\times 2M` array
            b_full: The :math:`2N\times 2N` array
            modes: A list of ``M`` modes to perform the multiplication on

        Returns:
            The :math:`2N\times 2N` array
        """
        if a_partial is None:
            return b_full

        N = b_full.shape[-1] // 2
        # Rows acted on: `modes` plus the offset indices `m + N`.
        # NOTE(review): this assumes an (x..., p...) block layout of the
        # phase-space matrix — confirm against the XPTensor conventions.
        indices = self.astensor(modes + [m + N for m in modes], dtype="int32")
        b_rows = self.gather(b_full, indices, axis=0)
        b_rows = self.matmul(a_partial, b_rows)
        return self.update_tensor(b_full, indices[:, None], b_rows)
+
    def right_matmul_at_modes(
        self, a_full: Tensor, b_partial: Tensor, modes: Sequence[int]
    ) -> Tensor:  # NOTE: To be deprecated (XPTensor)
        r"""Right matrix multiplication of a full matrix and a partial matrix.

        It assumes that ``b_partial`` is a matrix operating on ``M`` modes and that ``modes``
        is a list of ``M`` integers, i.e., it will apply ``b_partial`` on the corresponding M modes
        of ``a_full`` from the right.

        Args:
            a_full: The :math:`2N\times 2N` array
            b_partial: The :math:`2M\times 2M` array
            modes: A list of `M` modes to perform the multiplication on

        Returns:
            The :math:`2N\times 2N` array
        """
        # (A @ B)^T = B^T @ A^T: reuse the left multiplication on transposes.
        return self.transpose(
            self.left_matmul_at_modes(self.transpose(b_partial), self.transpose(a_full), modes)
        )
+
+ def matvec_at_modes(
+ self, mat: Optional[Tensor], vec: Tensor, modes: Sequence[int]
+ ) -> Tensor: # NOTE: To be deprecated (XPTensor)
+ """Matrix-vector multiplication between a phase-space matrix and a vector in the specified modes."""
+ if mat is None:
+ return vec
+ N = vec.shape[-1] // 2
+ indices = self.astensor(modes + [m + N for m in modes], dtype="int32")
+ updates = self.matvec(mat, self.gather(vec, indices, axis=0))
+ return self.update_tensor(vec, indices[:, None], updates)
+
+ def all_diagonals(self, rho: Tensor, real: bool) -> Tensor:
+ """Returns all the diagonals of a density matrix."""
+ cutoffs = rho.shape[: rho.ndim // 2]
+ rho = self.reshape(rho, (int(np.prod(cutoffs)), int(np.prod(cutoffs))))
+ diag = self.diag_part(rho)
+ if real:
+ return self.real(self.reshape(diag, cutoffs))
+
+ return self.reshape(diag, cutoffs)
+
+ def poisson(self, max_k: int, rate: Tensor) -> Tensor:
+ """Poisson distribution up to ``max_k``."""
+ k = self.arange(max_k)
+ rate = self.cast(rate, k.dtype)
+ return self.exp(k * self.log(rate + 1e-9) - rate - self.lgamma(k + 1.0))
+
+ def binomial_conditional_prob(self, success_prob: Tensor, dim_out: int, dim_in: int):
+ """:math:`P(out|in) = binom(in, out) * (1-success_prob)**(in-out) * success_prob**out`."""
+ in_ = self.arange(dim_in)[None, :]
+ out_ = self.arange(dim_out)[:, None]
+ return (
+ self.cast(binom(in_, out_), in_.dtype)
+ * self.pow(success_prob, out_)
+ * self.pow(1.0 - success_prob, self.maximum(in_ - out_, 0.0))
+ )
+
+ def convolve_probs_1d(self, prob: Tensor, other_probs: List[Tensor]) -> Tensor:
+ """Convolution of a joint probability with a list of single-index probabilities."""
+
+ if prob.ndim > 3 or len(other_probs) > 3:
+ raise ValueError("cannot convolve arrays with more than 3 axes")
+ if not all((q.ndim == 1 for q in other_probs)):
+ raise ValueError("other_probs must contain 1d arrays")
+ if not all((len(q) == s for q, s in zip(other_probs, prob.shape))):
+ raise ValueError("The length of the 1d prob vectors must match shape of prob")
+
+ q = other_probs[0]
+ for q_ in other_probs[1:]:
+ q = q[..., None] * q_[(None,) * q.ndim + (slice(None),)]
+
+ return self.convolve_probs(prob, q)
+
    def convolve_probs(self, prob: Tensor, other: Tensor) -> Tensor:
        r"""Convolve two probability distributions (up to 3D) with the same shape.

        Note that the output is not guaranteed to be a complete joint probability,
        as it's computed only up to the dimension of the base probs.

        Args:
            prob: The base probability array (1, 2 or 3 axes).
            other: The kernel probability array, with the same shape as ``prob``.

        Returns:
            The convolution of ``prob`` with ``other``.
        """
        if prob.ndim > 3 or other.ndim > 3:
            raise ValueError("cannot convolve arrays with more than 3 axes")
        if not prob.shape == other.shape:
            raise ValueError("prob and other must have the same shape")

        # Zero-pad on the leading side of every axis so the convolution output
        # keeps the dimensions of `prob`.
        prob_padded = self.pad(prob, [(s - 1, 0) for s in other.shape])
        # Reverse `other` along every axis: convolution is correlation with
        # the flipped kernel.
        other_reversed = other[(slice(None, None, -1),) * other.ndim]
        return self.convolution(
            prob_padded[None, ..., None],
            other_reversed[..., None, None],
            # Builds "NWC", "NHWC" or "NDHWC" for 1, 2 or 3 axes respectively.
            data_format="N"
            + ("HD"[: other.ndim - 1])[::-1]
            + "WC",  # TODO: rewrite this to be more readable (do we need it?)
        )[0, ..., 0]
+
+ def euclidean_to_symplectic(self, S: Matrix, dS_euclidean: Matrix) -> Matrix:
+ r"""Convert the Euclidean gradient to a Riemannian gradient on the
+ tangent bundle of the symplectic manifold.
+
+ Implemented from:
+ Wang J, Sun H, Fiori S. A Riemannian‐steepest‐descent approach
+ for optimization on the real symplectic group.
+ Mathematical Methods in the Applied Sciences. 2018 Jul 30;41(11):4273-86.
+
+ Args:
+ S (Matrix): symplectic matrix
+ dS_euclidean (Matrix): Euclidean gradient tensor
+
+ Returns:
+ Matrix: symplectic gradient tensor
+ """
+ Jmat = self.J(S.shape[-1] // 2)
+ Z = self.matmul(self.transpose(S), dS_euclidean)
+ return 0.5 * (Z + self.matmul(self.matmul(Jmat, self.transpose(Z)), Jmat))
+
+ def euclidean_to_unitary(self, U: Matrix, dU_euclidean: Matrix) -> Matrix:
+ r"""Convert the Euclidean gradient to a Riemannian gradient on the
+ tangent bundle of the unitary manifold.
+
+ Implemented from:
+ Y Yao, F Miatto, N Quesada - arXiv preprint arXiv:2209.06069, 2022.
+
+ Args:
+ U (Matrix): unitary matrix
+ dU_euclidean (Matrix): Euclidean gradient tensor
+
+ Returns:
+ Matrix: unitary gradient tensor
+ """
+ Z = self.matmul(self.conj(self.transpose(U)), dU_euclidean)
+ return 0.5 * (Z - self.conj(self.transpose(Z)))
diff --git a/mrmustard/math/backend_numpy.py b/mrmustard/math/backend_numpy.py
new file mode 100644
index 000000000..dada0eb2d
--- /dev/null
+++ b/mrmustard/math/backend_numpy.py
@@ -0,0 +1,612 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains the numpy backend."""
+
+# pylint: disable = missing-function-docstring, missing-class-docstring, fixme
+
+
+from math import lgamma as mlgamma
+from typing import List, Optional, Sequence, Tuple, Union
+
+import numpy as np
+import scipy as sp
+from scipy.linalg import expm as scipy_expm
+from scipy.linalg import sqrtm as scipy_sqrtm
+from scipy.special import xlogy as scipy_xlogy
+from scipy.stats import multivariate_normal
+
+from ..utils.settings import settings
+from .autocast import Autocast
+from .backend_base import BackendBase
+from .lattice.strategies import binomial, vanilla, vanilla_batch
+from .lattice.strategies.compactFock.inputValidation import (
+ hermite_multidimensional_1leftoverMode,
+ hermite_multidimensional_diagonal,
+ hermite_multidimensional_diagonal_batch,
+)
+
+
+# pylint: disable=too-many-public-methods
+class BackendNumpy(BackendBase): # pragma: no cover
+ r"""
+ A numpy backend.
+ """
+
+ int32 = np.int32
+ float32 = np.float32
+ float64 = np.float64
+ complex64 = np.complex64
+ complex128 = np.complex128
+
+ def __init__(self):
+ super().__init__(name="numpy")
+
+ def __repr__(self) -> str:
+ return "BackendNumpy()"
+
+ def abs(self, array: np.ndarray) -> np.ndarray:
+ return np.abs(array)
+
+ def any(self, array: np.ndarray) -> np.ndarray:
+ return np.any(array)
+
+ def arange(
+ self, start: int, limit: Optional[int] = None, delta: int = 1, dtype=np.float64
+ ) -> np.ndarray:
+ return np.arange(start, limit, delta, dtype=dtype)
+
+ def asnumpy(self, tensor: np.ndarray) -> np.ndarray:
+ if isinstance(tensor, np.ndarray):
+ return tensor
+ return np.array(tensor)
+
+ def assign(self, tensor: np.ndarray, value: np.ndarray) -> np.ndarray:
+ tensor = value
+ return tensor
+
    def astensor(self, array: np.ndarray, dtype=None) -> np.ndarray:
        # Coerce to an ndarray, then cast; falls back to the array's own dtype.
        array = np.array(array)
        return self.cast(array, dtype=dtype or array.dtype)
+
+ def atleast_1d(self, array: np.ndarray, dtype=None) -> np.ndarray:
+ return np.atleast_1d(self.astensor(array, dtype))
+
+ def atleast_2d(self, array: np.ndarray, dtype=None) -> np.ndarray:
+ return np.atleast_2d(self.astensor(array, dtype))
+
+ def atleast_3d(self, array: np.ndarray, dtype=None) -> np.ndarray:
+ array = self.atleast_2d(self.atleast_1d(array))
+ if len(array.shape) == 2:
+ array = array[None, ...]
+ return array
+
+ def block(self, blocks: List[List[np.ndarray]], axes=(-2, -1)) -> np.ndarray:
+ rows = [self.concat(row, axis=axes[1]) for row in blocks]
+ return self.concat(rows, axis=axes[0])
+
+ def block_diag(self, *blocks: List[np.ndarray]) -> np.ndarray:
+ return sp.linalg.block_diag(*blocks)
+
+ def boolean_mask(self, tensor: np.ndarray, mask: np.ndarray) -> np.ndarray:
+ return np.array([t for i, t in enumerate(tensor) if mask[i]])
+
+ def cast(self, array: np.ndarray, dtype=None) -> np.ndarray:
+ if dtype is None:
+ return array
+ if dtype not in [self.complex64, self.complex128, "complex64", "complex128"]:
+ array = self.real(array)
+ return np.array(array, dtype=dtype)
+
+ def clip(self, array, a_min, a_max) -> np.ndarray:
+ return np.clip(array, a_min, a_max)
+
+ def concat(self, values: List[np.ndarray], axis: int) -> np.ndarray:
+ # tf.concat can concatenate lists of scalars, while np.concatenate errors
+ try:
+ return np.concatenate(values, axis)
+ except ValueError:
+ return np.array(values)
+
+ def conj(self, array: np.ndarray) -> np.ndarray:
+ return np.conj(array)
+
+ def cos(self, array: np.ndarray) -> np.ndarray:
+ return np.cos(array)
+
+ def cosh(self, array: np.ndarray) -> np.ndarray:
+ return np.cosh(array)
+
+ def det(self, matrix: np.ndarray) -> np.ndarray:
+ return np.linalg.det(matrix)
+
+ def diag(self, array: np.ndarray, k: int = 0) -> np.ndarray:
+ if len(array.shape) == 1:
+ return np.diag(array, k=k)
+ elif len(array.shape) == 2:
+ return np.array([np.diag(l, k=k).tolist() for l in array])
+ else:
+ # fallback into more complex algorithm
+ original_sh = array.shape
+
+ ravelled_sh = (np.prod(original_sh[:-1]), original_sh[-1])
+ array = array.ravel().reshape(*ravelled_sh)
+
+ ret = []
+ for line in array:
+ ret.append(np.diag(line, k))
+
+ ret = np.array(ret)
+ inner_shape = (
+ original_sh[-1] + abs(k),
+ original_sh[-1] + abs(k),
+ )
+ return ret.reshape(original_sh[:-1] + inner_shape)
+
+ def diag_part(self, array: np.ndarray, k: int) -> np.ndarray:
+ ret = np.diagonal(array, offset=k, axis1=-2, axis2=-1)
+ ret.flags.writeable = True
+ return ret
+
+ def set_diag(self, array: np.ndarray, diag: np.ndarray, k: int) -> np.ndarray:
+ i = np.arange(0, array.shape[-2] - abs(k))
+ if k < 0:
+ i -= array.shape[-2] - abs(k)
+
+ j = np.arange(abs(k), array.shape[-1])
+ if k < 0:
+ j -= abs(k)
+
+ array[..., i, j] = diag
+
+ return array
+
+ def einsum(self, string: str, *tensors) -> Optional[np.ndarray]:
+ if type(string) is str:
+ return np.einsum(string, *tensors)
+ return None # provide same functionality as numpy.einsum or upgrade to opt_einsum
+
+ def exp(self, array: np.ndarray) -> np.ndarray:
+ return np.exp(array)
+
+ def expand_dims(self, array: np.ndarray, axis: int) -> np.ndarray:
+ return np.expand_dims(array, axis)
+
+ def expm(self, matrix: np.ndarray) -> np.ndarray:
+ return scipy_expm(matrix)
+
+ def eye(self, size: int, dtype=np.float64) -> np.ndarray:
+ return np.eye(size, dtype=dtype)
+
+ def eye_like(self, array: np.ndarray) -> np.ndarray:
+ return np.eye(array.shape[-1], dtype=array.dtype)
+
+ def from_backend(self, value) -> bool:
+ return isinstance(value, np.ndarray)
+
+ def gather(self, array: np.ndarray, indices: np.ndarray, axis: int = 0) -> np.ndarray:
+ return np.take(array, indices, axis=axis)
+
+ def imag(self, array: np.ndarray) -> np.ndarray:
+ return np.imag(array)
+
+ def inv(self, tensor: np.ndarray) -> np.ndarray:
+ return np.linalg.inv(tensor)
+
+ def is_trainable(self, tensor: np.ndarray) -> bool: # pylint: disable=unused-argument
+ return False
+
+ def lgamma(self, x: np.ndarray) -> np.ndarray:
+ return np.array([mlgamma(v) for v in x])
+
+ def log(self, x: np.ndarray) -> np.ndarray:
+ return np.log(x)
+
+ def make_complex(self, real: np.ndarray, imag: np.ndarray) -> np.ndarray:
+ return real + 1j * imag
+
+ @Autocast()
+ def matmul(self, *matrices: np.ndarray) -> np.ndarray:
+ mat = matrices[0]
+ for matrix in matrices[1:]:
+ mat = np.matmul(mat, matrix)
+ return mat
+
+ @Autocast()
+ def matvec(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
+ return self.matmul(a, b[:, None])[:, 0]
+
+ @Autocast()
+ def maximum(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
+ return np.maximum(a, b)
+
+ @Autocast()
+ def minimum(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
+ return np.minimum(a, b)
+
+ def new_variable(
+ self,
+ value,
+ bounds: Union[Tuple[Optional[float], Optional[float]], None],
+ name: str,
+ dtype=np.float64,
+ ): # pylint: disable=unused-argument
+ return np.array(value, dtype=dtype)
+
+ def new_constant(self, value, name: str, dtype=np.float64): # pylint: disable=unused-argument
+ return np.array(value, dtype=dtype)
+
+ def norm(self, array: np.ndarray) -> np.ndarray:
+ return np.linalg.norm(array)
+
+ def ones(self, shape: Sequence[int], dtype=np.float64) -> np.ndarray:
+ return np.ones(shape, dtype=dtype)
+
+ def ones_like(self, array: np.ndarray) -> np.ndarray:
+ return np.ones(array.shape, dtype=array.dtype)
+
+ @Autocast()
+ def outer(self, array1: np.ndarray, array2: np.ndarray) -> np.ndarray:
+ return np.tensordot(array1, array2, [[], []])
+
+ def pad(
+ self,
+ array: np.ndarray,
+ paddings: Sequence[Tuple[int, int]],
+ mode="CONSTANT",
+ constant_values=0,
+ ) -> np.ndarray:
+ if mode == "CONSTANT":
+ mode = "constant"
+ return np.pad(array, paddings, mode, constant_values=constant_values)
+
+ @staticmethod
+ def pinv(matrix: np.ndarray) -> np.ndarray:
+ return np.linalg.pinv(matrix)
+
+ @Autocast()
+ def pow(self, x: np.ndarray, y: float) -> np.ndarray:
+ return np.power(x, y)
+
+ def real(self, array: np.ndarray) -> np.ndarray:
+ return np.real(array)
+
+ def reshape(self, array: np.ndarray, shape: Sequence[int]) -> np.ndarray:
+ return np.reshape(array, shape)
+
+ def round(self, array: np.ndarray, decimals: int = 0) -> np.ndarray:
+ return np.round(array, decimals)
+
+ def sin(self, array: np.ndarray) -> np.ndarray:
+ return np.sin(array)
+
+ def sinh(self, array: np.ndarray) -> np.ndarray:
+ return np.sinh(array)
+
+ def solve(self, matrix: np.ndarray, rhs: np.ndarray) -> np.ndarray:
+ if len(rhs.shape) == len(matrix.shape) - 1:
+ rhs = np.expand_dims(rhs, -1)
+ return np.linalg.solve(matrix, rhs)[..., 0]
+ return np.linalg.solve(matrix, rhs)
+
+ def sqrt(self, x: np.ndarray, dtype=None) -> np.ndarray:
+ return np.sqrt(self.cast(x, dtype))
+
+ def sum(self, array: np.ndarray, axes: Sequence[int] = None):
+ if axes is None:
+ return np.sum(array)
+
+ ret = array
+ for axis in axes:
+ ret = np.sum(ret, axis=axis)
+ return ret
+
+ @Autocast()
+ def tensordot(self, a: np.ndarray, b: np.ndarray, axes: List[int]) -> np.ndarray:
+ return np.tensordot(a, b, axes)
+
+ def tile(self, array: np.ndarray, repeats: Sequence[int]) -> np.ndarray:
+ return np.tile(array, repeats)
+
+ def trace(self, array: np.ndarray, dtype=None) -> np.ndarray:
+ return self.cast(np.trace(array), dtype)
+
+ def transpose(self, a: np.ndarray, perm: Sequence[int] = None) -> Optional[np.ndarray]:
+ if a is None:
+ return None # TODO: remove and address None inputs where transpose is used
+ return np.transpose(a, axes=perm)
+
+ @Autocast()
+ def update_tensor(
+ self, tensor: np.ndarray, indices: np.ndarray, values: np.ndarray
+ ) -> np.ndarray:
+ indices = self.atleast_2d(indices)
+ for i, v in zip(indices, values):
+ tensor[tuple(i)] = v
+ return tensor
+
+ @Autocast()
+ def update_add_tensor(
+ self, tensor: np.ndarray, indices: np.ndarray, values: np.ndarray
+ ) -> np.ndarray:
+ indices = self.atleast_2d(indices)
+ for i, v in zip(indices, values):
+ tensor[tuple(i)] += v
+ return tensor
+
+ def zeros(self, shape: Sequence[int], dtype=np.float64) -> np.ndarray:
+ return np.zeros(shape, dtype=dtype)
+
+ def zeros_like(self, array: np.ndarray) -> np.ndarray:
+ return np.zeros(np.array(array).shape, dtype=array.dtype)
+
+ def map_fn(self, func, elements):
+ # no vectorized equivalent of tf.map_fn here; apply func per element and stack the results
+ return np.array([func(e) for e in elements])
+
+ def squeeze(self, tensor, axis=None):
+ return np.squeeze(tensor, axis=axis)
+
+ def cholesky(self, input: np.ndarray):
+ return np.linalg.cholesky(input)
+
+ def Categorical(self, probs: np.ndarray, name: str): # pylint: disable=unused-argument
+ class Generator:
+ def __init__(self, probs):
+ self._probs = probs
+
+ def sample(self):
+ array = np.random.multinomial(1, pvals=probs)
+ return np.where(array == 1)[0][0]
+
+ return Generator(probs)
+
+ def MultivariateNormalTriL(self, loc: np.ndarray, scale_tril: np.ndarray):
+ class Generator:
+ def __init__(self, mean, cov):
+ self._mean = mean
+ self._cov = cov
+
+ def sample(self, dtype=None): # pylint: disable=unused-argument
+ fn = np.random.default_rng().multivariate_normal
+ ret = fn(self._mean, self._cov)
+ return ret
+
+ def prob(self, x):
+ return multivariate_normal.pdf(x, mean=self._mean, cov=self._cov)
+
+ scale_tril = scale_tril @ np.transpose(scale_tril)
+ return Generator(loc, scale_tril)
+
+ @staticmethod
+ def eigvals(tensor: np.ndarray) -> np.ndarray:
+ return np.linalg.eigvals(tensor)
+
+ @staticmethod
+ def xlogy(x: np.ndarray, y: np.ndarray) -> np.ndarray:
+ return scipy_xlogy(x, y)
+
+ @staticmethod
+ def eigh(tensor: np.ndarray) -> tuple:
+ return np.linalg.eigh(tensor)
+
+ def sqrtm(self, tensor: np.ndarray, dtype, rtol=1e-05, atol=1e-08) -> np.ndarray:
+ if np.allclose(tensor, 0, rtol=rtol, atol=atol):
+ ret = self.zeros_like(tensor)
+ else:
+ ret = scipy_sqrtm(tensor)
+
+ if dtype is None:
+ return self.cast(ret, self.complex128)
+ return self.cast(ret, dtype)
+
+ # ~~~~~~~~~~~~~~~~~
+ # Special functions
+ # ~~~~~~~~~~~~~~~~~
+
+ @staticmethod
+ def DefaultEuclideanOptimizer() -> None:
+ return None
+
+ def hermite_renormalized(
+ self, A: np.ndarray, B: np.ndarray, C: np.ndarray, shape: Tuple[int]
+ ) -> np.ndarray:
+ r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
+ series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)`
+ at the denominator rather than :math:`n!`. It computes all the amplitudes within the
+ tensor of given shape.
+
+ Args:
+ A: The A matrix.
+ B: The B vector.
+ C: The C scalar.
+ shape: The shape of the final tensor.
+
+ Returns:
+ The renormalized Hermite polynomial of given shape.
+ """
+
+ precision_bits = settings.PRECISION_BITS_HERMITE_POLY
+
+ if precision_bits == 128: # numba
+ G = vanilla(tuple(shape), A, B, C)
+ else: # julia (with precision_bits = 512)
+ # The following import must come after running "jl = Julia(compiled_modules=False)" in settings.py
+ from julia import Main as Main_julia # pylint: disable=import-outside-toplevel
+
+ A, B, C = (
+ np.array(A).astype(np.complex128),
+ np.array(B).astype(np.complex128),
+ np.array(C).astype(np.complex128),
+ )
+ G = Main_julia.Vanilla.vanilla(
+ A, B, C.item(), np.array(shape, dtype=np.int64), precision_bits
+ )
+
+ return G
+
+ def hermite_renormalized_batch(
+ self, A: np.ndarray, B: np.ndarray, C: np.ndarray, shape: Tuple[int]
+ ) -> np.ndarray:
+ G = vanilla_batch(tuple(shape), A, B, C)
+ return G
+
+ def hermite_renormalized_binomial(
+ self,
+ A: np.ndarray,
+ B: np.ndarray,
+ C: np.ndarray,
+ shape: Tuple[int],
+ max_l2: Optional[float],
+ global_cutoff: Optional[int],
+ ) -> np.ndarray:
+ r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
+ series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)`
+ at the denominator rather than :math:`n!`. The computation fills a tensor of given shape
+ up to a given L2 norm or global cutoff, whichever applies first. The max_l2 value, if
+ not provided, is set to the default value of the AUTOCUTOFF_PROBABILITY setting.
+
+ Args:
+ A: The A matrix.
+ B: The B vector.
+ C: The C scalar.
+ shape: The shape of the final tensor (local cutoffs).
+ max_l2 (float): The maximum squared L2 norm of the tensor.
+ global_cutoff (optional int): The global cutoff.
+
+ Returns:
+ The renormalized Hermite polynomial of given shape.
+ """
+ G, _ = binomial(
+ tuple(shape),
+ A,
+ B,
+ C,
+ max_l2=max_l2 or settings.AUTOCUTOFF_PROBABILITY,
+ global_cutoff=global_cutoff or sum(shape) - len(shape) + 1,
+ )
+
+ return G
+
+ def reorder_AB_bargmann(self, A: np.ndarray, B: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+ r"""In mrmustard.math.lattice.strategies.compactFock~ dimensions of the Fock representation are ordered like [mode0,mode0,mode1,mode1,...]
+ while in mrmustard.physics.bargmann the ordering is [mode0,mode1,...,mode0,mode1,...]. Here we reorder A and B.
+ """
+ ordering = np.arange(2 * A.shape[0] // 2).reshape(2, -1).T.flatten()
+ A = self.gather(A, ordering, axis=1)
+ A = self.gather(A, ordering)
+ B = self.gather(B, ordering, axis=0)
+ return A, B
+
+ def hermite_renormalized_diagonal(
+ self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+ ) -> np.ndarray:
+ r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.lattice.strategies.compactFock~
+ Then, calculate the required renormalized multidimensional Hermite polynomial.
+ """
+ A, B = self.reorder_AB_bargmann(A, B)
+ return self.hermite_renormalized_diagonal_reorderedAB(A, B, C, cutoffs=cutoffs)
+
+ def hermite_renormalized_diagonal_reorderedAB(
+ self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+ ) -> np.ndarray:
+ r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
+ series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the
+ denominator rather than :math:`n!`. Note the minus sign in front of ``A``.
+
+ Calculates the diagonal of the Fock representation (i.e. the PNR detection probabilities of all modes)
+ by applying the recursion relation in a selective manner.
+
+ Args:
+ A: The A matrix.
+ B: The B vector.
+ C: The C scalar.
+ cutoffs: upper boundary of photon numbers in each mode
+
+ Returns:
+ The renormalized Hermite polynomial.
+ """
+ poly0, _, _, _, _ = hermite_multidimensional_diagonal(A, B, C, cutoffs)
+
+ return poly0
+
+ def hermite_renormalized_diagonal_batch(
+ self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+ ) -> np.ndarray:
+ r"""Same as hermite_renormalized_diagonal but works for a batch of different B's."""
+ A, B = self.reorder_AB_bargmann(A, B)
+ return self.hermite_renormalized_diagonal_reorderedAB_batch(A, B, C, cutoffs=cutoffs)
+
+ def hermite_renormalized_diagonal_reorderedAB_batch(
+ self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+ ) -> np.ndarray:
+ r"""Same as hermite_renormalized_diagonal_reorderedAB but works for a batch of different B's.
+
+ Args:
+ A: The A matrix.
+ B: The B vectors.
+ C: The C scalar.
+ cutoffs: upper boundary of photon numbers in each mode
+
+ Returns:
+ The renormalized Hermite polynomial from different B values.
+ """
+ poly0, _, _, _, _ = hermite_multidimensional_diagonal_batch(A, B, C, cutoffs)
+
+ return poly0
+
+ def hermite_renormalized_1leftoverMode(
+ self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+ ) -> np.ndarray:
+ r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.lattice.strategies.compactFock~
+ Then, calculate the required renormalized multidimensional Hermite polynomial.
+ """
+ A, B = self.reorder_AB_bargmann(A, B)
+ return self.hermite_renormalized_1leftoverMode_reorderedAB(A, B, C, cutoffs=cutoffs)
+
+ def hermite_renormalized_1leftoverMode_reorderedAB(
+ self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int]
+ ) -> np.ndarray:
+ r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
+ series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the
+ denominator rather than :math:`n!`. Note the minus sign in front of ``A``.
+
+ Calculates all possible Fock representations of mode 0,
+ where all other modes are PNR detected.
+ This is done by applying the recursion relation in a selective manner.
+
+ Args:
+ A: The A matrix.
+ B: The B vector.
+ C: The C scalar.
+ cutoffs: upper boundary of photon numbers in each mode
+
+ Returns:
+ The renormalized Hermite polynomial.
+ """
+ poly0, _, _, _, _ = hermite_multidimensional_1leftoverMode(A, B, C, cutoffs)
+ return poly0
+
+ @staticmethod
+ def getitem(tensor, *, key):
+ value = np.array(tensor)[key]
+ return value
+
+ @staticmethod
+ def setitem(tensor, value, *, key):
+ _tensor = np.array(tensor)
+ value = np.array(value)
+ _tensor[key] = value
+
+ return _tensor
diff --git a/mrmustard/math/tensorflow.py b/mrmustard/math/backend_tensorflow.py
similarity index 64%
rename from mrmustard/math/tensorflow.py
rename to mrmustard/math/backend_tensorflow.py
index 8a0415892..636b7459a 100644
--- a/mrmustard/math/tensorflow.py
+++ b/mrmustard/math/backend_tensorflow.py
@@ -12,43 +12,54 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""This module contains the Tensorflow implementation of the :class:`Math` interface."""
+"""This module contains the tensorflow backend."""
+
+# pylint: disable = missing-function-docstring, missing-class-docstring, wrong-import-position
from typing import Callable, List, Optional, Sequence, Tuple, Union
+import os
import numpy as np
-import tensorflow as tf
import tensorflow_probability as tfp
-from mrmustard import settings
-from mrmustard.math.autocast import Autocast
-from mrmustard.math.lattice import strategies
-from mrmustard.math.numba.compactFock_inputValidation import (
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+import tensorflow as tf
+
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0"
+
+
+from mrmustard.math.lattice.strategies.compactFock.inputValidation import (
grad_hermite_multidimensional_1leftoverMode,
grad_hermite_multidimensional_diagonal,
hermite_multidimensional_1leftoverMode,
hermite_multidimensional_diagonal,
+ hermite_multidimensional_diagonal_batch,
)
-from mrmustard.typing import Tensor, Trainable
-from .math_interface import MathInterface
+from ..utils.settings import settings
+from ..utils.typing import Tensor, Trainable
+from .autocast import Autocast
+from .backend_base import BackendBase
+from .lattice import strategies
-# pylint: disable=too-many-public-methods,no-self-argument,arguments-differ
-class TFMath(MathInterface):
- r"""Tensorflow implemantion of the :class:`Math` interface."""
+# pylint: disable=too-many-public-methods
+class BackendTensorflow(BackendBase): # pragma: no cover
+ r"""
+ A tensorflow implementation of the backend interface.
+ """
- float64 = tf.float64
+ int32 = tf.int32
float32 = tf.float32
+ float64 = tf.float64
complex64 = tf.complex64
complex128 = tf.complex128
- def __getattr__(self, name):
- return getattr(tf, name)
+ def __init__(self):
+ super().__init__(name="tensorflow")
- # ~~~~~~~~~
- # Basic ops
- # ~~~~~~~~~
+ def __repr__(self) -> str:
+ return "BackendTensorflow()"
def abs(self, array: tf.Tensor) -> tf.Tensor:
return tf.abs(array)
@@ -56,7 +67,8 @@ def abs(self, array: tf.Tensor) -> tf.Tensor:
def any(self, array: tf.Tensor) -> tf.Tensor:
return tf.math.reduce_any(array)
- def arange(self, start: int, limit: int = None, delta: int = 1, dtype=tf.float64) -> tf.Tensor:
+ def arange(self, start: int, limit: int = None, delta: int = 1, dtype=None) -> tf.Tensor:
+ dtype = dtype or self.float64
return tf.range(start, limit, delta, dtype=dtype)
def asnumpy(self, tensor: tf.Tensor) -> Tensor:
@@ -67,10 +79,35 @@ def assign(self, tensor: tf.Tensor, value: tf.Tensor) -> tf.Tensor:
return tensor
def astensor(self, array: Union[np.ndarray, tf.Tensor], dtype=None) -> tf.Tensor:
- return tf.convert_to_tensor(array, dtype=dtype)
+ dtype = dtype or np.array(array).dtype.name
+ return tf.convert_to_tensor(array, dtype)
def atleast_1d(self, array: tf.Tensor, dtype=None) -> tf.Tensor:
- return self.cast(tf.reshape(array, [-1]), dtype)
+ return tf.experimental.numpy.atleast_1d(self.astensor(array, dtype))
+
+ def atleast_2d(self, array: tf.Tensor, dtype=None) -> tf.Tensor:
+ return tf.experimental.numpy.atleast_2d(self.astensor(array, dtype))
+
+ def atleast_3d(self, array: tf.Tensor, dtype=None) -> tf.Tensor:
+ array = self.atleast_2d(self.atleast_1d(array, dtype))
+ if len(array.shape) == 2:
+ array = self.expand_dims(array, 0)
+ return array
+
+ def block_diag(self, mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:
+ Za = self.zeros((mat1.shape[-2], mat2.shape[-1]), dtype=mat1.dtype)
+ Zb = self.zeros((mat2.shape[-2], mat1.shape[-1]), dtype=mat1.dtype)
+ return self.concat(
+ [self.concat([mat1, Za], axis=-1), self.concat([Zb, mat2], axis=-1)],
+ axis=-2,
+ )
+
+ def block(self, blocks: List[List[tf.Tensor]], axes=(-2, -1)) -> tf.Tensor:
+ rows = [self.concat(row, axis=axes[1]) for row in blocks]
+ return self.concat(rows, axis=axes[0])
+
+ def boolean_mask(self, tensor: tf.Tensor, mask: tf.Tensor) -> Tensor:
+ return tf.boolean_mask(tensor, mask)
def cast(self, array: tf.Tensor, dtype=None) -> tf.Tensor:
if dtype is None:
@@ -83,8 +120,7 @@ def clip(self, array, a_min, a_max) -> tf.Tensor:
def concat(self, values: Sequence[tf.Tensor], axis: int) -> tf.Tensor:
if any(tf.rank(v) == 0 for v in values):
return tf.stack(values, axis)
- else:
- return tf.concat(values, axis)
+ return tf.concat(values, axis)
def conj(self, array: tf.Tensor) -> tf.Tensor:
return tf.math.conj(array)
@@ -111,12 +147,11 @@ def convolution(
self,
array: tf.Tensor,
filters: tf.Tensor,
- strides: Optional[List[int]] = None,
- padding="VALID",
+ padding: Optional[str] = None,
data_format="NWC",
- dilations: Optional[List[int]] = None,
) -> tf.Tensor:
- return tf.nn.convolution(array, filters, strides, padding, data_format, dilations)
+ padding = padding or "VALID"
+ return tf.nn.convolution(array, filters=filters, padding=padding, data_format=data_format)
def cos(self, array: tf.Tensor) -> tf.Tensor:
return tf.math.cos(array)
@@ -124,12 +159,6 @@ def cos(self, array: tf.Tensor) -> tf.Tensor:
def cosh(self, array: tf.Tensor) -> tf.Tensor:
return tf.math.cosh(array)
- def atan2(self, y: tf.Tensor, x: tf.Tensor) -> tf.Tensor:
- return tf.math.atan2(y, x)
-
- def make_complex(self, real: tf.Tensor, imag: tf.Tensor) -> tf.Tensor:
- return tf.complex(real, imag)
-
def det(self, matrix: tf.Tensor) -> tf.Tensor:
return tf.linalg.det(matrix)
@@ -140,7 +169,7 @@ def diag_part(self, array: tf.Tensor, k: int = 0) -> tf.Tensor:
return tf.linalg.diag_part(array, k=k)
def einsum(self, string: str, *tensors) -> tf.Tensor:
- if type(string) is str:
+ if isinstance(string, str):
return tf.einsum(string, *tensors)
return None # provide same functionality as numpy.einsum or upgrade to opt_einsum
@@ -153,7 +182,8 @@ def expand_dims(self, array: tf.Tensor, axis: int) -> tf.Tensor:
def expm(self, matrix: tf.Tensor) -> tf.Tensor:
return tf.linalg.expm(matrix)
- def eye(self, size: int, dtype=tf.float64) -> tf.Tensor:
+ def eye(self, size: int, dtype=None) -> tf.Tensor:
+ dtype = dtype or self.float64
return tf.eye(size, dtype=dtype)
def eye_like(self, array: tf.Tensor) -> Tensor:
@@ -162,16 +192,9 @@ def eye_like(self, array: tf.Tensor) -> Tensor:
def from_backend(self, value) -> bool:
return isinstance(value, (tf.Tensor, tf.Variable))
- def gather(self, array: tf.Tensor, indices: tf.Tensor, axis: int = None) -> tf.Tensor:
+ def gather(self, array: tf.Tensor, indices: tf.Tensor, axis: int) -> tf.Tensor:
return tf.gather(array, indices, axis=axis)
- def hash_tensor(self, tensor: tf.Tensor) -> int:
- try:
- REF = tensor.ref()
- except AttributeError as e:
- raise TypeError("Cannot hash tensor") from e
- return hash(REF)
-
def imag(self, array: tf.Tensor) -> tf.Tensor:
return tf.math.imag(array)
@@ -188,20 +211,18 @@ def log(self, x: tf.Tensor) -> tf.Tensor:
return tf.math.log(x)
@Autocast()
- def matmul(
- self,
- a: tf.Tensor,
- b: tf.Tensor,
- transpose_a=False,
- transpose_b=False,
- adjoint_a=False,
- adjoint_b=False,
- ) -> tf.Tensor:
- return tf.linalg.matmul(a, b, transpose_a, transpose_b, adjoint_a, adjoint_b)
+ def matmul(self, *matrices: tf.Tensor) -> tf.Tensor:
+ mat = matrices[0]
+ for matrix in matrices[1:]:
+ mat = tf.matmul(mat, matrix)
+ return mat
@Autocast()
- def matvec(self, a: tf.Tensor, b: tf.Tensor, transpose_a=False, adjoint_a=False) -> tf.Tensor:
- return tf.linalg.matvec(a, b, transpose_a, adjoint_a)
+ def matvec(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor:
+ return tf.linalg.matvec(a, b)
+
+ def make_complex(self, real: tf.Tensor, imag: tf.Tensor) -> tf.Tensor:
+ return tf.complex(real, imag)
@Autocast()
def maximum(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor:
@@ -216,21 +237,24 @@ def new_variable(
value,
bounds: Union[Tuple[Optional[float], Optional[float]], None],
name: str,
- dtype=tf.float64,
+ dtype=None,
):
bounds = bounds or (None, None)
- value = self.convert_to_tensor(value, dtype)
+ dtype = dtype or self.float64
+ value = self.astensor(value, dtype)
return tf.Variable(value, name=name, dtype=dtype, constraint=self.constraint_func(bounds))
- def new_constant(self, value, name: str, dtype=tf.float64):
- value = self.convert_to_tensor(value, dtype)
+ def new_constant(self, value, name: str, dtype=None):
+ dtype = dtype or self.float64
+ value = self.astensor(value, dtype)
return tf.constant(value, dtype=dtype, name=name)
def norm(self, array: tf.Tensor) -> tf.Tensor:
"""Note that the norm preserves the type of array."""
return tf.linalg.norm(array)
- def ones(self, shape: Sequence[int], dtype=tf.float64) -> tf.Tensor:
+ def ones(self, shape: Sequence[int], dtype=None) -> tf.Tensor:
+ dtype = dtype or self.float64
return tf.ones(shape, dtype=dtype)
def ones_like(self, array: tf.Tensor) -> tf.Tensor:
@@ -263,6 +287,9 @@ def real(self, array: tf.Tensor) -> tf.Tensor:
def reshape(self, array: tf.Tensor, shape: Sequence[int]) -> tf.Tensor:
return tf.reshape(array, shape)
+ def round(self, array: tf.Tensor, decimals: int = 0) -> tf.Tensor:
+ return tf.round(10**decimals * array) / 10**decimals
+
def set_diag(self, array: tf.Tensor, diag: tf.Tensor, k: int) -> tf.Tensor:
return tf.linalg.set_diag(array, diag, k=k)
@@ -300,24 +327,17 @@ def transpose(self, a: tf.Tensor, perm: Sequence[int] = None) -> tf.Tensor:
return tf.transpose(a, perm)
@Autocast()
- def update_tensor(self, tensor: tf.Tensor, indices: tf.Tensor, values: tf.Tensor):
+ def update_tensor(self, tensor: tf.Tensor, indices: tf.Tensor, values: tf.Tensor) -> tf.Tensor:
return tf.tensor_scatter_nd_update(tensor, indices, values)
@Autocast()
- def update_add_tensor(self, tensor: tf.Tensor, indices: tf.Tensor, values: tf.Tensor):
+ def update_add_tensor(
+ self, tensor: tf.Tensor, indices: tf.Tensor, values: tf.Tensor
+ ) -> tf.Tensor:
return tf.tensor_scatter_nd_add(tensor, indices, values)
- def unique_tensors(self, lst: List[Tensor]) -> List[Tensor]:
- hash_dict = {}
- for tensor in lst:
- try:
- if (hash := self.hash_tensor(tensor)) not in hash_dict:
- hash_dict[hash] = tensor
- except TypeError:
- continue
- return list(hash_dict.values())
-
- def zeros(self, shape: Sequence[int], dtype=tf.float64) -> tf.Tensor:
+ def zeros(self, shape: Sequence[int], dtype=None) -> tf.Tensor:
+ dtype = dtype or self.float64
return tf.zeros(shape, dtype=dtype)
def zeros_like(self, array: tf.Tensor) -> tf.Tensor:
@@ -338,14 +358,34 @@ def Categorical(self, probs: Tensor, name: str):
def MultivariateNormalTriL(self, loc: Tensor, scale_tril: Tensor):
return tfp.distributions.MultivariateNormalTriL(loc=loc, scale_tril=scale_tril)
+ @staticmethod
+ def eigh(tensor: tf.Tensor) -> Tensor:
+ return tf.linalg.eigh(tensor)
+
+ @staticmethod
+ def eigvals(tensor: tf.Tensor) -> Tensor:
+ return tf.linalg.eigvals(tensor)
+
+ @staticmethod
+ def xlogy(x: tf.Tensor, y: tf.Tensor) -> Tensor:
+ return tf.math.xlogy(x, y)
+
+ def sqrtm(self, tensor: tf.Tensor, dtype, rtol=1e-05, atol=1e-08) -> Tensor:
+ # The sqrtm function has issues with matrices that are close to zero, hence we branch
+ if np.allclose(tensor, 0, rtol=rtol, atol=atol):
+ ret = self.zeros_like(tensor)
+ else:
+ ret = tf.linalg.sqrtm(tensor)
+
+ if dtype is None:
+ return self.cast(ret, self.complex128)
+ return self.cast(ret, dtype)
+
# ~~~~~~~~~~~~~~~~~
# Special functions
# ~~~~~~~~~~~~~~~~~
- # TODO: is a wrapper class better?
- @staticmethod
- def DefaultEuclideanOptimizer() -> tf.keras.optimizers.legacy.Optimizer:
- r"""Default optimizer for the Euclidean parameters."""
+ def DefaultEuclideanOptimizer(self) -> tf.keras.optimizers.legacy.Optimizer:
return tf.keras.optimizers.legacy.Adam(learning_rate=0.001)
def value_and_gradients(
@@ -368,7 +408,7 @@ def value_and_gradients(
@tf.custom_gradient
def hermite_renormalized(
self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, shape: Tuple[int]
- ) -> tf.Tensor:
+ ) -> Tuple[tf.Tensor, Callable]:
r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor
series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)`
at the denominator rather than :math:`n!`. It computes all the amplitudes within the
@@ -383,15 +423,41 @@ def hermite_renormalized(
Returns:
The renormalized Hermite polynomial of given shape.
"""
- _A, _B, _C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C)
- G = strategies.vanilla(tuple(shape), _A, _B, _C)
+
+ precision_bits = settings.PRECISION_BITS_HERMITE_POLY
+
+ A, B, C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C)
+
+ if precision_bits == 128: # numba
+ G = strategies.vanilla(tuple(shape), A, B, C)
+ else: # julia
+ # The following import must come after running "jl = Julia(compiled_modules=False)" in settings.py
+ from julia import Main as Main_julia # pylint: disable=import-outside-toplevel
+
+ A, B, C = (
+ A.astype(np.complex128),
+ B.astype(np.complex128),
+ C.astype(np.complex128),
+ )
+
+ G = Main_julia.Vanilla.vanilla(
+ A, B, C.item(), np.array(shape, dtype=np.int64), precision_bits
+ )
def grad(dLdGconj):
- dLdA, dLdB, dLdC = strategies.vanilla_vjp(G, _C, np.conj(dLdGconj))
+ dLdA, dLdB, dLdC = strategies.vanilla_vjp(G, C, np.conj(dLdGconj))
return self.conj(dLdA), self.conj(dLdB), self.conj(dLdC)
return G, grad
+ def hermite_renormalized_batch(
+ self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, shape: Tuple[int]
+ ) -> tf.Tensor:
+ _A, _B, _C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C)
+
+ G = strategies.vanilla_batch(tuple(shape), _A, _B, _C)
+ return G
+
@tf.custom_gradient
def hermite_renormalized_binomial(
self,
@@ -436,21 +502,20 @@ def grad(dLdGconj):
return G, grad
def reorder_AB_bargmann(self, A: tf.Tensor, B: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
- r"""In mrmustard.math.numba.compactFock~ dimensions of the Fock representation are ordered like [mode0,mode0,mode1,mode1,...]
+ r"""In mrmustard.math.compactFock.compactFock~ dimensions of the Fock representation are ordered like [mode0,mode0,mode1,mode1,...]
while in mrmustard.physics.bargmann the ordering is [mode0,mode1,...,mode0,mode1,...]. Here we reorder A and B.
"""
- ordering = np.arange(2 * A.shape[0] // 2).reshape(2, -1).T.flatten()
+ ordering = (
+ np.arange(2 * A.shape[0] // 2).reshape(2, -1).T.flatten()
+ ) # ordering interleaves the two halves: [0, M, 1, M+1, ...] with M = A.shape[0] // 2
A = tf.gather(A, ordering, axis=1)
A = tf.gather(A, ordering)
- B = tf.gather(B, ordering)
+ B = tf.gather(B, ordering, axis=0)
return A, B
def hermite_renormalized_diagonal(
self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int]
) -> tf.Tensor:
- r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.numba.compactFock~
- Then, calculate the required renormalized multidimensional Hermite polynomial.
- """
A, B = self.reorder_AB_bargmann(A, B)
return self.hermite_renormalized_diagonal_reorderedAB(A, B, C, cutoffs=cutoffs)
@@ -474,16 +539,40 @@ def hermite_renormalized_diagonal_reorderedAB(
Returns:
The renormalized Hermite polynomial.
"""
- poly0, poly2, poly1010, poly1001, poly1 = tf.numpy_function(
- hermite_multidimensional_diagonal, [A, B, C, cutoffs], [A.dtype] * 5
- )
+ A, B, C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C)
+ precision_bits = settings.PRECISION_BITS_HERMITE_POLY
- def grad(dLdpoly):
- dpoly_dC, dpoly_dA, dpoly_dB = tf.numpy_function(
- grad_hermite_multidimensional_diagonal,
- [A, B, C, poly0, poly2, poly1010, poly1001, poly1],
- [poly0.dtype] * 3,
+ if precision_bits == 128: # numba (complex128)
+ poly0, poly2, poly1010, poly1001, poly1 = tf.numpy_function(
+ hermite_multidimensional_diagonal, [A, B, C, cutoffs], [A.dtype] * 5
+ )
+ else: # julia (higher precision than complex128)
+ # The following import must come after running "jl = Julia(compiled_modules=False)" in settings.py
+ from julia import Main as Main_julia # pylint: disable=import-outside-toplevel
+
+ (
+ poly0,
+ poly2,
+ poly1010,
+ poly1001,
+ poly1,
+ ) = Main_julia.DiagonalAmps.fock_diagonal_amps(
+ A, B, C.item(), tuple(cutoffs), precision_bits
)
+
+ def grad(dLdpoly):
+ if precision_bits == 128: # numba (complex128)
+ dpoly_dC, dpoly_dA, dpoly_dB = tf.numpy_function(
+ grad_hermite_multidimensional_diagonal,
+ [A, B, C.item(), poly0, poly2, poly1010, poly1001, poly1],
+ [poly0.dtype] * 3,
+ )
+ else: # julia (higher precision than complex128)
+ dpoly_dC = poly0 / C.item()
+ dpoly_dA, dpoly_dB = Main_julia.DiagonalGrad.fock_diagonal_grad(
+ A, B, poly0, poly2, poly1010, poly1001, poly1, precision_bits
+ )
+
ax = tuple(range(dLdpoly.ndim))
dLdA = self.sum(dLdpoly[..., None, None] * self.conj(dpoly_dA), axes=ax)
dLdB = self.sum(dLdpoly[..., None] * self.conj(dpoly_dB), axes=ax)
@@ -492,10 +581,39 @@ def grad(dLdpoly):
return poly0, grad
+ def hermite_renormalized_diagonal_batch(
+ self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int]
+ ) -> tf.Tensor:
+ r"""Same as hermite_renormalized_diagonal but works for a batch of different B's."""
+ A, B = self.reorder_AB_bargmann(A, B)
+ return self.hermite_renormalized_diagonal_reorderedAB_batch(A, B, C, cutoffs=cutoffs)
+
+ def hermite_renormalized_diagonal_reorderedAB_batch(
+ self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int]
+ ) -> tf.Tensor:
+ r"""Same as hermite_renormalized_diagonal_reorderedAB but works for a batch of different B's.
+
+ Args:
+ A: The A matrix.
+ B: The B vectors.
+ C: The C scalar.
+ cutoffs: upper boundary of photon numbers in each mode
+
+ Returns:
+ The renormalized Hermite polynomial from different B values.
+ """
+ A, B, C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C)
+
+ poly0, _, _, _, _ = tf.numpy_function(
+ hermite_multidimensional_diagonal_batch, [A, B, C, cutoffs], [A.dtype] * 5
+ )
+
+ return poly0
+
def hermite_renormalized_1leftoverMode(
self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int]
) -> tf.Tensor:
- r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.numba.compactFock~
+ r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.compactFock.compactFock~
Then, calculate the required renormalized multidimensional Hermite polynomial.
"""
A, B = self.reorder_AB_bargmann(A, B)
@@ -522,16 +640,45 @@ def hermite_renormalized_1leftoverMode_reorderedAB(
Returns:
The renormalized Hermite polynomial.
"""
- poly0, poly2, poly1010, poly1001, poly1 = tf.numpy_function(
- hermite_multidimensional_1leftoverMode, [A, B, C, cutoffs], [A.dtype] * 5
- )
+ A, B, C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C)
+ precision_bits = settings.PRECISION_BITS_HERMITE_POLY
+
+ if precision_bits == 128: # numba (complex128)
+ poly0, poly2, poly1010, poly1001, poly1 = tf.numpy_function(
+ hermite_multidimensional_1leftoverMode,
+ [A, B, C.item(), cutoffs],
+ [A.dtype] * 5,
+ )
+ else: # julia (higher precision than complex128)
+ # The following import must come after running "jl = Julia(compiled_modules=False)" in settings.py
+ from julia import Main as Main_julia # pylint: disable=import-outside-toplevel
+
+ (
+ poly0,
+ poly2,
+ poly1010,
+ poly1001,
+ poly1,
+ ) = Main_julia.LeftoverModeAmps.fock_1leftoverMode_amps(
+ A, B, C.item(), tuple(cutoffs), precision_bits
+ )
def grad(dLdpoly):
- dpoly_dC, dpoly_dA, dpoly_dB = tf.numpy_function(
- grad_hermite_multidimensional_1leftoverMode,
- [A, B, C, poly0, poly2, poly1010, poly1001, poly1],
- [poly0.dtype] * 3,
- )
+ if precision_bits == 128: # numba (complex128)
+ dpoly_dC, dpoly_dA, dpoly_dB = tf.numpy_function(
+ grad_hermite_multidimensional_1leftoverMode,
+ [A, B, C, poly0, poly2, poly1010, poly1001, poly1],
+ [poly0.dtype] * 3,
+ )
+ else: # julia (higher precision than complex128)
+ dpoly_dC = poly0 / C.item()
+ (
+ dpoly_dA,
+ dpoly_dB,
+ ) = Main_julia.LeftoverModeGrad.fock_1leftoverMode_grad(
+ A, B, poly0, poly2, poly1010, poly1001, poly1, precision_bits
+ )
+
ax = tuple(range(dLdpoly.ndim))
dLdA = self.sum(dLdpoly[..., None, None] * self.conj(dpoly_dA), axes=ax)
dLdB = self.sum(dLdpoly[..., None] * self.conj(dpoly_dB), axes=ax)
@@ -540,53 +687,6 @@ def grad(dLdpoly):
return poly0, grad
- @staticmethod
- def eigvals(tensor: tf.Tensor) -> Tensor:
- """Returns the eigenvalues of a matrix."""
- return tf.linalg.eigvals(tensor)
-
- @staticmethod
- def eigvalsh(tensor: tf.Tensor) -> Tensor:
- """Returns the eigenvalues of a Real Symmetric or Hermitian matrix."""
- return tf.linalg.eigvalsh(tensor)
-
- @staticmethod
- def svd(tensor: tf.Tensor) -> Tensor:
- """Returns the Singular Value Decomposition of a matrix."""
- return tf.linalg.svd(tensor)
-
- @staticmethod
- def xlogy(x: tf.Tensor, y: tf.Tensor) -> Tensor:
- """Returns 0 if ``x == 0,`` and ``x * log(y)`` otherwise, elementwise."""
- return tf.math.xlogy(x, y)
-
- @staticmethod
- def eigh(tensor: tf.Tensor) -> Tensor:
- """Returns the eigenvalues and eigenvectors of a matrix."""
- return tf.linalg.eigh(tensor)
-
- def sqrtm(self, tensor: tf.Tensor, rtol=1e-05, atol=1e-08) -> Tensor:
- """Returns the matrix square root of a square matrix, such that ``sqrt(A) @ sqrt(A) = A``."""
-
- # The sqrtm function has issues with matrices that are close to zero, hence we branch
- if np.allclose(tensor, 0, rtol=rtol, atol=atol):
- return self.zeros_like(tensor)
- return tf.linalg.sqrtm(tensor)
-
- @staticmethod
- def boolean_mask(tensor: tf.Tensor, mask: tf.Tensor) -> Tensor:
- """Returns a tensor based on the truth value of the boolean mask."""
- return tf.boolean_mask(tensor, mask)
-
- @staticmethod
- def custom_gradient(func, *args, **kwargs):
- """Decorator to define a function with a custom gradient."""
- return tf.custom_gradient(func, *args, **kwargs)
-
- # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- # Extras (not in the Interface)
- # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
@tf.custom_gradient
def getitem(tensor, *, key):
"""A differentiable pure equivalent of numpy's ``value = tensor[key]``."""
diff --git a/mrmustard/math/caching.py b/mrmustard/math/caching.py
index 712bd2f08..8b3baa873 100644
--- a/mrmustard/math/caching.py
+++ b/mrmustard/math/caching.py
@@ -15,8 +15,11 @@
"""This module contains the logic for cachin tensor functions in Mr Mustard."""
from functools import lru_cache, wraps
+from mrmustard.math.backend_manager import BackendManager
import numpy as np
+math = BackendManager()
+
def tensor_int_cache(fn):
"""Decorator function to cache functions with a 1D Tensor (Vector) and int as arguments,
@@ -33,7 +36,7 @@ def cached_wrapper(hashable_array, cutoff):
@wraps(fn)
def wrapper(tensor, cutoff):
- return cached_wrapper(tuple(tensor.numpy()), cutoff)
+ return cached_wrapper(tuple(math.asnumpy(tensor)), cutoff)
# copy lru_cache attributes over too
wrapper.cache_info = cached_wrapper.cache_info
diff --git a/mrmustard/math/lattice/paths.py b/mrmustard/math/lattice/paths.py
index 4f3169a30..1c5148c27 100644
--- a/mrmustard/math/lattice/paths.py
+++ b/mrmustard/math/lattice/paths.py
@@ -76,6 +76,3 @@ def BINOMIAL_PATHS_NUMBA_n(modes):
key_type=typeof(((0,) * modes, 0)),
value_type=types.ListType(typeof((0,) * modes)),
)
-
-
-BINOMIAL_PATHS_NUMBA = {modes: BINOMIAL_PATHS_NUMBA_n(modes) for modes in range(1, 100)}
diff --git a/mrmustard/math/lattice/steps.py b/mrmustard/math/lattice/steps.py
index 573e04e5a..ea86f5290 100644
--- a/mrmustard/math/lattice/steps.py
+++ b/mrmustard/math/lattice/steps.py
@@ -28,7 +28,7 @@
from mrmustard.math.lattice.neighbors import lower_neighbors
from mrmustard.math.lattice.pivots import first_available_pivot
-from mrmustard.typing import ComplexMatrix, ComplexTensor, ComplexVector
+from mrmustard.utils.typing import ComplexMatrix, ComplexTensor, ComplexVector
SQRT = np.sqrt(np.arange(100000))
@@ -67,6 +67,42 @@ def vanilla_step(
return value_at_index / SQRT[index[i]]
+@njit
+def vanilla_step_batch(
+ G: ComplexTensor,
+ A: ComplexMatrix,
+ b: ComplexTensor,
+ index: tuple[int, ...],
+) -> complex: # pragma: no cover
+ r"""Fock-Bargmann recurrence relation step, vanilla batched version.
+ This function returns the amplitude of the Gaussian tensor G
+ at G[index]. It does not modify G.
+ The necessary pivot and neighbours must have already been computed,
+ as this step will read those values from G.
+ Note that this function differs from vanilla_step in that b is no longer a vector:
+ it is a batched vector, with the batch dimension on the last index.
+
+ Args:
+ G (array or dict): fock amplitudes data store that supports getitem[tuple[int, ...]]
+ A (array): A matrix of the Fock-Bargmann representation
+ b (array): batched B vector of the Fock-Bargmann representation, the batch dimension is on the last index
+ index (Sequence): index of the amplitude to calculate
+ Returns:
+ array: the value of the amplitude at the given index according to each batch on the last index
+ """
+ # get pivot
+ i, pivot = first_available_pivot(index)
+
+ # pivot contribution
+ value_at_index = b[i] * G[pivot]
+
+ # neighbors contribution
+ for j, neighbor in lower_neighbors(pivot):
+ value_at_index += A[i, j] * SQRT[pivot[j]] * G[neighbor]
+
+ return value_at_index / SQRT[index[i]]
+
+
@njit
def vanilla_step_jacobian(
G: ComplexTensor,
diff --git a/mrmustard/math/lattice/strategies/beamsplitter.py b/mrmustard/math/lattice/strategies/beamsplitter.py
index 285ff9901..60ecc1d0b 100644
--- a/mrmustard/math/lattice/strategies/beamsplitter.py
+++ b/mrmustard/math/lattice/strategies/beamsplitter.py
@@ -26,7 +26,7 @@
from numba import njit
from mrmustard.math.lattice import steps
-from mrmustard.typing import ComplexMatrix, ComplexTensor, ComplexVector
+from mrmustard.utils.typing import ComplexMatrix, ComplexTensor, ComplexVector
SQRT = np.sqrt(np.arange(100000))
diff --git a/mrmustard/math/lattice/strategies/binomial.py b/mrmustard/math/lattice/strategies/binomial.py
index 771675d1a..d7b867e4b 100644
--- a/mrmustard/math/lattice/strategies/binomial.py
+++ b/mrmustard/math/lattice/strategies/binomial.py
@@ -20,7 +20,7 @@
from numba import njit, typed, types
from mrmustard.math.lattice import paths, steps
-from mrmustard.typing import ComplexMatrix, ComplexTensor, ComplexVector
+from mrmustard.utils.typing import ComplexMatrix, ComplexTensor, ComplexVector
SQRT = np.sqrt(np.arange(100000))
diff --git a/mrmustard/math/numba/__init__.py b/mrmustard/math/lattice/strategies/compactFock/__init__.py
similarity index 100%
rename from mrmustard/math/numba/__init__.py
rename to mrmustard/math/lattice/strategies/compactFock/__init__.py
diff --git a/mrmustard/math/numba/compactFock_diagonal_amps.py b/mrmustard/math/lattice/strategies/compactFock/diagonal_amps.py
similarity index 74%
rename from mrmustard/math/numba/compactFock_diagonal_amps.py
rename to mrmustard/math/lattice/strategies/compactFock/diagonal_amps.py
index d78df379a..4a765fe52 100644
--- a/mrmustard/math/numba/compactFock_diagonal_amps.py
+++ b/mrmustard/math/lattice/strategies/compactFock/diagonal_amps.py
@@ -7,7 +7,7 @@
import numba
from numba import njit, int64
from numba.cpython.unsafe.tuple import tuple_setitem
-from mrmustard.math.numba.compactFock_helperFunctions import (
+from mrmustard.math.lattice.strategies.compactFock.helperFunctions import (
SQRT,
repeat_twice,
construct_dict_params,
@@ -15,7 +15,9 @@
@njit
-def use_offDiag_pivot(A, B, M, cutoffs, params, d, arr0, arr2, arr1010, arr1001, arr1):
+def use_offDiag_pivot(
+ A, B, M, cutoffs, params, d, arr0, arr2, arr1010, arr1001, arr1
+): # pragma: no cover
"""
Apply recurrence relation for pivot of type [a+1,a,b,b,c,c,...] / [a,a,b+1,b,c,c,...] / [a,a,b,b,c+1,c,...]
Args:
@@ -33,7 +35,10 @@ def use_offDiag_pivot(A, B, M, cutoffs, params, d, arr0, arr2, arr1010, arr1001,
pivot[2 * d] += 1
K_l = SQRT[pivot]
K_i = SQRT[pivot + 1]
- G_in = np.empty(2 * M, dtype=np.complex128)
+ if B.ndim == 1:
+ G_in = np.zeros(2 * M, dtype=np.complex128)
+ elif B.ndim == 2:
+ G_in = np.zeros((2 * M, B.shape[1]), dtype=np.complex128)
########## READ ##########
GB = arr1[(2 * d,) + params] * B
@@ -54,7 +59,10 @@ def use_offDiag_pivot(A, B, M, cutoffs, params, d, arr0, arr2, arr1010, arr1001,
G_in[2 * i + 1] = arr1010[(d, i - d - 1) + params_adapted]
########## WRITE ##########
- G_in = np.multiply(K_l, G_in)
+ if B.ndim == 1:
+ G_in = np.multiply(K_l, G_in)
+ elif B.ndim == 2:
+ G_in = np.multiply(np.expand_dims(K_l, 1), G_in)
# Array0
params_adapted = tuple_setitem(params, d, params[d] + 1)
@@ -76,7 +84,7 @@ def use_offDiag_pivot(A, B, M, cutoffs, params, d, arr0, arr2, arr1010, arr1001,
@njit
-def use_diag_pivot(A, B, M, cutoffs, params, arr0, arr1):
+def use_diag_pivot(A, B, M, cutoffs, params, arr0, arr1): # pragma: no cover
"""
Apply recurrence relation for pivot of type [a,a,b,b,c,c...]
Args:
@@ -91,7 +99,10 @@ def use_diag_pivot(A, B, M, cutoffs, params, arr0, arr1):
pivot = repeat_twice(params)
K_l = SQRT[pivot]
K_i = SQRT[pivot + 1]
- G_in = np.empty(2 * M, dtype=np.complex128)
+ if B.ndim == 1:
+ G_in = np.zeros(2 * M, dtype=np.complex128)
+ elif B.ndim == 2:
+ G_in = np.zeros((2 * M, B.shape[1]), dtype=np.complex128)
########## READ ##########
GB = arr0[params] * B
@@ -105,7 +116,10 @@ def use_diag_pivot(A, B, M, cutoffs, params, arr0, arr1):
] # [i+1-2*(i%2) for i in range(6)] = [1,0,3,2,5,4]
########## WRITE ##########
- G_in = np.multiply(K_l, G_in)
+ if B.ndim == 1:
+ G_in = np.multiply(K_l, G_in)
+ elif B.ndim == 2:
+ G_in = np.multiply(np.expand_dims(K_l, 1), G_in)
# Array1
for i in range(2 * M):
@@ -120,10 +134,10 @@ def use_diag_pivot(A, B, M, cutoffs, params, arr0, arr1):
@njit
def fock_representation_diagonal_amps_NUMBA(
A, B, M, cutoffs, arr0, arr2, arr1010, arr1001, arr1, tuple_type, list_type
-):
+): # pragma: no cover
"""
- Returns the PNR probabilities of a state or Choi state
- (by using the recurrence relation to calculate a limited number of Fock amplitudes)
+ Returns the PNR probabilities of a mixed state according to algorithm 1 of:
+ https://doi.org/10.22331/q-2023-08-29-1097
Args:
A, B (array, vector): required input for recurrence relation (given by mrmustard.physics.fock.ABC)
M (int): number of modes
@@ -165,16 +179,30 @@ def fock_representation_diagonal_amps(A, B, G0, M, cutoffs):
tuple_type = numba.types.UniTuple(int64, M)
list_type = numba.types.ListType(tuple_type)
- arr0 = np.empty(cutoffs, dtype=np.complex128)
+ if B.ndim == 1:
+ arr0 = np.zeros(cutoffs, dtype=np.complex128)
+ arr2 = np.zeros((M,) + cutoffs, dtype=np.complex128)
+ arr1 = np.zeros((2 * M,) + cutoffs, dtype=np.complex128)
+ if M == 1:
+ arr1010 = np.zeros((1, 1, 1), dtype=np.complex128)
+ arr1001 = np.zeros((1, 1, 1), dtype=np.complex128)
+ else:
+ arr1010 = np.zeros((M, M - 1) + cutoffs, dtype=np.complex128)
+ arr1001 = np.zeros((M, M - 1) + cutoffs, dtype=np.complex128)
+
+ elif B.ndim == 2:
+ batch_length = B.shape[1]
+ arr0 = np.zeros(cutoffs + (batch_length,), dtype=np.complex128)
+ arr2 = np.zeros((M,) + cutoffs + (batch_length,), dtype=np.complex128)
+ arr1 = np.zeros((2 * M,) + cutoffs + (batch_length,), dtype=np.complex128)
+ if M == 1:
+ arr1010 = np.zeros((1, 1, 1) + (batch_length,), dtype=np.complex128)
+ arr1001 = np.zeros((1, 1, 1) + (batch_length,), dtype=np.complex128)
+ else:
+ arr1010 = np.zeros((M, M - 1) + cutoffs + (batch_length,), dtype=np.complex128)
+ arr1001 = np.zeros((M, M - 1) + cutoffs + (batch_length,), dtype=np.complex128)
+
arr0[(0,) * M] = G0
- arr2 = np.empty((M,) + cutoffs, dtype=np.complex128)
- arr1 = np.empty((2 * M,) + cutoffs, dtype=np.complex128)
- if M == 1:
- arr1010 = np.empty((1, 1, 1), dtype=np.complex128)
- arr1001 = np.empty((1, 1, 1), dtype=np.complex128)
- else:
- arr1010 = np.empty((M, M - 1) + cutoffs, dtype=np.complex128)
- arr1001 = np.empty((M, M - 1) + cutoffs, dtype=np.complex128)
return fock_representation_diagonal_amps_NUMBA(
A, B, M, cutoffs, arr0, arr2, arr1010, arr1001, arr1, tuple_type, list_type
)
diff --git a/mrmustard/math/numba/compactFock_diagonal_grad.py b/mrmustard/math/lattice/strategies/compactFock/diagonal_grad.py
similarity index 97%
rename from mrmustard/math/numba/compactFock_diagonal_grad.py
rename to mrmustard/math/lattice/strategies/compactFock/diagonal_grad.py
index b3aab8fcd..ec1bb36bd 100644
--- a/mrmustard/math/numba/compactFock_diagonal_grad.py
+++ b/mrmustard/math/lattice/strategies/compactFock/diagonal_grad.py
@@ -7,7 +7,7 @@
import numba
from numba import njit, int64
from numba.cpython.unsafe.tuple import tuple_setitem
-from mrmustard.math.numba.compactFock_helperFunctions import (
+from mrmustard.math.lattice.strategies.compactFock.helperFunctions import (
SQRT,
repeat_twice,
construct_dict_params,
@@ -225,7 +225,8 @@ def fock_representation_diagonal_grad_NUMBA(
A, B, M, cutoffs, arr0, arr2, arr1010, arr1001, arr1, tuple_type, list_type
):
"""
- Returns the PNR probabilities of a state or Choi state (by using the recurrence relation to calculate a limited number of Fock amplitudes)
+ Returns the gradients of the PNR probabilities of a mixed state according to algorithm 1 of
+ https://doi.org/10.22331/q-2023-08-29-1097
Args:
A, B (array, vector): required input for recurrence relation (given by mrmustard.physics.fock.ABC)
M (int): number of modes
@@ -237,7 +238,7 @@ def fock_representation_diagonal_grad_NUMBA(
arr1 (array): submatrix of the fock representation that contains Fock amplitudes of the types [a+1,a,b,b,c,c...] / [a,a+1,b,b,c,c...] / [a,a,b+1,b,c,c...] / ...
tuple_type, list_type (Numba types): numba types that need to be defined outside of Numba compiled functions
Returns:
- array: the fock representation
+ array: the derivatives of the fock representation w.r.t. A and B
"""
arr0_dA = np.zeros(arr0.shape + A.shape, dtype=np.complex128)
arr2_dA = np.zeros(arr2.shape + A.shape, dtype=np.complex128)
diff --git a/mrmustard/math/numba/compactFock_helperFunctions.py b/mrmustard/math/lattice/strategies/compactFock/helperFunctions.py
similarity index 87%
rename from mrmustard/math/numba/compactFock_helperFunctions.py
rename to mrmustard/math/lattice/strategies/compactFock/helperFunctions.py
index 6b0381193..12a828f90 100644
--- a/mrmustard/math/numba/compactFock_helperFunctions.py
+++ b/mrmustard/math/lattice/strategies/compactFock/helperFunctions.py
@@ -1,6 +1,6 @@
"""
This module contains helper functions that are used in
-compactFock_diagonal_amps.py, compactFock_diagonal_grad.py, compactFock_1leftoverMode_amps.py and compactFock_1leftoverMode_grad.py
+diagonal_amps.py, diagonal_grad.py, singleLeftoverMode_amps.py and singleLeftoverMode_grad.py
"""
import numpy as np
@@ -20,7 +20,7 @@ def repeat_twice(params):
Returns:
(1D array): [a,a,b,b,c,c,...]
"""
- pivot = np.empty(2 * len(params), dtype=np.int64)
+ pivot = np.zeros(2 * len(params), dtype=np.int64)
for i, val in enumerate(params):
pivot[2 * i] = val
pivot[2 * i + 1] = val
diff --git a/mrmustard/math/numba/compactFock_inputValidation.py b/mrmustard/math/lattice/strategies/compactFock/inputValidation.py
similarity index 60%
rename from mrmustard/math/numba/compactFock_inputValidation.py
rename to mrmustard/math/lattice/strategies/compactFock/inputValidation.py
index 30b1607d2..237c7de59 100644
--- a/mrmustard/math/numba/compactFock_inputValidation.py
+++ b/mrmustard/math/lattice/strategies/compactFock/inputValidation.py
@@ -1,17 +1,21 @@
"""
This module contains helper functions that are used in
-compactFock_diagonal_amps.py, compactFock_diagonal_grad.py, compactFock_1leftoverMode_amps.py and compactFock_1leftoverMode_grad.py
+diagonal_amps.py, diagonal_grad.py, singleLeftoverMode_amps.py and singleLeftoverMode_grad.py
to validate the input provided by the user.
"""
from typing import Iterable
import numpy as np
-from mrmustard.math.numba.compactFock_diagonal_amps import fock_representation_diagonal_amps
-from mrmustard.math.numba.compactFock_diagonal_grad import fock_representation_diagonal_grad
-from mrmustard.math.numba.compactFock_1leftoverMode_amps import (
+from mrmustard.math.lattice.strategies.compactFock.diagonal_amps import (
+ fock_representation_diagonal_amps,
+)
+from mrmustard.math.lattice.strategies.compactFock.diagonal_grad import (
+ fock_representation_diagonal_grad,
+)
+from mrmustard.math.lattice.strategies.compactFock.singleLeftoverMode_amps import (
fock_representation_1leftoverMode_amps,
)
-from mrmustard.math.numba.compactFock_1leftoverMode_grad import (
+from mrmustard.math.lattice.strategies.compactFock.singleLeftoverMode_grad import (
fock_representation_1leftoverMode_grad,
)
from thewalrus._hafnian import input_validation
@@ -19,7 +23,7 @@
def hermite_multidimensional_diagonal(A, B, G0, cutoffs, rtol=1e-05, atol=1e-08):
"""
- Validation of user input for mrmustard.math.tensorflow.hermite_renormalized_diagonal
+ Validation of user input for mrmustard.math.backend_tensorflow.hermite_renormalized_diagonal
"""
input_validation(A, atol=atol, rtol=rtol)
if A.shape[0] != B.shape[0]:
@@ -36,7 +40,7 @@ def hermite_multidimensional_diagonal(A, B, G0, cutoffs, rtol=1e-05, atol=1e-08)
def grad_hermite_multidimensional_diagonal(A, B, G0, arr0, arr2, arr1010, arr1001, arr1):
"""
- Validation of user input for gradients of mrmustard.math.tensorflow.hermite_renormalized_diagonal
+ Validation of user input for gradients of mrmustard.math.backend_tensorflow.hermite_renormalized_diagonal
"""
if A.shape[0] != B.shape[0]:
raise ValueError("The matrix A and vector B have incompatible dimensions")
@@ -50,7 +54,7 @@ def grad_hermite_multidimensional_diagonal(A, B, G0, arr0, arr2, arr1010, arr100
def hermite_multidimensional_1leftoverMode(A, B, G0, cutoffs, rtol=1e-05, atol=1e-08):
"""
- Validation of user input for mrmustard.math.tensorflow.hermite_renormalized_1leftoverMode
+ Validation of user input for mrmustard.math.backend_tensorflow.hermite_renormalized_1leftoverMode
"""
input_validation(A, atol=atol, rtol=rtol)
if A.shape[0] != B.shape[0]:
@@ -69,7 +73,7 @@ def hermite_multidimensional_1leftoverMode(A, B, G0, cutoffs, rtol=1e-05, atol=1
def grad_hermite_multidimensional_1leftoverMode(A, B, G0, arr0, arr2, arr1010, arr1001, arr1):
"""
- Validation of user input for gradients of mrmustard.math.tensorflow.hermite_renormalized_1leftoverMode
+ Validation of user input for gradients of mrmustard.math.backend_tensorflow.hermite_renormalized_1leftoverMode
"""
if A.shape[0] != B.shape[0]:
raise ValueError("The matrix A and vector B have incompatible dimensions")
@@ -81,3 +85,22 @@ def grad_hermite_multidimensional_1leftoverMode(A, B, G0, arr0, arr2, arr1010, a
)
arr0_dG0 = np.array(arr0 / G0).astype(np.complex128)
return arr0_dG0, arr0_dA, arr0_dB
+
+
+def hermite_multidimensional_diagonal_batch(A, B, G0, cutoffs, rtol=1e-05, atol=1e-08):
+ """
+ Validation of user input for mrmustard.math.backend_tensorflow.hermite_renormalized_diagonal_batch
+ """
+ input_validation(A, atol=atol, rtol=rtol)
+ if len(B.shape) != 2:
+ raise ValueError("B should be two dimensional (vector and batch dimension)")
+ if A.shape[0] != B.shape[0]:
+ raise ValueError("The matrix A and vector B have incompatible dimensions")
+ if isinstance(cutoffs, Iterable):
+ cutoffs = tuple(cutoffs)
+ else:
+ raise ValueError("cutoffs should be array like of length M")
+ M = len(cutoffs)
+ if A.shape[0] // 2 != M:
+ raise ValueError("The matrix A and cutoffs have incompatible dimensions")
+ return fock_representation_diagonal_amps(A, B, G0, M, cutoffs)
diff --git a/mrmustard/math/numba/compactFock_1leftoverMode_amps.py b/mrmustard/math/lattice/strategies/compactFock/singleLeftoverMode_amps.py
similarity index 93%
rename from mrmustard/math/numba/compactFock_1leftoverMode_amps.py
rename to mrmustard/math/lattice/strategies/compactFock/singleLeftoverMode_amps.py
index 959ef5466..ce2746cb3 100644
--- a/mrmustard/math/numba/compactFock_1leftoverMode_amps.py
+++ b/mrmustard/math/lattice/strategies/compactFock/singleLeftoverMode_amps.py
@@ -8,7 +8,7 @@
from numba import int64, njit
from numba.cpython.unsafe.tuple import tuple_setitem
-from mrmustard.math.numba.compactFock_helperFunctions import (
+from mrmustard.math.lattice.strategies.compactFock.helperFunctions import (
SQRT,
construct_dict_params,
repeat_twice,
@@ -21,7 +21,7 @@ def write_block(
): # pragma: no cover
"""
Apply the recurrence relation to blocks of Fock amplitudes (of shape cutoff_leftoverMode x cutoff_leftoverMode)
- This is the coarse-grained version of applying the recurrence relation of mrmustard.math.numba.compactFock_diagonal_amps once.
+ This is the coarse-grained version of applying the recurrence relation of mrmustard.math.compactFock.compactFock_diagonal_amps once.
"""
m, n = 0, 0
A_adapted = A[i, 2:]
@@ -107,7 +107,7 @@ def use_offDiag_pivot(
########## READ ##########
read_GB = (2 * d,) + params
- GB = np.empty((cutoff_leftoverMode, cutoff_leftoverMode, len(B)), dtype=np.complex128)
+ GB = np.zeros((cutoff_leftoverMode, cutoff_leftoverMode, len(B)), dtype=np.complex128)
for m in range(cutoff_leftoverMode):
for n in range(cutoff_leftoverMode):
GB[m, n] = arr1[(m, n) + read_GB] * B
@@ -180,7 +180,7 @@ def use_diag_pivot(A, B, M, cutoff_leftoverMode, cutoffs_tail, params, arr0, arr
########## READ ##########
read_GB = params
- GB = np.empty((cutoff_leftoverMode, cutoff_leftoverMode, len(B)), dtype=np.complex128)
+ GB = np.zeros((cutoff_leftoverMode, cutoff_leftoverMode, len(B)), dtype=np.complex128)
for m in range(cutoff_leftoverMode):
for n in range(cutoff_leftoverMode):
GB[m, n] = arr0[(m, n) + read_GB] * B
@@ -228,7 +228,8 @@ def fock_representation_1leftoverMode_amps_NUMBA(
zero_tuple,
):
"""
- Returns the PNR probabilities of a state or Choi state (by using the recurrence relation to calculate a limited number of Fock amplitudes)
+ Returns the density matrices in the upper, undetected mode of a circuit when all other modes are PNR detected
+ according to algorithm 2 of https://doi.org/10.22331/q-2023-08-29-1097
Args:
A, B (array, vector): required input for recurrence relation (given by mrmustard.physics.fock.ABC)
M (int): number of modes
@@ -299,26 +300,24 @@ def fock_representation_1leftoverMode_amps(A, B, G0, M, cutoffs):
list_type = numba.types.ListType(tuple_type)
zero_tuple = (0,) * (M - 1)
- arr0 = np.zeros(
- (cutoff_leftoverMode, cutoff_leftoverMode) + cutoffs_tail, dtype=np.complex128
- ) # doesn't work with np.empty
+ arr0 = np.zeros((cutoff_leftoverMode, cutoff_leftoverMode) + cutoffs_tail, dtype=np.complex128)
arr0[(0,) * (M + 1)] = G0
- arr2 = np.empty(
+ arr2 = np.zeros(
(cutoff_leftoverMode, cutoff_leftoverMode) + (M - 1,) + cutoffs_tail, dtype=np.complex128
)
- arr1 = np.empty(
+ arr1 = np.zeros(
(cutoff_leftoverMode, cutoff_leftoverMode) + (2 * (M - 1),) + cutoffs_tail,
dtype=np.complex128,
)
if M == 2:
- arr1010 = np.empty((1, 1, 1, 1, 1), dtype=np.complex128)
- arr1001 = np.empty((1, 1, 1, 1, 1), dtype=np.complex128)
+ arr1010 = np.zeros((1, 1, 1, 1, 1), dtype=np.complex128)
+ arr1001 = np.zeros((1, 1, 1, 1, 1), dtype=np.complex128)
else:
- arr1010 = np.empty(
+ arr1010 = np.zeros(
(cutoff_leftoverMode, cutoff_leftoverMode) + (M - 1, M - 2) + cutoffs_tail,
dtype=np.complex128,
)
- arr1001 = np.empty(
+ arr1001 = np.zeros(
(cutoff_leftoverMode, cutoff_leftoverMode) + (M - 1, M - 2) + cutoffs_tail,
dtype=np.complex128,
)
diff --git a/mrmustard/math/numba/compactFock_1leftoverMode_grad.py b/mrmustard/math/lattice/strategies/compactFock/singleLeftoverMode_grad.py
similarity index 98%
rename from mrmustard/math/numba/compactFock_1leftoverMode_grad.py
rename to mrmustard/math/lattice/strategies/compactFock/singleLeftoverMode_grad.py
index d94db1a4e..6d1df9f73 100644
--- a/mrmustard/math/numba/compactFock_1leftoverMode_grad.py
+++ b/mrmustard/math/lattice/strategies/compactFock/singleLeftoverMode_grad.py
@@ -7,7 +7,7 @@
import numba
from numba import njit, int64
from numba.cpython.unsafe.tuple import tuple_setitem
-from mrmustard.math.numba.compactFock_helperFunctions import (
+from mrmustard.math.lattice.strategies.compactFock.helperFunctions import (
SQRT,
repeat_twice,
construct_dict_params,
@@ -66,7 +66,7 @@ def write_block_grad(
):
"""
Apply the derivated recurrence relation to blocks of Fock amplitudes (of shape cutoff_leftoverMode x cutoff_leftoverMode)
- This is the coarse-grained version of applying the derivated recurrence relation of mrmustard.math.numba.compactFock_diagonal_grad once.
+ This is the coarse-grained version of applying the derivated recurrence relation of mrmustard.math.compactFock.compactFock_diagonal_grad once.
"""
# m,n = 0,0
m, n = 0, 0
@@ -591,7 +591,8 @@ def fock_representation_1leftoverMode_grad_NUMBA(
zero_tuple,
):
"""
- Returns the PNR probabilities of a state or Choi state (by using the recurrence relation to calculate a limited number of Fock amplitudes)
+ Returns the gradients of the density matrices in the upper, undetected mode of a circuit when all other modes
+ are PNR detected (according to algorithm 2 of https://doi.org/10.22331/q-2023-08-29-1097)
Args:
A, B (array, Vector): required input for recurrence relation (given by mrmustard.physics.fock.ABC)
M (int): number of modes
diff --git a/mrmustard/math/lattice/strategies/flat_indices.py b/mrmustard/math/lattice/strategies/flat_indices.py
new file mode 100644
index 000000000..4a5d945e0
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/flat_indices.py
@@ -0,0 +1,76 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+r"""
+Contains the functions to operate with flattened indices.
+
+Given a multi-dimensional ``np.ndarray``, we can index its elements using ``np.ndindex``.
+Alternatevely, we can flatten the multi-dimensional array and index its elements with
+``int``s (hereby referred to as ''flat indices'').
+"""
+
+from typing import Iterator, Sequence
+from numba import njit
+
+import numpy as np
+
+
+@njit
+def first_available_pivot(
+ index: int, strides: Sequence[int]
+) -> tuple[int, tuple[int, ...]]: # pragma: no cover
+ r"""
+ Returns the first available pivot for the given flat index.
+ A pivot is a nearest neighbor of the index. Here we pick the first available pivot.
+
+ Arguments:
+ index: the flat index to get the first available pivot of.
+ strides: the strides that allow mapping the flat index to a tuple index.
+
+ Returns:
+ the flat index that was decremented and the pivot.
+ """
+ for i, s in enumerate(strides):
+ y = index - s
+ if y >= 0:
+ return (i, y)
+ raise ValueError("Index is zero.")
+
+
+@njit
+def lower_neighbors(
+ index: int, strides: Sequence[int], start: int
+) -> Iterator[tuple[int, tuple[int, ...]]]: # pragma: no cover
+ r"""
+ Yields the flat indices of the lower neighbors of the given flat index.
+ """
+ for i in range(start, len(strides)):
+ yield i, index - strides[i]
+
+
+@njit
+def shape_to_strides(shape: Sequence[int]) -> Sequence[int]: # pragma: no cover
+ r"""
+ Calculates strides from shape.
+
+ Arguments:
+ shape: the shape of the ``np.ndindex``.
+
+ Returns:
+ the strides that allow mapping a flat index to the corresponding ``np.ndindex``.
+ """
+ strides = np.ones_like(shape)
+ for i in range(1, len(shape)):
+ strides[i - 1] = np.prod(shape[i:])
+ return strides
diff --git a/mrmustard/math/lattice/strategies/julia/compactFock/diagonal_amps.jl b/mrmustard/math/lattice/strategies/julia/compactFock/diagonal_amps.jl
new file mode 100644
index 000000000..0875f9ece
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/julia/compactFock/diagonal_amps.jl
@@ -0,0 +1,148 @@
+module DiagonalAmps
+
+import ..GetPrecision
+import ..CompactFock_HelperFunctions
+
+function use_offDiag_pivot!(A, B, M, cutoffs, params, d, arr0, arr2, arr1010, arr1001, arr1, T, SQRT)
+ """Given params=(a,b,c,...), apply the recurrence relation for the pivots
+ [a+1,a,b,b,c,c,...] / [a,a,b+1,b,c,c,...] / [a,a,b,b,c+1,c,...] / ..."""
+ pivot = CompactFock_HelperFunctions.repeat_twice(params)
+ pivot[2 * d - 1] += 1
+
+ K_l = SQRT[pivot] # julia indexing counters extra zero in SQRT
+ K_i = SQRT[pivot .+ 1] # julia indexing counters extra zero in SQRT
+
+ G_in = zeros(Complex{T}, 2 * M)
+
+ ########## READ ##########
+ GB = arr1[2 * d - 1, params...] .* B
+
+ # Array0
+ G_in[2 * d - 1] = arr0[params...]
+
+ # read from Array2
+ if params[d] > 1
+ params_adapted = collect(params)
+ params_adapted[d] -= 1
+ G_in[2 * d] = arr2[d, params_adapted...]
+ end
+
+ # read from Array11
+ for i in d+1:M # i>d
+ if params[i] > 1
+ params_adapted = collect(params)
+ params_adapted[i] -= 1
+ G_in[2 * i - 1] = arr1001[d, i - d, params_adapted...]
+ G_in[2 * i] = arr1010[d, i - d, params_adapted...]
+ end
+ end
+
+ ########## WRITE ##########
+ G_in .*= K_l
+
+ # Array0
+ params_adapted = collect(params)
+ params_adapted[d] += 1
+ arr0[params_adapted...] = (GB[2 * d] .+ sum(A[2 * d,:] .* G_in)) / K_i[2 * d]
+
+ # Array2
+ if params[d] + 1 < cutoffs[d]
+ arr2[d, params...] = (GB[2 * d - 1] .+ sum(A[2 * d - 1,:] .* G_in)) / K_i[2 * d - 1]
+ end
+
+ # Array11
+ for i in d+1:M # i>d
+ if params[i] < cutoffs[i]
+ arr1010[d, i - d, params...] = (GB[2 * i - 1] .+ sum(A[2 * i - 1,:] .* G_in)) / K_i[2 * i - 1]
+ arr1001[d, i - d, params...] = (GB[2 * i] .+ sum(A[2 * i,:] .* G_in)) / K_i[2 * i]
+ end
+ end
+end
+
+function use_diag_pivot!(A, B, M, cutoffs, params, arr0, arr1, T, SQRT)
+ """Given params=(a,b,c,...), apply the recurrence relation for the pivot [a,a,b,b,c,c...]"""
+ pivot = CompactFock_HelperFunctions.repeat_twice(params)
+ K_l = SQRT[pivot] # julia indexing counters extra zero in SQRT
+ K_i = SQRT[pivot .+ 1] # julia indexing counters extra zero in SQRT
+ G_in = zeros(Complex{T}, 2*M)
+
+ ########## READ ##########
+ GB = arr0[params...] .* B
+
+ # Array1
+ for i in 1:2*M
+ if params[(i-1)÷2+1] > 1
+ params_adapted = collect(params)
+ params_adapted[(i-1)÷2+1] -= 1
+ G_in[i] = arr1[i+1-2*((i-1) % 2), params_adapted...]
+ end
+ end
+
+ ########## WRITE ##########
+ G_in .*= K_l
+
+ # Array1
+ for i in 1:2*M
+ if params[(i-1)÷2+1] < cutoffs[(i-1)÷2+1]
+ # this prevents a few elements from being written that will never be read
+ if i ≠ 2 || params[1] + 1 < cutoffs[1]
+ arr1[i, params...] = (GB[i] .+ sum(A[i,:] .* G_in)) / K_i[i]
+ end
+ end
+ end
+end
+
+function fock_diagonal_amps(
+ A::AbstractMatrix{Complex{Float64}},
+ B::AbstractVector{Complex{Float64}},
+ G0::Complex{Float64},
+ cutoffs::Tuple,
+ precision_bits::Int64
+ )
+ """Returns the PNR probabilities of a mixed state according to algorithm 1 of
+ https://doi.org/10.22331/q-2023-08-29-1097
+ Args:
+ A, B, G0: required input for recurrence relation
+ cutoffs: upper bounds for the number of photons in each mode
+ precision_bits: number of bits used to represent a single Fock amplitude
+ Returns:
+ Submatrices of the Fock representation. Each submatrix contains Fock indices of a certain type.
+ arr0 --> type: [a,a,b,b,c,c...]
+ arr2 --> type: [a+2,a,b,b,c,c...] / [a,a,b+2,b,c,c...] / ...
+ arr1010 --> type: [a+1,a,b+1,b,c,c,...] / [a+1,a,b,b,c+1,c,...] / [a,a,b+1,b,c+1,c,...] / ...
+ arr1001 --> type: [a+1,a,b,b+1,c,c,...] / [a+1,a,b,b,c,c+1,...] / [a,a,b+1,b,c,c+1,...] / ...
+ arr1 --> type: [a+1,a,b,b,c,c...] / [a,a+1,b,b,c,c...] / [a,a,b+1,b,c,c...] / ...
+ """
+
+ T = GetPrecision.get_dtype(precision_bits)
+ SQRT = GetPrecision.SQRT_dict[precision_bits]
+
+ M = length(cutoffs)
+
+ arr0 = zeros(Complex{T}, cutoffs)
+ arr0[fill(1,M)...] = G0
+ arr2 = zeros(Complex{T}, M, cutoffs...)
+ arr1 = zeros(Complex{T}, 2*M, cutoffs...)
+ arr1010 = zeros(Complex{T}, M, M - 1, cutoffs...)
+ arr1001 = zeros(Complex{T}, M, M - 1, cutoffs...)
+
+
+ dict_params = CompactFock_HelperFunctions.construct_dict_params(cutoffs)
+ for sum_params in 0:sum(cutoffs)-1
+ for params in dict_params[sum_params]
+ # diagonal pivots: aa,bb,cc,...
+ if (cutoffs[1] == 1) || (params[1] < cutoffs[1]) # julia indexing!
+ use_diag_pivot!(A, B, M, cutoffs, params, arr0, arr1, T, SQRT)
+ end
+ # off-diagonal pivots: d=1: (a+1)a,bb,cc,... | d=2: 00,(b+1)b,cc,... | d=3: 00,00,(c+1)c,... | ...
+ for d in 1:M
+ if all(params[1:d-1] .== 1) && (params[d] < cutoffs[d]) # julia indexing!
+ use_offDiag_pivot!(A, B, M, cutoffs, params, d, arr0, arr2, arr1010, arr1001, arr1, T, SQRT)
+ end
+ end
+ end
+ end
+ return Complex{Float64}.(arr0), Complex{Float64}.(arr2), Complex{Float64}.(arr1010), Complex{Float64}.(arr1001), Complex{Float64}.(arr1)
+end
+
+end # end module
\ No newline at end of file
diff --git a/mrmustard/math/lattice/strategies/julia/compactFock/diagonal_grad.jl b/mrmustard/math/lattice/strategies/julia/compactFock/diagonal_grad.jl
new file mode 100644
index 000000000..6ed0136b3
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/julia/compactFock/diagonal_grad.jl
@@ -0,0 +1,195 @@
+module DiagonalGrad
+
+import ..GetPrecision
+import ..CompactFock_HelperFunctions
+
+function calc_dA_dB(i, G_in_dA, G_in_dB, G_in, A, B, K_l, K_i, M, pivot_val, pivot_val_dA, pivot_val_dB)
+ """Calculate the derivatives of a single Fock amplitude w.r.t A and B.
+ Args:
+ i (int): the element of the multidim index that is increased
+ G_in, G_in_dA, G_in_dB (array, array, array): all Fock amplitudes from the 'read' group in the recurrence relation and their derivatives w.r.t. A and B
+ A, B (array, vector): required input for recurrence relation (given by mrmustard.physics.fock.ABC)
+ K_l, K_i (vector, vector): SQRT[pivot], SQRT[pivot + 1]
+ M (int): number of modes
+ pivot_val, pivot_val_dA, pivot_val_dB (array, array, array): Fock amplitude at the position of the pivot and its derivatives w.r.t. A and B
+ """
+ dA = pivot_val_dA .* B[i]
+ dB = pivot_val_dB .* B[i]
+ dB[i] += pivot_val
+ for l in 1:2*M
+ dA += K_l[l] * A[i, l] * G_in_dA[l,:,:]
+ dB += K_l[l] * A[i, l] * G_in_dB[l,:]
+ dA[i, l] += G_in[l]
+ end
+ return dA ./ K_i[i], dB ./ K_i[i]
+end
+
+function use_offDiag_pivot_grad!(A, B, M, cutoffs, params, d, arr0, arr2, arr1010, arr1001, arr1,
+ arr0_dA, arr2_dA, arr1010_dA, arr1001_dA, arr1_dA, arr0_dB, arr2_dB, arr1010_dB, arr1001_dB, arr1_dB, T, SQRT)
+ """Given params=(a,b,c,...), apply the eqs. 16 & 17 (of https://doi.org/10.22331/q-2023-08-29-1097)
+ for the pivots [a+1,a,b,b,c,c,...] / [a,a,b+1,b,c,c,...] / [a,a,b,b,c+1,c,...] / ..."""
+
+ pivot = CompactFock_HelperFunctions.repeat_twice(params)
+ pivot[2 * d - 1] += 1
+ K_l = SQRT[pivot] # julia indexing counters extra zero in SQRT
+ K_i = SQRT[pivot .+ 1] # julia indexing counters extra zero in SQRT
+ G_in = zeros(Complex{T}, 2 * M)
+ G_in_dA = zeros(Complex{T}, 2*M, size(A)...)
+ G_in_dB = zeros(Complex{T}, 2*M, size(B)...)
+
+ ########## READ ##########
+ pivot_val = arr1[2 * d - 1, params...]
+ pivot_val_dA = arr1_dA[2 * d - 1, params...,:,:]
+ pivot_val_dB = arr1_dB[2 * d - 1, params...,:]
+
+ # Array0
+ G_in[2 * d - 1] = arr0[params...]
+ G_in_dA[2 * d - 1,:,:] = arr0_dA[params...,:,:]
+ G_in_dB[2 * d - 1,:] = arr0_dB[params...,:]
+
+ # read from Array2
+ if params[d] > 1
+ params_adapted = collect(params)
+ params_adapted[d] -= 1
+ G_in[2 * d] = arr2[d, params_adapted...]
+ G_in_dA[2 * d,:,:] = arr2_dA[d, params_adapted...,:,:]
+ G_in_dB[2 * d,:] = arr2_dB[d, params_adapted...,:]
+ end
+
+ # read from Array11
+ for i in d+1:M # i>d
+ if params[i] > 1
+ params_adapted = collect(params)
+ params_adapted[i] -= 1
+ G_in[2 * i - 1] = arr1001[d, i - d, params_adapted...]
+ G_in_dA[2 * i - 1,:,:] = arr1001_dA[d, i - d, params_adapted...,:,:]
+ G_in_dB[2 * i - 1,:] = arr1001_dB[d, i - d, params_adapted...,:]
+ G_in[2 * i] = arr1010[d, i - d, params_adapted...]
+ G_in_dA[2 * i,:,:] = arr1010_dA[d, i - d, params_adapted...,:,:]
+ G_in_dB[2 * i,:] = arr1010_dB[d, i - d, params_adapted...,:]
+ end
+ end
+
+ ########## WRITE ##########
+ G_in .*= K_l
+
+ # Array0
+ params_adapted = collect(params)
+ params_adapted[d] += 1
+ arr0_dA[params_adapted...,:,:], arr0_dB[params_adapted...,:] = calc_dA_dB(2 * d, G_in_dA, G_in_dB, G_in, A, B, K_l, K_i, M, pivot_val, pivot_val_dA, pivot_val_dB)
+
+ # Array2
+ if params[d] + 1 < cutoffs[d]
+ arr2_dA[d, params...,:,:], arr2_dB[d, params...,:] = calc_dA_dB(2 * d - 1, G_in_dA, G_in_dB, G_in, A, B, K_l, K_i, M, pivot_val, pivot_val_dA, pivot_val_dB)
+ end
+
+ # Array11
+ for i in d+1:M # i>d
+ if params[i] < cutoffs[i]
+ arr1010_dA[d, i - d, params...,:,:], arr1010_dB[d, i - d, params...,:] = calc_dA_dB(2 * i - 1, G_in_dA, G_in_dB, G_in, A, B, K_l, K_i, M, pivot_val, pivot_val_dA, pivot_val_dB)
+ arr1001_dA[d, i - d, params...,:,:], arr1001_dB[d, i - d, params...,:] = calc_dA_dB(2 * i, G_in_dA, G_in_dB, G_in, A, B, K_l, K_i, M, pivot_val, pivot_val_dA, pivot_val_dB)
+ end
+ end
+end
+function use_diag_pivot_grad!(A, B, M, cutoffs, params, arr0, arr1, arr0_dA, arr1_dA, arr0_dB, arr1_dB, T, SQRT)
+ """Given params=(a,b,c,...), apply the eqs. 16 & 17 (of https://doi.org/10.22331/q-2023-08-29-1097)
+ for the pivot [a,a,b,b,c,c...]"""
+ pivot = CompactFock_HelperFunctions.repeat_twice(params)
+ K_l = SQRT[pivot] # julia indexing counters extra zero in SQRT
+ K_i = SQRT[pivot .+ 1] # julia indexing counters extra zero in SQRT
+ G_in = zeros(Complex{T}, 2*M)
+ G_in_dA = zeros(Complex{T}, 2*M, size(A)...)
+ G_in_dB = zeros(Complex{T}, 2*M, size(B)...)
+
+ ########## READ ##########
+ pivot_val = arr0[params...]
+ pivot_val_dA = arr0_dA[params...,:,:]
+ pivot_val_dB = arr0_dB[params...,:]
+
+ # Array1
+ for i in 1:2*M
+ if params[(i-1)÷2+1] > 1
+ i_staggered = i+1-2*((i-1) % 2)
+ params_adapted = collect(params)
+ params_adapted[(i-1)÷2+1] -= 1
+ G_in[i] = arr1[i_staggered, params_adapted...]
+ G_in_dA[i,:,:] = arr1_dA[i_staggered, params_adapted...,:,:]
+ G_in_dB[i,:] = arr1_dB[i_staggered, params_adapted...,:]
+ end
+ end
+
+ ########## WRITE ##########
+ G_in .*= K_l
+
+ # Array1
+ for i in 1:2*M
+ if params[(i-1)÷2+1] < cutoffs[(i-1)÷2+1]
+ # This if statement prevents a few elements from being written that will never be read
+ if i ≠ 2 || params[1] + 1 < cutoffs[1]
+ arr1_dA[i, params...,:,:], arr1_dB[i, params...,:] = calc_dA_dB(i, G_in_dA, G_in_dB, G_in, A, B, K_l, K_i, M, pivot_val, pivot_val_dA, pivot_val_dB)
+ end
+ end
+ end
+end
+function fock_diagonal_grad(
+ A::AbstractMatrix{Complex{Float64}},
+ B::AbstractVector{Complex{Float64}},
+ arr0::AbstractArray{Complex{Float64}},
+ arr2::AbstractArray{Complex{Float64}},
+ arr1010::AbstractArray{Complex{Float64}},
+ arr1001::AbstractArray{Complex{Float64}},
+ arr1::AbstractArray{Complex{Float64}},
+ precision_bits::Int64
+ )
+ """Returns the gradients of the PNR probabilities of a mixed state according to algorithm 1 of
+ https://doi.org/10.22331/q-2023-08-29-1097
+ Args:
+ A, B: required input for recurrence relation
+ Submatrices of the Fock representation. Each submatrix contains Fock indices of a certain type.
+ arr0 --> type: [a,a,b,b,c,c...]
+ arr2 --> type: [a+2,a,b,b,c,c...] / [a,a,b+2,b,c,c...] / ...
+ arr1010 --> type: [a+1,a,b+1,b,c,c,...] / [a+1,a,b,b,c+1,c,...] / [a,a,b+1,b,c+1,c,...] / ...
+ arr1001 --> type: [a+1,a,b,b+1,c,c,...] / [a+1,a,b,b,c,c+1,...] / [a,a,b+1,b,c,c+1,...] / ...
+ arr1 --> type: [a+1,a,b,b,c,c...] / [a,a+1,b,b,c,c...] / [a,a,b+1,b,c,c...] / ...
+ precision_bits: number of bits used to represent a single Fock amplitude
+ Returns:
+ arr0_dA, arr0_dB: derivatives of arr0 w.r.t A and B
+ """
+
+ T = GetPrecision.get_dtype(precision_bits)
+ SQRT = GetPrecision.SQRT_dict[precision_bits]
+
+ cutoffs = size(arr0)
+ M = length(cutoffs)
+
+ arr0_dA = zeros(Complex{T}, size(arr0)..., size(A)...)
+ arr2_dA = zeros(Complex{T}, size(arr2)..., size(A)...)
+ arr1010_dA = zeros(Complex{T}, size(arr1010)..., size(A)...)
+ arr1001_dA = zeros(Complex{T}, size(arr1001)..., size(A)...)
+ arr1_dA = zeros(Complex{T}, size(arr1)..., size(A)...)
+ arr0_dB = zeros(Complex{T}, size(arr0)..., size(B)...)
+ arr2_dB = zeros(Complex{T}, size(arr2)..., size(B)...)
+ arr1010_dB = zeros(Complex{T}, size(arr1010)..., size(B)...)
+ arr1001_dB = zeros(Complex{T}, size(arr1001)..., size(B)...)
+ arr1_dB = zeros(Complex{T}, size(arr1)..., size(B)...)
+
+ dict_params = CompactFock_HelperFunctions.construct_dict_params(cutoffs)
+ for sum_params in 0:sum(cutoffs)-1
+ for params in dict_params[sum_params]
+ # diagonal pivots: aa,bb,cc,...
+ if (cutoffs[1] == 1) || (params[1] < cutoffs[1]) # julia indexing!
+ use_diag_pivot_grad!(A, B, M, cutoffs, params, arr0, arr1, arr0_dA, arr1_dA, arr0_dB, arr1_dB, T, SQRT)
+ end
+ # off-diagonal pivots: d=1: (a+1)a,bb,cc,... | d=2: 00,(b+1)b,cc,... | d=3: 00,00,(c+1)c,... | ...
+ for d in 1:M
+ if all(params[1:d-1] .== 1) && (params[d] < cutoffs[d]) # julia indexing!
+ use_offDiag_pivot_grad!(A,B,M,cutoffs,params,d,arr0,arr2,arr1010,arr1001,arr1,arr0_dA,arr2_dA,arr1010_dA,arr1001_dA,arr1_dA,arr0_dB,arr2_dB,arr1010_dB,arr1001_dB,arr1_dB, T, SQRT)
+ end
+ end
+ end
+ end
+
+ return Complex{Float64}.(arr0_dA), Complex{Float64}.(arr0_dB)
+end
+
+end # end module
\ No newline at end of file
diff --git a/mrmustard/math/lattice/strategies/julia/compactFock/helperFunctions.jl b/mrmustard/math/lattice/strategies/julia/compactFock/helperFunctions.jl
new file mode 100644
index 000000000..b73900804
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/julia/compactFock/helperFunctions.jl
@@ -0,0 +1,38 @@
+module CompactFock_HelperFunctions
+
+function repeat_twice(params)
+ """
+ Args:
+ params: [a,b,c,...]
+ Returns:
+ [a,a,b,b,c,c,...]
+ """
+ pivot = Vector{Int64}(undef, 2 * length(params))
+ for (i, val) in enumerate(params)
+ pivot[2 * i - 1] = val
+ pivot[2 * i] = val
+ end
+ return pivot
+end
+
+function construct_dict_params(cutoffs)
+ """
+ Args:
+ cutoffs (tuple): upper bounds for the number of photons in each mode
+ Returns:
+ Dict: all possible values for (a,b,c,...), grouped in lists according to their sum a+b+c+...
+ """
+ M = length(cutoffs)
+ indices = Dict{Int64, Vector{Tuple}}()
+ for sum_params in 0:sum(cutoffs)-1
+ indices[sum_params] = Vector{Tuple}()
+ end
+
+ for params in CartesianIndices(cutoffs)
+ params_tup = Tuple(params)
+ push!(indices[sum(params_tup) - M], params_tup)
+ end
+ return indices
+end
+
+end # end module
\ No newline at end of file
diff --git a/mrmustard/math/lattice/strategies/julia/compactFock/singleLeftoverMode_amps.jl b/mrmustard/math/lattice/strategies/julia/compactFock/singleLeftoverMode_amps.jl
new file mode 100644
index 000000000..353cf04e3
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/julia/compactFock/singleLeftoverMode_amps.jl
@@ -0,0 +1,231 @@
+module LeftoverModeAmps
+
+import ..GetPrecision
+import ..CompactFock_HelperFunctions
+
+function write_block!(i, arr_write, write, arr_read_pivot, read_GB, G_in, GB, A, K_i, cutoff_leftoverMode, SQRT)
+ """
+ Apply the recurrence relation to blocks of Fock amplitudes (of shape cutoff_leftoverMode x cutoff_leftoverMode)
+ (cfr. algorithm 2 of https://doi.org/10.22331/q-2023-08-29-1097)
+ """
+ m, n = 1, 1
+ A_adapted = A[i, 3:end]
+ G_in_adapted = G_in[1, 1, :]
+ arr_write[1, 1, write...] = (GB[1, 1, i] + sum(A_adapted .* G_in_adapted)) / K_i[i - 2]
+
+ m = 1
+ A_adapted = A[i, 2:end]
+ for n in 2:cutoff_leftoverMode
+ G_in_adapted = vcat(arr_read_pivot[1, n - 1, read_GB...] * SQRT[n], G_in[1, n, :])
+ arr_write[1, n, write...] = (GB[1, n, i] + sum(A_adapted .* G_in_adapted)) / K_i[i - 2]
+ end
+
+ n = 1
+ A_adapted = vcat(A[i, 1], A[i, 3:end])
+ for m in 2:cutoff_leftoverMode
+ G_in_adapted = vcat(arr_read_pivot[m - 1, 1, read_GB...] * SQRT[m], G_in[m, 1, :])
+ arr_write[m, 1, write...] = (GB[m, 1, i] + sum(A_adapted .* G_in_adapted)) / K_i[i - 2]
+ end
+
+ A_adapted = A[i, :]
+ for m in 2:cutoff_leftoverMode
+ for n in 2:cutoff_leftoverMode
+ G_in_adapted = vcat(arr_read_pivot[m - 1, n, read_GB...] * SQRT[m], arr_read_pivot[m, n - 1, read_GB...] * SQRT[n], G_in[m, n, :])
+ arr_write[m, n, write...] = (GB[m, n, i] + sum(A_adapted .* G_in_adapted)) / K_i[i - 2]
+ end
+ end
+end
+
+function use_offDiag_pivot!(A, B, M, cutoff_leftoverMode, cutoffs_tail, params, d, arr0, arr2, arr1010, arr1001, arr1, T, SQRT)
+ """Given params=(a,b,c,...), apply the recurrence relation for the pivots
+ [a+1,a,b,b,c,c,...] / [a,a,b+1,b,c,c,...] / [a,a,b,b,c+1,c,...] / ..."""
+ pivot = CompactFock_HelperFunctions.repeat_twice(params)
+ pivot[2 * d - 1] += 1
+ K_l = SQRT[pivot] # julia indexing counters extra zero in SQRT
+ K_i = SQRT[pivot .+ 1] # julia indexing counters extra zero in SQRT
+ G_in = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, 2 * M)
+
+ ########## READ ##########
+ read_GB = tuple(2 * d - 1, params...)
+ GB = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, length(B))
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode
+ GB[m, n, :] = arr1[m, n, read_GB...] .* B
+ end
+ end
+
+ # Array0
+ G_in[:, :, 2 * d - 1] .= arr0[:, :, params...]
+
+
+ # read from Array2
+ if params[d] > 1
+ params_adapted = collect(params)
+ params_adapted[d] -= 1
+ G_in[:, :, 2 * d] .= arr2[:, :, d, params_adapted...] # read block
+ end
+
+ # read from Array11
+ for i in d+1:M # i>d
+ if params[i] > 1
+ params_adapted = collect(params)
+ params_adapted[i] -= 1
+ G_in[:, :, 2 * i - 1] .= arr1001[:, :, d, i - d, params_adapted...] # read block
+ G_in[:, :, 2 * i] .= arr1010[:, :, d, i - d, params_adapted...] # read block
+ end
+ end
+
+ ########## WRITE ##########
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode
+ G_in[m, n, :] .*= K_l
+ end
+ end
+
+ # Array0
+ write = collect(params)
+ write[d] += 1
+ write_block!(2 * d + 2, arr0, write, arr1, read_GB, G_in, GB, A, K_i, cutoff_leftoverMode, SQRT)
+
+ # Array2
+ if params[d] + 1 < cutoffs_tail[d]
+ write = (d, params...)
+ write_block!(2 * d + 1, arr2, write, arr1, read_GB, G_in, GB, A, K_i, cutoff_leftoverMode, SQRT)
+ end
+
+ # Array11
+ for i in d+1:M
+ if params[i] < cutoffs_tail[i]
+ write = (d, i - d, params...)
+ write_block!(2 * i + 1, arr1010, write, arr1, read_GB, G_in, GB, A, K_i, cutoff_leftoverMode, SQRT)
+ write_block!(2 * i + 2, arr1001, write, arr1, read_GB, G_in, GB, A, K_i, cutoff_leftoverMode, SQRT)
+ end
+ end
+end
+
+function use_diag_pivot!(A, B, M, cutoff_leftoverMode, cutoffs_tail, params, arr0, arr1, T, SQRT)
+ """Given params=(a,b,c,...), apply the recurrence relation for the pivot [a,a,b,b,c,c...]"""
+ pivot = CompactFock_HelperFunctions.repeat_twice(params)
+ K_l = SQRT[pivot] # julia indexing counters extra zero in SQRT
+ K_i = SQRT[pivot .+ 1] # julia indexing counters extra zero in SQRT
+ G_in = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, 2*M)
+
+ ########## READ ##########
+ read_GB = params
+ GB = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, length(B))
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode
+ GB[m, n, :] = arr0[m, n, read_GB...] .* B
+ end
+ end
+
+ # Array1
+ for i in 1:2*M
+ if params[(i-1)÷2+1] > 1
+ params_adapted = collect(params)
+ params_adapted[(i-1)÷2+1] -= 1
+ G_in[:, :, i] .= arr1[:, :, i+1-2*((i-1) % 2), params_adapted...] # read block
+ end
+ end
+
+ ########## WRITE ##########
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode
+ G_in[m, n, :] .*= K_l
+ end
+ end
+
+ # Array1
+ for i in 1:2*M
+ if params[(i-1)÷2+1] < cutoffs_tail[(i-1)÷2+1]
+ # this if statement prevents a few elements from being written that will never be read
+ if i ≠ 2 || params[1] + 1 < cutoffs_tail[1]
+ write = tuple(i,params...)
+ write_block!(i + 2, arr1, write, arr0, read_GB, G_in, GB, A, K_i, cutoff_leftoverMode, SQRT)
+ end
+ end
+ end
+end
+
+function fill_firstMode_PNRzero!(arr0,A,B,M,cutoff_leftoverMode,SQRT)
+ # fill first mode for all PNR detections equal to zero
+ one_tuple = tuple(fill(1,M-1)...)
+ for m in 1:cutoff_leftoverMode-1
+ arr0[m + 1, 1, one_tuple...] = (arr0[m, 1, one_tuple...] * B[1]) / SQRT[m + 1]
+ if m != 1
+ arr0[m + 1, 1, one_tuple...] += (SQRT[m] * A[1, 1] * arr0[m - 1, 1, one_tuple...]) / SQRT[m + 1]
+ end
+ end
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode-1
+ arr0[m, n + 1, one_tuple...] = (arr0[m, n, one_tuple...] * B[2]) / SQRT[n + 1]
+ if m != 1
+ arr0[m, n + 1, one_tuple...] += (SQRT[m] * A[2, 1] * arr0[m - 1, n, one_tuple...]) / SQRT[n + 1]
+ end
+ if n != 1
+ arr0[m, n + 1, one_tuple...] += (SQRT[n] * A[2, 2] * arr0[m, n - 1, one_tuple...]) / SQRT[n + 1]
+ end
+ end
+ end
+end
+
+function fock_1leftoverMode_amps(
+ A::AbstractMatrix{Complex{Float64}},
+ B::AbstractVector{Complex{Float64}},
+ G0::Complex{Float64},
+ cutoffs::Tuple,
+ precision_bits::Int64
+ )
+ """Returns the density matrices in the upper, undetected mode of a circuit when all other modes are PNR detected
+ according to algorithm 2 of https://doi.org/10.22331/q-2023-08-29-1097
+ Args:
+ A, B, G0: required input for recurrence relation
+ cutoffs: upper bounds for the number of photons in each mode
+ precision_bits: number of bits used to represent a single Fock amplitude
+ Returns:
+ Submatrices of the Fock representation. Each submatrix contains Fock indices of a certain type.
+ arr0 --> type: [a,a,b,b,c,c...]
+ arr2 --> type: [a+2,a,b,b,c,c...] / [a,a,b+2,b,c,c...] / ...
+ arr1010 --> type: [a+1,a,b+1,b,c,c,...] / [a+1,a,b,b,c+1,c,...] / [a,a,b+1,b,c+1,c,...] / ...
+ arr1001 --> type: [a+1,a,b,b+1,c,c,...] / [a+1,a,b,b,c,c+1,...] / [a,a,b+1,b,c,c+1,...] / ...
+ arr1 --> type: [a+1,a,b,b,c,c...] / [a,a+1,b,b,c,c...] / [a,a,b+1,b,c,c...] / ...
+ """
+
+ T = GetPrecision.get_dtype(precision_bits)
+ SQRT = GetPrecision.SQRT_dict[precision_bits]
+
+ M = length(cutoffs)
+ cutoff_leftoverMode = cutoffs[1]
+ cutoffs_tail = cutoffs[2:end]
+
+ arr0 = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, cutoffs_tail...)
+ arr0[fill(1,M+1)...] = G0
+ arr2 = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, M - 1, cutoffs_tail...)
+ arr1 = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, 2 * (M - 1), cutoffs_tail...)
+ arr1010 = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, M - 1, M - 2, cutoffs_tail...)
+ arr1001 = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, M - 1, M - 2, cutoffs_tail...)
+
+
+ # fill first mode for all PNR detections equal to zero
+ fill_firstMode_PNRzero!(arr0,A,B,M,cutoff_leftoverMode,SQRT)
+
+ dict_params = CompactFock_HelperFunctions.construct_dict_params(cutoffs_tail)
+ for sum_params in 0:sum(cutoffs_tail)-1
+ for params in dict_params[sum_params]
+ # diagonal pivots: aa,bb,cc,...
+ if (cutoffs_tail[1] == 1) || (params[1] < cutoffs_tail[1]) # julia indexing!
+ use_diag_pivot!(A, B, M - 1, cutoff_leftoverMode, cutoffs_tail, params, arr0, arr1, T, SQRT)
+ end
+ # off-diagonal pivots: d=1: (a+1)a,bb,cc,... | d=2: 00,(b+1)b,cc,... | d=3: 00,00,(c+1)c,... | ...
+ for d in 1:M - 1
+ if all(params[1:d-1] .== 1) && (params[d] < cutoffs_tail[d])
+ use_offDiag_pivot!(A,B,M - 1,cutoff_leftoverMode,cutoffs_tail,params,d,arr0,arr2,arr1010,arr1001,arr1,T,SQRT)
+ end
+ end
+ end
+ end
+
+ return Complex{Float64}.(arr0), Complex{Float64}.(arr2), Complex{Float64}.(arr1010), Complex{Float64}.(arr1001), Complex{Float64}.(arr1)
+end
+
+end # end module
\ No newline at end of file
diff --git a/mrmustard/math/lattice/strategies/julia/compactFock/singleLeftoverMode_grad.jl b/mrmustard/math/lattice/strategies/julia/compactFock/singleLeftoverMode_grad.jl
new file mode 100644
index 000000000..0ed724d2a
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/julia/compactFock/singleLeftoverMode_grad.jl
@@ -0,0 +1,302 @@
+module LeftoverModeGrad
+
+import ..GetPrecision
+import ..CompactFock_HelperFunctions
+
+function calc_dA_dB(m, n, i, arr_read_pivot, read_GB, G_in_adapted, A_adapted, B, K_i, K_l_adapted, arr_read_pivot_dA, G_in_dA_adapted, arr_read_pivot_dB, G_in_dB_adapted, l_range)
+ """Apply eqs. 16 & 17 (of https://doi.org/10.22331/q-2023-08-29-1097) for a single Fock amplitude"""
+ dA = arr_read_pivot_dA[m, n, read_GB...,:,:] .* B[i]
+ dB = arr_read_pivot_dB[m, n, read_GB...,:] .* B[i]
+ dB[i] += arr_read_pivot[m, n, read_GB...]
+ for (l_prime, l) in enumerate(l_range)
+ dA += (K_l_adapted[l_prime] * A_adapted[l_prime]) .* G_in_dA_adapted[l_prime,:,:]
+ dB += (K_l_adapted[l_prime] * A_adapted[l_prime]) .* G_in_dB_adapted[l_prime,:]
+ dA[i, l] += G_in_adapted[l_prime]
+ end
+ return dA ./ K_i[i - 2], dB ./ K_i[i - 2]
+end
+
+function write_block_grad(i, write, arr_read_pivot, read_GB, G_in, A, B, K_i, K_l, cutoff_leftoverMode, arr_write_dA, arr_read_pivot_dA, G_in_dA, arr_write_dB, arr_read_pivot_dB, G_in_dB, SQRT)
+ """
+ Apply eqs. 16 & 17 (of https://doi.org/10.22331/q-2023-08-29-1097)
+ to blocks of Fock amplitudes (of shape cutoff_leftoverMode x cutoff_leftoverMode)
+ """
+ m, n = 1, 1
+ l_range = 3:size(A)[2]
+ A_adapted = A[i, 3:end]
+ G_in_adapted = G_in[1, 1, :]
+ G_in_dA_adapted = G_in_dA[1,1,:,:,:]
+ G_in_dB_adapted = G_in_dB[1,1,:,:]
+ K_l_adapted = K_l
+ arr_write_dA[1, 1, write...,:,:], arr_write_dB[1, 1, write...,:] = calc_dA_dB(m, n, i, arr_read_pivot, read_GB, G_in_adapted, A_adapted, B, K_i, K_l_adapted, arr_read_pivot_dA, G_in_dA_adapted, arr_read_pivot_dB, G_in_dB_adapted, l_range)
+
+ m = 1
+ l_range = 2:size(A)[2]
+ A_adapted = A[i, 2:end]
+ for n in 2:cutoff_leftoverMode
+ K_l_adapted = vcat(SQRT[n], K_l)
+ G_in_adapted = vcat(arr_read_pivot[1, n - 1, read_GB...] * SQRT[n], G_in[1, n, :])
+ G_in_dA_adapted = vcat(reshape(arr_read_pivot_dA[1, n - 1, read_GB...,:,:], (1, size(A)...)), G_in_dA[1,n,:,:,:])
+ G_in_dB_adapted = vcat(reshape(arr_read_pivot_dB[1, n - 1, read_GB...,:], (1, size(B)...)), G_in_dB[1,n,:,:])
+ arr_write_dA[1, n, write...,:,:], arr_write_dB[1, n, write...,:] = calc_dA_dB(m, n, i, arr_read_pivot, read_GB, G_in_adapted, A_adapted, B, K_i, K_l_adapted, arr_read_pivot_dA, G_in_dA_adapted, arr_read_pivot_dB, G_in_dB_adapted, l_range)
+ end
+
+ n = 1
+ l_range = [2:size(A)[2]...]
+ l_range[1] = 1
+ A_adapted = vcat(A[i, 1], A[i, 3:end])
+ for m in 2:cutoff_leftoverMode
+ K_l_adapted = vcat(SQRT[m], K_l)
+ G_in_adapted = vcat(arr_read_pivot[m - 1, 1, read_GB...] * SQRT[m], G_in[m, 1, :])
+ G_in_dA_adapted = vcat(reshape(arr_read_pivot_dA[m - 1, 1, read_GB...,:,:], (1, size(A)...)), G_in_dA[m, 1,:,:,:])
+ G_in_dB_adapted = vcat(reshape(arr_read_pivot_dB[m - 1, 1, read_GB...,:], (1, size(B)...)), G_in_dB[m, 1,:,:])
+ arr_write_dA[m, 1, write...,:,:], arr_write_dB[m, 1, write...,:] = calc_dA_dB(m, n, i, arr_read_pivot, read_GB, G_in_adapted, A_adapted, B, K_i, K_l_adapted, arr_read_pivot_dA, G_in_dA_adapted, arr_read_pivot_dB, G_in_dB_adapted, l_range)
+ end
+
+ l_range = 1:size(A)[2]
+ A_adapted = A[i, :]
+ for m in 2:cutoff_leftoverMode
+ for n in 2:cutoff_leftoverMode
+ K_l_adapted = vcat(SQRT[m], SQRT[n], K_l)
+ G_in_adapted = vcat(arr_read_pivot[m - 1, n, read_GB...] * SQRT[m], arr_read_pivot[m, n - 1, read_GB...] * SQRT[n], G_in[m, n, :])
+ G_in_dA_adapted = vcat(reshape(arr_read_pivot_dA[m - 1, n, read_GB...,:,:], (1, size(A)...)), reshape(arr_read_pivot_dA[m, n - 1, read_GB...,:,:], (1, size(A)...)), G_in_dA[m, n,:,:,:])
+ G_in_dB_adapted = vcat(reshape(arr_read_pivot_dB[m - 1, n, read_GB...,:], (1, size(B)...)), reshape(arr_read_pivot_dB[m, n - 1, read_GB...,:], (1, size(B)...)), G_in_dB[m, n,:,:])
+ arr_write_dA[m, n, write...,:,:], arr_write_dB[m, n, write...,:] = calc_dA_dB(m, n, i, arr_read_pivot, read_GB, G_in_adapted, A_adapted, B, K_i, K_l_adapted, arr_read_pivot_dA, G_in_dA_adapted, arr_read_pivot_dB, G_in_dB_adapted, l_range)
+ end
+ end
+ return arr_write_dA, arr_write_dB
+end
+
+function read_block(arr_write, arr_write_dA, arr_write_dB, idx_write, arr_read, arr_read_dA, arr_read_dB, idx_read_tail)
+ """
+ Read the blocks of Fock amplitudes (of shape cutoff_leftoverMode x cutoff_leftoverMode)
+ and their derivatives w.r.t A and B and write them to G_in, G_in_dA, G_in_dB
+ """
+ arr_write[:, :, idx_write] .= arr_read[:, :, idx_read_tail...]
+ arr_write_dA[:, :, idx_write, :, :] .= arr_read_dA[:, :, idx_read_tail...,:, :]
+ arr_write_dB[:, :, idx_write, :] .= arr_read_dB[:, :, idx_read_tail...,:]
+ return arr_write, arr_write_dA, arr_write_dB
+end
+
+
+function use_offDiag_pivot_grad!(A, B, M, cutoff_leftoverMode, cutoffs_tail, params, d, arr0, arr2, arr1010, arr1001, arr1, arr0_dA, arr2_dA, arr1010_dA, arr1001_dA, arr1_dA, arr0_dB, arr2_dB, arr1010_dB, arr1001_dB, arr1_dB, T, SQRT)
+ """Given params=(a,b,c,...), apply the eqs. 16 & 17 (of https://doi.org/10.22331/q-2023-08-29-1097)
+ for the pivots [a+1,a,b,b,c,c,...] / [a,a,b+1,b,c,c,...] / [a,a,b,b,c+1,c,...] / ..."""
+ pivot = CompactFock_HelperFunctions.repeat_twice(params)
+ pivot[2 * d - 1] += 1
+ K_l = SQRT[pivot] # julia indexing counters extra zero in SQRT
+ K_i = SQRT[pivot .+ 1] # julia indexing counters extra zero in SQRT
+ G_in = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, 2 * M)
+ G_in_dA = zeros(Complex{T}, size(G_in)..., size(A)...)
+ G_in_dB = zeros(Complex{T}, size(G_in)..., size(B)...)
+
+ ########## READ ##########
+ read_GB = (2 * d - 1, params...)
+ GB = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, length(B))
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode
+ GB[m, n, :] = arr1[m, n, read_GB...] .* B
+ end
+ end
+
+ # Array0
+ G_in, G_in_dA, G_in_dB = read_block(G_in, G_in_dA, G_in_dB, 2 * d - 1, arr0, arr0_dA, arr0_dB, params)
+
+ # read from Array2
+ if params[d] > 1
+ params_adapted = collect(params)
+ params_adapted[d] -= 1
+ G_in, G_in_dA, G_in_dB = read_block(G_in, G_in_dA, G_in_dB, 2 * d, arr2, arr2_dA, arr2_dB, (d,params_adapted...))
+ end
+
+ # read from Array11
+ for i in d+1:M # i>d
+ if params[i] > 1
+ params_adapted = collect(params)
+ params_adapted[i] -= 1
+ G_in, G_in_dA, G_in_dB = read_block(G_in, G_in_dA, G_in_dB, 2 * i - 1, arr1001, arr1001_dA, arr1001_dB, (d, i - d,params_adapted...))
+ G_in, G_in_dA, G_in_dB = read_block(G_in, G_in_dA, G_in_dB, 2 * i, arr1010, arr1010_dA, arr1010_dB, (d, i - d,params_adapted...))
+ end
+ end
+
+ ########## WRITE ##########
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode
+ G_in[m, n, :] .*= K_l
+ end
+ end
+
+ # Array0
+ write = collect(params)
+ write[d] += 1
+ arr0_dA, arr0_dB = write_block_grad(2 * d + 2, write, arr1, read_GB, G_in, A, B, K_i, K_l, cutoff_leftoverMode, arr0_dA, arr1_dA, G_in_dA, arr0_dB, arr1_dB, G_in_dB, SQRT)
+
+ # Array2
+ if params[d] + 1 < cutoffs_tail[d]
+ write = (d, params...)
+ arr2_dA, arr2_dB = write_block_grad(2 * d + 1, write, arr1, read_GB, G_in, A, B, K_i, K_l, cutoff_leftoverMode, arr2_dA, arr1_dA, G_in_dA, arr2_dB, arr1_dB, G_in_dB, SQRT)
+ end
+
+ # Array11
+ for i in d+1:M
+ if params[i] < cutoffs_tail[i]
+ write = (d, i - d, params...)
+ arr1010_dA, arr1010_dB = write_block_grad(2 * i + 1, write, arr1, read_GB, G_in, A, B, K_i, K_l, cutoff_leftoverMode, arr1010_dA, arr1_dA, G_in_dA, arr1010_dB, arr1_dB, G_in_dB, SQRT)
+ arr1001_dA, arr1001_dB = write_block_grad(2 * i + 2, write, arr1, read_GB, G_in, A, B, K_i, K_l, cutoff_leftoverMode, arr1001_dA, arr1_dA, G_in_dA, arr1001_dB, arr1_dB, G_in_dB, SQRT)
+ end
+ end
+end
+
+function use_diag_pivot_grad!(A, B, M, cutoff_leftoverMode, cutoffs_tail, params, arr0, arr1, arr0_dA, arr1_dA, arr0_dB, arr1_dB, T, SQRT)
+ pivot = CompactFock_HelperFunctions.repeat_twice(params)
+ """Given params=(a,b,c,...), apply the eqs. 16 & 17 (of https://doi.org/10.22331/q-2023-08-29-1097)
+ for the pivot [a,a,b,b,c,c...]"""
+ K_l = SQRT[pivot] # julia indexing counters extra zero in SQRT
+ K_i = SQRT[pivot .+ 1] # julia indexing counters extra zero in SQRT
+ G_in = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, 2*M)
+ G_in_dA = zeros(Complex{T}, size(G_in)..., size(A)...)
+ G_in_dB = zeros(Complex{T}, size(G_in)..., size(B)...)
+
+ ########## READ ##########
+ read_GB = params
+ GB = zeros(Complex{T}, cutoff_leftoverMode, cutoff_leftoverMode, length(B))
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode
+ GB[m, n, :] = arr0[m, n, read_GB...] .* B
+ end
+ end
+
+ # Array1
+ for i in 1:2*M
+ if params[(i-1)÷2+1] > 1
+ params_adapted = collect(params)
+ params_adapted[(i-1)÷2+1] -= 1
+ read = tuple(i+1-2*((i-1) % 2), params_adapted...)
+ G_in, G_in_dA, G_in_dB = read_block(G_in, G_in_dA, G_in_dB, i, arr1, arr1_dA, arr1_dB, read)
+ end
+ end
+
+ ########## WRITE ##########
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode
+ G_in[m, n, :] .*= K_l
+ end
+ end
+
+ # Array1
+ for i in 1:2*M
+ if params[(i-1)÷2+1] < cutoffs_tail[(i-1)÷2+1]
+ # this if statement prevents a few elements from being written that will never be read
+ if i ≠ 2 || params[1] + 1 < cutoffs_tail[1]
+ write = tuple(i,params...)
+ arr1_dA, arr1_dB = write_block_grad(i + 2, write, arr0, read_GB, G_in, A, B, K_i, K_l, cutoff_leftoverMode, arr1_dA, arr0_dA, G_in_dA, arr1_dB, arr0_dB, G_in_dB, SQRT)
+ end
+ end
+ end
+end
+
+function fill_firstMode_PNRzero!(arr0,arr0_dA,arr0_dB,A,B,M,cutoff_leftoverMode,SQRT)
+ """fill first mode when all PNR detection values are equal to zero"""
+ one_tuple = tuple(fill(1,M-1)...)
+
+ for m in 1:cutoff_leftoverMode - 1
+ arr0_dA[m + 1, 1, one_tuple..., :, :] = arr0_dA[m, 1, one_tuple..., :, :] .* B[1]
+ arr0_dB[m + 1, 1, one_tuple..., :] = arr0_dB[m, 1, one_tuple..., :] .* B[1]
+ arr0_dB[m + 1, 1, one_tuple..., 1] += arr0[m, 1, one_tuple...]
+ if m != 1
+ arr0_dA[m + 1, 1, one_tuple..., :, :] += SQRT[m] .* A[1, 1] .* arr0_dA[m - 1, 1, one_tuple..., :, :]
+ arr0_dA[m + 1, 1, one_tuple..., 1, 1] += SQRT[m] .* arr0[m - 1, 1, one_tuple...]
+ arr0_dB[m + 1, 1, one_tuple..., :] += SQRT[m] .* A[1, 1] .* arr0_dB[m - 1, 1, one_tuple..., :]
+ end
+ arr0_dA[m + 1, 1, one_tuple..., :, :] ./= SQRT[m + 1]
+ arr0_dB[m + 1, 1, one_tuple..., :] ./= SQRT[m + 1]
+ end
+
+ for m in 1:cutoff_leftoverMode
+ for n in 1:cutoff_leftoverMode - 1
+ arr0_dA[m, n + 1, one_tuple..., :, :] = arr0_dA[m, n, one_tuple..., :, :] .* B[2]
+ arr0_dB[m, n + 1, one_tuple..., :] = arr0_dB[m, n, one_tuple..., :] .* B[2]
+ arr0_dB[m, n + 1, one_tuple..., 2] += arr0[m, n, one_tuple...]
+ if m != 1
+ arr0_dA[m, n + 1, one_tuple..., :, :] += SQRT[m] .* A[2, 1] .* arr0_dA[m - 1, n, one_tuple..., :, :]
+ arr0_dA[m, n + 1, one_tuple..., 2, 1] += SQRT[m] .* arr0[m - 1, n, one_tuple...]
+ arr0_dB[m, n + 1, one_tuple..., :] += SQRT[m] .* A[2, 1] .* arr0_dB[m - 1, n, one_tuple..., :]
+
+ end
+ if n != 1
+ arr0_dA[m, n + 1, one_tuple..., :, :] += SQRT[n] .* A[2, 2] .* arr0_dA[m, n - 1, one_tuple..., :, :]
+ arr0_dA[m, n + 1, one_tuple..., 2, 2] += SQRT[n] .* arr0[m, n - 1, one_tuple...]
+ arr0_dB[m, n + 1, one_tuple..., :] += SQRT[n] .* A[2, 2] .* arr0_dB[m, n - 1, one_tuple..., :]
+ end
+ arr0_dA[m, n + 1, one_tuple..., :, :] ./= SQRT[n + 1]
+ arr0_dB[m, n + 1, one_tuple..., :] ./= SQRT[n + 1]
+ end
+ end
+end
+
+function fock_1leftoverMode_grad(
+ A::AbstractMatrix{Complex{Float64}},
+ B::AbstractVector{Complex{Float64}},
+ arr0::AbstractArray{Complex{Float64}},
+ arr2::AbstractArray{Complex{Float64}},
+ arr1010::AbstractArray{Complex{Float64}},
+ arr1001::AbstractArray{Complex{Float64}},
+ arr1::AbstractArray{Complex{Float64}},
+ precision_bits::Int64
+ )
+ """Returns the gradients of the density matrices in the upper, undetected mode of a circuit when all other modes
+ are PNR detected (according to algorithm 2 of https://doi.org/10.22331/q-2023-08-29-1097)
+ Args:
+ A, B: required input for recurrence relation
+ Submatrices of the Fock representation. Each submatrix contains Fock indices of a certain type.
+ arr0 --> type: [a,a,b,b,c,c...]
+ arr2 --> type: [a+2,a,b,b,c,c...] / [a,a,b+2,b,c,c...] / ...
+ arr1010 --> type: [a+1,a,b+1,b,c,c,...] / [a+1,a,b,b,c+1,c,...] / [a,a,b+1,b,c+1,c,...] / ...
+ arr1001 --> type: [a+1,a,b,b+1,c,c,...] / [a+1,a,b,b,c,c+1,...] / [a,a,b+1,b,c,c+1,...] / ...
+ arr1 --> type: [a+1,a,b,b,c,c...] / [a,a+1,b,b,c,c...] / [a,a,b+1,b,c,c...] / ...
+ precision_bits: number of bits used to represent a single Fock amplitude
+ Returns:
+ arr0_dA, arr0_dB: derivatives of arr0 w.r.t A and B
+ """
+
+ T = GetPrecision.get_dtype(precision_bits)
+ SQRT = GetPrecision.SQRT_dict[precision_bits]
+
+ cutoffs = size(arr0)[2:end]
+ M = length(cutoffs)
+ cutoff_leftoverMode = cutoffs[1]
+ cutoffs_tail = cutoffs[2:end]
+
+ arr0_dA = zeros(Complex{T}, size(arr0)..., size(A)...)
+ arr2_dA = zeros(Complex{T}, size(arr2)..., size(A)...)
+ arr1010_dA = zeros(Complex{T}, size(arr1010)..., size(A)...)
+ arr1001_dA = zeros(Complex{T}, size(arr1001)..., size(A)...)
+ arr1_dA = zeros(Complex{T}, size(arr1)..., size(A)...)
+ arr0_dB = zeros(Complex{T}, size(arr0)..., size(B)...)
+ arr2_dB = zeros(Complex{T}, size(arr2)..., size(B)...)
+ arr1010_dB = zeros(Complex{T}, size(arr1010)..., size(B)...)
+ arr1001_dB = zeros(Complex{T}, size(arr1001)..., size(B)...)
+ arr1_dB = zeros(Complex{T}, size(arr1)..., size(B)...)
+
+ fill_firstMode_PNRzero!(arr0,arr0_dA,arr0_dB,A,B,M,cutoff_leftoverMode,SQRT)
+
+ dict_params = CompactFock_HelperFunctions.construct_dict_params(cutoffs_tail)
+ for sum_params in 0:sum(cutoffs_tail)-1
+ for params in dict_params[sum_params]
+ # diagonal pivots: aa,bb,cc,...
+ if (cutoffs_tail[1] == 1) || (params[1] < cutoffs_tail[1]) # julia indexing!
+ use_diag_pivot_grad!(A,B,M - 1,cutoff_leftoverMode,cutoffs_tail,params,arr0,arr1,arr0_dA,arr1_dA,arr0_dB,arr1_dB,T,SQRT)
+ end
+ # off-diagonal pivots: d=1: (a+1)a,bb,cc,... | d=2: 00,(b+1)b,cc,... | d=3: 00,00,(c+1)c,... | ...
+ for d in 1:M - 1
+ if all(params[1:d-1] .== 1) && (params[d] < cutoffs_tail[d])
+ use_offDiag_pivot_grad!(A,B,M - 1,cutoff_leftoverMode,cutoffs_tail,params,d,arr0,arr2,arr1010,arr1001,arr1,arr0_dA,arr2_dA,arr1010_dA,arr1001_dA,arr1_dA,arr0_dB,arr2_dB,arr1010_dB,arr1001_dB,arr1_dB,T,SQRT)
+ end
+ end
+ end
+ end
+
+ return Complex{Float64}.(arr0_dA), Complex{Float64}.(arr0_dB)
+end
+
+end # end module
diff --git a/mrmustard/math/lattice/strategies/julia/getPrecision.jl b/mrmustard/math/lattice/strategies/julia/getPrecision.jl
new file mode 100644
index 000000000..e227f4bd7
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/julia/getPrecision.jl
@@ -0,0 +1,18 @@
+module GetPrecision
+
+using MultiFloats
+
+# possible dtypes
+const dtypes_dict = Dict(128 => Float64, 256 => Float64x2, 384 => Float64x3, 512 => Float64x4)
+
+# pre-calculate SQRT values
+const SQRT_dict = Dict(k => [0;sqrt.(dtypes_dict[k].(1:999))] for k in keys(dtypes_dict))
+
+function get_dtype(precision_bits)
+ if !(precision_bits in keys(dtypes_dict))
+ error("The possible values for precision_bits are ", keys(dtypes_dict))
+ end
+ return dtypes_dict[precision_bits]
+end
+
+end # end module
\ No newline at end of file
diff --git a/mrmustard/math/lattice/strategies/julia/vanilla.jl b/mrmustard/math/lattice/strategies/julia/vanilla.jl
new file mode 100644
index 000000000..3192b3ef6
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/julia/vanilla.jl
@@ -0,0 +1,83 @@
+module Vanilla
+
+import ..GetPrecision
+
+function vanilla(
+ A::AbstractMatrix{Complex{Float64}},
+ b::AbstractVector{Complex{Float64}},
+ c::Complex{Float64},
+ shape::AbstractVector{Int64},
+ precision_bits::Int64
+ )
+ """Vanilla Fock-Bargmann strategy. Fills the tensor by iterating over all indices
+ in ndindex (i.e. CartesianIndices) order.
+ Both the input and output of this function have dtype Complex{Float64},
+ but a higher precision (i.e. ``precision_bits``) is used intermediately to postpone the numerical blowup
+ that results from the unstable recurrence relation.
+
+ Args:
+ A: A matrix of the Fock-Bargmann representation
+ b: B vector of the Fock-Bargmann representation
+ c: vacuum amplitude
+ shape: shape of the output tensor
+ precision_bits: the number of bits used to represent a single Fock amplitude
+
+ Returns:
+ Array{Complex{Float64}}: Fock representation of the Gaussian tensor with shape ``shape``
+ """
+
+ T = GetPrecision.get_dtype(precision_bits)
+ SQRT = GetPrecision.SQRT_dict[precision_bits]
+
+ shape = Tuple(shape)
+
+ path = CartesianIndices(shape)
+
+ G = Array{Complex{T}}(undef,shape) # initialize empty array with high precision values
+ G[first(path)] = c
+
+ for idx in Iterators.drop(path, 1)
+ update_Fock_array!(G, A, b, idx, SQRT)
+ end
+
+ return Complex{Float64}.(G) # convert back to lower precision
+end
+
+function update_Fock_array!(
+ G::AbstractArray,
+ A::AbstractMatrix{Complex{Float64}},
+ b::AbstractVector{Complex{Float64}},
+ idx::CartesianIndex,
+ SQRT::AbstractVector
+ )
+ """Apply the recurrence relation once and update G at a certain index"""
+
+ i, pivot = get_pivot(idx)
+
+ @views temp = b[i] * G[pivot]
+ @inbounds for (j, neighbor) in get_neighbors(pivot)
+ @views temp += A[i, j] * G[neighbor] * SQRT[pivot[j]]
+ end
+
+ G[idx] = temp / SQRT[idx[i]]
+end
+
+function get_pivot(idx::CartesianIndex)
+ """returns a single idx where the first non-one value of idx has been lowered.
+ E.g. (1,1,5,3) -> (1,1,4,3)"""
+ @inbounds @simd for i in 1:length(idx)
+ if idx[i] > 1
+ return i, CartesianIndex(ntuple(j -> j == i ? idx[j] - 1 : idx[j], length(idx)))
+ end
+ end
+ return 1, idx
+end
+
+function get_neighbors(idx::CartesianIndex{N}) where {N}
+ """Returns the lower neighbors of idx: pairs (i, neighbor) where neighbor is idx with idx[i] lowered by one, for each i with idx[i] > 1"""
+ return ((i, CartesianIndex(ntuple(d -> d == i ? idx[d] - 1 : idx[d], Val(N))))
+ for i in 1:N if idx[i] > 1)
+end
+
+end # end module
+
diff --git a/mrmustard/math/lattice/strategies/squeezer.py b/mrmustard/math/lattice/strategies/squeezer.py
index a39a7e649..f92e21372 100644
--- a/mrmustard/math/lattice/strategies/squeezer.py
+++ b/mrmustard/math/lattice/strategies/squeezer.py
@@ -18,7 +18,7 @@
from numba import njit
from mrmustard.math.lattice import steps
-from mrmustard.typing import ComplexTensor
+from mrmustard.utils.typing import ComplexTensor
SQRT = np.sqrt(np.arange(100000))
diff --git a/mrmustard/math/lattice/strategies/vanilla.jl b/mrmustard/math/lattice/strategies/vanilla.jl
new file mode 100644
index 000000000..4c1fafc01
--- /dev/null
+++ b/mrmustard/math/lattice/strategies/vanilla.jl
@@ -0,0 +1,73 @@
+using MultiFloats
+T = Float64x4
+SQRT = [0.0; [sqrt(T(i)) for i in 1:100000]]
+
+function vanilla(
+ A::AbstractMatrix{Complex{Float64}},
+ b::AbstractVector{Complex{Float64}},
+ c::Complex{Float64},
+ shape::AbstractVector{Int64}
+ )
+ """Vanilla Fock-Bargmann strategy. Fills the tensor by iterating over all indices
+ in ndindex (i.e. CartesianIndices) order.
+ Both the input and output of this function have dtype Complex{Float64},
+ but Complex{Float64x4} is used intermediately to postpone the numerical blowup
+ that results from the unstable recurrence relation.
+
+ Args:
+ A: A matrix of the Fock-Bargmann representation
+ b: B vector of the Fock-Bargmann representation
+ c: vacuum amplitude
+ shape: shape of the output tensor
+
+ Returns:
+ Array{Complex{Float64}}: Fock representation of the Gaussian tensor with shape ``shape``
+ """
+
+ shape = Tuple(shape)
+
+ path = CartesianIndices(shape)
+
+ G = Array{Complex{T}}(undef,shape) # initialize empty array with high precision values
+ G[first(path)] = c
+
+ for idx in Iterators.drop(path, 1)
+ update_Fock_array!(G, A, b, idx)
+ end
+
+ return Complex{Float64}.(G) # convert back to lower precision
+end
+
+function update_Fock_array!(
+ G::AbstractArray{Complex{T}},
+ A::AbstractMatrix{Complex{Float64}},
+ b::AbstractVector{Complex{Float64}},
+ idx::CartesianIndex,
+ )
+
+ i, pivot = get_pivot(idx)
+
+ @views temp = b[i] * G[pivot]
+ @inbounds for (j, neighbor) in get_neighbors(pivot)
+ @views temp += A[i, j] * G[neighbor] * SQRT[pivot[j]]
+ end
+
+ G[idx] = temp / SQRT[idx[i]]
+end
+
+function get_pivot(idx::CartesianIndex)
+ """returns a single idx where the first non-one value of idx has been lowered.
+ E.g. (1,1,5,3) -> (1,1,4,3)"""
+ @inbounds @simd for i in 1:length(idx)
+ if idx[i] > 1
+ return i, CartesianIndex(ntuple(j -> j == i ? idx[j] - 1 : idx[j], length(idx)))
+ end
+ end
+ return 1, idx
+end
+
+function get_neighbors(idx::CartesianIndex{N}) where {N}
+ """Returns the lower neighbors of idx: pairs (i, neighbor) where neighbor is idx with idx[i] lowered by one, for each i with idx[i] > 1"""
+ return ((i, CartesianIndex(ntuple(d -> d == i ? idx[d] - 1 : idx[d], Val(N))))
+ for i in 1:N if idx[i] > 1)
+end
\ No newline at end of file
diff --git a/mrmustard/math/lattice/strategies/vanilla.py b/mrmustard/math/lattice/strategies/vanilla.py
index 6943f98cf..ece85b11d 100644
--- a/mrmustard/math/lattice/strategies/vanilla.py
+++ b/mrmustard/math/lattice/strategies/vanilla.py
@@ -16,17 +16,18 @@
from numba import njit
from mrmustard.math.lattice import paths, steps
-from mrmustard.typing import ComplexMatrix, ComplexTensor, ComplexVector
+from mrmustard.utils.typing import ComplexMatrix, ComplexTensor, ComplexVector
+from .flat_indices import first_available_pivot, lower_neighbors, shape_to_strides
-SQRT = np.sqrt(np.arange(100000))
-
-__all__ = ["vanilla", "vanilla_jacobian", "vanilla_vjp"]
+__all__ = ["vanilla", "vanilla_batch", "vanilla_jacobian", "vanilla_vjp"]
@njit
def vanilla(shape: tuple[int, ...], A, b, c) -> ComplexTensor: # pragma: no cover
- r"""Vanilla Fock-Bargmann strategy. Fills the tensor by iterating over all indices
- in ndindex order.
+ r"""Vanilla Fock-Bargmann strategy.
+
+ Flattens the tensors, then fills it by iterating over all indices in the order
+ given by ``np.ndindex``. Finally, it reshapes the tensor before returning.
Args:
shape (tuple[int, ...]): shape of the output tensor
@@ -34,6 +35,59 @@ def vanilla(shape: tuple[int, ...], A, b, c) -> ComplexTensor: # pragma: no cov
b (np.ndarray): B vector of the Fock-Bargmann representation
c (complex): vacuum amplitude
+ Returns:
+ np.ndarray: Fock representation of the Gaussian tensor with shape ``shape``
+ """
+ # calculate the strides
+ strides = shape_to_strides(np.array(shape))
+
+ # init flat output tensor
+ ret = np.array([0 + 0j] * np.prod(np.array(shape)))
+
+ # initialize the indeces.
+ # ``index`` is the index of the flattened output tensor, while
+ # ``index_u_iter`` iterates through the unravelled counterparts of
+ # ``index``.
+ index = 0
+ index_u_iter = np.ndindex(shape)
+ next(index_u_iter)
+
+ # write vacuum amplitude
+ ret[0] = c
+
+ # iterate over the rest of the indices
+ for index_u in index_u_iter:
+ # update index
+ index += 1
+
+ # calculate pivot's contribution
+ i, pivot = first_available_pivot(index, strides)
+ value_at_index = b[i] * ret[pivot]
+
+ # add the contribution of pivot's lower's neighbours
+ ns = lower_neighbors(pivot, strides, i)
+ (j0, n0) = next(ns)
+ value_at_index += A[i, j0] * np.sqrt(index_u[j0] - 1) * ret[n0]
+ for j, n in ns:
+ value_at_index += A[i, j] * np.sqrt(index_u[j]) * ret[n]
+ ret[index] = value_at_index / np.sqrt(index_u[i])
+
+ return ret.reshape(shape)
+
+
+@njit
+def vanilla_batch(shape: tuple[int, ...], A, b, c) -> ComplexTensor: # pragma: no cover
+ r"""Vanilla Fock-Bargmann strategy for batched ``b``, with batched dimension on the
+ last index.
+
+ Fills the tensor by iterating over all indices in the order given by ``np.ndindex``.
+
+ Args:
+ shape (tuple[int, ...]): shape of the output tensor with the batch dimension on the last term
+ A (np.ndarray): A matrix of the Fock-Bargmann representation
+ b (np.ndarray): batched B vector of the Fock-Bargmann representation, the batch dimension is on the last index
+ c (complex): vacuum amplitude
+
Returns:
np.ndarray: Fock representation of the Gaussian tensor with shape ``shape``
"""
@@ -42,14 +96,15 @@ def vanilla(shape: tuple[int, ...], A, b, c) -> ComplexTensor: # pragma: no cov
G = np.zeros(shape, dtype=np.complex128)
# initialize path iterator
- path = np.ndindex(shape)
+ path = np.ndindex(shape[:-1]) # We know the last dimension is the batch one
# write vacuum amplitude
G[next(path)] = c
# iterate over the rest of the indices
for index in path:
- G[index] = steps.vanilla_step(G, A, b, index)
+ G[index] = steps.vanilla_step_batch(G, A, b, index)
+
return G
@@ -92,26 +147,44 @@ def vanilla_vjp(G, c, dLdG) -> tuple[ComplexMatrix, ComplexVector, complex]: #
Returns:
tuple[np.ndarray, np.ndarray, complex]: dL/dA, dL/db, dL/dc
"""
- D = G.ndim
+ shape = G.shape
+
+ # calculate the strides
+ strides = shape_to_strides(np.array(shape))
+
+ # linearize G
+ G_lin = G.flatten()
# init gradients
+ D = G.ndim
dA = np.zeros((D, D), dtype=np.complex128) # component of dL/dA
db = np.zeros(D, dtype=np.complex128) # component of dL/db
dLdA = np.zeros_like(dA)
dLdb = np.zeros_like(db)
- # initialize path iterator
- path = np.ndindex(G.shape)
+ # initialize the indices.
+ # ``index`` is the index of the flattened output tensor, while
+ # ``index_u_iter`` iterates through the unravelled counterparts of
+ # ``index``.
+ index = 0
+ index_u_iter = np.ndindex(shape)
+ next(index_u_iter)
- # skip first index
- next(path)
+ for index_u in index_u_iter:
+ index += 1
- # iterate over the rest of the indices
- for index in path:
- dA, db = steps.vanilla_step_grad(G, index, dA, db)
- dLdA += dA * dLdG[index]
- dLdb += db * dLdG[index]
+ ns = lower_neighbors(index, strides, 0)
+
+ for i, _ in enumerate(db):
+ _, n = next(ns)
+ db[i] = np.sqrt(index_u[i]) * G_lin[n]
+ dA[i, i] = 0.5 * np.sqrt(index_u[i] * (index_u[i] - 1)) * G_lin[n - strides[i]]
+ for j in range(i + 1, len(db)):
+ dA[i, j] = np.sqrt(index_u[i] * index_u[j]) * G_lin[n - strides[j]]
+
+ dLdA += dA * dLdG[index_u]
+ dLdb += db * dLdG[index_u]
- dLdc = np.sum(G * dLdG) / c
+ dLdc = np.sum(G_lin.reshape(shape) * dLdG) / c
return dLdA, dLdb, dLdc
diff --git a/mrmustard/math/math_interface.py b/mrmustard/math/math_interface.py
deleted file mode 100644
index 37255ad26..000000000
--- a/mrmustard/math/math_interface.py
+++ /dev/null
@@ -1,1232 +0,0 @@
-# Copyright 2021 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This module contains the :class:`Math` interface that every backend has to implement."""
-
-from abc import ABC, abstractmethod
-from functools import lru_cache
-from itertools import product
-from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
-
-import numpy as np
-from scipy.special import binom
-from scipy.stats import ortho_group, unitary_group
-
-from mrmustard import settings
-from mrmustard.typing import (
- Matrix,
- Scalar,
- Tensor,
- Trainable,
- Vector,
-)
-
-
-# pylint: disable=too-many-public-methods
-class MathInterface(ABC):
- r"""The interface that all backends must implement."""
- _euclidean_opt: type = None # NOTE this is an object that
-
- # backend is a singleton
- __instance = None
-
- # pylint: disable=unused-argument
- def __new__(cls, *args, **kwargs):
- if cls.__instance is None:
- cls.__instance = super().__new__(cls)
- return cls.__instance
-
- @abstractmethod
- def __getattr__(self, name):
- ... # pass the call to the actual backend
-
- # ~~~~~~~~~
- # Basic ops
- # ~~~~~~~~~
-
- @abstractmethod
- def abs(self, array: Tensor) -> Tensor:
- r"""Returns the absolute value of array.
-
- Args:
- array (array): array to take the absolute value of
-
- Returns:
- array: absolute value of array
- """
-
- @abstractmethod
- def any(self, array: Tensor) -> bool:
- r"""Returns ``True`` if any element of array is ``True``.
-
- Args:
- array (array): array to check
-
- Returns:
- bool: True if any element of array is True
- """
-
- @abstractmethod
- def arange(self, start: int, limit: int = None, delta: int = 1, dtype: Any = None) -> Tensor:
- r"""Returns an array of evenly spaced values within a given interval.
-
- Args:
- start (int): start of the interval
- limit (int): end of the interval
- delta (int): step size
- dtype (type): dtype of the returned array
-
- Returns:
- array: array of evenly spaced values
- """
- # NOTE: is float64 by default
-
- @abstractmethod
- def asnumpy(self, tensor: Tensor) -> Tensor:
- r"""Converts a tensor to a NumPy array.
-
- Args:
- tensor (array): tensor to convert
-
- Returns:
- array: NumPy array
- """
-
- @abstractmethod
- def assign(self, tensor: Tensor, value: Tensor) -> Tensor:
- r"""Assigns value to tensor.
-
- Args:
- tensor (array): tensor to assign to
- value (array): value to assign
-
- Returns:
- array: tensor with value assigned
- """
-
- @abstractmethod
- def astensor(self, array: Tensor, dtype: str) -> Tensor:
- r"""Converts a numpy array to a tensor.
-
- Args:
- array (array): numpy array to convert
- dtype (str): dtype of the tensor
-
- Returns:
- array: tensor with dtype
- """
-
- @abstractmethod
- def atleast_1d(self, array: Tensor, dtype=None) -> Tensor:
- r"""Returns an array with at least one dimension.
-
- Args:
- array (array): array to convert
- dtype (dtype): data type of the array
-
- Returns:
- array: array with at least one dimension
- """
-
- @abstractmethod
- def cast(self, array: Tensor, dtype) -> Tensor:
- r"""Casts ``array`` to ``dtype``.
-
- Args:
- array (array): array to cast
- dtype (dtype): data type to cast to
-
- Returns:
- array: array cast to dtype
- """
-
- @abstractmethod
- def clip(self, array: Tensor, a_min: float, a_max: float) -> Tensor:
- r"""Clips array to the interval ``[a_min, a_max]``.
-
- Args:
- array (array): array to clip
- a_min (float): minimum value
- a_max (float): maximum value
-
- Returns:
- array: clipped array
- """
-
- @abstractmethod
- def concat(self, values: Sequence[Tensor], axis: int) -> Tensor:
- r"""Concatenates values along the given axis.
-
- Args:
- values (array): values to concatenate
- axis (int): axis along which to concatenate
-
- Returns:
- array: concatenated values
- """
-
- @abstractmethod
- def conj(self, array: Tensor) -> Tensor:
- r"""Returns the complex conjugate of array.
-
- Args:
- array (array): array to take the complex conjugate of
-
- Returns:
- array: complex conjugate of array
- """
-
- @abstractmethod
- def constraint_func(
- self, bounds: Tuple[Optional[float], Optional[float]]
- ) -> Optional[Callable]:
- r"""Returns a constraint function for the given bounds.
-
- A constraint function will clip the value to the interval given by the bounds.
-
- .. note::
-
- The upper and/or lower bounds can be ``None``, in which case the constraint
- function will not clip the value.
-
- Args:
- bounds (tuple): bounds of the constraint
-
- Returns:
- function: constraint function
- """
-
- @abstractmethod
- def convolution(
- self,
- array: Tensor,
- filters: Tensor,
- padding="VALID",
- data_format="NWC",
- ) -> Tensor: # TODO: remove strides and data_format?
- r"""Performs a convolution on array with filters.
-
- Args:
- array (array): array to convolve
- filters (array): filters to convolve with
- padding (str): padding mode
- data_format (str): data format of the array
-
- Returns:
- array: convolved array
- """
-
- @abstractmethod
- def cos(self, array: Tensor) -> Tensor:
- r"""Returns the cosine of array.
-
- Args:
- array (array): array to take the cosine of
-
- Returns:
- array: cosine of array
- """
-
- @abstractmethod
- def cosh(self, array: Tensor) -> Tensor:
- r"""Returns the hyperbolic cosine of array.
-
- Args:
- array (array): array to take the hyperbolic cosine of
-
- Returns:
- array: hyperbolic cosine of array
- """
-
- def make_complex(self, real: Tensor, imag: Tensor) -> Tensor:
- """Given two real tensors representing the real and imaginary part of a complex number,
- this operation returns a complex tensor. The input tensors must have the same shape.
-
- Args:
- real (array): real part of the complex number
- imag (array): imaginary part of the complex number
-
- Returns:
- array: complex array ``real + 1j * imag``
- """
-
- @abstractmethod
- def atan2(self, y: Tensor, x: Tensor) -> Tensor:
- r"""Computes the trignometric inverse tangent of y/x element-wise.
-
- Args:
- y (array): numerator array
- x (array): denominator array
-
- Returns:
- array: arctan of y/x
- """
-
- @abstractmethod
- def det(self, matrix: Tensor) -> Tensor:
- r"""Returns the determinant of matrix.
-
- Args:
- matrix (matrix): matrix to take the determinant of
-
- Returns:
- determinant of matrix
- """
-
- @abstractmethod
- def diag(self, array: Tensor, k: int) -> Tensor:
- r"""Returns the array made by inserting the given array along the :math:`k`-th diagonal.
-
- Args:
- array (array): array to insert
- k (int): kth diagonal to insert array into
-
- Returns:
- array: array with array inserted into the kth diagonal
- """
-
- @abstractmethod
- def diag_part(self, array: Tensor, k: int) -> Tensor:
- r"""Returns the array of the main diagonal of array.
-
- Args:
- array (array): array to extract the main diagonal of
- k (int): diagonal to extract
-
- Returns:
- array: array of the main diagonal of array
- """
-
- @abstractmethod
- def eigvals(self, tensor: Tensor) -> Tensor:
- r"""Returns the eigenvalues of a matrix."""
-
- @abstractmethod
- def einsum(self, string: str, *tensors) -> Tensor:
- r"""Returns the result of the Einstein summation convention on the tensors.
-
- Args:
- string (str): string of the Einstein summation convention
- tensors (array): tensors to perform the Einstein summation on
-
- Returns:
- array: result of the Einstein summation convention
- """
-
- @abstractmethod
- def exp(self, array: Tensor) -> Tensor:
- r"""Returns the exponential of array element-wise.
-
- Args:
- array (array): array to take the exponential of
-
- Returns:
- array: exponential of array
- """
-
- @abstractmethod
- def expand_dims(self, array: Tensor, axis: int) -> Tensor:
- r"""Returns the array with an additional dimension inserted at the given axis.
-
- Args:
- array (array): array to expand
- axis (int): axis to insert the new dimension
-
- Returns:
- array: array with an additional dimension inserted at the given axis
- """
-
- @abstractmethod
- def expm(self, matrix: Tensor) -> Tensor:
- r"""Returns the matrix exponential of matrix.
-
- Args:
- matrix (matrix): matrix to take the exponential of
-
- Returns:
- matrix: exponential of matrix
- """
-
- @abstractmethod
- def eye(self, size: int, dtype) -> Tensor:
- r"""Returns the identity matrix of size.
-
- Args:
- size (int): size of the identity matrix
- dtype (dtype): data type of the identity matrix
-
- Returns:
- matrix: identity matrix
- """
-
- @abstractmethod
- def eye_like(self, array: Tensor) -> Tensor:
- r"""Returns the identity matrix of the same shape and dtype as array.
-
- Args:
- array (array): array to create the identity matrix of
-
- Returns:
- matrix: identity matrix
- """
-
- @abstractmethod
- def from_backend(self, value: Any) -> bool:
- r"""Returns whether the given tensor is a tensor of the concrete backend."""
-
- @abstractmethod
- def gather(self, array: Tensor, indices: Tensor, axis: int) -> Tensor:
- r"""Returns the values of the array at the given indices.
-
- Args:
- array (array): array to gather values from
- indices (array): indices to gather values from
- axis (int): axis to gather values from
-
- Returns:
- array: values of the array at the given indices
- """
-
- @abstractmethod
- def hash_tensor(self, tensor: Tensor) -> int:
- r"""Returns the hash of the given tensor.
-
- Args:
- tensor (array): tensor to hash
-
- Returns:
- int: hash of the given tensor
- """
-
- @abstractmethod
- def hermite_renormalized(self, A: Matrix, B: Vector, C: Scalar, shape: Sequence[int]) -> Tensor:
- r"""Returns the array of hermite renormalized polynomials of the given coefficients.
-
- Args:
- A (array): Matrix coefficient of the hermite polynomial
- B (array): Vector coefficient of the hermite polynomial
- C (array): Scalar coefficient of the hermite polynomial
- shape (tuple): shape of the hermite polynomial
-
- Returns:
- array: renormalized hermite polynomials
- """
-
- @abstractmethod
- def imag(self, array: Tensor) -> Tensor:
- r"""Returns the imaginary part of array.
-
- Args:
- array (array): array to take the imaginary part of
-
- Returns:
- array: imaginary part of array
- """
-
- @abstractmethod
- def inv(self, tensor: Tensor) -> Tensor:
- r"""Returns the inverse of tensor.
-
- Args:
- tensor (array): tensor to take the inverse of
-
- Returns:
- array: inverse of tensor
- """
-
- @abstractmethod
- def is_trainable(self, tensor: Tensor) -> bool:
- r"""Returns whether the given tensor is trainable."""
-
- @abstractmethod
- def lgamma(self, x: Tensor) -> Tensor:
- r"""Returns the natural logarithm of the gamma function of ``x``.
-
- Args:
- x (array): array to take the natural logarithm of the gamma function of
-
- Returns:
- array: natural logarithm of the gamma function of ``x``
- """
-
- @abstractmethod
- def log(self, x: Tensor) -> Tensor:
- r"""Returns the natural logarithm of ``x``.
-
- Args:
- x (array): array to take the natural logarithm of
-
- Returns:
- array: natural logarithm of ``x``
- """
-
- @abstractmethod
- def matmul(
- self,
- a: Tensor,
- b: Tensor,
- transpose_a=False,
- transpose_b=False,
- adjoint_a=False,
- adjoint_b=False,
- ) -> Tensor:
- r"""Returns the matrix product of ``a`` and ``b``.
-
- Args:
- a (array): first matrix to multiply
- b (array): second matrix to multiply
- transpose_a (bool): whether to transpose ``a``
- transpose_b (bool): whether to transpose ``b``
- adjoint_a (bool): whether to adjoint ``a``
- adjoint_b (bool): whether to adjoint ``b``
-
- Returns:
- array: matrix product of ``a`` and ``b``
- """
-
- @abstractmethod
- def matvec(self, a: Matrix, b: Vector, transpose_a=False, adjoint_a=False) -> Tensor:
- r"""Returns the matrix vector product of ``a`` (matrix) and ``b`` (vector).
-
- Args:
- a (array): matrix to multiply
- b (array): vector to multiply
- transpose_a (bool): whether to transpose ``a``
- adjoint_a (bool): whether to adjoint ``a``
-
- Returns:
- array: matrix vector product of ``a`` and ``b``
- """
-
- @abstractmethod
- def maximum(self, a: Tensor, b: Tensor) -> Tensor:
- r"""Returns the element-wise maximum of ``a`` and ``b``.
-
- Args:
- a (array): first array to take the maximum of
- b (array): second array to take the maximum of
-
- Returns:
- array: element-wise maximum of ``a`` and ``b``
- """
-
- @abstractmethod
- def minimum(self, a: Tensor, b: Tensor) -> Tensor:
- r"""Returns the element-wise minimum of ``a`` and ``b``.
-
- Args:
- a (array): first array to take the minimum of
- b (array): second array to take the minimum of
-
- Returns:
- array: element-wise minimum of ``a`` and ``b``
- """
-
- @abstractmethod
- def new_variable(
- self, value: Tensor, bounds: Tuple[Optional[float], Optional[float]], name: str, dtype: Any
- ) -> Tensor:
- r"""Returns a new variable with the given value and bounds.
-
- Args:
- value (array): value of the new variable
- bounds (tuple): bounds of the new variable
- name (str): name of the new variable
- dtype (type): dtype of the array
- Returns:
- array: new variable
- """
-
- @abstractmethod
- def new_constant(self, value: Tensor, name: str, dtype: Any) -> Tensor:
- r"""Returns a new constant with the given value.
-
- Args:
- value (array): value of the new constant
- name (str): name of the new constant
- dtype (type): dtype of the array
-
- Returns:
- array: new constant
- """
-
- @abstractmethod
- def norm(self, array: Tensor) -> Tensor:
- r"""Returns the norm of array.
-
- Args:
- array (array): array to take the norm of
-
- Returns:
- array: norm of array
- """
-
- @abstractmethod
- def ones(self, shape: Sequence[int], dtype) -> Tensor:
- r"""Returns an array of ones with the given ``shape`` and ``dtype``.
-
- Args:
- shape (tuple): shape of the array
- dtype (type): dtype of the array
-
- Returns:
- array: array of ones
- """
- # NOTE : should be float64 by default
-
- @abstractmethod
- def ones_like(self, array: Tensor) -> Tensor:
- r"""Returns an array of ones with the same shape and ``dtype`` as ``array``.
-
- Args:
- array (array): array to take the shape and dtype of
-
- Returns:
- array: array of ones
- """
-
- @abstractmethod
- def outer(self, array1: Tensor, array2: Tensor) -> Tensor:
- r"""Returns the outer product of ``array1`` and ``array2``.
-
- Args:
- array1 (array): first array to take the outer product of
- array2 (array): second array to take the outer product of
-
- Returns:
- array: outer product of array1 and array2
- """
-
- @abstractmethod
- def pad(
- self, array: Tensor, paddings: Sequence[Tuple[int, int]], mode="CONSTANT", constant_values=0
- ) -> Tensor:
- r"""Returns the padded array.
-
- Args:
- array (array): array to pad
- paddings (tuple): paddings to apply
- mode (str): mode to apply the padding
- constant_values (int): constant values to use for padding
-
- Returns:
- array: padded array
- """
-
- @abstractmethod
- def pinv(self, matrix: Tensor) -> Tensor:
- r"""Returns the pseudo-inverse of matrix.
-
- Args:
- matrix (array): matrix to take the pseudo-inverse of
-
- Returns:
- array: pseudo-inverse of matrix
- """
-
- @abstractmethod
- def pow(self, x: Tensor, y: Tensor) -> Tensor:
- r"""Returns :math:`x^y`. Broadcasts ``x`` and ``y`` if necessary.
- Args:
- x (array): base
- y (array): exponent
-
- Returns:
- array: :math:`x^y`
- """
-
- @abstractmethod
- def real(self, array: Tensor) -> Tensor:
- r"""Returns the real part of ``array``.
-
- Args:
- array (array): array to take the real part of
-
- Returns:
- array: real part of ``array``
- """
-
- @abstractmethod
- def reshape(self, array: Tensor, shape: Sequence[int]) -> Tensor:
- r"""Returns the reshaped array.
-
- Args:
- array (array): array to reshape
- shape (tuple): shape to reshape the array to
-
- Returns:
- array: reshaped array
- """
-
- @abstractmethod
- def set_diag(self, array: Tensor, diag: Tensor, k: int) -> Tensor:
- r"""Returns the array with the diagonal set to ``diag``.
-
- Args:
- array (array): array to set the diagonal of
- diag (array): diagonal to set
- k (int): diagonal to set
-
- Returns:
- array: array with the diagonal set to ``diag``
- """
-
- @abstractmethod
- def sin(self, array: Tensor) -> Tensor:
- r"""Returns the sine of ``array``.
-
- Args:
- array (array): array to take the sine of
-
- Returns:
- array: sine of ``array``
- """
-
- @abstractmethod
- def sinh(self, array: Tensor) -> Tensor:
- r"""Returns the hyperbolic sine of ``array``.
-
- Args:
- array (array): array to take the hyperbolic sine of
-
- Returns:
- array: hyperbolic sine of ``array``
- """
-
- @abstractmethod
- def solve(self, matrix: Tensor, rhs: Tensor) -> Tensor:
- r"""Returns the solution of the linear system :math:`Ax = b`.
-
- Args:
- matrix (array): matrix :math:`A`
- rhs (array): vector :math:`b`
-
- Returns:
- array: solution :math:`x`
- """
-
- @abstractmethod
- def sqrt(self, x: Tensor, dtype=None) -> Tensor:
- r"""Returns the square root of ``x``.
-
- Args:
- x (array): array to take the square root of
- dtype (type): ``dtype`` of the output array
-
- Returns:
- array: square root of ``x``
- """
-
- @abstractmethod
- def sqrtm(self, tensor: Tensor) -> Tensor:
- r"""Returns the matrix square root."""
-
- @abstractmethod
- def sum(self, array: Tensor, axes: Sequence[int] = None):
- r"""Returns the sum of array.
-
- Args:
- array (array): array to take the sum of
- axes (tuple): axes to sum over
-
- Returns:
- array: sum of array
- """
-
- @abstractmethod
- def tensordot(self, a: Tensor, b: Tensor, axes: Sequence[int]) -> Tensor:
- r"""Returns the tensordot product of ``a`` and ``b``.
-
- Args:
- a (array): first array to take the tensordot product of
- b (array): second array to take the tensordot product of
- axes (tuple): axes to take the tensordot product over
-
- Returns:
- array: tensordot product of ``a`` and ``b``
- """
-
- @abstractmethod
- def tile(self, array: Tensor, repeats: Sequence[int]) -> Tensor:
- r"""Returns the tiled array.
-
- Args:
- array (array): array to tile
- repeats (tuple): number of times to tile the array along each axis
-
- Returns:
- array: tiled array
- """
-
- @abstractmethod
- def trace(self, array: Tensor, dtype: Any = None) -> Tensor:
- r"""Returns the trace of array.
-
- Args:
- array (array): array to take the trace of
- dtype (type): ``dtype`` of the output array
-
- Returns:
- array: trace of array
- """
-
- @abstractmethod
- def transpose(self, a: Tensor, perm: Sequence[int] = None):
- r"""Returns the transposed arrays.
-
- Args:
- a (array): array to transpose
- perm (tuple): permutation to apply to the array
-
- Returns:
- array: transposed array
- """
-
- @abstractmethod
- def unique_tensors(self, lst: List[Tensor]) -> List[Tensor]:
- r"""Returns the tensors in ``lst`` without duplicates and non-tensors.
-
- Args:
- lst (list): list of tensors to remove duplicates and non-tensors from.
-
- Returns:
- list: list of tensors without duplicates and non-tensors.
- """
-
- @abstractmethod
- def update_tensor(self, tensor: Tensor, indices: Tensor, values: Tensor) -> Tensor:
- r"""Updates a tensor in place with the given values.
-
- Args:
- tensor (array): tensor to update
- indices (array): indices to update
- values (array): values to update
- """
-
- @abstractmethod
- def update_add_tensor(self, tensor: Tensor, indices: Tensor, values: Tensor) -> Tensor:
- r"""Updates a tensor in place by adding the given values.
-
- Args:
- tensor (array): tensor to update
- indices (array): indices to update
- values (array): values to add
- """
-
- @abstractmethod
- def value_and_gradients(
- self, cost_fn: Callable, parameters: Dict[str, List[Trainable]]
- ) -> Tuple[Tensor, Dict[str, List[Tensor]]]:
- r"""Returns the loss and gradients of the given cost function.
-
- Args:
- cost_fn (callable): cost function to compute the loss and gradients of
- parameters (dict): parameters to compute the loss and gradients of
-
- Returns:
- tuple: loss and gradients (dict) of the given cost function
- """
-
- @abstractmethod
- def zeros(self, shape: Sequence[int], dtype) -> Tensor:
- r"""Returns an array of zeros with the given shape and ``dtype``.
-
- Args:
- shape (tuple): shape of the array
- dtype (type): dtype of the array
-
- Returns:
- array: array of zeros
- """
-
- @abstractmethod
- def zeros_like(self, array: Tensor) -> Tensor:
- r"""Returns an array of zeros with the same shape and ``dtype`` as ``array``.
-
- Args:
- array (array): array to take the shape and ``dtype`` of
-
- Returns:
- array: array of zeros
- """
-
- @abstractmethod
- def map_fn(self, func, elements: Tensor) -> Tensor:
- """Transforms elems by applying fn to each element unstacked on axis 0.
-
- Args:
- func (func): The callable to be performed. It accepts one argument,
- which will have the same (possibly nested) structure as elems.
- elements (Tensor): A tensor or (possibly nested) sequence of tensors,
- each of which will be unstacked along their first dimension.
- ``func`` will be applied to the nested sequence of the resulting slices.
-
- Returns:
- Tensor: applied ``func`` on ``elements``
- """
-
- @abstractmethod
- def squeeze(self, tensor: Tensor, axis: Optional[List[int]]) -> Tensor:
- """Removes dimensions of size 1 from the shape of a tensor.
-
- Args:
- tensor (Tensor): the tensor to squeeze
- axis (Optional[List[int]]): if specified, only squeezes the
- dimensions listed, defaults to []
-
- Returns:
- Tensor: tensor with one or more dimensions of size 1 removed
- """
-
- @abstractmethod
- def cholesky(self, input: Tensor) -> Tensor:
- """Computes the Cholesky decomposition of square matrices.
-
- Args:
- input (Tensor)
-
- Returns:
- Tensor: tensor with the same type as input
- """
-
- @abstractmethod
- def Categorical(self, probs: Tensor, name: str):
- """Categorical distribution over integers.
-
- Args:
- probs (Tensor): tensor representing the probabilities of a set of Categorical
- distributions.
- name (str): name prefixed to operations created by this class
-
- Returns:
- tfp.distributions.Categorical: instance of ``tfp.distributions.Categorical`` class
- """
-
- @abstractmethod
- def MultivariateNormalTriL(self, loc: Tensor, scale_tril: Tensor):
- """Multivariate normal distribution on `R^k` and parameterized by a (batch of) length-k loc
- vector (aka "mu") and a (batch of) k x k scale matrix; covariance = scale @ scale.T
- where @ denotes matrix-multiplication.
-
- Args:
- loc (Tensor): if this is set to None, loc is implicitly 0
- scale_tril: lower-triangular Tensor with non-zero diagonal elements
-
- Returns:
- tfp.distributions.MultivariateNormalTriL: instance of ``tfp.distributions.MultivariateNormalTriL``
- """
-
- # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- # Methods that build on the basic ops and don't need to be overridden in the backend implementation
- # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- @property
- def euclidean_opt(self):
- r"""Returns the configured Euclidean optimizer."""
- if not self._euclidean_opt:
- self._euclidean_opt = self.DefaultEuclideanOptimizer()
- return self._euclidean_opt
-
- # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- # Methods that build on the basic ops and don't need to be overridden in the backend implementation
- # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- def block(self, blocks: List[List[Tensor]], axes=(-2, -1)) -> Tensor:
- r"""Returns a matrix made from the given blocks.
-
- Args:
- blocks (list): list of lists of compatible blocks
- axes (tuple): axes to stack the blocks along
-
- Returns:
- array: matrix made of blocks
- """
- rows = [self.concat(row, axis=axes[1]) for row in blocks]
- return self.concat(rows, axis=axes[0])
-
- def dagger(self, array: Tensor) -> Tensor:
- """Returns the adjoint of ``array``. This operation swaps the first
- and second half of the indexes and then conjugates the matrix.
-
- Args:
- array (array): array to take the adjoint of
-
- Returns:
- array: adjoint of ``array``
- """
- N = len(array.shape) // 2
- perm = list(range(N, 2 * N)) + list(range(0, N))
- return self.conj(self.transpose(array, perm=perm))
-
- def unitary_to_orthogonal(self, U):
- r"""Unitary to orthogonal mapping.
-
- Args:
- U (array): unitary matrix in ``U(n)``
-
- Returns:
- array: orthogonal matrix in :math:`O(2n)`
- """
- X = self.real(U)
- Y = self.imag(U)
- return self.block([[X, -Y], [Y, X]])
-
- def random_symplectic(self, num_modes: int, max_r: float = 1.0) -> Tensor:
- r"""A random symplectic matrix in ``Sp(2*num_modes)``.
-
- Squeezing is sampled uniformly from 0.0 to ``max_r`` (1.0 by default).
- """
- if num_modes == 1:
- W = np.exp(1j * settings.rng.uniform(size=(1, 1)))
- V = np.exp(1j * settings.rng.uniform(size=(1, 1)))
- else:
- W = unitary_group.rvs(dim=num_modes, random_state=settings.rng)
- V = unitary_group.rvs(dim=num_modes, random_state=settings.rng)
- r = settings.rng.uniform(low=0.0, high=max_r, size=num_modes)
- OW = self.unitary_to_orthogonal(W)
- OV = self.unitary_to_orthogonal(V)
- dd = self.diag(self.concat([self.exp(-r), np.exp(r)], axis=0), k=0)
- return OW @ dd @ OV
-
- @staticmethod
- def random_orthogonal(N: int) -> Tensor:
- """A random orthogonal matrix in :math:`O(N)`."""
- if N == 1:
- return np.array([[1.0]])
- return ortho_group.rvs(dim=N, random_state=settings.rng)
-
- def random_unitary(self, N: int) -> Tensor:
- """a random unitary matrix in :math:`U(N)`"""
- if N == 1:
- return self.exp(1j * settings.rng.uniform(size=(1, 1)))
- return unitary_group.rvs(dim=N, random_state=settings.rng)
-
- def single_mode_to_multimode_vec(self, vec, num_modes: int):
- r"""Apply the same 2-vector (i.e. single-mode) to a larger number of modes."""
- if vec.shape[-1] != 2:
- raise ValueError("vec must be 2-dimensional (i.e. single-mode)")
- x, y = vec[..., -2], vec[..., -1]
- vec = self.concat([self.tile([x], [num_modes]), self.tile([y], [num_modes])], axis=-1)
- return vec
-
- def single_mode_to_multimode_mat(self, mat: Tensor, num_modes: int):
- r"""Apply the same :math:`2\times 2` matrix (i.e. single-mode) to a larger number of modes."""
- if mat.shape[-2:] != (2, 2):
- raise ValueError("mat must be a single-mode (2x2) matrix")
- mat = self.diag(
- self.tile(self.expand_dims(mat, axis=-1), (1, 1, num_modes)), k=0
- ) # shape [2,2,N,N]
- mat = self.reshape(self.transpose(mat, (0, 2, 1, 3)), [2 * num_modes, 2 * num_modes])
- return mat
-
- @staticmethod
- @lru_cache()
- def Xmat(num_modes: int):
- r"""Returns the matrix :math:`X_n = \begin{bmatrix}0 & I_n\\ I_n & 0\end{bmatrix}.`
-
- Args:
- num_modes (int): positive integer
-
- Returns:
- array: :math:`2N\times 2N` array
- """
- I = np.identity(num_modes)
- O = np.zeros((num_modes, num_modes))
- return np.block([[O, I], [I, O]])
-
- @staticmethod
- @lru_cache()
- def rotmat(num_modes: int):
- "Rotation matrix from quadratures to complex amplitudes."
- I = np.identity(num_modes)
- return np.sqrt(0.5) * np.block([[I, 1j * I], [I, -1j * I]])
-
- @staticmethod
- @lru_cache()
- def J(num_modes: int):
- """Symplectic form."""
- I = np.identity(num_modes)
- O = np.zeros_like(I)
- return np.block([[O, I], [-I, O]])
-
- def add_at_modes(
- self, old: Tensor, new: Optional[Tensor], modes: Sequence[int]
- ) -> Tensor: # NOTE: To be deprecated (XPTensor)
- """Adds two phase-space tensors (cov matrices, displacement vectors, etc..) on the specified modes."""
- if new is None:
- return old
- N = old.shape[-1] // 2
- indices = modes + [m + N for m in modes]
- return self.update_add_tensor(
- old, list(product(*[indices] * len(new.shape))), self.reshape(new, -1)
- )
-
- def left_matmul_at_modes(
- self, a_partial: Tensor, b_full: Tensor, modes: Sequence[int]
- ) -> Tensor: # NOTE: To be deprecated (XPTensor)
- r"""Left matrix multiplication of a partial matrix and a full matrix.
-
- It assumes that that ``a_partial`` is a matrix operating on M modes and that ``modes`` is a
- list of ``M`` integers, i.e., it will apply ``a_partial`` on the corresponding ``M`` modes
- of ``b_full`` from the left.
-
- Args:
- a_partial (array): :math:`2M\times 2M` array
- b_full (array): :math:`2N\times 2N` array
- modes (list): list of ``M`` modes to perform the multiplication on
-
- Returns:
- array: :math:`2N\times 2N` array
- """
- if a_partial is None:
- return b_full
- N = b_full.shape[-1] // 2
- indices = self.astensor(modes + [m + N for m in modes], dtype="int32")
- b_rows = self.gather(b_full, indices, axis=0)
- b_rows = self.matmul(a_partial, b_rows)
- return self.update_tensor(b_full, indices[:, None], b_rows)
-
- def right_matmul_at_modes(
- self, a_full: Tensor, b_partial: Tensor, modes: Sequence[int]
- ) -> Tensor: # NOTE: To be deprecated (XPTensor)
- r"""Right matrix multiplication of a full matrix and a partial matrix.
-
- It assumes that that ``b_partial`` is a matrix operating on ``M`` modes and that ``modes``
- is a list of ``M`` integers, i.e., it will apply ``b_partial`` on the corresponding M modes
- of ``a_full`` from the right.
-
- Args:
- a_full (array): :math:`2N\times 2N` array
- b_partial (array): :math:`2M\times 2M` array
- modes (list): list of `M` modes to perform the multiplication on
-
- Returns:
- array: :math:`2N\times 2N` array
- """
- return self.transpose(
- self.left_matmul_at_modes(self.transpose(b_partial), self.transpose(a_full), modes)
- )
-
- def matvec_at_modes(
- self, mat: Optional[Tensor], vec: Tensor, modes: Sequence[int]
- ) -> Tensor: # NOTE: To be deprecated (XPTensor)
- """Matrix-vector multiplication between a phase-space matrix and a vector in the specified modes."""
- if mat is None:
- return vec
- N = vec.shape[-1] // 2
- indices = self.astensor(modes + [m + N for m in modes], dtype="int32")
- updates = self.matvec(mat, self.gather(vec, indices, axis=0))
- return self.update_tensor(vec, indices[:, None], updates)
-
- def all_diagonals(self, rho: Tensor, real: bool) -> Tensor:
- """Returns all the diagonals of a density matrix."""
- cutoffs = rho.shape[: rho.ndim // 2]
- rho = self.reshape(rho, (int(np.prod(cutoffs)), int(np.prod(cutoffs))))
- diag = self.diag_part(rho)
- if real:
- return self.real(self.reshape(diag, cutoffs))
-
- return self.reshape(diag, cutoffs)
-
- def poisson(self, max_k: int, rate: Tensor) -> Tensor:
- """Poisson distribution up to ``max_k``."""
- k = self.arange(max_k)
- rate = self.cast(rate, k.dtype)
- return self.exp(k * self.log(rate + 1e-9) - rate - self.lgamma(k + 1.0))
-
- def binomial_conditional_prob(self, success_prob: Tensor, dim_out: int, dim_in: int):
- """:math:`P(out|in) = binom(in, out) * (1-success_prob)**(in-out) * success_prob**out`."""
- in_ = self.arange(dim_in)[None, :]
- out_ = self.arange(dim_out)[:, None]
- return (
- self.cast(binom(in_, out_), in_.dtype)
- * self.pow(success_prob, out_)
- * self.pow(1.0 - success_prob, self.maximum(in_ - out_, 0.0))
- )
-
- def convolve_probs_1d(self, prob: Tensor, other_probs: List[Tensor]) -> Tensor:
- """Convolution of a joint probability with a list of single-index probabilities."""
-
- if prob.ndim > 3 or len(other_probs) > 3:
- raise ValueError("cannot convolve arrays with more than 3 axes")
- if not all((q.ndim == 1 for q in other_probs)):
- raise ValueError("other_probs must contain 1d arrays")
- if not all((len(q) == s for q, s in zip(other_probs, prob.shape))):
- raise ValueError("The length of the 1d prob vectors must match shape of prob")
-
- q = other_probs[0]
- for q_ in other_probs[1:]:
- q = q[..., None] * q_[(None,) * q.ndim + (slice(None),)]
-
- return self.convolve_probs(prob, q)
-
- def convolve_probs(self, prob: Tensor, other: Tensor) -> Tensor:
- r"""Convolve two probability distributions (up to 3D) with the same shape.
-
- Note that the output is not guaranteed to be a complete joint probability,
- as it's computed only up to the dimension of the base probs.
- """
-
- if prob.ndim > 3 or other.ndim > 3:
- raise ValueError("cannot convolve arrays with more than 3 axes")
- if not prob.shape == other.shape:
- raise ValueError("prob and other must have the same shape")
-
- prob_padded = self.pad(prob, [(s - 1, 0) for s in other.shape])
- other_reversed = other[(slice(None, None, -1),) * other.ndim]
- return self.convolution(
- prob_padded[None, ..., None],
- other_reversed[..., None, None],
- padding="VALID", # TODO: do we need to specify this?
- data_format="N"
- + ("HD"[: other.ndim - 1])[::-1]
- + "WC", # TODO: rewrite this to be more readable (do we need it?)
- )[0, ..., 0]
-
- def euclidean_to_symplectic(self, S: Matrix, dS_euclidean: Matrix) -> Matrix:
- r"""Convert the Euclidean gradient to a Riemannian gradient on the
- tangent bundle of the symplectic manifold.
-
- Implemented from:
- Wang J, Sun H, Fiori S. A Riemannian‐steepest‐descent approach
- for optimization on the real symplectic group.
- Mathematical Methods in the Applied Sciences. 2018 Jul 30;41(11):4273-86.
-
- Args:
- S (Matrix): symplectic matrix
- dS_euclidean (Matrix): Euclidean gradient tensor
-
- Returns:
- Matrix: symplectic gradient tensor
- """
- Jmat = self.J(S.shape[-1] // 2)
- Z = self.matmul(self.transpose(S), dS_euclidean)
- return 0.5 * (Z + self.matmul(self.matmul(Jmat, self.transpose(Z)), Jmat))
-
- def euclidean_to_unitary(self, U: Matrix, dU_euclidean: Matrix) -> Matrix:
- r"""Convert the Euclidean gradient to a Riemannian gradient on the
- tangent bundle of the unitary manifold.
-
- Implemented from:
- Y Yao, F Miatto, N Quesada - arXiv preprint arXiv:2209.06069, 2022.
-
- Args:
- U (Matrix): unitary matrix
- dU_euclidean (Matrix): Euclidean gradient tensor
-
- Returns:
- Matrix: unitary gradient tensor
- """
- Z = self.matmul(self.conj(self.transpose(U)), dU_euclidean)
- return 0.5 * (Z - self.conj(self.transpose(Z)))
diff --git a/mrmustard/math/parameter_set.py b/mrmustard/math/parameter_set.py
new file mode 100644
index 000000000..59fa4fe8b
--- /dev/null
+++ b/mrmustard/math/parameter_set.py
@@ -0,0 +1,136 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains the classes to describe sets of parameters."""
+
+from typing import Sequence, Union
+
+from mrmustard.math.backend_manager import BackendManager
+
+from .parameters import Constant, Variable
+
+math = BackendManager()
+
+__all__ = [
+ "ParameterSet",
+]
+
+
+class ParameterSet:
+ r"""
+ A set of parameters.
+
+ ``ParameterSet`` can store both constant and variable parameters. It provides fast access to
+ both classes of parameters, as well as to their names.
+
+ .. code::
+
+ const1 = Constant(1.2345, "const1")
+ const2 = Constant(2.3456, "const2")
+ var1 = Variable(3.4567, "var1")
+
+ ps = ParameterSet()
+ ps.add_parameter(const1)
+ ps.add_parameter(const2)
+ ps.add_parameter(var1)
+
+ ps.names # returns `["const1", "const2", "var1"]`
+ ps.constants # returns `{"const1": const1, "const2": const2}`
+ ps.variables # returns `{"var1": var1}`
+ """
+
+ def __init__(self):
+ self._names: list[str] = []
+ self._constants: dict[str, Constant] = {}
+ self._variables: dict[str, Variable] = {}
+
+ @property
+ def constants(self) -> dict[str, Constant]:
+ r"""
+ The constant parameters in this parameter set.
+ """
+ return self._constants
+
+ @property
+ def variables(self) -> dict[str, Variable]:
+ r"""
+ The variable parameters in this parameter set.
+ """
+ return self._variables
+
+ @property
+ def names(self) -> Sequence[str]:
+ r"""
+ The names of all the parameters in this parameter set, in the order in which they
+ were added.
+ """
+ return self._names
+
+ def add_parameter(self, parameter: Union[Constant, Variable]) -> None:
+ r"""
+ Adds a parameter to this parameter set.
+
+ Args:
+ parameter: A constant or variable parameter.
+
+ Raises:
+ ValueError: If this parameter set already contains a parameter with the same
+ name as that of the given parameter.
+ """
+ name = parameter.name
+
+ if name in self.names:
+ msg = f"A parameter with name ``{name}`` is already part of this parameter set."
+ raise ValueError(msg)
+ self._names.append(name)
+
+ # updates dictionary and dynamically creates an attribute
+ if isinstance(parameter, Constant):
+ self.constants[name] = parameter
+ self.__dict__[name] = self.constants[name]
+ elif isinstance(parameter, Variable):
+ self.variables[parameter.name] = parameter
+ self.__dict__[name] = self.variables[name]
+
+ def tagged_variables(self, tag: str) -> dict[str, Variable]:
+ r"""
+ Returns a dictionary whose keys are tagged names of the variables in this parameter set, and whose
+ values are the variables in this parameter set. Tagging is done by prepending the string ``f"{tag}/"``
+ to variables' original names.
+ """
+ ret = {}
+ for k, v in self.variables.items():
+ ret[f"{tag}/{k}"] = v
+ return ret
+
+ def to_string(self, decimals: int) -> str:
+ r"""
+ Returns a string representation of the parameter values, separated by commas and rounded
+ to the specified number of decimals.
+
+ Args:
+ decimals (int): number of decimals to round to
+
+ Returns:
+ str: string representation of the parameter values
+ """
+ strings = []
+ for name in self.names:
+ param = self.constants.get(name) or self.variables.get(name)
+ if len(param.value.shape) == 0: # don't show arrays
+ string = str(math.asnumpy(math.round(param.value, decimals)))
+ else:
+ string = f"{name}"
+ strings.append(string)
+ return ", ".join(strings)
diff --git a/mrmustard/math/parameters.py b/mrmustard/math/parameters.py
new file mode 100644
index 000000000..ed0009ad9
--- /dev/null
+++ b/mrmustard/math/parameters.py
@@ -0,0 +1,290 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains the classes to describe constant and variable parameters used in Mr Mustard."""
+
+from typing import Callable, Optional, Tuple
+
+from mrmustard.math.backend_manager import BackendManager
+
+math = BackendManager()
+
+
+__all__ = ["Constant", "Variable"]
+
+
+# ~~~~~~~~~
+# Functions
+# ~~~~~~~~~
+
+
+def update_symplectic(grads_and_vars, symplectic_lr: float):
+ r"""
+ Updates the symplectic parameters using the given symplectic gradients.
+
+ Implemented from:
+ Wang J, Sun H, Fiori S. A Riemannian-steepest-descent approach
+ for optimization on the real symplectic group.
+ Mathematical Methods in the Applied Sciences. 2018 Jul 30;41(11):4273-86.
+ """
+ for dS_euclidean, S in grads_and_vars:
+ Y = math.euclidean_to_symplectic(S, dS_euclidean)
+ YT = math.transpose(Y)
+ new_value = math.matmul(
+ S, math.expm(-symplectic_lr * YT) @ math.expm(-symplectic_lr * (Y - YT))
+ )
+ math.assign(S, new_value)
+
+
+def update_orthogonal(grads_and_vars, orthogonal_lr: float):
+ r"""Updates the orthogonal parameters using the given orthogonal gradients.
+
+ Implemented from:
+ Y Yao, F Miatto, N Quesada - arXiv preprint arXiv:2209.06069, 2022.
+ """
+ for dO_euclidean, O in grads_and_vars:
+ Y = math.euclidean_to_unitary(O, math.real(dO_euclidean))
+ new_value = math.matmul(O, math.expm(-orthogonal_lr * Y))
+ math.assign(O, new_value)
+
+
+def update_unitary(grads_and_vars, unitary_lr: float):
+ r"""Updates the unitary parameters using the given unitary gradients.
+
+ Implemented from:
+ Y Yao, F Miatto, N Quesada - arXiv preprint arXiv:2209.06069, 2022.
+ """
+ for dU_euclidean, U in grads_and_vars:
+ Y = math.euclidean_to_unitary(U, dU_euclidean)
+ new_value = math.matmul(U, math.expm(-unitary_lr * Y))
+ math.assign(U, new_value)
+
+
+def update_euclidean(grads_and_vars, euclidean_lr: float):
+ """Updates the parameters using the euclidean gradients."""
+ math.euclidean_opt.lr = euclidean_lr
+ math.euclidean_opt.apply_gradients(grads_and_vars)
+
+
+# ~~~~~~~
+# Classes
+# ~~~~~~~
+
+
+class Constant:
+ r"""
+ A parameter with a constant, immutable value.
+
+ .. code::
+
+ my_const = Constant(1, "my_const")
+
+ Args:
+ value: The value of this constant.
+ name: The name of this constant.
+ """
+
+ def __init__(self, value: any, name: str):
+ if math.from_backend(value) and not math.is_trainable(value):
+ self._value = value
+ elif type(value) in [list, int, float]:
+ self._value = math.new_constant(value, name)
+ else:
+ self._value = math.new_constant(value, name, value.dtype)
+
+ self._name = name
+
+ @property
+ def name(self) -> str:
+ r"""
+ The name of this constant.
+ """
+ return self._name
+
+ @property
+ def value(self) -> any:
+ r"""
+ The value of this constant.
+ """
+ return self._value
+
+ def __mul__(self, value):
+ return type(self)(value=value * self.value, name=self.name)
+
+ def __rmul__(self, value):
+ return type(self)(value=self.value * value, name=self.name)
+
+
+# pylint: disable=too-many-instance-attributes
+class Variable:
+ r"""
+ A parameter whose value can change.
+
+ .. code::
+
+ my_var = Variable(1, "my_var")
+
+ Args:
+ value: The value of this variable.
+ name: The name of this variable.
+ bounds: The numerical bounds of this variable.
+ update_fn: The function used to update this variable during training.
+ """
+
+ def __init__(
+ self,
+ value: any,
+ name: str,
+ bounds: Tuple[Optional[float], Optional[float]] = (None, None),
+ update_fn: Callable = update_euclidean,
+ ):
+ self._value = self._get_value(value, bounds, name)
+ self._name = name
+ self._bounds = bounds
+ self._update_fn = update_fn
+
+ def _get_value(self, value, bounds, name):
+ r"""
+ Returns a variable from given ``value``, ``bounds``, and ``name``.
+ """
+ if math.from_backend(value) and math.is_trainable(value):
+ return value
+ elif type(value) in [list, int, float]:
+ return math.new_variable(value, bounds, name)
+ else:
+ return math.new_variable(value, bounds, name, value.dtype)
+
+ @property
+ def bounds(self) -> Tuple[Optional[float], Optional[float]]:
+ r"""
+ The numerical bounds of this variable.
+ """
+ return self._bounds
+
+ @property
+ def name(self) -> str:
+ r"""
+ The name of this variable.
+ """
+ return self._name
+
+ @property
+ def update_fn(self) -> Optional[Callable]:
+ r"""
+ The function used to update this variable during training.
+ """
+ return self._update_fn
+
+ @update_fn.setter
+ def update_fn(self, value):
+ self._update_fn = value
+
+ @property
+ def value(self) -> any:
+ r"""
+ The value of this variable.
+ """
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ self._value = self._get_value(value, self.bounds, self.name)
+
+ @staticmethod
+ def orthogonal(
+ value: Optional[any],
+ name: str,
+ bounds: Tuple[Optional[float], Optional[float]] = (None, None),
+ N: int = 1,
+ ):
+ r"""
+ Initializes a variable with ``update_fn`` for orthogonal optimization.
+
+ Args:
+ value: The value of the returned variable. If ``None``, a random orthogonal
+ matrix of dimension ``N`` is initialized.
+ name: The name of the returned variable.
+ bounds: The numerical bounds of the returned variable.
+ N: The dimension of the random orthogonal matrix. This value is ignored if
+ ``value`` is not ``None``.
+
+ Returns:
+ A variable with ``update_fn`` for orthogonal optimization.
+ """
+ value = value or math.random_orthogonal(N)
+ return Variable(value, name, bounds, update_orthogonal)
+
+ @staticmethod
+ def symplectic(
+ value: any,
+ name: str,
+ bounds: Tuple[Optional[float], Optional[float]] = (None, None),
+ N: int = 1,
+ ):
+ r"""
+ Initializes a variable with ``update_fn`` for symplectic optimization.
+
+ Args:
+ value: The value of the returned variable. If ``None``, a random symplectic
+ matrix of dimension ``N`` is initialized.
+ name: The name of the returned variable.
+ bounds: The numerical bounds of the returned variable.
+ N: The dimension of the random symplectic matrix. This value is ignored if
+ ``value`` is not ``None``.
+
+ Returns:
+ A variable with ``update_fn`` for symplectic optimization.
+ """
+ value = value or math.random_symplectic(N)
+ return Variable(value, name, bounds, update_symplectic)
+
+ @staticmethod
+ def unitary(
+ value: any,
+ name: str,
+ bounds: Tuple[Optional[float], Optional[float]] = (None, None),
+ N: int = 1,
+ ):
+ r"""
+ Initializes a variable with ``update_fn`` for unitary optimization.
+
+ Args:
+ value: The value of the returned variable. If ``None``, a random unitary
+ matrix of dimension ``N`` is initialized.
+ name: The name of the returned variable.
+ bounds: The numerical bounds of the returned variable.
+ N: The dimension of the random unitary matrix. This value is ignored if
+ ``value`` is not ``None``.
+
+ Returns:
+ A variable with ``update_fn`` for unitary optimization.
+ """
+ value = value or math.random_unitary(N)
+ return Variable(value, name, bounds, update_unitary)
+
+ def __mul__(self, value):
+ return type(self)(
+ value=value * self.value,
+ name=self.name,
+ bounds=self.bounds,
+ update_fn=self.update_fn,
+ )
+
+ def __rmul__(self, value):
+ return type(self)(
+ value=self.value * value,
+ name=self.name,
+ bounds=self.bounds,
+ update_fn=self.update_fn,
+ )
diff --git a/mrmustard/math/tensor_networks/__init__.py b/mrmustard/math/tensor_networks/__init__.py
new file mode 100644
index 000000000..fd58f4183
--- /dev/null
+++ b/mrmustard/math/tensor_networks/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2021 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+r"""
+This package contains the modules implementing base classes for tensors and tensor networks.
+"""
+
+from .tensors import *
+from .networks import *
diff --git a/mrmustard/math/tensor_networks/networks.py b/mrmustard/math/tensor_networks/networks.py
new file mode 100644
index 000000000..1fe8dc772
--- /dev/null
+++ b/mrmustard/math/tensor_networks/networks.py
@@ -0,0 +1,169 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Functions and classes for tensor networks."""
+
+from __future__ import annotations
+
+from typing import Optional
+from opt_einsum import contract as opt_contract
+
+import networkx as nx
+import numpy as np
+import matplotlib.pyplot as plt
+
+from .tensors import Wire, Tensor
+
+
+def connect(wire1: Wire, wire2: Wire, dim: Optional[int] = None):
+ r"""Connects two wires in a tensor network.
+
+ Args:
+ wire1: The first wire.
+ wire2: The second wire.
+ dim: The dimension of the contraction.
+
+ Raises:
+ ValueError: If one or both of the wires are already connected.
+ """
+ if wire1.is_connected or wire2.is_connected:
+ msg = "Tried to connect wires that are already connected."
+ raise ValueError(msg)
+
+ if dim:
+ wire1.dim = dim
+ wire2.dim = dim
+
+ wire1.is_connected = True
+ wire2.is_connected = True
+
+ wire1.contraction_id = wire2.contraction_id
+
+
+def contract(tensors: list[Tensor], default_dim: int):
+ r"""Contract a list of tensors.
+
+ Args:
+ tensors: The tensors to contract.
+        default_dim: The default dimension of the contractions.
+
+ Returns:
+ The contracted tensor.
+ """
+ opt_einsum_args = []
+ for t in tensors:
+ shape = t.shape(default_dim=default_dim, out_in=True)
+ opt_einsum_args.append(t.value(shape=shape))
+ opt_einsum_args.append([w.contraction_id for w in t.wires])
+ return opt_contract(*opt_einsum_args)
+
+
+def draw(tensors: list[Tensor], layout: str = "spring_layout", figsize: tuple[int, int] = (10, 6)):
+ r"""Draws a tensor network.
+
+ Args:
+ tensors: The tensors to draw.
+ layout: The layout method. Must be one of the methods in ``nx.drawing.layout``.
+ figsize: The size of the returned figure.
+
+ Returns:
+ A figure showing the tensor network.
+ """
+ try:
+ fn_layout = getattr(nx.drawing.layout, layout)
+ except AttributeError:
+ msg = f"Invalid layout {layout}."
+ # pylint: disable=raise-missing-from
+ raise ValueError(msg)
+
+ # initialize empty lists and dictionaries used to store metadata
+ tensor_labels = {}
+ mode_labels = {}
+ node_size = []
+ node_color = []
+
+ # initialize three graphs--one to store nodes and edges, two to keep track of arrows
+ graph = nx.Graph()
+ arrows_in = nx.Graph()
+ arrows_out = nx.Graph()
+
+ for idx, tensor in enumerate(tensors):
+ tensor_id = tensor.name + str(idx)
+ graph.add_node(tensor_id)
+ tensor_labels[tensor_id] = tensor.name
+ mode_labels[tensor_id] = ""
+ node_size.append(150)
+ node_color.append("red")
+
+ for wire in tensor.wires:
+ wire_id = wire.contraction_id
+ if wire_id not in graph.nodes:
+ node_size.append(0)
+ node_color.append("white")
+ tensor_labels[wire_id] = ""
+ mode_labels[wire_id] = wire.mode
+
+ graph.add_node(wire_id)
+ graph.add_edge(wire_id, tensor_id)
+ if wire.is_input:
+ arrows_in.add_edge(tensor_id, wire_id)
+ else:
+ arrows_out.add_edge(tensor_id, wire_id)
+
+ pos = fn_layout(graph)
+ pos_labels = {k: v + np.array([0.0, 0.05]) for k, v in pos.items()}
+
+ fig = plt.figure(figsize=figsize)
+ nx.draw_networkx_nodes(
+ graph, pos, edgecolors="gray", alpha=0.9, node_size=node_size, node_color=node_color
+ )
+ nx.draw_networkx_edges(graph, pos, edge_color="lightgreen", width=4, alpha=0.6)
+ nx.draw_networkx_edges(
+ arrows_in,
+ pos,
+ edge_color="darkgreen",
+ width=0.5,
+ arrows=True,
+ arrowsize=10,
+ arrowstyle="<|-",
+ )
+ nx.draw_networkx_edges(
+ arrows_out,
+ pos,
+ edge_color="darkgreen",
+ width=0.5,
+ arrows=True,
+ arrowsize=10,
+ arrowstyle="-|>",
+ )
+ nx.draw_networkx_labels(
+ graph,
+ pos=pos_labels,
+ labels=tensor_labels,
+ font_size=12,
+ font_color="black",
+ font_family="serif",
+ )
+ nx.draw_networkx_labels(
+ graph,
+ pos=pos_labels,
+ labels=mode_labels,
+ font_size=12,
+ font_color="black",
+ font_family="FreeMono",
+ )
+
+ plt.title("Mr Mustard Network")
+ plt.show()
+ return fig
diff --git a/mrmustard/math/tensor_networks/tensors.py b/mrmustard/math/tensor_networks/tensors.py
new file mode 100644
index 000000000..6ba6063db
--- /dev/null
+++ b/mrmustard/math/tensor_networks/tensors.py
@@ -0,0 +1,449 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Classes for constructing tensors."""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import List, Optional, Tuple
+
+import uuid
+
+from mrmustard.math.backend_manager import BackendManager
+
+math = BackendManager()
+
+
+__all__ = ["Wire", "Tensor"]
+
+
+def random_int() -> int:
+ r"""
+ Returns a random integer obtained from a UUID
+ """
+ return uuid.uuid1().int
+
+
+# pylint: disable=too-many-instance-attributes
+@dataclass
+class Wire:
+ r"""Represents a wire in a tensor network.
+
+ Each wire is characterized by a unique identifier ``id``, which must be different from
+ the identifiers of all the other wires in the tensor network. Additionally, it owns a
+ label ``mode`` that represents the mode of light that this wire is acting on.
+
+ Args:
+ id: A numerical identifier for this wire.
+ mode: The mode of light that this wire is acting on.
+ is_input: Whether this wire is an input to a tensor or an output.
+ is_ket: Whether this wire is on the ket or on the bra side.
+
+ """
+
+ id: int
+ mode: int
+ is_input: bool
+ is_ket: bool
+
+ def __post_init__(self):
+ self._contraction_id: int = random_int()
+ self._dim = None
+ self._is_connected = False
+
+ @property
+ def contraction_id(self) -> int:
+ r"""
+ A numerical identifier for the contraction involving this wire.
+ """
+ return self._contraction_id
+
+ @contraction_id.setter
+ def contraction_id(self, value: int):
+ self._contraction_id = value
+
+ @property
+ def dim(self):
+ r"""
+ The dimension of this wire.
+ """
+ return self._dim
+
+ @dim.setter
+ def dim(self, value: int):
+ if self._dim:
+ raise ValueError("Cannot change the dimension of wire with specified dimension.")
+ self._dim = value
+
+ @property
+ def is_connected(self) -> bool:
+ r"""
+ Whether or not this wire is connected with another wire.
+ """
+ return self._is_connected
+
+ @is_connected.setter
+ def is_connected(self, value: bool):
+ self._is_connected = value
+
+
+@dataclass
+class WireGroup:
+ r"""A group of wires in a tensor network.
+
+ Args:
+ ket: A dictionary containing the wires on the ket side.
+ bra: A dictionary containing the wires on the bra side.
+
+ """
+
+ ket: dict = field(default_factory=dict)
+ bra: dict = field(default_factory=dict)
+
+
+class Tensor(ABC):
+ r"""An abstract class representing a tensor in a tensor network.
+
+ In Mr Mustard, tensors are used to represent a state or a transformation on a given set
+ of modes in the Fock representation. For example, a single-mode unitary matrix
+ :math:`U=\sum_{i,j=1}^Nu_{i,j}|i\rangle\langle{j}|` acting on mode ``3`` in an
+ N-dimensional Fock basis corresponds to the following ``Tensor`` object:
+
+ .. code-block::
+ class U(Tensor):
+ def value(self, shape):
+ # specify the value of the tensor
+ pass
+
+ U("my_unitary", [3], [3], [3], [3])
+
+ Args:
+ name: The name of this tensor.
+ modes_in_ket: The input modes on the ket side.
+ modes_out_ket: The output modes on the ket side.
+ modes_in_bra: The input modes on the bra side.
+ modes_out_bra: The output modes on the bra side.
+ """
+
+ _repr_markdown_ = None # otherwise it takes over the repr due to mro
+
+ def __init__(
+ self,
+ name: str,
+ modes_in_ket: Optional[list[int]] = None,
+ modes_out_ket: Optional[list[int]] = None,
+ modes_in_bra: Optional[list[int]] = None,
+ modes_out_bra: Optional[list[int]] = None,
+ ) -> None:
+ self._name = name
+ self._update_modes(modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra)
+
+ def _update_modes(
+ self,
+ modes_in_ket: Optional[list[int]] = None,
+ modes_out_ket: Optional[list[int]] = None,
+ modes_in_bra: Optional[list[int]] = None,
+ modes_out_bra: Optional[list[int]] = None,
+ ) -> None:
+ r"""
+ Updates the modes in this tensor by setting:
+
+ * self._modes_in_ket, a list of input modes on the ket side
+ * self._modes_out_ket, a list of output modes on the ket side
+ * self._modes_in_bra, a list of input modes on the bra side
+ * self._modes_out_bra, a list of output modes on the bra side
+        * self._input, a WireGroup containing all the input modes
+        * self._output, a WireGroup containing all the output modes
+
+ It computes a new ``id`` for every wire.
+
+ Raises:
+ ValueError: if `modes_in_ket` and `modes_in_bra` are not equal, and neither
+ of them is `None`.
+ ValueError: if `modes_out_ket` and `modes_out_bra` are not equal, and neither
+ of them is `None`.
+ """
+ msg = "modes on ket and bra sides must be equal, unless either of them is `None`."
+ if modes_in_ket and modes_in_bra:
+ if modes_in_ket != modes_in_bra:
+ msg = f"Input {msg}"
+ raise ValueError(msg)
+ if modes_out_ket and modes_out_bra:
+ if modes_out_ket != modes_out_bra:
+ msg = f"Output {msg}"
+ raise ValueError(msg)
+
+ self._modes_in_ket = modes_in_ket if modes_in_ket else []
+ self._modes_out_ket = modes_out_ket if modes_out_ket else []
+ self._modes_in_bra = modes_in_bra if modes_in_bra else []
+ self._modes_out_bra = modes_out_bra if modes_out_bra else []
+
+ # initialize ket and bra wire dicts
+ self._input = WireGroup()
+ for mode in self._modes_in_ket:
+ self._input.ket |= {mode: Wire(random_int(), mode, True, True)}
+ for mode in self._modes_in_bra:
+ self._input.bra |= {mode: Wire(random_int(), mode, True, False)}
+
+ self._output = WireGroup()
+ for mode in self._modes_out_ket:
+ self._output.ket |= {mode: Wire(random_int(), mode, False, True)}
+ for mode in self._modes_out_bra:
+ self._output.bra |= {mode: Wire(random_int(), mode, False, False)}
+
+ @property
+ def adjoint(self) -> AdjointView:
+ r"""The adjoint view of this Tensor (with new ``id``s). That is, ket <-> bra."""
+ return AdjointView(self)
+
+ @property
+ def input(self):
+ r"""
+ A dictionary mapping the input modes to their respective wires.
+ """
+ return self._input
+
+ @property
+ def modes(self) -> list[int]:
+ r"""
+ For backward compatibility. Don't overuse.
+ It returns a list of modes for this Tensor, unless it's ambiguous.
+ """
+ if self.modes_in == self.modes_out: # transformation on same modes
+ return list(self.modes_in)
+ elif len(self.modes_in) == 0: # state
+ return list(self.modes_out)
+ elif len(self.modes_out) == 0: # measurement
+ return list(self.modes_in)
+ else:
+ raise ValueError("modes are ambiguous for this Tensor.")
+
+ @property
+ def modes_in(self) -> List[int]:
+ r"""
+ The list of input modes that are used by this Tensor.
+
+ If this tensor has no input modes on the bra side, or if the input modes are equal
+ on both ket and bra sides, it returns the list of modes. Otherwise, it performs the
+ ``set()`` operation before returning the list (and hence, the order may be unexpected).
+ """
+ if self._modes_in_ket:
+ return self._modes_in_ket
+ return self._modes_in_bra
+
+ @property
+ def modes_out(self) -> List[int]:
+ r"""
+ The list of output modes that are used by this Tensor.
+
+ If this tensor has no output modes on the bra side, or if the output modes are equal
+ on both ket and bra sides, it returns the list of modes. Otherwise, it performs the
+ ``set()`` operation before returning the list (and hence, the order may be unexpected).
+ """
+ if self._modes_out_ket:
+ return self._modes_out_ket
+ return self._modes_out_bra
+
+ @property
+    def name(self) -> str:
+ r"""
+ The name of this tensor.
+ """
+ return self._name
+
+ @property
+ def output(self):
+ r"""
+ A dictionary mapping the output modes to their respective wires.
+ """
+ return self._output
+
+ def unpack_shape(self, shape: Tuple[int]):
+ r"""
+ Unpack the given ``shape`` into the shapes of the input and output wires on ket and bra sides.
+
+ Args:
+ shape: A shape.
+
+ Returns:
+ shape_in_ket: The shape of the input wires on the ket side.
+ shape_out_ket: The shape of the output wires on the ket side.
+ shape_in_bra: The shape of the input wires on the bra side.
+ shape_out_bra: The shape of the output wires on the bra side.
+ """
+ idx1 = 0
+ idx2 = len(self._modes_in_ket)
+ shape_in_ket = shape[idx1:idx2]
+
+ idx1 = idx2
+ idx2 += len(self._modes_out_ket)
+ shape_out_ket = shape[idx1:idx2]
+
+ idx1 = idx2
+ idx2 += len(self._modes_in_bra)
+ shape_in_bra = shape[idx1:idx2]
+
+ idx1 = idx2
+ idx2 += len(self._modes_out_bra)
+ shape_out_bra = shape[idx1:idx2]
+
+ return shape_in_ket, shape_out_ket, shape_in_bra, shape_out_bra
+
+ @property
+ def wires(self) -> List[Wire]:
+ r"""
+ The list of all wires in this tensor, sorted as ``[ket_in, ket_out, bra_in, bra_out]``.
+ """
+ return (
+ list(self.input.ket.values())
+ + list(self.output.ket.values())
+ + list(self.input.bra.values())
+ + list(self.output.bra.values())
+ )
+
+ @abstractmethod
+ def value(self, shape: Tuple[int]):
+ r"""The value of this tensor.
+
+ Args:
+ shape: the shape of this tensor
+
+ Returns:
+ ComplexTensor: the unitary matrix in Fock representation
+ """
+
+ def change_modes(
+ self,
+ modes_in_ket: Optional[list[int]] = None,
+ modes_out_ket: Optional[list[int]] = None,
+ modes_in_bra: Optional[list[int]] = None,
+ modes_out_bra: Optional[list[int]] = None,
+ ) -> None:
+ r"""
+ Changes the modes in this tensor.
+
+ Args:
+ name: The name of this tensor.
+ modes_in_ket: The input modes on the ket side.
+ modes_out_ket: The output modes on the ket side.
+ modes_in_bra: The input modes on the bra side.
+ modes_out_bra: The output modes on the bra side.
+
+ Raises:
+ ValueError: if one or more wires in this tensor are already connected.
+ """
+ for wire in self.wires:
+ if wire.is_connected:
+ msg = (
+ "Cannot change nodes in a tensor when some of its wires are already connected."
+ )
+ raise ValueError(msg)
+ self._update_modes(modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra)
+
+ def shape(self, default_dim: Optional[int] = None, out_in=False):
+ r"""
+ Returns the shape of the underlying tensor, as inferred from the dimensions of the individual
+ wires.
+
+ If ``out_in`` is ``False``, the shape returned is in the order ``(in_ket, in_bra, out_ket, out_bra)``.
+ Otherwise, it is in the order ``(out_ket, out_bra, in_ket, in_bra)``.
+
+ Args:
+ default_dim: The default dimension of wires with unspecified dimension.
+ out_in: Whether to return output shapes followed by input shapes or viceversa.
+ """
+
+ def _sort_shapes(*args):
+ for arg in args:
+ if arg:
+ yield arg
+
+ shape_in_ket = [w.dim if w.dim else default_dim for w in self.input.ket.values()]
+ shape_out_ket = [w.dim if w.dim else default_dim for w in self.output.ket.values()]
+ shape_in_bra = [w.dim if w.dim else default_dim for w in self.input.bra.values()]
+ shape_out_bra = [w.dim if w.dim else default_dim for w in self.output.bra.values()]
+
+        order = (shape_out_ket, shape_out_bra, shape_in_ket, shape_in_bra) if out_in else (
+            shape_in_ket, shape_in_bra, shape_out_ket, shape_out_bra)
+        ret = _sort_shapes(*order)
+
+ # pylint: disable=consider-using-generator
+ return tuple([item for sublist in ret for item in sublist])
+
+
+class AdjointView(Tensor):
+ r"""
+ Adjoint view of a tensor. It swaps the ket and bra wires of a Tensor.
+ """
+
+ def __init__(self, tensor):
+ self._original = tensor
+ super().__init__(
+ name=self._original.name,
+ modes_in_ket=self._original.input.bra.keys(),
+ modes_out_ket=self._original.output.bra.keys(),
+ modes_in_bra=self._original.input.ket.keys(),
+ modes_out_bra=self._original.output.ket.keys(),
+ )
+
+ def value(self, shape: Tuple[int]):
+ r"""The value of this tensor.
+
+ Args:
+ shape: the shape of the adjoint tensor.
+
+ Returns:
+ ComplexTensor: the unitary matrix in Fock representation
+ """
+ # converting the given shape into a shape for the original tensor
+ shape_in_ket, shape_out_ket, shape_in_bra, shape_out_bra = self._original.unpack_shape(
+ shape
+ )
+ shape_ret = shape_in_bra + shape_out_bra + shape_in_ket + shape_out_ket
+
+ ret = math.conj(math.astensor(self._original.value(shape_ret)))
+ return ret
+
+
+class DualView(Tensor):
+ r"""
+ Dual view of a tensor. It swaps the input and output wires of a Tensor.
+ """
+
+ def __init__(self, tensor):
+ self._original = tensor
+ super().__init__(
+ name=self._original.name,
+ modes_in_ket=self._original.output.ket.keys(),
+ modes_out_ket=self._original.input.ket.keys(),
+ modes_in_bra=self._original.output.bra.keys(),
+ modes_out_bra=self._original.input.bra.keys(),
+ )
+
+ def value(self, shape: Tuple[int]):
+ r"""The value of this tensor.
+
+ Args:
+ shape: the shape of the dual tensor.
+
+ Returns:
+ ComplexTensor: the unitary matrix in Fock representation.
+ """
+ # converting the given shape into a shape for the original tensor
+ shape_in_ket, shape_out_ket, shape_in_bra, shape_out_bra = self.unpack_shape(shape)
+        shape_ret = shape_out_ket + shape_in_ket + shape_out_bra + shape_in_bra
+
+ return math.conj(self._original.value(shape_ret))
diff --git a/mrmustard/math/tensor_wrappers/__init__.py b/mrmustard/math/tensor_wrappers/__init__.py
new file mode 100644
index 000000000..0d13a51fb
--- /dev/null
+++ b/mrmustard/math/tensor_wrappers/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+r"""
+The point of entry for the tensor wrappers.
+"""
+
+from .mmtensor import *
+from .xptensor import *
diff --git a/mrmustard/math/mmtensor.py b/mrmustard/math/tensor_wrappers/mmtensor.py
similarity index 99%
rename from mrmustard/math/mmtensor.py
rename to mrmustard/math/tensor_wrappers/mmtensor.py
index 46a71aaeb..84ebba12e 100644
--- a/mrmustard/math/mmtensor.py
+++ b/mrmustard/math/tensor_wrappers/mmtensor.py
@@ -21,9 +21,9 @@
from numbers import Number
from typing import List, Optional, Union
-from mrmustard.math import Math
+from mrmustard.math.backend_manager import BackendManager
-math = Math()
+math = BackendManager()
class MMTensor:
diff --git a/mrmustard/utils/xptensor.py b/mrmustard/math/tensor_wrappers/xptensor.py
similarity index 93%
rename from mrmustard/utils/xptensor.py
rename to mrmustard/math/tensor_wrappers/xptensor.py
index 63c3b1503..b84ffec0b 100644
--- a/mrmustard/utils/xptensor.py
+++ b/mrmustard/math/tensor_wrappers/xptensor.py
@@ -26,16 +26,16 @@
Union,
)
-from mrmustard.math import Math
-from mrmustard.typing import Matrix, Scalar, Tensor, Vector
+from mrmustard.utils.typing import Matrix, Scalar, Tensor, Vector
+from mrmustard.math.backend_manager import BackendManager
-math = Math()
+math = BackendManager()
class XPTensor(ABC):
r"""A representation of Matrices and Vectors in phase space.
- Tensors in phase space have a ``(2n, 2n)`` or ``(2n,)`` shape where n is the number of modes.
+ Tensors in phase space have a ``(2n, 2n)`` or ``(2n,)`` shape, where ``n`` is the number of modes.
There are two main orderings:
- xxpp: matrix is a `2\times 2` block matrix where each block is an `xx`, `xp`, `px`, `pp` block on all modes.
@@ -60,9 +60,9 @@ class XPTensor(ABC):
Args:
tensor: The tensor in (n,m,2,2) or (n,2) order.
- modes: a list of modes for a diagonal matrix or a vector and a tuple of two lists for a coherence (not optional for a coherence)
like_0: Whether the null tensor behaves like 0 under addition (e.g. the noise matrix Y)
- like_1: Whether the null tensor behaves like 1 under multiplication (e.g. a symplectic transformation matrix)
+ is_vector: Whether the tensor is a vector.
+ modes: a list of modes for a diagonal matrix or a vector and a tuple of two lists for a coherence (not optional for a coherence)
"""
@abstractmethod # so that XPTensor can't be instantiated directly
@@ -70,7 +70,7 @@ def __init__(
self,
tensor: Optional[Tensor],
like_0: bool,
- isVector: bool,
+ is_vector: bool,
modes: Union[Tuple[List[int], List[int]], None],
):
self.like_0 = like_0
@@ -78,16 +78,16 @@ def __init__(
None if tensor is None else tensor.shape[: len(tensor.shape) // 2]
) # only (N,M) or (N,)
self.ndim = None if tensor is None else len(self.shape)
- self.isVector = isVector
- if self.ndim == 1 and not self.isVector:
+ self.is_vector = is_vector
+ if self.ndim == 1 and not self.is_vector:
raise ValueError(
- f"tensor shape incompatible with isVector={isVector} (expected len(tensor.shape)==4, got {len(tensor.shape)})"
+ f"tensor shape incompatible with is_vector={is_vector} (expected len(tensor.shape)==1, got {len(tensor.shape)})"
)
- if self.ndim == 2 and self.isVector:
+ if self.ndim == 2 and self.is_vector:
raise ValueError(
- f"tensor shape incompatible with isVector={isVector} (expected len(tensor.shape)==2, got {len(tensor.shape)})"
+ f"tensor shape incompatible with is_vector={is_vector} (expected len(tensor.shape)==2, got {len(tensor.shape)})"
)
- if self.isVector and self.like_1:
+ if self.is_vector and self.like_1:
raise ValueError("vectors should be like_0")
self.tensor = tensor
if not (set(modes[0]) == set(modes[1]) or set(modes[0]).isdisjoint(modes[1])):
@@ -115,7 +115,7 @@ def num_modes(self) -> int:
@property
def isMatrix(self) -> Optional[bool]:
- return not self.isVector
+ return not self.is_vector
@property
def isCoherence(self) -> Optional[bool]:
@@ -127,7 +127,7 @@ def like_1(self) -> bool:
@property
def T(self) -> XPMatrix:
- if self.isVector:
+ if self.is_vector:
raise ValueError("Cannot transpose a vector")
if self.tensor is None:
return self
@@ -165,7 +165,7 @@ def modes_last(self) -> Optional[Tensor]:
return None
return math.transpose(self.tensor, (2, 3, 0, 1) if self.isMatrix else (0, 1)) # 22NM or 2N
- def clone(self, times: int, modes=None) -> XPtensor:
+ def clone(self, times: int, modes=None) -> XPTensor:
r"""Create a new XPTensor made by cloning the system a given number of times
(the modes are reset by default unless specified).
"""
@@ -212,7 +212,7 @@ def clone_like(self, other: XPTensor):
f"No integer multiple of {self.num_modes} modes fits into {other.num_modes} modes"
)
times = other.num_modes // self.num_modes
- if self.isVector == other.isVector:
+ if self.is_vector == other.is_vector:
tensor = self.clone(times, modes=other.modes).tensor
else:
raise ValueError("Cannot clone a vector into a matrix or viceversa")
@@ -247,7 +247,7 @@ def __matmul__(self, other: Union[XPMatrix, XPVector]) -> Union[XPMatrix, XPVect
if self.tensor is None and other.tensor is None:
if self.isMatrix and other.isMatrix:
return XPMatrix(None, like_1=self.like_1 and other.like_1)
- if self.isVector or other.isVector:
+ if self.is_vector or other.is_vector:
return XPVector(None)
# either is None
if self.tensor is None:
@@ -258,15 +258,15 @@ def __matmul__(self, other: Union[XPMatrix, XPVector]) -> Union[XPMatrix, XPVect
if self.isMatrix and other.isMatrix:
tensor, modes = self._mode_aware_matmul(other)
return XPMatrix(tensor, like_1=self.like_1 and other.like_1, modes=modes)
- if self.isMatrix and other.isVector:
+ if self.isMatrix and other.is_vector:
tensor, modes = self._mode_aware_matmul(other)
return XPVector(
tensor, modes[0]
) # TODO: check if we can output modes as a list in _mode_aware_matmul
- if self.isVector and other.isMatrix:
+ if self.is_vector and other.isMatrix:
tensor, modes = other.T._mode_aware_matmul(self)
return XPVector(tensor, modes[0])
- # self.isVector and other.isVector:
+ # self.is_vector and other.is_vector:
return self._mode_aware_vecvec(other) # NOTE: this is a scalar, not an XPTensor
# pylint: disable=too-many-statements
@@ -370,7 +370,7 @@ def __add__(self, other: Union[XPMatrix, XPVector]) -> Union[XPMatrix, XPVector]
raise TypeError(
f"unsupported operand type(s) for +: '{self.__class__.__qualname__}' and '{other.__class__.__qualname__}'"
)
- if self.isVector != other.isVector:
+ if self.is_vector != other.is_vector:
raise ValueError("Cannot add a vector and a matrix")
if self.isCoherence != other.isCoherence:
raise ValueError("Cannot add a coherence block and a diagonal block")
@@ -471,7 +471,7 @@ def __getitem__(self, modes: Union[int, slice, List[int], Tuple]) -> Union[XPMat
T[[1,2,3],:] ~ self.tensor[[1,2,3],:,:,:] # i.e. the block with outmodes [1,2,3] and all inmodes
T[[1,2,3],[4,5]] ~ self.tensor[[1,2,3],[4,5],:,:] # i.e. the block with outmodes [1,2,3] and inmodes [4,5]
"""
- if self.isVector:
+ if self.is_vector:
if isinstance(modes, int):
_modes = [modes]
elif isinstance(modes, list) and all(isinstance(m, int) for m in modes):
@@ -518,7 +518,11 @@ def __getitem__(self, modes: Union[int, slice, List[int], Tuple]) -> Union[XPMat
class XPMatrix(XPTensor):
r"""A convenience class for a matrix in the XPTensor format.
- # TODO: write docstring
+ Args:
+ tensor: The tensor in (n,m,2,2) or (n,2) order.
+ like_0: Whether the null tensor behaves like 0 under addition (e.g. the noise matrix Y)
+ like_1: Whether the null tensor behaves like 1 under multiplication (e.g. a symplectic transformation matrix)
+ modes: a list of modes for a diagonal matrix or a vector and a tuple of two lists for a coherence (not optional for a coherence)
"""
def __init__(
@@ -545,7 +549,7 @@ def __init__(
list(range(s)) for s in tensor.shape[:2]
) # NOTE assuming that it isn't a coherence block
like_0 = like_0 if like_0 is not None else not like_1
- super().__init__(tensor, like_0, isVector=False, modes=modes)
+ super().__init__(tensor, like_0, is_vector=False, modes=modes)
@classmethod
def from_xxpp(
@@ -578,7 +582,12 @@ def __repr__(self) -> str:
class XPVector(XPTensor):
- r"""A convenience class for a vector in the XPTensor format."""
+ r"""A convenience class for a vector in the XPTensor format.
+
+ Args:
+ tensor: The tensor in (n,m,2,2) or (n,2) order.
+ modes: a list of modes for a diagonal matrix or a vector and a tuple of two lists for a coherence (not optional for a coherence)
+ """
def __init__(self, tensor: Tensor = None, modes: Union[List[int], None] = None):
if modes is None and tensor is not None:
@@ -589,7 +598,7 @@ def __init__(self, tensor: Tensor = None, modes: Union[List[int], None] = None):
isinstance(modes, list) or all(isinstance(m, int) for m in modes)
):
raise ValueError("the modes of an XPVector should be a list of ints")
- super().__init__(tensor, like_0=True, isVector=True, modes=(modes, []))
+ super().__init__(tensor, like_0=True, is_vector=True, modes=(modes, []))
@classmethod
def from_xxpp(
diff --git a/mrmustard/math/torch.py b/mrmustard/math/torch.py
deleted file mode 100644
index d60913da1..000000000
--- a/mrmustard/math/torch.py
+++ /dev/null
@@ -1,373 +0,0 @@
-# Copyright 2021 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This module contains the Pytorch implementation of the :class:`Math` interface."""
-
-from typing import Callable, List, Optional, Sequence, Tuple, Union, Dict
-
-import numpy as np
-import torch
-
-from mrmustard.math.autocast import Autocast
-from mrmustard.typing import Tensor, Trainable
-
-from .math_interface import MathInterface
-
-
-# pylint: disable=too-many-public-methods,no-self-use
-class TorchMath(MathInterface):
- r"""Torch implemantion of the :class:`Math` interface."""
-
- float64 = torch.float64
- float32 = torch.float32
- complex64 = torch.complex64
- complex128 = torch.complex128
-
- def __getattr__(self, name):
- return getattr(torch, name)
-
- # ~~~~~~~~~
- # Basic ops
- # ~~~~~~~~~
-
- def atleast_1d(self, array: torch.Tensor, dtype=None) -> torch.Tensor:
- return self.cast(torch.reshape(self.astensor(array), [-1]), dtype)
-
- def astensor(self, array: Union[np.ndarray, torch.Tensor], dtype=None) -> torch.Tensor:
- return self.cast(torch.tensor(array), dtype)
-
- def conj(self, array: torch.Tensor) -> torch.Tensor:
- return torch.conj(array)
-
- def real(self, array: torch.Tensor) -> torch.Tensor:
- return torch.real(array)
-
- def imag(self, array: torch.Tensor) -> torch.Tensor:
- return torch.imag(array)
-
- def cos(self, array: torch.Tensor) -> torch.Tensor:
- return torch.cos(array)
-
- def cosh(self, array: torch.Tensor) -> torch.Tensor:
- return torch.cosh(array)
-
- def sinh(self, array: torch.Tensor) -> torch.Tensor:
- return torch.sinh(array)
-
- def sin(self, array: torch.Tensor) -> torch.Tensor:
- return torch.sin(array)
-
- def exp(self, array: torch.Tensor) -> torch.Tensor:
- return torch.exp(array)
-
- def sqrt(self, x: torch.Tensor, dtype=None) -> torch.Tensor:
- return self.cast(torch.sqrt(x), dtype)
-
- def lgamma(self, x: torch.Tensor) -> torch.Tensor:
- return torch.lgamma(x)
-
- def log(self, x: torch.Tensor) -> torch.Tensor:
- return torch.log(x)
-
- def cast(self, x: torch.Tensor, dtype=None) -> torch.Tensor:
- if dtype is None:
- return x
- return x.to(dtype)
-
- @Autocast()
- def maximum(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
- return torch.maximum(a, b)
-
- @Autocast()
- def minimum(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
- return torch.minimum(a, b)
-
- def abs(self, array: torch.Tensor) -> torch.Tensor:
- return torch.abs(array)
-
- def expm(self, matrix: torch.Tensor) -> torch.Tensor:
- return torch.matrix_exp(matrix)
-
- def norm(self, array: torch.Tensor) -> torch.Tensor:
- """Note that the norm preserves the type of array."""
- return torch.norm(array)
-
- @Autocast()
- def matmul(
- self,
- a: torch.Tensor,
- b: torch.Tensor,
- transpose_a=False,
- transpose_b=False,
- adjoint_a=False,
- adjoint_b=False,
- ) -> torch.Tensor:
- return torch.matmul(a, b)
-
- @Autocast()
- def matvec(
- self, a: torch.Tensor, b: torch.Tensor, transpose_a=False, adjoint_a=False
- ) -> torch.Tensor:
- return torch.mv(a, b)
-
- @Autocast()
- def tensordot(self, a: torch.Tensor, b: torch.Tensor, axes: List[int]) -> torch.Tensor:
- return torch.tensordot(a, b, axes)
-
- def einsum(self, string: str, *tensors) -> torch.Tensor:
- return torch.einsum(string, *tensors)
-
- def inv(self, a: torch.Tensor) -> torch.Tensor:
- return torch.inverse(a)
-
- def pinv(self, array: torch.Tensor) -> torch.Tensor:
- return torch.pinverse(array)
-
- def det(self, a: torch.Tensor) -> torch.Tensor:
- return torch.det(a)
-
- def tile(self, array: torch.Tensor, repeats: Sequence[int]) -> torch.Tensor:
- return torch.tile(array, repeats)
-
- def diag(self, array: torch.Tensor, k: int = 0) -> torch.Tensor:
- return torch.diag(array, k=k)
-
- def diag_part(self, array: torch.Tensor) -> torch.Tensor:
- return torch.diag_embed(array)
-
- def pad(
- self,
- array: torch.Tensor,
- paddings: Sequence[Tuple[int, int]],
- mode="constant",
- constant_values=0,
- ) -> torch.Tensor:
- return torch.nn.functional.pad(array, paddings, mode=mode, value=constant_values)
-
- @Autocast()
- def convolution(
- self,
- array: torch.Tensor,
- filters: torch.Tensor,
- strides: Optional[List[int]] = None,
- padding="VALID",
- data_format="NWC",
- dilations: Optional[List[int]] = None,
- ) -> torch.Tensor:
- r"""Wrapper for ``torch.nn.Conv1d`` and ``torch.nn.Conv2d``.
-
- Args:
- 1D convolution: Tensor of shape [batch_size, input_channels, signal_length].
- 2D convolution: [batch_size, input_channels, input_height, input_width]
-
- Returns:
- """
-
- array.shape[0]
- input_channels = array.shape[1]
- output_channels = ... # TODO: unsure of how to get output channels
-
- if array.dim() == 3: # 1D case
- array.shape[2]
-
- m = torch.nn.Conv1d(
- input_channels,
- output_channels,
- filters,
- stride=strides,
- padding=padding,
- dtype=data_format,
- dilation=dilations,
- )
- return m(array)
-
- if array.dim() == 4: # 2D case
- array.shape[2]
- array.shape[3]
-
- m = torch.nn.Conv2d(
- input_channels,
- output_channels,
- filters,
- stride=strides,
- padding=padding,
- dtype=data_format,
- dilation=dilations,
- )
- return m(array)
-
- raise NotImplementedError
-
- def transpose(self, a: torch.Tensor, perm: List[int] = None) -> torch.Tensor:
- if a is None:
- return None # TODO: remove and address None inputs where transpose is used
- return torch.transpose(a, perm[0], perm[1])
-
- def reshape(self, array: torch.Tensor, shape: Sequence[int]) -> torch.Tensor:
- return torch.reshape(array, shape)
-
- def sum(self, array: torch.Tensor, axes: Sequence[int] = None):
- return torch.sum(array, axes)
-
- def arange(
- self, start: int, limit: int = None, delta: int = 1, dtype=torch.float64
- ) -> torch.Tensor:
- return torch.arange(start, limit, delta, dtype=dtype)
-
- @Autocast()
- def outer(self, array1: torch.Tensor, array2: torch.Tensor) -> torch.Tensor:
- return torch.tensordot(array1, array2, [[], []])
-
- def eye(self, size: int, dtype=torch.float64) -> torch.Tensor:
- return torch.eye(size, dtype=dtype)
-
- def zeros(self, shape: Sequence[int], dtype=torch.float64) -> torch.Tensor:
- return torch.zeros(shape, dtype=dtype)
-
- def zeros_like(self, array: torch.Tensor) -> torch.Tensor:
- return torch.zeros_like(array)
-
- def ones(self, shape: Sequence[int], dtype=torch.float64) -> torch.Tensor:
- return torch.ones(shape, dtype=dtype)
-
- def ones_like(self, array: torch.Tensor) -> torch.Tensor:
- return torch.ones_like(array)
-
- def gather(self, array: torch.Tensor, indices: torch.Tensor, axis: int = None) -> torch.Tensor:
- # TODO: gather works differently in Pytorch vs Tensorflow.
-
- return torch.gather(array, axis, indices)
-
- def trace(self, array: torch.Tensor, dtype=None) -> torch.Tensor:
- return self.cast(torch.trace(array), dtype)
-
- def concat(self, values: Sequence[torch.Tensor], axis: int) -> torch.Tensor:
- return torch.cat(values, axis)
-
- def update_tensor(
- self, tensor: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dims: int = 0
- ):
- # TODO: dims need to be an argument, or should be interpreted from the other data
-
- return tensor.scatter_(dims, indices, values)
-
- def update_add_tensor(
- self, tensor: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dims: int = 0
- ):
- # TODO: dims need to be an argument, or should be interpreted from the other data
-
- return tensor.scatter_add_(dims, indices, values)
-
- def constraint_func(
- self, bounds: Tuple[Optional[float], Optional[float]]
- ) -> Optional[Callable]:
- bounds = (
- -np.inf if bounds[0] is None else bounds[0],
- np.inf if bounds[1] is None else bounds[1],
- )
- if bounds != (-np.inf, np.inf):
-
- def constraint(x):
- return torch.clamp(x, min=bounds[0], max=bounds[1])
-
- else:
- constraint = None
- return constraint
-
- def new_variable(
- self, value, bounds: Tuple[Optional[float], Optional[float]], name: str, dtype=torch.float64
- ):
- return torch.tensor(value, dtype=dtype, requires_grad=True)
-
- def new_constant(self, value, name: str, dtype=torch.float64):
- return torch.tensor(value, dtype=dtype)
-
- def asnumpy(self, tensor: torch.Tensor) -> Tensor:
- return tensor.numpy()
-
- def hash_tensor(self, tensor: torch.Tensor) -> str:
- return hash(tensor)
-
- def hermite_renormalized(
- self, A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, shape: Tuple[int]
- ) -> torch.Tensor: # TODO this is not ready
- r"""Renormalized multidimensional Hermite polynomial.
-
- This is given by the "exponential" Taylor series of :math:`exp(Ax^2 + Bx + C)` at zero,
- where the series has :math:`sqrt(n!)` at the denominator rather than `n!`.
-
- Args:
- A: The A matrix.
- B: The B vector.
- C: The C scalar.
- shape: The shape of the final tensor.
-
- Returns:
- The renormalized Hermite polynomial of given shape.
- """
- raise NotImplementedError
-
- def DefaultEuclideanOptimizer(self, params) -> torch.optim.Optimizer:
- r"""Default optimizer for the Euclidean parameters."""
- self.optimizer = torch.optim.Adam(params, lr=0.001)
- return self.optimizer
-
- def value_and_gradients(
- self, cost_fn: Callable, parameters: Dict[str, List[Trainable]]
- ) -> Tuple[torch.Tensor, Dict[str, List[torch.Tensor]]]:
- r"""Computes the loss and gradients of the given cost function.
-
- Args:
- cost_fn (Callable): The cost function. Takes in two arguments:
- - Output: The output tensor of the model.
- parameters (Dict): The parameters to optimize in three kinds:
- symplectic, orthogonal and euclidean.
- optimizer: The optimizer to be used by the math backend.
-
- Returns:
- The loss and the gradients.
- """
- self.optimizer.zero_grad()
- loss = (
- cost_fn()
- ) # TODO: I think this should be cost_fn(params), but if it works I think it is fine.
- loss.backward()
- self.optimizer.step()
-
- grads = [p.grad for p in parameters]
-
- return loss, grads
-
- def eigvals(self, tensor: torch.Tensor) -> Tensor:
- """Returns the eigenvalues of a matrix."""
- return torch.linalg.eigvals(tensor)
-
- def eigvalsh(self, tensor: torch.Tensor) -> Tensor:
- """Returns the eigenvalues of a Real Symmetric or Hermitian matrix."""
- return torch.linalg.eigvalsh(tensor)
-
- def svd(self, tensor: torch.Tensor) -> Tensor:
- """Returns the Singular Value Decomposition of a matrix."""
- return torch.linalg.svd(tensor)
-
- def xlogy(self, x: torch.Tensor, y: torch.Tensor) -> Tensor:
- """Returns 0 if ``x == 0``, and ``x * log(y)`` otherwise, elementwise."""
- return torch.xlogy(x, y)
-
- def sqrtm(self, tensor: torch.Tensor) -> Tensor:
- raise NotImplementedError
-
- def boolean_mask(self, tensor: torch.Tensor, mask: torch.Tensor) -> Tensor:
- """Returns a new 1-D tensor which indexes the `input` tensor according to the boolean mask `mask`."""
- return torch.masked_select(tensor, mask)
diff --git a/mrmustard/physics/ansatze.py b/mrmustard/physics/ansatze.py
new file mode 100644
index 000000000..79f99667f
--- /dev/null
+++ b/mrmustard/physics/ansatze.py
@@ -0,0 +1,382 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import itertools
+from abc import ABC, abstractmethod
+from typing import Any, Union, Optional
+
+import numpy as np
+
+from mrmustard import math
+from mrmustard.utils.argsort import argsort_gen
+from mrmustard.utils.typing import (
+ Batch,
+ ComplexMatrix,
+ ComplexTensor,
+ ComplexVector,
+ Matrix,
+ Scalar,
+ Tensor,
+ Vector,
+)
+
+__all__ = ["Ansatz", "PolyExpBase", "PolyExpAnsatz"]
+
+
+class Ansatz(ABC):
+ r"""An Ansatz is a function over a continuous and/or discrete domain.
+ It supports many mathematical operations such as addition, subtraction,
+ multiplication, division, negation, equality, etc.
+
+ Note that n-dimensional arrays are like functions defined over an integer lattice of points,
+ so this class also works for e.g. the Fock representation.
+
+ This class is abstract. Concrete Ansatz classes will have to implement the
+ ``__call__``, ``__mul__``, ``__add__``, ``__sub__``, ``__neg__`` and ``__eq__`` methods.
+ """
+
+ @abstractmethod
+ def __neg__(self) -> Ansatz:
+ r"""
+ Negates this ansatz.
+ """
+ ...
+
+ @abstractmethod
+ def __eq__(self, other: Ansatz) -> bool:
+ r"""
+ Whether this ansatz is equal to another ansatz.
+ """
+ ...
+
+ @abstractmethod
+ def __add__(self, other: Ansatz) -> Ansatz:
+ r"""
+ Sums this ansatz to another ansatz.
+ """
+ ...
+
+ def __sub__(self, other: Ansatz) -> Ansatz:
+ r"""
+ Subtracts other from this ansatz.
+ """
+ try:
+ return self.__add__(-other)
+ except AttributeError as e:
+ raise TypeError(f"Cannot subtract {self.__class__} and {other.__class__}.") from e
+
+ @abstractmethod
+ def __call__(self, point: Any) -> Scalar:
+ r"""
+ Evaluates this ansatz at a given point in the domain.
+ """
+ ...
+
+ @abstractmethod
+ def __truediv__(self, other: Union[Scalar, Ansatz]) -> Ansatz:
+ r"""
+ Divides this ansatz by another ansatz or by a scalar.
+ """
+ ...
+
+ @abstractmethod
+ def __mul__(self, other: Union[Scalar, Ansatz]) -> Ansatz:
+ r"""
+ Multiplies this ansatz by another ansatz.
+ """
+ ...
+
+ def __rmul__(self, other: Scalar) -> Ansatz:
+ r"""
+ Multiplies this ansatz by a scalar.
+ """
+ return self.__mul__(other=other)
+
+
+class PolyExpBase(Ansatz):
+ r"""
+ A family of Ansatze parametrized by a triple of a matrix, a vector and an array.
+ For example, the Bargmann representation :math:`c\:\textrm{exp}(z A z / 2 + b z)` is of this
+ form (where ``A``, ``b``, ``c`` is the triple), or the Wigner representation
+ (where ``Sigma``, ``mu``, ``1`` is the triple).
+
+ Note that this class is not instantiable (despite having an initializer) because it does
+ not implement all the abstract methods of ``Ansatz``, and it is in fact more general.
+ Concrete ansatze that inherit from this class need to implement ``__call__``,
+ ``__mul__`` and ``__matmul__``, which are representation-specific.
+
+ Note that the arguments are expected to be batched, i.e. to have a batch dimension
+ or to be an iterable. This is because this class also provides the linear superposition
+ functionality by implementing the ``__add__`` method, which concatenates the batch dimensions.
+
+ As this can blow up the number of terms in the representation, it is recommended to
+ run the `simplify()` method after adding terms together, which will combine together
+ terms that have the same exponential part.
+
+ Args:
+ mat: the matrix-like data
+ vec: the vector-like data
+ array: the array-like data
+ """
+
+ def __init__(self, mat: Batch[Matrix], vec: Batch[Vector], array: Batch[Tensor]):
+ self.mat = math.atleast_3d(math.astensor(mat))
+ self.vec = math.atleast_2d(math.astensor(vec))
+ self.array = math.atleast_1d(math.astensor(array))
+ self.batch_size = self.mat.shape[0]
+ self.dim = self.mat.shape[-1]
+ self._simplified = False
+
+ def __neg__(self) -> PolyExpBase:
+ return self.__class__(self.mat, self.vec, -self.array)
+
+ def __eq__(self, other: PolyExpBase) -> bool:
+ return self._equal_no_array(other) and np.allclose(self.array, other.array, atol=1e-10)
+
+ def _equal_no_array(self, other: PolyExpBase) -> bool:
+ self.simplify()
+ other.simplify()
+ return np.allclose(self.vec, other.vec, atol=1e-10) and np.allclose(
+ self.mat, other.mat, atol=1e-10
+ )
+
+ def __add__(self, other: PolyExpBase) -> PolyExpBase:
+ combined_matrices = math.concat([self.mat, other.mat], axis=0)
+ combined_vectors = math.concat([self.vec, other.vec], axis=0)
+ combined_arrays = math.concat([self.array, other.array], axis=0)
+ # note output is not simplified
+ return self.__class__(combined_matrices, combined_vectors, combined_arrays)
+
+ @property
+ def degree(self) -> int:
+ if self.array.ndim == 1:
+ return 0
+ return self.array.shape[-1] - 1
+
+ def simplify(self) -> None:
+ r"""
+ Simplifies the representation by combining together terms that have the same
+ exponential part, i.e. two terms along the batch are considered equal if their
+ matrix and vector are equal. In this case only one is kept and the arrays are added.
+
+ Does not run if the representation has already been simplified, so it is safe to call.
+ """
+ if self._simplified:
+ return
+ indices_to_check = set(range(self.batch_size))
+ removed = []
+ while indices_to_check:
+ i = indices_to_check.pop()
+ for j in indices_to_check.copy():
+ if np.allclose(self.mat[i], self.mat[j]) and np.allclose(self.vec[i], self.vec[j]):
+ self.array = math.update_add_tensor(self.array, [[i]], [self.array[j]])
+ indices_to_check.remove(j)
+ removed.append(j)
+ to_keep = [i for i in range(self.batch_size) if i not in removed]
+ self.mat = math.gather(self.mat, to_keep, axis=0)
+ self.vec = math.gather(self.vec, to_keep, axis=0)
+ self.array = math.gather(self.array, to_keep, axis=0)
+ self._simplified = True
+
+ def simplify_v2(self) -> None:
+ r"""
+ A different implementation that orders the batch dimension first.
+ """
+ if self._simplified:
+ return
+ self._order_batch()
+ to_keep = [d0 := 0]
+ mat, vec = self.mat[d0], self.vec[d0]
+ for d in range(1, self.batch_size):
+ if np.allclose(mat, self.mat[d]) and np.allclose(vec, self.vec[d]):
+ self.array = math.update_add_tensor(self.array, [[d0]], [self.array[d]])
+ else:
+ to_keep.append(d)
+ d0 = d
+ mat, vec = self.mat[d0], self.vec[d0]
+ self.mat = math.gather(self.mat, to_keep, axis=0)
+ self.vec = math.gather(self.vec, to_keep, axis=0)
+ self.array = math.gather(self.array, to_keep, axis=0)
+ self._simplified = True
+
+ def _order_batch(self):
+ r"""This method orders the batch dimension by the lexicographical order of the
+ flattened arrays (mat, vec, array). This is a very cheap way to enforce
+ an ordering of the batch dimension, which is useful for simplification and for
+ determining (in)equality between two Bargmann representations."""
+ generators = [
+ itertools.chain(
+ math.asnumpy(self.vec[i]).flat,
+ math.asnumpy(self.mat[i]).flat,
+ math.asnumpy(self.array[i]).flat,
+ )
+ for i in range(self.batch_size)
+ ]
+ sorted_indices = argsort_gen(generators)
+ self.mat = math.gather(self.mat, sorted_indices, axis=0)
+ self.vec = math.gather(self.vec, sorted_indices, axis=0)
+ self.array = math.gather(self.array, sorted_indices, axis=0)
+
+
+class PolyExpAnsatz(PolyExpBase):
+ r"""
+ Represents the ansatz function:
+
+ :math:`F(z) = \sum_i \textrm{poly}_i(z) \textrm{exp}(z^T A_i z / 2 + z^T b_i)`
+
+ where each :math:`poly_i` is a polynomial in ``z`` that can be expressed as
+
+ :math:`\textrm{poly}_i(z) = \sum_k c^{(i)}_k z^k`,
+
+ with ``k`` being a multi-index. The matrices :math:`A_i` and vectors :math:`b_i` are
+ parameters of the exponential terms in the ansatz, and :math:`z` is a vector of variables.
+
+ Args:
+ A: The list of square matrices :math:`A_i`
+ b: The list of vectors :math:`b_i`
+ c: The array of coefficients for the polynomial terms in the ansatz.
+
+ .. code-block::
+
+ A = [np.array([[1.0, 0.0], [0.0, 1.0]])]
+ b = [np.array([1.0, 1.0])]
+ c = [np.array(1.0)]
+ F = PolyExpAnsatz(A, b, c)
+ z = np.array([1.0, 2.0])
+ print(F(z)) # prints the value of F at z
+
+ """
+
+ def __init__(
+ self,
+ A: Optional[Batch[Matrix]] = None,
+ b: Optional[Batch[Vector]] = None,
+ c: Batch[Tensor | Scalar] = [1.0],
+ name: str = "",
+ ):
+ self.name = name
+ if A is None and b is None:
+ raise ValueError("Please provide either A or b.")
+ dim = b[0].shape[-1] if A is None else A[0].shape[-1]
+ A = A if A is not None else np.zeros((len(b), dim, dim), dtype=b[0].dtype)
+ b = b if b is not None else np.zeros((len(A), dim), dtype=A[0].dtype)
+ super().__init__(mat=A, vec=b, array=c)
+
+ @property
+ def A(self) -> Batch[ComplexMatrix]:
+ r"""
+ The list of square matrices :math:`A_i`.
+ """
+ return self.mat
+
+ @property
+ def b(self) -> Batch[ComplexVector]:
+ r"""
+ The list of vectors :math:`b_i`.
+ """
+ return self.vec
+
+ @property
+ def c(self) -> Batch[ComplexTensor]:
+ r"""
+ The array of coefficients for the polynomial terms in the ansatz.
+ """
+ return self.array
+
+ def __call__(self, z: Batch[Vector]) -> Scalar:
+ r"""
+ Value of this ansatz at ``z``.
+
+ Args:
+ z: point in C^n where the function is evaluated
+
+ Returns:
+ The value of the function.
+ """
+ z = np.atleast_2d(z) # shape (..., n)
+ zz = np.einsum("...a,...b->...ab", z, z)[..., None, :, :] # shape (..., 1, n, n))
+ A_part = 0.5 * math.sum(
+ zz * self.A, axes=[-1, -2]
+ ) # sum((...,1,n,n) * (b,n,n), [-1,-2]) ~ (...,b)
+ b_part = np.sum(z[..., None, :] * self.b, axis=-1) # sum((...,1,n) * (b,n), -1) ~ (...,b)
+ exp_sum = np.exp(A_part + b_part) # (..., b)
+ result = exp_sum * self.c # (..., b)
+ val = np.sum(result, axis=-1) # (...)
+ return val
+
+ def __mul__(self, other: Union[Scalar, PolyExpAnsatz]) -> PolyExpAnsatz:
+ r"""Multiplies this ansatz by a scalar or another ansatz.
+
+ Args:
+ other: A scalar or another ansatz.
+
+ Raises:
+ TypeError: If other is neither a scalar nor an ansatz.
+
+ Returns:
+ PolyExpAnsatz: The product of this ansatz and other.
+ """
+ if isinstance(other, PolyExpAnsatz):
+ new_a = [A1 + A2 for A1, A2 in itertools.product(self.A, other.A)]
+ new_b = [b1 + b2 for b1, b2 in itertools.product(self.b, other.b)]
+ new_c = [c1 * c2 for c1, c2 in itertools.product(self.c, other.c)]
+ return self.__class__(A=new_a, b=new_b, c=new_c)
+ else:
+ try:
+ return self.__class__(self.A, self.b, other * self.c)
+ except Exception as e:
+ raise TypeError(f"Cannot multiply {self.__class__} and {other.__class__}.") from e
+
+ def __truediv__(self, other: Union[Scalar, PolyExpAnsatz]) -> PolyExpAnsatz:
+ r"""Divides this ansatz by a scalar or another ansatz.
+
+ Args:
+ other: A scalar or another ansatz.
+
+ Raises:
+ TypeError: If other is neither a scalar nor an ansatz.
+
+ Returns:
+ PolyExpAnsatz: The division of this ansatz by other.
+ """
+ if isinstance(other, PolyExpAnsatz):
+ new_a = [A1 - A2 for A1, A2 in itertools.product(self.A, other.A)]
+ new_b = [b1 - b2 for b1, b2 in itertools.product(self.b, other.b)]
+ new_c = [c1 / c2 for c1, c2 in itertools.product(self.c, other.c)]
+ return self.__class__(A=new_a, b=new_b, c=new_c)
+ else:
+ try:
+ return self.__class__(self.A, self.b, self.c / other)
+ except Exception as e:
+ raise TypeError(f"Cannot divide {self.__class__} and {other.__class__}.") from e
+
+ def __and__(self, other: PolyExpAnsatz) -> PolyExpAnsatz:
+ r"""Tensor product of this ansatz with another ansatz.
+ Equivalent to :math:`F(a) * G(b)` (with different arguments, that is).
+ As it distributes over addition on both self and other,
+ the batch size of the result is the product of the batch
+ size of this ansatz and the other one.
+
+ Args:
+ other: Another ansatz.
+
+ Returns:
+ The tensor product of this ansatz and other.
+ """
+ As = [math.block_diag(a1, a2) for a1 in self.A for a2 in other.A]
+ bs = [math.concat([b1, b2], axis=-1) for b1 in self.b for b2 in other.b]
+ cs = [math.outer(c1, c2) for c1 in self.c for c2 in other.c]
+ return self.__class__(As, bs, cs)
diff --git a/mrmustard/physics/bargmann.py b/mrmustard/physics/bargmann.py
index 92cc7e078..fb798ba5b 100644
--- a/mrmustard/physics/bargmann.py
+++ b/mrmustard/physics/bargmann.py
@@ -12,18 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# pylint: disable=redefined-outer-name
-
"""
This module contains functions for transforming to the Bargmann representation.
"""
+from typing import Sequence, Tuple
+
import numpy as np
-from mrmustard import settings
-from mrmustard.math import Math
+from mrmustard import math, settings
from mrmustard.physics.husimi import pq_to_aadag, wigner_to_husimi
-
-math = Math()
+from mrmustard.utils.typing import ComplexMatrix, ComplexVector
def cayley(X, c):
@@ -53,7 +51,9 @@ def wigner_to_bargmann_rho(cov, means):
Q, beta = wigner_to_husimi(cov, means)
b = math.solve(Q, beta)
B = math.conj(b)
- C = math.exp(-0.5 * math.sum(math.conj(beta) * b)) / math.sqrt(math.det(Q))
+ num_C = math.exp(-0.5 * math.sum(math.conj(beta) * b))
+ den_C = math.sqrt(math.det(Q), dtype=num_C.dtype)
+ C = num_C / den_C
return A, B, C
@@ -63,11 +63,8 @@ def wigner_to_bargmann_psi(cov, means):
"""
N = cov.shape[-1] // 2
A, B, C = wigner_to_bargmann_rho(cov, means)
- return (
- A[N:, N:],
- B[N:],
- math.sqrt(C),
- ) # NOTE: c for th psi is to calculated from the global phase formula.
+ return A[N:, N:], B[N:], math.sqrt(C)
+ # NOTE: c for the psi is to be calculated from the global phase formula.
def wigner_to_bargmann_Choi(X, Y, d):
@@ -107,3 +104,120 @@ def wigner_to_bargmann_U(X, d):
N = X.shape[-1] // 2
A, B, C = wigner_to_bargmann_Choi(X, math.zeros_like(X), d)
return A[2 * N :, 2 * N :], B[2 * N :], math.sqrt(C)
+
+
+def complex_gaussian_integral(
+ Abc: tuple, idx_z: tuple[int, ...], idx_zconj: tuple[int, ...], measure: float = -1
+):
+ r"""Computes the Gaussian integral of the exponential of a complex quadratic form.
+ The integral is defined as (note that in general we integrate over a subset of 2m dimensions):
+
+ :math:`\int_{C^m} F(z) d\mu(z)`
+
+ where
+
+ :math:`F(z) = \textrm{exp}(-0.5 z^T A z + b^T z)`
+
+ Here, ``z`` is an ``n``-dim complex vector, ``A`` is an ``n x n`` complex matrix,
+ ``b`` is an ``n``-dim complex vector, ``c`` is a complex scalar, and :math:`d\mu(z)`
+ is a non-holomorphic complex measure over a subset of m pairs of z,z* variables. These
+ are specified by the indices ``idx_z`` and ``idx_zconj``. The ``measure`` parameter is
+ the exponent of the measure:
+
+ :math:`d\mu(z) = \textrm{exp}(m * |z|^2) \frac{d^{2n}z}{\pi^n} = \frac{1}{\pi^n}\textrm{exp}(m * |z|^2) d\textrm{Re}(z) d\textrm{Im}(z)`
+
+ Arguments:
+ A,b,c: the ``(A,b,c)`` triple
+ idx_z: the tuple of indices of the z variables
+ idx_zconj: the tuple of indices of the z* variables
+ measure: the exponent of the measure (default is -1: Bargmann measure)
+
+ Returns:
+ The ``(A,b,c)`` triple of the result of the integral
+ """
+ A, b, c = Abc
+ if len(idx_z) != len(idx_zconj):
+ raise ValueError("idx_z and idx_zconj must have the same length")
+ n = len(idx_z)
+ idx = tuple(idx_z) + tuple(idx_zconj)
+ not_idx = tuple(i for i in range(A.shape[-1]) if i not in idx)
+
+ I = math.eye(n, dtype=A.dtype)
+ Z = math.zeros((n, n), dtype=A.dtype)
+ X = math.block([[Z, I], [I, Z]])
+ M = math.gather(math.gather(A, idx, axis=-1), idx, axis=-2) + X * measure
+ D = math.gather(math.gather(A, idx, axis=-1), not_idx, axis=-2)
+ R = math.gather(math.gather(A, not_idx, axis=-1), not_idx, axis=-2)
+
+ bM = math.gather(b, idx, axis=-1)
+ bR = math.gather(b, not_idx, axis=-1)
+
+ A_post = R - math.matmul(D, math.inv(M), math.transpose(D))
+ b_post = bR - math.matvec(D, math.solve(M, bM))
+ c_post = (
+ c * math.sqrt((-1) ** n / math.det(M)) * math.exp(-0.5 * math.sum(bM * math.solve(M, bM)))
+ )
+
+ return A_post, b_post, c_post
+
+
+def join_Abc(Abc1, Abc2):
+ r"""Joins two ``(A,b,c)`` triples into a single ``(A,b,c)`` triple by block addition of the ``A``
+ matrices and concatenating the ``b`` vectors.
+
+ Arguments:
+ Abc1: the first ``(A,b,c)`` triple
+ Abc2: the second ``(A,b,c)`` triple
+
+ Returns:
+ The joined ``(A,b,c)`` triple
+ """
+ A1, b1, c1 = Abc1
+ A2, b2, c2 = Abc2
+ A12 = math.block_diag(A1, A2)
+ b12 = math.concat([b1, b2], axis=-1)
+ c12 = math.outer(c1, c2)
+ return A12, b12, c12
+
+
+def reorder_abc(Abc: tuple, order: Sequence[int]):
+ r"""
+ Reorders the indices of the A matrix and b vector of an (A,b,c) triple.
+
+ Arguments:
+ Abc: the ``(A,b,c)`` triple
+ order: the new order of the indices
+
+ Returns:
+ The reordered ``(A,b,c)`` triple
+ """
+ A, b, c = Abc
+ A = math.gather(math.gather(A, order, axis=-1), order, axis=-2)
+ b = math.gather(b, order, axis=-1)
+ if len(c.shape) == len(order):
+ c = math.transpose(c, order)
+ return A, b, c
+
+
+def contract_two_Abc(
+ Abc1: Tuple[ComplexMatrix, ComplexVector, complex],
+ Abc2: Tuple[ComplexMatrix, ComplexVector, complex],
+ idx1: Sequence[int],
+ idx2: Sequence[int],
+):
+ r"""
+ Returns the contraction of two ``(A,b,c)`` triples.
+
+ Arguments:
+ Abc1: the first ``(A,b,c)`` triple
+ Abc2: the second ``(A,b,c)`` triple
+ idx1: the indices of the first ``(A,b,c)`` triple to contract
+ idx2: the indices of the second ``(A,b,c)`` triple to contract
+
+ Returns:
+ The contracted ``(A,b,c)`` triple
+ """
+ Abc = join_Abc(Abc1, Abc2)
+ return complex_gaussian_integral(
+ Abc, idx1, tuple(n + Abc1[0].shape[-1] for n in idx2), measure=-1.0
+ )
diff --git a/mrmustard/physics/fock.py b/mrmustard/physics/fock.py
index c4659d1bf..cef6834cd 100644
--- a/mrmustard/physics/fock.py
+++ b/mrmustard/physics/fock.py
@@ -23,23 +23,18 @@
import numpy as np
-from mrmustard import settings
-from mrmustard.math import Math
-from mrmustard.math.caching import tensor_int_cache
+from mrmustard import math, settings
from mrmustard.math.lattice import strategies
-from mrmustard.math.mmtensor import MMTensor
-from mrmustard.math.numba.compactFock_diagonal_amps import (
- fock_representation_diagonal_amps,
-)
+from mrmustard.math.caching import tensor_int_cache
+from mrmustard.math.tensor_wrappers.mmtensor import MMTensor
from mrmustard.physics.bargmann import (
wigner_to_bargmann_Choi,
wigner_to_bargmann_psi,
wigner_to_bargmann_rho,
wigner_to_bargmann_U,
)
-from mrmustard.typing import ComplexTensor, Matrix, Scalar, Tensor, Vector
+from mrmustard.utils.typing import ComplexTensor, Matrix, Scalar, Tensor, Vector
-math = Math()
SQRT = np.sqrt(np.arange(1e6))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -80,7 +75,7 @@ def autocutoffs(cov: Matrix, means: Vector, probability: float):
means_i = np.array([means[i], means[i + M]])
# apply 1-d recursion until probability is less than 0.99
A, B, C = [math.asnumpy(x) for x in wigner_to_bargmann_rho(cov_i, means_i)]
- diag = fock_representation_diagonal_amps(A, B, C, 1, cutoffs=[100])[0]
+ diag = math.hermite_renormalized_diagonal(A, B, C, cutoffs=[100])
# find at what index in the cumsum the probability is more than 0.99
for i, val in enumerate(np.cumsum(diag)):
if val > probability:
@@ -323,10 +318,11 @@ def number_means(tensor, is_dm: bool):
r"""Returns the mean of the number operator in each mode."""
probs = math.all_diagonals(tensor, real=True) if is_dm else math.abs(tensor) ** 2
modes = list(range(len(probs.shape)))
+ # print("aa", [modes[:k] + modes[k + 1 :] for k in range(len(modes))])
marginals = [math.sum(probs, axes=modes[:k] + modes[k + 1 :]) for k in range(len(modes))]
return math.astensor(
[
- math.sum(marginal * math.arange(len(marginal), dtype=marginal.dtype))
+ math.sum(marginal * math.arange(len(marginal), dtype=math.float64))
for marginal in marginals
]
)
@@ -706,7 +702,9 @@ def oscillator_eigenstate(q: Vector, cutoff: int) -> Tensor:
x_tensor = math.sqrt(omega_over_hbar) * math.cast(q, "float64") # unit-less vector
# prefactor term (\Omega/\hbar \pi)**(1/4) * 1 / sqrt(2**n)
- prefactor = (omega_over_hbar / np.pi) ** (1 / 4) * math.sqrt(2 ** (-math.arange(0, cutoff)))
+ prefactor = (omega_over_hbar / np.pi) ** (1 / 4) * math.sqrt(
+ math.pow(1 / 2, math.arange(0, cutoff))
+ )
# Renormalized physicist hermite polys: Hn / sqrt(n!)
R = -np.array([[2 + 0j]]) # to get the physicist polys
@@ -888,6 +886,10 @@ def displacement(x, y, shape, tol=1e-15):
else:
gate = math.eye(max(shape), dtype="complex128")[: shape[0], : shape[1]]
+ ret = math.astensor(gate, dtype=gate.dtype.name)
+ if math.backend_name == "numpy":
+ return ret
+
def grad(dL_dDc):
dD_da, dD_dac = strategies.jacobian_displacement(math.asnumpy(gate), alpha)
dL_dac = np.sum(np.conj(dL_dDc) * dD_dac + dL_dDc * np.conj(dD_da))
@@ -895,7 +897,7 @@ def grad(dL_dDc):
dLdy = 2 * np.imag(dL_dac)
return math.astensor(dLdx, dtype=x.dtype), math.astensor(dLdy, dtype=y.dtype)
- return math.astensor(gate, dtype=gate.dtype.name), grad
+ return ret, grad
@math.custom_gradient
@@ -918,6 +920,10 @@ def beamsplitter(theta: float, phi: float, shape: Sequence[int], method: str):
f"Unknown beamsplitter method {method}. Options are 'vanilla' and 'schwinger'."
)
+ ret = math.astensor(bs_unitary, dtype=bs_unitary.dtype.name)
+ if math.backend_name == "numpy":
+ return ret
+
def vjp(dLdGc):
dtheta, dphi = strategies.beamsplitter_vjp(
math.asnumpy(bs_unitary),
@@ -927,7 +933,7 @@ def vjp(dLdGc):
)
return math.astensor(dtheta, dtype=theta.dtype), math.astensor(dphi, dtype=phi.dtype)
- return math.astensor(bs_unitary, dtype=bs_unitary.dtype.name), vjp
+ return ret, vjp
@math.custom_gradient
@@ -935,6 +941,10 @@ def squeezer(r, phi, shape):
r"""creates a single mode squeezer matrix using a numba-based fock lattice strategy"""
sq_unitary = strategies.squeezer(shape, math.asnumpy(r), math.asnumpy(phi))
+ ret = math.astensor(sq_unitary, dtype=sq_unitary.dtype.name)
+ if math.backend_name == "numpy":
+ return ret
+
def vjp(dLdGc):
dr, dphi = strategies.squeezer_vjp(
math.asnumpy(sq_unitary),
@@ -944,7 +954,7 @@ def vjp(dLdGc):
)
return math.astensor(dr, dtype=r.dtype), math.astensor(dphi, phi.dtype)
- return math.astensor(sq_unitary, dtype=sq_unitary.dtype.name), vjp
+ return ret, vjp
@math.custom_gradient
@@ -952,6 +962,10 @@ def squeezed(r, phi, shape):
r"""creates a single mode squeezed state using a numba-based fock lattice strategy"""
sq_ket = strategies.squeezed(shape, math.asnumpy(r), math.asnumpy(phi))
+ ret = math.astensor(sq_ket, dtype=sq_ket.dtype.name)
+ if math.backend_name == "numpy":
+ return ret
+
def vjp(dLdGc):
dr, dphi = strategies.squeezed_vjp(
math.asnumpy(sq_ket),
@@ -961,4 +975,4 @@ def vjp(dLdGc):
)
return math.astensor(dr, dtype=r.dtype), math.astensor(dphi, phi.dtype)
- return math.astensor(sq_ket, dtype=sq_ket.dtype.name), vjp
+ return ret, vjp
diff --git a/mrmustard/physics/gaussian.py b/mrmustard/physics/gaussian.py
index 0287890df..efeb3d77f 100644
--- a/mrmustard/physics/gaussian.py
+++ b/mrmustard/physics/gaussian.py
@@ -20,13 +20,9 @@
from thewalrus.quantum import is_pure_cov
-from mrmustard import settings
-from mrmustard.math import Math
-from mrmustard.typing import Matrix, Scalar, Vector
-from mrmustard.utils.xptensor import XPMatrix, XPVector
-
-math = Math()
-
+from mrmustard import math, settings
+from mrmustard.math.tensor_wrappers.xptensor import XPMatrix, XPVector
+from mrmustard.utils.typing import Matrix, Scalar, Vector
# ~~~~~~
# States
@@ -166,8 +162,8 @@ def squeezing_symplectic(r: Union[Scalar, Vector], phi: Union[Scalar, Vector]) -
Returns:
Tensor: symplectic matrix of a squeezing gate
"""
- r = math.atleast_1d(r)
- phi = math.atleast_1d(phi)
+ r = math.atleast_1d(r, math.float64)
+ phi = math.atleast_1d(phi, math.float64)
if r.shape[-1] == 1:
r = math.tile(r, phi.shape)
if phi.shape[-1] == 1:
@@ -191,14 +187,14 @@ def displacement(x: Union[Scalar, Vector], y: Union[Scalar, Vector]) -> Vector:
The dimension depends on the dimensions of ``x`` and ``y``.
Args:
- x (scalar or vector): real part of displacement
- y (scalar or vector): imaginary part of displacement
+ x (scalar or vector): real part of displacement (in units of :math:`\sqrt{\hbar}`)
+ y (scalar or vector): imaginary part of displacement (in units of :math:`\sqrt{\hbar}`)
Returns:
Vector: displacement vector of a displacement gate
"""
- x = math.atleast_1d(x)
- y = math.atleast_1d(y)
+ x = math.atleast_1d(x, math.float64)
+ y = math.atleast_1d(y, math.float64)
if x.shape[-1] == 1:
x = math.tile(x, y.shape)
if y.shape[-1] == 1:
@@ -290,11 +286,11 @@ def two_mode_squeezing_symplectic(r: Scalar, phi: Scalar) -> Matrix:
Returns:
Matrix: symplectic matrix of a two-mode squeezing gate
"""
- cp = math.cos(phi)
- sp = math.sin(phi)
- ch = math.cosh(r)
- sh = math.sinh(r)
- zero = math.zeros_like(r)
+ cp = math.cast(math.cos(phi), math.float64)
+ sp = math.cast(math.sin(phi), math.float64)
+ ch = math.cast(math.cosh(r), math.float64)
+ sh = math.cast(math.sinh(r), math.float64)
+ zero = math.cast(math.zeros_like(math.asnumpy(r)), math.float64)
return math.astensor(
[
[ch, cp * sh, zero, sp * sh],
@@ -445,8 +441,8 @@ def loss_XYd(
.. math::
- X = math.sqrt(gain)
- Y = (gain - 1) * (2 * nbar + 1) * hbar / 2
+ X = math.sqrt(transmissivity)
+ Y = (1-transmissivity) * (2 * nbar + 1) * hbar / 2
Reference: Alessio Serafini - Quantum Continuous Variables (5.77, p. 108)
@@ -470,6 +466,13 @@ def loss_XYd(
def amp_XYd(gain: Union[Scalar, Vector], nbar: Union[Scalar, Vector]) -> Matrix:
r"""Returns the ``X``, ``Y`` matrices and the d vector for the noisy amplifier channel.
+ .. math::
+
+ X = math.sqrt(gain)
+ Y = (gain-1) * (2 * nbar + 1) * hbar / 2
+
+ Reference: Alessio Serafini - Quantum Continuous Variables (5.77, p. 111)
+
The quantum limited amplifier channel is recovered for ``nbar = 0.0``.
Args:
@@ -662,7 +665,8 @@ def trace(cov: Matrix, means: Vector, Bmodes: Sequence[int]) -> Tuple[Matrix, Ve
"""
N = len(cov) // 2
Aindices = math.astensor(
- [i for i in range(N) if i not in Bmodes] + [i + N for i in range(N) if i not in Bmodes]
+ [i for i in range(N) if i not in Bmodes] + [i + N for i in range(N) if i not in Bmodes],
+ dtype=math.int32,
)
A_cov_block = math.gather(math.gather(cov, Aindices, axis=0), Aindices, axis=1)
A_means_vec = math.gather(means, Aindices)
@@ -810,7 +814,7 @@ def fidelity(mu1: Vector, cov1: Matrix, mu2: Vector, cov2: Matrix) -> float:
_fidelity = f0 * math.exp((-1 / 2) * dot) # square of equation 95
- return math.cast(_fidelity, "float64")
+ return math.real(_fidelity)
def physical_partial_transpose(cov: Matrix, modes: Sequence[int]) -> Matrix:
@@ -838,7 +842,7 @@ def physical_partial_transpose(cov: Matrix, modes: Sequence[int]) -> Matrix:
def log_negativity(cov: Matrix) -> float:
r"""Returns the log_negativity of a Gaussian state.
- Reference: `https://arxiv.org/pdf/quant-ph/0102117.pdf `_ , Equation 57, 61.
+ Reference: `https://arxiv.org/abs/quant-ph/0102117 `_ , Equation 57, 61.
Args:
cov (Matrix): the covariance matrix
diff --git a/mrmustard/physics/husimi.py b/mrmustard/physics/husimi.py
index f759c96f5..44ce075f0 100644
--- a/mrmustard/physics/husimi.py
+++ b/mrmustard/physics/husimi.py
@@ -17,10 +17,7 @@
"""
This module contains functions for transforming to the Husimi representation.
"""
-from mrmustard import settings
-from mrmustard.math import Math
-
-math = Math()
+from mrmustard import math, settings
def pq_to_aadag(X):
diff --git a/mrmustard/physics/representations.py b/mrmustard/physics/representations.py
new file mode 100644
index 000000000..9434910b2
--- /dev/null
+++ b/mrmustard/physics/representations.py
@@ -0,0 +1,312 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import numpy as np
+from matplotlib import colors
+import matplotlib.pyplot as plt
+from mrmustard import math
+from mrmustard.physics import bargmann
+from mrmustard.physics.ansatze import Ansatz, PolyExpAnsatz
+from mrmustard.utils.typing import Batch, ComplexMatrix, ComplexTensor, ComplexVector, Scalar
+import numpy as np
+from mrmustard import math
+
+
+class Representation:
+ r"""
+ A base class for representations.
+ """
+
+ def from_ansatz(self, ansatz: Ansatz) -> Representation:
+ r"""
+ Returns a representation object from an ansatz object.
+ To be implemented by subclasses.
+ """
+ raise NotImplementedError
+
+ def __eq__(self, other: Representation) -> bool:
+ r"""
+ Whether this representation is equal to another.
+ """
+ return self.ansatz == other.ansatz
+
+ def __add__(self, other: Representation) -> Representation:
+ r"""
+ Adds this representation to another.
+ """
+ return self.from_ansatz(self.ansatz + other.ansatz)
+
+ def __sub__(self, other) -> Representation:
+ r"""
+ Subtracts another representation from this one.
+ """
+ return self.from_ansatz(self.ansatz - other.ansatz)
+
+ def __mul__(self, other: Representation | Scalar) -> Representation:
+ r"""
+ Multiplies this representation by another or by a scalar.
+ """
+ try:
+ return self.from_ansatz(self.ansatz * other.ansatz)
+ except AttributeError:
+ return self.from_ansatz(self.ansatz * other)
+
+ def __rmul__(self, other: Representation | Scalar) -> Representation:
+ r"""
+ Multiplies this representation by another or by a scalar on the right.
+ """
+ return self.__mul__(other)
+
+ def __truediv__(self, other: Representation | Scalar) -> Representation:
+ r"""
+ Divides this representation by another or by a scalar.
+ """
+ try:
+ return self.from_ansatz(self.ansatz / other.ansatz)
+ except AttributeError:
+ return self.from_ansatz(self.ansatz / other)
+
+ def __rtruediv__(self, other: Representation | Scalar) -> Representation:
+ r"""
+ Divides this representation by another or by a scalar on the right.
+ """
+ return self.from_ansatz(other / self.ansatz)
+
+ def __and__(self, other: Representation) -> Representation:
+ r"""
+ Takes the outer product of this representation with another.
+ """
+ return self.from_ansatz(self.ansatz & other.ansatz)
+
+
+class Bargmann(Representation):
+ r"""This class is the Fock-Bargmann representation of a broad class of quantum states,
+ transformations, measurements, channels, etc.
+
+ The ansatz available in this representation is a linear combination of
+ exponentials of bilinear forms with a polynomial part:
+
+ .. math::
+ F(z) = \sum_i \textrm{poly}_i(z) \textrm{exp}(z^T A_i z / 2 + z^T b_i)
+
+ This function allows for vector space operations on Bargmann objects including
+ linear combinations, outer product, and inner product.
+ The inner product is defined as the contraction of two Bargmann objects across
+ marked indices. This can also be used to contract existing indices
+ in a single Bargmann object, e.g. to implement the partial trace.
+
+ Note that the operations that change the shape of the ansatz (outer product (``&``)
+ and inner product (``@``)) do not automatically modify the ordering of the
+ combined or leftover indices.
+
+ Examples:
+ .. code-block:: python
+
+ A = math.astensor([[[1.0]]]) # 1x1x1
+ b = math.astensor([[0.0]]) # 1x1
+ c = math.astensor([0.9]) # 1
+ psi1 = Bargmann(A, b, c)
+ psi2 = Bargmann(A, b, c)
+ psi3 = 1.3 * psi1 - 2.1 * psi2 # linear combination
+ assert psi3.A.shape == (2, 1, 1) # stacked along batch dimension
+ psi4 = psi1[0] @ psi2[0] # contract wires 0 on each (inner product)
+ assert psi4.A.shape == (1,) # A is 0x0 now (no wires left)
+ psi5 = psi1 & psi2 # outer product (tensor product)
+ rho = psi1.conj() & psi1 # outer product (this is now the density matrix)
+ assert rho.A.shape == (1, 2, 2) # we have two wires now
+ assert np.allclose(rho.trace((0,), (1,)), np.abs(c)**2)
+
+
+ Args:
+ A: batch of quadratic coefficient :math:`A_i`
+ b: batch of linear coefficients :math:`b_i`
+ c: batch of arrays :math:`c_i` (default: [1.0])
+ """
+
+ def __init__(
+ self,
+ A: Batch[ComplexMatrix],
+ b: Batch[ComplexVector],
+ c: Batch[ComplexTensor] = [1.0],
+ ):
+ r"""Initializes the Bargmann representation. Args can be passed non-batched,
+ they will be automatically broadcasted to the correct batch shape.
+
+ Args:
+ A: batch of quadratic coefficient :math:`A_i`
+ b: batch of linear coefficients :math:`b_i`
+ c: batch of arrays :math:`c_i` (default: [1.0])
+ """
+ self._contract_idxs: tuple[int, ...] = ()
+ self.ansatz = PolyExpAnsatz(A, b, c)
+
+ def __call__(self, z: ComplexTensor) -> ComplexTensor:
+ r"""Evaluates the Bargmann function at the given array of points."""
+ return self.ansatz(z)
+
+ def from_ansatz(self, ansatz: PolyExpAnsatz) -> Bargmann:
+ r"""Returns a Bargmann object from an ansatz object."""
+ return self.__class__(ansatz.A, ansatz.b, ansatz.c)
+
+ @property
+ def A(self) -> Batch[ComplexMatrix]:
+ r"""
+ The batch of quadratic coefficient :math:`A_i`.
+ """
+ return self.ansatz.A
+
+ @property
+ def b(self) -> Batch[ComplexVector]:
+ r"""
+ The batch of linear coefficients :math:`b_i`
+ """
+ return self.ansatz.b
+
+ @property
+ def c(self) -> Batch[ComplexTensor]:
+ r"""
+ The batch of arrays :math:`c_i`.
+ """
+ return self.ansatz.c
+
+ def conj(self):
+ r"""
+ The conjugate of this Bargmann object.
+ """
+ new = self.__class__(math.conj(self.A), math.conj(self.b), math.conj(self.c))
+ new._contract_idxs = self._contract_idxs
+ return new
+
+ def __getitem__(self, idx: int | tuple[int, ...]) -> Bargmann:
+ r"""Returns a copy of self with the given indices marked for contraction."""
+ idx = (idx,) if isinstance(idx, int) else idx
+ for i in idx:
+ if i >= self.ansatz.dim:
+ raise IndexError(
+ f"Index {i} out of bounds for ansatz {self.ansatz.__class__.__qualname__} of dimension {self.ansatz.dim}."
+ )
+ new = self.__class__(self.A, self.b, self.c)
+ new._contract_idxs = idx
+ return new
+
+ def __matmul__(self, other: Bargmann) -> Bargmann:
+ r"""Implements the inner product of ansatze across the marked indices."""
+ if self.ansatz.degree > 0 or other.ansatz.degree > 0:
+ raise NotImplementedError(
+ "Inner product of ansatze is only supported for ansatze with polynomial of degree 0."
+ )
+ Abc = []
+ for A1, b1, c1 in zip(self.A, self.b, self.c):
+ for A2, b2, c2 in zip(other.A, other.b, other.c):
+ Abc.append(
+ bargmann.contract_two_Abc(
+ (A1, b1, c1),
+ (A2, b2, c2),
+ self._contract_idxs,
+ other._contract_idxs,
+ )
+ )
+ A, b, c = zip(*Abc)
+ return self.__class__(math.astensor(A), math.astensor(b), math.astensor(c))
+
+ def trace(self, idx_z: tuple[int, ...], idx_zconj: tuple[int, ...]) -> Bargmann:
+ r"""Implements the partial trace over the given index pairs.
+
+ Args:
+ idx_z: indices to trace over
+ idx_zconj: indices to trace over
+
+ Returns:
+ Bargmann: the ansatz with the given indices traced over
+ """
+ if self.ansatz.degree > 0:
+ raise NotImplementedError(
+ "Partial trace is only supported for ansatze with polynomial of degree ``0``."
+ )
+ if len(idx_z) != len(idx_zconj):
+ msg = f"The number of indices to trace over must be the same for ``z`` and ``z*`` (got {len(idx_z)} and {len(idx_zconj)})."
+ raise ValueError(msg)
+ A, b, c = [], [], []
+ for Abci in zip(self.A, self.b, self.c):
+ Aij, bij, cij = bargmann.complex_gaussian_integral(Abci, idx_z, idx_zconj, measure=-1.0)
+ A.append(Aij)
+ b.append(bij)
+ c.append(cij)
+ return self.__class__(math.astensor(A), math.astensor(b), math.astensor(c))
+
+ def reorder(self, order: tuple[int, ...] | list[int]) -> Bargmann:
+ r"""Reorders the indices of the A matrix and b vector of an (A,b,c) triple.
+ Returns a new Bargmann object."""
+ A, b, c = bargmann.reorder_abc((self.A, self.b, self.c), order)
+ return self.__class__(A, b, c)
+
+ def plot(
+ self,
+ just_phase: bool = False,
+ with_measure: bool = False,
+ log_scale: bool = False,
+ xlim=(-2 * np.pi, 2 * np.pi),
+ ylim=(-2 * np.pi, 2 * np.pi),
+ **kwargs,
+ ): # pragma: no cover
+ r"""Plots the Bargmann function F(z) on the complex plane. Phase is represented by color,
+ magnitude by brightness. The function can be multiplied by exp(-|z|^2) to represent
+ the Bargmann function times the measure function (for integration).
+
+ Args:
+ just_phase (bool): whether to plot only the phase of the Bargmann function
+ with_measure (bool): whether to plot the bargmann function times the measure function exp(-|z|^2)
+ log_scale (bool): whether to plot the log of the Bargmann function
+ xlim (tuple[float, float]): x limits of the plot
+ ylim (tuple[float, float]): y limits of the plot
+
+ Returns:
+ tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]: the figure and axes of the plot
+ """
+ # eval F(z) on a grid of complex numbers
+ X, Y = np.mgrid[xlim[0] : xlim[1] : 400j, ylim[0] : ylim[1] : 400j]
+ Z = (X + 1j * Y).T
+ f_values = self(Z[..., None])
+ if log_scale:
+ f_values = np.log(np.abs(f_values)) * np.exp(1j * np.angle(f_values))
+ if with_measure:
+ f_values = f_values * np.exp(-np.abs(Z) ** 2)
+
+ # Get phase and magnitude of F(z)
+ phases = np.angle(f_values) / (2 * np.pi) % 1
+ magnitudes = np.abs(f_values)
+ magnitudes_scaled = magnitudes / np.max(magnitudes)
+
+ # Convert to RGB
+ hsv_values = np.zeros(f_values.shape + (3,))
+ hsv_values[..., 0] = phases
+ hsv_values[..., 1] = 1
+ hsv_values[..., 2] = 1 if just_phase else magnitudes_scaled
+ rgb_values = colors.hsv_to_rgb(hsv_values)
+
+ # Plot the image
+ fig, ax = plt.subplots()
+ ax.imshow(rgb_values, origin="lower", extent=[xlim[0], xlim[1], ylim[0], ylim[1]])
+ ax.set_xlabel("$Re(z)$")
+ ax.set_ylabel("$Im(z)$")
+
+ name = "F_{" + self.ansatz.name + "}(z)"
+ name = f"\\arg({name})\\log|{name}|" if log_scale else name
+ title = name + "e^{-|z|^2}" if with_measure else name
+ title = f"\\arg({name})" if just_phase else title
+ ax.set_title(f"${title}$")
+ plt.show(block=False)
+ return fig, ax
diff --git a/mrmustard/utils/wigner.py b/mrmustard/physics/wigner.py
similarity index 98%
rename from mrmustard/utils/wigner.py
rename to mrmustard/physics/wigner.py
index bfe4d10ad..49ec214b3 100644
--- a/mrmustard/utils/wigner.py
+++ b/mrmustard/physics/wigner.py
@@ -17,13 +17,11 @@
import numpy as np
from numba import njit
-from mrmustard import settings
-from mrmustard.math import Math
+from mrmustard import math, settings
__all__ = ["wigner_discretized"]
-math = Math()
# ~~~~~~~
# Helpers
diff --git a/mrmustard/training/__init__.py b/mrmustard/training/__init__.py
index 8f13cda51..799dc842c 100644
--- a/mrmustard/training/__init__.py
+++ b/mrmustard/training/__init__.py
@@ -28,13 +28,11 @@
import numpy as np
+ from mrmustard import math
from mrmustard.lab.gates import S2gate, BSgate
from mrmustard.lab.states import Vacuum
from mrmustard.lab.circuit import Circuit
from mrmustard.training import Optimizer
- from mrmustard.math import Math
-
- math = Math()
r = np.arcsinh(1.0)
s2_0 = S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1]
@@ -66,6 +64,5 @@ def cost_fn():
"""
-from .parametrized import Parametrized
from .optimizer import Optimizer
from .callbacks import TensorboardCallback
diff --git a/mrmustard/training/callbacks.py b/mrmustard/training/callbacks.py
index 1c47c7ffd..16f8e7e9d 100644
--- a/mrmustard/training/callbacks.py
+++ b/mrmustard/training/callbacks.py
@@ -67,16 +67,22 @@ def rolling_cost_cb(optimizer, cost, **kwargs):
"""
+# pylint: disable = wrong-import-position
+
+
from dataclasses import dataclass
from datetime import datetime
import hashlib
from pathlib import Path
from typing import Callable, Optional, Mapping, Sequence, Union
+
+import os
import numpy as np
+
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
-from mrmustard.math import Math
-math = Math()
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0"
@dataclass
@@ -240,7 +246,6 @@ def call(
**kwargs,
): # pylint: disable=unused-argument,arguments-differ
"""Logs costs and parameters to Tensorboard."""
-
self.init_writer(trainables=trainables)
obj_tag = "objectives"
@@ -258,9 +263,9 @@ def call(
orig_cost = np.array(optimizer.callback_history["orig_cost"][-1]).item()
obj_scalars[f"{obj_tag}/orig_cost"] = orig_cost
if self.cost_converter is not None:
- obj_scalars[
- f"{obj_tag}/{self.cost_converter.__name__}(orig_cost)"
- ] = self.cost_converter(orig_cost)
+ obj_scalars[f"{obj_tag}/{self.cost_converter.__name__}(orig_cost)"] = (
+ self.cost_converter(orig_cost)
+ )
for k, v in obj_scalars.items():
tf.summary.scalar(k, data=v, step=self.optimizer_step)
diff --git a/mrmustard/training/optimizer.py b/mrmustard/training/optimizer.py
index 4a49fc847..337ebe0e5 100644
--- a/mrmustard/training/optimizer.py
+++ b/mrmustard/training/optimizer.py
@@ -18,15 +18,18 @@
from itertools import chain, groupby
from typing import List, Callable, Sequence, Union, Mapping, Dict
+from mrmustard import math
+from mrmustard.math.parameters import Constant, Variable
from mrmustard.training.callbacks import Callback
-from mrmustard.utils import graphics
-from mrmustard.logger import create_logger
-from mrmustard.math import Math
-from .parameter import Parameter, Trainable, create_parameter
-from .parametrized import Parametrized
-from .parameter_update import param_update_method
-
-math = Math()
+from mrmustard.training.progress_bar import ProgressBar
+from mrmustard.utils.logger import create_logger
+from mrmustard.math.parameters import (
+ update_euclidean,
+ update_orthogonal,
+ update_symplectic,
+ update_unitary,
+)
+from mrmustard.lab import Circuit
__all__ = ["Optimizer"]
@@ -50,10 +53,10 @@ def __init__(
euclidean_lr: float = 0.001,
):
self.learning_rate = {
- "euclidean": euclidean_lr,
- "symplectic": symplectic_lr,
- "unitary": unitary_lr,
- "orthogonal": orthogonal_lr,
+ update_euclidean: euclidean_lr,
+ update_symplectic: symplectic_lr,
+ update_unitary: unitary_lr,
+ update_orthogonal: orthogonal_lr,
}
self.opt_history: List[float] = [0]
self.callback_history: Dict[str, List] = {}
@@ -62,7 +65,7 @@ def __init__(
def minimize(
self,
cost_fn: Callable,
- by_optimizing: Sequence[Trainable],
+ by_optimizing: Sequence[Union[Constant, Variable, Circuit]],
max_steps: int = 1000,
callbacks: Union[Callable, Sequence[Callable], Mapping[str, Callable]] = None,
):
@@ -98,7 +101,7 @@ def _minimize(self, cost_fn, by_optimizing, max_steps, callbacks):
cost_fn_modified = False
orig_cost_fn = cost_fn
- bar = graphics.Progressbar(max_steps)
+ bar = ProgressBar(max_steps)
with bar:
while not self.should_stop(max_steps):
cost, grads = self.compute_loss_and_gradients(cost_fn, trainable_params.values())
@@ -132,43 +135,47 @@ def apply_gradients(self, trainable_params, grads):
applies the corresponding update method for each variable type. Update methods are
registered on :mod:`parameter_update` module.
"""
+ grouped_items = sorted(
+ zip(grads, trainable_params),
+ key=lambda x: hash(getattr(x[1], "update_fn", update_euclidean)),
+ )
+ grouped_items = {
+ key: list(result)
+ for key, result in groupby(
+ grouped_items, key=lambda x: hash(getattr(x[1], "update_fn", update_euclidean))
+ )
+ }
- # group grads and vars by type (i.e. euclidean, symplectic, orthogonal, unitary)
- grouped_vars_and_grads = self._group_vars_and_grads_by_type(trainable_params, grads)
-
- for param_type, grads_vars in grouped_vars_and_grads.items():
- param_lr = self.learning_rate[param_type]
+ for grads_vars in grouped_items.values():
+ update_fn = getattr(grads_vars[0][1], "update_fn", update_euclidean)
+ params_lr = self.learning_rate[update_fn]
# extract value (tensor) from the parameter object and group with grad
grads_and_vars = [(grad, p.value) for grad, p in grads_vars]
- update_method = param_update_method.get(param_type)
- update_method(grads_and_vars, param_lr)
+ update_fn(grads_and_vars, params_lr)
@staticmethod
def _get_trainable_params(trainable_items, root_tag: str = "optimized"):
- """Traverses all instances of Parametrized or trainable items that belong to the backend
+ """Traverses all instances of gates, states, detectors, or trainable items that belong to the backend
and return a dict of trainables of the form `{tags: trainable_parameters}` where the `tags`
are traversal paths of collecting all parent tags for reaching each parameter.
"""
trainables = []
for i, item in enumerate(trainable_items):
owner_tag = f"{root_tag}[{i}]"
- if isinstance(item, Parametrized):
+ if isinstance(item, Circuit):
+ for j, op in enumerate(item.ops):
+ tag = f"{owner_tag}:{item.__class__.__qualname__}/_ops[{j}]"
+ tagged_vars = op.parameter_set.tagged_variables(tag)
+ trainables.append(tagged_vars.items())
+ elif hasattr(item, "parameter_set"):
tag = f"{owner_tag}:{item.__class__.__qualname__}"
- trainables.append(item.traverse_trainables(owner_tag=tag).items())
+ tagged_vars = item.parameter_set.tagged_variables(tag)
+ trainables.append(tagged_vars.items())
elif math.from_backend(item) and math.is_trainable(item):
# the created parameter is wrapped into a list because the case above
# returns a list, hence ensuring we have a list of lists
tag = f"{owner_tag}:{math.__class__.__name__}/{getattr(item, 'name', item.__class__.__name__)}"
- trainables.append(
- [
- (
- tag,
- create_parameter(
- item, name="from_backend", is_trainable=True, owner=tag
- ),
- )
- ]
- )
+ trainables.append([(tag, Variable(item, name="from_backend"))])
return dict(chain(*trainables))
@@ -189,7 +196,7 @@ def _group_vars_and_grads_by_type(trainable_params, grads):
return grouped
@staticmethod
- def compute_loss_and_gradients(cost_fn: Callable, parameters: List[Parameter]):
+ def compute_loss_and_gradients(cost_fn: Callable, parameters: List[Variable]):
r"""Uses the backend to compute the loss and gradients of the parameters
given a cost function.
@@ -199,7 +206,7 @@ def compute_loss_and_gradients(cost_fn: Callable, parameters: List[Parameter]):
Args:
cost_fn (Callable with no args): The cost function.
- parameters (List[Parameter]): The parameters to optimize.
+ parameters (List[Variable]): The variables to optimize.
Returns:
tuple(Tensor, List[Tensor]): The loss and the gradients.
diff --git a/mrmustard/training/parameter.py b/mrmustard/training/parameter.py
deleted file mode 100644
index 901757b84..000000000
--- a/mrmustard/training/parameter.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# Copyright 2022 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-The classes in this module contain the :class:`Parameter` passed to the
-quantum operations represented by :class:`Parametrized` subclasses.
-
-Parameter types
----------------
-There are three basic types of parameters:
-
-1. **Numerical parameters** (bound and fixed): An immediate python object
- (float, complex, int, list, numerical array, ...). Implemented as-is, not encapsulated in a class
- and has the typical python behaviour. They are assigned to properties of the relevant class.
- For example,
-
- .. code-block::
-
- class Gate():
- def __init__(self, modes: List):
- self._modes = modes
-
-2. **Trainable parameters** (bound but not fixed): These are parameters that are updated by
- the optimization procedure. Tipically, this are defined via arguments of the
- :class:`Parametrized` class constructor.
-
- .. code-block::
-
- class Gate(Parametrized):
- def __init__(self, r: float, modes: List, r_trainable: bool):
- super.__init__(r=r, r_trainable=r_trainable)
- self._modes = modes
-
- gate = Gate(r=0, modes=[1], r_trainable=True)
- gate._r # access the dynamically assigned property of the trainable parameter
- isinstance(gate._r, Parameter) # evaluates to True
-
- The dynamically assigned property is an instance of :class:`Parameter` and contains the
- ``value`` property which is a tensor of the autograd backend.
-
- There are four types of trainable parameters: symplectic, euclidean, unitary and orthogonal.
- Each type defines a different optimization procedure on the :py:training: module.
-
- .. code-block::
-
- class SymplecticGate(Parametrized):
- def __init__(self, symplectic: Tensor):
- super.__init__(symplectic=symplectic, symplectic_trainable=True)
-
- class EuclideanGate(Parametrized):
- def __init__(self, euclidean: Tensor):
- super.__init__(euclidean=euclidean, euclidean_trainable=True)
-
- class OrthogonalGate(Parametrized):
- def __init__(self, orthogonal: Tensor):
- super.__init__(orthogonal=orthogonal, orthogonal_trainable=True)
-
- class UnitaryGate(Parametrized):
- def __init__(self, unitary: Array):
- super.__init__(unitary=unitary, unitary_trainable=True)
-
- The optimization procedure updates the value of the trainables *in-place*.
-
-3. **Constant parameters** (bound and fixed): This class of parameters belong to the autograd
- backend but remain fixed during the optimization procedure. They are created by setting the
- trainable flag to False.
-
- .. code-block::
-
- class Gate(Parametrized):
- def __init__(self, r: float):
- super.__init__(r=r, r_trainable=False)
-
-"""
-# pylint: disable=super-init-not-called
-
-from abc import ABC, abstractmethod
-
-from typing import Optional, Sequence, Any
-from mrmustard.math import Math
-from mrmustard.typing import Tensor
-
-math = Math()
-
-__all__ = [
- "Parameter",
- "Trainable",
- "Symplectic",
- "Euclidean",
- "Orthogonal",
- "Constant",
- "create_parameter",
-]
-
-
-class Parameter(ABC):
- """Parameter abstract base class.
-
- This class implements common methods for :class:`Trainable` and :class:`Constant` parameters.
- """
-
- @abstractmethod
- def __init__(self, value: Any, name: str, owner: Optional[str] = None) -> None:
- pass
-
- @property
- def value(self) -> Tensor:
- """tensor value of the parameter"""
- return self._value
-
- @property
- def name(self) -> str:
- """name of the parameter"""
- return self._name
-
- @property
- def owner(self) -> str:
- """parameter owner"""
- return self._owner
-
- @property
- def type(self) -> str:
- """the lowercased name of the class of this parameter object"""
- return self.__class__.__name__.lower()
-
-
-class Trainable(Parameter, ABC):
- """This abstract base class represent parameters that are mutable
- and can updated by the optimization procedure.
-
- Note that the class name of instances of ``Trainable`` are used
- to infer the optimization procedure on the :py:training: module.
- """
-
- @abstractmethod
- def __init__(self, value: Any, name: str, owner: Optional[str] = None) -> None:
- pass
-
-
-class Symplectic(Trainable):
- """Symplectic trainable. Uses :meth:`training.parameter_update.update_symplectic`."""
-
- def __init__(self, value: Any, name: str, owner: Optional[str] = None) -> None:
- self._value = value_to_trainable(value, None, name)
- self._name = name
- self._owner = owner
-
-
-class Euclidean(Trainable):
- """Euclidean trainable. Uses :meth:`training.parameter_update.update_euclidean`."""
-
- def __init__(
- self, value: Any, bounds: Optional[Sequence], name: str, owner: Optional[str] = None
- ) -> None:
- self._value = value_to_trainable(value, bounds, name)
- self._name = name
- self._owner = owner
- self.bounds = bounds
-
-
-class Orthogonal(Trainable):
- """Orthogonal trainable. Uses :meth:`training.parameter_update.update_orthogonal`."""
-
- def __init__(self, value: Any, name: str, owner: Optional[str] = None) -> None:
- self._value = value_to_trainable(value, None, name)
- self._name = name
- self._owner = owner
-
-
-class Unitary(Trainable):
- """Unitary trainable. Uses :meth:`training.parameter_update.update_unitary`."""
-
- def __init__(self, value: Any, name: str, owner: Optional[str] = None) -> None:
- self._value = value_to_trainable(value, None, name)
- self._name = name
- self._owner = owner
-
-
-class Constant(Parameter):
- """Constant parameter. It belongs to the autograd backend but remains fixed
- during any optimization procedure
- """
-
- def __init__(self, value: Any, name: str, owner: Optional[str] = None) -> None:
- if math.from_backend(value) and not math.is_trainable(value):
- self._value = value
- elif type(value) in [list, int, float]:
- self._value = math.new_constant(value, name)
- else:
- self._value = math.new_constant(value, name, value.dtype)
- self._name = name
- self._owner = owner
-
-
-def create_parameter(
- value: Any, name: str, is_trainable: bool = False, bounds: Optional[Sequence] = None, owner=None
-) -> Trainable:
- """A factory function that returns an instance of a :class:`Trainable` given
- its arguments.
-
- Args:
- value: The value to be assigned to the parameter. This value
- is casted into a Tensor belonging to the backend.
- name (str): name of the parameter
- is_trainable (bool): if ``True`` the returned object is instance
- of :class:`Trainable`, else returns an instance of a :class:`Constant`
- bounds (None or Sequence): value constraints for the parameter, only applicable
- for Euclidean parameters
-
- Returns:
- Parameter: an instance of a :class:`Constant` or :class:`Symplectic`, :class:`Orthogonal`
- or :class:`Euclidean` trainable.
- """
-
- if not is_trainable:
- return Constant(value, name, owner)
-
- if name.startswith("symplectic"):
- return Symplectic(value, name, owner)
-
- if name.startswith("orthogonal"):
- return Orthogonal(value, name, owner)
-
- if name.startswith("unitary"):
- return Unitary(value, name, owner)
-
- return Euclidean(value, bounds, name, owner)
-
-
-def value_to_trainable(value: Any, bounds: Optional[Sequence], name: str) -> Tensor:
- """Converts a value to a backend tensor variable if needed.
-
- Args:
- value: value to be casted into a tensor of the backend
- bounds (None or Sequence): value constraints for the parameter, only applicable
- for Euclidean parameters
- name (str): name of the parameter
- """
- if math.from_backend(value) and math.is_trainable(value):
- return value
- elif type(value) in [list, int, float]:
- return math.new_variable(value, bounds, name)
- else:
- return math.new_variable(value, bounds, name, value.dtype)
diff --git a/mrmustard/training/parameter_update.py b/mrmustard/training/parameter_update.py
index 912fc3bf4..0a1be7c9a 100644
--- a/mrmustard/training/parameter_update.py
+++ b/mrmustard/training/parameter_update.py
@@ -16,13 +16,11 @@
"""
from typing import Tuple, Sequence
-from mrmustard.math import Math
-from mrmustard.typing import Tensor
+from mrmustard.utils.typing import Tensor
+from mrmustard import math
from .parameter import Trainable
-math = Math()
-
def update_symplectic(grads_and_vars: Sequence[Tuple[Tensor, Trainable]], symplectic_lr: float):
r"""Updates the symplectic parameters using the given symplectic gradients.
diff --git a/mrmustard/training/parametrized.py b/mrmustard/training/parametrized.py
deleted file mode 100644
index e2a05578c..000000000
--- a/mrmustard/training/parametrized.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2022 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This module contains the :class:`.Parametrized` class which acts as
-an abstract base class for all parametrized objects. Arguments of the
-class constructor generate a backend Tensor and are assigned to fields
-of the class.
-"""
-
-from typing import Any, Generator, List, Sequence, Tuple, Mapping
-
-import numpy as np
-
-from mrmustard.math import Math
-from mrmustard.training.parameter import (
- Constant,
- Parameter,
- Trainable,
- create_parameter,
-)
-from mrmustard.typing import Tensor
-
-math = Math()
-
-__all__ = ["Parametrized"]
-
-
-class Parametrized:
- r"""A class representing all parametrized objects (gates, detectors, etc.). This class
- creates backend tensors out of the arguments of its class constructor and assigns it
- to fields of the parent class. The main role of this class is classifying and providing
- methods to keep track of trainable parameters.
-
- For each trainable parameter keyword arguments must be passed for the initial value ``xxx``
- (tensor), the numerical bounds ``xxx_bounds`` (float, float), whether the parameter ``xxx`` will
- be optimized ``xxx_trainable`` (bool), along with any other parameters.
- """
-
- def __init__(self, **kwargs): # NOTE: only kwargs so that we can use the arg names
- owner = f"{self.__class__.__qualname__}"
- self.param_names = [] # list of parameter names to preserve order
- for name, value in kwargs.items():
- # filter out `{name}_trainable` or `{name}_bounds`` to become fields
- # of the class as those kwargs are used to define the variables
- if "_trainable" in name or "_bounds" in name:
- continue
-
- # convert into parameter class
- is_trainable = kwargs.get(f"{name}_trainable", False) or math.is_trainable(value)
- bounds = kwargs.get(f"{name}_bounds", None)
- param = create_parameter(value, name, is_trainable, bounds, owner)
-
- # dynamically assign parameter as attribute of the class
- self.__dict__[name] = param
- self.param_names.append(name)
-
- def param_string(self, decimals: int) -> str:
- r"""Returns a string representation of the parameter values, separated by commas and rounded
- to the specified number of decimals. It includes only the parameters that are not arrays
- and not the number of modes, or other parameters that are not in principle trainable.
- Keeps the order of the parameters as they are defined in the class constructor.
-
- Args:
- decimals (int): number of decimals to round to
-
- Returns:
- str: string representation of the parameter values
- """
- strings = []
- for name, value in self.kw_parameters:
- value = math.asnumpy(value)
- if value.ndim == 0: # don't show arrays
- sign = "-" if value < 0 else ""
- value = np.abs(np.round(value, decimals))
- int_part = int(value)
- decimal_part = np.round(value - int_part, decimals)
- string = sign + str(int_part) + f"{decimal_part:.{decimals}g}".lstrip("0")
- else:
- string = f"{name}"
- strings.append(string)
- return ", ".join(strings)
-
- @property
- def kw_parameters(self) -> Tuple[Tuple[str, Tensor]]:
- r"""Return a list of parameters within the Parametrized object
- if they have been passed as keyword arguments to the class constructor.
- """
- return tuple((name, getattr(self, name).value) for name in self.param_names)
-
- @property
- def trainable_parameters(self) -> Sequence[Trainable]:
- """Return a list of trainable parameters within the Parametrized object
- by recursively traversing the object's fields
- """
- return list(_traverse_parametrized(self.__dict__, Trainable))
-
- @property
- def constant_parameters(self) -> List[Constant]:
- """Return a list of constant parameters within the Parametrized object
- by recursively traversing the object's fields
- """
- return list(_traverse_parametrized(self.__dict__, Constant))
-
- def traverse_trainables(self, owner_tag=None) -> Mapping[str, Trainable]:
- """Return a dict of trainable parameters within the Parametrized object
- by recursively traversing the object's fields. The key for each parameter
- will be the path of tags for reaching it from the top level Parametrized.
- """
- owner_tag = owner_tag or f"{self.__class__.__qualname__}"
- return dict(_traverse_parametrized(self.__dict__, Trainable, owner_tag))
-
- def traverse_constants(self, owner_tag=None) -> Mapping[str, Constant]:
- """Return a dict of constant parameters within the Parametrized object
- by recursively traversing the object's fields. The key for each parameter
- will be the path of tags for reaching it from the top level Parametrized.
- """
- owner_tag = owner_tag or f"{self.__class__.__qualname__}"
- return dict(_traverse_parametrized(self.__dict__, Constant, owner_tag))
-
-
-def _traverse_parametrized_untagged(object_: Sequence, extract_type: Parameter) -> Generator:
- """This private method traverses recursively all the object's attributes for objects
- present in ``iterable`` which are instances of ``parameter_type`` or ``Parametrized``
- returning a generator with objects of type ``extract_type``.
- """
- for obj in object_:
- if isinstance(
- obj, (List, Tuple, Mapping)
- ): # pylint: disable=isinstance-second-argument-not-valid-type
- yield from _traverse_parametrized(obj, extract_type)
- elif isinstance(obj, Parametrized):
- yield from _traverse_parametrized(obj.__dict__.values(), extract_type)
- elif isinstance(obj, extract_type):
- yield obj
-
-
-def _traverse_parametrized_tagged(
- object_: Mapping, extract_type: Parameter, owner_tag: str = None
-) -> Generator:
- """This private method traverses recursively, while accumulating tags, all the object's
- attributes for objects present in ``iterable`` which are instances of ``parameter_type``
- or ``Parametrized`` returning a generator of 2-tuples of the form (str, ``extract_type``).
- """
-
- delim = "/"
- for k, obj in object_.items():
- obj_tag = f"{owner_tag}[{k}]" if isinstance(k, int) else f"{owner_tag}{delim}{k}"
- if isinstance(obj, (Mapping, List, Tuple)):
- yield from _traverse_parametrized(obj, extract_type, owner_tag=obj_tag)
- elif isinstance(obj, Parametrized):
- yield from _traverse_parametrized(obj.__dict__, extract_type, owner_tag=obj_tag)
- elif isinstance(obj, extract_type):
- yield obj_tag, obj
-
-
-def _traverse_parametrized(
- object_: Any, extract_type: Parameter, owner_tag: str = None
-) -> Generator:
- """The recursive parameter traversal to be used for both tagged and untagged collection
- Depending on if the argument `owner_tag` is provided.
- """
-
- if owner_tag:
- yield from _traverse_parametrized_tagged(
- object_=dict(enumerate(object_)) if isinstance(object_, Sequence) else object_,
- extract_type=extract_type,
- owner_tag=owner_tag,
- )
- else:
- yield from _traverse_parametrized_untagged(
- object_=list(object_.values()) if isinstance(object_, Mapping) else object_,
- extract_type=extract_type,
- )
diff --git a/mrmustard/training/progress_bar.py b/mrmustard/training/progress_bar.py
new file mode 100644
index 000000000..a1497f8fe
--- /dev/null
+++ b/mrmustard/training/progress_bar.py
@@ -0,0 +1,60 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module containing classes and methods for progress bars."""
+
+from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn
+from mrmustard import settings
+
+
+# pylint: disable=disallowed-name
+class ProgressBar:
+ "A spiffy loading bar to display the progress during an optimization."
+
+ def __init__(self, max_steps: int):
+ self.taskID = None
+ if max_steps == 0:
+ self.bar = Progress(
+ TextColumn("Step {task.completed}/∞"),
+ BarColumn(),
+ TextColumn("Cost = {task.fields[loss]:.5f}"),
+ )
+ else:
+ self.bar = Progress(
+ TextColumn("Step {task.completed}/{task.total} | {task.fields[speed]:.1f} it/s"),
+ BarColumn(),
+ TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+ TextColumn("Cost = {task.fields[loss]:.5f} | ⏳ "),
+ TimeRemainingColumn(),
+ )
+ self.taskID = self.bar.add_task(
+ description="Optimizing...",
+ start=max_steps > 0,
+ speed=0.0,
+ total=max_steps,
+ loss=1.0,
+ refresh=True,
+ visible=settings.PROGRESSBAR,
+ )
+
+ def step(self, loss):
+ """Update bar step and the loss information associated with it."""
+ speed = self.bar.tasks[0].speed or 0.0
+ self.bar.update(self.taskID, advance=1, refresh=True, speed=speed, loss=loss)
+
+ def __enter__(self):
+ return self.bar.__enter__()
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return self.bar.__exit__(exc_type, exc_val, exc_tb)
diff --git a/mrmustard/training/trainer.py b/mrmustard/training/trainer.py
index 79b3b1e12..215f368ea 100644
--- a/mrmustard/training/trainer.py
+++ b/mrmustard/training/trainer.py
@@ -106,8 +106,7 @@ def cost_fn(circ=make_circ(0.1), y_targ=0.):
import numpy as np
from rich.progress import track
-import mrmustard as mm
-
+from mrmustard import settings
from .optimizer import Optimizer
@@ -158,7 +157,7 @@ def train_device(
"""
- setting_updates, kwargs = update_pop(mm.settings, **kwargs)
+ setting_updates, kwargs = update_pop(settings, **kwargs)
input_kwargs = kwargs.copy() if return_kwargs else {}
diff --git a/mrmustard/utils/argsort.py b/mrmustard/utils/argsort.py
new file mode 100644
index 000000000..efab6fa9a
--- /dev/null
+++ b/mrmustard/utils/argsort.py
@@ -0,0 +1,65 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Sorting functions"""
+
+from collections import defaultdict
+from typing import Generator
+
+import numpy as np
+
+
+def argsort_gen(generators: list[Generator[float, None, None]]) -> list[int]:
+ r"""
+ Sorts a list of generator objects based on their yielded values.
+
+ This function takes a list of generator objects, each yielding a sequence of numbers.
+ It sorts the generators based on their first yielded values. If multiple generators
+ yield the same first value, the function recursively sorts them based on their next
+ yielded values. The result is a list of indices that would sort the original list of
+ generators.
+
+ Parameters:
+        generators (list): A list of generator objects, each yielding a sequence of numbers.
+
+ Returns:
+ list: A list of indices that would sort the original list of generators.
+ """
+ vals = []
+ for gen in generators:
+ try:
+ vals.append(next(gen))
+ except StopIteration:
+ vals.append(np.inf)
+
+ if np.allclose(vals, np.inf):
+ return [i for i in range(len(generators))]
+
+ # dict with values and indices where they occur
+ vals_dict = defaultdict(list)
+ for i, val in enumerate(vals):
+ vals_dict[val].append(i)
+
+ # sort dict by keys (vals)
+ vals_dict = {key: vals_dict[key] for key in sorted(vals_dict)}
+
+ # if there are multiple values in the same pool, sort them with argsort_gen
+ for val, index_pool in vals_dict.items():
+ if len(index_pool) > 1:
+ sub_gen_list = [generators[i] for i in index_pool]
+ sub_order = argsort_gen(sub_gen_list)
+ pool_sorted = [index_pool[i] for i in sub_order]
+ vals_dict[val] = pool_sorted
+
+ return [i for pool in vals_dict.values() for i in pool]
diff --git a/mrmustard/utils/filters.py b/mrmustard/utils/filters.py
new file mode 100644
index 000000000..929265e32
--- /dev/null
+++ b/mrmustard/utils/filters.py
@@ -0,0 +1,66 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module contains a class to filter undesired warnings.
+"""
+
+import logging
+
+
+class WarningFilters(logging.Filter):
+ r"""
+ A custom logging filter to selectively allow log records based on specific warnings.
+
+ Args:
+ warnings: A list of warning messages that must be filtered.
+ """
+
+ def __init__(self, warnings: list[str]):
+ super().__init__()
+ self.warnings = warnings
+
+ def filter(self, record) -> bool:
+ r"""
+ Determine if the log record should be allowed based on specific warnings.
+
+ Args:
+ record: The ``LogRecord`` to be filtered.
+
+ Returns:
+ ``True`` if the log record should be allowed, ``False`` otherwise.
+ """
+ return any(w in record.getMessage() for w in self.warnings)
+
+
+# ComplexWarning filter for tensorflow.
+msg = "WARNING:tensorflow:You are casting an input of type complex128 to an incompatible dtype float64."
+msg += " This will discard the imaginary part and may not be what you intended."
+complex_warning_filter = WarningFilters([msg])
+
+
+def add_complex_warning_filter():
+    r"""
+    Adds the filter for tensorflow's ComplexWarning, or does nothing if the filter is already in place.
+    """
+    logger = logging.getLogger("tensorflow")
+    logger.addFilter(complex_warning_filter)
+
+
+def remove_complex_warning_filter():
+    r"""
+    Removes the filter for tensorflow's ComplexWarning, or does nothing if no such filter is present.
+    """
+    logger = logging.getLogger("tensorflow")
+    logger.removeFilter(complex_warning_filter)
diff --git a/mrmustard/utils/graphics.py b/mrmustard/utils/graphics.py
deleted file mode 100644
index f0eb4117f..000000000
--- a/mrmustard/utils/graphics.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2021 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""A module containing utility classes and functions for graphical display."""
-
-from typing import Tuple
-from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn
-import matplotlib.pyplot as plt
-from matplotlib import cm
-import numpy as np
-from mrmustard import settings
-from mrmustard.physics.fock import quadrature_distribution
-from .wigner import wigner_discretized
-
-
-# pylint: disable=disallowed-name
-class Progressbar:
- "A spiffy loading bar to display the progress during an optimization."
-
- def __init__(self, max_steps: int):
- self.taskID = None
- if max_steps == 0:
- self.bar = Progress(
- TextColumn("Step {task.completed}/∞"),
- BarColumn(),
- TextColumn("Cost = {task.fields[loss]:.5f}"),
- )
- else:
- self.bar = Progress(
- TextColumn("Step {task.completed}/{task.total} | {task.fields[speed]:.1f} it/s"),
- BarColumn(),
- TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
- TextColumn("Cost = {task.fields[loss]:.5f} | ⏳ "),
- TimeRemainingColumn(),
- )
- self.taskID = self.bar.add_task(
- description="Optimizing...",
- start=max_steps > 0,
- speed=0.0,
- total=max_steps,
- loss=1.0,
- refresh=True,
- visible=settings.PROGRESSBAR,
- )
-
- def step(self, loss):
- """Update bar step and the loss information associated with it."""
- speed = self.bar.tasks[0].speed or 0.0
- self.bar.update(self.taskID, advance=1, refresh=True, speed=speed, loss=loss)
-
- def __enter__(self):
- return self.bar.__enter__()
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- return self.bar.__exit__(exc_type, exc_val, exc_tb)
-
-
-def mikkel_plot(
- rho: np.ndarray,
- xbounds: Tuple[int] = (-6, 6),
- ybounds: Tuple[int] = (-6, 6),
- **kwargs,
-): # pylint: disable=too-many-statements
- """Plots the Wigner function of a state given its density matrix.
-
- Args:
- rho (np.ndarray): density matrix of the state
- xbounds (Tuple[int]): range of the x axis
- ybounds (Tuple[int]): range of the y axis
-
- Keyword args:
- resolution (int): number of points used to calculate the wigner function
- xticks (Tuple[int]): ticks of the x axis
- xtick_labels (Optional[Tuple[str]]): labels of the x axis; if None uses default formatter
- yticks (Tuple[int]): ticks of the y axis
- ytick_labels (Optional[Tuple[str]]): labels of the y axis; if None uses default formatter
- grid (bool): whether to display the grid
- cmap (matplotlib.colormap): colormap of the figure
-
- Returns:
- tuple: figure and axes
- """
-
- plot_args = {
- "resolution": 200,
- "xticks": (-5, 0, 5),
- "xtick_labels": None,
- "yticks": (-5, 0, 5),
- "ytick_labels": None,
- "grid": False,
- "cmap": cm.RdBu,
- }
- plot_args.update(kwargs)
-
- if plot_args["xtick_labels"] is None:
- plot_args["xtick_labels"] = plot_args["xticks"]
- if plot_args["ytick_labels"] is None:
- plot_args["ytick_labels"] = plot_args["yticks"]
-
- q, ProbX = quadrature_distribution(rho)
- p, ProbP = quadrature_distribution(rho, np.pi / 2)
-
- xvec = np.linspace(*xbounds, plot_args["resolution"])
- pvec = np.linspace(*ybounds, plot_args["resolution"])
- W, X, P = wigner_discretized(rho, xvec, pvec)
-
- ### PLOTTING ###
-
- fig, ax = plt.subplots(
- 2,
- 2,
- figsize=(6, 6),
- gridspec_kw={"width_ratios": [2, 1], "height_ratios": [1, 2]},
- )
- plt.subplots_adjust(wspace=0.05, hspace=0.05)
-
- # Wigner function
- ax[1][0].contourf(X, P, W, 120, cmap=plot_args["cmap"], vmin=-abs(W).max(), vmax=abs(W).max())
- ax[1][0].set_xlabel("$x$", fontsize=12)
- ax[1][0].set_ylabel("$p$", fontsize=12)
- ax[1][0].get_xaxis().set_ticks(plot_args["xticks"])
- ax[1][0].xaxis.set_ticklabels(plot_args["xtick_labels"])
- ax[1][0].get_yaxis().set_ticks(plot_args["yticks"])
- ax[1][0].yaxis.set_ticklabels(plot_args["ytick_labels"], rotation="vertical", va="center")
- ax[1][0].tick_params(direction="in")
- ax[1][0].set_xlim(xbounds)
- ax[1][0].set_ylim(ybounds)
- ax[1][0].grid(plot_args["grid"])
-
- # X quadrature probability distribution
- ax[0][0].fill(q, ProbX, color=plot_args["cmap"](0.5))
- ax[0][0].plot(q, ProbX, color=plot_args["cmap"](0.8))
- ax[0][0].get_xaxis().set_ticks(plot_args["xticks"])
- ax[0][0].xaxis.set_ticklabels([])
- ax[0][0].get_yaxis().set_ticks([])
- ax[0][0].tick_params(direction="in")
- ax[0][0].set_ylabel("Prob($x$)", fontsize=12)
- ax[0][0].set_xlim(xbounds)
- ax[0][0].set_ylim([0, 1.1 * max(ProbX)])
- ax[0][0].grid(plot_args["grid"])
-
- # P quadrature probability distribution
- ax[1][1].fill(ProbP, p, color=plot_args["cmap"](0.5))
- ax[1][1].plot(ProbP, p, color=plot_args["cmap"](0.8))
- ax[1][1].get_xaxis().set_ticks([])
- ax[1][1].get_yaxis().set_ticks(plot_args["yticks"])
- ax[1][1].yaxis.set_ticklabels([])
- ax[1][1].tick_params(direction="in")
- ax[1][1].set_xlabel("Prob($p$)", fontsize=12)
- ax[1][1].set_xlim([0, 1.1 * max(ProbP)])
- ax[1][1].set_ylim(ybounds)
- ax[1][1].grid(plot_args["grid"])
-
- # Density matrix
- ax[0][1].matshow(abs(rho), cmap=plot_args["cmap"], vmin=-abs(rho).max(), vmax=abs(rho).max())
- ax[0][1].set_title(r"abs($\rho$)", fontsize=12)
- ax[0][1].tick_params(direction="in")
- ax[0][1].get_xaxis().set_ticks([])
- ax[0][1].get_yaxis().set_ticks([])
- ax[0][1].set_aspect("auto")
- ax[0][1].set_ylabel(f"cutoff = {len(rho)}", fontsize=12)
- ax[0][1].yaxis.set_label_position("right")
-
- return fig, ax
diff --git a/mrmustard/logger.py b/mrmustard/utils/logger.py
similarity index 100%
rename from mrmustard/logger.py
rename to mrmustard/utils/logger.py
diff --git a/mrmustard/settings.py b/mrmustard/utils/settings.py
similarity index 68%
rename from mrmustard/settings.py
rename to mrmustard/utils/settings.py
index 8036c4763..446732ff1 100644
--- a/mrmustard/settings.py
+++ b/mrmustard/utils/settings.py
@@ -15,10 +15,14 @@
"""A module containing the settings.
"""
+from typing import Any
+import os
from rich import print
import rich.table
import numpy as np
+from mrmustard.utils.filters import add_complex_warning_filter, remove_complex_warning_filter
+
__all__ = ["Settings", "settings"]
@@ -30,7 +34,7 @@ class ImmutableSetting:
name (str): the name of this setting
"""
- def __init__(self, value: any, name: str) -> None:
+ def __init__(self, value: Any, name: str) -> None:
self._value = value
self._name = name
self._is_immutable = False
@@ -79,7 +83,6 @@ def __new__(cls): # singleton
return cls.instance
def __init__(self):
- self._backend = "tensorflow"
self._hbar = ImmutableSetting(2.0, "HBAR")
self._debug = False
self._autocutoff_probability = 0.999 # capture at least 99.9% of the probability
@@ -100,6 +103,15 @@ def __init__(self):
self._seed = np.random.randint(0, 2**31 - 1)
self.rng = np.random.default_rng(self._seed)
self._default_bs_method = "vanilla" # can be 'vanilla' or 'schwinger'
+ self._precision_bits_hermite_poly = 128
+ self._julia_initialized = (
+ False # set to True when Julia is initialized (cf. PRECISION_BITS_HERMITE_POLY.setter)
+ )
+ self._complex_warning = False
+
+ def _force_hbar(self, value):
+        r"Can set the value of HBAR at any time. Use with caution."
+ self._hbar._value = value
@property
def AUTOCUTOFF_MAX_CUTOFF(self):
@@ -107,7 +119,7 @@ def AUTOCUTOFF_MAX_CUTOFF(self):
return self._autocutoff_max_cutoff
@AUTOCUTOFF_MAX_CUTOFF.setter
- def AUTOCUTOFF_MAX_CUTOFF(self, value: str):
+ def AUTOCUTOFF_MAX_CUTOFF(self, value: int):
self._autocutoff_max_cutoff = value
@property
@@ -116,7 +128,7 @@ def AUTOCUTOFF_MIN_CUTOFF(self):
return self._autocutoff_min_cutoff
@AUTOCUTOFF_MIN_CUTOFF.setter
- def AUTOCUTOFF_MIN_CUTOFF(self, value: str):
+ def AUTOCUTOFF_MIN_CUTOFF(self, value: int):
self._autocutoff_min_cutoff = value
@property
@@ -125,32 +137,31 @@ def AUTOCUTOFF_PROBABILITY(self):
return self._autocutoff_probability
@AUTOCUTOFF_PROBABILITY.setter
- def AUTOCUTOFF_PROBABILITY(self, value: str):
+ def AUTOCUTOFF_PROBABILITY(self, value: float):
self._autocutoff_probability = value
- @property
- def BACKEND(self):
- r"""The backend which is used. Default is ``tensorflow``.
-
- Can be either ``'tensorflow'`` or ``'torch'``.
- """
- return self._backend
-
- @BACKEND.setter
- def BACKEND(self, value: str):
- if value not in ["tensorflow", "torch"]: # pragma: no cover
- raise ValueError("Backend must be either 'tensorflow' or 'torch'")
- self._backend = value
-
@property
def CIRCUIT_DECIMALS(self):
r"""The number of decimals displayed when drawing a circuit with parameters. Default is ``3``."""
return self._circuit_decimals
@CIRCUIT_DECIMALS.setter
- def CIRCUIT_DECIMALS(self, value: str):
+ def CIRCUIT_DECIMALS(self, value: int):
self._circuit_decimals = value
+ @property
+ def COMPLEX_WARNING(self):
+        r"""Whether tensorflow's ``ComplexWarning``s should be raised when a complex is cast to a float. Default is ``False``."""
+ return self._complex_warning
+
+ @COMPLEX_WARNING.setter
+ def COMPLEX_WARNING(self, value: bool):
+ self._complex_warning = value
+ if value:
+ remove_complex_warning_filter()
+ else:
+ add_complex_warning_filter()
+
@property
def DEBUG(self):
r"""Whether or not to print the vector of means and the covariance matrix alongside the
@@ -159,7 +170,7 @@ def DEBUG(self):
return self._debug
@DEBUG.setter
- def DEBUG(self, value: str):
+ def DEBUG(self, value: bool):
self._debug = value
@property
@@ -194,7 +205,7 @@ def EQ_TRANSFORMATION_CUTOFF(self):
return self._eq_transformation_cutoff
@EQ_TRANSFORMATION_CUTOFF.setter
- def EQ_TRANSFORMATION_CUTOFF(self, value: str):
+ def EQ_TRANSFORMATION_CUTOFF(self, value: int):
self._eq_transformation_cutoff = value
@property
@@ -204,7 +215,7 @@ def EQ_TRANSFORMATION_RTOL_FOCK(self):
return self._eq_transformation_rtol_fock
@EQ_TRANSFORMATION_RTOL_FOCK.setter
- def EQ_TRANSFORMATION_RTOL_FOCK(self, value: str):
+ def EQ_TRANSFORMATION_RTOL_FOCK(self, value: float):
self._eq_transformation_rtol_fock = value
@property
@@ -214,7 +225,7 @@ def EQ_TRANSFORMATION_RTOL_GAUSS(self):
return self._eq_transformation_rtol_gauss
@EQ_TRANSFORMATION_RTOL_GAUSS.setter
- def EQ_TRANSFORMATION_RTOL_GAUSS(self, value: str):
+ def EQ_TRANSFORMATION_RTOL_GAUSS(self, value: float):
self._eq_transformation_rtol_gauss = value
@property
@@ -226,7 +237,7 @@ def HBAR(self):
return self._hbar.value
@HBAR.setter
- def HBAR(self, value: str):
+ def HBAR(self, value: float):
self._hbar.value = value
@property
@@ -235,7 +246,7 @@ def HOMODYNE_SQUEEZING(self):
return self._homodyne_squeezing
@HOMODYNE_SQUEEZING.setter
- def HOMODYNE_SQUEEZING(self, value: str):
+ def HOMODYNE_SQUEEZING(self, value: float):
self._homodyne_squeezing = value
@property
@@ -244,7 +255,7 @@ def PNR_INTERNAL_CUTOFF(self):
return self._pnr_internal_cutoff
@PNR_INTERNAL_CUTOFF.setter
- def PNR_INTERNAL_CUTOFF(self, value: str):
+ def PNR_INTERNAL_CUTOFF(self, value: int):
self._pnr_internal_cutoff = value
@property
@@ -253,7 +264,7 @@ def PROGRESSBAR(self):
return self._progressbar
@PROGRESSBAR.setter
- def PROGRESSBAR(self, value: str):
+ def PROGRESSBAR(self, value: bool):
self._progressbar = value
@property
@@ -265,10 +276,56 @@ def SEED(self):
return self._seed
@SEED.setter
- def SEED(self, value):
+ def SEED(self, value: int):
self._seed = value
self.rng = np.random.default_rng(self._seed)
+ @property
+ def PRECISION_BITS_HERMITE_POLY(self):
+ r"""
+ The number of bits used to represent a single Fock amplitude when calculating Hermite polynomials.
+ Default is 128 (i.e. the Fock representation has dtype complex128).
+ Currently allowed values: 128, 256, 384, 512
+ """
+ return self._precision_bits_hermite_poly
+
+ @PRECISION_BITS_HERMITE_POLY.setter
+ def PRECISION_BITS_HERMITE_POLY(self, value: int):
+ allowed_values = [128, 256, 384, 512]
+ if value not in allowed_values:
+ raise ValueError(
+ f"precision_bits_hermite_poly must be one of the following values: {allowed_values}"
+ )
+ self._precision_bits_hermite_poly = value
+ if (
+ value != 128 and not self._julia_initialized
+ ): # initialize Julia when precision > complex128 and if it wasn't initialized before
+ from julia.api import LibJulia # pylint: disable=import-outside-toplevel
+
+ # the next line must be run before "from julia import Main as Main_julia"
+ LibJulia.load().init_julia(
+ ["--compiled-modules=no", "--project=julia_pkg"]
+ ) # also loads julia environment
+ # the next line must be run after "LibJulia.load().init_julia()"
+ from julia import Main as Main_julia # pylint: disable=import-outside-toplevel
+
+ # import Julia functions
+ utils_directory = os.path.dirname(__file__)
+ Main_julia.cd(utils_directory)
+ Main_julia.include("../math/lattice/strategies/julia/getPrecision.jl")
+ Main_julia.include("../math/lattice/strategies/julia/vanilla.jl")
+ Main_julia.include("../math/lattice/strategies/julia/compactFock/helperFunctions.jl")
+ Main_julia.include("../math/lattice/strategies/julia/compactFock/diagonal_amps.jl")
+ Main_julia.include("../math/lattice/strategies/julia/compactFock/diagonal_grad.jl")
+ Main_julia.include(
+ "../math/lattice/strategies/julia/compactFock/singleLeftoverMode_amps.jl"
+ )
+ Main_julia.include(
+ "../math/lattice/strategies/julia/compactFock/singleLeftoverMode_grad.jl"
+ )
+
+ self._julia_initialized = True
+
# use rich.table to print the settings
def __repr__(self) -> str:
r"""Returns a string representation of the settings."""
diff --git a/mrmustard/typing.py b/mrmustard/utils/typing.py
similarity index 92%
rename from mrmustard/typing.py
rename to mrmustard/utils/typing.py
index 9a8508a0f..84a157784 100644
--- a/mrmustard/typing.py
+++ b/mrmustard/utils/typing.py
@@ -69,6 +69,8 @@
IntTensor = np.ndarray[Tuple[int, ...], Z]
UIntTensor = np.ndarray[Tuple[int, ...], N]
+
+# Revisit when requiring python 3.12 (see PEP 695)
T_co = TypeVar(
"T_co",
RealVector,
@@ -90,6 +92,7 @@
@runtime_checkable
-class Batch(Protocol[T_co]): # pylint: disable=missing-class-docstring
- def __iter__(self) -> Iterator[T_co]:
- ...
+class Batch(Protocol[T_co]):
+ r"""Anything that can iterate over objects of type T_co."""
+
+ def __iter__(self) -> Iterator[T_co]: ...
diff --git a/poetry.lock b/poetry.lock
index 09fa10868..9bab967b6 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,14 +1,14 @@
-# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
[[package]]
name = "absl-py"
-version = "2.0.0"
+version = "2.1.0"
description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
optional = false
python-versions = ">=3.7"
files = [
- {file = "absl-py-2.0.0.tar.gz", hash = "sha256:d9690211c5fcfefcdd1a45470ac2b5c5acd45241c3af71eed96bc5441746c0d5"},
- {file = "absl_py-2.0.0-py3-none-any.whl", hash = "sha256:9a28abb62774ae4e8edbe2dd4c49ffcd45a6a848952a5eccc6a49f3f0fc1e2f3"},
+ {file = "absl-py-2.1.0.tar.gz", hash = "sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff"},
+ {file = "absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308"},
]
[[package]]
@@ -27,13 +27,13 @@ frozenlist = ">=1.1.0"
[[package]]
name = "alabaster"
-version = "0.7.13"
-description = "A configurable sidebar-enabled Sphinx theme"
+version = "0.7.16"
+description = "A light, configurable Sphinx theme"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.9"
files = [
- {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"},
- {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
+ {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"},
+ {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"},
]
[[package]]
@@ -78,43 +78,34 @@ files = [
six = ">=1.6.1,<2.0"
wheel = ">=0.23.0,<1.0"
-[[package]]
-name = "atomicwrites"
-version = "1.4.1"
-description = "Atomic file writes."
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-files = [
- {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"},
-]
-
[[package]]
name = "attrs"
-version = "23.1.0"
+version = "23.2.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.7"
files = [
- {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
- {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+ {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"},
+ {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"},
]
[package.extras]
cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
-dev = ["attrs[docs,tests]", "pre-commit"]
+dev = ["attrs[tests]", "pre-commit"]
docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
tests = ["attrs[tests-no-zope]", "zope-interface"]
-tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
+tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]
[[package]]
name = "babel"
-version = "2.13.1"
+version = "2.14.0"
description = "Internationalization utilities"
optional = false
python-versions = ">=3.7"
files = [
- {file = "Babel-2.13.1-py3-none-any.whl", hash = "sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
- {file = "Babel-2.13.1.tar.gz", hash = "sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
+ {file = "Babel-2.14.0-py3-none-any.whl", hash = "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"},
+ {file = "Babel-2.14.0.tar.gz", hash = "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363"},
]
[package.extras]
@@ -122,29 +113,33 @@ dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
[[package]]
name = "black"
-version = "23.10.1"
+version = "24.1.1"
description = "The uncompromising code formatter."
optional = false
python-versions = ">=3.8"
files = [
- {file = "black-23.10.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:ec3f8e6234c4e46ff9e16d9ae96f4ef69fa328bb4ad08198c8cee45bb1f08c69"},
- {file = "black-23.10.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:1b917a2aa020ca600483a7b340c165970b26e9029067f019e3755b56e8dd5916"},
- {file = "black-23.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c74de4c77b849e6359c6f01987e94873c707098322b91490d24296f66d067dc"},
- {file = "black-23.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:7b4d10b0f016616a0d93d24a448100adf1699712fb7a4efd0e2c32bbb219b173"},
- {file = "black-23.10.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b15b75fc53a2fbcac8a87d3e20f69874d161beef13954747e053bca7a1ce53a0"},
- {file = "black-23.10.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:e293e4c2f4a992b980032bbd62df07c1bcff82d6964d6c9496f2cd726e246ace"},
- {file = "black-23.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d56124b7a61d092cb52cce34182a5280e160e6aff3137172a68c2c2c4b76bcb"},
- {file = "black-23.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:3f157a8945a7b2d424da3335f7ace89c14a3b0625e6593d21139c2d8214d55ce"},
- {file = "black-23.10.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:cfcce6f0a384d0da692119f2d72d79ed07c7159879d0bb1bb32d2e443382bf3a"},
- {file = "black-23.10.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:33d40f5b06be80c1bbce17b173cda17994fbad096ce60eb22054da021bf933d1"},
- {file = "black-23.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:840015166dbdfbc47992871325799fd2dc0dcf9395e401ada6d88fe11498abad"},
- {file = "black-23.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:037e9b4664cafda5f025a1728c50a9e9aedb99a759c89f760bd83730e76ba884"},
- {file = "black-23.10.1-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:7cb5936e686e782fddb1c73f8aa6f459e1ad38a6a7b0e54b403f1f05a1507ee9"},
- {file = "black-23.10.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:7670242e90dc129c539e9ca17665e39a146a761e681805c54fbd86015c7c84f7"},
- {file = "black-23.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed45ac9a613fb52dad3b61c8dea2ec9510bf3108d4db88422bacc7d1ba1243d"},
- {file = "black-23.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:6d23d7822140e3fef190734216cefb262521789367fbdc0b3f22af6744058982"},
- {file = "black-23.10.1-py3-none-any.whl", hash = "sha256:d431e6739f727bb2e0495df64a6c7a5310758e87505f5f8cde9ff6c0f2d7e4fe"},
- {file = "black-23.10.1.tar.gz", hash = "sha256:1f8ce316753428ff68749c65a5f7844631aa18c8679dfd3ca9dc1a289979c258"},
+ {file = "black-24.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2588021038bd5ada078de606f2a804cadd0a3cc6a79cb3e9bb3a8bf581325a4c"},
+ {file = "black-24.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a95915c98d6e32ca43809d46d932e2abc5f1f7d582ffbe65a5b4d1588af7445"},
+ {file = "black-24.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fa6a0e965779c8f2afb286f9ef798df770ba2b6cee063c650b96adec22c056a"},
+ {file = "black-24.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:5242ecd9e990aeb995b6d03dc3b2d112d4a78f2083e5a8e86d566340ae80fec4"},
+ {file = "black-24.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fc1ec9aa6f4d98d022101e015261c056ddebe3da6a8ccfc2c792cbe0349d48b7"},
+ {file = "black-24.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0269dfdea12442022e88043d2910429bed717b2d04523867a85dacce535916b8"},
+ {file = "black-24.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3d64db762eae4a5ce04b6e3dd745dcca0fb9560eb931a5be97472e38652a161"},
+ {file = "black-24.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:5d7b06ea8816cbd4becfe5f70accae953c53c0e53aa98730ceccb0395520ee5d"},
+ {file = "black-24.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e2c8dfa14677f90d976f68e0c923947ae68fa3961d61ee30976c388adc0b02c8"},
+ {file = "black-24.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a21725862d0e855ae05da1dd25e3825ed712eaaccef6b03017fe0853a01aa45e"},
+ {file = "black-24.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07204d078e25327aad9ed2c64790d681238686bce254c910de640c7cc4fc3aa6"},
+ {file = "black-24.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:a83fe522d9698d8f9a101b860b1ee154c1d25f8a82ceb807d319f085b2627c5b"},
+ {file = "black-24.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08b34e85170d368c37ca7bf81cf67ac863c9d1963b2c1780c39102187ec8dd62"},
+ {file = "black-24.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7258c27115c1e3b5de9ac6c4f9957e3ee2c02c0b39222a24dc7aa03ba0e986f5"},
+ {file = "black-24.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40657e1b78212d582a0edecafef133cf1dd02e6677f539b669db4746150d38f6"},
+ {file = "black-24.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:e298d588744efda02379521a19639ebcd314fba7a49be22136204d7ed1782717"},
+ {file = "black-24.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:34afe9da5056aa123b8bfda1664bfe6fb4e9c6f311d8e4a6eb089da9a9173bf9"},
+ {file = "black-24.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:854c06fb86fd854140f37fb24dbf10621f5dab9e3b0c29a690ba595e3d543024"},
+ {file = "black-24.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3897ae5a21ca132efa219c029cce5e6bfc9c3d34ed7e892113d199c0b1b444a2"},
+ {file = "black-24.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:ecba2a15dfb2d97105be74bbfe5128bc5e9fa8477d8c46766505c1dda5883aac"},
+ {file = "black-24.1.1-py3-none-any.whl", hash = "sha256:5cdc2e2195212208fbcae579b931407c1fa9997584f0a415421748aeafff1168"},
+ {file = "black-24.1.1.tar.gz", hash = "sha256:48b5760dcbfe5cf97fd4fba23946681f3a81514c6ab8a45b50da67ac8fbc6c7b"},
]
[package.dependencies]
@@ -158,7 +153,7 @@ typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
[package.extras]
colorama = ["colorama (>=0.4.3)"]
-d = ["aiohttp (>=3.7.4)"]
+d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
uvloop = ["uvloop (>=0.15.2)"]
@@ -175,13 +170,13 @@ files = [
[[package]]
name = "certifi"
-version = "2023.7.22"
+version = "2024.2.2"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
- {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+ {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"},
+ {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"},
]
[[package]]
@@ -335,134 +330,126 @@ test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"]
[[package]]
name = "contourpy"
-version = "1.1.1"
+version = "1.2.0"
description = "Python library for calculating contours of 2D quadrilateral grids"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"},
- {file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"},
- {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae"},
- {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916"},
- {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0"},
- {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1"},
- {file = "contourpy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d"},
- {file = "contourpy-1.1.1-cp310-cp310-win32.whl", hash = "sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431"},
- {file = "contourpy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb"},
- {file = "contourpy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2"},
- {file = "contourpy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b"},
- {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b"},
- {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532"},
- {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e"},
- {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5"},
- {file = "contourpy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62"},
- {file = "contourpy-1.1.1-cp311-cp311-win32.whl", hash = "sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33"},
- {file = "contourpy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45"},
- {file = "contourpy-1.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a"},
- {file = "contourpy-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e"},
- {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442"},
- {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8"},
- {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7"},
- {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf"},
- {file = "contourpy-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d"},
- {file = "contourpy-1.1.1-cp312-cp312-win32.whl", hash = "sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6"},
- {file = "contourpy-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970"},
- {file = "contourpy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d"},
- {file = "contourpy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9"},
- {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217"},
- {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684"},
- {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce"},
- {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8"},
- {file = "contourpy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251"},
- {file = "contourpy-1.1.1-cp38-cp38-win32.whl", hash = "sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7"},
- {file = "contourpy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9"},
- {file = "contourpy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba"},
- {file = "contourpy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34"},
- {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887"},
- {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718"},
- {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f"},
- {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85"},
- {file = "contourpy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e"},
- {file = "contourpy-1.1.1-cp39-cp39-win32.whl", hash = "sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0"},
- {file = "contourpy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887"},
- {file = "contourpy-1.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e"},
- {file = "contourpy-1.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3"},
- {file = "contourpy-1.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23"},
- {file = "contourpy-1.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb"},
- {file = "contourpy-1.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163"},
- {file = "contourpy-1.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c"},
- {file = "contourpy-1.1.1.tar.gz", hash = "sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab"},
-]
-
-[package.dependencies]
-numpy = {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""}
+ {file = "contourpy-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0274c1cb63625972c0c007ab14dd9ba9e199c36ae1a231ce45d725cbcbfd10a8"},
+ {file = "contourpy-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ab459a1cbbf18e8698399c595a01f6dcc5c138220ca3ea9e7e6126232d102bb4"},
+ {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fdd887f17c2f4572ce548461e4f96396681212d858cae7bd52ba3310bc6f00f"},
+ {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d16edfc3fc09968e09ddffada434b3bf989bf4911535e04eada58469873e28e"},
+ {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c203f617abc0dde5792beb586f827021069fb6d403d7f4d5c2b543d87edceb9"},
+ {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b69303ceb2e4d4f146bf82fda78891ef7bcd80c41bf16bfca3d0d7eb545448aa"},
+ {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:884c3f9d42d7218304bc74a8a7693d172685c84bd7ab2bab1ee567b769696df9"},
+ {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4a1b1208102be6e851f20066bf0e7a96b7d48a07c9b0cfe6d0d4545c2f6cadab"},
+ {file = "contourpy-1.2.0-cp310-cp310-win32.whl", hash = "sha256:34b9071c040d6fe45d9826cbbe3727d20d83f1b6110d219b83eb0e2a01d79488"},
+ {file = "contourpy-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:bd2f1ae63998da104f16a8b788f685e55d65760cd1929518fd94cd682bf03e41"},
+ {file = "contourpy-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd10c26b4eadae44783c45ad6655220426f971c61d9b239e6f7b16d5cdaaa727"},
+ {file = "contourpy-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5c6b28956b7b232ae801406e529ad7b350d3f09a4fde958dfdf3c0520cdde0dd"},
+ {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebeac59e9e1eb4b84940d076d9f9a6cec0064e241818bcb6e32124cc5c3e377a"},
+ {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:139d8d2e1c1dd52d78682f505e980f592ba53c9f73bd6be102233e358b401063"},
+ {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e9dc350fb4c58adc64df3e0703ab076f60aac06e67d48b3848c23647ae4310e"},
+ {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18fc2b4ed8e4a8fe849d18dce4bd3c7ea637758c6343a1f2bae1e9bd4c9f4686"},
+ {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:16a7380e943a6d52472096cb7ad5264ecee36ed60888e2a3d3814991a0107286"},
+ {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8d8faf05be5ec8e02a4d86f616fc2a0322ff4a4ce26c0f09d9f7fb5330a35c95"},
+ {file = "contourpy-1.2.0-cp311-cp311-win32.whl", hash = "sha256:67b7f17679fa62ec82b7e3e611c43a016b887bd64fb933b3ae8638583006c6d6"},
+ {file = "contourpy-1.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:99ad97258985328b4f207a5e777c1b44a83bfe7cf1f87b99f9c11d4ee477c4de"},
+ {file = "contourpy-1.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:575bcaf957a25d1194903a10bc9f316c136c19f24e0985a2b9b5608bdf5dbfe0"},
+ {file = "contourpy-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9e6c93b5b2dbcedad20a2f18ec22cae47da0d705d454308063421a3b290d9ea4"},
+ {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:464b423bc2a009088f19bdf1f232299e8b6917963e2b7e1d277da5041f33a779"},
+ {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:68ce4788b7d93e47f84edd3f1f95acdcd142ae60bc0e5493bfd120683d2d4316"},
+ {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7d1f8871998cdff5d2ff6a087e5e1780139abe2838e85b0b46b7ae6cc25399"},
+ {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e739530c662a8d6d42c37c2ed52a6f0932c2d4a3e8c1f90692ad0ce1274abe0"},
+ {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:247b9d16535acaa766d03037d8e8fb20866d054d3c7fbf6fd1f993f11fc60ca0"},
+ {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:461e3ae84cd90b30f8d533f07d87c00379644205b1d33a5ea03381edc4b69431"},
+ {file = "contourpy-1.2.0-cp312-cp312-win32.whl", hash = "sha256:1c2559d6cffc94890b0529ea7eeecc20d6fadc1539273aa27faf503eb4656d8f"},
+ {file = "contourpy-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:491b1917afdd8638a05b611a56d46587d5a632cabead889a5440f7c638bc6ed9"},
+ {file = "contourpy-1.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5fd1810973a375ca0e097dee059c407913ba35723b111df75671a1976efa04bc"},
+ {file = "contourpy-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:999c71939aad2780f003979b25ac5b8f2df651dac7b38fb8ce6c46ba5abe6ae9"},
+ {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7caf9b241464c404613512d5594a6e2ff0cc9cb5615c9475cc1d9b514218ae8"},
+ {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:266270c6f6608340f6c9836a0fb9b367be61dde0c9a9a18d5ece97774105ff3e"},
+ {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbd50d0a0539ae2e96e537553aff6d02c10ed165ef40c65b0e27e744a0f10af8"},
+ {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11f8d2554e52f459918f7b8e6aa20ec2a3bce35ce95c1f0ef4ba36fbda306df5"},
+ {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ce96dd400486e80ac7d195b2d800b03e3e6a787e2a522bfb83755938465a819e"},
+ {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d3364b999c62f539cd403f8123ae426da946e142312a514162adb2addd8d808"},
+ {file = "contourpy-1.2.0-cp39-cp39-win32.whl", hash = "sha256:1c88dfb9e0c77612febebb6ac69d44a8d81e3dc60f993215425b62c1161353f4"},
+ {file = "contourpy-1.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:78e6ad33cf2e2e80c5dfaaa0beec3d61face0fb650557100ee36db808bfa6843"},
+ {file = "contourpy-1.2.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:be16975d94c320432657ad2402f6760990cb640c161ae6da1363051805fa8108"},
+ {file = "contourpy-1.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b95a225d4948b26a28c08307a60ac00fb8671b14f2047fc5476613252a129776"},
+ {file = "contourpy-1.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d7e03c0f9a4f90dc18d4e77e9ef4ec7b7bbb437f7f675be8e530d65ae6ef956"},
+ {file = "contourpy-1.2.0.tar.gz", hash = "sha256:171f311cb758de7da13fc53af221ae47a5877be5a0843a9fe150818c51ed276a"},
+]
+
+[package.dependencies]
+numpy = ">=1.20,<2.0"
[package.extras]
bokeh = ["bokeh", "selenium"]
docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"]
-mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pillow"]
+mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.6.1)", "types-Pillow"]
test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
-test-no-images = ["pytest", "pytest-cov", "wurlitzer"]
+test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"]
[[package]]
name = "coverage"
-version = "7.3.2"
+version = "7.4.1"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "coverage-7.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf"},
- {file = "coverage-7.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda"},
- {file = "coverage-7.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a"},
- {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c"},
- {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f"},
- {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6"},
- {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148"},
- {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9"},
- {file = "coverage-7.3.2-cp310-cp310-win32.whl", hash = "sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f"},
- {file = "coverage-7.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611"},
- {file = "coverage-7.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c"},
- {file = "coverage-7.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074"},
- {file = "coverage-7.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a"},
- {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3"},
- {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a"},
- {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1"},
- {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c"},
- {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312"},
- {file = "coverage-7.3.2-cp311-cp311-win32.whl", hash = "sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640"},
- {file = "coverage-7.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2"},
- {file = "coverage-7.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836"},
- {file = "coverage-7.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63"},
- {file = "coverage-7.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216"},
- {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4"},
- {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf"},
- {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf"},
- {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84"},
- {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a"},
- {file = "coverage-7.3.2-cp312-cp312-win32.whl", hash = "sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb"},
- {file = "coverage-7.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed"},
- {file = "coverage-7.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738"},
- {file = "coverage-7.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2"},
- {file = "coverage-7.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2"},
- {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c"},
- {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9"},
- {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82"},
- {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901"},
- {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76"},
- {file = "coverage-7.3.2-cp38-cp38-win32.whl", hash = "sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92"},
- {file = "coverage-7.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a"},
- {file = "coverage-7.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce"},
- {file = "coverage-7.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9"},
- {file = "coverage-7.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f"},
- {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25"},
- {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9"},
- {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6"},
- {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc"},
- {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083"},
- {file = "coverage-7.3.2-cp39-cp39-win32.whl", hash = "sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce"},
- {file = "coverage-7.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f"},
- {file = "coverage-7.3.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637"},
- {file = "coverage-7.3.2.tar.gz", hash = "sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef"},
+ {file = "coverage-7.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:077d366e724f24fc02dbfe9d946534357fda71af9764ff99d73c3c596001bbd7"},
+ {file = "coverage-7.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0193657651f5399d433c92f8ae264aff31fc1d066deee4b831549526433f3f61"},
+ {file = "coverage-7.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d17bbc946f52ca67adf72a5ee783cd7cd3477f8f8796f59b4974a9b59cacc9ee"},
+ {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3277f5fa7483c927fe3a7b017b39351610265308f5267ac6d4c2b64cc1d8d25"},
+ {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dceb61d40cbfcf45f51e59933c784a50846dc03211054bd76b421a713dcdf19"},
+ {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6008adeca04a445ea6ef31b2cbaf1d01d02986047606f7da266629afee982630"},
+ {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c61f66d93d712f6e03369b6a7769233bfda880b12f417eefdd4f16d1deb2fc4c"},
+ {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9bb62fac84d5f2ff523304e59e5c439955fb3b7f44e3d7b2085184db74d733b"},
+ {file = "coverage-7.4.1-cp310-cp310-win32.whl", hash = "sha256:f86f368e1c7ce897bf2457b9eb61169a44e2ef797099fb5728482b8d69f3f016"},
+ {file = "coverage-7.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:869b5046d41abfea3e381dd143407b0d29b8282a904a19cb908fa24d090cc018"},
+ {file = "coverage-7.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b8ffb498a83d7e0305968289441914154fb0ef5d8b3157df02a90c6695978295"},
+ {file = "coverage-7.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3cacfaefe6089d477264001f90f55b7881ba615953414999c46cc9713ff93c8c"},
+ {file = "coverage-7.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d6850e6e36e332d5511a48a251790ddc545e16e8beaf046c03985c69ccb2676"},
+ {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e961aa13b6d47f758cc5879383d27b5b3f3dcd9ce8cdbfdc2571fe86feb4dd"},
+ {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfd1e1b9f0898817babf840b77ce9fe655ecbe8b1b327983df485b30df8cc011"},
+ {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6b00e21f86598b6330f0019b40fb397e705135040dbedc2ca9a93c7441178e74"},
+ {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:536d609c6963c50055bab766d9951b6c394759190d03311f3e9fcf194ca909e1"},
+ {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ac8f8eb153724f84885a1374999b7e45734bf93a87d8df1e7ce2146860edef6"},
+ {file = "coverage-7.4.1-cp311-cp311-win32.whl", hash = "sha256:f3771b23bb3675a06f5d885c3630b1d01ea6cac9e84a01aaf5508706dba546c5"},
+ {file = "coverage-7.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:9d2f9d4cc2a53b38cabc2d6d80f7f9b7e3da26b2f53d48f05876fef7956b6968"},
+ {file = "coverage-7.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f68ef3660677e6624c8cace943e4765545f8191313a07288a53d3da188bd8581"},
+ {file = "coverage-7.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23b27b8a698e749b61809fb637eb98ebf0e505710ec46a8aa6f1be7dc0dc43a6"},
+ {file = "coverage-7.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3424c554391dc9ef4a92ad28665756566a28fecf47308f91841f6c49288e66"},
+ {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0860a348bf7004c812c8368d1fc7f77fe8e4c095d661a579196a9533778e156"},
+ {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3"},
+ {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3468cc8720402af37b6c6e7e2a9cdb9f6c16c728638a2ebc768ba1ef6f26c3a1"},
+ {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:02f2edb575d62172aa28fe00efe821ae31f25dc3d589055b3fb64d51e52e4ab1"},
+ {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ca6e61dc52f601d1d224526360cdeab0d0712ec104a2ce6cc5ccef6ed9a233bc"},
+ {file = "coverage-7.4.1-cp312-cp312-win32.whl", hash = "sha256:ca7b26a5e456a843b9b6683eada193fc1f65c761b3a473941efe5a291f604c74"},
+ {file = "coverage-7.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:85ccc5fa54c2ed64bd91ed3b4a627b9cce04646a659512a051fa82a92c04a448"},
+ {file = "coverage-7.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bdb0285a0202888d19ec6b6d23d5990410decb932b709f2b0dfe216d031d218"},
+ {file = "coverage-7.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:918440dea04521f499721c039863ef95433314b1db00ff826a02580c1f503e45"},
+ {file = "coverage-7.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:379d4c7abad5afbe9d88cc31ea8ca262296480a86af945b08214eb1a556a3e4d"},
+ {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b094116f0b6155e36a304ff912f89bbb5067157aff5f94060ff20bbabdc8da06"},
+ {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f5968608b1fe2a1d00d01ad1017ee27efd99b3437e08b83ded9b7af3f6f766"},
+ {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10e88e7f41e6197ea0429ae18f21ff521d4f4490aa33048f6c6f94c6045a6a75"},
+ {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a4a3907011d39dbc3e37bdc5df0a8c93853c369039b59efa33a7b6669de04c60"},
+ {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d224f0c4c9c98290a6990259073f496fcec1b5cc613eecbd22786d398ded3ad"},
+ {file = "coverage-7.4.1-cp38-cp38-win32.whl", hash = "sha256:23f5881362dcb0e1a92b84b3c2809bdc90db892332daab81ad8f642d8ed55042"},
+ {file = "coverage-7.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:a07f61fc452c43cd5328b392e52555f7d1952400a1ad09086c4a8addccbd138d"},
+ {file = "coverage-7.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8e738a492b6221f8dcf281b67129510835461132b03024830ac0e554311a5c54"},
+ {file = "coverage-7.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46342fed0fff72efcda77040b14728049200cbba1279e0bf1188f1f2078c1d70"},
+ {file = "coverage-7.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9641e21670c68c7e57d2053ddf6c443e4f0a6e18e547e86af3fad0795414a628"},
+ {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb2c2688ed93b027eb0d26aa188ada34acb22dceea256d76390eea135083950"},
+ {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12c923757de24e4e2110cf8832d83a886a4cf215c6e61ed506006872b43a6d1"},
+ {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0491275c3b9971cdbd28a4595c2cb5838f08036bca31765bad5e17edf900b2c7"},
+ {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8dfc5e195bbef80aabd81596ef52a1277ee7143fe419efc3c4d8ba2754671756"},
+ {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1a78b656a4d12b0490ca72651fe4d9f5e07e3c6461063a9b6265ee45eb2bdd35"},
+ {file = "coverage-7.4.1-cp39-cp39-win32.whl", hash = "sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c"},
+ {file = "coverage-7.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:64e723ca82a84053dd7bfcc986bdb34af8d9da83c521c19d6b472bc6880e191a"},
+ {file = "coverage-7.4.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:32a8d985462e37cfdab611a6f95b09d7c091d07668fdc26e47a725ee575fe166"},
+ {file = "coverage-7.4.1.tar.gz", hash = "sha256:1ed4b95480952b1a26d863e546fa5094564aa0065e1e5f0d4d0041f293251d04"},
]
[package.dependencies]
@@ -488,17 +475,17 @@ tests = ["pytest", "pytest-cov", "pytest-xdist"]
[[package]]
name = "dask"
-version = "2023.10.1"
+version = "2024.1.1"
description = "Parallel PyData with Task Scheduling"
optional = false
python-versions = ">=3.9"
files = [
- {file = "dask-2023.10.1-py3-none-any.whl", hash = "sha256:1fb0ee4d79e3c7c8f2e7c9f2680fd0ef0668801a10eaa290b970982b26a714da"},
- {file = "dask-2023.10.1.tar.gz", hash = "sha256:da3ef0526992845408df491fcd0b3a49c7207aa908a1675cea12ab2ea10c7940"},
+ {file = "dask-2024.1.1-py3-none-any.whl", hash = "sha256:860ce2797905095beff0187c214840b80c77d752dcb9098a8283e3655a762bf5"},
+ {file = "dask-2024.1.1.tar.gz", hash = "sha256:d0dc92e81ce68594a0a0ce23ba33f4d648f2c2f4217ab9b79068b7ecfb0416c7"},
]
[package.dependencies]
-click = ">=8.0"
+click = ">=8.1"
cloudpickle = ">=1.5.0"
fsspec = ">=2021.09.0"
importlib-metadata = ">=4.13.0"
@@ -509,10 +496,10 @@ toolz = ">=0.10.0"
[package.extras]
array = ["numpy (>=1.21)"]
-complete = ["dask[array,dataframe,diagnostics,distributed]", "lz4 (>=4.3.2)", "pyarrow (>=7.0)"]
+complete = ["dask[array,dataframe,diagnostics,distributed]", "lz4 (>=4.3.2)", "pyarrow (>=7.0)", "pyarrow-hotfix"]
dataframe = ["dask[array]", "pandas (>=1.3)"]
diagnostics = ["bokeh (>=2.4.2)", "jinja2 (>=2.10.3)"]
-distributed = ["distributed (==2023.10.1)"]
+distributed = ["distributed (==2024.1.1)"]
test = ["pandas[test]", "pre-commit", "pytest", "pytest-cov", "pytest-rerunfailures", "pytest-timeout", "pytest-xdist"]
[[package]]
@@ -585,6 +572,20 @@ files = [
{file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"},
]
+[[package]]
+name = "exceptiongroup"
+version = "1.2.0"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"},
+ {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
[[package]]
name = "filelock"
version = "3.13.1"
@@ -614,59 +615,59 @@ files = [
[[package]]
name = "fonttools"
-version = "4.43.1"
+version = "4.47.2"
description = "Tools to manipulate font files"
optional = false
python-versions = ">=3.8"
files = [
- {file = "fonttools-4.43.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bf11e2cca121df35e295bd34b309046c29476ee739753bc6bc9d5050de319273"},
- {file = "fonttools-4.43.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10b3922875ffcba636674f406f9ab9a559564fdbaa253d66222019d569db869c"},
- {file = "fonttools-4.43.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f727c3e3d08fd25352ed76cc3cb61486f8ed3f46109edf39e5a60fc9fecf6ca"},
- {file = "fonttools-4.43.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad0b3f6342cfa14be996971ea2b28b125ad681c6277c4cd0fbdb50340220dfb6"},
- {file = "fonttools-4.43.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b7ad05b2beeebafb86aa01982e9768d61c2232f16470f9d0d8e385798e37184"},
- {file = "fonttools-4.43.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c54466f642d2116686268c3e5f35ebb10e49b0d48d41a847f0e171c785f7ac7"},
- {file = "fonttools-4.43.1-cp310-cp310-win32.whl", hash = "sha256:1e09da7e8519e336239fbd375156488a4c4945f11c4c5792ee086dd84f784d02"},
- {file = "fonttools-4.43.1-cp310-cp310-win_amd64.whl", hash = "sha256:1cf9e974f63b1080b1d2686180fc1fbfd3bfcfa3e1128695b5de337eb9075cef"},
- {file = "fonttools-4.43.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5db46659cfe4e321158de74c6f71617e65dc92e54980086823a207f1c1c0e24b"},
- {file = "fonttools-4.43.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1952c89a45caceedf2ab2506d9a95756e12b235c7182a7a0fff4f5e52227204f"},
- {file = "fonttools-4.43.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c36da88422e0270fbc7fd959dc9749d31a958506c1d000e16703c2fce43e3d0"},
- {file = "fonttools-4.43.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bbbf8174501285049e64d174e29f9578495e1b3b16c07c31910d55ad57683d8"},
- {file = "fonttools-4.43.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d4071bd1c183b8d0b368cc9ed3c07a0f6eb1bdfc4941c4c024c49a35429ac7cd"},
- {file = "fonttools-4.43.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d21099b411e2006d3c3e1f9aaf339e12037dbf7bf9337faf0e93ec915991f43b"},
- {file = "fonttools-4.43.1-cp311-cp311-win32.whl", hash = "sha256:b84a1c00f832feb9d0585ca8432fba104c819e42ff685fcce83537e2e7e91204"},
- {file = "fonttools-4.43.1-cp311-cp311-win_amd64.whl", hash = "sha256:9a2f0aa6ca7c9bc1058a9d0b35483d4216e0c1bbe3962bc62ce112749954c7b8"},
- {file = "fonttools-4.43.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4d9740e3783c748521e77d3c397dc0662062c88fd93600a3c2087d3d627cd5e5"},
- {file = "fonttools-4.43.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:884ef38a5a2fd47b0c1291647b15f4e88b9de5338ffa24ee52c77d52b4dfd09c"},
- {file = "fonttools-4.43.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9648518ef687ba818db3fcc5d9aae27a369253ac09a81ed25c3867e8657a0680"},
- {file = "fonttools-4.43.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95e974d70238fc2be5f444fa91f6347191d0e914d5d8ae002c9aa189572cc215"},
- {file = "fonttools-4.43.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:34f713dad41aa21c637b4e04fe507c36b986a40f7179dcc86402237e2d39dcd3"},
- {file = "fonttools-4.43.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:360201d46165fc0753229afe785900bc9596ee6974833124f4e5e9f98d0f592b"},
- {file = "fonttools-4.43.1-cp312-cp312-win32.whl", hash = "sha256:bb6d2f8ef81ea076877d76acfb6f9534a9c5f31dc94ba70ad001267ac3a8e56f"},
- {file = "fonttools-4.43.1-cp312-cp312-win_amd64.whl", hash = "sha256:25d3da8a01442cbc1106490eddb6d31d7dffb38c1edbfabbcc8db371b3386d72"},
- {file = "fonttools-4.43.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8da417431bfc9885a505e86ba706f03f598c85f5a9c54f67d63e84b9948ce590"},
- {file = "fonttools-4.43.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:51669b60ee2a4ad6c7fc17539a43ffffc8ef69fd5dbed186a38a79c0ac1f5db7"},
- {file = "fonttools-4.43.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748015d6f28f704e7d95cd3c808b483c5fb87fd3eefe172a9da54746ad56bfb6"},
- {file = "fonttools-4.43.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7a58eb5e736d7cf198eee94844b81c9573102ae5989ebcaa1d1a37acd04b33d"},
- {file = "fonttools-4.43.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6bb5ea9076e0e39defa2c325fc086593ae582088e91c0746bee7a5a197be3da0"},
- {file = "fonttools-4.43.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5f37e31291bf99a63328668bb83b0669f2688f329c4c0d80643acee6e63cd933"},
- {file = "fonttools-4.43.1-cp38-cp38-win32.whl", hash = "sha256:9c60ecfa62839f7184f741d0509b5c039d391c3aff71dc5bc57b87cc305cff3b"},
- {file = "fonttools-4.43.1-cp38-cp38-win_amd64.whl", hash = "sha256:fe9b1ec799b6086460a7480e0f55c447b1aca0a4eecc53e444f639e967348896"},
- {file = "fonttools-4.43.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13a9a185259ed144def3682f74fdcf6596f2294e56fe62dfd2be736674500dba"},
- {file = "fonttools-4.43.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2adca1b46d69dce4a37eecc096fe01a65d81a2f5c13b25ad54d5430ae430b13"},
- {file = "fonttools-4.43.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18eefac1b247049a3a44bcd6e8c8fd8b97f3cad6f728173b5d81dced12d6c477"},
- {file = "fonttools-4.43.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2062542a7565091cea4cc14dd99feff473268b5b8afdee564f7067dd9fff5860"},
- {file = "fonttools-4.43.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18a2477c62a728f4d6e88c45ee9ee0229405e7267d7d79ce1f5ce0f3e9f8ab86"},
- {file = "fonttools-4.43.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a7a06f8d95b7496e53af80d974d63516ffb263a468e614978f3899a6df52d4b3"},
- {file = "fonttools-4.43.1-cp39-cp39-win32.whl", hash = "sha256:10003ebd81fec0192c889e63a9c8c63f88c7d72ae0460b7ba0cd2a1db246e5ad"},
- {file = "fonttools-4.43.1-cp39-cp39-win_amd64.whl", hash = "sha256:e117a92b07407a061cde48158c03587ab97e74e7d73cb65e6aadb17af191162a"},
- {file = "fonttools-4.43.1-py3-none-any.whl", hash = "sha256:4f88cae635bfe4bbbdc29d479a297bb525a94889184bb69fa9560c2d4834ddb9"},
- {file = "fonttools-4.43.1.tar.gz", hash = "sha256:17dbc2eeafb38d5d0e865dcce16e313c58265a6d2d20081c435f84dc5a9d8212"},
-]
-
-[package.extras]
-all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"]
+ {file = "fonttools-4.47.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3b629108351d25512d4ea1a8393a2dba325b7b7d7308116b605ea3f8e1be88df"},
+ {file = "fonttools-4.47.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c19044256c44fe299d9a73456aabee4b4d06c6b930287be93b533b4737d70aa1"},
+ {file = "fonttools-4.47.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8be28c036b9f186e8c7eaf8a11b42373e7e4949f9e9f370202b9da4c4c3f56c"},
+ {file = "fonttools-4.47.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f83a4daef6d2a202acb9bf572958f91cfde5b10c8ee7fb1d09a4c81e5d851fd8"},
+ {file = "fonttools-4.47.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5a5318ba5365d992666ac4fe35365f93004109d18858a3e18ae46f67907670"},
+ {file = "fonttools-4.47.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8f57ecd742545362a0f7186774b2d1c53423ed9ece67689c93a1055b236f638c"},
+ {file = "fonttools-4.47.2-cp310-cp310-win32.whl", hash = "sha256:a1c154bb85dc9a4cf145250c88d112d88eb414bad81d4cb524d06258dea1bdc0"},
+ {file = "fonttools-4.47.2-cp310-cp310-win_amd64.whl", hash = "sha256:3e2b95dce2ead58fb12524d0ca7d63a63459dd489e7e5838c3cd53557f8933e1"},
+ {file = "fonttools-4.47.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:29495d6d109cdbabe73cfb6f419ce67080c3ef9ea1e08d5750240fd4b0c4763b"},
+ {file = "fonttools-4.47.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0a1d313a415eaaba2b35d6cd33536560deeebd2ed758b9bfb89ab5d97dc5deac"},
+ {file = "fonttools-4.47.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90f898cdd67f52f18049250a6474185ef6544c91f27a7bee70d87d77a8daf89c"},
+ {file = "fonttools-4.47.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3480eeb52770ff75140fe7d9a2ec33fb67b07efea0ab5129c7e0c6a639c40c70"},
+ {file = "fonttools-4.47.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0255dbc128fee75fb9be364806b940ed450dd6838672a150d501ee86523ac61e"},
+ {file = "fonttools-4.47.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f791446ff297fd5f1e2247c188de53c1bfb9dd7f0549eba55b73a3c2087a2703"},
+ {file = "fonttools-4.47.2-cp311-cp311-win32.whl", hash = "sha256:740947906590a878a4bde7dd748e85fefa4d470a268b964748403b3ab2aeed6c"},
+ {file = "fonttools-4.47.2-cp311-cp311-win_amd64.whl", hash = "sha256:63fbed184979f09a65aa9c88b395ca539c94287ba3a364517698462e13e457c9"},
+ {file = "fonttools-4.47.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4ec558c543609e71b2275c4894e93493f65d2f41c15fe1d089080c1d0bb4d635"},
+ {file = "fonttools-4.47.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e040f905d542362e07e72e03612a6270c33d38281fd573160e1003e43718d68d"},
+ {file = "fonttools-4.47.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6dd58cc03016b281bd2c74c84cdaa6bd3ce54c5a7f47478b7657b930ac3ed8eb"},
+ {file = "fonttools-4.47.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32ab2e9702dff0dd4510c7bb958f265a8d3dd5c0e2547e7b5f7a3df4979abb07"},
+ {file = "fonttools-4.47.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a808f3c1d1df1f5bf39be869b6e0c263570cdafb5bdb2df66087733f566ea71"},
+ {file = "fonttools-4.47.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac71e2e201df041a2891067dc36256755b1229ae167edbdc419b16da78732c2f"},
+ {file = "fonttools-4.47.2-cp312-cp312-win32.whl", hash = "sha256:69731e8bea0578b3c28fdb43dbf95b9386e2d49a399e9a4ad736b8e479b08085"},
+ {file = "fonttools-4.47.2-cp312-cp312-win_amd64.whl", hash = "sha256:b3e1304e5f19ca861d86a72218ecce68f391646d85c851742d265787f55457a4"},
+ {file = "fonttools-4.47.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:254d9a6f7be00212bf0c3159e0a420eb19c63793b2c05e049eb337f3023c5ecc"},
+ {file = "fonttools-4.47.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eabae77a07c41ae0b35184894202305c3ad211a93b2eb53837c2a1143c8bc952"},
+ {file = "fonttools-4.47.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86a5ab2873ed2575d0fcdf1828143cfc6b977ac448e3dc616bb1e3d20efbafa"},
+ {file = "fonttools-4.47.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13819db8445a0cec8c3ff5f243af6418ab19175072a9a92f6cc8ca7d1452754b"},
+ {file = "fonttools-4.47.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4e743935139aa485fe3253fc33fe467eab6ea42583fa681223ea3f1a93dd01e6"},
+ {file = "fonttools-4.47.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d49ce3ea7b7173faebc5664872243b40cf88814ca3eb135c4a3cdff66af71946"},
+ {file = "fonttools-4.47.2-cp38-cp38-win32.whl", hash = "sha256:94208ea750e3f96e267f394d5588579bb64cc628e321dbb1d4243ffbc291b18b"},
+ {file = "fonttools-4.47.2-cp38-cp38-win_amd64.whl", hash = "sha256:0f750037e02beb8b3569fbff701a572e62a685d2a0e840d75816592280e5feae"},
+ {file = "fonttools-4.47.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3d71606c9321f6701642bd4746f99b6089e53d7e9817fc6b964e90d9c5f0ecc6"},
+ {file = "fonttools-4.47.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:86e0427864c6c91cf77f16d1fb9bf1bbf7453e824589e8fb8461b6ee1144f506"},
+ {file = "fonttools-4.47.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a00bd0e68e88987dcc047ea31c26d40a3c61185153b03457956a87e39d43c37"},
+ {file = "fonttools-4.47.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5d77479fb885ef38a16a253a2f4096bc3d14e63a56d6246bfdb56365a12b20c"},
+ {file = "fonttools-4.47.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5465df494f20a7d01712b072ae3ee9ad2887004701b95cb2cc6dcb9c2c97a899"},
+ {file = "fonttools-4.47.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4c811d3c73b6abac275babb8aa439206288f56fdb2c6f8835e3d7b70de8937a7"},
+ {file = "fonttools-4.47.2-cp39-cp39-win32.whl", hash = "sha256:5b60e3afa9635e3dfd3ace2757039593e3bd3cf128be0ddb7a1ff4ac45fa5a50"},
+ {file = "fonttools-4.47.2-cp39-cp39-win_amd64.whl", hash = "sha256:7ee48bd9d6b7e8f66866c9090807e3a4a56cf43ffad48962725a190e0dd774c8"},
+ {file = "fonttools-4.47.2-py3-none-any.whl", hash = "sha256:7eb7ad665258fba68fd22228a09f347469d95a97fb88198e133595947a20a184"},
+ {file = "fonttools-4.47.2.tar.gz", hash = "sha256:7df26dd3650e98ca45f1e29883c96a0b9f5bb6af8d632a6a108bc744fa0bd9b3"},
+]
+
+[package.extras]
+all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"]
graphite = ["lz4 (>=1.7.4.2)"]
-interpolatable = ["munkres", "scipy"]
+interpolatable = ["munkres", "pycairo", "scipy"]
lxml = ["lxml (>=4.0,<5)"]
pathops = ["skia-pathops (>=0.5.0)"]
plot = ["matplotlib"]
@@ -674,88 +675,104 @@ repacker = ["uharfbuzz (>=0.23.0)"]
symfont = ["sympy"]
type1 = ["xattr"]
ufo = ["fs (>=2.2.0,<3)"]
-unicode = ["unicodedata2 (>=15.0.0)"]
+unicode = ["unicodedata2 (>=15.1.0)"]
woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
[[package]]
name = "frozenlist"
-version = "1.4.0"
+version = "1.4.1"
description = "A list-like structure which implements collections.abc.MutableSequence"
optional = true
python-versions = ">=3.8"
files = [
- {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"},
- {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"},
- {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"},
- {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"},
- {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"},
- {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"},
- {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"},
- {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"},
- {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"},
- {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"},
- {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"},
- {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"},
- {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"},
- {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"},
- {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"},
- {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"},
- {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"},
- {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"},
- {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"},
- {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"},
- {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"},
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"},
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"},
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"},
+ {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"},
+ {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"},
+ {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"},
+ {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"},
+ {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"},
+ {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"},
+ {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"},
+ {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"},
+ {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"},
+ {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"},
+ {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"},
+ {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"},
]
[[package]]
name = "fsspec"
-version = "2023.10.0"
+version = "2023.12.2"
description = "File-system specification"
optional = false
python-versions = ">=3.8"
files = [
- {file = "fsspec-2023.10.0-py3-none-any.whl", hash = "sha256:346a8f024efeb749d2a5fca7ba8854474b1ff9af7c3faaf636a4548781136529"},
- {file = "fsspec-2023.10.0.tar.gz", hash = "sha256:330c66757591df346ad3091a53bd907e15348c2ba17d63fd54f5c39c4457d2a5"},
+ {file = "fsspec-2023.12.2-py3-none-any.whl", hash = "sha256:d800d87f72189a745fa3d6b033b9dc4a34ad069f60ca60b943a63599f5501960"},
+ {file = "fsspec-2023.12.2.tar.gz", hash = "sha256:8548d39e8810b59c38014934f6b31e57f40c1b20f911f4cc2b85389c7e9bf0cb"},
]
[package.extras]
@@ -795,13 +812,13 @@ files = [
[[package]]
name = "google-auth"
-version = "2.23.4"
+version = "2.27.0"
description = "Google Authentication Library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google-auth-2.23.4.tar.gz", hash = "sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3"},
- {file = "google_auth-2.23.4-py2.py3-none-any.whl", hash = "sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2"},
+ {file = "google-auth-2.27.0.tar.gz", hash = "sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"},
+ {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"},
]
[package.dependencies]
@@ -851,69 +868,69 @@ six = "*"
[[package]]
name = "grpcio"
-version = "1.59.2"
+version = "1.60.0"
description = "HTTP/2-based RPC framework"
optional = false
python-versions = ">=3.7"
files = [
- {file = "grpcio-1.59.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:d2fa68a96a30dd240be80bbad838a0ac81a61770611ff7952b889485970c4c71"},
- {file = "grpcio-1.59.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:cf0dead5a2c5a3347af2cfec7131d4f2a2e03c934af28989c9078f8241a491fa"},
- {file = "grpcio-1.59.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:e420ced29b5904cdf9ee5545e23f9406189d8acb6750916c2db4793dada065c6"},
- {file = "grpcio-1.59.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b230028a008ae1d0f430acb227d323ff8a619017415cf334c38b457f814119f"},
- {file = "grpcio-1.59.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a4a3833c0e067f3558538727235cd8a49709bff1003200bbdefa2f09334e4b1"},
- {file = "grpcio-1.59.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6b25ed37c27e652db01be341af93fbcea03d296c024d8a0e680017a268eb85dd"},
- {file = "grpcio-1.59.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73abb8584b0cf74d37f5ef61c10722adc7275502ab71789a8fe3cb7ef04cf6e2"},
- {file = "grpcio-1.59.2-cp310-cp310-win32.whl", hash = "sha256:d6f70406695e3220f09cd7a2f879333279d91aa4a8a1d34303b56d61a8180137"},
- {file = "grpcio-1.59.2-cp310-cp310-win_amd64.whl", hash = "sha256:3c61d641d4f409c5ae46bfdd89ea42ce5ea233dcf69e74ce9ba32b503c727e29"},
- {file = "grpcio-1.59.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:3059668df17627f0e0fa680e9ef8c995c946c792612e9518f5cc1503be14e90b"},
- {file = "grpcio-1.59.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:72ca2399097c0b758198f2ff30f7178d680de8a5cfcf3d9b73a63cf87455532e"},
- {file = "grpcio-1.59.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:c978f864b35f2261e0819f5cd88b9830b04dc51bcf055aac3c601e525a10d2ba"},
- {file = "grpcio-1.59.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9411e24328a2302e279e70cae6e479f1fddde79629fcb14e03e6d94b3956eabf"},
- {file = "grpcio-1.59.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb7e0fe6ad73b7f06d7e2b689c19a71cf5cc48f0c2bf8608469e51ffe0bd2867"},
- {file = "grpcio-1.59.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c2504eed520958a5b77cc99458297cb7906308cb92327f35fb7fbbad4e9b2188"},
- {file = "grpcio-1.59.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2171c39f355ba5b551c5d5928d65aa6c69807fae195b86ef4a7d125bcdb860a9"},
- {file = "grpcio-1.59.2-cp311-cp311-win32.whl", hash = "sha256:d2794f0e68b3085d99b4f6ff9c089f6fdd02b32b9d3efdfbb55beac1bf22d516"},
- {file = "grpcio-1.59.2-cp311-cp311-win_amd64.whl", hash = "sha256:2067274c88bc6de89c278a672a652b4247d088811ece781a4858b09bdf8448e3"},
- {file = "grpcio-1.59.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:535561990e075fa6bd4b16c4c3c1096b9581b7bb35d96fac4650f1181e428268"},
- {file = "grpcio-1.59.2-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:a213acfbf186b9f35803b52e4ca9addb153fc0b67f82a48f961be7000ecf6721"},
- {file = "grpcio-1.59.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:6959fb07e8351e20501ffb8cc4074c39a0b7ef123e1c850a7f8f3afdc3a3da01"},
- {file = "grpcio-1.59.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e82c5cf1495244adf5252f925ac5932e5fd288b3e5ab6b70bec5593074b7236c"},
- {file = "grpcio-1.59.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:023088764012411affe7db183d1ada3ad9daf2e23ddc719ff46d7061de661340"},
- {file = "grpcio-1.59.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:da2d94c15f88cd40d7e67f7919d4f60110d2b9d5b1e08cf354c2be773ab13479"},
- {file = "grpcio-1.59.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6009386a2df66159f64ac9f20425ae25229b29b9dd0e1d3dd60043f037e2ad7e"},
- {file = "grpcio-1.59.2-cp312-cp312-win32.whl", hash = "sha256:75c6ecb70e809cf1504465174343113f51f24bc61e22a80ae1c859f3f7034c6d"},
- {file = "grpcio-1.59.2-cp312-cp312-win_amd64.whl", hash = "sha256:cbe946b3e6e60a7b4618f091e62a029cb082b109a9d6b53962dd305087c6e4fd"},
- {file = "grpcio-1.59.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:f8753a6c88d1d0ba64302309eecf20f70d2770f65ca02d83c2452279085bfcd3"},
- {file = "grpcio-1.59.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:f1ef0d39bc1feb420caf549b3c657c871cad4ebbcf0580c4d03816b0590de0cf"},
- {file = "grpcio-1.59.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:4c93f4abbb54321ee6471e04a00139c80c754eda51064187963ddf98f5cf36a4"},
- {file = "grpcio-1.59.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08d77e682f2bf730a4961eea330e56d2f423c6a9b91ca222e5b1eb24a357b19f"},
- {file = "grpcio-1.59.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff16d68bf453275466a9a46739061a63584d92f18a0f5b33d19fc97eb69867c"},
- {file = "grpcio-1.59.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4abb717e320e74959517dc8e84a9f48fbe90e9abe19c248541e9418b1ce60acd"},
- {file = "grpcio-1.59.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:36f53c2b3449c015880e7d55a89c992c357f176327b0d2873cdaaf9628a37c69"},
- {file = "grpcio-1.59.2-cp37-cp37m-win_amd64.whl", hash = "sha256:cc3e4cd087f07758b16bef8f31d88dbb1b5da5671d2f03685ab52dece3d7a16e"},
- {file = "grpcio-1.59.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:27f879ae604a7fcf371e59fba6f3ff4635a4c2a64768bd83ff0cac503142fef4"},
- {file = "grpcio-1.59.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:7cf05053242f61ba94014dd3a986e11a083400a32664058f80bf4cf817c0b3a1"},
- {file = "grpcio-1.59.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:e1727c1c0e394096bb9af185c6923e8ea55a5095b8af44f06903bcc0e06800a2"},
- {file = "grpcio-1.59.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5d573e70a6fe77555fb6143c12d3a7d3fa306632a3034b4e7c59ca09721546f8"},
- {file = "grpcio-1.59.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31176aa88f36020055ace9adff2405a33c8bdbfa72a9c4980e25d91b2f196873"},
- {file = "grpcio-1.59.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:11168ef43e4a43ff1b1a65859f3e0ef1a173e277349e7fb16923ff108160a8cd"},
- {file = "grpcio-1.59.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:53c9aa5ddd6857c0a1cd0287225a2a25873a8e09727c2e95c4aebb1be83a766a"},
- {file = "grpcio-1.59.2-cp38-cp38-win32.whl", hash = "sha256:3b4368b33908f683a363f376dfb747d40af3463a6e5044afee07cf9436addf96"},
- {file = "grpcio-1.59.2-cp38-cp38-win_amd64.whl", hash = "sha256:0a754aff9e3af63bdc4c75c234b86b9d14e14a28a30c4e324aed1a9b873d755f"},
- {file = "grpcio-1.59.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:1f9524d1d701e399462d2c90ba7c193e49d1711cf429c0d3d97c966856e03d00"},
- {file = "grpcio-1.59.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:f93dbf58f03146164048be5426ffde298b237a5e059144847e4940f5b80172c3"},
- {file = "grpcio-1.59.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:6da6dea3a1bacf99b3c2187e296db9a83029ed9c38fd4c52b7c9b7326d13c828"},
- {file = "grpcio-1.59.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5f09cffa619adfb44799fa4a81c2a1ad77c887187613fb0a8f201ab38d89ba1"},
- {file = "grpcio-1.59.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c35aa9657f5d5116d23b934568e0956bd50c615127810fffe3ac356a914c176a"},
- {file = "grpcio-1.59.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:74100fecaec8a535e380cf5f2fb556ff84957d481c13e54051c52e5baac70541"},
- {file = "grpcio-1.59.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:128e20f57c5f27cb0157e73756d1586b83c1b513ebecc83ea0ac37e4b0e4e758"},
- {file = "grpcio-1.59.2-cp39-cp39-win32.whl", hash = "sha256:686e975a5d16602dc0982c7c703948d17184bd1397e16c8ee03511ecb8c4cdda"},
- {file = "grpcio-1.59.2-cp39-cp39-win_amd64.whl", hash = "sha256:242adc47725b9a499ee77c6a2e36688fa6c96484611f33b1be4c57ab075a92dd"},
- {file = "grpcio-1.59.2.tar.gz", hash = "sha256:d8f9cd4ad1be90b0cf350a2f04a38a36e44a026cac1e036ac593dc48efe91d52"},
-]
-
-[package.extras]
-protobuf = ["grpcio-tools (>=1.59.2)"]
+ {file = "grpcio-1.60.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d020cfa595d1f8f5c6b343530cd3ca16ae5aefdd1e832b777f9f0eb105f5b139"},
+ {file = "grpcio-1.60.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b98f43fcdb16172dec5f4b49f2fece4b16a99fd284d81c6bbac1b3b69fcbe0ff"},
+ {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:20e7a4f7ded59097c84059d28230907cd97130fa74f4a8bfd1d8e5ba18c81491"},
+ {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452ca5b4afed30e7274445dd9b441a35ece656ec1600b77fff8c216fdf07df43"},
+ {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43e636dc2ce9ece583b3e2ca41df5c983f4302eabc6d5f9cd04f0562ee8ec1ae"},
+ {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e306b97966369b889985a562ede9d99180def39ad42c8014628dd3cc343f508"},
+ {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f897c3b127532e6befdcf961c415c97f320d45614daf84deba0a54e64ea2457b"},
+ {file = "grpcio-1.60.0-cp310-cp310-win32.whl", hash = "sha256:b87efe4a380887425bb15f220079aa8336276398dc33fce38c64d278164f963d"},
+ {file = "grpcio-1.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:a9c7b71211f066908e518a2ef7a5e211670761651039f0d6a80d8d40054047df"},
+ {file = "grpcio-1.60.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:fb464479934778d7cc5baf463d959d361954d6533ad34c3a4f1d267e86ee25fd"},
+ {file = "grpcio-1.60.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:4b44d7e39964e808b071714666a812049765b26b3ea48c4434a3b317bac82f14"},
+ {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:90bdd76b3f04bdb21de5398b8a7c629676c81dfac290f5f19883857e9371d28c"},
+ {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91229d7203f1ef0ab420c9b53fe2ca5c1fbeb34f69b3bc1b5089466237a4a134"},
+ {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b36a2c6d4920ba88fa98075fdd58ff94ebeb8acc1215ae07d01a418af4c0253"},
+ {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:297eef542156d6b15174a1231c2493ea9ea54af8d016b8ca7d5d9cc65cfcc444"},
+ {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:87c9224acba0ad8bacddf427a1c2772e17ce50b3042a789547af27099c5f751d"},
+ {file = "grpcio-1.60.0-cp311-cp311-win32.whl", hash = "sha256:95ae3e8e2c1b9bf671817f86f155c5da7d49a2289c5cf27a319458c3e025c320"},
+ {file = "grpcio-1.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:467a7d31554892eed2aa6c2d47ded1079fc40ea0b9601d9f79204afa8902274b"},
+ {file = "grpcio-1.60.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:a7152fa6e597c20cb97923407cf0934e14224af42c2b8d915f48bc3ad2d9ac18"},
+ {file = "grpcio-1.60.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:7db16dd4ea1b05ada504f08d0dca1cd9b926bed3770f50e715d087c6f00ad748"},
+ {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:b0571a5aef36ba9177e262dc88a9240c866d903a62799e44fd4aae3f9a2ec17e"},
+ {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fd9584bf1bccdfff1512719316efa77be235469e1e3295dce64538c4773840b"},
+ {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6a478581b1a1a8fdf3318ecb5f4d0cda41cacdffe2b527c23707c9c1b8fdb55"},
+ {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:77c8a317f0fd5a0a2be8ed5cbe5341537d5c00bb79b3bb27ba7c5378ba77dbca"},
+ {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1c30bb23a41df95109db130a6cc1b974844300ae2e5d68dd4947aacba5985aa5"},
+ {file = "grpcio-1.60.0-cp312-cp312-win32.whl", hash = "sha256:2aef56e85901c2397bd557c5ba514f84de1f0ae5dd132f5d5fed042858115951"},
+ {file = "grpcio-1.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:e381fe0c2aa6c03b056ad8f52f8efca7be29fb4d9ae2f8873520843b6039612a"},
+ {file = "grpcio-1.60.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:92f88ca1b956eb8427a11bb8b4a0c0b2b03377235fc5102cb05e533b8693a415"},
+ {file = "grpcio-1.60.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:e278eafb406f7e1b1b637c2cf51d3ad45883bb5bd1ca56bc05e4fc135dfdaa65"},
+ {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:a48edde788b99214613e440fce495bbe2b1e142a7f214cce9e0832146c41e324"},
+ {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de2ad69c9a094bf37c1102b5744c9aec6cf74d2b635558b779085d0263166454"},
+ {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:073f959c6f570797272f4ee9464a9997eaf1e98c27cb680225b82b53390d61e6"},
+ {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c826f93050c73e7769806f92e601e0efdb83ec8d7c76ddf45d514fee54e8e619"},
+ {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9e30be89a75ee66aec7f9e60086fadb37ff8c0ba49a022887c28c134341f7179"},
+ {file = "grpcio-1.60.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b0fb2d4801546598ac5cd18e3ec79c1a9af8b8f2a86283c55a5337c5aeca4b1b"},
+ {file = "grpcio-1.60.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:9073513ec380434eb8d21970e1ab3161041de121f4018bbed3146839451a6d8e"},
+ {file = "grpcio-1.60.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:74d7d9fa97809c5b892449b28a65ec2bfa458a4735ddad46074f9f7d9550ad13"},
+ {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:1434ca77d6fed4ea312901122dc8da6c4389738bf5788f43efb19a838ac03ead"},
+ {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61e76020e0c332a98290323ecfec721c9544f5b739fab925b6e8cbe1944cf19"},
+ {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675997222f2e2f22928fbba640824aebd43791116034f62006e19730715166c0"},
+ {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5208a57eae445ae84a219dfd8b56e04313445d146873117b5fa75f3245bc1390"},
+ {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:428d699c8553c27e98f4d29fdc0f0edc50e9a8a7590bfd294d2edb0da7be3629"},
+ {file = "grpcio-1.60.0-cp38-cp38-win32.whl", hash = "sha256:83f2292ae292ed5a47cdcb9821039ca8e88902923198f2193f13959360c01860"},
+ {file = "grpcio-1.60.0-cp38-cp38-win_amd64.whl", hash = "sha256:705a68a973c4c76db5d369ed573fec3367d7d196673fa86614b33d8c8e9ebb08"},
+ {file = "grpcio-1.60.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c193109ca4070cdcaa6eff00fdb5a56233dc7610216d58fb81638f89f02e4968"},
+ {file = "grpcio-1.60.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:676e4a44e740deaba0f4d95ba1d8c5c89a2fcc43d02c39f69450b1fa19d39590"},
+ {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5ff21e000ff2f658430bde5288cb1ac440ff15c0d7d18b5fb222f941b46cb0d2"},
+ {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c86343cf9ff7b2514dd229bdd88ebba760bd8973dac192ae687ff75e39ebfab"},
+ {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fd3b3968ffe7643144580f260f04d39d869fcc2cddb745deef078b09fd2b328"},
+ {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:30943b9530fe3620e3b195c03130396cd0ee3a0d10a66c1bee715d1819001eaf"},
+ {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b10241250cb77657ab315270b064a6c7f1add58af94befa20687e7c8d8603ae6"},
+ {file = "grpcio-1.60.0-cp39-cp39-win32.whl", hash = "sha256:79a050889eb8d57a93ed21d9585bb63fca881666fc709f5d9f7f9372f5e7fd03"},
+ {file = "grpcio-1.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:8a97a681e82bc11a42d4372fe57898d270a2707f36c45c6676e49ce0d5c41353"},
+ {file = "grpcio-1.60.0.tar.gz", hash = "sha256:2199165a1affb666aa24adf0c97436686d0a61bc5fc113c037701fb7c7fceb96"},
+]
+
+[package.extras]
+protobuf = ["grpcio-tools (>=1.60.0)"]
[[package]]
name = "h5py"
@@ -985,13 +1002,13 @@ zoneinfo = ["backports.zoneinfo (>=0.2.1)", "importlib-resources (>=3.3.0)", "tz
[[package]]
name = "idna"
-version = "3.4"
+version = "3.6"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
files = [
- {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
- {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+ {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"},
+ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"},
]
[[package]]
@@ -1007,32 +1024,32 @@ files = [
[[package]]
name = "importlib-metadata"
-version = "6.8.0"
+version = "7.0.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"},
- {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"},
+ {file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"},
+ {file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"},
]
[package.dependencies]
zipp = ">=0.5"
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
[[package]]
name = "importlib-resources"
-version = "6.1.0"
+version = "6.1.1"
description = "Read resources from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_resources-6.1.0-py3-none-any.whl", hash = "sha256:aa50258bbfa56d4e33fbd8aa3ef48ded10d1735f11532b8df95388cc6bdb7e83"},
- {file = "importlib_resources-6.1.0.tar.gz", hash = "sha256:9d48dcccc213325e810fd723e7fbb45ccb39f6cf5c31f00cf2b965f5f10f3cb9"},
+ {file = "importlib_resources-6.1.1-py3-none-any.whl", hash = "sha256:e8bf90d8213b486f428c9c39714b920041cb02c184686a3dee24905aaa8105d6"},
+ {file = "importlib_resources-6.1.1.tar.gz", hash = "sha256:3893a00122eafde6894c59914446a512f728a0c1a45f9bb9b63721b6bacf0b4a"},
]
[package.dependencies]
@@ -1055,30 +1072,27 @@ files = [
[[package]]
name = "isort"
-version = "5.12.0"
+version = "5.13.2"
description = "A Python utility / library to sort Python imports."
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"},
- {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"},
+ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"},
+ {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"},
]
[package.extras]
-colors = ["colorama (>=0.4.3)"]
-pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"]
-plugins = ["setuptools"]
-requirements-deprecated-finder = ["pip-api", "pipreqs"]
+colors = ["colorama (>=0.4.6)"]
[[package]]
name = "jinja2"
-version = "3.1.2"
+version = "3.1.3"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
files = [
- {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
- {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
+ {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"},
+ {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"},
]
[package.dependencies]
@@ -1100,13 +1114,13 @@ files = [
[[package]]
name = "jsonschema"
-version = "4.19.2"
+version = "4.21.1"
description = "An implementation of JSON Schema validation for Python"
optional = true
python-versions = ">=3.8"
files = [
- {file = "jsonschema-4.19.2-py3-none-any.whl", hash = "sha256:eee9e502c788e89cb166d4d37f43084e3b64ab405c795c03d343a4dbc2c810fc"},
- {file = "jsonschema-4.19.2.tar.gz", hash = "sha256:c9ff4d7447eed9592c23a12ccee508baf0dd0d59650615e847feb6cdca74f392"},
+ {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"},
+ {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"},
]
[package.dependencies]
@@ -1121,17 +1135,31 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-
[[package]]
name = "jsonschema-specifications"
-version = "2023.7.1"
+version = "2023.12.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = true
python-versions = ">=3.8"
files = [
- {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"},
- {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"},
+ {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"},
+ {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"},
]
[package.dependencies]
-referencing = ">=0.28.0"
+referencing = ">=0.31.0"
+
+[[package]]
+name = "julia"
+version = "0.6.1"
+description = "Julia/Python bridge with IPython support."
+optional = false
+python-versions = ">=3.4"
+files = [
+ {file = "julia-0.6.1-py2.py3-none-any.whl", hash = "sha256:69b997866dac2900e23563bd954ba634f8829f4f16ab7107ba1e9b4a90381b67"},
+ {file = "julia-0.6.1.tar.gz", hash = "sha256:dbada3b47cb14b3e1893dae8339053e014cf09f8158f408b6a129ca4dfca1f61"},
+]
+
+[package.extras]
+test = ["ipython", "mock", "numpy", "pytest (>=4.4)"]
[[package]]
name = "keras"
@@ -1273,47 +1301,48 @@ six = ">=1.4.1"
[[package]]
name = "lazy-object-proxy"
-version = "1.9.0"
+version = "1.10.0"
description = "A fast and thorough lazy object proxy."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "lazy-object-proxy-1.9.0.tar.gz", hash = "sha256:659fb5809fa4629b8a1ac5106f669cfc7bef26fbb389dda53b3e010d1ac4ebae"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b40387277b0ed2d0602b8293b94d7257e17d1479e257b4de114ea11a8cb7f2d7"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8c6cfb338b133fbdbc5cfaa10fe3c6aeea827db80c978dbd13bc9dd8526b7d4"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:721532711daa7db0d8b779b0bb0318fa87af1c10d7fe5e52ef30f8eff254d0cd"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66a3de4a3ec06cd8af3f61b8e1ec67614fbb7c995d02fa224813cb7afefee701"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1aa3de4088c89a1b69f8ec0dcc169aa725b0ff017899ac568fe44ddc1396df46"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-win32.whl", hash = "sha256:f0705c376533ed2a9e5e97aacdbfe04cecd71e0aa84c7c0595d02ef93b6e4455"},
- {file = "lazy_object_proxy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:ea806fd4c37bf7e7ad82537b0757999264d5f70c45468447bb2b91afdbe73a6e"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:946d27deaff6cf8452ed0dba83ba38839a87f4f7a9732e8f9fd4107b21e6ff07"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79a31b086e7e68b24b99b23d57723ef7e2c6d81ed21007b6281ebcd1688acb0a"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f699ac1c768270c9e384e4cbd268d6e67aebcfae6cd623b4d7c3bfde5a35db59"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bfb38f9ffb53b942f2b5954e0f610f1e721ccebe9cce9025a38c8ccf4a5183a4"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:189bbd5d41ae7a498397287c408617fe5c48633e7755287b21d741f7db2706a9"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-win32.whl", hash = "sha256:81fc4d08b062b535d95c9ea70dbe8a335c45c04029878e62d744bdced5141586"},
- {file = "lazy_object_proxy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:f2457189d8257dd41ae9b434ba33298aec198e30adf2dcdaaa3a28b9994f6adb"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9e25ef10a39e8afe59a5c348a4dbf29b4868ab76269f81ce1674494e2565a6e"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cbf9b082426036e19c6924a9ce90c740a9861e2bdc27a4834fd0a910742ac1e8"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f5fa4a61ce2438267163891961cfd5e32ec97a2c444e5b842d574251ade27d2"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8fa02eaab317b1e9e03f69aab1f91e120e7899b392c4fc19807a8278a07a97e8"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7c21c95cae3c05c14aafffe2865bbd5e377cfc1348c4f7751d9dc9a48ca4bda"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win32.whl", hash = "sha256:f12ad7126ae0c98d601a7ee504c1122bcef553d1d5e0c3bfa77b16b3968d2734"},
- {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:edd20c5a55acb67c7ed471fa2b5fb66cb17f61430b7a6b9c3b4a1e40293b1671"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0daa332786cf3bb49e10dc6a17a52f6a8f9601b4cf5c295a4f85854d61de63"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cd077f3d04a58e83d04b20e334f678c2b0ff9879b9375ed107d5d07ff160171"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c94ea760b3ce47d1855a30984c78327500493d396eac4dfd8bd82041b22be"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:212774e4dfa851e74d393a2370871e174d7ff0ebc980907723bb67d25c8a7c30"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0117049dd1d5635bbff65444496c90e0baa48ea405125c088e93d9cf4525b11"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-win32.whl", hash = "sha256:0a891e4e41b54fd5b8313b96399f8b0e173bbbfc03c7631f01efbe29bb0bcf82"},
- {file = "lazy_object_proxy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:9990d8e71b9f6488e91ad25f322898c136b008d87bf852ff65391b004da5e17b"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e7551208b2aded9c1447453ee366f1c4070602b3d932ace044715d89666899b"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f83ac4d83ef0ab017683d715ed356e30dd48a93746309c8f3517e1287523ef4"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7322c3d6f1766d4ef1e51a465f47955f1e8123caee67dd641e67d539a534d006"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18b78ec83edbbeb69efdc0e9c1cb41a3b1b1ed11ddd8ded602464c3fc6020494"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:09763491ce220c0299688940f8dc2c5d05fd1f45af1e42e636b2e8b2303e4382"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-win32.whl", hash = "sha256:9090d8e53235aa280fc9239a86ae3ea8ac58eff66a705fa6aa2ec4968b95c821"},
- {file = "lazy_object_proxy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:db1c1722726f47e10e0b5fdbf15ac3b8adb58c091d12b3ab713965795036985f"},
+ {file = "lazy-object-proxy-1.10.0.tar.gz", hash = "sha256:78247b6d45f43a52ef35c25b5581459e85117225408a4128a3daf8bf9648ac69"},
+ {file = "lazy_object_proxy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:855e068b0358ab916454464a884779c7ffa312b8925c6f7401e952dcf3b89977"},
+ {file = "lazy_object_proxy-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab7004cf2e59f7c2e4345604a3e6ea0d92ac44e1c2375527d56492014e690c3"},
+ {file = "lazy_object_proxy-1.10.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc0d2fc424e54c70c4bc06787e4072c4f3b1aa2f897dfdc34ce1013cf3ceef05"},
+ {file = "lazy_object_proxy-1.10.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e2adb09778797da09d2b5ebdbceebf7dd32e2c96f79da9052b2e87b6ea495895"},
+ {file = "lazy_object_proxy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b1f711e2c6dcd4edd372cf5dec5c5a30d23bba06ee012093267b3376c079ec83"},
+ {file = "lazy_object_proxy-1.10.0-cp310-cp310-win32.whl", hash = "sha256:76a095cfe6045c7d0ca77db9934e8f7b71b14645f0094ffcd842349ada5c5fb9"},
+ {file = "lazy_object_proxy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:b4f87d4ed9064b2628da63830986c3d2dca7501e6018347798313fcf028e2fd4"},
+ {file = "lazy_object_proxy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fec03caabbc6b59ea4a638bee5fce7117be8e99a4103d9d5ad77f15d6f81020c"},
+ {file = "lazy_object_proxy-1.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02c83f957782cbbe8136bee26416686a6ae998c7b6191711a04da776dc9e47d4"},
+ {file = "lazy_object_proxy-1.10.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:009e6bb1f1935a62889ddc8541514b6a9e1fcf302667dcb049a0be5c8f613e56"},
+ {file = "lazy_object_proxy-1.10.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75fc59fc450050b1b3c203c35020bc41bd2695ed692a392924c6ce180c6f1dc9"},
+ {file = "lazy_object_proxy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:782e2c9b2aab1708ffb07d4bf377d12901d7a1d99e5e410d648d892f8967ab1f"},
+ {file = "lazy_object_proxy-1.10.0-cp311-cp311-win32.whl", hash = "sha256:edb45bb8278574710e68a6b021599a10ce730d156e5b254941754a9cc0b17d03"},
+ {file = "lazy_object_proxy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:e271058822765ad5e3bca7f05f2ace0de58a3f4e62045a8c90a0dfd2f8ad8cc6"},
+ {file = "lazy_object_proxy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e98c8af98d5707dcdecc9ab0863c0ea6e88545d42ca7c3feffb6b4d1e370c7ba"},
+ {file = "lazy_object_proxy-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:952c81d415b9b80ea261d2372d2a4a2332a3890c2b83e0535f263ddfe43f0d43"},
+ {file = "lazy_object_proxy-1.10.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80b39d3a151309efc8cc48675918891b865bdf742a8616a337cb0090791a0de9"},
+ {file = "lazy_object_proxy-1.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e221060b701e2aa2ea991542900dd13907a5c90fa80e199dbf5a03359019e7a3"},
+ {file = "lazy_object_proxy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92f09ff65ecff3108e56526f9e2481b8116c0b9e1425325e13245abfd79bdb1b"},
+ {file = "lazy_object_proxy-1.10.0-cp312-cp312-win32.whl", hash = "sha256:3ad54b9ddbe20ae9f7c1b29e52f123120772b06dbb18ec6be9101369d63a4074"},
+ {file = "lazy_object_proxy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:127a789c75151db6af398b8972178afe6bda7d6f68730c057fbbc2e96b08d282"},
+ {file = "lazy_object_proxy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4ed0518a14dd26092614412936920ad081a424bdcb54cc13349a8e2c6d106a"},
+ {file = "lazy_object_proxy-1.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ad9e6ed739285919aa9661a5bbed0aaf410aa60231373c5579c6b4801bd883c"},
+ {file = "lazy_object_proxy-1.10.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fc0a92c02fa1ca1e84fc60fa258458e5bf89d90a1ddaeb8ed9cc3147f417255"},
+ {file = "lazy_object_proxy-1.10.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0aefc7591920bbd360d57ea03c995cebc204b424524a5bd78406f6e1b8b2a5d8"},
+ {file = "lazy_object_proxy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5faf03a7d8942bb4476e3b62fd0f4cf94eaf4618e304a19865abf89a35c0bbee"},
+ {file = "lazy_object_proxy-1.10.0-cp38-cp38-win32.whl", hash = "sha256:e333e2324307a7b5d86adfa835bb500ee70bfcd1447384a822e96495796b0ca4"},
+ {file = "lazy_object_proxy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:cb73507defd385b7705c599a94474b1d5222a508e502553ef94114a143ec6696"},
+ {file = "lazy_object_proxy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:366c32fe5355ef5fc8a232c5436f4cc66e9d3e8967c01fb2e6302fd6627e3d94"},
+ {file = "lazy_object_proxy-1.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2297f08f08a2bb0d32a4265e98a006643cd7233fb7983032bd61ac7a02956b3b"},
+ {file = "lazy_object_proxy-1.10.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18dd842b49456aaa9a7cf535b04ca4571a302ff72ed8740d06b5adcd41fe0757"},
+ {file = "lazy_object_proxy-1.10.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:217138197c170a2a74ca0e05bddcd5f1796c735c37d0eee33e43259b192aa424"},
+ {file = "lazy_object_proxy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a3a87cf1e133e5b1994144c12ca4aa3d9698517fe1e2ca82977781b16955658"},
+ {file = "lazy_object_proxy-1.10.0-cp39-cp39-win32.whl", hash = "sha256:30b339b2a743c5288405aa79a69e706a06e02958eab31859f7f3c04980853b70"},
+ {file = "lazy_object_proxy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:a899b10e17743683b293a729d3a11f2f399e8a90c73b089e29f5d0fe3509f0dd"},
+ {file = "lazy_object_proxy-1.10.0-pp310.pp311.pp312.pp38.pp39-none-any.whl", hash = "sha256:80fa48bd89c8f2f456fc0765c11c23bf5af827febacd2f523ca5bc1893fcc09d"},
]
[[package]]
@@ -1401,13 +1430,13 @@ mistune = "0.8.4"
[[package]]
name = "markdown"
-version = "3.5.1"
+version = "3.5.2"
description = "Python implementation of John Gruber's Markdown."
optional = false
python-versions = ">=3.8"
files = [
- {file = "Markdown-3.5.1-py3-none-any.whl", hash = "sha256:5874b47d4ee3f0b14d764324d2c94c03ea66bee56f2d929da9f2508d65e722dc"},
- {file = "Markdown-3.5.1.tar.gz", hash = "sha256:b65d7beb248dc22f2e8a31fb706d93798093c308dc1aba295aedeb9d41a813bd"},
+ {file = "Markdown-3.5.2-py3-none-any.whl", hash = "sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd"},
+ {file = "Markdown-3.5.2.tar.gz", hash = "sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8"},
]
[package.dependencies]
@@ -1419,108 +1448,108 @@ testing = ["coverage", "pyyaml"]
[[package]]
name = "markupsafe"
-version = "2.1.3"
+version = "2.1.5"
description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.7"
files = [
- {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"},
- {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
- {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"},
- {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"},
- {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"},
- {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"},
- {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"},
+ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"},
]
[[package]]
name = "matplotlib"
-version = "3.8.1"
+version = "3.8.2"
description = "Python plotting package"
optional = false
python-versions = ">=3.9"
files = [
- {file = "matplotlib-3.8.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e11ab864323fa73ac1b7849688d9671c47a2665242e899785b4db1a375b547e1"},
- {file = "matplotlib-3.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:43a9d40feb63c9e31a0b8b069dcbd74a912f59bdc0095d187126694cd26977e4"},
- {file = "matplotlib-3.8.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:608ea2951838d391e45dec2e644888db6899c752d3c29e157af9dcefb3d7d8d5"},
- {file = "matplotlib-3.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82ec95b02e894561c21e066bd0c716e4b410df141ce9441aa5af6cd937e4ade2"},
- {file = "matplotlib-3.8.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e3ad1759ad4a5245172c6d32b8ada603a6020d03211524c39d78d25c9a7dc0d2"},
- {file = "matplotlib-3.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:20a0fdfd3ee836179047f3782be060057b878ad37f5abe29edf006a1ff3ecd73"},
- {file = "matplotlib-3.8.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:7658b7073c1d6a2922ecc0ed41602410fae88586cb8a54f7a2063d537b6beaf7"},
- {file = "matplotlib-3.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bf6889643d4560fcc56f9f0941f078e4df0d72a6c3e4ca548841fc13c5642664"},
- {file = "matplotlib-3.8.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff842e27bc6a80de08c40e0bfdce460bd08080e8a94af131162b6a1b8948f2cc"},
- {file = "matplotlib-3.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f99d07c0e753717775be7be39ab383453b4d8b629c9fa174596b970c6555890"},
- {file = "matplotlib-3.8.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f34b46dbb1db1f09bfa937cd5853e5f2af232caeeff509c3ab6e43fd33780eae"},
- {file = "matplotlib-3.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:1fcb49b6baf0375281979cbf26695ec10bd1cada1e311893e89533b3b70143e7"},
- {file = "matplotlib-3.8.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e17674ee127f78f26fea237e7f4d5cf910a8be82beb6260fedf358b88075b823"},
- {file = "matplotlib-3.8.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d921c0270647ab11c3ef283efaaa3d46fd005ba233bfb3aea75231cdf3656de8"},
- {file = "matplotlib-3.8.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2afe7d2f8c9e35e94fbcfcfd9b28f29cb32f0a9068cba469cf907428379c8db9"},
- {file = "matplotlib-3.8.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5a504ff40f81d6233603475a45497a6dca37a873393fa20ae6f7dd6596ef72b"},
- {file = "matplotlib-3.8.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cd54bbf089953140905768ed4626d7223e1ad1d7e2a138410a9c4d3b865ccd80"},
- {file = "matplotlib-3.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:27502d2452208ae784c19504644f09f83742809143bbeae147617640930aa344"},
- {file = "matplotlib-3.8.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f55fb5ff02d999a100be28bf6ffe826e1867a54c7b465409685332c9dd48ffa5"},
- {file = "matplotlib-3.8.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:afb72822ae410d62aa1a2920c6563cb5680de9078358f0e9474396c6c3e06be2"},
- {file = "matplotlib-3.8.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43cf368a4a1d8cbc426944806e5e183cead746647a64d2cdb786441546235967"},
- {file = "matplotlib-3.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c54c55457c7f5ea4dfdba0020004fc7667f5c10c8d9b8010d735345acc06c9b8"},
- {file = "matplotlib-3.8.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e3bb809b743653b5aab5d72ee45c8c937c28e147b0846b0826a54bece898608c"},
- {file = "matplotlib-3.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:c1b0ecaa0d1f4fe1e30f625a2347f0034a89a7d17c39efbb502e554d92ee2f61"},
- {file = "matplotlib-3.8.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ca84deaa38cb64b7dd160ca2046b45f7b5dbff2b0179642e1339fadc337446c9"},
- {file = "matplotlib-3.8.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed3b29f54f6bbf3eaca4cbd23bc260155153ace63b7f597c474fa6fc6f386530"},
- {file = "matplotlib-3.8.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d24c47a1bb47e392fbcd26fe322e4ff3431653ac1e8718e4e147d450ae97a44"},
- {file = "matplotlib-3.8.1.tar.gz", hash = "sha256:044df81c1f6f3a8e52d70c4cfcb44e77ea9632a10929932870dfaa90de94365d"},
+ {file = "matplotlib-3.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:09796f89fb71a0c0e1e2f4bdaf63fb2cefc84446bb963ecdeb40dfee7dfa98c7"},
+ {file = "matplotlib-3.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f9c6976748a25e8b9be51ea028df49b8e561eed7809146da7a47dbecebab367"},
+ {file = "matplotlib-3.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78e4f2cedf303869b782071b55fdde5987fda3038e9d09e58c91cc261b5ad18"},
+ {file = "matplotlib-3.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e208f46cf6576a7624195aa047cb344a7f802e113bb1a06cfd4bee431de5e31"},
+ {file = "matplotlib-3.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:46a569130ff53798ea5f50afce7406e91fdc471ca1e0e26ba976a8c734c9427a"},
+ {file = "matplotlib-3.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:830f00640c965c5b7f6bc32f0d4ce0c36dfe0379f7dd65b07a00c801713ec40a"},
+ {file = "matplotlib-3.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d86593ccf546223eb75a39b44c32788e6f6440d13cfc4750c1c15d0fcb850b63"},
+ {file = "matplotlib-3.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9a5430836811b7652991939012f43d2808a2db9b64ee240387e8c43e2e5578c8"},
+ {file = "matplotlib-3.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9576723858a78751d5aacd2497b8aef29ffea6d1c95981505877f7ac28215c6"},
+ {file = "matplotlib-3.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ba9cbd8ac6cf422f3102622b20f8552d601bf8837e49a3afed188d560152788"},
+ {file = "matplotlib-3.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:03f9d160a29e0b65c0790bb07f4f45d6a181b1ac33eb1bb0dd225986450148f0"},
+ {file = "matplotlib-3.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:3773002da767f0a9323ba1a9b9b5d00d6257dbd2a93107233167cfb581f64717"},
+ {file = "matplotlib-3.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:4c318c1e95e2f5926fba326f68177dee364aa791d6df022ceb91b8221bd0a627"},
+ {file = "matplotlib-3.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:091275d18d942cf1ee9609c830a1bc36610607d8223b1b981c37d5c9fc3e46a4"},
+ {file = "matplotlib-3.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b0f3b8ea0e99e233a4bcc44590f01604840d833c280ebb8fe5554fd3e6cfe8d"},
+ {file = "matplotlib-3.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7b1704a530395aaf73912be741c04d181f82ca78084fbd80bc737be04848331"},
+ {file = "matplotlib-3.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533b0e3b0c6768eef8cbe4b583731ce25a91ab54a22f830db2b031e83cca9213"},
+ {file = "matplotlib-3.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:0f4fc5d72b75e2c18e55eb32292659cf731d9d5b312a6eb036506304f4675630"},
+ {file = "matplotlib-3.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:deaed9ad4da0b1aea77fe0aa0cebb9ef611c70b3177be936a95e5d01fa05094f"},
+ {file = "matplotlib-3.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:172f4d0fbac3383d39164c6caafd3255ce6fa58f08fc392513a0b1d3b89c4f89"},
+ {file = "matplotlib-3.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7d36c2209d9136cd8e02fab1c0ddc185ce79bc914c45054a9f514e44c787917"},
+ {file = "matplotlib-3.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5864bdd7da445e4e5e011b199bb67168cdad10b501750367c496420f2ad00843"},
+ {file = "matplotlib-3.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ef8345b48e95cee45ff25192ed1f4857273117917a4dcd48e3905619bcd9c9b8"},
+ {file = "matplotlib-3.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:7c48d9e221b637c017232e3760ed30b4e8d5dfd081daf327e829bf2a72c731b4"},
+ {file = "matplotlib-3.8.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:aa11b3c6928a1e496c1a79917d51d4cd5d04f8a2e75f21df4949eeefdf697f4b"},
+ {file = "matplotlib-3.8.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1095fecf99eeb7384dabad4bf44b965f929a5f6079654b681193edf7169ec20"},
+ {file = "matplotlib-3.8.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:bddfb1db89bfaa855912261c805bd0e10218923cc262b9159a49c29a7a1c1afa"},
+ {file = "matplotlib-3.8.2.tar.gz", hash = "sha256:01a978b871b881ee76017152f1f1a0cbf6bd5f7b8ff8c96df0df1bd57d8755a1"},
]
[package.dependencies]
@@ -1685,6 +1714,24 @@ files = [
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
+[[package]]
+name = "networkx"
+version = "3.2.1"
+description = "Python package for creating and manipulating graphs and networks"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"},
+ {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"},
+]
+
+[package.extras]
+default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"]
+developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"]
+doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"]
+extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"]
+test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"]
+
[[package]]
name = "numba"
version = "0.56.4"
@@ -1811,67 +1858,71 @@ files = [
[[package]]
name = "pandas"
-version = "2.1.2"
+version = "2.2.0"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = true
python-versions = ">=3.9"
files = [
- {file = "pandas-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:24057459f19db9ebb02984c6fdd164a970b31a95f38e4a49cf7615b36a1b532c"},
- {file = "pandas-2.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6cf8fcc8a63d333970b950a7331a30544cf59b1a97baf0a7409e09eafc1ac38"},
- {file = "pandas-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ae6ffbd9d614c20d028c7117ee911fc4e266b4dca2065d5c5909e401f8ff683"},
- {file = "pandas-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eff794eeb7883c5aefb1ed572e7ff533ae779f6c6277849eab9e77986e352688"},
- {file = "pandas-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:02954e285e8e2f4006b6f22be6f0df1f1c3c97adbb7ed211c6b483426f20d5c8"},
- {file = "pandas-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:5b40c9f494e1f27588c369b9e4a6ca19cd924b3a0e1ef9ef1a8e30a07a438f43"},
- {file = "pandas-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:08d287b68fd28906a94564f15118a7ca8c242e50ae7f8bd91130c362b2108a81"},
- {file = "pandas-2.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bbd98dcdcd32f408947afdb3f7434fade6edd408c3077bbce7bd840d654d92c6"},
- {file = "pandas-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e90c95abb3285d06f6e4feedafc134306a8eced93cb78e08cf50e224d5ce22e2"},
- {file = "pandas-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52867d69a54e71666cd184b04e839cff7dfc8ed0cd6b936995117fdae8790b69"},
- {file = "pandas-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8d0382645ede2fde352da2a885aac28ec37d38587864c0689b4b2361d17b1d4c"},
- {file = "pandas-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:65177d1c519b55e5b7f094c660ed357bb7d86e799686bb71653b8a4803d8ff0d"},
- {file = "pandas-2.1.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5aa6b86802e8cf7716bf4b4b5a3c99b12d34e9c6a9d06dad254447a620437931"},
- {file = "pandas-2.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d594e2ce51b8e0b4074e6644758865dc2bb13fd654450c1eae51201260a539f1"},
- {file = "pandas-2.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3223f997b6d2ebf9c010260cf3d889848a93f5d22bb4d14cd32638b3d8bba7ad"},
- {file = "pandas-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4944dc004ca6cc701dfa19afb8bdb26ad36b9bed5bcec617d2a11e9cae6902"},
- {file = "pandas-2.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3f76280ce8ec216dde336e55b2b82e883401cf466da0fe3be317c03fb8ee7c7d"},
- {file = "pandas-2.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:7ad20d24acf3a0042512b7e8d8fdc2e827126ed519d6bd1ed8e6c14ec8a2c813"},
- {file = "pandas-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:021f09c15e1381e202d95d4a21ece8e7f2bf1388b6d7e9cae09dfe27bd2043d1"},
- {file = "pandas-2.1.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7f12b2de0060b0b858cfec0016e7d980ae5bae455a1746bfcc70929100ee633"},
- {file = "pandas-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c166b9bb27c1715bed94495d9598a7f02950b4749dba9349c1dd2cbf10729d"},
- {file = "pandas-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25c9976c17311388fcd953cb3d0697999b2205333f4e11e669d90ff8d830d429"},
- {file = "pandas-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:851b5afbb0d62f6129ae891b533aa508cc357d5892c240c91933d945fff15731"},
- {file = "pandas-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:e78507adcc730533619de07bfdd1c62b2918a68cd4419ea386e28abf7f6a1e5c"},
- {file = "pandas-2.1.2.tar.gz", hash = "sha256:52897edc2774d2779fbeb6880d2cfb305daa0b1a29c16b91f531a18918a6e0f3"},
+ {file = "pandas-2.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8108ee1712bb4fa2c16981fba7e68b3f6ea330277f5ca34fa8d557e986a11670"},
+ {file = "pandas-2.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:736da9ad4033aeab51d067fc3bd69a0ba36f5a60f66a527b3d72e2030e63280a"},
+ {file = "pandas-2.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38e0b4fc3ddceb56ec8a287313bc22abe17ab0eb184069f08fc6a9352a769b18"},
+ {file = "pandas-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20404d2adefe92aed3b38da41d0847a143a09be982a31b85bc7dd565bdba0f4e"},
+ {file = "pandas-2.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ea3ee3f125032bfcade3a4cf85131ed064b4f8dd23e5ce6fa16473e48ebcaf5"},
+ {file = "pandas-2.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f9670b3ac00a387620489dfc1bca66db47a787f4e55911f1293063a78b108df1"},
+ {file = "pandas-2.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:5a946f210383c7e6d16312d30b238fd508d80d927014f3b33fb5b15c2f895430"},
+ {file = "pandas-2.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a1b438fa26b208005c997e78672f1aa8138f67002e833312e6230f3e57fa87d5"},
+ {file = "pandas-2.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8ce2fbc8d9bf303ce54a476116165220a1fedf15985b09656b4b4275300e920b"},
+ {file = "pandas-2.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2707514a7bec41a4ab81f2ccce8b382961a29fbe9492eab1305bb075b2b1ff4f"},
+ {file = "pandas-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85793cbdc2d5bc32620dc8ffa715423f0c680dacacf55056ba13454a5be5de88"},
+ {file = "pandas-2.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:cfd6c2491dc821b10c716ad6776e7ab311f7df5d16038d0b7458bc0b67dc10f3"},
+ {file = "pandas-2.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a146b9dcacc3123aa2b399df1a284de5f46287a4ab4fbfc237eac98a92ebcb71"},
+ {file = "pandas-2.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbc1b53c0e1fdf16388c33c3cca160f798d38aea2978004dd3f4d3dec56454c9"},
+ {file = "pandas-2.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a41d06f308a024981dcaa6c41f2f2be46a6b186b902c94c2674e8cb5c42985bc"},
+ {file = "pandas-2.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:159205c99d7a5ce89ecfc37cb08ed179de7783737cea403b295b5eda8e9c56d1"},
+ {file = "pandas-2.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1e1f3861ea9132b32f2133788f3b14911b68102d562715d71bd0013bc45440"},
+ {file = "pandas-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:761cb99b42a69005dec2b08854fb1d4888fdf7b05db23a8c5a099e4b886a2106"},
+ {file = "pandas-2.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a20628faaf444da122b2a64b1e5360cde100ee6283ae8effa0d8745153809a2e"},
+ {file = "pandas-2.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f5be5d03ea2073627e7111f61b9f1f0d9625dc3c4d8dda72cc827b0c58a1d042"},
+ {file = "pandas-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:a626795722d893ed6aacb64d2401d017ddc8a2341b49e0384ab9bf7112bdec30"},
+ {file = "pandas-2.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9f66419d4a41132eb7e9a73dcec9486cf5019f52d90dd35547af11bc58f8637d"},
+ {file = "pandas-2.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57abcaeda83fb80d447f28ab0cc7b32b13978f6f733875ebd1ed14f8fbc0f4ab"},
+ {file = "pandas-2.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60f1f7dba3c2d5ca159e18c46a34e7ca7247a73b5dd1a22b6d59707ed6b899a"},
+ {file = "pandas-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb61dc8567b798b969bcc1fc964788f5a68214d333cade8319c7ab33e2b5d88a"},
+ {file = "pandas-2.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:52826b5f4ed658fa2b729264d63f6732b8b29949c7fd234510d57c61dbeadfcd"},
+ {file = "pandas-2.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bde2bc699dbd80d7bc7f9cab1e23a95c4375de615860ca089f34e7c64f4a8de7"},
+ {file = "pandas-2.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:3de918a754bbf2da2381e8a3dcc45eede8cd7775b047b923f9006d5f876802ae"},
+ {file = "pandas-2.2.0.tar.gz", hash = "sha256:30b83f7c3eb217fb4d1b494a57a2fda5444f17834f5df2de6b2ffff68dc3c8e2"},
]
[package.dependencies]
numpy = {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}
python-dateutil = ">=2.8.2"
pytz = ">=2020.1"
-tzdata = ">=2022.1"
+tzdata = ">=2022.7"
[package.extras]
-all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"]
-aws = ["s3fs (>=2022.05.0)"]
-clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"]
-compression = ["zstandard (>=0.17.0)"]
-computation = ["scipy (>=1.8.1)", "xarray (>=2022.03.0)"]
+all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"]
+aws = ["s3fs (>=2022.11.0)"]
+clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"]
+compression = ["zstandard (>=0.19.0)"]
+computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"]
consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
-excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"]
-feather = ["pyarrow (>=7.0.0)"]
-fss = ["fsspec (>=2022.05.0)"]
-gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"]
-hdf5 = ["tables (>=3.7.0)"]
-html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"]
-mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"]
-output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"]
-parquet = ["pyarrow (>=7.0.0)"]
-performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"]
-plot = ["matplotlib (>=3.6.1)"]
-postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"]
-spss = ["pyreadstat (>=1.1.5)"]
-sql-other = ["SQLAlchemy (>=1.4.36)"]
-test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
-xml = ["lxml (>=4.8.0)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"]
+feather = ["pyarrow (>=10.0.1)"]
+fss = ["fsspec (>=2022.11.0)"]
+gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"]
+hdf5 = ["tables (>=3.8.0)"]
+html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"]
+mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"]
+parquet = ["pyarrow (>=10.0.1)"]
+performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"]
+plot = ["matplotlib (>=3.6.3)"]
+postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"]
+spss = ["pyreadstat (>=1.2.0)"]
+sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"]
+test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.9.2)"]
[[package]]
name = "partd"
@@ -1893,106 +1944,124 @@ complete = ["blosc", "numpy (>=1.9.0)", "pandas (>=0.19.0)", "pyzmq"]
[[package]]
name = "pathspec"
-version = "0.11.2"
+version = "0.12.1"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"},
- {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"},
+ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
+ {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
]
[[package]]
name = "pillow"
-version = "10.1.0"
+version = "10.2.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"},
- {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"},
- {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"},
- {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"},
- {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"},
- {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"},
- {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"},
- {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"},
- {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"},
- {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"},
- {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"},
- {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"},
- {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"},
- {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"},
- {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"},
- {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"},
- {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"},
- {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"},
- {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"},
- {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"},
- {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"},
- {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"},
- {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"},
- {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"},
- {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"},
- {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"},
- {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"},
- {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"},
- {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"},
- {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"},
- {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"},
- {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"},
- {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"},
- {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"},
- {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"},
- {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"},
- {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"},
- {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"},
- {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"},
- {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"},
- {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"},
- {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"},
- {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"},
- {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"},
- {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"},
- {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"},
- {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"},
- {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"},
- {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"},
- {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"},
- {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"},
- {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"},
- {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"},
- {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"},
+ {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"},
+ {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"},
+ {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"},
+ {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"},
+ {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"},
+ {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"},
+ {file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"},
+ {file = "pillow-10.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5"},
+ {file = "pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311"},
+ {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1"},
+ {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757"},
+ {file = "pillow-10.2.0-cp311-cp311-win32.whl", hash = "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068"},
+ {file = "pillow-10.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56"},
+ {file = "pillow-10.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1"},
+ {file = "pillow-10.2.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef"},
+ {file = "pillow-10.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04"},
+ {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f"},
+ {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb"},
+ {file = "pillow-10.2.0-cp312-cp312-win32.whl", hash = "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f"},
+ {file = "pillow-10.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9"},
+ {file = "pillow-10.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48"},
+ {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"},
+ {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"},
+ {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"},
+ {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"},
+ {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"},
+ {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"},
+ {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"},
+ {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"},
+ {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"},
+ {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"},
+ {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"},
+ {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"},
+ {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"},
+ {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"},
+ {file = "pillow-10.2.0.tar.gz", hash = "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"},
]
[package.extras]
docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
+fpx = ["olefile"]
+mic = ["olefile"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+typing = ["typing-extensions"]
+xmp = ["defusedxml"]
[[package]]
name = "platformdirs"
-version = "3.11.0"
+version = "4.2.0"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "platformdirs-3.11.0-py3-none-any.whl", hash = "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"},
- {file = "platformdirs-3.11.0.tar.gz", hash = "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3"},
+ {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"},
+ {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"},
]
[package.extras]
-docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"]
-test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"]
+docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
[[package]]
name = "pluggy"
-version = "1.3.0"
+version = "1.4.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
- {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
+ {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"},
+ {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"},
]
[package.extras]
@@ -2001,46 +2070,33 @@ testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "protobuf"
-version = "4.24.4"
+version = "4.25.2"
description = ""
optional = false
-python-versions = ">=3.7"
-files = [
- {file = "protobuf-4.24.4-cp310-abi3-win32.whl", hash = "sha256:ec9912d5cb6714a5710e28e592ee1093d68c5ebfeda61983b3f40331da0b1ebb"},
- {file = "protobuf-4.24.4-cp310-abi3-win_amd64.whl", hash = "sha256:1badab72aa8a3a2b812eacfede5020472e16c6b2212d737cefd685884c191085"},
- {file = "protobuf-4.24.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8e61a27f362369c2f33248a0ff6896c20dcd47b5d48239cb9720134bef6082e4"},
- {file = "protobuf-4.24.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:bffa46ad9612e6779d0e51ae586fde768339b791a50610d85eb162daeb23661e"},
- {file = "protobuf-4.24.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:b493cb590960ff863743b9ff1452c413c2ee12b782f48beca77c8da3e2ffe9d9"},
- {file = "protobuf-4.24.4-cp37-cp37m-win32.whl", hash = "sha256:dbbed8a56e56cee8d9d522ce844a1379a72a70f453bde6243e3c86c30c2a3d46"},
- {file = "protobuf-4.24.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6b7d2e1c753715dcfe9d284a25a52d67818dd43c4932574307daf836f0071e37"},
- {file = "protobuf-4.24.4-cp38-cp38-win32.whl", hash = "sha256:02212557a76cd99574775a81fefeba8738d0f668d6abd0c6b1d3adcc75503dbe"},
- {file = "protobuf-4.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:2fa3886dfaae6b4c5ed2730d3bf47c7a38a72b3a1f0acb4d4caf68e6874b947b"},
- {file = "protobuf-4.24.4-cp39-cp39-win32.whl", hash = "sha256:b77272f3e28bb416e2071186cb39efd4abbf696d682cbb5dc731308ad37fa6dd"},
- {file = "protobuf-4.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:9fee5e8aa20ef1b84123bb9232b3f4a5114d9897ed89b4b8142d81924e05d79b"},
- {file = "protobuf-4.24.4-py3-none-any.whl", hash = "sha256:80797ce7424f8c8d2f2547e2d42bfbb6c08230ce5832d6c099a37335c9c90a92"},
- {file = "protobuf-4.24.4.tar.gz", hash = "sha256:5a70731910cd9104762161719c3d883c960151eea077134458503723b60e3667"},
-]
-
-[[package]]
-name = "py"
-version = "1.11.0"
-description = "library with cross-python path, ini-parsing, io, code, log facilities"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.8"
files = [
- {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"},
- {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"},
+ {file = "protobuf-4.25.2-cp310-abi3-win32.whl", hash = "sha256:b50c949608682b12efb0b2717f53256f03636af5f60ac0c1d900df6213910fd6"},
+ {file = "protobuf-4.25.2-cp310-abi3-win_amd64.whl", hash = "sha256:8f62574857ee1de9f770baf04dde4165e30b15ad97ba03ceac65f760ff018ac9"},
+ {file = "protobuf-4.25.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2db9f8fa64fbdcdc93767d3cf81e0f2aef176284071507e3ede160811502fd3d"},
+ {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:10894a2885b7175d3984f2be8d9850712c57d5e7587a2410720af8be56cdaf62"},
+ {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fc381d1dd0516343f1440019cedf08a7405f791cd49eef4ae1ea06520bc1c020"},
+ {file = "protobuf-4.25.2-cp38-cp38-win32.whl", hash = "sha256:33a1aeef4b1927431d1be780e87b641e322b88d654203a9e9d93f218ee359e61"},
+ {file = "protobuf-4.25.2-cp38-cp38-win_amd64.whl", hash = "sha256:47f3de503fe7c1245f6f03bea7e8d3ec11c6c4a2ea9ef910e3221c8a15516d62"},
+ {file = "protobuf-4.25.2-cp39-cp39-win32.whl", hash = "sha256:5e5c933b4c30a988b52e0b7c02641760a5ba046edc5e43d3b94a74c9fc57c1b3"},
+ {file = "protobuf-4.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:d66a769b8d687df9024f2985d5137a337f957a0916cf5464d1513eee96a63ff0"},
+ {file = "protobuf-4.25.2-py3-none-any.whl", hash = "sha256:a8b7a98d4ce823303145bf3c1a8bdb0f2f4642a414b196f04ad9853ed0c8f830"},
+ {file = "protobuf-4.25.2.tar.gz", hash = "sha256:fe599e175cb347efc8ee524bcd4b902d11f7262c0e569ececcb89995c15f0a5e"},
]
[[package]]
name = "pyaml"
-version = "23.9.7"
+version = "23.12.0"
description = "PyYAML-based module to produce a bit more pretty and readable YAML-serialized data"
optional = true
python-versions = ">=3.8"
files = [
- {file = "pyaml-23.9.7-py3-none-any.whl", hash = "sha256:fdb4c111b676d2381d1aa88c378fcde46c167575dfd688e656977a77075b692c"},
- {file = "pyaml-23.9.7.tar.gz", hash = "sha256:581ea4e99f0e308864407e04c03c609241aefa3a15dfba8964da7644baf3b217"},
+ {file = "pyaml-23.12.0-py3-none-any.whl", hash = "sha256:90407d74c95a55d9b41d3860fcc1759640444d2795df748a328d077bc4f58393"},
+ {file = "pyaml-23.12.0.tar.gz", hash = "sha256:ce6f648efdfb1b3a5579f8cedb04facf0fa1e8f64846b639309b585bb322b4e5"},
]
[package.dependencies]
@@ -2051,61 +2107,61 @@ anchors = ["unidecode"]
[[package]]
name = "pyarrow"
-version = "14.0.0"
+version = "15.0.0"
description = "Python library for Apache Arrow"
optional = true
python-versions = ">=3.8"
files = [
- {file = "pyarrow-14.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:4fce1db17efbc453080c5b306f021926de7c636456a128328797e574c151f81a"},
- {file = "pyarrow-14.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:28de7c05b4d7a71ec660360639cc9b65ceb1175e0e9d4dfccd879a1545bc38f7"},
- {file = "pyarrow-14.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1541e9209c094e7f4d7b43fdd9de3a8c71d3069cf6fc03b59bf5774042411849"},
- {file = "pyarrow-14.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c05e6c45d303c80e41ab04996430a0251321f70986ed51213903ea7bc0b7efd"},
- {file = "pyarrow-14.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:426ffec63ab9b4dff23dec51be2150e3a4a99eb38e66c10a70e2c48779fe9c9d"},
- {file = "pyarrow-14.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:968844f591902160bd3c9ee240ce8822a3b4e7de731e91daea76ad43fe0ff062"},
- {file = "pyarrow-14.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dcedbc0b4ea955c530145acfe99e324875c386419a09db150291a24cb01aeb81"},
- {file = "pyarrow-14.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:97993a12aacc781efad9c92d4545a877e803c4d106d34237ec4ce987bec825a3"},
- {file = "pyarrow-14.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80225768d94024d59a31320374f5e6abf8899866c958dfb4f4ea8e2d9ec91bde"},
- {file = "pyarrow-14.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b61546977a8bd7e3d0c697ede723341ef4737e761af2239aef6e1db447f97727"},
- {file = "pyarrow-14.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42509e6c93b4a1c8ae8ccd939a43f437097783fe130a1991497a6a1abbba026f"},
- {file = "pyarrow-14.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:3eccce331a1392e46573f2ce849a9ee3c074e0d7008e9be0b44566ac149fd6a1"},
- {file = "pyarrow-14.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:ecc463c45f2b6b36431f5f2025842245e8c15afe4d42072230575785f3bb00c6"},
- {file = "pyarrow-14.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:4362ed90def81640addcd521811dd16a13015f0a8255bec324a41262c1524b6c"},
- {file = "pyarrow-14.0.0-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:2fbb7ab62537782c5ab31aa08db0e1f6de92c2c515fdfc0790128384e919adcb"},
- {file = "pyarrow-14.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad7095f8f0fe0bfa3d3fca1909b8fa15c70e630b0cc1ff8d35e143f5e2704064"},
- {file = "pyarrow-14.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6602272fce71c0fb64f266e7cdbe51b93b00c22fc1bb57f2b0cb681c4aeedf4"},
- {file = "pyarrow-14.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2b8f87951b08a3e72265c8963da3fe4f737bb81290269037e047dd172aa591"},
- {file = "pyarrow-14.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a1c9675966662a042caebbaafa1ae7fc26291287ebc3da06aa63ad74c323ec30"},
- {file = "pyarrow-14.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:771079fddc0b4440c41af541dbdebc711a7062c93d3c4764476a9442606977db"},
- {file = "pyarrow-14.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:c4096136318de1c4937370c0c365f949961c371201c396d8cc94a353f342069d"},
- {file = "pyarrow-14.0.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:6c94056fb5f0ee0bae2206c3f776881e1db2bd0d133d06805755ae7ac5145349"},
- {file = "pyarrow-14.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:687d0df1e08876b2d24d42abae129742fc655367e3fe6700aa4d79fcf2e3215e"},
- {file = "pyarrow-14.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f4054e5ee6c88ca256a67fc8b27f9c59bcd385216346265831d462a6069033f"},
- {file = "pyarrow-14.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:768b962e4c042ab2c96576ca0757935472e220d11af855c7d0be3279d7fced5f"},
- {file = "pyarrow-14.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:77293b1319c7044f68ebfa43db8c929a0a5254ce371f1a0873d343f1460171d0"},
- {file = "pyarrow-14.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d2bc7c53941d85f0133b1bd5a814bca0af213922f50d8a8dc0eed4d9ed477845"},
- {file = "pyarrow-14.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:378955365dd087c285ef4f34ad939d7e551b7715326710e8cd21cfa2ce511bd7"},
- {file = "pyarrow-14.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:f05e81b4c621e6ad4bcd8f785e3aa1d6c49a935818b809ea6e7bf206a5b1a4e8"},
- {file = "pyarrow-14.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6867f6a8057eaef5a7ac6d27fe5518133f67973c5d4295d79a943458350e7c61"},
- {file = "pyarrow-14.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca54b87c46abdfe027f18f959ca388102bd7326c344838f72244807462d091b2"},
- {file = "pyarrow-14.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35abf61bd0cc9daca3afc715f6ba74ea83d792fa040025352624204bec66bf6a"},
- {file = "pyarrow-14.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:65c377523b369f7ef1ba02be814e832443bb3b15065010838f02dae5bdc0f53c"},
- {file = "pyarrow-14.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8a1e470e4b5f7bda7bede0410291daec55ab69f346d77795d34fd6a45b41579"},
- {file = "pyarrow-14.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:466c1a5a7a4b279cfa363ac34dedd0c3c6af388cec9e6a468ffc095a6627849a"},
- {file = "pyarrow-14.0.0.tar.gz", hash = "sha256:45d3324e1c9871a07de6b4d514ebd73225490963a6dd46c64c465c4b6079fe1e"},
-]
-
-[package.dependencies]
-numpy = ">=1.16.6"
+ {file = "pyarrow-15.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d"},
+ {file = "pyarrow-15.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a6bdb314affa9c2e0d5dddf3d9cbb9ef4a8dddaa68669975287d47ece67642"},
+ {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66958fd1771a4d4b754cd385835e66a3ef6b12611e001d4e5edfcef5f30391e2"},
+ {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f500956a49aadd907eaa21d4fff75f73954605eaa41f61cb94fb008cf2e00c6"},
+ {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6f87d9c4f09e049c2cade559643424da84c43a35068f2a1c4653dc5b1408a929"},
+ {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85239b9f93278e130d86c0e6bb455dcb66fc3fd891398b9d45ace8799a871a1e"},
+ {file = "pyarrow-15.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b8d43e31ca16aa6e12402fcb1e14352d0d809de70edd185c7650fe80e0769e3"},
+ {file = "pyarrow-15.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:fa7cd198280dbd0c988df525e50e35b5d16873e2cdae2aaaa6363cdb64e3eec5"},
+ {file = "pyarrow-15.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8780b1a29d3c8b21ba6b191305a2a607de2e30dab399776ff0aa09131e266340"},
+ {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0ec198ccc680f6c92723fadcb97b74f07c45ff3fdec9dd765deb04955ccf19"},
+ {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036a7209c235588c2f07477fe75c07e6caced9b7b61bb897c8d4e52c4b5f9555"},
+ {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2bd8a0e5296797faf9a3294e9fa2dc67aa7f10ae2207920dbebb785c77e9dbe5"},
+ {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e8ebed6053dbe76883a822d4e8da36860f479d55a762bd9e70d8494aed87113e"},
+ {file = "pyarrow-15.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:17d53a9d1b2b5bd7d5e4cd84d018e2a45bc9baaa68f7e6e3ebed45649900ba99"},
+ {file = "pyarrow-15.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9950a9c9df24090d3d558b43b97753b8f5867fb8e521f29876aa021c52fda351"},
+ {file = "pyarrow-15.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:003d680b5e422d0204e7287bb3fa775b332b3fce2996aa69e9adea23f5c8f970"},
+ {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f75fce89dad10c95f4bf590b765e3ae98bcc5ba9f6ce75adb828a334e26a3d40"},
+ {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca9cb0039923bec49b4fe23803807e4ef39576a2bec59c32b11296464623dc2"},
+ {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ed5a78ed29d171d0acc26a305a4b7f83c122d54ff5270810ac23c75813585e4"},
+ {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6eda9e117f0402dfcd3cd6ec9bfee89ac5071c48fc83a84f3075b60efa96747f"},
+ {file = "pyarrow-15.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a3a6180c0e8f2727e6f1b1c87c72d3254cac909e609f35f22532e4115461177"},
+ {file = "pyarrow-15.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:19a8918045993349b207de72d4576af0191beef03ea655d8bdb13762f0cd6eac"},
+ {file = "pyarrow-15.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0ec076b32bacb6666e8813a22e6e5a7ef1314c8069d4ff345efa6246bc38593"},
+ {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5db1769e5d0a77eb92344c7382d6543bea1164cca3704f84aa44e26c67e320fb"},
+ {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2617e3bf9df2a00020dd1c1c6dce5cc343d979efe10bc401c0632b0eef6ef5b"},
+ {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:d31c1d45060180131caf10f0f698e3a782db333a422038bf7fe01dace18b3a31"},
+ {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:c8c287d1d479de8269398b34282e206844abb3208224dbdd7166d580804674b7"},
+ {file = "pyarrow-15.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:07eb7f07dc9ecbb8dace0f58f009d3a29ee58682fcdc91337dfeb51ea618a75b"},
+ {file = "pyarrow-15.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:47af7036f64fce990bb8a5948c04722e4e3ea3e13b1007ef52dfe0aa8f23cf7f"},
+ {file = "pyarrow-15.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93768ccfff85cf044c418bfeeafce9a8bb0cee091bd8fd19011aff91e58de540"},
+ {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6ee87fd6892700960d90abb7b17a72a5abb3b64ee0fe8db6c782bcc2d0dc0b4"},
+ {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:001fca027738c5f6be0b7a3159cc7ba16a5c52486db18160909a0831b063c4e4"},
+ {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:d1c48648f64aec09accf44140dccb92f4f94394b8d79976c426a5b79b11d4fa7"},
+ {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:972a0141be402bb18e3201448c8ae62958c9c7923dfaa3b3d4530c835ac81aed"},
+ {file = "pyarrow-15.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:f01fc5cf49081426429127aa2d427d9d98e1cb94a32cb961d583a70b7c4504e6"},
+ {file = "pyarrow-15.0.0.tar.gz", hash = "sha256:876858f549d540898f927eba4ef77cd549ad8d24baa3207cf1b72e5788b50e83"},
+]
+
+[package.dependencies]
+numpy = ">=1.16.6,<2"
[[package]]
name = "pyasn1"
-version = "0.5.0"
+version = "0.5.1"
description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
- {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"},
- {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"},
+ {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"},
+ {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"},
]
[[package]]
@@ -2158,17 +2214,18 @@ pybtex = ">=0.16"
[[package]]
name = "pygments"
-version = "2.16.1"
+version = "2.17.2"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.7"
files = [
- {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"},
- {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"},
+ {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"},
+ {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"},
]
[package.extras]
plugins = ["importlib-metadata"]
+windows-terminal = ["colorama (>=0.4.6)"]
[[package]]
name = "pylint"
@@ -2205,27 +2262,25 @@ diagrams = ["jinja2", "railroad-diagrams"]
[[package]]
name = "pytest"
-version = "6.2.5"
+version = "8.0.0"
description = "pytest: simple powerful testing with Python"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
files = [
- {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"},
- {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"},
+ {file = "pytest-8.0.0-py3-none-any.whl", hash = "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6"},
+ {file = "pytest-8.0.0.tar.gz", hash = "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c"},
]
[package.dependencies]
-atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""}
-attrs = ">=19.2.0"
colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
-pluggy = ">=0.12,<2.0"
-py = ">=1.8.2"
-toml = "*"
+pluggy = ">=1.3.0,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
[package.extras]
-testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-cov"
@@ -2261,13 +2316,13 @@ six = ">=1.5"
[[package]]
name = "pytz"
-version = "2023.3.post1"
+version = "2024.1"
description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
files = [
- {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"},
- {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"},
+ {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"},
+ {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"},
]
[[package]]
@@ -2331,35 +2386,31 @@ files = [
[[package]]
name = "ray"
-version = "2.7.1"
+version = "2.9.1"
description = "Ray provides a simple, universal API for building distributed applications."
optional = true
-python-versions = "*"
+python-versions = ">=3.8"
files = [
- {file = "ray-2.7.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:4a2c98ab42881836894f20408ce40c0fd7fe5da7f0bc69cf22c951ccceda55ed"},
- {file = "ray-2.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:53800aadfc07152bc8672d5fa91bb4dc17d96b572a9bd436dd00fd2e0d07ef6a"},
- {file = "ray-2.7.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:17a425b4a2c2098f78fd0ab3831a35a53608d36466453e90c30a6495e9dce354"},
- {file = "ray-2.7.1-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:9681a8a7bf081e2244360206f3cd80d1a6adb4dc6330a507fd8c78ebe6e57365"},
- {file = "ray-2.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:148c77050ceab3c90739147bb86ac535e9590046cc36364ae9eb15469ea16fbc"},
- {file = "ray-2.7.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:0b0e80e26d6899820c12301626a74a209ab29373f46caf5b48c3ae3f99ec1bc7"},
- {file = "ray-2.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b5d13e910bb3449ef7b25084dcc4f0b9a763d3aa7b2fdd39e3b4d93d8c266951"},
- {file = "ray-2.7.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:0a6e8a736fe5294a0b0064679e59e393c66942db81fdf95804bdc1495d1f1651"},
- {file = "ray-2.7.1-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:f4c9f8a813444bd5346756db1a6d6e09a805b28b5fb6831e91b8d1324c12a888"},
- {file = "ray-2.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:85a8b0f122e4c14d2ee354fce9651834f7ffc9b60ebdce023a5ba8ca5841a6ee"},
- {file = "ray-2.7.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:bfa924bbc4042e83a0f31f058f08818418307252fceeee27c4c02bc0d3c02f3f"},
- {file = "ray-2.7.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:0f5657abb376eddf6b56489082d2f94ab36597a2f25da2849e2f66476b90dcc0"},
- {file = "ray-2.7.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:d548e1c67a512975c4241be64a8df2153ae6c29ee2f5b08834fadcad7dfc94a4"},
- {file = "ray-2.7.1-cp37-cp37m-win_amd64.whl", hash = "sha256:1f4c09a81971cc54d95be55b9b413fd12121a37528b402d1861a8fa0b4e85509"},
- {file = "ray-2.7.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:1f6d2508d117aac0b880d26a4db65a9f90def2d688709b62e0d039879c3afc7a"},
- {file = "ray-2.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32a6c0866d559d4e6c623ff220cd0790d2da1f3785073a5d0444b8f0486ff541"},
- {file = "ray-2.7.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:d035642e6033f43551a0c17e2363a392739f01df6b4072c5ed71cf3096936d33"},
- {file = "ray-2.7.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:a366569d1bd220a92af0dbe092821a11d1ff8ad7b00ed4f74b8a5f380e34ccc7"},
- {file = "ray-2.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:6fe65dc7f83f1c617af3068d84f8c67f3371b1a48776e44ab6af54998891364c"},
- {file = "ray-2.7.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:3c1501ca56da394e07213efd5be42c2cf0a2eae68d76949d26a3133154d6d9ff"},
- {file = "ray-2.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57f7e05ad275317158c447680705e046410f68d2a5992e16d07bbc2cc79da2b3"},
- {file = "ray-2.7.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b5410ae53c765108c65821fc5e5968509579f98a64d275e103408e1b068e8ca8"},
- {file = "ray-2.7.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:1b096abab78b63db6c1a2633f242dd8b3c51e395b574215f3cb8e47f5d7364b9"},
- {file = "ray-2.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:c03fe26443598bd7ad1c22de4585daec324bc03eabc04d3c2f805d9697a554d6"},
+ {file = "ray-2.9.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:586d462e555ba51840fbfce4d62b0ed886930e520517b34a88befeb4fb4c244a"},
+ {file = "ray-2.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb3dbb0639fedf2bc2b98784bb94dbdc2c2a470c91c6b54e12c51d0a0069aebf"},
+ {file = "ray-2.9.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:74a1d12117e87ffd7411fadb96b40bf66ca7d32fdb2049cd3dd66705a0923f9e"},
+ {file = "ray-2.9.1-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:50436361012cefdd90ebb8c920711cb334cf64d7a5677c9b72e60d8c9e23ee70"},
+ {file = "ray-2.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:8760d406d782cbf6684c2b98c09bd4893a14c009c2287cbe65aa11cb6e7a571f"},
+ {file = "ray-2.9.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:cd974b141088b752d1eed4d6d0cf94e8ed63b97d5f1d5f5844970f3f373dde87"},
+ {file = "ray-2.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e9d99496effa490f94e43c10a09964146269733cd24610d3b6902b566190a9b"},
+ {file = "ray-2.9.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:1907649d69efdc1b9ffbc03db086f6d768216cb73908ebd4038ac5030effef9e"},
+ {file = "ray-2.9.1-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:fabc520990c1b98dde592813d62737e5e817460e0ac359f32ba029ace292cbe2"},
+ {file = "ray-2.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:bb0c83c0f40a5ab4139f9357d3fd4ef8a2e8b46f5c023fe45f305fe2297c520c"},
+ {file = "ray-2.9.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:e7b1f3284b35aa98968ba8cdc8ea43f6a0afe42090711f2db678d3f73c5cb8f9"},
+ {file = "ray-2.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:38b7a3282783f74cfd232b0e04bfde40e51e13bf3f83423ce97b2ae577a4a345"},
+ {file = "ray-2.9.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:177a5a018d9ff0eef822b279f7af62ca5f5935e4d83246105868017ee298faae"},
+ {file = "ray-2.9.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:917efa43b88d5f5de19a5ffa7c4aa0aa28399a0c33595d83c26d5b9f79dfb861"},
+ {file = "ray-2.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:94961e948763a101d99f9e9cfe8ba1d789f5ca030ebc8089fbf02da1d085f870"},
+ {file = "ray-2.9.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9efc8a2035521c5d66b625222a7b03c7759f1c0969d382697fd688577bea21a4"},
+ {file = "ray-2.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4aa6a66fb20a35ded74674ad8d48e813afd4e65a0bc8ccd99e981bccf656ce13"},
+ {file = "ray-2.9.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f063f0140bc1ea0b02f8ee59abd8e964866c1ca6c768a2b0fd19b691cf9feace"},
+ {file = "ray-2.9.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:334c47ca24dbe59e295e2d46152c09ff113f2c2cde873181da11c24dfdacfcfb"},
+ {file = "ray-2.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:c2e360743ae25babfcb436250275550fd96a567c830393ff5dd7fc708875c4c9"},
]
[package.dependencies]
@@ -2370,7 +2421,6 @@ frozenlist = "*"
fsspec = {version = "*", optional = true, markers = "extra == \"tune\""}
jsonschema = "*"
msgpack = ">=1.0.0,<2.0.0"
-numpy = {version = ">=1.19.3", markers = "python_version >= \"3.9\""}
packaging = "*"
pandas = {version = "*", optional = true, markers = "extra == \"tune\""}
protobuf = ">=3.15.3,<3.19.5 || >3.19.5"
@@ -2380,28 +2430,28 @@ requests = "*"
tensorboardX = {version = ">=1.9", optional = true, markers = "extra == \"tune\""}
[package.extras]
-air = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "fsspec", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "numpy (>=1.20)", "opencensus", "pandas", "pandas (>=1.3)", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "requests", "smart-open", "starlette", "tensorboardX (>=1.9)", "uvicorn", "virtualenv (>=20.0.24,<20.21.1)", "watchfiles"]
-all = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "dm-tree", "fastapi", "fsspec", "gpustat (>=1.0.0)", "grpcio (!=1.56.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "gymnasium (==0.28.1)", "lz4", "numpy (>=1.20)", "opencensus", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk", "pandas", "pandas (>=1.3)", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml", "ray-cpp (==2.7.1)", "requests", "rich", "scikit-image", "scipy", "smart-open", "starlette", "tensorboardX (>=1.9)", "typer", "uvicorn", "virtualenv (>=20.0.24,<20.21.1)", "watchfiles"]
+air = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "fsspec", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "numpy (>=1.20)", "opencensus", "pandas", "pandas (>=1.3)", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "starlette", "tensorboardX (>=1.9)", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"]
+all = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "dm-tree", "fastapi", "fsspec", "gpustat (>=1.0.0)", "grpcio (!=1.56.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "gymnasium (==0.28.1)", "lz4", "numpy (>=1.20)", "opencensus", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk", "pandas", "pandas (>=1.3)", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "pyyaml", "ray-cpp (==2.9.1)", "requests", "rich", "scikit-image", "scipy", "smart-open", "starlette", "tensorboardX (>=1.9)", "typer", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"]
client = ["grpcio (!=1.56.0)"]
-cpp = ["ray-cpp (==2.7.1)"]
+cpp = ["ray-cpp (==2.9.1)"]
data = ["fsspec", "numpy (>=1.20)", "pandas (>=1.3)", "pyarrow (>=6.0.1)"]
-default = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2)", "requests", "smart-open", "virtualenv (>=20.0.24,<20.21.1)"]
+default = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "virtualenv (>=20.0.24,!=20.21.1)"]
observability = ["opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk"]
rllib = ["dm-tree", "fsspec", "gymnasium (==0.28.1)", "lz4", "pandas", "pyarrow (>=6.0.1)", "pyyaml", "requests", "rich", "scikit-image", "scipy", "tensorboardX (>=1.9)", "typer"]
-serve = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2)", "requests", "smart-open", "starlette", "uvicorn", "virtualenv (>=20.0.24,<20.21.1)", "watchfiles"]
-serve-grpc = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2)", "requests", "smart-open", "starlette", "uvicorn", "virtualenv (>=20.0.24,<20.21.1)", "watchfiles"]
+serve = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "starlette", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"]
+serve-grpc = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2.0.dev0 || >=2.5.dev0,<3)", "requests", "smart-open", "starlette", "uvicorn[standard]", "virtualenv (>=20.0.24,!=20.21.1)", "watchfiles"]
train = ["fsspec", "pandas", "pyarrow (>=6.0.1)", "requests", "tensorboardX (>=1.9)"]
tune = ["fsspec", "pandas", "pyarrow (>=6.0.1)", "requests", "tensorboardX (>=1.9)"]
[[package]]
name = "referencing"
-version = "0.30.2"
+version = "0.33.0"
description = "JSON Referencing + Python"
optional = true
python-versions = ">=3.8"
files = [
- {file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"},
- {file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"},
+ {file = "referencing-0.33.0-py3-none-any.whl", hash = "sha256:39240f2ecc770258f28b642dd47fd74bc8b02484de54e1882b74b35ebd779bd5"},
+ {file = "referencing-0.33.0.tar.gz", hash = "sha256:c775fedf74bc0f9189c2a3be1c12fd03e8c23f4d371dce795df44e06c5b412f7"},
]
[package.dependencies]
@@ -2468,110 +2518,110 @@ jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
[[package]]
name = "rpds-py"
-version = "0.10.6"
+version = "0.17.1"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = true
python-versions = ">=3.8"
files = [
- {file = "rpds_py-0.10.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:6bdc11f9623870d75692cc33c59804b5a18d7b8a4b79ef0b00b773a27397d1f6"},
- {file = "rpds_py-0.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:26857f0f44f0e791f4a266595a7a09d21f6b589580ee0585f330aaccccb836e3"},
- {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7f5e15c953ace2e8dde9824bdab4bec50adb91a5663df08d7d994240ae6fa31"},
- {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61fa268da6e2e1cd350739bb61011121fa550aa2545762e3dc02ea177ee4de35"},
- {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c48f3fbc3e92c7dd6681a258d22f23adc2eb183c8cb1557d2fcc5a024e80b094"},
- {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0503c5b681566e8b722fe8c4c47cce5c7a51f6935d5c7012c4aefe952a35eed"},
- {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:734c41f9f57cc28658d98270d3436dba65bed0cfc730d115b290e970150c540d"},
- {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a5d7ed104d158c0042a6a73799cf0eb576dfd5fc1ace9c47996e52320c37cb7c"},
- {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e3df0bc35e746cce42579826b89579d13fd27c3d5319a6afca9893a9b784ff1b"},
- {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:73e0a78a9b843b8c2128028864901f55190401ba38aae685350cf69b98d9f7c9"},
- {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5ed505ec6305abd2c2c9586a7b04fbd4baf42d4d684a9c12ec6110deefe2a063"},
- {file = "rpds_py-0.10.6-cp310-none-win32.whl", hash = "sha256:d97dd44683802000277bbf142fd9f6b271746b4846d0acaf0cefa6b2eaf2a7ad"},
- {file = "rpds_py-0.10.6-cp310-none-win_amd64.whl", hash = "sha256:b455492cab07107bfe8711e20cd920cc96003e0da3c1f91297235b1603d2aca7"},
- {file = "rpds_py-0.10.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:e8cdd52744f680346ff8c1ecdad5f4d11117e1724d4f4e1874f3a67598821069"},
- {file = "rpds_py-0.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66414dafe4326bca200e165c2e789976cab2587ec71beb80f59f4796b786a238"},
- {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc435d059f926fdc5b05822b1be4ff2a3a040f3ae0a7bbbe672babb468944722"},
- {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e7f2219cb72474571974d29a191714d822e58be1eb171f229732bc6fdedf0ac"},
- {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3953c6926a63f8ea5514644b7afb42659b505ece4183fdaaa8f61d978754349e"},
- {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bb2e4826be25e72013916eecd3d30f66fd076110de09f0e750163b416500721"},
- {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bf347b495b197992efc81a7408e9a83b931b2f056728529956a4d0858608b80"},
- {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:102eac53bb0bf0f9a275b438e6cf6904904908562a1463a6fc3323cf47d7a532"},
- {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40f93086eef235623aa14dbddef1b9fb4b22b99454cb39a8d2e04c994fb9868c"},
- {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e22260a4741a0e7a206e175232867b48a16e0401ef5bce3c67ca5b9705879066"},
- {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f4e56860a5af16a0fcfa070a0a20c42fbb2012eed1eb5ceeddcc7f8079214281"},
- {file = "rpds_py-0.10.6-cp311-none-win32.whl", hash = "sha256:0774a46b38e70fdde0c6ded8d6d73115a7c39d7839a164cc833f170bbf539116"},
- {file = "rpds_py-0.10.6-cp311-none-win_amd64.whl", hash = "sha256:4a5ee600477b918ab345209eddafde9f91c0acd931f3776369585a1c55b04c57"},
- {file = "rpds_py-0.10.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:5ee97c683eaface61d38ec9a489e353d36444cdebb128a27fe486a291647aff6"},
- {file = "rpds_py-0.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0713631d6e2d6c316c2f7b9320a34f44abb644fc487b77161d1724d883662e31"},
- {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5a53f5998b4bbff1cb2e967e66ab2addc67326a274567697379dd1e326bded7"},
- {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a555ae3d2e61118a9d3e549737bb4a56ff0cec88a22bd1dfcad5b4e04759175"},
- {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:945eb4b6bb8144909b203a88a35e0a03d22b57aefb06c9b26c6e16d72e5eb0f0"},
- {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:52c215eb46307c25f9fd2771cac8135d14b11a92ae48d17968eda5aa9aaf5071"},
- {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1b3cd23d905589cb205710b3988fc8f46d4a198cf12862887b09d7aaa6bf9b9"},
- {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64ccc28683666672d7c166ed465c09cee36e306c156e787acef3c0c62f90da5a"},
- {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:516a611a2de12fbea70c78271e558f725c660ce38e0006f75139ba337d56b1f6"},
- {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9ff93d3aedef11f9c4540cf347f8bb135dd9323a2fc705633d83210d464c579d"},
- {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d858532212f0650be12b6042ff4378dc2efbb7792a286bee4489eaa7ba010586"},
- {file = "rpds_py-0.10.6-cp312-none-win32.whl", hash = "sha256:3c4eff26eddac49d52697a98ea01b0246e44ca82ab09354e94aae8823e8bda02"},
- {file = "rpds_py-0.10.6-cp312-none-win_amd64.whl", hash = "sha256:150eec465dbc9cbca943c8e557a21afdcf9bab8aaabf386c44b794c2f94143d2"},
- {file = "rpds_py-0.10.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:cf693eb4a08eccc1a1b636e4392322582db2a47470d52e824b25eca7a3977b53"},
- {file = "rpds_py-0.10.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4134aa2342f9b2ab6c33d5c172e40f9ef802c61bb9ca30d21782f6e035ed0043"},
- {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e782379c2028a3611285a795b89b99a52722946d19fc06f002f8b53e3ea26ea9"},
- {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f6da6d842195fddc1cd34c3da8a40f6e99e4a113918faa5e60bf132f917c247"},
- {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a9fe992887ac68256c930a2011255bae0bf5ec837475bc6f7edd7c8dfa254e"},
- {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b788276a3c114e9f51e257f2a6f544c32c02dab4aa7a5816b96444e3f9ffc336"},
- {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa1afc70a02645809c744eefb7d6ee8fef7e2fad170ffdeacca267fd2674f13"},
- {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bddd4f91eede9ca5275e70479ed3656e76c8cdaaa1b354e544cbcf94c6fc8ac4"},
- {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:775049dfa63fb58293990fc59473e659fcafd953bba1d00fc5f0631a8fd61977"},
- {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c6c45a2d2b68c51fe3d9352733fe048291e483376c94f7723458cfd7b473136b"},
- {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0699ab6b8c98df998c3eacf51a3b25864ca93dab157abe358af46dc95ecd9801"},
- {file = "rpds_py-0.10.6-cp38-none-win32.whl", hash = "sha256:ebdab79f42c5961682654b851f3f0fc68e6cc7cd8727c2ac4ffff955154123c1"},
- {file = "rpds_py-0.10.6-cp38-none-win_amd64.whl", hash = "sha256:24656dc36f866c33856baa3ab309da0b6a60f37d25d14be916bd3e79d9f3afcf"},
- {file = "rpds_py-0.10.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:0898173249141ee99ffcd45e3829abe7bcee47d941af7434ccbf97717df020e5"},
- {file = "rpds_py-0.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9184fa6c52a74a5521e3e87badbf9692549c0fcced47443585876fcc47e469"},
- {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5752b761902cd15073a527b51de76bbae63d938dc7c5c4ad1e7d8df10e765138"},
- {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99a57006b4ec39dbfb3ed67e5b27192792ffb0553206a107e4aadb39c5004cd5"},
- {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09586f51a215d17efdb3a5f090d7cbf1633b7f3708f60a044757a5d48a83b393"},
- {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e225a6a14ecf44499aadea165299092ab0cba918bb9ccd9304eab1138844490b"},
- {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2039f8d545f20c4e52713eea51a275e62153ee96c8035a32b2abb772b6fc9e5"},
- {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34ad87a831940521d462ac11f1774edf867c34172010f5390b2f06b85dcc6014"},
- {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dcdc88b6b01015da066da3fb76545e8bb9a6880a5ebf89e0f0b2e3ca557b3ab7"},
- {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:25860ed5c4e7f5e10c496ea78af46ae8d8468e0be745bd233bab9ca99bfd2647"},
- {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7854a207ef77319ec457c1eb79c361b48807d252d94348305db4f4b62f40f7f3"},
- {file = "rpds_py-0.10.6-cp39-none-win32.whl", hash = "sha256:e6fcc026a3f27c1282c7ed24b7fcac82cdd70a0e84cc848c0841a3ab1e3dea2d"},
- {file = "rpds_py-0.10.6-cp39-none-win_amd64.whl", hash = "sha256:e98c4c07ee4c4b3acf787e91b27688409d918212dfd34c872201273fdd5a0e18"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:68fe9199184c18d997d2e4293b34327c0009a78599ce703e15cd9a0f47349bba"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3339eca941568ed52d9ad0f1b8eb9fe0958fa245381747cecf2e9a78a5539c42"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a360cfd0881d36c6dc271992ce1eda65dba5e9368575663de993eeb4523d895f"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:031f76fc87644a234883b51145e43985aa2d0c19b063e91d44379cd2786144f8"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f36a9d751f86455dc5278517e8b65580eeee37d61606183897f122c9e51cef3"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:052a832078943d2b2627aea0d19381f607fe331cc0eb5df01991268253af8417"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:023574366002bf1bd751ebaf3e580aef4a468b3d3c216d2f3f7e16fdabd885ed"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:defa2c0c68734f4a82028c26bcc85e6b92cced99866af118cd6a89b734ad8e0d"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:879fb24304ead6b62dbe5034e7b644b71def53c70e19363f3c3be2705c17a3b4"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:53c43e10d398e365da2d4cc0bcaf0854b79b4c50ee9689652cdc72948e86f487"},
- {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:3777cc9dea0e6c464e4b24760664bd8831738cc582c1d8aacf1c3f546bef3f65"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:40578a6469e5d1df71b006936ce95804edb5df47b520c69cf5af264d462f2cbb"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:cf71343646756a072b85f228d35b1d7407da1669a3de3cf47f8bbafe0c8183a4"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10f32b53f424fc75ff7b713b2edb286fdbfc94bf16317890260a81c2c00385dc"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:81de24a1c51cfb32e1fbf018ab0bdbc79c04c035986526f76c33e3f9e0f3356c"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac17044876e64a8ea20ab132080ddc73b895b4abe9976e263b0e30ee5be7b9c2"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e8a78bd4879bff82daef48c14d5d4057f6856149094848c3ed0ecaf49f5aec2"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78ca33811e1d95cac8c2e49cb86c0fb71f4d8409d8cbea0cb495b6dbddb30a55"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c63c3ef43f0b3fb00571cff6c3967cc261c0ebd14a0a134a12e83bdb8f49f21f"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:7fde6d0e00b2fd0dbbb40c0eeec463ef147819f23725eda58105ba9ca48744f4"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:79edd779cfc46b2e15b0830eecd8b4b93f1a96649bcb502453df471a54ce7977"},
- {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9164ec8010327ab9af931d7ccd12ab8d8b5dc2f4c6a16cbdd9d087861eaaefa1"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d29ddefeab1791e3c751e0189d5f4b3dbc0bbe033b06e9c333dca1f99e1d523e"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:30adb75ecd7c2a52f5e76af50644b3e0b5ba036321c390b8e7ec1bb2a16dd43c"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd609fafdcdde6e67a139898196698af37438b035b25ad63704fd9097d9a3482"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6eef672de005736a6efd565577101277db6057f65640a813de6c2707dc69f396"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cf4393c7b41abbf07c88eb83e8af5013606b1cdb7f6bc96b1b3536b53a574b8"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad857f42831e5b8d41a32437f88d86ead6c191455a3499c4b6d15e007936d4cf"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7360573f1e046cb3b0dceeb8864025aa78d98be4bb69f067ec1c40a9e2d9df"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d08f63561c8a695afec4975fae445245386d645e3e446e6f260e81663bfd2e38"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:f0f17f2ce0f3529177a5fff5525204fad7b43dd437d017dd0317f2746773443d"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:442626328600bde1d09dc3bb00434f5374948838ce75c41a52152615689f9403"},
- {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e9616f5bd2595f7f4a04b67039d890348ab826e943a9bfdbe4938d0eba606971"},
- {file = "rpds_py-0.10.6.tar.gz", hash = "sha256:4ce5a708d65a8dbf3748d2474b580d606b1b9f91b5c6ab2a316e0b0cf7a4ba50"},
+ {file = "rpds_py-0.17.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:4128980a14ed805e1b91a7ed551250282a8ddf8201a4e9f8f5b7e6225f54170d"},
+ {file = "rpds_py-0.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ff1dcb8e8bc2261a088821b2595ef031c91d499a0c1b031c152d43fe0a6ecec8"},
+ {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d65e6b4f1443048eb7e833c2accb4fa7ee67cc7d54f31b4f0555b474758bee55"},
+ {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a71169d505af63bb4d20d23a8fbd4c6ce272e7bce6cc31f617152aa784436f29"},
+ {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:436474f17733c7dca0fbf096d36ae65277e8645039df12a0fa52445ca494729d"},
+ {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10162fe3f5f47c37ebf6d8ff5a2368508fe22007e3077bf25b9c7d803454d921"},
+ {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:720215373a280f78a1814becb1312d4e4d1077b1202a56d2b0815e95ccb99ce9"},
+ {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70fcc6c2906cfa5c6a552ba7ae2ce64b6c32f437d8f3f8eea49925b278a61453"},
+ {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91e5a8200e65aaac342a791272c564dffcf1281abd635d304d6c4e6b495f29dc"},
+ {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:99f567dae93e10be2daaa896e07513dd4bf9c2ecf0576e0533ac36ba3b1d5394"},
+ {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:24e4900a6643f87058a27320f81336d527ccfe503984528edde4bb660c8c8d59"},
+ {file = "rpds_py-0.17.1-cp310-none-win32.whl", hash = "sha256:0bfb09bf41fe7c51413f563373e5f537eaa653d7adc4830399d4e9bdc199959d"},
+ {file = "rpds_py-0.17.1-cp310-none-win_amd64.whl", hash = "sha256:20de7b7179e2031a04042e85dc463a93a82bc177eeba5ddd13ff746325558aa6"},
+ {file = "rpds_py-0.17.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:65dcf105c1943cba45d19207ef51b8bc46d232a381e94dd38719d52d3980015b"},
+ {file = "rpds_py-0.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:01f58a7306b64e0a4fe042047dd2b7d411ee82e54240284bab63e325762c1147"},
+ {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:071bc28c589b86bc6351a339114fb7a029f5cddbaca34103aa573eba7b482382"},
+ {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae35e8e6801c5ab071b992cb2da958eee76340e6926ec693b5ff7d6381441745"},
+ {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149c5cd24f729e3567b56e1795f74577aa3126c14c11e457bec1b1c90d212e38"},
+ {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e796051f2070f47230c745d0a77a91088fbee2cc0502e9b796b9c6471983718c"},
+ {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e820ee1004327609b28db8307acc27f5f2e9a0b185b2064c5f23e815f248f8"},
+ {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1957a2ab607f9added64478a6982742eb29f109d89d065fa44e01691a20fc20a"},
+ {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8587fd64c2a91c33cdc39d0cebdaf30e79491cc029a37fcd458ba863f8815383"},
+ {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4dc889a9d8a34758d0fcc9ac86adb97bab3fb7f0c4d29794357eb147536483fd"},
+ {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2953937f83820376b5979318840f3ee47477d94c17b940fe31d9458d79ae7eea"},
+ {file = "rpds_py-0.17.1-cp311-none-win32.whl", hash = "sha256:1bfcad3109c1e5ba3cbe2f421614e70439f72897515a96c462ea657261b96518"},
+ {file = "rpds_py-0.17.1-cp311-none-win_amd64.whl", hash = "sha256:99da0a4686ada4ed0f778120a0ea8d066de1a0a92ab0d13ae68492a437db78bf"},
+ {file = "rpds_py-0.17.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1dc29db3900cb1bb40353772417800f29c3d078dbc8024fd64655a04ee3c4bdf"},
+ {file = "rpds_py-0.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82ada4a8ed9e82e443fcef87e22a3eed3654dd3adf6e3b3a0deb70f03e86142a"},
+ {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d36b2b59e8cc6e576f8f7b671e32f2ff43153f0ad6d0201250a7c07f25d570e"},
+ {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3677fcca7fb728c86a78660c7fb1b07b69b281964673f486ae72860e13f512ad"},
+ {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:516fb8c77805159e97a689e2f1c80655c7658f5af601c34ffdb916605598cda2"},
+ {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df3b6f45ba4515632c5064e35ca7f31d51d13d1479673185ba8f9fefbbed58b9"},
+ {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a967dd6afda7715d911c25a6ba1517975acd8d1092b2f326718725461a3d33f9"},
+ {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dbbb95e6fc91ea3102505d111b327004d1c4ce98d56a4a02e82cd451f9f57140"},
+ {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02866e060219514940342a1f84303a1ef7a1dad0ac311792fbbe19b521b489d2"},
+ {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2528ff96d09f12e638695f3a2e0c609c7b84c6df7c5ae9bfeb9252b6fa686253"},
+ {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bd345a13ce06e94c753dab52f8e71e5252aec1e4f8022d24d56decd31e1b9b23"},
+ {file = "rpds_py-0.17.1-cp312-none-win32.whl", hash = "sha256:2a792b2e1d3038daa83fa474d559acfd6dc1e3650ee93b2662ddc17dbff20ad1"},
+ {file = "rpds_py-0.17.1-cp312-none-win_amd64.whl", hash = "sha256:292f7344a3301802e7c25c53792fae7d1593cb0e50964e7bcdcc5cf533d634e3"},
+ {file = "rpds_py-0.17.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8ffe53e1d8ef2520ebcf0c9fec15bb721da59e8ef283b6ff3079613b1e30513d"},
+ {file = "rpds_py-0.17.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4341bd7579611cf50e7b20bb8c2e23512a3dc79de987a1f411cb458ab670eb90"},
+ {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4eb548daf4836e3b2c662033bfbfc551db58d30fd8fe660314f86bf8510b93"},
+ {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b686f25377f9c006acbac63f61614416a6317133ab7fafe5de5f7dc8a06d42eb"},
+ {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e21b76075c01d65d0f0f34302b5a7457d95721d5e0667aea65e5bb3ab415c25"},
+ {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b86b21b348f7e5485fae740d845c65a880f5d1eda1e063bc59bef92d1f7d0c55"},
+ {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f175e95a197f6a4059b50757a3dca33b32b61691bdbd22c29e8a8d21d3914cae"},
+ {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1701fc54460ae2e5efc1dd6350eafd7a760f516df8dbe51d4a1c79d69472fbd4"},
+ {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9051e3d2af8f55b42061603e29e744724cb5f65b128a491446cc029b3e2ea896"},
+ {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7450dbd659fed6dd41d1a7d47ed767e893ba402af8ae664c157c255ec6067fde"},
+ {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5a024fa96d541fd7edaa0e9d904601c6445e95a729a2900c5aec6555fe921ed6"},
+ {file = "rpds_py-0.17.1-cp38-none-win32.whl", hash = "sha256:da1ead63368c04a9bded7904757dfcae01eba0e0f9bc41d3d7f57ebf1c04015a"},
+ {file = "rpds_py-0.17.1-cp38-none-win_amd64.whl", hash = "sha256:841320e1841bb53fada91c9725e766bb25009cfd4144e92298db296fb6c894fb"},
+ {file = "rpds_py-0.17.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f6c43b6f97209e370124baf2bf40bb1e8edc25311a158867eb1c3a5d449ebc7a"},
+ {file = "rpds_py-0.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7d63ec01fe7c76c2dbb7e972fece45acbb8836e72682bde138e7e039906e2c"},
+ {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81038ff87a4e04c22e1d81f947c6ac46f122e0c80460b9006e6517c4d842a6ec"},
+ {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:810685321f4a304b2b55577c915bece4c4a06dfe38f6e62d9cc1d6ca8ee86b99"},
+ {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:25f071737dae674ca8937a73d0f43f5a52e92c2d178330b4c0bb6ab05586ffa6"},
+ {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa5bfb13f1e89151ade0eb812f7b0d7a4d643406caaad65ce1cbabe0a66d695f"},
+ {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfe07308b311a8293a0d5ef4e61411c5c20f682db6b5e73de6c7c8824272c256"},
+ {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a000133a90eea274a6f28adc3084643263b1e7c1a5a66eb0a0a7a36aa757ed74"},
+ {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d0e8a6434a3fbf77d11448c9c25b2f25244226cfbec1a5159947cac5b8c5fa4"},
+ {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:efa767c220d94aa4ac3a6dd3aeb986e9f229eaf5bce92d8b1b3018d06bed3772"},
+ {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dbc56680ecf585a384fbd93cd42bc82668b77cb525343170a2d86dafaed2a84b"},
+ {file = "rpds_py-0.17.1-cp39-none-win32.whl", hash = "sha256:270987bc22e7e5a962b1094953ae901395e8c1e1e83ad016c5cfcfff75a15a3f"},
+ {file = "rpds_py-0.17.1-cp39-none-win_amd64.whl", hash = "sha256:2a7b2f2f56a16a6d62e55354dd329d929560442bd92e87397b7a9586a32e3e76"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a3264e3e858de4fc601741498215835ff324ff2482fd4e4af61b46512dd7fc83"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f2f3b28b40fddcb6c1f1f6c88c6f3769cd933fa493ceb79da45968a21dccc920"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9584f8f52010295a4a417221861df9bea4c72d9632562b6e59b3c7b87a1522b7"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c64602e8be701c6cfe42064b71c84ce62ce66ddc6422c15463fd8127db3d8066"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:060f412230d5f19fc8c8b75f315931b408d8ebf56aec33ef4168d1b9e54200b1"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9412abdf0ba70faa6e2ee6c0cc62a8defb772e78860cef419865917d86c7342"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9737bdaa0ad33d34c0efc718741abaafce62fadae72c8b251df9b0c823c63b22"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9f0e4dc0f17dcea4ab9d13ac5c666b6b5337042b4d8f27e01b70fae41dd65c57"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1db228102ab9d1ff4c64148c96320d0be7044fa28bd865a9ce628ce98da5973d"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:d8bbd8e56f3ba25a7d0cf980fc42b34028848a53a0e36c9918550e0280b9d0b6"},
+ {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:be22ae34d68544df293152b7e50895ba70d2a833ad9566932d750d3625918b82"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bf046179d011e6114daf12a534d874958b039342b347348a78b7cdf0dd9d6041"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:1a746a6d49665058a5896000e8d9d2f1a6acba8a03b389c1e4c06e11e0b7f40d"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b8bf5b8db49d8fd40f54772a1dcf262e8be0ad2ab0206b5a2ec109c176c0a4"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f7f4cb1f173385e8a39c29510dd11a78bf44e360fb75610594973f5ea141028b"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7fbd70cb8b54fe745301921b0816c08b6d917593429dfc437fd024b5ba713c58"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bdf1303df671179eaf2cb41e8515a07fc78d9d00f111eadbe3e14262f59c3d0"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad059a4bd14c45776600d223ec194e77db6c20255578bb5bcdd7c18fd169361"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3664d126d3388a887db44c2e293f87d500c4184ec43d5d14d2d2babdb4c64cad"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:698ea95a60c8b16b58be9d854c9f993c639f5c214cf9ba782eca53a8789d6b19"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:c3d2010656999b63e628a3c694f23020322b4178c450dc478558a2b6ef3cb9bb"},
+ {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:938eab7323a736533f015e6069a7d53ef2dcc841e4e533b782c2bfb9fb12d84b"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e626b365293a2142a62b9a614e1f8e331b28f3ca57b9f05ebbf4cf2a0f0bdc5"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:380e0df2e9d5d5d339803cfc6d183a5442ad7ab3c63c2a0982e8c824566c5ccc"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b760a56e080a826c2e5af09002c1a037382ed21d03134eb6294812dda268c811"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5576ee2f3a309d2bb403ec292d5958ce03953b0e57a11d224c1f134feaf8c40f"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3c3461ebb4c4f1bbc70b15d20b565759f97a5aaf13af811fcefc892e9197ba"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:637b802f3f069a64436d432117a7e58fab414b4e27a7e81049817ae94de45d8d"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffee088ea9b593cc6160518ba9bd319b5475e5f3e578e4552d63818773c6f56a"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ac732390d529d8469b831949c78085b034bff67f584559340008d0f6041a049"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:93432e747fb07fa567ad9cc7aaadd6e29710e515aabf939dfbed8046041346c6"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b7d9ca34542099b4e185b3c2a2b2eda2e318a7dbde0b0d83357a6d4421b5296"},
+ {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:0387ce69ba06e43df54e43968090f3626e231e4bc9150e4c3246947567695f68"},
+ {file = "rpds_py-0.17.1.tar.gz", hash = "sha256:0210b2668f24c078307260bf88bdac9d6f1093635df5123789bfee4d8d7fc8e7"},
]
[[package]]
@@ -2590,50 +2640,65 @@ pyasn1 = ">=0.1.3"
[[package]]
name = "scikit-learn"
-version = "1.3.2"
+version = "1.4.0"
description = "A set of python modules for machine learning and data mining"
optional = true
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"},
- {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"},
- {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"},
- {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"},
- {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"},
- {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"},
- {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"},
- {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"},
- {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"},
- {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"},
- {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"},
- {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"},
- {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"},
- {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"},
- {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"},
- {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"},
- {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"},
- {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"},
- {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"},
- {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"},
- {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"},
- {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"},
- {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"},
- {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"},
- {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"},
- {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"},
-]
-
-[package.dependencies]
-joblib = ">=1.1.1"
-numpy = ">=1.17.3,<2.0"
-scipy = ">=1.5.0"
+ {file = "scikit-learn-1.4.0.tar.gz", hash = "sha256:d4373c984eba20e393216edd51a3e3eede56cbe93d4247516d205643c3b93121"},
+ {file = "scikit_learn-1.4.0-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fce93a7473e2f4ee4cc280210968288d6a7d7ad8dc6fa7bb7892145e407085f9"},
+ {file = "scikit_learn-1.4.0-1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d77df3d1e15fc37a9329999979fa7868ba8655dbab21fe97fc7ddabac9e08cc7"},
+ {file = "scikit_learn-1.4.0-1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2404659fedec40eeafa310cd14d613e564d13dbf8f3c752d31c095195ec05de6"},
+ {file = "scikit_learn-1.4.0-1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e98632da8f6410e6fb6bf66937712c949b4010600ccd3f22a5388a83e610cc3c"},
+ {file = "scikit_learn-1.4.0-1-cp310-cp310-win_amd64.whl", hash = "sha256:11b3b140f70fbc9f6a08884631ae8dd60a4bb2d7d6d1de92738ea42b740d8992"},
+ {file = "scikit_learn-1.4.0-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8341eabdc754d5ab91641a7763243845e96b6d68e03e472531e88a4f1b09f21"},
+ {file = "scikit_learn-1.4.0-1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d1f6bce875ac2bb6b52514f67c185c564ccd299a05b65b7bab091a4c13dde12d"},
+ {file = "scikit_learn-1.4.0-1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c408b46b2fd61952d519ea1af2f8f0a7a703e1433923ab1704c4131520b2083b"},
+ {file = "scikit_learn-1.4.0-1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b465dd1dcd237b7b1dcd1a9048ccbf70a98c659474324fa708464c3a2533fad"},
+ {file = "scikit_learn-1.4.0-1-cp311-cp311-win_amd64.whl", hash = "sha256:0db8e22c42f7980fe5eb22069b1f84c48966f3e0d23a01afde5999e3987a2501"},
+ {file = "scikit_learn-1.4.0-1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e7eef6ea2ed289af40e88c0be9f7704ca8b5de18508a06897c3fe21e0905efdf"},
+ {file = "scikit_learn-1.4.0-1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:349669b01435bc4dbf25c6410b0892073befdaec52637d1a1d1ff53865dc8db3"},
+ {file = "scikit_learn-1.4.0-1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d439c584e58434d0350701bd33f6c10b309e851fccaf41c121aed55f6851d8cf"},
+ {file = "scikit_learn-1.4.0-1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0e2427d9ef46477625ab9b55c1882844fe6fc500f418c3f8e650200182457bc"},
+ {file = "scikit_learn-1.4.0-1-cp312-cp312-win_amd64.whl", hash = "sha256:d3d75343940e7bf9b85c830c93d34039fa015eeb341c5c0b4cd7a90dadfe00d4"},
+ {file = "scikit_learn-1.4.0-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:76986d22e884ab062b1beecdd92379656e9d3789ecc1f9870923c178de55f9fe"},
+ {file = "scikit_learn-1.4.0-1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e22446ad89f1cb7657f0d849dcdc345b48e2d10afa3daf2925fdb740f85b714c"},
+ {file = "scikit_learn-1.4.0-1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74812c9eabb265be69d738a8ea8d4884917a59637fcbf88a5f0e9020498bc6b3"},
+ {file = "scikit_learn-1.4.0-1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad2a63e0dd386b92da3270887a29b308af4d7c750d8c4995dfd9a4798691bcc"},
+ {file = "scikit_learn-1.4.0-1-cp39-cp39-win_amd64.whl", hash = "sha256:53b9e29177897c37e2ff9d4ba6ca12fdb156e22523e463db05def303f5c72b5c"},
+ {file = "scikit_learn-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb8f044a8f5962613ce1feb4351d66f8d784bd072d36393582f351859b065f7d"},
+ {file = "scikit_learn-1.4.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:a6372c90bbf302387792108379f1ec77719c1618d88496d0df30cb8e370b4661"},
+ {file = "scikit_learn-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:785ce3c352bf697adfda357c3922c94517a9376002971bc5ea50896144bc8916"},
+ {file = "scikit_learn-1.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0aba2a20d89936d6e72d95d05e3bf1db55bca5c5920926ad7b92c34f5e7d3bbe"},
+ {file = "scikit_learn-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:2bac5d56b992f8f06816f2cd321eb86071c6f6d44bb4b1cb3d626525820d754b"},
+ {file = "scikit_learn-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27ae4b0f1b2c77107c096a7e05b33458354107b47775428d1f11b23e30a73e8a"},
+ {file = "scikit_learn-1.4.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5c5c62ffb52c3ffb755eb21fa74cc2cbf2c521bd53f5c04eaa10011dbecf5f80"},
+ {file = "scikit_learn-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f0d2018ac6fa055dab65fe8a485967990d33c672d55bc254c56c35287b02fab"},
+ {file = "scikit_learn-1.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a8918c415c4b4bf1d60c38d32958849a9191c2428ab35d30b78354085c7c7a"},
+ {file = "scikit_learn-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:80a21de63275f8bcd7877b3e781679d2ff1eddfed515a599f95b2502a3283d42"},
+ {file = "scikit_learn-1.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0f33bbafb310c26b81c4d41ecaebdbc1f63498a3f13461d50ed9a2e8f24d28e4"},
+ {file = "scikit_learn-1.4.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:8b6ac1442ec714b4911e5aef8afd82c691b5c88b525ea58299d455acc4e8dcec"},
+ {file = "scikit_learn-1.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05fc5915b716c6cc60a438c250108e9a9445b522975ed37e416d5ea4f9a63381"},
+ {file = "scikit_learn-1.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:842b7d6989f3c574685e18da6f91223eb32301d0f93903dd399894250835a6f7"},
+ {file = "scikit_learn-1.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:88bcb586fdff865372df1bc6be88bb7e6f9e0aa080dab9f54f5cac7eca8e2b6b"},
+ {file = "scikit_learn-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f77674647dd31f56cb12ed13ed25b6ed43a056fffef051715022d2ebffd7a7d1"},
+ {file = "scikit_learn-1.4.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:833999872e2920ce00f3a50839946bdac7539454e200eb6db54898a41f4bfd43"},
+ {file = "scikit_learn-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:970ec697accaef10fb4f51763f3a7b1250f9f0553cf05514d0e94905322a0172"},
+ {file = "scikit_learn-1.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:923d778f378ebacca2c672ab1740e5a413e437fb45ab45ab02578f8b689e5d43"},
+ {file = "scikit_learn-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:1d041bc95006b545b59e458399e3175ab11ca7a03dc9a74a573ac891f5df1489"},
+]
+
+[package.dependencies]
+joblib = ">=1.2.0"
+numpy = ">=1.19.5"
+scipy = ">=1.6.0"
threadpoolctl = ">=2.0.0"
[package.extras]
-benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"]
-docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
-examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"]
-tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"]
+benchmark = ["matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "pandas (>=1.1.5)"]
+docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.15.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
+examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"]
+tests = ["black (>=23.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.19.12)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.17.2)"]
[[package]]
name = "scikit-optimize"
@@ -2658,59 +2723,59 @@ plots = ["matplotlib (>=2.0.0)"]
[[package]]
name = "scipy"
-version = "1.11.3"
+version = "1.12.0"
description = "Fundamental algorithms for scientific computing in Python"
optional = false
-python-versions = "<3.13,>=3.9"
-files = [
- {file = "scipy-1.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:370f569c57e1d888304052c18e58f4a927338eafdaef78613c685ca2ea0d1fa0"},
- {file = "scipy-1.11.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9885e3e4f13b2bd44aaf2a1a6390a11add9f48d5295f7a592393ceb8991577a3"},
- {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e04aa19acc324a1a076abb4035dabe9b64badb19f76ad9c798bde39d41025cdc"},
- {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1a8a4657673bfae1e05e1e1d6e94b0cabe5ed0c7c144c8aa7b7dbb774ce5c1"},
- {file = "scipy-1.11.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7abda0e62ef00cde826d441485e2e32fe737bdddee3324e35c0e01dee65e2a88"},
- {file = "scipy-1.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:033c3fd95d55012dd1148b201b72ae854d5086d25e7c316ec9850de4fe776929"},
- {file = "scipy-1.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:925c6f09d0053b1c0f90b2d92d03b261e889b20d1c9b08a3a51f61afc5f58165"},
- {file = "scipy-1.11.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5664e364f90be8219283eeb844323ff8cd79d7acbd64e15eb9c46b9bc7f6a42a"},
- {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00f325434b6424952fbb636506f0567898dca7b0f7654d48f1c382ea338ce9a3"},
- {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f290cf561a4b4edfe8d1001ee4be6da60c1c4ea712985b58bf6bc62badee221"},
- {file = "scipy-1.11.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:91770cb3b1e81ae19463b3c235bf1e0e330767dca9eb4cd73ba3ded6c4151e4d"},
- {file = "scipy-1.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:e1f97cd89c0fe1a0685f8f89d85fa305deb3067d0668151571ba50913e445820"},
- {file = "scipy-1.11.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dfcc1552add7cb7c13fb70efcb2389d0624d571aaf2c80b04117e2755a0c5d15"},
- {file = "scipy-1.11.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0d3a136ae1ff0883fffbb1b05b0b2fea251cb1046a5077d0b435a1839b3e52b7"},
- {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bae66a2d7d5768eaa33008fa5a974389f167183c87bf39160d3fefe6664f8ddc"},
- {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2f6dee6cbb0e263b8142ed587bc93e3ed5e777f1f75448d24fb923d9fd4dce6"},
- {file = "scipy-1.11.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:74e89dc5e00201e71dd94f5f382ab1c6a9f3ff806c7d24e4e90928bb1aafb280"},
- {file = "scipy-1.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:90271dbde4be191522b3903fc97334e3956d7cfb9cce3f0718d0ab4fd7d8bfd6"},
- {file = "scipy-1.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a63d1ec9cadecce838467ce0631c17c15c7197ae61e49429434ba01d618caa83"},
- {file = "scipy-1.11.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:5305792c7110e32ff155aed0df46aa60a60fc6e52cd4ee02cdeb67eaccd5356e"},
- {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea7f579182d83d00fed0e5c11a4aa5ffe01460444219dedc448a36adf0c3917"},
- {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c77da50c9a91e23beb63c2a711ef9e9ca9a2060442757dffee34ea41847d8156"},
- {file = "scipy-1.11.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15f237e890c24aef6891c7d008f9ff7e758c6ef39a2b5df264650eb7900403c0"},
- {file = "scipy-1.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:4b4bb134c7aa457e26cc6ea482b016fef45db71417d55cc6d8f43d799cdf9ef2"},
- {file = "scipy-1.11.3.tar.gz", hash = "sha256:bba4d955f54edd61899776bad459bf7326e14b9fa1c552181f0479cc60a568cd"},
-]
-
-[package.dependencies]
-numpy = ">=1.21.6,<1.28.0"
+python-versions = ">=3.9"
+files = [
+ {file = "scipy-1.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:78e4402e140879387187f7f25d91cc592b3501a2e51dfb320f48dfb73565f10b"},
+ {file = "scipy-1.12.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f5f00ebaf8de24d14b8449981a2842d404152774c1a1d880c901bf454cb8e2a1"},
+ {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e53958531a7c695ff66c2e7bb7b79560ffdc562e2051644c5576c39ff8efb563"},
+ {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e32847e08da8d895ce09d108a494d9eb78974cf6de23063f93306a3e419960c"},
+ {file = "scipy-1.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c1020cad92772bf44b8e4cdabc1df5d87376cb219742549ef69fc9fd86282dd"},
+ {file = "scipy-1.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:75ea2a144096b5e39402e2ff53a36fecfd3b960d786b7efd3c180e29c39e53f2"},
+ {file = "scipy-1.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:408c68423f9de16cb9e602528be4ce0d6312b05001f3de61fe9ec8b1263cad08"},
+ {file = "scipy-1.12.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5adfad5dbf0163397beb4aca679187d24aec085343755fcdbdeb32b3679f254c"},
+ {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3003652496f6e7c387b1cf63f4bb720951cfa18907e998ea551e6de51a04467"},
+ {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b8066bce124ee5531d12a74b617d9ac0ea59245246410e19bca549656d9a40a"},
+ {file = "scipy-1.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8bee4993817e204d761dba10dbab0774ba5a8612e57e81319ea04d84945375ba"},
+ {file = "scipy-1.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a24024d45ce9a675c1fb8494e8e5244efea1c7a09c60beb1eeb80373d0fecc70"},
+ {file = "scipy-1.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e7e76cc48638228212c747ada851ef355c2bb5e7f939e10952bc504c11f4e372"},
+ {file = "scipy-1.12.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f7ce148dffcd64ade37b2df9315541f9adad6efcaa86866ee7dd5db0c8f041c3"},
+ {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c39f92041f490422924dfdb782527a4abddf4707616e07b021de33467f917bc"},
+ {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7ebda398f86e56178c2fa94cad15bf457a218a54a35c2a7b4490b9f9cb2676c"},
+ {file = "scipy-1.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:95e5c750d55cf518c398a8240571b0e0782c2d5a703250872f36eaf737751338"},
+ {file = "scipy-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e646d8571804a304e1da01040d21577685ce8e2db08ac58e543eaca063453e1c"},
+ {file = "scipy-1.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:913d6e7956c3a671de3b05ccb66b11bc293f56bfdef040583a7221d9e22a2e35"},
+ {file = "scipy-1.12.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba1b0c7256ad75401c73e4b3cf09d1f176e9bd4248f0d3112170fb2ec4db067"},
+ {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:730badef9b827b368f351eacae2e82da414e13cf8bd5051b4bdfd720271a5371"},
+ {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6546dc2c11a9df6926afcbdd8a3edec28566e4e785b915e849348c6dd9f3f490"},
+ {file = "scipy-1.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:196ebad3a4882081f62a5bf4aeb7326aa34b110e533aab23e4374fcccb0890dc"},
+ {file = "scipy-1.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:b360f1b6b2f742781299514e99ff560d1fe9bd1bff2712894b52abe528d1fd1e"},
+ {file = "scipy-1.12.0.tar.gz", hash = "sha256:4bf5abab8a36d20193c698b0f1fc282c1d083c94723902c447e5d2f1780936a3"},
+]
+
+[package.dependencies]
+numpy = ">=1.22.4,<1.29.0"
[package.extras]
dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"]
doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"]
-test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
+test = ["asv", "gmpy2", "hypothesis", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
[[package]]
name = "setuptools"
-version = "68.2.2"
+version = "69.0.3"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "setuptools-68.2.2-py3-none-any.whl", hash = "sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a"},
- {file = "setuptools-68.2.2.tar.gz", hash = "sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87"},
+ {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"},
+ {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"},
]
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
@@ -2784,22 +2849,22 @@ test = ["cython (>=3.0)", "filelock", "html5lib", "pytest (>=4.6)", "setuptools
[[package]]
name = "sphinx-autodoc-typehints"
-version = "1.24.1"
+version = "1.25.3"
description = "Type hints (PEP 484) support for the Sphinx autodoc extension"
optional = false
python-versions = ">=3.8"
files = [
- {file = "sphinx_autodoc_typehints-1.24.1-py3-none-any.whl", hash = "sha256:4cc16c5545f2bf896ca52a854babefe3d8baeaaa033d13a7f179ac1d9feb02d5"},
- {file = "sphinx_autodoc_typehints-1.24.1.tar.gz", hash = "sha256:06683a2b76c3c7b1931b75e40e0211866fbb50ba4c4e802d0901d9b4e849add2"},
+ {file = "sphinx_autodoc_typehints-1.25.3-py3-none-any.whl", hash = "sha256:d3da7fa9a9761eff6ff09f8b1956ae3090a2d4f4ad54aebcade8e458d6340835"},
+ {file = "sphinx_autodoc_typehints-1.25.3.tar.gz", hash = "sha256:70db10b391acf4e772019765991d2de0ff30ec0899b9ba137706dc0b3c4835e0"},
]
[package.dependencies]
sphinx = ">=7.1.2"
[package.extras]
-docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)"]
+docs = ["furo (>=2023.9.10)"]
numpy = ["nptyping (>=2.5)"]
-testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "sphobjinv (>=2.3.1)", "typing-extensions (>=4.7.1)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "sphobjinv (>=2.3.1)", "typing-extensions (>=4.8)"]
[[package]]
name = "sphinx-automodapi"
@@ -2838,45 +2903,50 @@ rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"]
[[package]]
name = "sphinx-gallery"
-version = "0.14.0"
+version = "0.15.0"
description = "A `Sphinx `_ extension that builds an HTML gallery of examples from any set of Python scripts."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "sphinx-gallery-0.14.0.tar.gz", hash = "sha256:2a4a0aaf032955508e1d0f3495199a3c7819ce420e71096bff0bca551a4043c2"},
- {file = "sphinx_gallery-0.14.0-py3-none-any.whl", hash = "sha256:55b3ad1f378abd126232c166192270ac0a3ef615dec10b66c961ed2967be1df6"},
+ {file = "sphinx-gallery-0.15.0.tar.gz", hash = "sha256:7217fe98f8c4cce248db798c48f34183e4cdb277d2381e188182f92a14ec26b7"},
+ {file = "sphinx_gallery-0.15.0-py3-none-any.whl", hash = "sha256:d66d38d901f6b65b6e3ee6c2584e37476b035d9e52907b1593a3f312946ae724"},
]
[package.dependencies]
+pillow = "*"
sphinx = ">=4"
+[package.extras]
+jupyterlite = ["jupyterlite-sphinx"]
+recommender = ["numpy"]
+show-api-usage = ["graphviz"]
+show-memory = ["memory-profiler"]
+
[[package]]
name = "sphinxcontrib-applehelp"
-version = "1.0.7"
+version = "1.0.8"
description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books"
optional = false
python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib_applehelp-1.0.7-py3-none-any.whl", hash = "sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d"},
- {file = "sphinxcontrib_applehelp-1.0.7.tar.gz", hash = "sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa"},
+ {file = "sphinxcontrib_applehelp-1.0.8-py3-none-any.whl", hash = "sha256:cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4"},
+ {file = "sphinxcontrib_applehelp-1.0.8.tar.gz", hash = "sha256:c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619"},
]
-[package.dependencies]
-Sphinx = ">=5"
-
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
+standalone = ["Sphinx (>=5)"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-bibtex"
-version = "2.6.1"
+version = "2.6.2"
description = "Sphinx extension for BibTeX style citations."
optional = false
python-versions = ">=3.7"
files = [
- {file = "sphinxcontrib-bibtex-2.6.1.tar.gz", hash = "sha256:046b49f070ae5276af34c1b8ddb9bc9562ef6de2f7a52d37a91cb8e53f54b863"},
- {file = "sphinxcontrib_bibtex-2.6.1-py3-none-any.whl", hash = "sha256:094c772098fe6b030cda8618c45722b2957cad0c04f328ba2b154aa08dfe720a"},
+ {file = "sphinxcontrib-bibtex-2.6.2.tar.gz", hash = "sha256:f487af694336f28bfb7d6a17070953a7d264bec43000a2379724274f5f8d70ae"},
+ {file = "sphinxcontrib_bibtex-2.6.2-py3-none-any.whl", hash = "sha256:10d45ebbb19207c5665396c9446f8012a79b8a538cb729f895b5910ab2d0b2da"},
]
[package.dependencies]
@@ -2888,38 +2958,34 @@ Sphinx = ">=3.5"
[[package]]
name = "sphinxcontrib-devhelp"
-version = "1.0.5"
+version = "1.0.6"
description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents"
optional = false
python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib_devhelp-1.0.5-py3-none-any.whl", hash = "sha256:fe8009aed765188f08fcaadbb3ea0d90ce8ae2d76710b7e29ea7d047177dae2f"},
- {file = "sphinxcontrib_devhelp-1.0.5.tar.gz", hash = "sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212"},
+ {file = "sphinxcontrib_devhelp-1.0.6-py3-none-any.whl", hash = "sha256:6485d09629944511c893fa11355bda18b742b83a2b181f9a009f7e500595c90f"},
+ {file = "sphinxcontrib_devhelp-1.0.6.tar.gz", hash = "sha256:9893fd3f90506bc4b97bdb977ceb8fbd823989f4316b28c3841ec128544372d3"},
]
-[package.dependencies]
-Sphinx = ">=5"
-
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
+standalone = ["Sphinx (>=5)"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-htmlhelp"
-version = "2.0.4"
+version = "2.0.5"
description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
optional = false
python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib_htmlhelp-2.0.4-py3-none-any.whl", hash = "sha256:8001661c077a73c29beaf4a79968d0726103c5605e27db92b9ebed8bab1359e9"},
- {file = "sphinxcontrib_htmlhelp-2.0.4.tar.gz", hash = "sha256:6c26a118a05b76000738429b724a0568dbde5b72391a688577da08f11891092a"},
+ {file = "sphinxcontrib_htmlhelp-2.0.5-py3-none-any.whl", hash = "sha256:393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04"},
+ {file = "sphinxcontrib_htmlhelp-2.0.5.tar.gz", hash = "sha256:0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015"},
]
-[package.dependencies]
-Sphinx = ">=5"
-
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
+standalone = ["Sphinx (>=5)"]
test = ["html5lib", "pytest"]
[[package]]
@@ -2938,38 +3004,34 @@ test = ["flake8", "mypy", "pytest"]
[[package]]
name = "sphinxcontrib-qthelp"
-version = "1.0.6"
+version = "1.0.7"
description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents"
optional = false
python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib_qthelp-1.0.6-py3-none-any.whl", hash = "sha256:bf76886ee7470b934e363da7a954ea2825650013d367728588732c7350f49ea4"},
- {file = "sphinxcontrib_qthelp-1.0.6.tar.gz", hash = "sha256:62b9d1a186ab7f5ee3356d906f648cacb7a6bdb94d201ee7adf26db55092982d"},
+ {file = "sphinxcontrib_qthelp-1.0.7-py3-none-any.whl", hash = "sha256:e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182"},
+ {file = "sphinxcontrib_qthelp-1.0.7.tar.gz", hash = "sha256:053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6"},
]
-[package.dependencies]
-Sphinx = ">=5"
-
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
+standalone = ["Sphinx (>=5)"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-serializinghtml"
-version = "1.1.9"
+version = "1.1.10"
description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)"
optional = false
python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib_serializinghtml-1.1.9-py3-none-any.whl", hash = "sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1"},
- {file = "sphinxcontrib_serializinghtml-1.1.9.tar.gz", hash = "sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54"},
+ {file = "sphinxcontrib_serializinghtml-1.1.10-py3-none-any.whl", hash = "sha256:326369b8df80a7d2d8d7f99aa5ac577f51ea51556ed974e7716cfd4fca3f6cb7"},
+ {file = "sphinxcontrib_serializinghtml-1.1.10.tar.gz", hash = "sha256:93f3f5dc458b91b192fe10c397e324f262cf163d79f3282c158e8436a2c4511f"},
]
-[package.dependencies]
-Sphinx = ">=5"
-
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
+standalone = ["Sphinx (>=5)"]
test = ["pytest"]
[[package]]
@@ -3040,26 +3102,26 @@ protobuf = ">=3.20"
[[package]]
name = "tensorflow"
-version = "2.14.0"
+version = "2.14.1"
description = "TensorFlow is an open source machine learning framework for everyone."
optional = false
python-versions = ">=3.9"
files = [
- {file = "tensorflow-2.14.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:318b21b18312df6d11f511d0f205d55809d9ad0f46d5f9c13d8325ce4fe3b159"},
- {file = "tensorflow-2.14.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:927868c9bd4b3d2026ac77ec65352226a9f25e2d24ec3c7d088c68cff7583c9b"},
- {file = "tensorflow-2.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3870063433aebbd1b8da65ed4dcb09495f9239397f8cb5a8822025b6bb65e04"},
- {file = "tensorflow-2.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c9c1101269efcdb63492b45c8e83df0fc30c4454260a252d507dfeaebdf77ff"},
- {file = "tensorflow-2.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:0b7eaab5e034f1695dc968f7be52ce7ccae4621182d1e2bf6d5b3fab583be98c"},
- {file = "tensorflow-2.14.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:00c42e7d8280c660b10cf5d0b3164fdc5e38fd0bf16b3f9963b7cd0e546346d8"},
- {file = "tensorflow-2.14.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c92f5526c2029d31a036be06eb229c71f1c1821472876d34d0184d19908e318c"},
- {file = "tensorflow-2.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c224c076160ef9f60284e88f59df2bed347d55e64a0ca157f30f9ca57e8495b0"},
- {file = "tensorflow-2.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80cabe6ab5f44280c05533e5b4a08e5b128f0d68d112564cffa3b96638e28aa"},
- {file = "tensorflow-2.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:0587ece626c4f7c4fcb2132525ea6c77ad2f2f5659a9b0f4451b1000be1b5e16"},
- {file = "tensorflow-2.14.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:6d65b54f6928490e2b6ff51836b97f88f5d5b29b5943fe81d8ac5d8c809ccca4"},
- {file = "tensorflow-2.14.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e2840b549686080bfb824cc1578b5a15d5ec416badacc0c327d93f8762ee6b56"},
- {file = "tensorflow-2.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fb16641092b04a37ec2916c30412f986ca6adf969e6062057839efb788985f8"},
- {file = "tensorflow-2.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ba2ee1f9fe7f453bcd27d39a36928142de75a427ac2097dee2db1516387c9d5"},
- {file = "tensorflow-2.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:6531e76276b1421f43e008280107ba215256d4570cc56fd54856db7ff45e58f7"},
+ {file = "tensorflow-2.14.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:f6e9ac1e53db30f1759148f731f87b9d12da5ce0f153fc49406824efd486aae7"},
+ {file = "tensorflow-2.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:7156bf1f7311dada7dba5345b526a38e6f4e4f4b8509bee162a24342bf6571b2"},
+ {file = "tensorflow-2.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5781aadad5b46e2de4e373b0ca15a852b90d58982270a6db02ec52e4986316d"},
+ {file = "tensorflow-2.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a955c42164eff4d751732c1274ca4bf059db60c9e2362098ce1eed7177c3fe9"},
+ {file = "tensorflow-2.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:4be5f4327a6e854f64b4dcfd08a51c5fc7cc3fea8c76c5bf5c0c3deb002d5221"},
+ {file = "tensorflow-2.14.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:597dd6665a91b3d4b881f0d40277eb55b65b04567553206a46e7db9cfa067310"},
+ {file = "tensorflow-2.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:9833e61423ad2726f81e3fc770558b81d5f0a454bdb2dad717c5474ea837ce91"},
+ {file = "tensorflow-2.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14a48a087954722d9e73086e8ce28a14b1f9f889ea5845c7c0bf30d8747ab6e2"},
+ {file = "tensorflow-2.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9aa05a98450fa5bc4efd529383b7d15c10ec12b0238a6744baa1508c4bfa4d5"},
+ {file = "tensorflow-2.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:11958d12e39d44a9f5fc753fc312dd1726a8506f2d2606e01421ca4ee9dc5c55"},
+ {file = "tensorflow-2.14.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d95404f78a8d5e3d2481383dbe2d2286341ccf9bc5cbb19d857c646494d860c6"},
+ {file = "tensorflow-2.14.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:511c4c5bfb2af17c6ca22663f98a7267c4386bf5486fbe78ee2d21482a6fa822"},
+ {file = "tensorflow-2.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f66d2990157cf27f80c730878cb8befa8ed9716223494037d31c80fbe5f64370"},
+ {file = "tensorflow-2.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9ab2747f75aba0327bfe6092b963694f1001781e5d2c0d251dfeed02b0c3bba"},
+ {file = "tensorflow-2.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:7f5c9215bc00ba88f1cde1399f8160a5cb865c20ad71a1d5a6869f9fad62d9a5"},
]
[package.dependencies]
@@ -3073,7 +3135,7 @@ h5py = ">=2.9.0"
keras = ">=2.14.0,<2.15"
libclang = ">=13.0.0"
ml-dtypes = "0.2.0"
-numpy = ">=1.23.5"
+numpy = ">=1.23.5,<2.0.0"
opt-einsum = ">=2.3.2"
packaging = "*"
protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
@@ -3091,14 +3153,14 @@ and-cuda = ["nvidia-cublas-cu11 (==11.11.3.6)", "nvidia-cuda-cupti-cu11 (==11.8.
[[package]]
name = "tensorflow-cpu-aws"
-version = "2.14.0"
+version = "2.14.1"
description = "TensorFlow is an open source machine learning framework for everyone."
optional = false
python-versions = ">=3.9"
files = [
- {file = "tensorflow_cpu_aws-2.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:947b83ca3bfc22d6fc292364be456f2f56507169890e8c6db21399009bda8ab0"},
- {file = "tensorflow_cpu_aws-2.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:497b400494fa693e8037fa0e2892714aecc257a5752a878b210b3a512b66668c"},
- {file = "tensorflow_cpu_aws-2.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:114526a11d38246fc94c1573e0088a8bf7381ac8b1d549a5f7423b2afa7bbb84"},
+ {file = "tensorflow_cpu_aws-2.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2e35e9d90fc448973b39cd2b76a03a66de88700424a40dcc0ff90b8d40b1a3a"},
+ {file = "tensorflow_cpu_aws-2.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:618af0ac57a7bb6b51c9b409c2838c9910ca2fd2ae2b1f986ae98ebf59a919e5"},
+ {file = "tensorflow_cpu_aws-2.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e65af267aed15f4a59a8fd7a3ffe11ce43a71a33b0cd27122015b134443d872"},
]
[package.dependencies]
@@ -3112,7 +3174,7 @@ h5py = ">=2.9.0"
keras = ">=2.14.0,<2.15"
libclang = ">=13.0.0"
ml-dtypes = "0.2.0"
-numpy = ">=1.23.5"
+numpy = ">=1.23.5,<2.0.0"
opt-einsum = ">=2.3.2"
packaging = "*"
protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
@@ -3140,14 +3202,14 @@ files = [
[[package]]
name = "tensorflow-intel"
-version = "2.14.0"
+version = "2.14.1"
description = "TensorFlow is an open source machine learning framework for everyone."
optional = false
python-versions = ">=3.9"
files = [
- {file = "tensorflow_intel-2.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:f4824c9ff48f9a89b0df494fe61795db05304c95f0e0d661b8d1fd6b855c0324"},
- {file = "tensorflow_intel-2.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f96c729d61ff8e2e340df5b3b4db81a938258f1c9282ab09277896d0c408ae"},
- {file = "tensorflow_intel-2.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:a9d0b57170993b717bad7bc0c07c2df7fe6f9fb5ce946c2a48f8535d49d119fe"},
+ {file = "tensorflow_intel-2.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:d53589eed39607059923e660dfdb28dc65a4b89bec5889a78941bf8ec936d716"},
+ {file = "tensorflow_intel-2.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:dc4b0fb2cf2768104630357e9c06b801163e31db22ef2fd0419a0c09ae2e2315"},
+ {file = "tensorflow_intel-2.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:78c11785eaa1047ac2e4746c86286f6629df0289e73616ce052a82761e1de678"},
]
[package.dependencies]
@@ -3161,7 +3223,7 @@ h5py = ">=2.9.0"
keras = ">=2.14.0,<2.15"
libclang = ">=13.0.0"
ml-dtypes = "0.2.0"
-numpy = ">=1.23.5"
+numpy = ">=1.23.5,<2.0.0"
opt-einsum = ">=2.3.2"
packaging = "*"
protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
@@ -3214,49 +3276,40 @@ tensorflow-rocm = ["tensorflow-rocm (>=2.11.0,<2.12.0)"]
[[package]]
name = "tensorflow-io-gcs-filesystem"
-version = "0.34.0"
+version = "0.35.0"
description = "TensorFlow IO"
optional = false
python-versions = ">=3.7, <3.12"
files = [
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:d831702fbb270996b27cda7fde06e0825b2ea81fd8dd3ead35242f4f8b3889b8"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:b9a93fcb01db269bc845a1ced431f3c61201755ce5f9ec4885760f30122276ef"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5813c336b4f7cb0a01ff4cc6cbd3edf11ef67305baf0e3cf634911b702f493f8"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b035f4c92639657b6d376929d550ac3dee9e6c0523eb434eefe0a27bae3d05b"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:a17a616d2c7fae83de4424404815843507d40d4eb0d507c636a5493a20c3d958"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:ec4604c99cbb5b708f4516dee27aa655abae222b876c98b740f4c2f89dd5c001"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cbe26c4a3332589c7b724f147df453b5c226993aa8d346a15536358d77b364c4"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6353123a5b51397950138a118876af833a7db66b531123bb86f82e80ab0e72"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:f211d2b3db8f9931765992b607b71cbfb98c8cd6169079d004a67a94ab10ecb4"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:d3feba2dd76f7c188137c34642d68d378f0eed81636cb95090ecb1496722707c"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:44ad387a812a78e7424bb8bee3820521ae1c044bddf72b1e163e8df95c124a74"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:7f60183473f0ca966451bb1d1bb5dc29b3cf9c74d1d0e7f2ed46760ed56bd4af"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:3f346b287ed2400e09b13cfd8524222fd70a66aadb9164c645286c2087007e9f"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:027a07553367187f918a99661f63ae0506b91b77a70bee9c7ccaf3920bf7cfe7"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d8664bddbe4e7b56ce94db8b93ea9077a158fb5e15364e11e29f93015ceea24"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:182b0fbde7e9a537fda0b354c28b0b6c035736728de8fe2db7ef49cf90352014"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:0dafed144673e1173528768fe208a7c5a6e8edae40208381cac420ee7c918ec9"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:396bfff61b49f80b86ddebe0c76ae0f2731689cee49ad7d782625180b50b13af"},
- {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b20622f8572fcb6c93e8f7d626327472f263e47ebd63d2153ef09162ef5ef7b5"},
-]
-
-[package.extras]
-tensorflow = ["tensorflow (>=2.13.0,<2.14.0)"]
-tensorflow-aarch64 = ["tensorflow-aarch64 (>=2.13.0,<2.14.0)"]
-tensorflow-cpu = ["tensorflow-cpu (>=2.13.0,<2.14.0)"]
-tensorflow-gpu = ["tensorflow-gpu (>=2.13.0,<2.14.0)"]
-tensorflow-rocm = ["tensorflow-rocm (>=2.13.0,<2.14.0)"]
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:5521721b38105496d4b43a4ffb0af5b04cc4873d464f26fbceddf8d63815ce98"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd8f30908bf8b7b2a017d6b145720d105aff7f998422671b71729708ec7b2fe4"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac8f1de60fdf9c734aea967b98555e366ac8743f77bca15c49eff023f587076b"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:35b6eca7225c815d962254327195f191d88c3c9c2278a5ab23e0ac834acbadbb"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e997389bfe008210cbd97c0c738d64282a2f03ad4d0536013bb0a9efde0c283"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8fb3402fb1457482c386ea19371bc76383412ae9ea4396edb1e8adb4ba76f21"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb6bf8f5b40207ecb17e7fdc3b4fc824a8361267c14e9528c1688e16de135cb7"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:c4f786eebd98d401565374722f2e67f3878675b0d87489cbaa13c70ee6ac370a"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fce1466bdb91096b6d22e7df17358ba228bcb92db5cff83f2f9f1c68eb26788"},
+ {file = "tensorflow_io_gcs_filesystem-0.35.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1856fe321fdb75f3386d92109c60db6ef097f610b450f9cc69d76444fb9980d1"},
+]
+
+[package.extras]
+tensorflow = ["tensorflow (>=2.14.0,<2.15.0)"]
+tensorflow-aarch64 = ["tensorflow-aarch64 (>=2.14.0,<2.15.0)"]
+tensorflow-cpu = ["tensorflow-cpu (>=2.14.0,<2.15.0)"]
+tensorflow-gpu = ["tensorflow-gpu (>=2.14.0,<2.15.0)"]
+tensorflow-rocm = ["tensorflow-rocm (>=2.14.0,<2.15.0)"]
[[package]]
name = "tensorflow-macos"
-version = "2.14.0"
+version = "2.14.1"
description = "TensorFlow is an open source machine learning framework for everyone."
optional = false
python-versions = ">=3.9"
files = [
- {file = "tensorflow_macos-2.14.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:31fa670b0afd2a52b3ad15bab97a0d0aae28c4ea2a96ab7f0a91e8844390bfb0"},
- {file = "tensorflow_macos-2.14.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:064e98b67d7a89e72c37c90254c0a322a0b8d0ce9b68f23286816210e3ef6685"},
- {file = "tensorflow_macos-2.14.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:7b3e4e01721bfc0105c99275113531a659e89fd6512340a7b8970593676e0114"},
+ {file = "tensorflow_macos-2.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5b9832df0852fa534cbd3362b6e00ba1c9d4b541fdfd987d0bba3927229435bc"},
+ {file = "tensorflow_macos-2.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:428f071cf9e901c8182be9f7278a79beea6f9e4b687bf0d5e8e8faefb7bcc760"},
+ {file = "tensorflow_macos-2.14.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:4d7ce47f3c593f71eaa98ed3c8fe3c83b6010cc63d06aaf12037845196d06d85"},
]
[package.dependencies]
@@ -3270,7 +3323,7 @@ h5py = ">=2.9.0"
keras = ">=2.14.0,<2.15"
libclang = ">=13.0.0"
ml-dtypes = "0.2.0"
-numpy = ">=1.23.5"
+numpy = ">=1.23.5,<2.0.0"
opt-einsum = ">=2.3.2"
packaging = "*"
protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
@@ -3311,13 +3364,13 @@ tfds = ["tensorflow-datasets (>=2.2.0)"]
[[package]]
name = "termcolor"
-version = "2.3.0"
+version = "2.4.0"
description = "ANSI color formatting for output in terminal"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"},
- {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"},
+ {file = "termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63"},
+ {file = "termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a"},
]
[package.extras]
@@ -3325,13 +3378,13 @@ tests = ["pytest", "pytest-cov"]
[[package]]
name = "thewalrus"
-version = "0.19.0"
+version = "0.21.0"
description = "Open source library for hafnian calculation"
optional = false
python-versions = "*"
files = [
- {file = "thewalrus-0.19.0-py3-none-any.whl", hash = "sha256:07b6e2969bf5405a2df736c442b1500857438bbd2afc2053b8b600b8b0c67f97"},
- {file = "thewalrus-0.19.0.tar.gz", hash = "sha256:06ff07a14cd8cd4650d9c82b8bb8301ef9a58dcdd4bafb14841768ccf80c98b9"},
+ {file = "thewalrus-0.21.0-py3-none-any.whl", hash = "sha256:5f393d17fc8362e7156337faed769e99f15149040ef298d2a1be27f234aa8cb9"},
+ {file = "thewalrus-0.21.0.tar.gz", hash = "sha256:a8e1d6a7dea1e2c70aeb172f2dba1dfc7fabfa6e000c8ace9c5f81c7df422637"},
]
[package.dependencies]
@@ -3376,51 +3429,51 @@ files = [
[[package]]
name = "toolz"
-version = "0.12.0"
+version = "0.12.1"
description = "List processing tools and functional utilities"
optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.7"
files = [
- {file = "toolz-0.12.0-py3-none-any.whl", hash = "sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f"},
- {file = "toolz-0.12.0.tar.gz", hash = "sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194"},
+ {file = "toolz-0.12.1-py3-none-any.whl", hash = "sha256:d22731364c07d72eea0a0ad45bafb2c2937ab6fd38a3507bf55eae8744aa7d85"},
+ {file = "toolz-0.12.1.tar.gz", hash = "sha256:ecca342664893f177a13dac0e6b41cbd8ac25a358e5f215316d43e2100224f4d"},
]
[[package]]
name = "typing-extensions"
-version = "4.8.0"
+version = "4.9.0"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
- {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"},
- {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
+ {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"},
+ {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"},
]
[[package]]
name = "tzdata"
-version = "2023.3"
+version = "2023.4"
description = "Provider of IANA time zone data"
optional = true
python-versions = ">=2"
files = [
- {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
- {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
+ {file = "tzdata-2023.4-py2.py3-none-any.whl", hash = "sha256:aa3ace4329eeacda5b7beb7ea08ece826c28d761cda36e747cfbf97996d39bf3"},
+ {file = "tzdata-2023.4.tar.gz", hash = "sha256:dd54c94f294765522c77399649b4fefd95522479a664a0cec87f41bebc6148c9"},
]
[[package]]
name = "urllib3"
-version = "2.0.7"
+version = "2.2.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"},
- {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"},
+ {file = "urllib3-2.2.0-py3-none-any.whl", hash = "sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224"},
+ {file = "urllib3-2.2.0.tar.gz", hash = "sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
-secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
+h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -3443,13 +3496,13 @@ watchdog = ["watchdog (>=2.3)"]
[[package]]
name = "wheel"
-version = "0.41.3"
+version = "0.42.0"
description = "A built-package format for Python"
optional = false
python-versions = ">=3.7"
files = [
- {file = "wheel-0.41.3-py3-none-any.whl", hash = "sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942"},
- {file = "wheel-0.41.3.tar.gz", hash = "sha256:4d4987ce51a49370ea65c0bfd2234e8ce80a12780820d9dc462597a6e60d0841"},
+ {file = "wheel-0.42.0-py3-none-any.whl", hash = "sha256:177f9c9b0d45c47873b619f5b650346d632cdc35fb5e4d25058e09c9e581433d"},
+ {file = "wheel-0.42.0.tar.gz", hash = "sha256:c45be39f7882c9d34243236f2d63cbd58039e360f85d0913425fbd7ceea617a8"},
]
[package.extras]
@@ -3502,4 +3555,4 @@ ray = ["ray", "scikit-optimize"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.11"
-content-hash = "8824d137988dd7d4a8d2459500890955b0f4b9b4ad4d2af7474c7c4d16d822af"
+content-hash = "970e86ff1dd5fc529221b750bc6be686fecf8a48dbf4d063046bf05d5171d092"
diff --git a/pyproject.toml b/pyproject.toml
index 084057717..3fdd13c08 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,11 +1,11 @@
[tool.poetry]
name = "mrmustard"
-version = "0.6.1-post1"
+version = "0.7.0"
description = "Differentiable quantum Gaussian circuits"
authors = ["Xanadu "]
license = "Apache License 2.0"
readme = "README.md"
-include = ["pyproject.toml"]
+include = ["julia_pkg/*"]
classifiers = [
"Development Status :: 4 - Beta",
"Environment :: Console",
@@ -26,14 +26,17 @@ classifiers = [
[tool.poetry.dependencies]
python = ">=3.9,<3.11"
+grpcio = "1.60.0"
numpy = "^1.23.5"
scipy = "^1.8.0"
numba = "^0.56.4"
-thewalrus = "~0.19.0"
+thewalrus = "^0.21.0"
rich = "^10.15.1"
matplotlib = "^3.5.0"
ray = { version = "^2.5.0", extras = ["tune"], optional = true }
scikit-optimize = { version = "^0.9.0", optional = true }
+networkx = "^3.1"
+julia = "0.6.1"
###################### The Tensorflow Section ######################
# Dedicated for making sure that poetry can install tensorflow on all platforms.
@@ -65,7 +68,7 @@ ray = ["ray", "scikit-optimize"]
optional = true
[tool.poetry.group.dev.dependencies]
-pytest = "6.2.5"
+pytest = "8.0"
pytest-cov ="3.0.0"
hypothesis = "6.31.6"
pylint = "2.10.0"
diff --git a/tests/conftest.py b/tests/conftest.py
index 572cbeb4f..e36f5300e 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -13,13 +13,57 @@
# limitations under the License.
import os
+import pytest
-from hypothesis import Verbosity, settings
+from mrmustard import math
+from hypothesis import Verbosity, settings as hyp_settings
print("pytest.conf -----------------------")
-settings.register_profile("ci", max_examples=10, deadline=None)
-settings.register_profile("dev", max_examples=10, deadline=None)
-settings.register_profile("debug", max_examples=10, verbosity=Verbosity.verbose, deadline=None)
+# ~~~~~~~~~~
+# Hypothesis
+# ~~~~~~~~~~
-settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "dev"))
+hyp_settings.register_profile("ci", max_examples=10, deadline=None)
+hyp_settings.register_profile("dev", max_examples=10, deadline=None)
+hyp_settings.register_profile("debug", max_examples=10, verbosity=Verbosity.verbose, deadline=None)
+
+hyp_settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "dev"))
+
+# ~~~~~~
+# Pytest
+# ~~~~~~
+
+
+def pytest_addoption(parser):
+ r"""
+ Adds the option to select the backend using the ``--backend`` flag. For example,
+ ``pytest --backend=tensorflow`` runs all the tests with tensorflow backend. The command
+ ``pytest`` defaults to ``pytest --backend=numpy``.
+ """
+ parser.addoption("--backend", default="numpy", help="``numpy`` or ``tensorflow``.")
+
+
+@pytest.fixture
+def backend(request):
+ r"""
+ Extracts ``backend`` from request.
+ """
+ return request.config.getoption("--backend")
+
+
+@pytest.fixture(autouse=True)
+def set_backend(backend):
+ r"""
+ Sets backend for all the tests.
+ """
+ math.change_backend(f"{backend}")
+
+
+def skip_np():
+ if math.backend_name == "numpy":
+ pytest.skip("numpy")
+
+
+def pytest_configure(config):
+ pass # no-op hook; reserved extension point for future pytest configuration
diff --git a/tests/random.py b/tests/random.py
index df3660aae..0c4552ebf 100644
--- a/tests/random.py
+++ b/tests/random.py
@@ -48,6 +48,9 @@
real_not_zero = st.one_of(negative, positive)
small_float = st.floats(min_value=-0.1, max_value=0.1, allow_infinity=False, allow_nan=False)
medium_float = st.floats(min_value=-1.0, max_value=1.0, allow_infinity=False, allow_nan=False)
+complex_nonzero = st.complex_numbers(
+ allow_infinity=False, allow_nan=False, min_magnitude=1e-9, max_magnitude=1e2
+)
# physical parameters
nmodes = st.integers(min_value=1, max_value=10)
@@ -56,6 +59,31 @@
prob = st.floats(min_value=0, max_value=1, allow_infinity=False, allow_nan=False)
gain = st.floats(min_value=1, max_value=2, allow_infinity=False, allow_nan=False)
+# Complex number strategy
+complex_number = st.complex_numbers(
+ min_magnitude=1e-9, max_magnitude=1, allow_infinity=False, allow_nan=False
+)
+
+# Size strategy
+size = st.integers(min_value=1, max_value=9)
+
+
+@st.composite
+def Abc_triple(draw, n=None):
+ n = n or draw(size)
+
+ # Complex symmetric matrix A
+ A = draw(arrays(dtype=complex, shape=(n, n), elements=complex_number))
+ A = 0.5 * (A + A.T) # Make it symmetric
+
+ # Complex vector b
+ b = draw(arrays(dtype=complex, shape=n, elements=complex_number))
+
+ # Complex scalar c
+ c = draw(complex_number)
+
+ return A, b, c
+
@st.composite
def vector(draw, length):
@@ -71,6 +99,34 @@ def list_of_ints(draw, N):
)
+@st.composite
+def matrix(draw, rows, cols):
+ """Return a strategy for generating matrices of shape `rows` x `cols`."""
+ elements = st.floats(allow_infinity=False, allow_nan=False, max_value=1e10, min_value=-1e10)
+ return draw(arrays(np.float, (rows, cols), elements=elements))
+
+
+@st.composite
+def complex_matrix(draw, rows, cols):
+ """Return a strategy for generating matrices of shape `rows` x `cols` with complex numbers."""
+ max_abs_value = 1e10
+ elements = st.complex_numbers(
+ min_magnitude=0, max_magnitude=max_abs_value, allow_infinity=False, allow_nan=False
+ )
+ return draw(arrays(np.complex, (rows, cols), elements=elements))
+
+
+@st.composite
+def complex_vector(draw, length=None):
+ """Return a strategy for generating vectors of length `length` with complex numbers."""
+ elements = st.complex_numbers(
+ min_magnitude=0, max_magnitude=1, allow_infinity=False, allow_nan=False
+ )
+ if length is None:
+ length = draw(st.integers(min_value=1, max_value=10))
+ return draw(arrays(np.complex, (length,), elements=elements))
+
+
def array_of_(strategy, minlen=0, maxlen=100):
r"""Return a strategy that returns an array of values from `strategy`."""
return arrays(
@@ -97,24 +153,6 @@ def bounds_check(t):
prob_bounds = st.tuples(none_or_(prob), none_or_(prob)).filter(bounds_check)
-# settings
-def force_settings(name, value):
- r"""Updates the value of immutable settings.
-
- .. code::
- >>> settings.HBAR
- 2.0
-
- >>> settings.HBAR = 1.0
- ValueError: Cannot change the value of `settings.HBAR`.
-
- >>> settings.force_settings("_hbar", 1.0)
- >>> settings.HBAR
- 1.0
- """
- getattr(settings, name)._value = value # pylint: disable=protected-access
-
-
# gates
@st.composite
def random_Rgate(draw, trainable=False):
@@ -397,7 +435,8 @@ def n_mode_pure_state(draw, num_modes=1):
@st.composite
def n_mode_mixed_state(draw, num_modes=1):
- r"""Return a random n mode mixed state."""
- state = draw(n_mode_pure_state(num_modes))
- attenuator = Attenuator(draw(st.floats(min_value=0.5, max_value=0.9)))
- return state >> attenuator
+ r"""Return a random n mode mixed state."""
+ S = draw(random_Sgate(num_modes))
+ I = draw(random_Interferometer(num_modes))
+ D = draw(random_Dgate(num_modes))
+ return Thermal([0.5] * num_modes) >> S >> I >> D
diff --git a/tests/test_about.py b/tests/test_about.py
index c768ee17c..0152d681f 100644
--- a/tests/test_about.py
+++ b/tests/test_about.py
@@ -19,19 +19,19 @@
import io
import re
-import mrmustard as mm
+from mrmustard import about, version
def test_about():
"""Tests if the about string prints correctly."""
f = io.StringIO()
with contextlib.redirect_stdout(f):
- mm.about()
+ about()
out = f.getvalue().strip()
assert "Python version:" in out
pl_version_match = re.search(r"Mr Mustard version:\s+([\S]+)\n", out).group(1)
- assert mm.version() in pl_version_match
+ assert version() in pl_version_match
assert "Numpy version" in out
assert "Scipy version" in out
assert "The Walrus version" in out
diff --git a/tests/test_lab/__init__.py b/tests/test_lab/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_utils/test_circuitdrawer.py b/tests/test_lab/test_circuit_drawer.py
similarity index 91%
rename from tests/test_utils/test_circuitdrawer.py
rename to tests/test_lab/test_circuit_drawer.py
index bc01b7fb6..1d8a0d16e 100644
--- a/tests/test_utils/test_circuitdrawer.py
+++ b/tests/test_lab/test_circuit_drawer.py
@@ -16,7 +16,7 @@
from mrmustard import settings
from mrmustard.lab import BSgate, Ggate
-from mrmustard.utils.circdrawer import (
+from mrmustard.lab.circuit_drawer import (
_add_grouping_symbols,
_add_op,
circuit_text,
@@ -108,9 +108,11 @@ def test_circuit_text():
def test_param_order():
- r"""Tests that Parametrized.param_string returns the parameters in the correct order"""
+ r"""Tests that ParameterSet.to_string returns the parameters in the correct order"""
B = BSgate(theta=0.4, phi=0.5)
- assert B.param_string(decimals=1) == "0.4, 0.5"
+ assert B.parameter_set.to_string(decimals=1) == "0.4, 0.5"
B = BSgate(phi=0.5, theta=0.4)
- assert B.param_string(decimals=1) == "0.4, 0.5" # same order as class constructor, not call
+ assert (
+ B.parameter_set.to_string(decimals=1) == "0.4, 0.5"
+ ) # same order as class constructor, not call
diff --git a/tests/test_lab/test_detectors.py b/tests/test_lab/test_detectors.py
index d6b4b3614..ee0cfdd22 100644
--- a/tests/test_lab/test_detectors.py
+++ b/tests/test_lab/test_detectors.py
@@ -20,7 +20,7 @@
from hypothesis.extra.numpy import arrays
from scipy.stats import poisson
-from mrmustard import physics, settings
+from mrmustard import math, physics, settings
from mrmustard.lab import (
TMSV,
Attenuator,
@@ -37,10 +37,11 @@
State,
Vacuum,
)
-from mrmustard.math import Math
from tests.random import none_or_
-math = Math()
+from ..conftest import skip_np
+
+
hbar = settings.HBAR
@@ -54,6 +55,8 @@ class TestPNRDetector:
)
def test_detector_coherent_state(self, alpha, eta, dc):
"""Tests the correct Poisson statistics are generated when a coherent state hits an imperfect detector"""
+ skip_np()
+
detector = PNRDetector(efficiency=eta, dark_counts=dc, modes=[0])
ps = Coherent(x=alpha.real, y=alpha.imag) << detector
expected = poisson.pmf(k=np.arange(len(ps)), mu=eta * np.abs(alpha) ** 2 + dc)
@@ -67,13 +70,15 @@ def test_detector_coherent_state(self, alpha, eta, dc):
)
def test_detector_squeezed_state(self, r, phi, eta, dc):
"""Tests the correct mean and variance are generated when a squeezed state hits an imperfect detector"""
+ skip_np()
+
S = Sgate(r=r, phi=phi)
ps = Vacuum(1) >> S >> PNRDetector(efficiency=eta, dark_counts=dc)
assert np.allclose(np.sum(ps), 1.0)
- mean = np.arange(len(ps)) @ ps.numpy()
+ mean = np.arange(len(ps)) @ math.asnumpy(ps)
expected_mean = eta * np.sinh(r) ** 2 + dc
assert np.allclose(mean, expected_mean)
- variance = np.arange(len(ps)) ** 2 @ ps.numpy() - mean**2
+ variance = np.arange(len(ps)) ** 2 @ math.asnumpy(ps) - mean**2
expected_variance = eta * np.sinh(r) ** 2 * (1 + eta * (1 + 2 * np.sinh(r) ** 2)) + dc
assert np.allclose(variance, expected_variance)
@@ -87,6 +92,8 @@ def test_detector_squeezed_state(self, r, phi, eta, dc):
)
def test_detector_two_mode_squeezed_state(self, r, phi, eta_s, eta_i, dc_s, dc_i):
"""Tests the correct mean and variance are generated when a two mode squeezed state hits an imperfect detector"""
+ skip_np()
+
pnr = PNRDetector(efficiency=[eta_s, eta_i], dark_counts=[dc_s, dc_i])
ps = Vacuum(2) >> S2gate(r=r, phi=phi) >> pnr
n = np.arange(len(ps))
@@ -101,7 +108,7 @@ def test_detector_two_mode_squeezed_state(self, r, phi, eta_s, eta_i, dc_s, dc_i
var_i = np.sum(ps, axis=0) @ n**2 - mean_i**2
expected_var_s = n_s * (n_s + 1) + dc_s
expected_var_i = n_i * (n_i + 1) + dc_i
- covar = n @ ps.numpy() @ n - mean_s * mean_i
+ covar = n @ math.asnumpy(ps) @ n - mean_s * mean_i
expected_covar = eta_s * eta_i * (np.sinh(r) * np.cosh(r)) ** 2
assert np.allclose(mean_s, expected_mean_s)
assert np.allclose(mean_i, expected_mean_i)
@@ -111,6 +118,8 @@ def test_detector_two_mode_squeezed_state(self, r, phi, eta_s, eta_i, dc_s, dc_i
def test_postselection(self):
"""Check the correct state is heralded for a two-mode squeezed vacuum with perfect detector"""
+ skip_np()
+
n_mean = 1.0
n_measured = 1
cutoff = 3
@@ -129,6 +138,8 @@ def test_postselection(self):
@given(eta=st.floats(0, 1))
def test_loss_probs(self, eta):
"Checks that a lossy channel is equivalent to quantum efficiency on detection probs"
+ skip_np()
+
ideal_detector = PNRDetector(efficiency=1.0, dark_counts=0.0)
lossy_detector = PNRDetector(efficiency=eta, dark_counts=0.0)
S = Sgate(r=0.2, phi=[0.0, 0.7])
@@ -154,7 +165,6 @@ def test_homodyne_mode_kwargs(self, outcome):
Also checks postselection ensuring the x-quadrature value is consistent with the
postselected value.
"""
-
S1 = Sgate(modes=[0], r=1, phi=np.pi / 2)
S2 = Sgate(modes=[1], r=1, phi=0)
initial_state = Vacuum(3) >> S1 >> S2
@@ -168,7 +178,7 @@ def test_homodyne_mode_kwargs(self, outcome):
if outcome is not None:
# checks postselection ensuring the x-quadrature
# value is consistent with the postselected value
- x_outcome = detector.outcome.numpy()[:2]
+ x_outcome = math.asnumpy(detector.outcome)[:2]
assert np.allclose(x_outcome, outcome)
@given(
@@ -196,7 +206,7 @@ def test_homodyne_on_2mode_squeezed_vacuum(self, s, outcome):
means = np.array(
[2 * np.sqrt(s * (1 + s)) * outcome / (np.exp(-2 * r) + 1 + 2 * s), 0.0]
)
- assert np.allclose(remaining_state.means.numpy(), means)
+ assert np.allclose(math.asnumpy(remaining_state.means), means)
@given(
s=st.floats(1.0, 10.0),
@@ -233,7 +243,7 @@ def test_homodyne_on_2mode_squeezed_vacuum_with_angle(self, s, outcome, angle):
],
]
)
- assert np.allclose(remaining_state.cov.numpy(), cov, atol=1e-5)
+ assert np.allclose(math.asnumpy(remaining_state.cov), cov, atol=1e-5)
# TODO: figure out why this is not working
# if outcome is not None:
# outcome = outcome * np.sqrt(hbar)
@@ -271,7 +281,7 @@ def test_homodyne_on_2mode_squeezed_vacuum_with_displacement(self, s, X, d):
]
)
- means = remaining_state.means.numpy()
+ means = math.asnumpy(remaining_state.means)
assert np.allclose(means, expected_means)
N_MEAS = 150 # number of homodyne measurements to perform
@@ -279,27 +289,30 @@ def test_homodyne_on_2mode_squeezed_vacuum_with_displacement(self, s, X, d):
std_10 = NUM_STDS / np.sqrt(N_MEAS)
@pytest.mark.parametrize(
- "state, mean_expected, var_expected",
+ "state, kwargs, mean_expected, var_expected",
[
- (Vacuum(1), 0.0, settings.HBAR / 2),
- (Coherent(2.0, 0.5), 2.0 * np.sqrt(2 * settings.HBAR), settings.HBAR / 2),
- (SqueezedVacuum(0.25, 0.0), 0.0, 0.25 * settings.HBAR / 2),
+ (Vacuum, {"num_modes": 1}, 0.0, settings.HBAR / 2),
+ (Coherent, {"x": 2.0, "y": 0.5}, 2.0 * np.sqrt(2 * settings.HBAR), settings.HBAR / 2),
+ (SqueezedVacuum, {"r": 0.25, "phi": 0.0}, 0.0, 0.25 * settings.HBAR / 2),
],
)
@pytest.mark.parametrize("gaussian_state", [True, False])
- def test_sampling_mean_and_var(self, state, mean_expected, var_expected, gaussian_state):
+ def test_sampling_mean_and_var(
+ self, state, kwargs, mean_expected, var_expected, gaussian_state
+ ):
"""Tests that the mean and variance estimates of many homodyne
measurements are in agreement with the expected values for the states"""
+ state = state(**kwargs)
tf.random.set_seed(123)
if not gaussian_state:
state = State(dm=state.dm(cutoffs=[40]))
detector = Homodyne(0.0)
- results = np.empty((self.N_MEAS, 2))
+ results = np.zeros((self.N_MEAS, 2))
for i in range(self.N_MEAS):
_ = state << detector
- results[i] = detector.outcome.numpy()
+ results[i] = math.asnumpy(detector.outcome)
mean = results.mean(axis=0)
assert np.allclose(mean[0], mean_expected, atol=self.std_10, rtol=0)
@@ -311,7 +324,7 @@ def test_homodyne_squeezing_setting(self):
covarince matrix: one that has tends to :math:`diag(1/\sigma[1,1],0)`."""
sigma = np.identity(2)
- sigma_m = SqueezedVacuum(r=settings.HOMODYNE_SQUEEZING, phi=0).cov.numpy()
+ sigma_m = math.asnumpy(SqueezedVacuum(r=settings.HOMODYNE_SQUEEZING, phi=0).cov)
inverse_covariance = np.linalg.inv(sigma + sigma_m)
assert np.allclose(inverse_covariance, np.diag([1 / sigma[1, 1], 0]))
@@ -362,7 +375,7 @@ def test_heterodyne_on_2mode_squeezed_vacuum_with_displacement(
# assert expected covariance
cov = hbar / 2 * np.array([[1, 0], [0, 1]])
- assert np.allclose(remaining_state.cov.numpy(), cov)
+ assert np.allclose(math.asnumpy(remaining_state.cov), cov)
# assert expected means vector, not tested when x or y is None
# because we cannot access the sampled outcome value
diff --git a/tests/test_lab/test_gates_fock.py b/tests/test_lab/test_gates_fock.py
index 0c867f5ee..4ae9cf486 100644
--- a/tests/test_lab/test_gates_fock.py
+++ b/tests/test_lab/test_gates_fock.py
@@ -12,18 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# pylint: disable=import-outside-toplevel
+
import numpy as np
import pytest
from hypothesis import given
from thewalrus.fock_gradients import (
- beamsplitter,
- displacement,
+ beamsplitter as tw_beamsplitter,
+ displacement as tw_displacement,
mzgate,
squeezing,
two_mode_squeezing,
)
-from mrmustard import settings
+from mrmustard import math, settings
from mrmustard.lab import (
Attenuator,
BSgate,
@@ -40,7 +42,6 @@
Thermal,
)
from mrmustard.lab.states import TMSV, Fock, SqueezedVacuum, State
-from mrmustard.math import Math
from mrmustard.math.lattice import strategies
from mrmustard.physics import fock
from tests.random import (
@@ -53,8 +54,7 @@
single_mode_unitary_gate,
two_mode_unitary_gate,
)
-
-math = Math()
+from ..conftest import skip_np
@given(state=n_mode_pure_state(num_modes=1), x=medium_float, y=medium_float)
@@ -165,6 +165,8 @@ def test_parallel_displacement(x1, x2, y1, y2):
def test_squeezer_grad_against_finite_differences():
"""tests fock squeezer gradient against finite differences"""
+ skip_np()
+
cutoffs = (5, 5)
r = math.new_variable(0.5, None, "r")
phi = math.new_variable(0.1, None, "phi")
@@ -183,7 +185,7 @@ def test_displacement_grad():
cutoffs = [5, 5]
x = math.new_variable(0.1, None, "x")
y = math.new_variable(0.1, None, "y")
- alpha = math.make_complex(x, y).numpy()
+ alpha = math.asnumpy(math.make_complex(x, y))
delta = 1e-6
dUdx = (fock.displacement(x + delta, y, cutoffs) - fock.displacement(x - delta, y, cutoffs)) / (
2 * delta
@@ -207,7 +209,7 @@ def test_fock_representation_displacement_rectangular():
Ud = dgate.U(cutoffs)
# compare with tw implementation
- expected_Ud = displacement(np.sqrt(x * x + y * y), np.arctan2(y, x), 10)[:5, :10]
+ expected_Ud = tw_displacement(np.sqrt(x * x + y * y), np.arctan2(y, x), 10)[:5, :10]
assert np.allclose(Ud, expected_Ud, atol=1e-5)
@@ -221,7 +223,7 @@ def test_fock_representation_displacement_rectangular2():
Ud = dgate.U(cutoffs)
# compare with tw implementation
- expected_Ud = displacement(np.sqrt(x * x + y * y), np.arctan2(y, x), 10)[:10, :5]
+ expected_Ud = tw_displacement(np.sqrt(x * x + y * y), np.arctan2(y, x), 10)[:10, :5]
assert np.allclose(Ud, expected_Ud, atol=1e-5)
@@ -245,7 +247,7 @@ def test_parallel_squeezing(r1, phi1, r2, phi2):
@given(theta=angle, phi=angle)
def test_fock_representation_beamsplitter(theta, phi):
BS = BSgate(theta=theta, phi=phi)
- expected = beamsplitter(theta=theta, phi=phi, cutoff=10)
+ expected = tw_beamsplitter(theta=theta, phi=phi, cutoff=10)
assert np.allclose(expected, BS.U(cutoffs=[10, 10, 10, 10]), atol=1e-5)
@@ -253,14 +255,14 @@ def test_fock_representation_beamsplitter(theta, phi):
def test_fock_representation_two_mode_squeezing(r, phi):
S2 = S2gate(r=r, phi=phi)
expected = two_mode_squeezing(r=r, theta=phi, cutoff=10)
- assert np.allclose(expected, S2.U(cutoffs=[10, 10, 10, 10]), atol=1e-5)
+ assert np.allclose(expected, S2.U(cutoffs=[10, 10]), atol=1e-5)
@given(phi_a=angle, phi_b=angle)
def test_fock_representation_mzgate(phi_a, phi_b):
MZ = MZgate(phi_a=phi_a, phi_b=phi_b, internal=False)
expected = mzgate(theta=phi_b, phi=phi_a, cutoff=10)
- assert np.allclose(expected, MZ.U(cutoffs=[10, 10, 10, 10]), atol=1e-5)
+ assert np.allclose(expected, MZ.U(cutoffs=[10, 10]), atol=1e-5)
@pytest.mark.parametrize(
@@ -305,18 +307,27 @@ def test_choi_cutoffs():
assert output.cutoffs == [5, 8] # cutoffs are respected by the gate
-@pytest.mark.parametrize("gate", [Sgate(1), Rgate(0.1), Dgate(0.1)])
+@pytest.mark.parametrize(
+ "gate, kwargs",
+ [
+ (Sgate, {"r": 1}),
+ (Rgate, {"angle": 0.1}),
+ (Dgate, {"x": 0.1}),
+ ],
+)
@pytest.mark.parametrize("cutoff", [2, 5])
@pytest.mark.parametrize("modes", [[0], [1, 2]])
-def test_choi_for_unitary(gate, cutoff, modes):
+def test_choi_for_unitary(gate, kwargs, cutoff, modes):
"""tests the `choi` method for unitary transformations"""
+ gate = gate(**kwargs)
+
gate = gate[modes]
N = gate.num_modes
cutoffs = [cutoff] * N
- choi = gate.choi(cutoffs=cutoffs).numpy().reshape(cutoff ** (2 * N), cutoff ** (2 * N))
+ choi = math.asnumpy(gate.choi(cutoffs=cutoffs)).reshape(cutoff ** (2 * N), cutoff ** (2 * N))
- t = gate.U(cutoffs=cutoffs).numpy()
+ t = math.asnumpy(gate.U(cutoffs=cutoffs))
row = t.flatten().reshape(1, cutoff ** (2 * N))
col = t.flatten().reshape(cutoff ** (2 * N), 1)
expected = np.dot(col, row)
diff --git a/tests/test_lab/test_state.py b/tests/test_lab/test_state.py
index 47aeb6356..d7cb690e8 100644
--- a/tests/test_lab/test_state.py
+++ b/tests/test_lab/test_state.py
@@ -1,6 +1,8 @@
import numpy as np
-from mrmustard.lab import Attenuator, Gaussian
+from mrmustard import math
+from mrmustard.lab import Attenuator, Coherent, Gaussian
+from mrmustard.lab.abstract.state import mikkel_plot
def test_addition():
@@ -24,11 +26,11 @@ def test_multiplication_ket():
def test_multiplication_dm():
"""Test that multiplication of Gaussians is correct"""
- G = Gaussian(1) >> Attenuator(0.9)
+ G = Gaussian(1, cutoffs=[10]) >> Attenuator(0.9)
scaled = 42.0 * G
- assert np.allclose(scaled.dm(), 42.0 * G.dm())
+ assert np.allclose(scaled.dm(G.cutoffs), 42.0 * G.dm())
def test_division_ket():
@@ -42,8 +44,17 @@ def test_division_ket():
def test_division_dm():
"""Test that division of Gaussians is correct"""
- G = Gaussian(1) >> Attenuator(0.9)
+ G = Gaussian(1, cutoffs=[10]) >> Attenuator(0.9)
scaled = G / 42.0
assert np.allclose(scaled.dm(G.cutoffs), G.dm() / 42.0)
+
+
+def test_mikkel_plot():
+ """Tests that mikkel plot returns figure and axes."""
+ dm = Coherent().dm(cutoffs=[10])
+ fig, axs = mikkel_plot(math.asnumpy(dm))
+
+ assert fig is not None
+ assert axs is not None
diff --git a/tests/test_lab/test_states.py b/tests/test_lab/test_states.py
index 45fb4a61b..a513f8ed1 100644
--- a/tests/test_lab/test_states.py
+++ b/tests/test_lab/test_states.py
@@ -18,7 +18,7 @@
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays
-from mrmustard import settings
+from mrmustard import math, settings
from mrmustard.lab.abstract import State
from mrmustard.lab.gates import Attenuator, Dgate, Ggate, Sgate
from mrmustard.lab.states import (
@@ -30,11 +30,9 @@
Thermal,
Vacuum,
)
-from mrmustard.math import Math
from mrmustard.physics import gaussian as gp
-from tests.random import angle, force_settings, medium_float, n_mode_pure_state, nmodes, r
+from tests.random import angle, medium_float, n_mode_pure_state, nmodes, r
-math = Math()
hbar0 = settings.HBAR
@@ -46,13 +44,13 @@ def xy_arrays(draw):
@given(nmodes, st.floats(0.1, 5.0))
def test_vacuum_state(nmodes, hbar):
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
cov, disp = gp.vacuum_cov(nmodes), gp.vacuum_means(nmodes)
assert np.allclose(cov, np.eye(2 * nmodes) * hbar / 2)
assert np.allclose(disp, np.zeros_like(disp))
# restoring hbar to its original value
- force_settings("_hbar", hbar0)
+ settings._force_hbar(hbar0)
@given(x=medium_float, y=medium_float)
@@ -64,23 +62,23 @@ def test_coherent_state_single(x, y):
@given(hbar=st.floats(0.5, 2.0), x=medium_float, y=medium_float)
def test_coherent_state_list(hbar, x, y):
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
assert np.allclose(gp.displacement([x], [y]), np.array([x, y]) * np.sqrt(2 * hbar))
# restoring hbar to its original value
- force_settings("_hbar", hbar0)
+ settings._force_hbar(hbar0)
@given(hbar=st.floats(0.5, 2.0), x=medium_float, y=medium_float)
def test_coherent_state_array(hbar, x, y):
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
assert np.allclose(
gp.displacement(np.array([x]), np.array([y])),
np.array([x, y]) * np.sqrt(2 * hbar),
)
# restoring hbar to its original value
- force_settings("_hbar", hbar0)
+ settings._force_hbar(hbar0)
@given(xy=xy_arrays())
@@ -92,7 +90,7 @@ def test_coherent_state_multiple(xy):
assert np.allclose(state.means, np.concatenate([x, y], axis=-1) * np.sqrt(2 * settings.HBAR))
# restoring hbar to its original value
- force_settings("_hbar", hbar0)
+ settings._force_hbar(hbar0)
@given(state=n_mode_pure_state(num_modes=1))
@@ -165,11 +163,11 @@ def test_hbar():
"""Test cov matrix is linear in hbar."""
g = Gaussian(2)
p = g.purity
- force_settings("_hbar", 1.234)
+ settings._force_hbar(1.234)
assert g.purity == p
# restoring hbar to its original value
- force_settings("_hbar", hbar0)
+ settings._force_hbar(hbar0)
def test_get_single_mode():
@@ -229,7 +227,7 @@ def test_random_state_is_entangled():
@given(modes=st.lists(st.integers(), min_size=2, max_size=5, unique=True))
def test_getitem_set_modes(modes):
- """Test that using `State.__getitem__` and `modes`
+ """Test that using `super().__getitem__` and `modes`
kwarg correctly set the modes of the state."""
cutoff = len(modes) + 1
@@ -279,7 +277,7 @@ def test_ket_from_pure_dm_new_cutoffs():
"tests that the shape of the internal fock representation reflects the new cutoffs"
state = Vacuum(1) >> Sgate(0.1) >> Dgate(0.1, 0.1) # weak gaussian state
state = State(dm=state.dm(cutoffs=[20])) # assign pure dm directly
- assert state.ket(cutoffs=[5]).shape.as_list() == [5] # shape should be [5]
+ assert list(state.ket(cutoffs=[5]).shape) == [5] # shape should be [5]
def test_ket_probability():
diff --git a/tests/test_math/__init__.py b/tests/test_math/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_math/test_backend_manager.py b/tests/test_math/test_backend_manager.py
new file mode 100644
index 000000000..64935f3c0
--- /dev/null
+++ b/tests/test_math/test_backend_manager.py
@@ -0,0 +1,589 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit tests for the :class:`BackendManager`.
+"""
+
+
+import numpy as np
+import pytest
+import tensorflow as tf
+
+from mrmustard import math
+
+
+# pylint: disable=protected-access, too-many-public-methods
+class TestBackendManager:
+ r"""
+ Tests the BackendManager.
+ """
+
+ l1 = [1.0]
+ l2 = [1.0 + 0.0j, -2.0 + 2.0j]
+ l3 = [[1.0, 2.0], [-3.0, 4.0]]
+ l4 = [l2, l2]
+ l5 = [[[1.0, 2.0, 3.0 + 6], [3.0, 4.0, 5.0 - 10]], [[1.0, 2.0 + 1, 3.0], [3.0, 4.0, 5.0]]]
+ lists = [l1, l2, l3, l4, l5]
+
+ types = ["None", "int32", "float32", "float64", "complex128"]
+
+ def test_error(self):
+ r"""
+ Tests the error on `_apply`.
+ """
+ msg = f"Function ``ciao`` not implemented for backend ``{math.backend_name}``."
+ with pytest.raises(NotImplementedError, match=msg):
+ math._apply("ciao")
+
+ def test_types(self):
+ r"""
+ Tests the types.
+ """
+ assert math.int32 is math.backend.int32
+ assert math.float32 is math.backend.float32
+ assert math.float64 is math.backend.float64
+ assert math.complex128 is math.backend.complex128
+
+ @pytest.mark.parametrize("l", lists)
+ def test_abs(self, l):
+ r"""
+ Tests the ``abs`` method.
+ """
+ arr = np.array(l)
+ res = math.asnumpy(math.abs(np.array(l)))
+ assert np.allclose(res, np.abs(arr))
+
+ @pytest.mark.parametrize("l", lists)
+ def test_any(self, l):
+ r"""
+ Tests the ``any`` method.
+ """
+ arr = np.array(l)
+ assert np.allclose(math.asnumpy(math.any(arr)), np.any(arr))
+
+ @pytest.mark.parametrize("t", ["float32", "float64"])
+ def test_arange(self, t):
+ r"""
+ Tests the ``arange`` method.
+ """
+ dtype = getattr(math, t)
+ params = (3, 20, 0.5, dtype)
+
+ np_dtype = getattr(np, t)
+ np_params = (3, 20, 0.5, np_dtype)
+
+ res = math.asnumpy(math.arange(*params))
+ assert np.allclose(res, np.arange(*np_params))
+
+ @pytest.mark.parametrize("l", lists)
+ def test_asnumpy(self, l):
+ r"""
+ Tests the ``asnumpy`` method.
+ """
+ arr = math.astensor(np.array(l), dtype=np.array(l).dtype)
+ res = math.asnumpy(arr)
+ assert np.allclose(res, np.array(l))
+
+ def test_assign(self):
+ r"""
+ Tests the ``assign`` method.
+ """
+ arr = math.new_variable(np.eye(3), (None, None), "")
+ value = math.astensor(2 * np.eye(3))
+ arr = math.asnumpy(math.assign(arr, value))
+ assert np.allclose(arr, value)
+
+ @pytest.mark.parametrize("t", types)
+ @pytest.mark.parametrize("l", [l1, l3])
+ def test_astensor(self, t, l):
+ r"""
+ Tests the ``astensor`` method.
+ """
+ arr = np.array(l)
+ dtype = getattr(math, t, None)
+ res = math.astensor(arr, dtype)
+
+ if math.backend_name == "numpy":
+ assert np.allclose(res, arr.astype(dtype or np.float64))
+ else:
+ exp = tf.convert_to_tensor(arr, dtype=dtype or tf.float64)
+ exp = exp.numpy()
+ assert np.allclose(res, exp)
+
+ @pytest.mark.parametrize("t", types)
+ @pytest.mark.parametrize("l", [l1, l3])
+ def test_atleast_1d(self, t, l):
+ r"""
+ Tests the ``atleast_1d`` method.
+ """
+ dtype = getattr(math, t, None)
+ arr = np.array(l)
+
+ res = math.asnumpy(math.atleast_1d(arr, dtype=dtype))
+
+ exp = np.atleast_1d(arr)
+ if dtype:
+ np_dtype = getattr(np, t, None)
+ exp = exp.astype(np_dtype)
+
+ assert np.allclose(res, exp)
+
+ @pytest.mark.parametrize("t", types)
+ @pytest.mark.parametrize("l", [l1, l3])
+ def test_atleast_2d(self, t, l):
+ r"""
+ Tests the ``atleast_2d`` method.
+ """
+ dtype = getattr(math, t, None)
+ arr = np.array(l)
+
+ res = math.asnumpy(math.atleast_2d(arr, dtype=dtype))
+
+ exp = np.atleast_2d(arr)
+ if dtype:
+ np_dtype = getattr(np, t, None)
+ exp = exp.astype(np_dtype)
+
+ assert np.allclose(res, exp)
+
+ @pytest.mark.parametrize("t", types)
+ @pytest.mark.parametrize("l", [l1, l3, l5])
+ def test_atleast_3d(self, t, l):
+ r"""
+ Tests the ``atleast_3d`` method.
+ """
+ dtype = getattr(math, t, None)
+ arr = np.array(l)
+
+ res = math.asnumpy(math.atleast_3d(arr, dtype=dtype))
+
+ if arr.ndim == 1:
+ exp_shape = (1, 1) + arr.shape
+ elif arr.ndim == 2:
+ exp_shape = (1,) + arr.shape
+ else:
+ exp_shape = arr.shape
+ assert res.shape == exp_shape
+
+ def test_boolean_mask(self):
+ r"""
+ Tests the ``boolean_mask`` method.
+ """
+ arr = np.array([1, 2, 3, 4])
+ mask = [True, False, True, True]
+ res = math.asnumpy(math.boolean_mask(arr, mask))
+ exp = np.array([1, 3, 4])
+ assert np.allclose(res, exp)
+
+ def test_block(self):
+ r"""
+ Tests the ``block`` method.
+ """
+ I = math.ones(shape=(4, 4), dtype=math.complex128)
+ O = math.zeros(shape=(4, 4), dtype=math.complex128)
+ R = math.block(
+ [[I, 1j * I, O, O], [O, O, I, -1j * I], [I, -1j * I, O, O], [O, O, I, 1j * I]]
+ )
+ assert R.shape == (16, 16)
+
+ def test_block_diag(self):
+ r"""
+ Tests the ``block_diag`` method.
+ """
+ I = math.ones(shape=(4, 4), dtype=math.complex128)
+ O = math.zeros(shape=(4, 4), dtype=math.complex128)
+ R = math.block_diag(I, 1j * I)
+ assert R.shape == (8, 8)
+ assert np.allclose(math.block([[I, O], [O, 1j * I]]), R)
+
+ @pytest.mark.parametrize("t", types)
+ def test_cast(self, t):
+ r"""
+ Tests the ``cast`` method.
+ """
+ dtype = getattr(math, t, None)
+ np_dtype = getattr(np, t, None)
+
+ arr = np.array([[1, 2], [3, 4]])
+ res = math.asnumpy(math.cast(arr, dtype))
+ exp = arr.astype(np_dtype or np.float64)
+ assert np.allclose(res, exp)
+
+ @pytest.mark.parametrize("l", [l1, l3])
+ def test_clip(self, l):
+ r"""
+ Tests the ``clip`` method.
+ """
+ arr = np.array(l)
+ params = (arr, 0, 3)
+ res = math.asnumpy(math.clip(*params))
+ assert np.allclose(res, np.clip(*params))
+
+ @pytest.mark.parametrize("axis", [0, 1])
+ def test_concat(self, axis):
+ r"""
+ Tests the ``concat`` method.
+ """
+ arr1 = np.array([[1, 2], [3, 4]])
+ arr2 = np.array([[5, 6], [7, 8]])
+ params = ((arr1, arr2), axis)
+ res = math.asnumpy(math.concat(*params))
+ assert np.allclose(res, np.concatenate(*params))
+
+ @pytest.mark.parametrize("l", lists)
+ def test_conj(self, l):
+ r"""
+ Tests the ``conj`` method.
+ """
+ arr = np.array(l)
+ res = math.asnumpy(math.conj(arr))
+ assert np.allclose(res, np.conj(arr))
+
+ @pytest.mark.parametrize("l", lists)
+ def test_cos(self, l):
+ r"""
+ Tests the ``cos`` method.
+ """
+ arr = np.array(l)
+ assert np.allclose(math.asnumpy(math.cos(arr)), np.cos(arr))
+
+ @pytest.mark.parametrize("l", lists)
+ def test_cosh(self, l):
+ r"""
+ Tests the ``cosh`` method.
+ """
+ arr = np.array(l)
+ assert np.allclose(math.asnumpy(math.cosh(arr)), np.cosh(arr))
+
+ def test_det(self):
+ r"""
+ Tests the ``det`` method.
+ """
+ arr = np.array([[1.0, 2.0], [3.0, 4.0]])
+ assert np.allclose(math.det(arr), -2.0)
+
+ def test_diag(self):
+ r"""
+ Tests the ``diag`` method.
+ """
+ d1 = math.ones(shape=(3,), dtype=math.float64)
+ d2 = 2 * math.ones(shape=(2,), dtype=math.float64)
+ d3 = 3 * math.ones(shape=(1,), dtype=math.float64)
+
+ res = math.diag(d1, 0) + math.diag(d2, 1) + math.diag(d3, 2)
+ res = math.asnumpy(res)
+ exp = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 2.0], [0.0, 0.0, 1.0]])
+
+ assert np.allclose(res, exp)
+
+ def test_diag_part(self):
+ r"""
+ Tests the ``diag_part`` method.
+ """
+ arr = np.arange(9).reshape((3, 1, 3))
+
+ dp1 = math.asnumpy(math.diag_part(arr, 0))
+ exp1 = np.array([[0], [3], [6]])
+ assert np.allclose(dp1, exp1)
+
+ dp2 = math.asnumpy(math.diag_part(arr, 1))
+ exp2 = np.array([[1], [4], [7]])
+ assert np.allclose(dp2, exp2)
+
+ dp3 = math.asnumpy(math.diag_part(arr, 2))
+ exp3 = np.array([[2], [5], [8]])
+ assert np.allclose(dp3, exp3)
+
+ def test_eigvals(self):
+ r"""
+ Tests the ``eigvals`` method.
+ """
+ arr = np.arange(9, dtype=np.float64).reshape((3, 3))
+ ev = math.asnumpy(math.eigvals(arr))
+ exp = np.array([1.33484692e01, -1.34846923e00, 0.0])
+
+ ev.sort()
+ exp.sort()
+ assert np.allclose(ev, exp)
+
+ def test_eigh(self):
+ r"""
+ Tests the ``eigh`` method.
+ """
+ arr = np.eye(3)
+ arr[1, 1] = 2
+ arr[2, 2] = 3
+ vals, vecs = math.eigh(arr)
+
+ assert np.allclose(math.asnumpy(vals), np.array([1.0, 2.0, 3.0]))
+ assert np.allclose(math.asnumpy(vecs), np.eye(3))
+
+ def test_exp(self):
+ r"""
+ Tests the ``exp`` method.
+ """
+ arr = np.eye(3)
+ arr[1, 1] = 2
+ arr[2, 2] = 3
+ res = math.asnumpy(math.exp(arr))
+ exp = np.array(
+ [[np.exp(0) if i != j else np.exp(i + 1) for i in range(3)] for j in range(3)]
+ )
+ assert np.allclose(res, exp)
+
+ def test_eye(self):
+ r"""
+ Tests the ``eye`` method.
+ """
+ res = math.asnumpy(math.eye(3))
+ exp = np.eye(3)
+ assert np.allclose(res, exp)
+
+ def test_eye_like(self):
+ r"""
+ Tests the ``eye_like`` method.
+ """
+ res = math.asnumpy(math.eye_like(math.zeros((3, 3))))
+ exp = np.eye(3)
+ assert np.allclose(res, exp)
+
+ def test_from_backend(self):
+ r"""
+ Tests the ``from_backend`` method.
+ """
+ v1 = [1, 2]
+ assert not math.from_backend(v1)
+
+ v2 = np.array(v1)
+ v3 = tf.constant(v1)
+ if math.backend_name == "numpy":
+ assert math.from_backend(v2) and not math.from_backend(v3)
+ else:
+ assert math.from_backend(v3) and not math.from_backend(v2)
+
+ def test_gather(self):
+ r"""
+ Tests the ``gather`` method.
+ """
+ arr = np.arange(9).reshape((3, 3))
+
+ res1 = math.asnumpy(math.gather(arr, 2, 1))
+ exp1 = np.array([2, 5, 8])
+ assert np.allclose(res1, exp1)
+
+ res2 = math.asnumpy(math.gather(arr, 2, 0))
+ exp2 = np.array([6, 7, 8])
+ assert np.allclose(res2, exp2)
+
+ def test_imag(self):
+ r"""
+ Tests the ``imag`` method.
+ """
+ arr = np.eye(3) + 2j * np.eye(3)
+ assert np.allclose(math.asnumpy(math.imag(arr)), 2 * np.eye(3))
+
+ assert np.allclose(math.asnumpy(math.imag(np.eye(3))), 0 * np.eye(3))
+
+ def test_inv(self):
+ r"""
+ Tests the ``inv`` method.
+ """
+ arr = np.array([[1.0, 0], [0, 1j]])
+ inv = math.inv(arr)
+ assert np.allclose(math.asnumpy(arr @ inv), np.eye(2))
+
+ def test_is_trainable(self):
+ r"""
+ Tests the ``is_trainable`` method.
+ """
+ arr1 = np.array([1, 2])
+ arr2 = tf.constant(arr1)
+ arr3 = tf.Variable(arr1)
+
+ assert not math.is_trainable(arr1)
+ assert not math.is_trainable(arr2)
+ assert math.is_trainable(arr3) is (math.backend_name == "tensorflow")
+
+ def test_lgamma(self):
+ r"""
+ Tests the ``lgamma`` method.
+ """
+ arr = np.array([1.0, 2.0, 3.0, 4.0])
+ assert np.allclose(math.asnumpy(math.lgamma(arr)), math.lgamma(arr))
+
+ def test_log(self):
+ r"""
+ Tests the ``log`` method.
+ """
+ arr = np.array([1.0, 2.0, 3.0, 4.0])
+ assert np.allclose(math.asnumpy(math.log(arr)), np.log(arr))
+
+ def test_make_complex(self):
+ r"""
+ Tests the ``make_complex`` method.
+ """
+ r = 1.0
+ i = 2.0
+ assert math.asnumpy(math.make_complex(r, i)) == r + i * 1j
+
+ def test_maximum(self):
+ r"""
+ Tests the ``maximum`` method.
+ """
+ arr1 = np.eye(3)
+ arr2 = 2 * np.eye(3)
+ res = math.asnumpy(math.maximum(arr1, arr2))
+ assert np.allclose(res, arr2)
+
+ def test_minimum(self):
+ r"""
+ Tests the ``minimum`` method.
+ """
+ arr1 = np.eye(3)
+ arr2 = 2 * np.eye(3)
+ res = math.asnumpy(math.minimum(arr1, arr2))
+ assert np.allclose(res, arr1)
+
+ @pytest.mark.parametrize("t", types)
+ def test_new_variable(self, t):
+ r"""
+ Tests the ``new_variable`` method.
+ """
+ dtype = getattr(math, t, None)
+ arr = np.eye(3)
+ res = math.new_variable(arr, (0, 1), "my_var", dtype)
+
+ if math.backend_name == "numpy":
+ assert np.allclose(res, arr)
+ assert not hasattr(res, "name")
+ assert res.dtype == dtype
+ else:
+ assert isinstance(res, tf.Variable)
+ assert np.allclose(math.asnumpy(res), arr)
+ assert res.dtype == (dtype or math.float64)
+
+ @pytest.mark.parametrize("t", types)
+ def test_new_constant(self, t):
+ r"""
+ Tests the ``new_constant`` method.
+ """
+ dtype = getattr(math, t, None)
+ arr = np.eye(3)
+ res = math.new_constant(arr, "my_const", dtype)
+
+ if math.backend_name == "numpy":
+ assert np.allclose(res, arr)
+ assert not hasattr(res, "name")
+ assert res.dtype == dtype
+ else:
+ assert np.allclose(math.asnumpy(res), arr)
+
+ def test_ones(self):
+ r"""
+ Tests the ``ones`` method.
+ """
+ arr = np.ones(3)
+ res = math.asnumpy(math.ones(3))
+ assert np.allclose(res, arr)
+
+ def test_ones_like(self):
+ r"""
+ Tests the ``ones_like`` method.
+ """
+ arr = np.ones(3)
+ res = math.asnumpy(math.ones_like(arr))
+ assert np.allclose(res, arr)
+
+ def test_pow(self):
+ r"""
+ Tests the ``pow`` method.
+ """
+ arr = np.array([1.0, 2.0, 3.0, 4.0])
+ assert np.allclose(math.asnumpy(math.pow(arr, 2)), arr ** 2)
+
+ def test_real(self):
+ r"""
+ Tests the ``real`` method.
+ """
+ arr = np.eye(3) + 2j * np.eye(3)
+ assert np.allclose(math.asnumpy(math.real(arr)), np.eye(3))
+
+ assert np.allclose(math.asnumpy(math.real(np.eye(3))), np.eye(3))
+
+ def test_reshape(self):
+ r"""
+ Tests the ``reshape`` method.
+ """
+ arr = np.eye(3)
+ shape = (1, 9)
+ arr = math.reshape(arr, shape)
+ assert arr.shape == shape
+
+ def test_set_diag(self):
+ r"""
+ Tests the ``set_diag`` method.
+ """
+ arr = np.zeros(shape=(3, 3))
+ diag = np.ones(shape=(3,))
+ assert np.allclose(math.asnumpy(math.set_diag(arr, diag, 0)), np.eye(3))
+
+ @pytest.mark.parametrize("l", lists)
+ def test_sin(self, l):
+ r"""
+ Tests the ``sin`` method.
+ """
+ arr = np.array(l)
+ assert np.allclose(math.asnumpy(math.sin(arr)), np.sin(arr))
+
+ @pytest.mark.parametrize("l", lists)
+ def test_sinh(self, l):
+ r"""
+ Tests the ``sinh`` method.
+ """
+ arr = np.array(l)
+ assert np.allclose(math.asnumpy(math.sinh(arr)), np.sinh(arr))
+
+ def test_solve(self):
+ r"""
+ Tests the ``solve`` method.
+ """
+ arr = np.eye(3)
+ vec = np.array([1.0, 2.0, 3.0])
+ res = math.asnumpy(math.solve(arr, vec))
+ assert np.allclose(arr @ res, vec)
+
+ def test_sqrt(self):
+ r"""
+ Tests the ``sqrt`` method.
+ """
+ arr = 4 * np.eye(3)
+ res = math.asnumpy(math.sqrt(arr))
+ assert np.allclose(res, 2 * np.eye(3))
+
+ def test_sqrtm(self):
+ r"""
+ Tests the ``sqrtm`` method.
+ """
+ arr = 4 * np.eye(3)
+ res = math.asnumpy(math.sqrtm(arr))
+ assert np.allclose(res, 2 * np.eye(3))
+
+ def test_sum(self):
+ r"""
+ Tests the ``sum`` method.
+ """
+ arr = 4 * np.eye(3)
+ res = math.asnumpy(math.sum(arr))
+ assert np.allclose(res, 12)
diff --git a/tests/test_math/test_compactFock.py b/tests/test_math/test_compactFock.py
index 26a11ce0c..48761e5ab 100644
--- a/tests/test_math/test_compactFock.py
+++ b/tests/test_math/test_compactFock.py
@@ -1,34 +1,34 @@
"""
-Unit tests for mrmustard.math.numba.compactFock~
+Unit tests for mrmustard.math.compactFock.compactFock~
"""
+
+import importlib
+
import numpy as np
+import pytest
from hypothesis import given
from hypothesis import strategies as st
+from mrmustard import math, settings
from mrmustard.lab import Ggate, SqueezedVacuum, State, Vacuum
-from mrmustard.math import Math
from mrmustard.physics import fidelity, normalize
from mrmustard.physics.bargmann import wigner_to_bargmann_rho
from mrmustard.training import Optimizer
from tests.random import n_mode_mixed_state
-math = Math() # use methods in math if you want them to be differentiable
+from ..conftest import skip_np
+original_precision = settings.PRECISION_BITS_HERMITE_POLY
-def allowed_cutoffs(max_cutoffs):
- r"""Generate all cutoffs from (1,)*M to max_cutoffs"""
- res = []
- for idx in np.ndindex(max_cutoffs):
- cutoffs = np.array(idx) + 1
- res.append(tuple(cutoffs))
- return res
+do_julia = True if importlib.util.find_spec("julia") else False
+precisions = [128, 256, 384, 512] if do_julia else [128]
@st.composite
def random_ABC(draw, M):
r"""
- generate random Bargmann parameters A,B,C
- for a multimode Gaussian state with displacement
+ Generates random Bargmann parameters A,B,C
+ for an ``M``-mode mixed state.
"""
state = draw(n_mode_mixed_state(M))
A, B, G0 = wigner_to_bargmann_rho(state.cov, state.means)
@@ -36,66 +36,90 @@ def random_ABC(draw, M):
@given(random_ABC(M=3))
-def test_compactFock_diagonal(A_B_G0):
- """Test getting Fock amplitudes if all modes are detected (math.hermite_renormalized_diagonal)"""
- for cutoffs in allowed_cutoffs((7, 7, 7)):
- A, B, G0 = A_B_G0 # Create random state (M mode Gaussian state with displacement)
-
- # Vanilla MM
- G_ref = math.hermite_renormalized(
- math.conj(-A), math.conj(B), math.conj(G0), shape=list(cutoffs) * 2
- ).numpy() # note: shape=[C1,C2,C3,...,C1,C2,C3,...]
-
- # Extract diagonal amplitudes from vanilla MM
- ref_diag = np.zeros(cutoffs, dtype=np.complex128)
- for inds in np.ndindex(*cutoffs):
- inds_expanded = list(inds) + list(inds) # a,b,c,a,b,c
- ref_diag[inds] = G_ref[tuple(inds_expanded)]
-
- # New MM
- G_diag = math.hermite_renormalized_diagonal(
- math.conj(-A), math.conj(B), math.conj(G0), cutoffs
- )
- assert np.allclose(ref_diag, G_diag)
+@pytest.mark.parametrize("precision", precisions)
+def test_compactFock_diagonal(precision, A_B_G0):
+ r"""Test getting Fock amplitudes if all modes are
+ detected (math.hermite_renormalized_diagonal)
+ """
+ settings.PRECISION_BITS_HERMITE_POLY = precision
+ cutoffs = (5, 5, 5)
+
+ A, B, G0 = A_B_G0 # Create random state (M mode Gaussian state with displacement)
+
+ # Vanilla MM
+ G_ref = math.hermite_renormalized(
+ math.conj(-A), math.conj(B), math.conj(G0), shape=list(cutoffs) * 2
+ ) # note: shape=[C1,C2,C3,...,C1,C2,C3,...]
+ G_ref = math.asnumpy(G_ref)
+
+ # Extract diagonal amplitudes from vanilla MM
+ ref_diag = np.zeros(cutoffs, dtype=np.complex128)
+ for inds in np.ndindex(*cutoffs):
+ inds_expanded = list(inds) + list(inds) # a,b,c,a,b,c
+ ref_diag[inds] = G_ref[tuple(inds_expanded)]
+
+ # New MM
+ G_diag = math.hermite_renormalized_diagonal(math.conj(-A), math.conj(B), math.conj(G0), cutoffs)
+ assert np.allclose(ref_diag, G_diag)
+
+ settings.PRECISION_BITS_HERMITE_POLY = original_precision
@given(random_ABC(M=3))
-def test_compactFock_1leftover(A_B_G0):
- """Test getting Fock amplitudes if all but the first mode are detected (math.hermite_renormalized_1leftoverMode)"""
- for cutoffs in allowed_cutoffs((7, 7, 7)):
- A, B, G0 = A_B_G0 # Create random state (M mode Gaussian state with displacement)
-
- # New algorithm
- G_leftover = math.hermite_renormalized_1leftoverMode(
- math.conj(-A), math.conj(B), math.conj(G0), cutoffs
- )
+@pytest.mark.parametrize("precision", precisions)
+def test_compactFock_1leftover(precision, A_B_G0):
+ r"""
+ Test getting Fock amplitudes if all but the first mode
+ are detected (math.hermite_renormalized_1leftoverMode).
+ """
+ skip_np()
- # Vanilla MM
- G_ref = math.hermite_renormalized(
- math.conj(-A), math.conj(B), math.conj(G0), shape=list(cutoffs) * 2
- ).numpy() # note: shape=[C1,C2,C3,...,C1,C2,C3,...]
+ settings.PRECISION_BITS_HERMITE_POLY = precision
+ cutoffs = (5, 5, 5)
- # Extract amplitudes of leftover mode from vanilla MM
- ref_leftover = np.zeros([cutoffs[0]] * 2 + list(cutoffs)[1:], dtype=np.complex128)
- for inds in np.ndindex(*cutoffs[1:]):
- ref_leftover[tuple([slice(cutoffs[0]), slice(cutoffs[0])] + list(inds))] = G_ref[
- tuple([slice(cutoffs[0])] + list(inds) + [slice(cutoffs[0])] + list(inds))
- ]
- assert np.allclose(ref_leftover, G_leftover)
+ A, B, G0 = A_B_G0 # Create random state (M mode Gaussian state with displacement)
+ # New algorithm
+ G_leftover = math.hermite_renormalized_1leftoverMode(
+ math.conj(-A), math.conj(B), math.conj(G0), cutoffs
+ )
-def test_compactFock_diagonal_gradients():
- """Test getting Fock amplitudes AND GRADIENTS if all modes are detected (math.hermite_renormalized_diagonal)"""
- G = Ggate(num_modes=3, symplectic_trainable=True)
+ # Vanilla MM
+ G_ref = math.hermite_renormalized(
+ math.conj(-A), math.conj(B), math.conj(G0), shape=list(cutoffs) * 2
+ ) # note: shape=[C1,C2,C3,...,C1,C2,C3,...]
+ G_ref = math.asnumpy(G_ref)
+
+ # Extract amplitudes of leftover mode from vanilla MM
+ ref_leftover = np.zeros([cutoffs[0]] * 2 + list(cutoffs)[1:], dtype=np.complex128)
+ for inds in np.ndindex(*cutoffs[1:]):
+ ref_leftover[tuple([slice(cutoffs[0]), slice(cutoffs[0])] + list(inds))] = G_ref[
+ tuple([slice(cutoffs[0])] + list(inds) + [slice(cutoffs[0])] + list(inds))
+ ]
+ assert np.allclose(ref_leftover, G_leftover)
+
+ settings.PRECISION_BITS_HERMITE_POLY = original_precision
+
+
+@pytest.mark.parametrize("precision", precisions)
+def test_compactFock_diagonal_gradients(precision):
+ r"""
+ Test getting Fock amplitudes and gradients if all modes
+ are detected (math.hermite_renormalized_diagonal).
+ """
+ skip_np()
+
+ settings.PRECISION_BITS_HERMITE_POLY = precision
+ G = Ggate(num_modes=2, symplectic_trainable=True)
def cost_fn():
- n1, n2, n3 = 2, 2, 4 # number of detected photons
- state_opt = Vacuum(3) >> G
+ n1, n2 = 2, 4 # number of detected photons
+ state_opt = Vacuum(2) >> G
A, B, G0 = wigner_to_bargmann_rho(state_opt.cov, state_opt.means)
probs = math.hermite_renormalized_diagonal(
- math.conj(-A), math.conj(B), math.conj(G0), cutoffs=[n1 + 1, n2 + 1, n3 + 1]
+ math.conj(-A), math.conj(B), math.conj(G0), cutoffs=[n1 + 1, n2 + 1]
)
- p = probs[n1, n2, n3]
+ p = probs[n1, n2]
return -math.real(p)
opt = Optimizer(symplectic_lr=0.5)
@@ -103,22 +127,33 @@ def cost_fn():
for i in range(2, min(20, len(opt.opt_history))):
assert opt.opt_history[i - 1] >= opt.opt_history[i]
+ settings.PRECISION_BITS_HERMITE_POLY = original_precision
+
+
+@pytest.mark.parametrize("precision", precisions)
+def test_compactFock_1leftover_gradients(precision):
+ r"""
+    Test getting Fock amplitudes and gradients if all but the first
+    mode are detected (math.hermite_renormalized_1leftoverMode).
+ """
+ skip_np()
-def test_compactFock_1leftover_gradients():
- """Test getting Fock amplitudes AND GRADIENTS if all but the first mode are detected (math.hermite_renormalized_1leftoverMode)"""
- G = Ggate(num_modes=3, symplectic_trainable=True)
+ settings.PRECISION_BITS_HERMITE_POLY = precision
+ G = Ggate(num_modes=2, symplectic_trainable=True)
def cost_fn():
- n2, n3 = 1, 3 # number of detected photons
- state_opt = Vacuum(3) >> G
+ n2 = 3 # number of detected photons
+ state_opt = Vacuum(2) >> G
A, B, G0 = wigner_to_bargmann_rho(state_opt.cov, state_opt.means)
marginal = math.hermite_renormalized_1leftoverMode(
- math.conj(-A), math.conj(B), math.conj(G0), cutoffs=[8, n2 + 1, n3 + 1]
+ math.conj(-A), math.conj(B), math.conj(G0), cutoffs=[8, n2 + 1]
)
- conditional_state = normalize(State(dm=marginal[..., n2, n3]))
+ conditional_state = normalize(State(dm=marginal[..., n2]))
return -fidelity(conditional_state, SqueezedVacuum(r=1))
opt = Optimizer(symplectic_lr=0.1)
opt.minimize(cost_fn, by_optimizing=[G], max_steps=50)
for i in range(2, min(20, len(opt.opt_history))):
assert opt.opt_history[i - 1] >= opt.opt_history[i]
+
+ settings.PRECISION_BITS_HERMITE_POLY = original_precision
diff --git a/tests/test_math/test_flat_indices.py b/tests/test_math/test_flat_indices.py
new file mode 100644
index 000000000..1d38ed810
--- /dev/null
+++ b/tests/test_math/test_flat_indices.py
@@ -0,0 +1,69 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for flat indices"""
+
+import numpy as np
+import pytest
+
+from mrmustard.math.lattice.strategies.flat_indices import (
+ first_available_pivot,
+ lower_neighbors,
+ shape_to_strides,
+)
+
+
+def test_shape_to_strides():
+ r"""
+ Tests the ``shape_to_strides`` method.
+ """
+ shape1 = np.array([2])
+ strides1 = np.array([1])
+ assert np.allclose(shape_to_strides(shape1), strides1)
+
+ shape2 = np.array([1, 2])
+ strides2 = np.array([2, 1])
+ assert np.allclose(shape_to_strides(shape2), strides2)
+
+ shape3 = np.array([4, 5, 6])
+ strides3 = np.array([30, 6, 1])
+ assert np.allclose(shape_to_strides(shape3), strides3)
+
+
+def test_first_available_pivot():
+ r"""
+ Tests the ``first_available_pivot`` method.
+ """
+ strides1 = shape_to_strides(np.array([2, 2, 2]))
+
+ with pytest.raises(ValueError, match="zero"):
+ first_available_pivot(0, strides1)
+ assert first_available_pivot(1, strides1) == (2, 0)
+ assert first_available_pivot(2, strides1) == (1, 0)
+ assert first_available_pivot(3, strides1) == (1, 1)
+ assert first_available_pivot(4, strides1) == (0, 0)
+ assert first_available_pivot(5, strides1) == (0, 1)
+ assert first_available_pivot(6, strides1) == (0, 2)
+ assert first_available_pivot(7, strides1) == (0, 3)
+
+
+def test_lower_neighbors():
+ r"""
+ Tests the ``lower_neighbors`` method.
+ """
+ strides = shape_to_strides(np.array([2, 2, 2]))
+
+ assert list(lower_neighbors(1, strides, 0)) == [(0, -3), (1, -1), (2, 0)]
+ assert list(lower_neighbors(1, strides, 1)) == [(1, -1), (2, 0)]
+ assert list(lower_neighbors(1, strides, 2)) == [(2, 0)]
diff --git a/tests/test_math/test_interface.py b/tests/test_math/test_interface.py
deleted file mode 100644
index c3fbd63e4..000000000
--- a/tests/test_math/test_interface.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2022 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Unit tests for the :class:`Math`.
-"""
-
-import numpy as np
-import pytest
-
-from mrmustard import settings
-from mrmustard.math import Math
-
-try:
- import torch
-
- torch_available = True
-except ImportError:
- torch_available = False
-
-
-def test_backend_redirection_tf():
- """Test Math class is redirecting calls to the backend set on MM settings"""
- math = Math()
-
- settings.BACKEND = "tensorflow"
- assert math._MathInterface__instance.__module__ == "mrmustard.math.tensorflow"
-
-
-@pytest.mark.skipif(not torch_available, reason="Test only works if Torch is installed")
-def test_backend_redirection_torch():
- """Test Math class is redirecting calls to the backend set on MM settings"""
- math = Math()
-
- settings.BACKEND = "torch"
- assert math._MathInterface__instance.__module__ == "mrmustard.math.torch"
-
-
-def test_error_for_wrong_backend():
- """Test error is raise when using a backend that is not allowed"""
- backend = settings.BACKEND
- with pytest.raises(ValueError) as exception_info:
- settings.BACKEND = "unexisting_backend"
- assert exception_info.value.args[0] == "Backend must be either 'tensorflow' or 'torch'"
-
- # set back to initial value to avoid side effects
- settings.BACKEND = backend
-
-
-def test_hash_tensor():
- """Test hash of a tensor"""
- math = Math()
- tensor = math.astensor([1, 2, 3])
- assert np.allclose(*[math.hash_tensor(tensor) for _ in range(3)])
diff --git a/tests/test_math/test_lattice.py b/tests/test_math/test_lattice.py
index f7cb43a1f..76506660c 100644
--- a/tests/test_math/test_lattice.py
+++ b/tests/test_math/test_lattice.py
@@ -14,16 +14,97 @@
"""Tests for the lattice module"""
+import importlib
+import pytest
import numpy as np
-from mrmustard.lab import Gaussian
+from mrmustard.lab import Gaussian, Dgate
+from mrmustard import settings, math
+from mrmustard.physics.bargmann import wigner_to_bargmann_rho
+from mrmustard.math.lattice.strategies.binomial import binomial, binomial_dict
+original_precision = settings.PRECISION_BITS_HERMITE_POLY
-def test_vanilla_vs_binomial():
- """Test that the vanilla and binomial methods give the same result"""
+do_julia = True if importlib.util.find_spec("julia") else False
+precisions = (
+ [128, 256, 384, 512]
+ if do_julia
+ else [
+ 128,
+ ]
+)
+
+
+@pytest.mark.parametrize("precision", precisions)
+def test_vanillaNumba_vs_binomial(precision):
+ """Test that the vanilla method and the binomial method give the same result.
+ Test is repeated for all possible values of PRECISION_BITS_HERMITE_POLY."""
+
+ settings.PRECISION_BITS_HERMITE_POLY = precision
G = Gaussian(2)
ket_vanilla = G.ket(cutoffs=[10, 10])[:5, :5]
ket_binomial = G.ket(max_photons=10)[:5, :5]
assert np.allclose(ket_vanilla, ket_binomial)
+
+ settings.PRECISION_BITS_HERMITE_POLY = original_precision
+
+
+def test_binomial_vs_binomialDict():
+ """Test that binomial and binomial_dict give the same result."""
+
+ A, b, c = Gaussian(2).bargmann(numpy=True)
+ max_prob = 0.9
+ local_cutoffs = (10, 10)
+ global_cutoff = 15
+
+ G, norm = binomial(local_cutoffs, A, b, c.item(), max_prob, global_cutoff)
+ D = binomial_dict(local_cutoffs, A, b, c.item(), max_prob, global_cutoff)
+
+ for idx in D.keys():
+ assert np.isclose(D[idx], G[idx])
+
+
+@pytest.mark.parametrize("batch_size", [1, 3])
+def test_vanillabatchNumba_vs_vanillaNumba(batch_size):
+ """Test the batch version works versus the normal vanilla version."""
+ state = Gaussian(3) >> Dgate([0.0, 0.1, 0.2])
+ A, B, C = wigner_to_bargmann_rho(
+ state.cov, state.means
+ ) # Create random state (M mode Gaussian state with displacement)
+
+ cutoffs = (20, 20, 20, 20, batch_size)
+
+ # Vanilla MM
+ G_ref = math.hermite_renormalized(A, B, C, shape=cutoffs[:-1])
+
+ # replicate the B
+ B_batched = np.stack((B,) * batch_size, axis=1)
+
+ G_batched = math.hermite_renormalized_batch(A, B_batched, C, shape=cutoffs)
+
+ for nb in range(batch_size):
+ assert np.allclose(G_ref, G_batched[:, :, :, :, nb])
+
+
+@pytest.mark.parametrize("batch_size", [1, 3])
+def test_diagonalbatchNumba_vs_diagonalNumba(batch_size):
+ """Test the batch version works versus the normal diagonal version."""
+ state = Gaussian(3) >> Dgate([0.0, 0.1, 0.2])
+ A, B, C = wigner_to_bargmann_rho(
+ state.cov, state.means
+ ) # Create random state (M mode Gaussian state with displacement)
+
+ cutoffs = (18, 19, 20, batch_size)
+
+ # Diagonal MM
+ G_ref = math.hermite_renormalized_diagonal(A, B, C, cutoffs=cutoffs[:-1])
+
+ # replicate the B
+ B_batched = np.stack((B,) * batch_size, axis=1)
+
+ G_batched = math.hermite_renormalized_diagonal_batch(A, B_batched, C, cutoffs=cutoffs[:-1])
+
+ for nb in range(batch_size):
+ assert np.allclose(G_ref, G_batched[:, :, :, nb])
diff --git a/tests/test_math/test_mmtensor.py b/tests/test_math/test_mmtensor.py
index d5fd63713..bf2dda98e 100644
--- a/tests/test_math/test_mmtensor.py
+++ b/tests/test_math/test_mmtensor.py
@@ -18,10 +18,7 @@
import numpy as np
import pytest
-from mrmustard.math import Math
-from mrmustard.math.mmtensor import MMTensor
-
-math = Math()
+from mrmustard.math.tensor_wrappers.mmtensor import MMTensor
def test_mmtensor_creation():
diff --git a/tests/test_math/test_parameter_set.py b/tests/test_math/test_parameter_set.py
new file mode 100644
index 000000000..11dd03f71
--- /dev/null
+++ b/tests/test_math/test_parameter_set.py
@@ -0,0 +1,86 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit tests for the :class:`ParameterSet`.
+"""
+
+from mrmustard import math
+from mrmustard.math.parameter_set import ParameterSet
+from mrmustard.math.parameters import Constant, Variable
+
+
+class TestParameterSet:
+ r"""
+ Tests for ParameterSet.
+ """
+
+ def test_init(self):
+ r"""
+ Tests the init.
+ """
+ ps = ParameterSet()
+ assert not ps.names
+ assert not ps.constants
+ assert not ps.variables
+
+ def test_add_parameters(self):
+ r"""
+ Tests the ``add_parameter`` method.
+ """
+ const1 = Constant(1, "const1")
+ const2 = Constant(2, "const2")
+ var1 = Variable(1, "var1")
+
+ ps = ParameterSet()
+ ps.add_parameter(const1)
+ ps.add_parameter(const2)
+ ps.add_parameter(var1)
+
+ assert ps.names == ["const1", "const2", "var1"]
+ assert ps.constants == {"const1": const1, "const2": const2}
+ assert ps.variables == {"var1": var1}
+
+ def test_tagged_variables(self):
+ r"""
+ Tests the ``tagged_variables`` method.
+ """
+ const1 = Constant(1, "const1")
+ const2 = Constant(2, "const2")
+ var1 = Variable(1, "var1")
+
+ ps = ParameterSet()
+ ps.add_parameter(const1)
+ ps.add_parameter(const2)
+ ps.add_parameter(var1)
+
+ variables = ps.tagged_variables("ciao")
+ assert variables == {"ciao/var1": var1}
+
+ def test_to_string(self):
+ r"""
+ Tests the ``to_string`` method.
+ """
+ const1 = Constant(1.2345, "const1")
+ const2 = Constant(2.3456, "const2")
+ var1 = Variable(3.4567, "var1")
+
+ ps = ParameterSet()
+ ps.add_parameter(const1)
+ ps.add_parameter(const2)
+ ps.add_parameter(var1)
+
+ assert ps.to_string(1) == "1.2, 2.3, 3.5"
+ assert ps.to_string(3) == "1.234, 2.346, 3.457"
+ assert ps.to_string(10) == "1.2345, 2.3456, 3.4567"
diff --git a/tests/test_math/test_parameters.py b/tests/test_math/test_parameters.py
new file mode 100644
index 000000000..be8a24ba4
--- /dev/null
+++ b/tests/test_math/test_parameters.py
@@ -0,0 +1,122 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit tests for :class:`Constant` and :class:`Variable`.
+"""
+
+import pytest
+import numpy as np
+
+from mrmustard import math
+from mrmustard.math.parameters import (
+ Constant,
+ Variable,
+ update_euclidean,
+ update_orthogonal,
+ update_unitary,
+ update_symplectic,
+)
+
+
+class TestConstant:
+ r"""
+ Tests for Constant.
+ """
+
+ def test_init(self):
+ r"""
+ Tests the init.
+ """
+ const1 = Constant(1, "const1")
+ assert const1.value == 1
+ assert const1.name == "const1"
+
+ math_const = math.new_constant(2, "const2")
+ const2 = Constant(math_const, "const2")
+ assert const2.value == math_const
+ assert const2.name == "const2"
+
+ const3 = Constant(np.array([1, 2, 3]), "const3")
+ assert np.allclose(const3.value, np.array([1, 2, 3]))
+
+ def test_is_const(self):
+ r"""
+ Tests that constants are immutable.
+ """
+ const = Constant(1, "const")
+
+ with pytest.raises(AttributeError, match="can't set attribute"):
+ const.value = 2
+
+ with pytest.raises(AttributeError, match="can't set attribute"):
+ const.name = "const2"
+
+
+class TestVariable:
+ r"""
+ Tests for Variable.
+ """
+
+ def test_init(self):
+ r"""
+ Tests the init.
+ """
+ var1 = Variable(1, "var1")
+ assert var1.value == 1
+ assert var1.name == "var1"
+ assert var1.bounds == (None, None)
+ assert var1.update_fn == update_euclidean
+
+ math_var = math.new_variable(2, (0, 1), "var2")
+ var2 = Variable(math_var, "var2")
+ assert var2.value == math_var
+ assert var2.name == "var2"
+ assert var2.update_fn == update_euclidean
+
+ var3 = Variable(np.array([1, 2, 3]), "var3", (0, 1), update_orthogonal)
+ assert np.allclose(var3.value, np.array([1, 2, 3]))
+ assert var3.bounds == (0, 1)
+ assert var3.update_fn == update_orthogonal
+
+ def test_is_variable(self):
+ r"""
+ Tests that variables are mutable.
+ """
+ var = Variable(1, "var")
+
+ var.value = 2
+ assert var.value == 2
+
+ var.update_fn = update_orthogonal
+ assert var.update_fn == update_orthogonal
+
+ with pytest.raises(AttributeError, match="can't set attribute"):
+ var.name = "var2"
+
+ with pytest.raises(AttributeError, match="can't set attribute"):
+ var.bounds = (0, 1)
+
+ def test_static_methods(self):
+ r"""
+ Tests the static methods.
+ """
+ va1 = Variable.symplectic(1, "var1")
+ assert va1.update_fn == update_symplectic
+
+ va2 = Variable.orthogonal(1, "va2")
+ assert va2.update_fn == update_orthogonal
+
+ var3 = Variable.unitary(1, "var3")
+ assert var3.update_fn == update_unitary
diff --git a/tests/test_math/test_special.py b/tests/test_math/test_special.py
index 04916d3b8..c5a20e75e 100644
--- a/tests/test_math/test_special.py
+++ b/tests/test_math/test_special.py
@@ -17,9 +17,7 @@
import numpy as np
from scipy.special import eval_hermite, factorial
-from mrmustard.math import Math
-
-math = Math()
+from mrmustard import math
def test_reduction_to_renorm_physicists_polys():
diff --git a/tests/test_math/test_tensor_networks/__init__.py b/tests/test_math/test_tensor_networks/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_math/test_tensor_networks/test_integration.py b/tests/test_math/test_tensor_networks/test_integration.py
new file mode 100644
index 000000000..89f94df8c
--- /dev/null
+++ b/tests/test_math/test_tensor_networks/test_integration.py
@@ -0,0 +1,87 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains tests for simulating quantum circuits with tensor networks."""
+
+from mrmustard.math.tensor_networks import connect, contract
+from mrmustard.lab.gates import (
+ BSgate,
+ CXgate,
+ CZgate,
+ Dgate,
+ MZgate,
+ Rgate,
+ Sgate,
+)
+import numpy as np
+import pytest
+
+
+class TestTransformations:
+ r"""
+ Tests that transformations can be contracted by tensor networks.
+ """
+
+ @pytest.mark.parametrize("modes", [[0], [1, 2], [3, 4, 5, 6]])
+ @pytest.mark.parametrize("dim", [3, 4])
+ @pytest.mark.parametrize("default_dim", [2, 5])
+ def test_sequence_one_mode_unitaries(self, modes, dim, default_dim):
+ r"""
+ Tests that a sequence of one-mode unitaries can be contracted correctly.
+ """
+ s_tens = Sgate(0.1, modes=modes)
+ r_tens = Rgate(0.2, modes=modes)
+ d_tens = Dgate(0.3, modes=modes)
+
+ for mode in modes:
+ connect(s_tens.output.ket[mode], r_tens.input.ket[mode], dim)
+ connect(r_tens.output.ket[mode], d_tens.input.ket[mode], dim)
+ contraction = contract([s_tens, r_tens, d_tens], default_dim)
+
+ s_mat = Sgate(0.1).U(shape=(default_dim, dim))
+ r_mat = Rgate(0.2).U(shape=(dim, dim, dim, dim))
+ d_mat = Dgate(0.3).U(shape=(dim, default_dim))
+ expected = np.dot(s_mat, r_mat)
+ expected = np.dot(expected, d_mat)
+ if len(modes) == 2:
+ expected = np.kron(expected, expected)
+ expected = expected.reshape(*contraction.shape)
+ if len(modes) == 4:
+ expected = np.kron(expected, expected)
+ expected = np.kron(expected, expected)
+ expected = expected.reshape(*contraction.shape)
+
+ assert np.allclose(contraction, expected)
+
+ @pytest.mark.parametrize("modes", [[1, 2]])
+ @pytest.mark.parametrize("dim", [3, 20])
+ @pytest.mark.parametrize("default_dim", [2, 10])
+ def test_sequence_multi_mode_unitaries(self, modes, dim, default_dim):
+ r"""
+ Tests that a sequence of multi-mode unitaries can be contracted correctly.
+ """
+ cx_tens = CXgate(modes=modes)
+ bs_tens = BSgate(0.2, modes=modes)
+ cz_tens = CZgate(modes=modes)
+ mz_tens = MZgate(0.3, modes=modes)
+
+ for mode in modes:
+ connect(cx_tens.output.ket[mode], bs_tens.input.ket[mode], dim)
+ connect(bs_tens.output.ket[mode], cz_tens.input.ket[mode], dim)
+ connect(cz_tens.output.ket[mode], mz_tens.input.ket[mode], dim)
+ contraction = contract([cx_tens, bs_tens, cz_tens, mz_tens], default_dim)
+
+ assert contraction.shape == (default_dim, default_dim, default_dim, default_dim)
+ # TODO: find a way to validate the tensor's values
+ # --> when states are available, apply to states and compare with the expected dm.
diff --git a/tests/test_math/test_tensor_networks/test_networks.py b/tests/test_math/test_tensor_networks/test_networks.py
new file mode 100644
index 000000000..3ffb0ce3d
--- /dev/null
+++ b/tests/test_math/test_tensor_networks/test_networks.py
@@ -0,0 +1,111 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains tests for the networks.py module."""
+
+from mrmustard.math.tensor_networks import Tensor, connect, draw
+
+from matplotlib.figure import Figure
+
+import numpy as np
+import pytest
+
+# ~~~~~~~
+# Helpers
+# ~~~~~~~
+
+
+class TId(Tensor):
+    r"""
+    A tensor whose value is the ones matrix of a given shape.
+    """
+
+ def value(self, shape):
+ return np.ones(shape)
+
+
+# ~~~~~~~
+# Tests
+# ~~~~~~~
+
+
+class TestConnect:
+ r"""
+ Tests the function to connect wires between tensors.
+ """
+
+ def test_ids(self):
+ r"""
+ Tests that the ``id``s of tensors connected to each other are equal.
+ """
+ t1 = TId("t1", [1, 2, 3], [4, 5, 6])
+ t2 = TId("t2", None, None, [7, 8, 9])
+ t3 = TId("t3", None, None, None, [10])
+
+ connect(t1.input.ket[1], t1.output.ket[4])
+ connect(t1.output.ket[5], t2.input.bra[8])
+ connect(t1.input.ket[3], t3.output.bra[10])
+
+ assert t1.input.ket[1].contraction_id == t1.output.ket[4].contraction_id
+ assert t1.output.ket[5].contraction_id == t2.input.bra[8].contraction_id
+ assert t1.input.ket[3].contraction_id == t3.output.bra[10].contraction_id
+
+ def test_dim(self):
+ r"""
+ Tests that the dim is handled correctly.
+ """
+ t1 = TId("t1", modes_in_ket=[1, 2])
+ t2 = TId("t2", modes_out_ket=[1, 2])
+
+ connect(t1.input.ket[1], t2.output.ket[1])
+ connect(t1.input.ket[2], t2.output.ket[2], dim=10)
+
+ assert t1.input.ket[1].dim is None
+ assert t1.input.ket[2].dim == 10
+ assert t2.output.ket[1].dim is None
+ assert t2.output.ket[2].dim == 10
+
+ def test_error(self):
+ r"""
+ Tests that wires that are already connected can no longer be connected.
+ """
+ t1 = TId("t1", [1, 2, 3], [4, 5, 6])
+ t2 = TId("t2", None, None, [7, 8, 9])
+ connect(t1.output.ket[5], t2.input.bra[8])
+
+ with pytest.raises(ValueError, match="already connected"):
+ connect(t1.output.ket[5], t2.input.bra[8])
+
+
+class TestDraw:
+ r"""
+ Tests the function to draw tensor networks.
+ """
+
+ @pytest.mark.parametrize("layout", ["spring_layout", "circular_layout"])
+ @pytest.mark.parametrize("figsize", [None, (4, 4)])
+ def test_draw(self, layout, figsize):
+ r"""
+ Tests that ``draw`` produces a figure.
+ """
+ t1 = TId("tensor 1", [0, 1, 2], [0, 1, 2])
+ t2 = TId("tensor 2", [1], [1])
+ t3 = TId("tensor 3", [0, 2], [0, 2])
+
+ connect(t1.output.ket[0], t3.input.ket[0])
+ connect(t1.output.ket[2], t3.input.ket[2])
+ connect(t1.output.ket[1], t2.input.ket[1])
+
+ fig = draw([t1, t2, t3], layout, figsize)
+ assert isinstance(fig, Figure)
diff --git a/tests/test_math/test_tensor_networks/test_tensors.py b/tests/test_math/test_tensor_networks/test_tensors.py
new file mode 100644
index 000000000..112ffe45a
--- /dev/null
+++ b/tests/test_math/test_tensor_networks/test_tensors.py
@@ -0,0 +1,238 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains tests for the tensors.py module."""
+
+from mrmustard.math.tensor_networks import Wire, Tensor, connect
+
+import numpy as np
+import pytest
+
+# ~~~~~~~
+# Helpers
+# ~~~~~~~
+
+
+class TBad(Tensor):
+ r"""
+ A tensor without value.
+ """
+
+
+class TComplex(Tensor):
+ r"""
+ A tensor whose value is a complex matrix of given shape.
+ """
+
+ def value(self, shape):
+ return np.random.rand(*shape) + 1j * np.random.rand(*shape)
+
+
+# ~~~~~~~
+# Tests
+# ~~~~~~~
+
+
+class TestWire:
+ r"""
+ Tests the Wire class.
+ """
+
+ @pytest.mark.parametrize("is_input", [True, False])
+ @pytest.mark.parametrize("is_ket", [True, False])
+ def test_init(self, is_input, is_ket):
+ r"""
+ Tests the init of wires.
+ """
+ id = 123
+ mode = 5
+ wire = Wire(id, mode, is_input, is_ket)
+
+ assert wire.id == id
+ assert wire.mode == mode
+ assert wire.is_input is is_input
+ assert wire.is_ket is is_ket
+ assert wire.is_connected is False
+ assert wire.dim is None
+ assert isinstance(wire.contraction_id, int)
+
+ def test_dim_error(self):
+ r"""
+ Tests that ``dim`` cannot be set more than once.
+ """
+ wire = Wire(0, 0, True, True)
+ wire.dim = 18
+
+ with pytest.raises(ValueError, match="Cannot change"):
+ wire.dim = 20
+
+
+class TestTensor:
+ r"""
+ Tests the Tensor class.
+ """
+
+ @pytest.mark.parametrize("modes_in_ket", [None, [1, 2, 3]])
+ @pytest.mark.parametrize("modes_out_ket", [None, [4]])
+ @pytest.mark.parametrize("modes_in_bra", [None, [1, 2, 3]])
+ @pytest.mark.parametrize("modes_out_bra", [None, [4]])
+ def test_init(self, modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra):
+ r"""
+ Tests the init of tensors.
+ """
+ name = "t"
+ t = TComplex(name, modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra)
+
+ assert t.name == name
+
+ assert len(t.input.ket.items()) == 0 if modes_in_ket is None else len(modes_in_ket)
+ assert len(t.output.ket.items()) == 0 if modes_out_ket is None else len(modes_out_ket)
+ assert len(t.input.bra.items()) == 0 if modes_in_bra is None else len(modes_in_bra)
+ assert len(t.output.bra.items()) == 0 if modes_out_bra is None else len(modes_out_bra)
+
+ @pytest.mark.parametrize("modes_in_ket", [None, [1, 2, 3]])
+ @pytest.mark.parametrize("modes_out_ket", [None, [4]])
+ @pytest.mark.parametrize("modes_in_bra", [None, [1, 2, 3]])
+ @pytest.mark.parametrize("modes_out_bra", [None, [4]])
+ def test_ids_in_same_tensor(self, modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra):
+ r"""
+ Tests that tensors generate wires with different ``id``s.
+ """
+ t = TComplex("t", modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra)
+
+ all_ids = [w.id for w in t.input.ket.values()]
+ all_ids += [w.id for w in t.output.ket.values()]
+ all_ids += [w.id for w in t.input.bra.values()]
+ all_ids += [w.id for w in t.output.bra.values()]
+
+ assert len(all_ids) == len(set(all_ids))
+
+ @pytest.mark.parametrize("modes_in_ket", [None, [1, 2, 3]])
+ @pytest.mark.parametrize("modes_out_ket", [None, [4]])
+ @pytest.mark.parametrize("modes_in_bra", [None, [1, 2, 3]])
+ @pytest.mark.parametrize("modes_out_bra", [None, [4]])
+ def test_ids_in_different_tensor(
+ self, modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra
+ ):
+ r"""
+ Tests that different tensors generate wires with different ``id``s.
+ """
+ t1 = TComplex("t1", modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra)
+ t2 = TComplex("t2", modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra)
+
+ all_ids1 = [w.id for w in t1.input.ket.values()]
+ all_ids1 += [w.id for w in t1.output.ket.values()]
+ all_ids1 += [w.id for w in t1.input.bra.values()]
+ all_ids1 += [w.id for w in t1.output.bra.values()]
+
+ all_ids2 = [w.id for w in t2.input.ket.values()]
+ all_ids2 += [w.id for w in t2.output.ket.values()]
+ all_ids2 += [w.id for w in t2.input.bra.values()]
+ all_ids2 += [w.id for w in t2.output.bra.values()]
+
+ assert len(all_ids1 + all_ids2) == len(set(all_ids1 + all_ids2))
+
+ def test_adjoint(self):
+ r"""
+ Tests the adjoint method.
+ """
+ t = TComplex("t", [1, 2], [2, 3])
+ t_adj = t.adjoint
+
+ shape = (3, 4, 8, 1)
+
+ assert t_adj.value(shape).shape == shape
+ assert t.input.ket.keys() == t_adj.input.bra.keys()
+ assert t.input.bra.keys() == t_adj.input.ket.keys()
+ assert t.output.ket.keys() == t_adj.output.bra.keys()
+ assert t.output.bra.keys() == t_adj.output.ket.keys()
+
+ def test_modes_in_out(self):
+ r"""
+ Tests the modes_in and modes_out methods.
+ """
+ t1 = TComplex("t", [1], [2])
+ assert t1.modes_in == [1]
+ assert t1.modes_out == [2]
+
+ t1 = TComplex("t", [1], [2], [1], [2])
+ assert t1.modes_in == [1]
+ assert t1.modes_out == [2]
+
+ @pytest.mark.parametrize("modes_in_ket", [None, [1, 2, 3]])
+ @pytest.mark.parametrize("modes_out_ket", [None, [4]])
+ @pytest.mark.parametrize("modes_in_bra", [None, [1, 2, 3]])
+ @pytest.mark.parametrize("modes_out_bra", [None, [4]])
+ def test_wires(self, modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra):
+ r"""
+ Tests the init of tensors.
+ """
+ name = "t"
+ t = TComplex(name, modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra)
+ wires = np.array(t.wires)
+
+ list_modes = [] if modes_in_ket is None else modes_in_ket
+ mask = [w.mode in list_modes for w in wires]
+ assert len(wires[mask]) == 0 or len(modes_in_ket)
+
+ list_modes = [] if modes_out_ket is None else modes_out_ket
+ mask = [w.mode in list_modes for w in wires]
+ assert len(wires[mask]) == 0 or len(modes_out_ket)
+
+ list_modes = [] if modes_in_bra is None else modes_in_bra
+ mask = [w.mode in list_modes for w in wires]
+ assert len(wires[mask]) == 0 or len(modes_in_bra)
+
+ list_modes = [] if modes_out_bra is None else modes_out_bra
+ mask = [w.mode in list_modes for w in wires]
+ assert len(wires[mask]) == 0 or len(modes_out_bra)
+
+ def test_value_error(self):
+ r"""
+ Tests the error for the value property.
+ """
+ with pytest.raises(TypeError, match="abstract method value"):
+ TBad("t_bad")
+
+ def test_change_modes(self):
+ r"""
+ Tests the function to change modes.
+ """
+ t = TComplex("t")
+
+ modes_in_ket = [1]
+ modes_out_ket = [2, 3]
+ t.change_modes(modes_in_ket, modes_out_ket)
+
+ assert list(t.input.ket.keys()) == modes_in_ket
+ assert not t.input.bra
+ assert list(t.output.ket.keys()) == modes_out_ket
+ assert not t.output.bra
+
+ def test_change_modes_errors(self):
+ r"""
+ Tests the errors of the function to change modes.
+ """
+ t1 = TComplex("t1", [1])
+ t2 = TComplex("t2", None, [1])
+
+ with pytest.raises(ValueError, match="Input modes"):
+ t1.change_modes([2], None, [3])
+
+ with pytest.raises(ValueError, match="Output modes"):
+ t1.change_modes(None, [2], None, [1])
+
+ connect(t1.input.ket[1], t2.output.ket[1], 1)
+ with pytest.raises(ValueError, match="already connected"):
+ t1.change_modes([2])
diff --git a/tests/test_utils/test_xptensor.py b/tests/test_math/test_xptensor.py
similarity index 97%
rename from tests/test_utils/test_xptensor.py
rename to tests/test_math/test_xptensor.py
index b29983c84..c414cb6b5 100644
--- a/tests/test_utils/test_xptensor.py
+++ b/tests/test_math/test_xptensor.py
@@ -12,13 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from hypothesis import strategies as st, given, assume
+from hypothesis import strategies as st, given
from hypothesis.extra.numpy import arrays
-import pytest
-from mrmustard.lab.states import DisplacedSqueezed
-from mrmustard.utils.xptensor import XPVector, XPMatrix
+from mrmustard.math.tensor_wrappers.xptensor import XPVector, XPMatrix
import numpy as np
-from tests.random import n_mode_pure_state
even = st.integers(min_value=2, max_value=10).filter(lambda x: x % 2 == 0)
floats = st.floats(min_value=-1e3, max_value=1e3, allow_nan=False, allow_infinity=False)
diff --git a/tests/test_physics/__init__.py b/tests/test_physics/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_physics/test_ansatz.py b/tests/test_physics/test_ansatz.py
new file mode 100644
index 000000000..fe62113a3
--- /dev/null
+++ b/tests/test_physics/test_ansatz.py
@@ -0,0 +1,157 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+from hypothesis import given
+from hypothesis import strategies as st
+from hypothesis.extra.numpy import arrays
+
+from mrmustard import math
+from mrmustard.physics.ansatze import PolyExpAnsatz
+from tests.random import Abc_triple, complex_number
+
+
+@given(Abc=Abc_triple())
+def test_PolyExpAnsatz(Abc):
+ """Test that the PolyExpAnsatz class is initialized correctly"""
+ A, b, c = Abc
+ ansatz = PolyExpAnsatz(A, b, c)
+ assert np.allclose(ansatz.mat[0], A)
+ assert np.allclose(ansatz.vec[0], b)
+ assert np.allclose(ansatz.array[0], c)
+
+
+# test adding two PolyExpAnsatz objects
+@given(Abc1=Abc_triple(5), Abc2=Abc_triple(5))
+def test_PolyExpAnsatz_add(Abc1, Abc2):
+ """Test that we can add two PolyExpAnsatz objects"""
+ A1, b1, c1 = Abc1
+ A2, b2, c2 = Abc2
+ ansatz = PolyExpAnsatz(A1, b1, c1)
+ ansatz2 = PolyExpAnsatz(A2, b2, c2)
+ ansatz3 = ansatz + ansatz2
+ assert np.allclose(ansatz3.mat[0], A1)
+ assert np.allclose(ansatz3.vec[0], b1)
+ assert np.allclose(ansatz3.array[0], c1)
+ assert np.allclose(ansatz3.mat[1], A2)
+ assert np.allclose(ansatz3.vec[1], b2)
+ assert np.allclose(ansatz3.array[1], c2)
+
+
+# test multiplying two PolyExpAnsatz objects
+@given(Abc1=Abc_triple(4), Abc2=Abc_triple(4))
+def test_PolyExpAnsatz_mul(Abc1, Abc2):
+ """Test that we can multiply two PolyExpAnsatz objects"""
+ A1, b1, c1 = Abc1
+ A2, b2, c2 = Abc2
+ ansatz = PolyExpAnsatz(A1, b1, c1)
+ ansatz2 = PolyExpAnsatz(A2, b2, c2)
+ ansatz3 = ansatz * ansatz2
+ assert np.allclose(ansatz3.mat[0], A1 + A2)
+ assert np.allclose(ansatz3.vec[0], b1 + b2)
+ assert np.allclose(ansatz3.array[0], c1 * c2)
+
+
+# test multiplying a PolyExpAnsatz object by a scalar
+@given(Abc=Abc_triple(), d=complex_number)
+def test_PolyExpAnsatz_mul_scalar(Abc, d):
+ """Test that we can multiply a PolyExpAnsatz object by a scalar"""
+ A, b, c = Abc
+ ansatz = PolyExpAnsatz(A, b, c)
+ ansatz2 = ansatz * d
+ assert np.allclose(ansatz2.mat[0], A)
+ assert np.allclose(ansatz2.vec[0], b)
+ assert np.allclose(ansatz2.array[0], d * c)
+
+
+# test calling the PolyExpAnsatz object
+@given(Abc=Abc_triple())
+def test_PolyExpAnsatz_call(Abc):
+ """Test that we can call the PolyExpAnsatz object"""
+ A, b, c = Abc
+ ansatz = PolyExpAnsatz(A, b, c)
+ assert np.allclose(ansatz(z=math.zeros_like(b)), c)
+
+
+# test tensor product of two PolyExpAnsatz objects
+@given(Abc1=Abc_triple(6), Abc2=Abc_triple(6))
+def test_PolyExpAnsatz_kron(Abc1, Abc2):
+ """Test that we can tensor product two PolyExpAnsatz objects"""
+ A1, b1, c1 = Abc1
+ A2, b2, c2 = Abc2
+ ansatz = PolyExpAnsatz(A1, b1, c1)
+ ansatz2 = PolyExpAnsatz(A2, b2, c2)
+ ansatz3 = ansatz & ansatz2
+ assert np.allclose(ansatz3.mat[0], math.block_diag(A1, A2))
+ assert np.allclose(ansatz3.vec[0], math.concat([b1, b2], -1))
+ assert np.allclose(ansatz3.array[0], c1 * c2)
+
+
+# test equality
+@given(Abc=Abc_triple())
+def test_PolyExpAnsatz_eq(Abc):
+ """Test that we can compare two PolyExpAnsatz objects"""
+ A, b, c = Abc
+ ansatz = PolyExpAnsatz(A, b, c)
+ ansatz2 = PolyExpAnsatz(2 * A, 2 * b, 2 * c)
+ assert ansatz == ansatz
+ assert ansatz2 == ansatz2
+ assert ansatz != ansatz2
+ assert ansatz2 != ansatz
+
+
+# test simplify
+@given(Abc=Abc_triple())
+def test_PolyExpAnsatz_simplify(Abc):
+ """Test that we can simplify a PolyExpAnsatz object"""
+ A, b, c = Abc
+ ansatz = PolyExpAnsatz(A, b, c)
+ ansatz = ansatz + ansatz
+ assert np.allclose(ansatz.A[0], ansatz.A[1])
+ assert np.allclose(ansatz.A[0], A)
+ assert np.allclose(ansatz.b[0], ansatz.b[1])
+ assert np.allclose(ansatz.b[0], b)
+ ansatz.simplify()
+ assert len(ansatz.A) == 1
+ assert len(ansatz.b) == 1
+ assert ansatz.c == 2 * c
+
+
+def test_order_batch():
+ ansatz = PolyExpAnsatz(
+ A=[np.array([[0]]), np.array([[1]])], b=[np.array([1]), np.array([0])], c=[1, 2]
+ )
+ ansatz._order_batch()
+ assert np.allclose(ansatz.A[0], np.array([[1]]))
+ assert np.allclose(ansatz.b[0], np.array([0]))
+ assert ansatz.c[0] == 2
+ assert np.allclose(ansatz.A[1], np.array([[0]]))
+ assert np.allclose(ansatz.b[1], np.array([1]))
+ assert ansatz.c[1] == 1
+
+
+@given(Abc=Abc_triple())
+def test_PolyExpAnsatz_simplify_v2(Abc):
+ """Test that we can simplify a PolyExpAnsatz object"""
+ A, b, c = Abc
+ ansatz = PolyExpAnsatz(A, b, c)
+ ansatz = ansatz + ansatz
+ assert np.allclose(ansatz.A[0], ansatz.A[1])
+ assert np.allclose(ansatz.A[0], A)
+ assert np.allclose(ansatz.b[0], ansatz.b[1])
+ assert np.allclose(ansatz.b[0], b)
+ ansatz.simplify_v2()
+ assert len(ansatz.A) == 1
+ assert len(ansatz.b) == 1
+ assert np.allclose(ansatz.c, 2 * c)
diff --git a/tests/test_physics/test_bargmann/test_bargmann_repr.py b/tests/test_physics/test_bargmann/test_bargmann_repr.py
new file mode 100644
index 000000000..e1680a61f
--- /dev/null
+++ b/tests/test_physics/test_bargmann/test_bargmann_repr.py
@@ -0,0 +1,177 @@
+import numpy as np
+
+from mrmustard import math
+from mrmustard.lab import Attenuator, Coherent, Gaussian, Ggate, Dgate
+from mrmustard.physics.bargmann import contract_two_Abc, reorder_abc, wigner_to_bargmann_rho
+from mrmustard.physics.representations import Bargmann
+from tests.random import random_Ggate, single_mode_unitary_gate, n_mode_mixed_state, Abc_triple
+from hypothesis import given
+
+
+def test_make_cat():
+ r"test adding two coherent states via the Bargmann representation"
+ cat = Bargmann(*Coherent(1.0).bargmann()) + Bargmann(*Coherent(-1.0).bargmann())
+ assert np.allclose(cat.A[0], cat.A[1])
+ assert np.allclose(cat.b[0], -cat.b[1])
+
+
+def test_muldiv_with_another_Bargmann():
+ r"test multiplying and dividing two Bargmann representations"
+ Abc1 = Bargmann(*(Gaussian(1) >> Dgate(0.1, 0.2)).bargmann())
+ Abc2 = Bargmann(*(Gaussian(1) >> Dgate(0.4, 0.1)).bargmann())
+ s1 = Abc1 * Abc2
+ s2 = Abc1 / Abc2
+ assert np.allclose(s1.A[0], Abc1.A[0] + Abc2.A[0])
+ assert np.allclose(s1.b[0], Abc1.b[0] + Abc2.b[0])
+ assert np.allclose(s1.c[0], Abc1.c[0] * Abc2.c[0])
+ assert np.allclose(s2.A[0], Abc1.A[0] - Abc2.A[0])
+ assert np.allclose(s2.b[0], Abc1.b[0] - Abc2.b[0])
+ assert np.allclose(s2.c[0], Abc1.c[0] / Abc2.c[0])
+
+
+def test_muldiv_with_scalar():
+ r"test multiplying and dividing a Bargmann representation with a scalar"
+ s1 = Bargmann(*Coherent(1.0).bargmann()) * 2.0
+ s2 = Bargmann(*Coherent(1.0).bargmann()) / 3.0
+ s3 = 4.0 * Bargmann(*Coherent(1.0).bargmann())
+ assert np.allclose(s1.c, Coherent(1.0).bargmann()[2] * 2.0)
+ assert np.allclose(s2.c, Coherent(1.0).bargmann()[2] / 3.0)
+ assert np.allclose(s3.c, Coherent(1.0).bargmann()[2] * 4.0)
+
+
+@given(Abc=Abc_triple(3))
+def test_reorder_indices(Abc):
+ r"""Test that we can reorder the indices of the A matrix and b vector of an (A,b,c) triple"""
+ barg = Bargmann(*Abc)
+ barg = barg.reorder((0, 2, 1))
+ assert np.allclose(barg.A[0], Abc[0][[0, 2, 1], :][:, [0, 2, 1]])
+ assert np.allclose(barg.b[0], Abc[1][[0, 2, 1]])
+
+
+@given(Abc=Abc_triple())
+def test_call(Abc):
+ r"""Test that we can call the PolyExpAnsatz object"""
+ A, b, c = Abc
+ barg = Bargmann(A, b, c)
+ assert np.allclose(barg(z=math.zeros_like(b)), c)
+
+
+def test_subtract():
+ r"test subtracting two coherent states via the Bargmann representation"
+ cat = Bargmann(*Coherent(1.0).bargmann()) - Bargmann(*Coherent(-1.0).bargmann())
+ assert np.allclose(cat.A[0], cat.A[1])
+ assert np.allclose(cat.b[0], -cat.b[1])
+
+
+def test_abc_contraction_2mode_psi_U():
+ "tests that the abc contraction works for U|psi>"
+ psi = Gaussian(2)
+ U = Ggate(2)
+ A1, b1, c1 = psi.bargmann() # out1ket, out2ket
+ A2, b2, c2 = U.bargmann() # out1ket, out2ket, in1ket, in2ket
+ A_abc, b_abc, c_abc = contract_two_Abc((A1, b1, c1), (A2, b2, c2), (0, 1), (2, 3))
+ A_mm, b_mm, c_mm = (psi >> U).bargmann()
+ assert np.allclose(A_abc, A_mm)
+ assert np.allclose(b_abc, b_mm)
+ assert np.allclose(abs(c_abc), abs(c_mm))
+
+
+def test_abc_contraction_2mode_rho_phi():
+ "tests that the abc contraction works for rho >> phi"
+ rho = Gaussian(2) >> Attenuator([0.1, 0.2]) >> Ggate(2) >> Attenuator([0.4, 0.9])
+ phi = Ggate(2) >> Attenuator([0.3, 0.4]) >> Ggate(2)
+ # out1bra, out2bra, out1ket, out2ket
+ A1, b1, c1 = rho.bargmann()
+ # out1bra, out2bra, in1bra, in2bra, out1ket, out2ket, in1ket, in2ket
+ A2, b2, c2 = phi.bargmann()
+
+ A_abc, b_abc, c_abc = contract_two_Abc((A1, b1, c1), (A2, b2, c2), (0, 1, 2, 3), (2, 3, 6, 7))
+
+ A_mm, b_mm, c_mm = (rho >> phi).bargmann()
+
+ assert np.allclose(A_abc, A_mm)
+ assert np.allclose(b_abc, b_mm)
+ assert np.allclose(c_abc, c_mm)
+
+
+def test_abc_contraction_3mode_rho_2mode_U():
+ "tests that the abc contraction works for U rho U_dagger"
+ rho = Gaussian(3) >> Attenuator([0.1, 0.2, 0.4]) >> Ggate(3) >> Attenuator([0.4, 0.5, 0.9])
+ U = Ggate(2)
+ # out1bra, out2bra, out3bra, out1ket, out2ket, out3ket
+ A1, b1, c1 = rho.bargmann()
+ # out1ket, out2ket, in1ket, in2ket
+ A2, b2, c2 = U.bargmann()
+ A_abc, b_abc, c_abc = contract_two_Abc(
+ (A2, b2, c2), (A1, b1, c1), (2, 3), (4, 5)
+ ) # left in out1ket_U, out2ket_U, out1bra_rho, out2bra_rho, out3bra_rho, out1ket_rho
+ A_abc, b_abc, c_abc = contract_two_Abc(
+ (A_abc, b_abc, c_abc),
+ (math.conj(A2), math.conj(b2), math.conj(c2)),
+ (3, 4),
+ (2, 3),
+ ) # left in out1ket_U, out2ket_U, out1bra_rho, out1ket_rho, out1bra_U, out2bra_U
+ A_abc, b_abc, c_abc = reorder_abc((A_abc, b_abc, c_abc), (2, 4, 5, 3, 0, 1))
+ A_mm, b_mm, c_mm = (rho >> U[1, 2]).bargmann()
+ assert np.allclose(A_abc, A_mm)
+ assert np.allclose(b_abc, b_mm)
+ assert np.allclose(c_abc, c_mm)
+
+
+def test_Bargmann_2mode_psi_U():
+ "tests that the Bargmann representation works for U|psi>"
+ psi = Gaussian(2)
+ U = Ggate(2)
+ A1, b1, c1 = psi.bargmann() # out1ket, out2ket
+ A2, b2, c2 = U.bargmann() # out1ket, out2ket, in1ket, in2ket
+ Abc1 = Bargmann(A1, b1, c1)
+ Abc2 = Bargmann(A2, b2, c2)
+ psiU = Abc1[0, 1] @ Abc2[2, 3]
+ A_abc, b_abc, c_abc = psiU.A[0], psiU.b[0], psiU.c[0]
+ A_mm, b_mm, c_mm = (psi >> U).bargmann()
+ assert np.allclose(A_abc, A_mm)
+ assert np.allclose(b_abc, b_mm)
+ assert np.allclose(abs(c_abc), abs(c_mm))
+
+
+@given(G1=random_Ggate(num_modes=1), G2=random_Ggate(num_modes=1))
+def test_composition_GG(G1, G2):
+ r"""Test that the composition of two G gates is the same
+ as the composition of their Bargmann representations"""
+ a12, b12, c12 = (G1 >> G2).bargmann()
+ composed = Bargmann(*G2.bargmann())[1] @ Bargmann(*G1.bargmann())[0]
+ assert np.allclose(composed.A[0], a12)
+ assert np.allclose(composed.b[0], b12)
+ assert np.allclose(np.abs(composed.c[0]), np.abs(c12))
+
+
+@given(G1=single_mode_unitary_gate(), G2=single_mode_unitary_gate())
+def test_composition_all(G1, G2):
+ r"""Test that the composition of any two gates is the same
+ as the composition of their Bargmann representations"""
+ a12, b12, c12 = (G1 >> G2).bargmann()
+ composed = Bargmann(*G2.bargmann())[1] @ Bargmann(*G1.bargmann())[0]
+ assert np.allclose(composed.A[0], a12)
+ assert np.allclose(composed.b[0], b12)
+ assert np.allclose(np.abs(composed.c[0]), np.abs(c12))
+
+
+@given(rho=n_mode_mixed_state(num_modes=2))
+def test_partial_trace_2mode_state(rho):
+ r"""Test that the partial trace of a 2-mode state works"""
+ rho01 = Bargmann(*wigner_to_bargmann_rho(rho.cov, rho.means))
+ rho1 = rho01.trace([0], [2])
+ rho0 = rho01.trace([1], [3])
+ assert rho1 == Bargmann(*rho.get_modes(1).bargmann())
+ assert rho0 == Bargmann(*rho.get_modes(0).bargmann())
+
+
+@given(rho=n_mode_mixed_state(num_modes=3))
+def test_partial_trace_3mode_state(rho):
+ r"""Test that the partial trace of a 3-mode state works"""
+ rho = rho >> Attenuator([0.9, 0.9, 0.9])
+ rho012 = Bargmann(*wigner_to_bargmann_rho(rho.cov, rho.means))
+ rho12 = rho012.trace([0], [3])
+ rho2 = rho012.trace([0, 1], [3, 4])
+ assert np.allclose(rho12.b, Bargmann(*rho.get_modes([1, 2]).bargmann()).b)
+ assert rho2 == Bargmann(*rho.get_modes(2).bargmann())
diff --git a/tests/test_physics/test_bargmann.py b/tests/test_physics/test_bargmann/test_bargmann_utils.py
similarity index 73%
rename from tests/test_physics/test_bargmann.py
rename to tests/test_physics/test_bargmann/test_bargmann_utils.py
index 229536b96..b978a4ce1 100644
--- a/tests/test_physics/test_bargmann.py
+++ b/tests/test_physics/test_bargmann/test_bargmann_utils.py
@@ -6,9 +6,26 @@
wigner_to_bargmann_psi,
wigner_to_bargmann_rho,
wigner_to_bargmann_U,
+ reorder_abc,
)
+def test_reorder_abc():
+ """Test that the reorder_abc function works correctly"""
+ A = np.array([[1, 2], [2, 3]])
+ b = np.array([4, 5])
+ c = np.array(6)
+ same = reorder_abc((A, b, c), (0, 1))
+ assert all(np.allclose(x, y) for x, y in zip(same, (A, b, c)))
+ flipped = reorder_abc((A, b, c), (1, 0))
+ assert all(np.allclose(x, y) for x, y in zip(flipped, (A[[1, 0], :][:, [1, 0]], b[[1, 0]], c)))
+ c = np.array([[6, 7], [8, 9]])
+ flipped = reorder_abc((A, b, c), (1, 0)) # test transposition of c
+ assert all(
+ np.allclose(x, y) for x, y in zip(flipped, (A[[1, 0], :][:, [1, 0]], b[[1, 0]], c.T))
+ )
+
+
def test_wigner_to_bargmann_psi():
"""Test that the Bargmann representation of a ket is correct"""
G = Gaussian(2) >> Dgate(0.1, 0.2)
diff --git a/tests/test_physics/test_fidelity.py b/tests/test_physics/test_fidelity.py
index 722601f56..f209d95c6 100644
--- a/tests/test_physics/test_fidelity.py
+++ b/tests/test_physics/test_fidelity.py
@@ -5,23 +5,23 @@
from mrmustard import physics, settings
from mrmustard.lab import Coherent, Fock, State
-from mrmustard.math import Math
from mrmustard.physics import fock as fp
from mrmustard.physics import gaussian as gp
-from tests.random import force_settings
-
-math = Math()
-hbar0 = settings.HBAR
class TestGaussianStates:
+ hbar0: float = settings.HBAR
+
+ def teardown_method(self, method):
+ settings._force_hbar(self.hbar0)
+
@pytest.mark.parametrize("hbar", [1 / 2, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("pure", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_fidelity_is_symmetric(self, num_modes, hbar, pure, block_diag):
"""Test that the fidelity is symmetric"""
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
cov1 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means1 = np.sqrt(2 * hbar) * np.random.rand(2 * num_modes)
cov2 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
@@ -30,16 +30,13 @@ def test_fidelity_is_symmetric(self, num_modes, hbar, pure, block_diag):
f21 = gp.fidelity(means2, cov2, means1, cov1)
assert np.allclose(f12, f21)
- # restoring hbar to its original value
- force_settings("_hbar", hbar0)
-
@pytest.mark.parametrize("hbar", [1 / 2, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("pure", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_fidelity_is_leq_one(self, num_modes, hbar, pure, block_diag):
"""Test that the fidelity is between 0 and 1"""
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
cov1 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means1 = np.sqrt(2 * hbar) * np.random.rand(2 * num_modes)
cov2 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
@@ -47,28 +44,22 @@ def test_fidelity_is_leq_one(self, num_modes, hbar, pure, block_diag):
f12 = gp.fidelity(means1, cov1, means2, cov2)
assert 0 <= np.real_if_close(f12) < 1.0
- # restoring hbar to its original value
- force_settings("_hbar", hbar0)
-
@pytest.mark.parametrize("hbar", [1 / 2, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("num_modes", np.arange(2, 6))
@pytest.mark.parametrize("pure", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_fidelity_with_self(self, num_modes, hbar, pure, block_diag):
"""Test that the fidelity of two identical quantum states is 1"""
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
cov = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means = np.random.rand(2 * num_modes)
assert np.allclose(gp.fidelity(means, cov, means, cov), 1, atol=1e-3)
- # restoring hbar to its original value
- force_settings("_hbar", hbar0)
-
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("hbar", [0.5, 1.0, 2.0, 1.6])
def test_fidelity_coherent_state(self, num_modes, hbar):
"""Test the fidelity of two multimode coherent states"""
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
beta1 = np.random.rand(num_modes) + 1j * np.random.rand(num_modes)
beta2 = np.random.rand(num_modes) + 1j * np.random.rand(num_modes)
means1 = real_to_complex_displacements(np.concatenate([beta1, beta1.conj()]), hbar=hbar)
@@ -79,29 +70,23 @@ def test_fidelity_coherent_state(self, num_modes, hbar):
expected = np.exp(-np.linalg.norm(beta1 - beta2) ** 2)
assert np.allclose(expected, fid)
- # restoring hbar to its original value
- force_settings("_hbar", hbar0)
-
+ @pytest.mark.parametrize("r1", [0.1, 0.2, 0.3])
+ @pytest.mark.parametrize("r2", [0.4, 0.5, 0.6])
@pytest.mark.parametrize("hbar", [0.5, 1.0, 2.0, 1.6])
- @pytest.mark.parametrize("r1", np.random.rand(3))
- @pytest.mark.parametrize("r2", np.random.rand(3))
def test_fidelity_squeezed_vacuum(self, r1, r2, hbar):
"""Tests fidelity between two squeezed states"""
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
cov1 = np.diag([np.exp(2 * r1), np.exp(-2 * r1)]) * hbar / 2
cov2 = np.diag([np.exp(2 * r2), np.exp(-2 * r2)]) * hbar / 2
mu = np.zeros([2])
assert np.allclose(1 / np.cosh(r1 - r2), gp.fidelity(mu, cov1, mu, cov2))
- # restoring hbar to its original value
- force_settings("_hbar", hbar0)
-
@pytest.mark.parametrize("n1", [0.5, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("n2", [0.5, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("hbar", [0.5, 1.0, 2.0, 1.6])
def test_fidelity_thermal(self, n1, n2, hbar):
"""Test fidelity between two thermal states"""
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
expected = 1 / (1 + n1 + n2 + 2 * n1 * n2 - 2 * np.sqrt(n1 * n2 * (n1 + 1) * (n2 + 1)))
cov1 = hbar * (n1 + 0.5) * np.identity(2)
cov2 = hbar * (n2 + 0.5) * np.identity(2)
@@ -109,15 +94,12 @@ def test_fidelity_thermal(self, n1, n2, hbar):
mu2 = np.zeros([2])
assert np.allclose(expected, gp.fidelity(mu1, cov1, mu2, cov2))
- # restoring hbar to its original value
- force_settings("_hbar", hbar0)
-
@pytest.mark.parametrize("hbar", [0.5, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("r", [-2.0, 0.0, 2.0])
@pytest.mark.parametrize("alpha", np.random.rand(10) + 1j * np.random.rand(10))
def test_fidelity_vac_to_displaced_squeezed(self, r, alpha, hbar):
"""Calculates the fidelity between a coherent squeezed state and vacuum"""
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
cov1 = np.diag([np.exp(2 * r), np.exp(-2 * r)]) * hbar / 2
means1 = real_to_complex_displacements(np.array([alpha, np.conj(alpha)]), hbar=hbar)
means2 = np.zeros([2])
@@ -129,9 +111,6 @@ def test_fidelity_vac_to_displaced_squeezed(self, r, alpha, hbar):
)
assert np.allclose(expected, gp.fidelity(means1, cov1, means2, cov2))
- # restoring hbar to its original value
- force_settings("_hbar", hbar0)
-
class TestMixedStates:
state1 = 1 / 2 * np.eye(2)
@@ -163,23 +142,31 @@ def test_fidelity_formula(self):
class TestGaussianFock:
"""Tests for the fidelity between a pair of single-mode states in Gaussian and Fock representation"""
- state1ket = Coherent(x=1.0)
- state1dm = State(dm=state1ket.dm())
- state2ket = Fock(n=1)
- state2dm = State(dm=state2ket.dm(state1dm.cutoffs))
-
def test_fidelity_across_representations_ket_ket(self):
"""Test that the fidelity of these two states is what it should be"""
- assert np.allclose(physics.fidelity(self.state1ket, self.state2ket), 0.36787944, atol=1e-4)
+ state1ket = Coherent(x=1.0)
+ state2ket = Fock(n=1)
+ assert np.allclose(physics.fidelity(state1ket, state2ket), 0.36787944, atol=1e-4)
def test_fidelity_across_representations_ket_dm(self):
"""Test that the fidelity of these two states is what it should be"""
- assert np.allclose(physics.fidelity(self.state1ket, self.state2dm), 0.36787944, atol=1e-4)
+ state1ket = Coherent(x=1.0)
+ state1dm = State(dm=state1ket.dm())
+ state2ket = Fock(n=1)
+ state2dm = State(dm=state2ket.dm(state1dm.cutoffs))
+ assert np.allclose(physics.fidelity(state1ket, state2dm), 0.36787944, atol=1e-4)
def test_fidelity_across_representations_dm_ket(self):
"""Test that the fidelity of these two states is what it should be"""
- assert np.allclose(physics.fidelity(self.state1dm, self.state2ket), 0.36787944, atol=1e-4)
+ state1ket = Coherent(x=1.0)
+ state1dm = State(dm=state1ket.dm())
+ state2ket = Fock(n=1)
+ assert np.allclose(physics.fidelity(state1dm, state2ket), 0.36787944, atol=1e-4)
def test_fidelity_across_representations_dm_dm(self):
"""Test that the fidelity of these two states is what it should be"""
- assert np.allclose(physics.fidelity(self.state1dm, self.state2dm), 0.36787944, atol=1e-4)
+ state1ket = Coherent(x=1.0)
+ state1dm = State(dm=state1ket.dm())
+ state2ket = Fock(n=1)
+ state2dm = State(dm=state2ket.dm(state1dm.cutoffs))
+ assert np.allclose(physics.fidelity(state1dm, state2dm), 0.36787944, atol=1e-4)
diff --git a/tests/test_physics/test_fock/__init__.py b/tests/test_physics/test_fock/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_physics/test_fock/test_fock.py b/tests/test_physics/test_fock/test_fock.py
index eeb94e568..f7276018a 100644
--- a/tests/test_physics/test_fock/test_fock.py
+++ b/tests/test_physics/test_fock/test_fock.py
@@ -19,6 +19,7 @@
from scipy.special import factorial
from thewalrus.quantum import total_photon_number_distribution
+from mrmustard import math
from mrmustard.lab import (
TMSV,
Attenuator,
@@ -160,16 +161,17 @@ def test_density_matrix(num_modes):
@pytest.mark.parametrize(
- "state",
+ "state, kwargs",
[
- Vacuum(num_modes=2),
- Fock([4, 3], modes=[0, 1]),
- Coherent(x=[0.1, 0.2], y=[-0.4, 0.4], cutoffs=[10, 10]),
- Gaussian(num_modes=2, cutoffs=[35, 35]),
+ (Vacuum, {"num_modes": 2}),
+ (Fock, {"n": [4, 3], "modes": [0, 1]}),
+ (Coherent, {"x": [0.1, 0.2], "y": [-0.4, 0.4], "cutoffs": [10, 10]}),
+ (Gaussian, {"num_modes": 2, "cutoffs": [35, 35]}),
],
)
-def test_dm_to_ket(state):
+def test_dm_to_ket(state, kwargs):
"""Tests pure state density matrix conversion to ket"""
+ state = state(**kwargs)
dm = state.dm()
ket = fock.dm_to_ket(dm)
# check if ket is normalized
@@ -186,7 +188,8 @@ def test_dm_to_ket_error():
"""Test fock.dm_to_ket raises an error when state is mixed"""
state = Coherent(x=0.1, y=-0.4, cutoffs=[15]) >> Attenuator(0.5)
- with pytest.raises(ValueError):
+ e = ValueError if math.backend_name == "tensorflow" else TypeError
+ with pytest.raises(e):
fock.dm_to_ket(state)
@@ -233,7 +236,7 @@ def test_fock_trace_function():
def test_dm_choi():
"""tests that choi op is correctly applied to a dm"""
circ = Ggate(1) >> Attenuator([0.1])
- dm_out = fock.apply_choi_to_dm(circ.choi([10, 10, 10, 10]), Vacuum(1).dm([10]), [0], [0])
+ dm_out = fock.apply_choi_to_dm(circ.choi([10]), Vacuum(1).dm([10]), [0], [0])
dm_expected = (Vacuum(1) >> circ).dm([10])
assert np.allclose(dm_out, dm_expected, atol=1e-5)
diff --git a/tests/test_physics/test_gaussian/test_symplectics.py b/tests/test_physics/test_gaussian/test_symplectics.py
index 7843eb8f6..cbcae70fa 100644
--- a/tests/test_physics/test_gaussian/test_symplectics.py
+++ b/tests/test_physics/test_gaussian/test_symplectics.py
@@ -23,6 +23,7 @@
two_mode_squeezing,
)
+from mrmustard import math
from mrmustard.lab import (
Amplifier,
Attenuator,
@@ -87,7 +88,7 @@ def test_CXgate(s):
expected = expand(two_mode_squeezing(2 * r_choi, 0.0), [0, 2], 4) @ expand(
two_mode_squeezing(2 * r_choi, 0.0), [1, 3], 4
)
- CX_expanded = expand(controlled_X(s).numpy(), [0, 1], 4)
+ CX_expanded = expand(math.asnumpy(controlled_X(s)), [0, 1], 4)
expected = CX_expanded @ expected @ CX_expanded.T
assert np.allclose(cov, expected, atol=1e-6)
@@ -104,7 +105,7 @@ def test_CZgate(s):
expected = expand(two_mode_squeezing(2 * r_choi, 0.0), [0, 2], 4) @ expand(
two_mode_squeezing(2 * r_choi, 0.0), [1, 3], 4
)
- CZ_expanded = expand(controlled_Z(s).numpy(), [0, 1], 4)
+ CZ_expanded = expand(math.asnumpy(controlled_Z(s)), [0, 1], 4)
expected = CZ_expanded @ expected @ CZ_expanded.T
assert np.allclose(cov, expected, atol=1e-6)
diff --git a/tests/test_utils/test_wigner.py b/tests/test_physics/test_wigner.py
similarity index 92%
rename from tests/test_utils/test_wigner.py
rename to tests/test_physics/test_wigner.py
index 912e6729a..13cd63f5e 100644
--- a/tests/test_utils/test_wigner.py
+++ b/tests/test_physics/test_wigner.py
@@ -25,8 +25,7 @@
SqueezedVacuum,
State,
)
-from mrmustard.utils.wigner import wigner_discretized
-from tests.random import force_settings
+from mrmustard.physics.wigner import wigner_discretized
# original settings
autocutoff_max0 = settings.AUTOCUTOFF_MAX_CUTOFF
@@ -39,14 +38,6 @@
# ~~~~~~~
-def reset_settings():
- r"""Resets `Settings`"""
- settings.AUTOCUTOFF_MAX_CUTOFF = autocutoff_max0
- settings.AUTOCUTOFF_MIN_CUTOFF = autocutoff_min0
- settings.DISCRETIZATION_METHOD = method0
- force_settings("_hbar", hbar0)
-
-
def distance(W_mm, W_th):
r"""Calculates the distance between the discretized Wigner functions W_mm (generated
by `mrmustard`) and W_th (computed analytically) as the maximum of `|W_mm-W_th|/|W_th|`,
@@ -116,12 +107,21 @@ def generator(q, p, n):
class TestWignerDiscretized:
r"""Tests discretized Wigner functions (DWF) for various states"""
+ hbar0: float = settings.HBAR
+
+ def teardown_method(self, method):
+ r"""Resets `Settings`"""
+ settings.AUTOCUTOFF_MAX_CUTOFF = autocutoff_max0
+ settings.AUTOCUTOFF_MIN_CUTOFF = autocutoff_min0
+ settings.DISCRETIZATION_METHOD = method0
+ settings._force_hbar(self.hbar0)
+
@pytest.mark.parametrize("method", ["iterative", "clenshaw"])
@pytest.mark.parametrize("hbar", [1, 2])
def test_cat_state(self, method, hbar):
r"""Tests DWF for cat states"""
settings.DISCRETIZATION_METHOD = method
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
q_vec = np.linspace(-4, 4, 100)
p_vec = np.linspace(-1.5, 1.5, 100)
@@ -137,8 +137,6 @@ def test_cat_state(self, method, hbar):
assert np.allclose(q_mat.T, q_vec)
assert np.allclose(p_mat, p_vec)
- reset_settings()
-
@pytest.mark.parametrize("alpha", [0 + 0j, 3 + 3j])
@pytest.mark.parametrize("hbar", [2, 3])
@pytest.mark.parametrize("method", ["iterative", "clenshaw"])
@@ -147,7 +145,7 @@ def test_coherent_state(self, alpha, hbar, method):
settings.AUTOCUTOFF_MIN_CUTOFF = 100
settings.AUTOCUTOFF_MAX_CUTOFF = 150
settings.DISCRETIZATION_METHOD = method
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
# centering the intervals around alpha--away from the center,
# the values are small and unstable.
@@ -164,15 +162,13 @@ def test_coherent_state(self, alpha, hbar, method):
assert np.allclose(q_mat.T, q_vec)
assert np.allclose(p_mat, p_vec)
- reset_settings()
-
@pytest.mark.parametrize("n", [2, 6])
@pytest.mark.parametrize("hbar", [2, 3])
@pytest.mark.parametrize("method", ["iterative", "clenshaw"])
def test_fock_state(self, n, hbar, method):
r"""Tests DWF for fock states"""
settings.DISCRETIZATION_METHOD = method
- force_settings("_hbar", hbar)
+ settings._force_hbar(hbar)
q_vec = np.linspace(-1, 1, 20)
p_vec = np.linspace(-1, 1, 20)
@@ -185,8 +181,6 @@ def test_fock_state(self, n, hbar, method):
assert np.allclose(q_mat.T, q_vec)
assert np.allclose(p_mat, p_vec)
- reset_settings()
-
@pytest.mark.parametrize("method", ["iterative", "clenshaw"])
def test_squeezed_vacuum_both_method_succeed(self, method):
r"""Tests DWF for a squeezed vacuum state with squeezing s=1.
@@ -208,8 +202,6 @@ def test_squeezed_vacuum_both_method_succeed(self, method):
assert np.allclose(q_mat.T, q_vec)
assert np.allclose(p_mat, p_vec)
- reset_settings()
-
@pytest.mark.parametrize("method", ["iterative", "clenshaw"])
def test_squeezed_vacuum_iterative_fails(self, method):
r"""Tests DWF for a squeezed vacuum state with squeezing s=2.
@@ -229,5 +221,3 @@ def test_squeezed_vacuum_iterative_fails(self, method):
success = np.allclose(distance(W_mm, W_th), 0, atol=10**-1)
assert success is False if method == "iterative" else True
-
- reset_settings()
diff --git a/tests/test_training/__init__.py b/tests/test_training/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_training/test_callbacks.py b/tests/test_training/test_callbacks.py
index 0091b9ff5..56451b75c 100644
--- a/tests/test_training/test_callbacks.py
+++ b/tests/test_training/test_callbacks.py
@@ -17,21 +17,22 @@
import numpy as np
import tensorflow as tf
-from mrmustard import settings
+from mrmustard import math, settings
from mrmustard.lab.circuit import Circuit
from mrmustard.lab.gates import (
BSgate,
S2gate,
)
from mrmustard.lab.states import Vacuum
-from mrmustard.math import Math
from mrmustard.training import Optimizer, TensorboardCallback
-math = Math()
+from ..conftest import skip_np
def test_tensorboard_callback(tmp_path):
"""Tests tensorboard callbacks on hong-ou-mandel optimization."""
+ skip_np()
+
settings.SEED = 42
i, k = 2, 3
r = np.arcsinh(1.0)
diff --git a/tests/test_training/test_opt.py b/tests/test_training/test_opt.py
index 40adfce16..cca94a28e 100644
--- a/tests/test_training/test_opt.py
+++ b/tests/test_training/test_opt.py
@@ -19,7 +19,7 @@
from hypothesis import strategies as st
from thewalrus.symplectic import two_mode_squeezing
-from mrmustard import settings
+from mrmustard import math, settings
from mrmustard.lab.circuit import Circuit
from mrmustard.lab.gates import (
BSgate,
@@ -38,18 +38,20 @@
SqueezedVacuum,
Vacuum,
)
-from mrmustard.math import Math
+from mrmustard.math.parameters import Variable, update_euclidean
from mrmustard.physics import fidelity
from mrmustard.physics.gaussian import trace, von_neumann_entropy
-from mrmustard.training import Optimizer, Parametrized
+from mrmustard.training import Optimizer
from mrmustard.training.callbacks import Callback
-math = Math()
+from ..conftest import skip_np
@given(n=st.integers(0, 3))
def test_S2gate_coincidence_prob(n):
"""Testing the optimal probability of obtaining |n,n> from a two mode squeezed vacuum"""
+ skip_np()
+
settings.SEED = 40
S = S2gate(
r=abs(settings.rng.normal(loc=1.0, scale=0.1)),
@@ -62,7 +64,7 @@ def cost_fn():
def cb(optimizer, cost, trainables, **kwargs): # pylint: disable=unused-argument
return {
"cost": cost,
- "lr": optimizer.learning_rate["euclidean"],
+ "lr": optimizer.learning_rate[update_euclidean],
"num_trainables": len(trainables),
}
@@ -85,6 +87,8 @@ def test_hong_ou_mandel_optimizer(i, k):
see Eq. 20 of https://journals.aps.org/prresearch/pdf/10.1103/PhysRevResearch.3.043065
which lacks a square root in the right hand side.
"""
+ skip_np()
+
settings.SEED = 42
r = np.arcsinh(1.0)
s2_0, s2_1, bs = (
@@ -118,6 +122,8 @@ def cost_fn():
def test_learning_two_mode_squeezing():
"""Finding the optimal beamsplitter transmission to make a pair of single photons"""
+ skip_np()
+
settings.SEED = 42
ops = [
Sgate(
@@ -148,6 +154,8 @@ def cost_fn():
def test_learning_two_mode_Ggate():
"""Finding the optimal Ggate to make a pair of single photons"""
+ skip_np()
+
settings.SEED = 42
G = Ggate(num_modes=2, symplectic_trainable=True)
@@ -163,6 +171,8 @@ def cost_fn():
def test_learning_two_mode_Interferometer():
"""Finding the optimal Interferometer to make a pair of single photons"""
+ skip_np()
+
settings.SEED = 42
ops = [
Sgate(
@@ -188,6 +198,8 @@ def cost_fn():
def test_learning_two_mode_RealInterferometer():
"""Finding the optimal Interferometer to make a pair of single photons"""
+ skip_np()
+
settings.SEED = 2
ops = [
Sgate(
@@ -213,6 +225,8 @@ def cost_fn():
def test_learning_four_mode_Interferometer():
"""Finding the optimal Interferometer to make a NOON state with N=2"""
+ skip_np()
+
settings.SEED = 4
solution_U = np.array(
[
@@ -272,6 +286,8 @@ def cost_fn():
def test_learning_four_mode_RealInterferometer():
"""Finding the optimal Interferometer to make a NOON state with N=2"""
+ skip_np()
+
settings.SEED = 6
solution_O = np.array(
[
@@ -316,6 +332,8 @@ def test_squeezing_hong_ou_mandel_optimizer():
"""Finding the optimal squeezing parameter to get Hong-Ou-Mandel dip in time
see https://www.pnas.org/content/117/52/33107/tab-article-info
"""
+ skip_np()
+
settings.SEED = 42
r = np.arcsinh(1.0)
@@ -335,16 +353,17 @@ def cost_fn():
def test_parameter_passthrough():
"""Same as the test above, but with param passthrough"""
+ skip_np()
+
settings.SEED = 42
r = np.arcsinh(1.0)
- par = Parametrized(
- r=math.new_variable(r, (0.0, None), "r"),
- phi=math.new_variable(settings.rng.normal(), (None, None), "phi"),
- )
+ r_var = Variable(r, "r", (0.0, None))
+ phi_var = Variable(settings.rng.normal(), "phi", (None, None))
+
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
- S2gate(r=par.r.value, phi=par.phi.value)[1, 2],
+ S2gate(r=r_var, phi=phi_var)[1, 2],
]
circ = Circuit(ops)
@@ -352,13 +371,15 @@ def cost_fn():
return math.abs((Vacuum(4) >> circ).ket(cutoffs=[2, 2, 2, 2])[1, 1, 1, 1]) ** 2
opt = Optimizer(euclidean_lr=0.001)
- opt.minimize(cost_fn, by_optimizing=[par], max_steps=300)
- assert np.allclose(np.sinh(par.r.value) ** 2, 1, atol=1e-2)
+ opt.minimize(cost_fn, by_optimizing=[r_var, phi_var], max_steps=300)
+ assert np.allclose(np.sinh(r_var.value) ** 2, 1, atol=1e-2)
def test_making_thermal_state_as_one_half_two_mode_squeezed_vacuum():
"""Optimizes a Ggate on two modes so as to prepare a state with the same entropy
and mean photon number as a thermal state"""
+ skip_np()
+
settings.SEED = 42
S_init = two_mode_squeezing(np.arcsinh(1.0), 0.0)
@@ -380,13 +401,15 @@ def cost_fn():
opt = Optimizer(symplectic_lr=0.1)
opt.minimize(cost_fn, by_optimizing=[G], max_steps=50)
- S = G.symplectic.value.numpy()
+ S = math.asnumpy(G.symplectic.value)
cov = S @ S.T
assert np.allclose(cov, two_mode_squeezing(2 * np.arcsinh(np.sqrt(nbar)), 0.0))
def test_opt_backend_param():
"""Test the optimization of a backend parameter defined outside a gate."""
+ skip_np()
+
# rotated displaced squeezed state
settings.SEED = 42
rotation_angle = np.pi / 2
@@ -404,11 +427,13 @@ def cost_fn_sympl():
opt = Optimizer(symplectic_lr=0.1, euclidean_lr=0.05)
opt.minimize(cost_fn_sympl, by_optimizing=[S, r_angle])
- assert np.allclose(r_angle.numpy(), rotation_angle / 2, atol=1e-4)
+ assert np.allclose(math.asnumpy(r_angle), rotation_angle / 2, atol=1e-4)
def test_dgate_optimization():
"""Test that Dgate is optimized correctly."""
+ skip_np()
+
settings.SEED = 24
dgate = Dgate(x_trainable=True, y_trainable=True)
@@ -427,6 +452,8 @@ def cost_fn():
def test_sgate_optimization():
"""Test that Sgate is optimized correctly."""
+ skip_np()
+
settings.SEED = 25
sgate = Sgate(r=0.2, phi=0.1, r_trainable=True, phi_trainable=True)
@@ -446,6 +473,8 @@ def cost_fn():
def test_bsgate_optimization():
    """Test that BSgate is optimized correctly."""
+ skip_np()
+
settings.SEED = 25
G = Gaussian(2)
@@ -467,6 +496,8 @@ def cost_fn():
def test_squeezing_grad_from_fock():
"""Test that the gradient of a squeezing gate is computed from the fock representation."""
+ skip_np()
+
squeezing = Sgate(r=1, r_trainable=True)
def cost_fn():
@@ -478,6 +509,8 @@ def cost_fn():
def test_displacement_grad_from_fock():
"""Test that the gradient of a displacement gate is computed from the fock representation."""
+ skip_np()
+
disp = Dgate(x=1.0, y=1.0, x_trainable=True, y_trainable=True)
def cost_fn():
@@ -489,6 +522,8 @@ def cost_fn():
def test_bsgate_grad_from_fock():
"""Test that the gradient of a beamsplitter gate is computed from the fock representation."""
+ skip_np()
+
sq = SqueezedVacuum(r=1.0, r_trainable=True)
def cost_fn():
diff --git a/tests/test_training/test_parameter.py b/tests/test_training/test_parameter.py
deleted file mode 100644
index 9734bcf9d..000000000
--- a/tests/test_training/test_parameter.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2022 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for the parameter class."""
-
-import numpy as np
-import pytest
-
-from mrmustard.training.parameter import (
- create_parameter,
- Constant,
- Orthogonal,
- Unitary,
- Euclidean,
- Symplectic,
- Trainable,
-)
-from mrmustard.math import Math
-
-math = Math()
-
-
-@pytest.mark.parametrize("from_backend", [True, False])
-def test_create_constant(from_backend):
- """Checks if the factory function `create_parameter`
- returns an instance of the Constant class when args
- are not trainable."""
-
- value = np.random.rand(*np.random.randint(5, size=5))
- name = "constant_tensor"
- if from_backend:
- value = math.new_constant(value, name)
-
- param = create_parameter(value, name, is_trainable=False)
-
- assert isinstance(param, Constant)
- assert math.from_backend(param.value)
- assert param.name == name
-
-
-@pytest.mark.parametrize("trainable_class", (Euclidean, Orthogonal, Symplectic, Unitary))
-@pytest.mark.parametrize("from_backend", [True, False])
-@pytest.mark.parametrize("bounds", [None, (0, 10)])
-def test_create_trainable(trainable_class, from_backend, bounds):
- """Checks if the factory function `create_parameter`
- returns an instance of the Euclidean/Orthogonal/Symplectic/Unitary class when args
- are trainable."""
-
- value = 5
- name = f"{trainable_class.__name__}_tensor".lower()
- if from_backend:
- value = math.new_variable(value, bounds, name)
-
- param = create_parameter(value, name, is_trainable=True, bounds=bounds)
-
- assert isinstance(param, trainable_class)
- assert isinstance(param, Trainable)
- assert math.from_backend(param.value)
- assert param.name == name
diff --git a/tests/test_training/test_parametrized.py b/tests/test_training/test_parametrized.py
deleted file mode 100644
index 55ca3f7b4..000000000
--- a/tests/test_training/test_parametrized.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2022 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for the parametrized class."""
-
-import pytest
-
-from mrmustard.training import Parametrized
-from mrmustard.math import Math
-from mrmustard.lab.circuit import Circuit
-from mrmustard.lab.gates import BSgate, S2gate
-from mrmustard.training.parameter import (
- Constant,
- Unitary,
- Orthogonal,
- Euclidean,
- Symplectic,
- Trainable,
-)
-
-math = Math()
-
-
-@pytest.mark.parametrize("kwargs", [{"a": 5}, {"b": 4.5}])
-def test_attribute_assignment(kwargs):
- """Test that arguments are converted into Trainable or Constant and
- assigned as attributes of the class."""
- parametrized = Parametrized(**kwargs)
-
- instance_attributes = parametrized.__dict__
-
- for name in kwargs.keys():
- attrib = instance_attributes[f"{name}"]
- assert isinstance(attrib, (Trainable, Constant))
- assert instance_attributes[f"{name}"].name == name
-
-
-@pytest.mark.parametrize("trainable_class", (Euclidean, Orthogonal, Symplectic, Unitary))
-@pytest.mark.parametrize("bounds", [None, (0, 10)])
-def test_attribute_from_backend_type_assignment(trainable_class, bounds):
- """Test that arguments that are trainable get defined on the backend,
- are assigned correctly as attributes of the Parametrized instance
- and are the correct type of trainable instance.
- """
-
- name = f"{trainable_class.__name__}_tensor".lower()
- value = 5
- kwargs = {
- name: value,
- f"{name}_trainable": True,
- f"{name}_bounds": bounds,
- }
-
- parametrized = Parametrized(**kwargs)
- attrib = getattr(parametrized, f"{name}")
-
- assert isinstance(attrib, trainable_class)
- assert isinstance(attrib, Trainable)
- assert math.from_backend(attrib.value)
- assert attrib.name == name
-
-
-def test_attribute_from_backend_constant_assignment():
- """Test that arguments that are NOT trainable get defined on the backend,
- are assigned correctly as attributes of the Parametrized instance
- and are instances of :class:`Constant`.
- """
-
- name = "constant_tensor"
- value = math.new_constant(5, name)
- kwargs = {name: value, f"{name}_trainable": False}
-
- parametrized = Parametrized(**kwargs)
- attrib = getattr(parametrized, f"{name}")
-
- assert isinstance(attrib, Constant)
- assert math.from_backend(attrib.value)
- assert attrib.name == name
-
-
-def test_get_parameters():
- """Test that the `get_trainable_parameters` and `get_constant_parameters` properties
- return the correct set of parameters"""
-
- kwargs = {
- "numeric_attribute": 2,
- "constant_attribute": math.new_constant(1, "constant_attribute"),
- "symplectic_attribute": math.new_variable(2, None, "symplectic_attribute"),
- "symplectic_attribute_trainable": True,
- "euclidian_attribute": math.new_variable(3, None, "euclidian_attribute"),
- "euclidian_attribute_trainable": True,
- "orthogonal_attribute": math.new_variable(4, None, "orthogonal_attribute"),
- "orthogonal_attribute_trainable": True,
- "unitary_attribute": math.new_variable(4, None, "unitary_attribute"),
- "unitary_attribute_trainable": True,
- }
- parametrized = Parametrized(**kwargs)
-
- trainable_params = parametrized.trainable_parameters
- assert len(trainable_params) == 4
- assert all(isinstance(param, Trainable) for param in trainable_params)
-
- constant_params = parametrized.constant_parameters
- assert len(constant_params) == 2
- assert all(isinstance(param, Constant) for param in constant_params)
-
- trainable_params = parametrized.traverse_trainables(owner_tag="foo")
- assert len(trainable_params) == 4
- assert all(isinstance(param, Trainable) for param in trainable_params.values())
- assert all(tag.startswith("foo") for tag in trainable_params)
- assert all(tag.split("/")[1] in kwargs for tag in trainable_params)
-
- constant_params = parametrized.traverse_constants()
- assert len(constant_params) == 2
- assert all(isinstance(param, Constant) for param in constant_params.values())
- assert all(tag.startswith("Parametrized") for tag in constant_params)
- assert all(tag.split("/")[1] in kwargs for tag in constant_params)
-
-
-def test_get_nested_parameters():
- """Test that nested Parametrized objects (e.g. a circuit) return all the trainable
- and constant parameters via `get_trainable_parameters` and `get_constant_parameters`
- properties"""
-
- s2 = S2gate(r=0.0, phi=0.0, r_trainable=False, phi_trainable=True)
- bs = BSgate(
- theta=0.0,
- phi=0.0,
- theta_trainable=True,
- phi_trainable=False,
- )
- circ = Circuit([s2, bs])
-
- trainables = circ.trainable_parameters
- constants = circ.constant_parameters
- assert len(trainables) == 2
- assert len(constants) == 2
-
- assert (s2.phi in trainables) and (bs.theta in trainables)
- assert (s2.r in constants) and (bs.phi in constants)
-
- trainables = circ.traverse_trainables()
- constants = circ.traverse_constants("Device")
- assert len(trainables) == 2
- assert len(constants) == 2
- assert all(tag.startswith("Circuit/_ops[") for tag in trainables)
- assert all(tag.startswith("Device/_ops[") for tag in constants)
-
- assert (s2.phi in trainables.values()) and (bs.theta in trainables.values())
- assert (s2.r in constants.values()) and (bs.phi in constants.values())
diff --git a/tests/test_training/test_riemannian_opt.py b/tests/test_training/test_riemannian_opt.py
index 82481843b..c1ae01b0c 100644
--- a/tests/test_training/test_riemannian_opt.py
+++ b/tests/test_training/test_riemannian_opt.py
@@ -21,20 +21,24 @@
from thewalrus.random import random_symplectic
from thewalrus.symplectic import is_symplectic
-from mrmustard.math import Math
-from mrmustard.training.parameter_update import update_orthogonal, update_symplectic, update_unitary
+from mrmustard import math
+from mrmustard.math.parameters import update_orthogonal, update_symplectic, update_unitary
-math = Math()
+from ..conftest import skip_np
def is_unitary(M, rtol=1e-05, atol=1e-08):
"""Testing if the matrix M is unitary"""
+ skip_np()
+
M_dagger = np.transpose(M.conj())
return np.allclose(M @ M_dagger, np.identity(M.shape[-1]), rtol=rtol, atol=atol)
def is_orthogonal(M, rtol=1e-05, atol=1e-08):
"""Testing if the matrix M is orthogonal"""
+ skip_np()
+
M_T = np.transpose(M)
return np.allclose(M @ M_T, np.identity(M.shape[-1]), rtol=rtol, atol=atol)
@@ -42,6 +46,8 @@ def is_orthogonal(M, rtol=1e-05, atol=1e-08):
@given(n=st.integers(2, 4))
def test_update_symplectic(n):
"""Testing the update of symplectic matrix remains to be symplectic"""
+ skip_np()
+
S = math.new_variable(random_symplectic(n), name=None, dtype="complex128", bounds=None)
for _ in range(20):
dS_euclidean = math.new_variable(
@@ -51,19 +57,26 @@ def test_update_symplectic(n):
bounds=None,
)
update_symplectic([[dS_euclidean, S]], 0.01)
- assert is_symplectic(S.numpy()), "training step does not result in a symplectic matrix"
+ assert is_symplectic(
+ math.asnumpy(S)
+ ), "training step does not result in a symplectic matrix"
@given(n=st.integers(2, 4))
def test_update_unitary(n):
"""Testing the update of unitary matrix remains to be unitary"""
+ skip_np()
+
U = math.new_variable(unitary_group.rvs(dim=n), name=None, dtype="complex128", bounds=None)
for _ in range(20):
dU_euclidean = np.random.random((n, n)) + 1j * np.random.random((n, n))
update_unitary([[dU_euclidean, U]], 0.01)
- assert is_unitary(U.numpy()), "training step does not result in a unitary matrix"
+ assert is_unitary(math.asnumpy(U)), "training step does not result in a unitary matrix"
sym = np.block(
- [[np.real(U.numpy()), -np.imag(U.numpy())], [np.imag(U.numpy()), np.real(U.numpy())]]
+ [
+ [np.real(math.asnumpy(U)), -np.imag(math.asnumpy(U))],
+ [np.imag(math.asnumpy(U)), np.real(math.asnumpy(U))],
+ ]
)
assert is_symplectic(sym), "training step does not result in a symplectic matrix"
assert is_orthogonal(sym), "training step does not result in an orthogonal matrix"
@@ -72,15 +85,17 @@ def test_update_unitary(n):
@given(n=st.integers(2, 4))
def test_update_orthogonal(n):
"""Testing the update of orthogonal matrix remains to be orthogonal"""
+ skip_np()
+
O = math.new_variable(math.random_orthogonal(n), name=None, dtype="complex128", bounds=None)
for _ in range(20):
dO_euclidean = np.random.random((n, n)) + 1j * np.random.random((n, n))
update_orthogonal([[dO_euclidean, O]], 0.01)
- assert is_unitary(O.numpy()), "training step does not result in a unitary matrix"
+ assert is_unitary(math.asnumpy(O)), "training step does not result in a unitary matrix"
ortho = np.block(
[
- [np.real(O.numpy()), -math.zeros_like(O.numpy())],
- [math.zeros_like(O.numpy()), np.real(O.numpy())],
+ [np.real(math.asnumpy(O)), -math.zeros_like(math.asnumpy(O))],
+ [math.zeros_like(math.asnumpy(O)), np.real(math.asnumpy(O))],
]
)
assert is_symplectic(ortho), "training step does not result in a symplectic matrix"
diff --git a/tests/test_training/test_trainer.py b/tests/test_training/test_trainer.py
index 5c033dd82..d80259c70 100644
--- a/tests/test_training/test_trainer.py
+++ b/tests/test_training/test_trainer.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# pylint: disable=import-outside-toplevel
+
"""Tests for the ray-based trainer."""
import sys
@@ -35,12 +37,17 @@
from mrmustard.training import Optimizer
from mrmustard.training.trainer import map_trainer, train_device, update_pop
+from ..conftest import skip_np
+
-@pytest.fixture(scope="function")
def wrappers():
"""Dummy wrappers tested."""
def make_circ(x=0.0, return_type=None):
+ from mrmustard import math
+
+ math.change_backend("tensorflow")
+
circ = Ggate(num_modes=1, symplectic_trainable=True) >> Dgate(
x=x, x_trainable=True, y_trainable=True
)
@@ -49,6 +56,10 @@ def make_circ(x=0.0, return_type=None):
)
def cost_fn(circ=make_circ(0.1), y_targ=0.0):
+ from mrmustard import math
+
+ math.change_backend("tensorflow")
+
target = Gaussian(1) >> Dgate(-0.1, y_targ)
s = Vacuum(1) >> circ
return -fidelity(s, target)
@@ -64,10 +75,12 @@ class TestTrainer:
"tasks", [5, [{"y_targ": 0.1}, {"y_targ": -0.2}], {"c0": {}, "c1": {"y_targ": 0.07}}]
)
@pytest.mark.parametrize("seed", [None, 42])
- def test_circ_cost(self, wrappers, tasks, seed): # pylint: disable=redefined-outer-name
+ def test_circ_cost(self, tasks, seed): # pylint: disable=redefined-outer-name
"""Test distributed cost calculations."""
+ skip_np()
+
has_seed = isinstance(seed, int)
- _, cost_fn = wrappers
+ _, cost_fn = wrappers()
results = map_trainer(
cost_fn=cost_fn,
tasks=tasks,
@@ -96,12 +109,12 @@ def test_circ_cost(self, wrappers, tasks, seed): # pylint: disable=redefined-ou
"return_type",
[None, "dict"],
)
- def test_circ_optimize(
- self, wrappers, tasks, return_type
- ): # pylint: disable=redefined-outer-name
+ def test_circ_optimize(self, tasks, return_type): # pylint: disable=redefined-outer-name
"""Test distributed optimizations."""
+ skip_np()
+
max_steps = 15
- make_circ, cost_fn = wrappers
+ make_circ, cost_fn = wrappers()
results = map_trainer(
cost_fn=cost_fn,
device_factory=make_circ,
@@ -139,11 +152,11 @@ def test_circ_optimize(
lambda c: (Vacuum(1) >> c >> c >> c).fock_probabilities([5]),
],
)
- def test_circ_optimize_metrics(
- self, wrappers, metric_fns
- ): # pylint: disable=redefined-outer-name
+ def test_circ_optimize_metrics(self, metric_fns): # pylint: disable=redefined-outer-name
"""Tests custom metric functions on final circuits."""
- make_circ, cost_fn = wrappers
+ skip_np()
+
+ make_circ, cost_fn = wrappers()
tasks = {
"my-job": {"x": 0.1, "euclidean_lr": 0.01, "max_steps": 100},
@@ -179,6 +192,8 @@ def test_circ_optimize_metrics(
def test_update_pop(self):
"""Test for coverage."""
+ skip_np()
+
d = {"a": 3, "b": "foo"}
kwargs = {"b": "bar", "c": 22}
d1, kwargs = update_pop(d, **kwargs)
@@ -187,6 +202,8 @@ def test_update_pop(self):
def test_no_ray(self, monkeypatch):
"""Tests ray import error"""
+ skip_np()
+
monkeypatch.setitem(sys.modules, "ray", None)
with pytest.raises(ImportError, match="Failed to import `ray`"):
_ = map_trainer(
@@ -196,6 +213,8 @@ def test_no_ray(self, monkeypatch):
def test_invalid_tasks(self):
"""Tests unexpected tasks arg"""
+ skip_np()
+
with pytest.raises(
ValueError, match="`tasks` is expected to be of type int, list, or dict."
):
@@ -204,9 +223,11 @@ def test_invalid_tasks(self):
num_cpus=NUM_CPUS,
)
- def test_warn_unused_kwargs(self, wrappers): # pylint: disable=redefined-outer-name
+ def test_warn_unused_kwargs(self): # pylint: disable=redefined-outer-name
"""Test warning of unused kwargs"""
- _, cost_fn = wrappers
+ skip_np()
+
+ _, cost_fn = wrappers()
with pytest.warns(UserWarning, match="Unused kwargs:"):
results = train_device(
cost_fn=cost_fn,
@@ -215,9 +236,11 @@ def test_warn_unused_kwargs(self, wrappers): # pylint: disable=redefined-outer-
assert len(results) >= 4
assert isinstance(results["cost"], float)
- def test_no_pbar(self, wrappers): # pylint: disable=redefined-outer-name
+ def test_no_pbar(self): # pylint: disable=redefined-outer-name
        """Test turning off progress bar"""
- _, cost_fn = wrappers
+ skip_np()
+
+ _, cost_fn = wrappers()
results = map_trainer(
cost_fn=cost_fn,
tasks=2,
@@ -227,9 +250,11 @@ def test_no_pbar(self, wrappers): # pylint: disable=redefined-outer-name
assert len(results) == 2
@pytest.mark.parametrize("tasks", [2, {"c0": {}, "c1": {"y_targ": -0.7}}])
- def test_unblock(self, wrappers, tasks): # pylint: disable=redefined-outer-name
+ def test_unblock(self, tasks): # pylint: disable=redefined-outer-name
"""Test unblock async mode"""
- _, cost_fn = wrappers
+ skip_np()
+
+ _, cost_fn = wrappers()
result_getter = map_trainer(
cost_fn=cost_fn,
tasks=tasks,
diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_utils/test_argsort.py b/tests/test_utils/test_argsort.py
new file mode 100644
index 000000000..265298e84
--- /dev/null
+++ b/tests/test_utils/test_argsort.py
@@ -0,0 +1,42 @@
+# Copyright 2023 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mrmustard.utils.argsort import argsort_gen
+
+
+def test_argsort_gen():
+ # Test with generators yielding ascending values
+ gen_list = [iter(range(i, i + 3)) for i in range(5)]
+ assert argsort_gen(gen_list) == [0, 1, 2, 3, 4]
+
+ # Test with generators yielding descending values
+ gen_list = [iter(range(i, i - 3, -1)) for i in range(5, 0, -1)]
+ assert argsort_gen(gen_list) == [4, 3, 2, 1, 0]
+
+ # Test with empty list
+ gen_list = []
+ assert argsort_gen(gen_list) == []
+
+ # Test with single generator
+ gen_list = [iter(range(0, 3))]
+ assert argsort_gen(gen_list) == [0]
+
+ # Test with generators yielding the same first value
+ gen_list = [iter(range(i, i + 3)) for i in range(5)]
+ gen_list.append(iter(range(0, 3))) # Add another generator with the same first value
+ assert argsort_gen(gen_list) == [0, 5, 1, 2, 3, 4]
+
+ # Test with generators yielding the same values
+ gen_list = [iter(range(0, 3)) for _ in range(5)]
+ assert argsort_gen(gen_list) == [0, 1, 2, 3, 4]
diff --git a/tests/test_utils/test_gaussian_utils.py b/tests/test_utils/test_gaussian_utils.py
deleted file mode 100644
index 1698f48a9..000000000
--- a/tests/test_utils/test_gaussian_utils.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2021 Xanadu Quantum Technologies Inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-from hypothesis import given, strategies as st
-from mrmustard import *
-from mrmustard.physics import gaussian as gp
-
-
-def test_partition_means():
- A, B = gp.partition_means(gp.math.astensor(np.array([1, 2, 3, 4, 5, 6])), Amodes=[0, 2])
- assert np.allclose(A, [1, 3, 4, 6])
- assert np.allclose(B, [2, 5])
-
- A, B = gp.partition_means(gp.math.astensor(np.array([1, 2, 3, 4, 5, 6])), Amodes=[0])
- assert np.allclose(A, [1, 4])
- assert np.allclose(B, [2, 3, 5, 6])
-
- A, B = gp.partition_means(gp.math.astensor(np.array([1, 2, 3, 4, 5, 6])), Amodes=[1])
- assert np.allclose(A, [2, 5])
- assert np.allclose(B, [1, 3, 4, 6])
-
-
-def test_partition_cov_2modes():
- arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
- A, B, AB = gp.partition_cov(gp.math.astensor(arr), Amodes=[0, 1])
- assert np.allclose(A, arr)
- assert np.allclose(B, [])
- assert np.allclose(AB, [])
-
- A, B, AB = gp.partition_cov(gp.math.astensor(arr), Amodes=[0])
- assert np.allclose(A, [[1, 3], [9, 11]])
- assert np.allclose(B, [[6, 8], [14, 16]])
- assert np.allclose(AB, [[2, 4], [10, 12]])
-
- A, B, AB = gp.partition_cov(gp.math.astensor(arr), Amodes=[1])
- assert np.allclose(A, [[6, 8], [14, 16]])
- assert np.allclose(B, [[1, 3], [9, 11]])
- assert np.allclose(AB, [[5, 7], [13, 15]]) # effectively BA because A is mode 1
-
-
-def test_partition_cov_3modes():
- pass # TODO
-
- # arr = np.array([[1,2,3,4,5,6],
- # [7,8,9,10,11,12],
- # [13,14,15,16,17,18],
- # [19,20,21,22,23,24],
- # [25,26,27,28,29,30],
- # [31,32,33,34,35,36]])
- # A,B,AB = gp.partition_cov(gp.math.astensor(arr), Amodes=[0,2])
diff --git a/tests/test_logger.py b/tests/test_utils/test_logger.py
similarity index 98%
rename from tests/test_logger.py
rename to tests/test_utils/test_logger.py
index 361bd72dd..1b28cfb99 100644
--- a/tests/test_logger.py
+++ b/tests/test_utils/test_logger.py
@@ -50,7 +50,7 @@
import logging
import pytest
from mrmustard.training import optimizer
-from mrmustard.logger import logging_handler_defined, default_handler, create_logger
+from mrmustard.utils.logger import logging_handler_defined, default_handler, create_logger
modules_contain_logging = [optimizer]
diff --git a/tests/test_settings.py b/tests/test_utils/test_settings.py
similarity index 56%
rename from tests/test_settings.py
rename to tests/test_utils/test_settings.py
index bae36f9d0..6e36ea261 100644
--- a/tests/test_settings.py
+++ b/tests/test_utils/test_settings.py
@@ -16,9 +16,12 @@
Tests for the Settings class.
"""
-from mrmustard.settings import Settings, ImmutableSetting
+from mrmustard import math
+from mrmustard.utils.settings import Settings, ImmutableSetting
import pytest
+from ..conftest import skip_np
+
class TestImmutableSettings:
"""Tests the ImmutableSettings class"""
@@ -47,7 +50,6 @@ def test_init(self):
"""Test the default values of the settings"""
settings = Settings()
- assert settings.BACKEND == "tensorflow"
assert settings.HBAR == 2.0
assert settings.DEBUG is False
assert settings.AUTOCUTOFF_PROBABILITY == 0.999 # capture at least 99.9% of the probability
@@ -60,12 +62,55 @@ def test_init(self):
assert settings.EQ_TRANSFORMATION_RTOL_GAUSS == 1e-6
assert settings.PNR_INTERNAL_CUTOFF == 50
assert settings.HOMODYNE_SQUEEZING == 10.0
+ assert settings.PRECISION_BITS_HERMITE_POLY == 128
assert settings.PROGRESSBAR is True
assert settings.DEFAULT_BS_METHOD == "vanilla" # can be 'vanilla' or 'schwinger'
+ def test_setters(self):
+ settings = Settings()
+
+ ap0 = settings.AUTOCUTOFF_PROBABILITY
+ settings.AUTOCUTOFF_PROBABILITY = 0.1
+ assert settings.AUTOCUTOFF_PROBABILITY == 0.1
+ settings.AUTOCUTOFF_PROBABILITY = ap0
+
+ db0 = settings.DEBUG
+ settings.DEBUG = True
+ assert settings.DEBUG is True
+ settings.DEBUG = db0
+
+ dbsm0 = settings.DEFAULT_BS_METHOD
+ settings.DEFAULT_BS_METHOD = "schwinger"
+ assert settings.DEFAULT_BS_METHOD == "schwinger"
+ settings.DEFAULT_BS_METHOD = dbsm0
+
+ eqtc0 = settings.EQ_TRANSFORMATION_CUTOFF
+ settings.EQ_TRANSFORMATION_CUTOFF = 2
+ assert settings.EQ_TRANSFORMATION_CUTOFF == 2
+ settings.EQ_TRANSFORMATION_CUTOFF = eqtc0
+
+ pnr0 = settings.PNR_INTERNAL_CUTOFF
+ settings.PNR_INTERNAL_CUTOFF = False
+ assert settings.PNR_INTERNAL_CUTOFF is False
+ settings.PNR_INTERNAL_CUTOFF = pnr0
+
+ pb0 = settings.PROGRESSBAR
+ settings.PROGRESSBAR = False
+ assert settings.PROGRESSBAR is False
+ settings.PROGRESSBAR = pb0
+
+ s0 = settings.SEED
+ settings.SEED = None
+ assert settings.SEED is not None
+ settings.SEED = s0
+
+ assert settings.HBAR == 2.0
with pytest.raises(ValueError, match="Cannot change"):
settings.HBAR = 3
+ with pytest.raises(ValueError, match="precision_bits_hermite_poly"):
+ settings.PRECISION_BITS_HERMITE_POLY = 9
+
def test_settings_seed_randomness_at_init(self):
"""Test that the random seed is set randomly as MM is initialized."""
settings = Settings()
@@ -83,3 +128,22 @@ def test_reproducibility(self):
settings.SEED = 42
seq1 = [settings.rng.integers(0, 2**31 - 1) for _ in range(10)]
assert seq0 == seq1
+
+ def test_complex_warnings(self, caplog):
+ """Tests that complex warnings can be correctly activated and deactivated."""
+ skip_np()
+
+ settings = Settings()
+
+ assert settings.COMPLEX_WARNING is False
+ math.cast(1 + 1j, math.float64)
+ assert len(caplog.records) == 0
+
+ settings.COMPLEX_WARNING = True
+ math.cast(1 + 1j, math.float64)
+ assert len(caplog.records) == 1
+ assert "You are casting an input of type complex128" in caplog.records[0].msg
+
+ settings.COMPLEX_WARNING = False
+ math.cast(1 + 1j, math.float64)
+ assert len(caplog.records) == 1
diff --git a/tests/test_typing.py b/tests/test_utils/test_typing.py
similarity index 99%
rename from tests/test_typing.py
rename to tests/test_utils/test_typing.py
index 2766916e9..1158ab88b 100644
--- a/tests/test_typing.py
+++ b/tests/test_utils/test_typing.py
@@ -17,7 +17,7 @@
from typing import get_origin, get_args
import numpy as np
-from mrmustard.typing import (
+from mrmustard.utils.typing import (
Batch,
ComplexMatrix,
ComplexTensor,
diff --git a/trivy.yaml b/trivy.yaml
new file mode 100644
index 000000000..88bfebe3b
--- /dev/null
+++ b/trivy.yaml
@@ -0,0 +1,2 @@
+vulnerability:
+ ignore: CVE-2023-1428-grpcio