diff --git a/.github/workflows/maturin.yml b/.github/workflows/maturin.yml index 25ad9b6..59b60cb 100644 --- a/.github/workflows/maturin.yml +++ b/.github/workflows/maturin.yml @@ -1,7 +1,7 @@ # This file is autogenerated by maturin v1.7.3 # To update, run # -# maturin generate-ci github +# maturin generate-ci github -o .github/workflows/maturin.yml # name: CI @@ -19,7 +19,29 @@ permissions: contents: read jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install uv + uses: astral-sh/setup-uv@v4 + - name: Set up Python + run: | + uv python install + uv venv + source .venv/bin/activate + - name: Build wheels + uses: PyO3/maturin-action@v1 + with: + command: develop + args: -r --uv + - name: Install pytest + run: uv pip install pytest + - name: Run tests + run: .venv/bin/pytest + linux: + needs: test runs-on: ${{ matrix.platform.runner }} strategy: matrix: @@ -45,7 +67,7 @@ jobs: uses: PyO3/maturin-action@v1 with: target: ${{ matrix.platform.target }} - args: --release --out dist --find-interpreter + args: --release --out dist sccache: 'true' manylinux: auto - name: Upload wheels @@ -55,6 +77,7 @@ jobs: path: dist musllinux: + needs: test runs-on: ${{ matrix.platform.runner }} strategy: matrix: @@ -76,7 +99,7 @@ jobs: uses: PyO3/maturin-action@v1 with: target: ${{ matrix.platform.target }} - args: --release --out dist --find-interpreter + args: --release --out dist sccache: 'true' manylinux: musllinux_1_2 - name: Upload wheels @@ -86,6 +109,7 @@ jobs: path: dist windows: + needs: test runs-on: ${{ matrix.platform.runner }} strategy: matrix: @@ -104,7 +128,7 @@ jobs: uses: PyO3/maturin-action@v1 with: target: ${{ matrix.platform.target }} - args: --release --out dist --find-interpreter + args: --release --out dist sccache: 'true' - name: Upload wheels uses: actions/upload-artifact@v4 @@ -113,6 +137,7 @@ jobs: path: dist macos: + needs: test runs-on: ${{ matrix.platform.runner }} strategy: matrix: @@ -130,7 +155,7 @@ jobs: uses: 
PyO3/maturin-action@v1 with: target: ${{ matrix.platform.target }} - args: --release --out dist --find-interpreter + args: --release --out dist sccache: 'true' - name: Upload wheels uses: actions/upload-artifact@v4 @@ -139,6 +164,7 @@ jobs: path: dist sdist: + needs: test runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/.pytest.ini b/.pytest.ini new file mode 100644 index 0000000..c1dbf67 --- /dev/null +++ b/.pytest.ini @@ -0,0 +1,2 @@ +[pytest] +testpaths = python/tests diff --git a/.ruff.toml b/.ruff.toml new file mode 100644 index 0000000..f01d82f --- /dev/null +++ b/.ruff.toml @@ -0,0 +1,77 @@ +# Exclude a variety of commonly ignored directories. +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".git-rewrite", + ".hg", + ".ipynb_checkpoints", + ".mypy_cache", + ".nox", + ".pants.d", + ".pyenv", + ".pytest_cache", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + ".vscode", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "site-packages", + "venv", +] + +# Same as Black. +line-length = 90 +indent-width = 4 + +# Assume Python 3.9 +target-version = "py39" + +[lint] +# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default. +# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or +# McCabe complexity (`C901`) by default. +select = ["E4", "E7", "E9", "F"] +ignore = [] + +# Allow fix for all enabled rules (when `--fix`) is provided. +fixable = ["ALL"] +unfixable = [] + +# Allow unused variables when underscore-prefixed. +dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" + +[format] +# Like Black, use double quotes for strings. +quote-style = "single" + +# Like Black, indent with spaces, rather than tabs. +indent-style = "space" + +# Like Black, respect magic trailing commas. +skip-magic-trailing-comma = false + +# Like Black, automatically detect the appropriate line ending. 
+line-ending = "auto" + +# Enable auto-formatting of code examples in docstrings. Markdown, +# reStructuredText code/literal blocks and doctests are all supported. +# +# This is currently disabled by default, but it is planned for this +# to be opt-out in the future. +docstring-code-format = false + +# Set the line length limit used when formatting code snippets in +# docstrings. +# +# This only has an effect when the `docstring-code-format` setting is +# enabled. +docstring-code-line-length = "dynamic" diff --git a/docs/source/conf.py b/docs/source/conf.py index 8a3ed03..fa1a4c4 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -8,30 +8,35 @@ import laddu -project = "laddu" -copyright = "2024, Nathaniel Dene Hoffman" -author = "Nathaniel Dene Hoffman" +project = 'laddu' +copyright = '2024, Nathaniel Dene Hoffman' +author = 'Nathaniel Dene Hoffman' release = laddu.__version__ version = release # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration -extensions = ["sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.napoleon", "sphinx_copybutton"] +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.napoleon', + 'sphinx_copybutton', +] autodoc_default_options = { - "members": True, - "undoc-members": True, + 'members': True, + 'undoc-members': True, } -autodoc_member_order = "groupwise" +autodoc_member_order = 'groupwise' -templates_path = ["_templates"] +templates_path = ['_templates'] exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output -html_theme = "sphinx_rtd_theme" -html_static_path = ["_static"] +html_theme = 'sphinx_rtd_theme' +html_static_path = ['_static'] diff --git a/pyproject.toml b/pyproject.toml index 5db0b5f..93ad334 100644 --- a/pyproject.toml +++ 
b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "maturin" [project] name = "laddu" -requires-python = ">=3.8" +requires-python = ">=3.9" classifiers = [ "Programming Language :: Rust", "Programming Language :: Python :: Implementation :: CPython", @@ -12,7 +12,7 @@ classifiers = [ ] dynamic = ["version"] dependencies = [ - "numpy", + "numpy>=2", "docopt-ng", "loguru", "pandas", diff --git a/python/laddu/__init__.py b/python/laddu/__init__.py index 9ee7706..aff22c0 100644 --- a/python/laddu/__init__.py +++ b/python/laddu/__init__.py @@ -11,9 +11,18 @@ from laddu.amplitudes.ylm import Ylm from laddu.amplitudes.zlm import Zlm from laddu.convert import convert_from_amptools, read_root_file -from laddu.data import BinnedDataset, Dataset, Event, open # noqa: A004 +from laddu.data import BinnedDataset, Dataset, Event, open from laddu.likelihoods import NLL, LikelihoodManager, Status -from laddu.utils.variables import Angles, CosTheta, Mandelstam, Mass, Phi, PolAngle, Polarization, PolMagnitude +from laddu.utils.variables import ( + Angles, + CosTheta, + Mandelstam, + Mass, + Phi, + PolAngle, + Polarization, + PolMagnitude, +) from laddu.utils.vectors import Vector3, Vector4 from . 
import amplitudes, convert, data, likelihoods, utils @@ -33,7 +42,7 @@ def callback(self, step: int, status: Status) -> tuple[Status, bool]: def open_amptools( path: str | Path, - tree: str = "kin", + tree: str = 'kin', *, pol_in_beam: bool = False, pol_angle: float | None = None, @@ -41,49 +50,55 @@ def open_amptools( num_entries: int | None = None, ) -> Dataset: pol_angle_rad = pol_angle * np.pi / 180 if pol_angle else None - p4s_list, eps_list, weight_list = read_root_file(path, tree, pol_in_beam, pol_angle_rad, pol_magnitude, num_entries) + p4s_list, eps_list, weight_list = read_root_file( + path, tree, pol_in_beam, pol_angle_rad, pol_magnitude, num_entries + ) return Dataset( [ - Event([Vector4.from_array(p4) for p4 in p4s], [Vector3.from_array(eps_vec) for eps_vec in eps], weight) + Event( + [Vector4.from_array(p4) for p4 in p4s], + [Vector3.from_array(eps_vec) for eps_vec in eps], + weight, + ) for p4s, eps, weight in zip(p4s_list, eps_list, weight_list) ] ) __all__ = [ - "NLL", - "Angles", - "BinnedDataset", - "BreitWigner", - "ComplexScalar", - "CosTheta", - "Dataset", - "Event", - "LikelihoodManager", - "Manager", - "Mandelstam", - "Mass", - "Observer", - "Phi", - "PolAngle", - "PolMagnitude", - "PolarComplexScalar", - "Polarization", - "Scalar", - "Status", - "Vector3", - "Vector4", - "Ylm", - "Zlm", - "__version__", - "amplitudes", - "constant", - "convert", - "convert_from_amptools", - "data", - "likelihoods", - "open", - "open_amptools", - "parameter", - "utils", + 'NLL', + 'Angles', + 'BinnedDataset', + 'BreitWigner', + 'ComplexScalar', + 'CosTheta', + 'Dataset', + 'Event', + 'LikelihoodManager', + 'Manager', + 'Mandelstam', + 'Mass', + 'Observer', + 'Phi', + 'PolAngle', + 'PolMagnitude', + 'PolarComplexScalar', + 'Polarization', + 'Scalar', + 'Status', + 'Vector3', + 'Vector4', + 'Ylm', + 'Zlm', + '__version__', + 'amplitudes', + 'constant', + 'convert', + 'convert_from_amptools', + 'data', + 'likelihoods', + 'open', + 'open_amptools', + 
'parameter', + 'utils', ] diff --git a/python/laddu/__init__.pyi b/python/laddu/__init__.pyi index d266b0f..0db21af 100644 --- a/python/laddu/__init__.pyi +++ b/python/laddu/__init__.pyi @@ -7,9 +7,18 @@ from laddu.amplitudes.common import ComplexScalar, PolarComplexScalar, Scalar from laddu.amplitudes.ylm import Ylm from laddu.amplitudes.zlm import Zlm from laddu.convert import convert_from_amptools -from laddu.data import BinnedDataset, Dataset, Event, open # noqa: A004 +from laddu.data import BinnedDataset, Dataset, Event, open from laddu.likelihoods import NLL, LikelihoodManager, Status -from laddu.utils.variables import Angles, CosTheta, Mandelstam, Mass, Phi, PolAngle, Polarization, PolMagnitude +from laddu.utils.variables import ( + Angles, + CosTheta, + Mandelstam, + Mass, + Phi, + PolAngle, + Polarization, + PolMagnitude, +) from laddu.utils.vectors import Vector3, Vector4 from . import amplitudes, convert, data, utils @@ -22,7 +31,7 @@ class Observer(metaclass=ABCMeta): def open_amptools( path: str | Path, - tree: str = "kin", + tree: str = 'kin', *, pol_in_beam: bool = False, pol_angle: float | None = None, @@ -31,39 +40,39 @@ def open_amptools( ) -> Dataset: ... 
__all__ = [ - "NLL", - "Angles", - "BinnedDataset", - "BreitWigner", - "ComplexScalar", - "CosTheta", - "Dataset", - "Event", - "Expression", - "LikelihoodManager", - "Manager", - "Mandelstam", - "Mass", - "Observer", - "Phi", - "PolAngle", - "PolMagnitude", - "PolarComplexScalar", - "Polarization", - "Scalar", - "Status", - "Vector3", - "Vector4", - "Ylm", - "Zlm", - "__version__", - "amplitudes", - "constant", - "convert", - "convert_from_amptools", - "data", - "open", - "open_amptools", - "parameter", - "utils", + 'NLL', + 'Angles', + 'BinnedDataset', + 'BreitWigner', + 'ComplexScalar', + 'CosTheta', + 'Dataset', + 'Event', + 'Expression', + 'LikelihoodManager', + 'Manager', + 'Mandelstam', + 'Mass', + 'Observer', + 'Phi', + 'PolAngle', + 'PolMagnitude', + 'PolarComplexScalar', + 'Polarization', + 'Scalar', + 'Status', + 'Vector3', + 'Vector4', + 'Ylm', + 'Zlm', + '__version__', + 'amplitudes', + 'constant', + 'convert', + 'convert_from_amptools', + 'data', + 'open', + 'open_amptools', + 'parameter', + 'utils', ] diff --git a/python/laddu/amplitudes/__init__.py b/python/laddu/amplitudes/__init__.py index c4574c4..3299190 100644 --- a/python/laddu/amplitudes/__init__.py +++ b/python/laddu/amplitudes/__init__.py @@ -11,17 +11,17 @@ ) __all__ = [ - "AmplitudeID", - "Expression", - "Amplitude", - "Manager", - "Evaluator", - "ParameterLike", - "parameter", - "constant", - "common", - "ylm", - "zlm", - "breit_wigner", - "kmatrix", + 'AmplitudeID', + 'Expression', + 'Amplitude', + 'Manager', + 'Evaluator', + 'ParameterLike', + 'parameter', + 'constant', + 'common', + 'ylm', + 'zlm', + 'breit_wigner', + 'kmatrix', ] diff --git a/python/laddu/amplitudes/__init__.pyi b/python/laddu/amplitudes/__init__.pyi index de8c64c..0c981a3 100644 --- a/python/laddu/amplitudes/__init__.pyi +++ b/python/laddu/amplitudes/__init__.pyi @@ -30,9 +30,12 @@ class Expression: class Amplitude: ... class Manager: + parameters: list[str] def __init__(self) -> None: ... 
def register(self, amplitude: Amplitude) -> AmplitudeID: ... - def load(self, expression: Expression, dataset: Dataset) -> Evaluator: ... + def load( + self, expression: Expression | AmplitudeID, dataset: Dataset + ) -> Evaluator: ... class Evaluator: parameters: list[str] @@ -41,20 +44,25 @@ class Evaluator: def deactivate(self, name: str | list[str]) -> None: ... def deactivate_all(self) -> None: ... def isolate(self, name: str | list[str]) -> None: ... - def evaluate(self, parameters: list[float] | npt.NDArray[np.float64]) -> npt.NDArray[np.complex128]: ... + def evaluate( + self, parameters: list[float] | npt.NDArray[np.float64] + ) -> npt.NDArray[np.complex128]: ... + def evaluate_gradient( + self, parameters: list[float] | npt.NDArray[np.float64] + ) -> npt.NDArray[np.complex128]: ... __all__ = [ - "AmplitudeID", - "Expression", - "Amplitude", - "Manager", - "Evaluator", - "ParameterLike", - "parameter", - "constant", - "common", - "ylm", - "zlm", - "breit_wigner", - "kmatrix", + 'AmplitudeID', + 'Expression', + 'Amplitude', + 'Manager', + 'Evaluator', + 'ParameterLike', + 'parameter', + 'constant', + 'common', + 'ylm', + 'zlm', + 'breit_wigner', + 'kmatrix', ] diff --git a/python/laddu/amplitudes/breit_wigner/__init__.py b/python/laddu/amplitudes/breit_wigner/__init__.py index bb63337..b056a85 100644 --- a/python/laddu/amplitudes/breit_wigner/__init__.py +++ b/python/laddu/amplitudes/breit_wigner/__init__.py @@ -1,3 +1,3 @@ from laddu.laddu import BreitWigner -__all__ = ["BreitWigner"] +__all__ = ['BreitWigner'] diff --git a/python/laddu/amplitudes/breit_wigner/__init__.pyi b/python/laddu/amplitudes/breit_wigner/__init__.pyi index 44c2fca..1ffdb1a 100644 --- a/python/laddu/amplitudes/breit_wigner/__init__.pyi +++ b/python/laddu/amplitudes/breit_wigner/__init__.pyi @@ -3,11 +3,11 @@ from typing import Literal from laddu.amplitudes import Amplitude, ParameterLike from laddu.utils.variables import Mass -def BreitWigner( # noqa: N802 +def BreitWigner( name: str, 
mass: ParameterLike, width: ParameterLike, - l: Literal[0, 1, 2, 3, 4], # noqa: E741 + l: Literal[0, 1, 2, 3, 4], daughter_1_mass: Mass, daughter_2_mass: Mass, resonance_mass: Mass, diff --git a/python/laddu/amplitudes/common/__init__.py b/python/laddu/amplitudes/common/__init__.py index b45baec..6e24d26 100644 --- a/python/laddu/amplitudes/common/__init__.py +++ b/python/laddu/amplitudes/common/__init__.py @@ -1,3 +1,3 @@ from laddu.laddu import ComplexScalar, PolarComplexScalar, Scalar -__all__ = ["Scalar", "ComplexScalar", "PolarComplexScalar"] +__all__ = ['Scalar', 'ComplexScalar', 'PolarComplexScalar'] diff --git a/python/laddu/amplitudes/common/__init__.pyi b/python/laddu/amplitudes/common/__init__.pyi index 863f1d4..a4d7fc8 100644 --- a/python/laddu/amplitudes/common/__init__.pyi +++ b/python/laddu/amplitudes/common/__init__.pyi @@ -1,5 +1,7 @@ from laddu.amplitudes import Amplitude, ParameterLike -def Scalar(name: str, value: ParameterLike) -> Amplitude: ... # noqa: N802 -def ComplexScalar(name: str, re: ParameterLike, im: ParameterLike) -> Amplitude: ... # noqa: N802 -def PolarComplexScalar(name: str, r: ParameterLike, theta: ParameterLike) -> Amplitude: ... # noqa: N802 +def Scalar(name: str, value: ParameterLike) -> Amplitude: ... +def ComplexScalar(name: str, re: ParameterLike, im: ParameterLike) -> Amplitude: ... +def PolarComplexScalar( + name: str, r: ParameterLike, theta: ParameterLike +) -> Amplitude: ... 
diff --git a/python/laddu/amplitudes/kmatrix/__init__.py b/python/laddu/amplitudes/kmatrix/__init__.py index 534ea94..1c76159 100644 --- a/python/laddu/amplitudes/kmatrix/__init__.py +++ b/python/laddu/amplitudes/kmatrix/__init__.py @@ -1,3 +1,17 @@ -from laddu.laddu import KopfKMatrixA0, KopfKMatrixA2, KopfKMatrixF0, KopfKMatrixF2, KopfKMatrixPi1, KopfKMatrixRho +from laddu.laddu import ( + KopfKMatrixA0, + KopfKMatrixA2, + KopfKMatrixF0, + KopfKMatrixF2, + KopfKMatrixPi1, + KopfKMatrixRho, +) -__all__ = ["KopfKMatrixF0", "KopfKMatrixF2", "KopfKMatrixA0", "KopfKMatrixA2", "KopfKMatrixRho", "KopfKMatrixPi1"] +__all__ = [ + 'KopfKMatrixF0', + 'KopfKMatrixF2', + 'KopfKMatrixA0', + 'KopfKMatrixA2', + 'KopfKMatrixRho', + 'KopfKMatrixPi1', +] diff --git a/python/laddu/amplitudes/kmatrix/__init__.pyi b/python/laddu/amplitudes/kmatrix/__init__.pyi index 88b80b6..c2b65c5 100644 --- a/python/laddu/amplitudes/kmatrix/__init__.pyi +++ b/python/laddu/amplitudes/kmatrix/__init__.pyi @@ -3,7 +3,7 @@ from typing import Literal from laddu.amplitudes import Amplitude, ParameterLike from laddu.utils.variables import Mass -def KopfKMatrixF0( # noqa: N802 +def KopfKMatrixF0( name: str, couplings: tuple[ tuple[ParameterLike, ParameterLike], @@ -15,7 +15,7 @@ def KopfKMatrixF0( # noqa: N802 channel: Literal[0, 1, 2, 3, 4], mass: Mass, ) -> Amplitude: ... -def KopfKMatrixF2( # noqa: N802 +def KopfKMatrixF2( name: str, couplings: tuple[ tuple[ParameterLike, ParameterLike], @@ -26,7 +26,7 @@ def KopfKMatrixF2( # noqa: N802 channel: Literal[0, 1, 2, 3], mass: Mass, ) -> Amplitude: ... -def KopfKMatrixA0( # noqa: N802 +def KopfKMatrixA0( name: str, couplings: tuple[ tuple[ParameterLike, ParameterLike], @@ -35,7 +35,7 @@ def KopfKMatrixA0( # noqa: N802 channel: Literal[0, 1], mass: Mass, ) -> Amplitude: ... 
-def KopfKMatrixA2( # noqa: N802 +def KopfKMatrixA2( name: str, couplings: tuple[ tuple[ParameterLike, ParameterLike], @@ -44,7 +44,7 @@ def KopfKMatrixA2( # noqa: N802 channel: Literal[0, 1, 2], mass: Mass, ) -> Amplitude: ... -def KopfKMatrixRho( # noqa: N802 +def KopfKMatrixRho( name: str, couplings: tuple[ tuple[ParameterLike, ParameterLike], @@ -53,7 +53,7 @@ def KopfKMatrixRho( # noqa: N802 channel: Literal[0, 1, 2], mass: Mass, ) -> Amplitude: ... -def KopfKMatrixPi1( # noqa: N802 +def KopfKMatrixPi1( name: str, couplings: tuple[tuple[ParameterLike, ParameterLike],], channel: Literal[0, 1], diff --git a/python/laddu/amplitudes/ylm/__init__.py b/python/laddu/amplitudes/ylm/__init__.py index 54c4bda..c9415bf 100644 --- a/python/laddu/amplitudes/ylm/__init__.py +++ b/python/laddu/amplitudes/ylm/__init__.py @@ -1,3 +1,3 @@ from laddu.laddu import Ylm -__all__ = ["Ylm"] +__all__ = ['Ylm'] diff --git a/python/laddu/amplitudes/ylm/__init__.pyi b/python/laddu/amplitudes/ylm/__init__.pyi index 8768704..35ea8db 100644 --- a/python/laddu/amplitudes/ylm/__init__.pyi +++ b/python/laddu/amplitudes/ylm/__init__.pyi @@ -4,22 +4,24 @@ from laddu.amplitudes import Amplitude from laddu.utils.variables import Angles @overload -def Ylm(name: str, l: Literal[0], m: Literal[0], angles: Angles) -> Amplitude: ... # noqa: E741 +def Ylm(name: str, l: Literal[0], m: Literal[0], angles: Angles) -> Amplitude: ... @overload -def Ylm(name: str, l: Literal[1], m: Literal[-1, 0, 1], angles: Angles) -> Amplitude: ... # noqa: E741 +def Ylm(name: str, l: Literal[1], m: Literal[-1, 0, 1], angles: Angles) -> Amplitude: ... @overload -def Ylm(name: str, l: Literal[2], m: Literal[-2, -1, 0, 1, 2], angles: Angles) -> Amplitude: ... # noqa: E741 +def Ylm( + name: str, l: Literal[2], m: Literal[-2, -1, 0, 1, 2], angles: Angles +) -> Amplitude: ... @overload def Ylm( name: str, - l: Literal[3], # noqa: E741 + l: Literal[3], m: Literal[-3, -2, -1, 0, 1, 2, 3], angles: Angles, ) -> Amplitude: ... 
@overload def Ylm( name: str, - l: Literal[4], # noqa: E741 + l: Literal[4], m: Literal[-4, -3, -2, -1, 0, 1, 2, 3, 4], angles: Angles, ) -> Amplitude: ... diff --git a/python/laddu/amplitudes/zlm/__init__.py b/python/laddu/amplitudes/zlm/__init__.py index c0a538e..d0238e6 100644 --- a/python/laddu/amplitudes/zlm/__init__.py +++ b/python/laddu/amplitudes/zlm/__init__.py @@ -1,3 +1,3 @@ from laddu.laddu import Zlm -__all__ = ["Zlm"] +__all__ = ['Zlm'] diff --git a/python/laddu/amplitudes/zlm/__init__.pyi b/python/laddu/amplitudes/zlm/__init__.pyi index c3df676..3402232 100644 --- a/python/laddu/amplitudes/zlm/__init__.pyi +++ b/python/laddu/amplitudes/zlm/__init__.pyi @@ -1,50 +1,50 @@ from typing import Literal, overload from laddu.amplitudes import Amplitude -from laddu.utils.variables import Angles, CosTheta, Phi, Polarization +from laddu.utils.variables import Angles, Polarization @overload def Zlm( name: str, - l: Literal[0], # noqa: E741 + l: Literal[0], m: Literal[0], - r: Literal["+", "plus", "pos", "positive", "-", "minus", "neg", "negative"], + r: Literal['+', 'plus', 'pos', 'positive', '-', 'minus', 'neg', 'negative'], angles: Angles, polarization: Polarization, ) -> Amplitude: ... @overload def Zlm( name: str, - l: Literal[1], # noqa: E741 + l: Literal[1], m: Literal[-1, 0, 1], - r: Literal["+", "plus", "pos", "positive", "-", "minus", "neg", "negative"], + r: Literal['+', 'plus', 'pos', 'positive', '-', 'minus', 'neg', 'negative'], angles: Angles, polarization: Polarization, ) -> Amplitude: ... @overload def Zlm( name: str, - l: Literal[2], # noqa: E741 + l: Literal[2], m: Literal[-2, -1, 0, 1, 2], - r: Literal["+", "plus", "pos", "positive", "-", "minus", "neg", "negative"], + r: Literal['+', 'plus', 'pos', 'positive', '-', 'minus', 'neg', 'negative'], angles: Angles, polarization: Polarization, ) -> Amplitude: ... 
@overload def Zlm( name: str, - l: Literal[3], # noqa: E741 + l: Literal[3], m: Literal[-3, -2, -1, 0, 1, 2, 3], - r: Literal["+", "plus", "pos", "positive", "-", "minus", "neg", "negative"], + r: Literal['+', 'plus', 'pos', 'positive', '-', 'minus', 'neg', 'negative'], angles: Angles, polarization: Polarization, ) -> Amplitude: ... @overload def Zlm( name: str, - l: Literal[4], # noqa: E741 + l: Literal[4], m: Literal[-4, -3, -2, -1, 0, 1, 2, 3, 4], - r: Literal["+", "plus", "pos", "positive", "-", "minus", "neg", "negative"], + r: Literal['+', 'plus', 'pos', 'positive', '-', 'minus', 'neg', 'negative'], angles: Angles, polarization: Polarization, ) -> Amplitude: ... diff --git a/python/laddu/convert.py b/python/laddu/convert.py index 584555e..cc8aec4 100644 --- a/python/laddu/convert.py +++ b/python/laddu/convert.py @@ -24,22 +24,34 @@ def read_root_file( input_file, tree_name, pol_in_beam, pol_angle, pol_magnitude, num_entries=None ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Read ROOT file and extract data with optional polarization from the beam.""" - logger.info(f"Reading ROOT file: {input_file}") + logger.info(f'Reading ROOT file: {input_file}') tfile = uproot.open(input_file) tree = tfile[tree_name] - logger.info(f"Using tree: {tree_name}") + logger.info(f'Using tree: {tree_name}') # Read necessary branches - E_beam = tree["E_Beam"].array(library="np", entry_stop=num_entries) # pyright: ignore - Px_beam = tree["Px_Beam"].array(library="np", entry_stop=num_entries) # pyright: ignore - Py_beam = tree["Py_Beam"].array(library="np", entry_stop=num_entries) # pyright: ignore - Pz_beam = tree["Pz_Beam"].array(library="np", entry_stop=num_entries) # pyright: ignore - weight = tree["Weight"].array(library="np", entry_stop=num_entries) if "Weight" in tree else np.ones_like(E_beam) # pyright: ignore + E_beam = tree['E_Beam'].array(library='np', entry_stop=num_entries) # pyright: ignore + Px_beam = tree['Px_Beam'].array(library='np', entry_stop=num_entries) # 
pyright: ignore + Py_beam = tree['Py_Beam'].array(library='np', entry_stop=num_entries) # pyright: ignore + Pz_beam = tree['Pz_Beam'].array(library='np', entry_stop=num_entries) # pyright: ignore + weight = ( + tree['Weight'].array(library='np', entry_stop=num_entries) + if 'Weight' in tree + else np.ones_like(E_beam) + ) # pyright: ignore # Final state particles - E_final = np.array(list(tree["E_FinalState"].array(library="np", entry_stop=num_entries))) # pyright: ignore - Px_final = np.array(list(tree["Px_FinalState"].array(library="np", entry_stop=num_entries))) # pyright: ignore - Py_final = np.array(list(tree["Py_FinalState"].array(library="np", entry_stop=num_entries))) # pyright: ignore - Pz_final = np.array(list(tree["Pz_FinalState"].array(library="np", entry_stop=num_entries))) # pyright: ignore + E_final = np.array( + list(tree['E_FinalState'].array(library='np', entry_stop=num_entries)) + ) # pyright: ignore + Px_final = np.array( + list(tree['Px_FinalState'].array(library='np', entry_stop=num_entries)) + ) # pyright: ignore + Py_final = np.array( + list(tree['Py_FinalState'].array(library='np', entry_stop=num_entries)) + ) # pyright: ignore + Pz_final = np.array( + list(tree['Pz_FinalState'].array(library='np', entry_stop=num_entries)) + ) # pyright: ignore # Handle beam four-vector: (nevents, 4) p4_beam = np.stack([Px_beam, Py_beam, Pz_beam, E_beam], axis=-1) @@ -48,13 +60,13 @@ def read_root_file( p4_final = np.stack([Px_final, Py_final, Pz_final, E_final], axis=-1) # Check if EPS branch exists and update eps if needed - if "EPS" in tree: - logger.info("EPS branch found. Using it for eps values.") - eps = tree["EPS"].array(library="np", entry_stop=num_entries) # pyright: ignore + if 'EPS' in tree: + logger.info('EPS branch found. Using it for eps values.') + eps = tree['EPS'].array(library='np', entry_stop=num_entries) # pyright: ignore eps = eps[:, np.newaxis, :] - if "eps" in tree: - logger.info("eps branch found. 
Using it for eps values.") - eps = tree["eps"].array(library="np", entry_stop=num_entries) # pyright: ignore + if 'eps' in tree: + logger.info('eps branch found. Using it for eps values.') + eps = tree['eps'].array(library='np', entry_stop=num_entries) # pyright: ignore eps = eps[:, np.newaxis, :] elif pol_in_beam: logger.info("Using beam's momentum for polarization (eps).") @@ -64,17 +76,19 @@ def read_root_file( p4_beam[:, 1] = 0 # Set Py to 0 p4_beam[:, 2] = E_beam # Set Pz = E for beam elif pol_angle is not None and pol_magnitude is not None: - logger.info(f"Using input polarization angle ({pol_angle}) and magnitude ({pol_magnitude}).") + logger.info( + f'Using input polarization angle ({pol_angle}) and magnitude ({pol_magnitude}).' + ) eps_x = pol_magnitude * np.cos(pol_angle) * np.ones_like(E_beam) eps_y = pol_magnitude * np.sin(pol_angle) * np.ones_like(E_beam) eps_z = np.zeros_like(E_beam) eps = np.stack([eps_x, eps_y, eps_z], axis=-1)[:, np.newaxis] else: - logger.info("Using default or provided eps values.") + logger.info('Using default or provided eps values.') eps = np.zeros((len(E_beam), 1, 3), dtype=np.float32) # Default to 0 # Concatenate the beam and final state particles: (nevents, nparticles+1, 4) - logger.info("Concatenating beam and final state particles.") + logger.info('Concatenating beam and final state particles.') p4s = np.concatenate([p4_beam[:, np.newaxis, :], p4_final], axis=1) return p4s.astype(np.float32), eps.astype(np.float32), weight @@ -82,61 +96,69 @@ def read_root_file( def save_as_parquet(p4s, eps, weight, output_file): """Save the processed data into Parquet format.""" - logger.info("Saving data to Parquet format.") + logger.info('Saving data to Parquet format.') # Flatten the p4s and eps into individual columns columns = {} n_particles = p4s.shape[1] for i in range(n_particles): - columns[f"p4_{i}_Px"] = p4s[:, i, 0] - columns[f"p4_{i}_Py"] = p4s[:, i, 1] - columns[f"p4_{i}_Pz"] = p4s[:, i, 2] - columns[f"p4_{i}_E"] = p4s[:, 
i, 3] + columns[f'p4_{i}_Px'] = p4s[:, i, 0] + columns[f'p4_{i}_Py'] = p4s[:, i, 1] + columns[f'p4_{i}_Pz'] = p4s[:, i, 2] + columns[f'p4_{i}_E'] = p4s[:, i, 3] n_eps = eps.shape[1] for i in range(n_eps): - columns[f"eps_{i}_x"] = eps[:, i, 0] - columns[f"eps_{i}_y"] = eps[:, i, 1] - columns[f"eps_{i}_z"] = eps[:, i, 2] + columns[f'eps_{i}_x'] = eps[:, i, 0] + columns[f'eps_{i}_y'] = eps[:, i, 1] + columns[f'eps_{i}_z'] = eps[:, i, 2] # Add weights - columns["weight"] = weight + columns['weight'] = weight # Create a DataFrame and save as Parquet data = pd.DataFrame(columns) data.to_parquet(output_file, index=False) - logger.info(f"File saved: {output_file}") + logger.info(f'File saved: {output_file}') def convert_from_amptools( input_path: Path, output_path: Path, - tree_name: str = "kin", - pol_in_beam: bool = False, # noqa: FBT001, FBT002 + tree_name: str = 'kin', + pol_in_beam: bool = False, pol_angle: float | None = None, pol_magnitude: float | None = None, num_entries: int | None = None, ): - p4s, eps, weight = read_root_file(input_path, tree_name, pol_in_beam, pol_angle, pol_magnitude, num_entries) + p4s, eps, weight = read_root_file( + input_path, tree_name, pol_in_beam, pol_angle, pol_magnitude, num_entries + ) save_as_parquet(p4s, eps, weight, output_path) def run(): """Main entry point for the script.""" - args = docopt(__doc__ if __doc__ else "") - input_file = args[""] - output_file = args[""] - tree_name = args["--tree"] - pol_in_beam = args["--pol-in-beam"] - pol_angle = float(args["--pol-angle"]) * np.pi / 180 if args["--pol-angle"] else None - pol_magnitude = float(args["--pol-magnitude"]) if args["--pol-magnitude"] else None - num_entries = int(args["-n"]) if args["-n"] else None + args = docopt(__doc__ if __doc__ else '') + input_file = args[''] + output_file = args[''] + tree_name = args['--tree'] + pol_in_beam = args['--pol-in-beam'] + pol_angle = float(args['--pol-angle']) * np.pi / 180 if args['--pol-angle'] else None + pol_magnitude = 
float(args['--pol-magnitude']) if args['--pol-magnitude'] else None + num_entries = int(args['-n']) if args['-n'] else None convert_from_amptools( - Path(input_file), Path(output_file), tree_name, pol_in_beam, pol_angle, pol_magnitude, num_entries + Path(input_file), + Path(output_file), + tree_name, + pol_in_beam, + pol_angle, + pol_magnitude, + num_entries, ) df_read = pd.read_parquet(output_file) - print("Output Parquet File (head):") # noqa: T201 - print(df_read.head()) # noqa: T201 - print(f"Total Entries: {len(df_read)}") # noqa: T201 + print('Output Parquet File (head):') + print(df_read.head()) + print(f'Total Entries: {len(df_read)}') diff --git a/python/laddu/data/__init__.py b/python/laddu/data/__init__.py index 2f8493b..c9c4a22 100644 --- a/python/laddu/data/__init__.py +++ b/python/laddu/data/__init__.py @@ -1,3 +1,3 @@ from laddu.laddu import BinnedDataset, Dataset, Event, open -__all__ = ["Event", "Dataset", "open", "BinnedDataset"] +__all__ = ['Event', 'Dataset', 'open', 'BinnedDataset'] diff --git a/python/laddu/data/__init__.pyi b/python/laddu/data/__init__.pyi index 8873f57..00d4ed4 100644 --- a/python/laddu/data/__init__.pyi +++ b/python/laddu/data/__init__.pyi @@ -9,6 +9,7 @@ class Event: eps: list[Vector3] weight: float def __init__(self, p4s: list[Vector4], eps: list[Vector3], weight: float): ... + def get_p4_sum(self, indices: list[int]) -> Vector4: ... class Dataset: events: list[Event] @@ -18,7 +19,9 @@ class Dataset: def __getitem__(self, index: int) -> Event: ... def len(self) -> int: ... def weighted_len(self) -> float: ... - def bin_by(self, variable: Mass, bins: int, range: tuple[float, float]) -> BinnedDataset: ... # noqa: A002 + def bin_by( + self, variable: Mass, bins: int, range: tuple[float, float] + ) -> BinnedDataset: ... def bootstrap(self, seed: int) -> Dataset: ... class BinnedDataset: @@ -29,4 +32,4 @@ class BinnedDataset: def len(self) -> int: ... def __getitem__(self, index: int) -> Dataset: ... 
-def open(path: str) -> Dataset: ... # noqa: A001 +def open(path: str) -> Dataset: ... diff --git a/python/laddu/likelihoods/__init__.py b/python/laddu/likelihoods/__init__.py index 312b3ef..52e0e74 100644 --- a/python/laddu/likelihoods/__init__.py +++ b/python/laddu/likelihoods/__init__.py @@ -11,13 +11,13 @@ ) __all__ = [ - "NLL", - "Status", - "Bound", - "LikelihoodID", - "LikelihoodExpression", - "LikelihoodTerm", - "LikelihoodManager", - "LikelihoodEvaluator", - "LikelihoodScalar", + 'NLL', + 'Status', + 'Bound', + 'LikelihoodID', + 'LikelihoodExpression', + 'LikelihoodTerm', + 'LikelihoodManager', + 'LikelihoodEvaluator', + 'LikelihoodScalar', ] diff --git a/python/laddu/likelihoods/__init__.pyi b/python/laddu/likelihoods/__init__.pyi index 620fa82..678e1ce 100644 --- a/python/laddu/likelihoods/__init__.pyi +++ b/python/laddu/likelihoods/__init__.pyi @@ -4,27 +4,45 @@ from typing import Any, Literal import numpy as np import numpy.typing as npt -from laddu.amplitudes import Evaluator, Expression, Manager +from laddu.amplitudes import AmplitudeID, Evaluator, Expression, Manager from laddu.data import Dataset class LikelihoodID: - def __add__(self, other: LikelihoodID | LikelihoodExpression | int) -> LikelihoodExpression: ... - def __radd__(self, other: LikelihoodID | LikelihoodExpression | int) -> LikelihoodExpression: ... - def __mul__(self, other: LikelihoodID | LikelihoodExpression) -> LikelihoodExpression: ... - def __rmul__(self, other: LikelihoodID | LikelihoodExpression) -> LikelihoodExpression: ... + def __add__( + self, other: LikelihoodID | LikelihoodExpression | int + ) -> LikelihoodExpression: ... + def __radd__( + self, other: LikelihoodID | LikelihoodExpression | int + ) -> LikelihoodExpression: ... + def __mul__( + self, other: LikelihoodID | LikelihoodExpression + ) -> LikelihoodExpression: ... + def __rmul__( + self, other: LikelihoodID | LikelihoodExpression + ) -> LikelihoodExpression: ... 
class LikelihoodExpression: - def __add__(self, other: LikelihoodID | LikelihoodExpression | int) -> LikelihoodExpression: ... - def __radd__(self, other: LikelihoodID | LikelihoodExpression | int) -> LikelihoodExpression: ... - def __mul__(self, other: LikelihoodID | LikelihoodExpression) -> LikelihoodExpression: ... - def __rmul__(self, other: LikelihoodID | LikelihoodExpression) -> LikelihoodExpression: ... + def __add__( + self, other: LikelihoodID | LikelihoodExpression | int + ) -> LikelihoodExpression: ... + def __radd__( + self, other: LikelihoodID | LikelihoodExpression | int + ) -> LikelihoodExpression: ... + def __mul__( + self, other: LikelihoodID | LikelihoodExpression + ) -> LikelihoodExpression: ... + def __rmul__( + self, other: LikelihoodID | LikelihoodExpression + ) -> LikelihoodExpression: ... class LikelihoodTerm: ... class LikelihoodManager: def __init__(self) -> None: ... def register(self, likelihood_term: LikelihoodTerm) -> LikelihoodID: ... - def load(self, likelihood_expression: LikelihoodExpression) -> LikelihoodEvaluator: ... + def load( + self, likelihood_expression: LikelihoodExpression + ) -> LikelihoodEvaluator: ... class LikelihoodEvaluator: parameters: list[str] @@ -32,11 +50,12 @@ class LikelihoodEvaluator: def minimize( self, p0: list[float] | npt.NDArray[np.float64], + *, bounds: Sequence[tuple[float | None, float | None]] | None = None, - method: Literal["lbfgsb", "nelder_mead"] = "lbfgsb", + method: Literal['lbfgsb', 'nelder_mead'] = 'lbfgsb', max_steps: int = 4000, - debug: bool = False, # noqa: FBT001, FBT002 - verbose: bool = False, # noqa: FBT001, FBT002 + debug: bool = False, + verbose: bool = False, **kwargs, ) -> Status: ... @@ -47,7 +66,7 @@ class NLL: def __init__( self, manager: Manager, - expression: Expression, + expression: Expression | AmplitudeID, ds_data: Dataset, ds_accmc: Dataset, ) -> None: ... @@ -59,7 +78,10 @@ class NLL: def isolate(self, name: str | list[str]) -> None: ... 
def evaluate(self, parameters: list[float] | npt.NDArray[np.float64]) -> float: ... def project( - self, parameters: list[float] | npt.NDArray[np.float64], *, mc_evaluator: Evaluator | None = None + self, + parameters: list[float] | npt.NDArray[np.float64], + *, + mc_evaluator: Evaluator | None = None, ) -> npt.NDArray[np.float64]: ... def project_with( self, @@ -71,15 +93,16 @@ class NLL: def minimize( self, p0: list[float] | npt.NDArray[np.float64], + *, bounds: Sequence[tuple[float | None, float | None]] | None = None, - method: Literal["lbfgsb", "nelder_mead"] = "lbfgsb", + method: Literal['lbfgsb', 'nelder_mead'] = 'lbfgsb', max_steps: int = 4000, - debug: bool = False, # noqa: FBT001, FBT002 - verbose: bool = False, # noqa: FBT001, FBT002 + debug: bool = False, + verbose: bool = False, **kwargs, ) -> Status: ... -def LikelihoodScalar(name: str) -> LikelihoodTerm: ... # noqa: N802 +def LikelihoodScalar(name: str) -> LikelihoodTerm: ... class Status: x: npt.NDArray[np.float64] @@ -107,13 +130,13 @@ class Bound: upper: float __all__ = [ - "NLL", - "Status", - "Bound", - "LikelihoodID", - "LikelihoodExpression", - "LikelihoodTerm", - "LikelihoodManager", - "LikelihoodEvaluator", - "LikelihoodScalar", + 'NLL', + 'Status', + 'Bound', + 'LikelihoodID', + 'LikelihoodExpression', + 'LikelihoodTerm', + 'LikelihoodManager', + 'LikelihoodEvaluator', + 'LikelihoodScalar', ] diff --git a/python/laddu/utils/__init__.py b/python/laddu/utils/__init__.py index cac0830..12932a2 100644 --- a/python/laddu/utils/__init__.py +++ b/python/laddu/utils/__init__.py @@ -1,3 +1,3 @@ from laddu.utils import variables, vectors -__all__ = ["vectors", "variables"] +__all__ = ['vectors', 'variables'] diff --git a/python/laddu/utils/variables/__init__.py b/python/laddu/utils/variables/__init__.py index afc3cbb..00dedb4 100644 --- a/python/laddu/utils/variables/__init__.py +++ b/python/laddu/utils/variables/__init__.py @@ -1,3 +1,21 @@ -from laddu.laddu import Angles, CosTheta, Mandelstam, 
Mass, Phi, PolAngle, Polarization, PolMagnitude +from laddu.laddu import ( + Angles, + CosTheta, + Mandelstam, + Mass, + Phi, + PolAngle, + Polarization, + PolMagnitude, +) -__all__ = ["Angles", "CosTheta", "Mass", "Phi", "PolAngle", "Polarization", "PolMagnitude", "Mandelstam"] +__all__ = [ + 'Angles', + 'CosTheta', + 'Mass', + 'Phi', + 'PolAngle', + 'Polarization', + 'PolMagnitude', + 'Mandelstam', +] diff --git a/python/laddu/utils/variables/__init__.pyi b/python/laddu/utils/variables/__init__.pyi index ffa05d0..8470ba6 100644 --- a/python/laddu/utils/variables/__init__.pyi +++ b/python/laddu/utils/variables/__init__.pyi @@ -18,8 +18,14 @@ class CosTheta: daughter: list[int], resonance: list[int], frame: Literal[ - "Helicity", "HX", "HEL", "GottfriedJackson", "Gottfried Jackson", "GJ", "Gottfried-Jackson" - ] = "Helicity", + 'Helicity', + 'HX', + 'HEL', + 'GottfriedJackson', + 'Gottfried Jackson', + 'GJ', + 'Gottfried-Jackson', + ] = 'Helicity', ): ... def value(self, event: Event) -> float: ... def value_on(self, dataset: Dataset) -> npt.NDArray[np.float64]: ... @@ -32,8 +38,14 @@ class Phi: daughter: list[int], resonance: list[int], frame: Literal[ - "Helicity", "HX", "HEL", "GottfriedJackson", "Gottfried Jackson", "GJ", "Gottfried-Jackson" - ] = "Helicity", + 'Helicity', + 'HX', + 'HEL', + 'GottfriedJackson', + 'Gottfried Jackson', + 'GJ', + 'Gottfried-Jackson', + ] = 'Helicity', ): ... def value(self, event: Event) -> float: ... def value_on(self, dataset: Dataset) -> npt.NDArray[np.float64]: ... @@ -48,8 +60,14 @@ class Angles: daughter: list[int], resonance: list[int], frame: Literal[ - "Helicity", "HX", "HEL", "GottfriedJackson", "Gottfried Jackson", "GJ", "Gottfried-Jackson" - ] = "Helicity", + 'Helicity', + 'HX', + 'HEL', + 'GottfriedJackson', + 'Gottfried Jackson', + 'GJ', + 'Gottfried-Jackson', + ] = 'Helicity', ) -> None: ... 
class PolAngle: @@ -81,7 +99,7 @@ class Mandelstam: p2: list[int], p3: list[int], p4: list[int], - channel: Literal["s", "t", "u"], + channel: Literal['s', 't', 'u'], ): ... def value(self, event: Event) -> float: ... def value_on(self, dataset: Dataset) -> npt.NDArray[np.float64]: ... diff --git a/python/laddu/utils/vectors/__init__.py b/python/laddu/utils/vectors/__init__.py index 0283ee8..040207d 100644 --- a/python/laddu/utils/vectors/__init__.py +++ b/python/laddu/utils/vectors/__init__.py @@ -1,3 +1,3 @@ from laddu.laddu import Vector3, Vector4 -__all__ = ["Vector3", "Vector4"] +__all__ = ['Vector3', 'Vector4'] diff --git a/python/laddu/utils/vectors/__init__.pyi b/python/laddu/utils/vectors/__init__.pyi index 17e197f..2793ebb 100644 --- a/python/laddu/utils/vectors/__init__.pyi +++ b/python/laddu/utils/vectors/__init__.pyi @@ -16,6 +16,9 @@ class Vector3: def __init__(self, px: float, py: float, pz: float): ... def __add__(self, other: Vector3 | int) -> Vector3: ... def __radd__(self, other: Vector3 | int) -> Vector3: ... + def __sub__(self, other: Vector3 | int) -> Vector3: ... + def __rsub__(self, other: Vector3 | int) -> Vector3: ... + def __neg__(self) -> Vector3: ... def dot(self, other: Vector3) -> float: ... def cross(self, other: Vector3) -> Vector3: ... def to_numpy(self) -> npt.NDArray[np.float64]: ... @@ -23,6 +26,7 @@ class Vector3: def from_array(array: Sequence) -> Vector3: ... def with_mass(self, mass: float) -> Vector4: ... def with_energy(self, mass: float) -> Vector4: ... + def __getitem__(self, index: int) -> float: ... class Vector4: mag: float @@ -39,7 +43,11 @@ class Vector4: m2: float def __init__(self, px: float, py: float, pz: float, e: float): ... def __add__(self, other: Vector4) -> Vector4: ... + def __sub__(self, other: Vector4 | int) -> Vector4: ... + def __rsub__(self, other: Vector4 | int) -> Vector4: ... + def __neg__(self) -> Vector4: ... def boost(self, beta: Vector3) -> Vector4: ... 
def to_numpy(self) -> npt.NDArray[np.float64]: ... @staticmethod def from_array(array: Sequence) -> Vector4: ... + def __getitem__(self, index: int) -> float: ... diff --git a/python/tests/test_amplitudes.py b/python/tests/test_amplitudes.py new file mode 100644 index 0000000..527270f --- /dev/null +++ b/python/tests/test_amplitudes.py @@ -0,0 +1,166 @@ +from laddu import ( + Manager, + Scalar, + ComplexScalar, + Event, + Dataset, + Vector3, + constant, + parameter, +) +import pytest + + +def make_test_event() -> Event: + return Event( + [ + Vector3(0.0, 0.0, 8.747).with_mass(0.0), + Vector3(0.119, 0.374, 0.222).with_mass(1.007), + Vector3(-0.112, 0.293, 3.081).with_mass(0.498), + Vector3(-0.007, -0.667, 5.446).with_mass(0.498), + ], + [Vector3(0.385, 0.022, 0.000)], + 0.48, + ) + + +def make_test_dataset() -> Dataset: + return Dataset([make_test_event()]) + + +def test_constant_amplitude(): + manager = Manager() + amp = Scalar('constant', constant(2.0)) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([]) + assert result[0] == 2.0 + 0.0j + + +def test_parametric_amplitude(): + manager = Manager() + amp = Scalar('parametric', parameter('test_param')) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([3.0]) + assert result[0] == 3.0 + 0.0j + + +def test_expression_operations(): + manager = Manager() + amp1 = ComplexScalar('const1', constant(2.0), constant(0.0)) + amp2 = ComplexScalar('const2', constant(0.0), constant(1.0)) + amp3 = ComplexScalar('const3', constant(3.0), constant(4.0)) + aid1 = manager.register(amp1) + aid2 = manager.register(amp2) + aid3 = manager.register(amp3) + dataset = make_test_dataset() + expr_add = aid1 + aid2 + eval_add = manager.load(expr_add, dataset) + result_add = eval_add.evaluate([]) + assert result_add[0] == 2.0 + 1.0j + + expr_mul = aid1 * aid2 + eval_mul = 
manager.load(expr_mul, dataset) + result_mul = eval_mul.evaluate([]) + assert result_mul[0] == 0.0 + 2.0j + + expr_add2 = expr_add + expr_mul + eval_add2 = manager.load(expr_add2, dataset) + result_add2 = eval_add2.evaluate([]) + assert result_add2[0] == 2.0 + 3.0j + + expr_mul2 = expr_add * expr_mul + eval_mul2 = manager.load(expr_mul2, dataset) + result_mul2 = eval_mul2.evaluate([]) + assert result_mul2[0] == -2.0 + 4.0j + + expr_real = aid3.real() + eval_real = manager.load(expr_real, dataset) + result_real = eval_real.evaluate([]) + assert result_real[0] == 3.0 + 0.0j + + expr_mul2_real = expr_mul2.real() + eval_mul2_real = manager.load(expr_mul2_real, dataset) + result_mul2_real = eval_mul2_real.evaluate([]) + assert result_mul2_real[0] == -2.0 + 0.0j + + expr_imag = aid3.imag() + eval_imag = manager.load(expr_imag, dataset) + result_imag = eval_imag.evaluate([]) + assert result_imag[0] == 4.0 + 0.0j + + expr_mul2_imag = expr_mul2.imag() + eval_mul2_imag = manager.load(expr_mul2_imag, dataset) + result_mul2_imag = eval_mul2_imag.evaluate([]) + assert result_mul2_imag[0] == 4.0 + 0.0j + + expr_norm = aid1.norm_sqr() + eval_norm = manager.load(expr_norm, dataset) + result_norm = eval_norm.evaluate([]) + assert result_norm[0] == 4.0 + 0.0j + + expr_mul2_norm = expr_mul2.norm_sqr() + eval_mul2_norm = manager.load(expr_mul2_norm, dataset) + result_mul2_norm = eval_mul2_norm.evaluate([]) + assert result_mul2_norm[0] == 20.0 + 0.0j + + +def test_amplitude_activation(): + manager = Manager() + amp1 = ComplexScalar('const1', constant(1.0), constant(0.0)) + amp2 = ComplexScalar('const2', constant(2.0), constant(0.0)) + aid1 = manager.register(amp1) + aid2 = manager.register(amp2) + dataset = make_test_dataset() + + expr = aid1 + aid2 + evaluator = manager.load(expr, dataset) + result = evaluator.evaluate([]) + assert result[0] == 3.0 + 0.0j + + evaluator.deactivate('const1') + result = evaluator.evaluate([]) + assert result[0] == 2.0 + 0.0j + + 
evaluator.isolate('const1') + result = evaluator.evaluate([]) + assert result[0] == 1.0 + 0.0j + + evaluator.activate_all() + result = evaluator.evaluate([]) + assert result[0] == 3.0 + 0.0j + + +def test_gradient(): + manager = Manager() + amp = Scalar('parametric', parameter('test_param')) + aid = manager.register(amp) + dataset = make_test_dataset() + expr = aid.norm_sqr() + evaluator = manager.load(expr, dataset) + params = [2.0] + gradient = evaluator.evaluate_gradient(params) + # For |f(x)|^2 where f(x) = x, the derivative should be 2x + assert gradient[0][0].real == 4.0 + assert gradient[0][0].imag == 0.0 + + +def test_parameter_registration(): + manager = Manager() + amp = Scalar('parametric', parameter('test_param')) + manager.register(amp) + parameters = manager.parameters + assert len(parameters) == 1 + assert parameters[0] == 'test_param' + + +def test_duplicate_amplitude_registration(): + manager = Manager() + amp1 = ComplexScalar('same_name', constant(1.0), constant(0.0)) + amp2 = ComplexScalar('same_name', constant(2.0), constant(0.0)) + manager.register(amp1) + with pytest.raises(ValueError): + manager.register(amp2) diff --git a/python/tests/test_breit_wigner.py b/python/tests/test_breit_wigner.py new file mode 100644 index 0000000..f322628 --- /dev/null +++ b/python/tests/test_breit_wigner.py @@ -0,0 +1,47 @@ +from laddu import Event, Dataset, Vector3, BreitWigner, Mass, Manager, parameter +import pytest + + +def make_test_event() -> Event: + return Event( + [ + Vector3(0.0, 0.0, 8.747).with_mass(0.0), + Vector3(0.119, 0.374, 0.222).with_mass(1.007), + Vector3(-0.112, 0.293, 3.081).with_mass(0.498), + Vector3(-0.007, -0.667, 5.446).with_mass(0.498), + ], + [Vector3(0.385, 0.022, 0.000)], + 0.48, + ) + + +def make_test_dataset() -> Dataset: + return Dataset([make_test_event()]) + + +def test_bw_evaluation(): + manager = Manager() + amp = BreitWigner( + 'bw', parameter('mass'), parameter('width'), 2, Mass([2]), Mass([3]), Mass([2, 3]) + ) + aid = 
manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([1.5, 0.3]) + assert pytest.approx(result[0].real) == 1.4585691 + assert pytest.approx(result[0].imag) == 1.4107341 + + +def test_bw_gradient(): + manager = Manager() + amp = BreitWigner( + 'bw', parameter('mass'), parameter('width'), 2, Mass([2]), Mass([3]), Mass([2, 3]) + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient([1.5, 0.3]) + assert pytest.approx(result[0][0].real) == 1.3252039 + assert pytest.approx(result[0][0].imag) == -11.6827505 + assert pytest.approx(result[0][1].real) == -2.2688852 + assert pytest.approx(result[0][1].imag) == 2.5079719 diff --git a/python/tests/test_common.py b/python/tests/test_common.py new file mode 100644 index 0000000..60d0718 --- /dev/null +++ b/python/tests/test_common.py @@ -0,0 +1,106 @@ +from laddu import ( + Manager, + Scalar, + ComplexScalar, + PolarComplexScalar, + Event, + Vector3, + Dataset, + parameter, + constant, +) +import numpy as np +import pytest + + +def make_test_event() -> Event: + return Event( + [ + Vector3(0.0, 0.0, 8.747).with_mass(0.0), + Vector3(0.119, 0.374, 0.222).with_mass(1.007), + Vector3(-0.112, 0.293, 3.081).with_mass(0.498), + Vector3(-0.007, -0.667, 5.446).with_mass(0.498), + ], + [Vector3(0.385, 0.022, 0.000)], + 0.48, + ) + + +def make_test_dataset() -> Dataset: + return Dataset([make_test_event()]) + + +def test_scalar_creation_and_evaluation(): + manager = Manager() + amp = Scalar('test_scalar', parameter('test_param')) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([2.5]) + assert result[0].real == 2.5 + assert result[0].imag == 0.0 + + +def test_scalar_gradient(): + manager = Manager() + amp = Scalar('test_scalar', parameter('test_param')) + aid = manager.register(amp) + dataset = 
make_test_dataset() + expr = aid.norm_sqr() + evaluator = manager.load(expr, dataset) + gradient = evaluator.evaluate_gradient([2.0]) + assert gradient[0][0].real == 4.0 + assert gradient[0][0].imag == 0.0 + + +def test_complex_scalar_creation_and_evaluation(): + manager = Manager() + amp = ComplexScalar('test_complex', parameter('re_param'), parameter('im_param')) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([1.5, 2.5]) + assert result[0].real == 1.5 + assert result[0].imag == 2.5 + + +def test_complex_scalar_gradient(): + manager = Manager() + amp = ComplexScalar('test_complex', parameter('re_param'), parameter('im_param')) + aid = manager.register(amp) + dataset = make_test_dataset() + expr = aid.norm_sqr() + evaluator = manager.load(expr, dataset) + gradient = evaluator.evaluate_gradient([3.0, 4.0]) + assert gradient[0][0].real == 6.0 + assert gradient[0][0].imag == 0.0 + assert gradient[0][1].real == 8.0 + assert gradient[0][1].imag == 0.0 + + +def test_polar_complex_scalar_creation_and_evaluation(): + manager = Manager() + amp = PolarComplexScalar('test_polar', parameter('r_param'), parameter('theta_param')) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + r = 2.0 + theta = np.pi / 4.0 + result = evaluator.evaluate([r, theta]) + assert pytest.approx(result[0].real) == r * np.cos(theta) + assert pytest.approx(result[0].imag) == r * np.sin(theta) + + +def test_polar_complex_scalar_gradient(): + manager = Manager() + amp = PolarComplexScalar('test_polar', parameter('r_param'), parameter('theta_param')) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + r = 2.0 + theta = np.pi / 4.0 + gradient = evaluator.evaluate_gradient([r, theta]) + assert pytest.approx(gradient[0][0].real) == np.cos(theta) + assert pytest.approx(gradient[0][0].imag) == np.sin(theta) + assert 
pytest.approx(gradient[0][1].real) == -r * np.sin(theta) + assert pytest.approx(gradient[0][1].imag) == r * np.cos(theta) diff --git a/python/tests/test_data.py b/python/tests/test_data.py new file mode 100644 index 0000000..1b77857 --- /dev/null +++ b/python/tests/test_data.py @@ -0,0 +1,122 @@ +from laddu import Event, Dataset, Vector3, Mass + + +def make_test_event() -> Event: + return Event( + [ + Vector3(0.0, 0.0, 8.747).with_mass(0.0), + Vector3(0.119, 0.374, 0.222).with_mass(1.007), + Vector3(-0.112, 0.293, 3.081).with_mass(0.498), + Vector3(-0.007, -0.667, 5.446).with_mass(0.498), + ], + [Vector3(0.385, 0.022, 0.000)], + 0.48, + ) + + +def make_test_dataset() -> Dataset: + return Dataset([make_test_event()]) + + +def test_event_creation(): + event = make_test_event() + assert len(event.p4s) == 4 + assert len(event.eps) == 1 + assert event.weight == 0.48 + + +def test_event_p4_sum(): + event = make_test_event() + sum = event.get_p4_sum([2, 3]) + assert sum[0] == event.p4s[2].px + event.p4s[3].px + assert sum[1] == event.p4s[2].py + event.p4s[3].py + assert sum[2] == event.p4s[2].pz + event.p4s[3].pz + assert sum[3] == event.p4s[2].e + event.p4s[3].e + + +def test_dataset_size_check(): + dataset = Dataset([]) + assert len(dataset) == 0 + dataset = make_test_dataset() + assert len(dataset) == 1 + + +def test_dataset_weights(): + dataset = Dataset( + [ + make_test_event(), + Event( + make_test_event().p4s, + make_test_event().eps, + 0.52, + ), + ] + ) + weights = dataset.weights + assert len(weights) == 2 + assert weights[0] == 0.48 + assert weights[1] == 0.52 + assert dataset.weighted_len() == 1.0 + + +# TODO: Dataset::filter requires free-threading or some other workaround (or maybe we make a non-parallel method) + + +def test_binned_dataset(): + dataset = Dataset( + [ + Event( + [Vector3(0.0, 0.0, 1.0).with_mass(1.0)], + [], + 1.0, + ), + Event( + [Vector3(0.0, 0.0, 2.0).with_mass(2.0)], + [], + 2.0, + ), + ] + ) + + mass = Mass([0]) + binned = 
dataset.bin_by(mass, 2, (0.0, 3.0)) + + assert binned.bins == 2 + assert len(binned.edges) == 3 + assert binned.edges[0] == 0.0 + assert binned.edges[2] == 3.0 + assert len(binned[0]) == 1 + assert binned[0].weighted_len() == 1.0 + assert len(binned[1]) == 1 + assert binned[1].weighted_len() == 2.0 + + +def test_dataset_bootstrap(): + dataset = Dataset( + [ + make_test_event(), + Event( + make_test_event().p4s, + make_test_event().eps, + 1.0, + ), + ] + ) + assert dataset[0].weight != dataset[1].weight + + bootstrapped = dataset.bootstrap(42) + assert len(bootstrapped) == len(dataset) + assert bootstrapped[0].weight == bootstrapped[1].weight + + empty_dataset = Dataset([]) + empty_bootstrap = empty_dataset.bootstrap(42) + assert len(empty_bootstrap) == 0 + + +def test_event_display(): + event = make_test_event() + display_string = str(event) + assert ( + display_string + == 'Event:\n p4s:\n [e = 8.74700; p = (0.00000, 0.00000, 8.74700); m = 0.00000]\n [e = 1.10334; p = (0.11900, 0.37400, 0.22200); m = 1.00700]\n [e = 3.13671; p = (-0.11200, 0.29300, 3.08100); m = 0.49800]\n [e = 5.50925; p = (-0.00700, -0.66700, 5.44600); m = 0.49800]\n eps:\n [0.385, 0.022, 0]\n weight:\n 0.48\n' + ) diff --git a/python/tests/test_kmatrix.py b/python/tests/test_kmatrix.py new file mode 100644 index 0000000..ead8ef5 --- /dev/null +++ b/python/tests/test_kmatrix.py @@ -0,0 +1,325 @@ +from laddu import Event, Dataset, Vector3, Mass, Manager, parameter +from laddu.amplitudes.kmatrix import ( + KopfKMatrixF0, + KopfKMatrixF2, + KopfKMatrixA0, + KopfKMatrixA2, + KopfKMatrixRho, + KopfKMatrixPi1, +) +import pytest + + +def make_test_event() -> Event: + return Event( + [ + Vector3(0.0, 0.0, 8.747).with_mass(0.0), + Vector3(0.119, 0.374, 0.222).with_mass(1.007), + Vector3(-0.112, 0.293, 3.081).with_mass(0.498), + Vector3(-0.007, -0.667, 5.446).with_mass(0.498), + ], + [Vector3(0.385, 0.022, 0.000)], + 0.48, + ) + + +def make_test_dataset() -> Dataset: + return Dataset([make_test_event()]) 
+ + +def test_f0_evaluation(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixF0( + 'f0', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + (parameter('p4'), parameter('p5')), + (parameter('p6'), parameter('p7')), + (parameter('p8'), parameter('p9')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) + assert pytest.approx(result[0].real) == 0.2674945 + assert pytest.approx(result[0].imag) == 0.7289451 + + +def test_f0_gradient(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixF0( + 'f0', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + (parameter('p4'), parameter('p5')), + (parameter('p6'), parameter('p7')), + (parameter('p8'), parameter('p9')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient( + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] + ) + assert pytest.approx(result[0][0].real) == -0.0324912 + assert pytest.approx(result[0][0].imag) == -0.01107348 + assert pytest.approx(result[0][1].real) == pytest.approx(-result[0][0].imag) + assert pytest.approx(result[0][1].imag) == pytest.approx(result[0][0].real) + assert pytest.approx(result[0][2].real) == 0.0241053 + assert pytest.approx(result[0][2].imag) == 0.007918499 + assert pytest.approx(result[0][3].real) == pytest.approx(-result[0][2].imag) + assert pytest.approx(result[0][3].imag) == pytest.approx(result[0][2].real) + assert pytest.approx(result[0][4].real) == -0.0316345 + assert pytest.approx(result[0][4].imag) == 0.01491556 + assert pytest.approx(result[0][5].real) == pytest.approx(-result[0][4].imag) + assert pytest.approx(result[0][5].imag) == pytest.approx(result[0][4].real) + assert pytest.approx(result[0][6].real) == 
0.5838982 + assert pytest.approx(result[0][6].imag) == 0.2071617 + assert pytest.approx(result[0][7].real) == pytest.approx(-result[0][6].imag) + assert pytest.approx(result[0][7].imag) == pytest.approx(result[0][6].real) + assert pytest.approx(result[0][8].real) == 0.0914546 + assert pytest.approx(result[0][8].imag) == 0.03607718 + assert pytest.approx(result[0][9].real) == pytest.approx(-result[0][8].imag) + assert pytest.approx(result[0][9].imag) == pytest.approx(result[0][8].real) + + +def test_f2_evaluation(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixF2( + 'f2', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + (parameter('p4'), parameter('p5')), + (parameter('p6'), parameter('p7')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) + assert pytest.approx(result[0].real) == 0.02523304 + assert pytest.approx(result[0].imag) == 0.3971239 + + +def test_f2_gradient(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixF2( + 'f2', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + (parameter('p4'), parameter('p5')), + (parameter('p6'), parameter('p7')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) + assert pytest.approx(result[0][0].real) == -0.3078948 + assert pytest.approx(result[0][0].imag) == 0.3808689 + assert pytest.approx(result[0][1].real) == pytest.approx(-result[0][0].imag) + assert pytest.approx(result[0][1].imag) == pytest.approx(result[0][0].real) + assert pytest.approx(result[0][2].real) == 0.4290085 + assert pytest.approx(result[0][2].imag) == 0.0799660 + assert pytest.approx(result[0][3].real) == pytest.approx(-result[0][2].imag) + 
assert pytest.approx(result[0][3].imag) == pytest.approx(result[0][2].real) + assert pytest.approx(result[0][4].real) == 0.1657487 + assert pytest.approx(result[0][4].imag) == -0.00413829 + assert pytest.approx(result[0][5].real) == pytest.approx(-result[0][4].imag) + assert pytest.approx(result[0][5].imag) == pytest.approx(result[0][4].real) + assert pytest.approx(result[0][6].real) == 0.0594691 + assert pytest.approx(result[0][6].imag) == 0.1143819 + assert pytest.approx(result[0][7].real) == pytest.approx(-result[0][6].imag) + assert pytest.approx(result[0][7].imag) == pytest.approx(result[0][6].real) + + +def test_a0_evaluation(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixA0( + 'a0', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([0.1, 0.2, 0.3, 0.4]) + assert pytest.approx(result[0].real) == -0.8002759 + assert pytest.approx(result[0].imag) == -0.1359306 + + +def test_a0_gradient(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixA0( + 'a0', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient([0.1, 0.2, 0.3, 0.4]) + assert pytest.approx(result[0][0].real) == 0.2906192 + assert pytest.approx(result[0][0].imag) == -0.0998906 + assert pytest.approx(result[0][1].real) == pytest.approx(-result[0][0].imag) + assert pytest.approx(result[0][1].imag) == pytest.approx(result[0][0].real) + assert pytest.approx(result[0][2].real) == -1.3136838 + assert pytest.approx(result[0][2].imag) == 1.1380269 + assert pytest.approx(result[0][3].real) == pytest.approx(-result[0][2].imag) + assert pytest.approx(result[0][3].imag) == 
pytest.approx(result[0][2].real) + + +def test_a2_evaluation(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixA2( + 'a2', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([0.1, 0.2, 0.3, 0.4]) + assert pytest.approx(result[0].real) == -0.2092661 + assert pytest.approx(result[0].imag) == -0.0985062 + + +def test_a2_gradient(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixA2( + 'a2', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient([0.1, 0.2, 0.3, 0.4]) + assert pytest.approx(result[0][0].real) == -0.5756896 + assert pytest.approx(result[0][0].imag) == 0.9398863 + assert pytest.approx(result[0][1].real) == pytest.approx(-result[0][0].imag) + assert pytest.approx(result[0][1].imag) == pytest.approx(result[0][0].real) + assert pytest.approx(result[0][2].real) == -0.0811143 + assert pytest.approx(result[0][2].imag) == -0.1522787 + assert pytest.approx(result[0][3].real) == pytest.approx(-result[0][2].imag) + assert pytest.approx(result[0][3].imag) == pytest.approx(result[0][2].real) + + +def test_rho_evaluation(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixRho( + 'rho', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([0.1, 0.2, 0.3, 0.4]) + assert pytest.approx(result[0].real) == 0.0948355 + assert pytest.approx(result[0].imag) == 0.2609183 + + +def test_rho_gradient(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = 
KopfKMatrixRho( + 'rho', + ( + (parameter('p0'), parameter('p1')), + (parameter('p2'), parameter('p3')), + ), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient([0.1, 0.2, 0.3, 0.4]) + assert pytest.approx(result[0][0].real) == 0.0265203 + assert pytest.approx(result[0][0].imag) == -0.02660265 + assert pytest.approx(result[0][1].real) == pytest.approx(-result[0][0].imag) + assert pytest.approx(result[0][1].imag) == pytest.approx(result[0][0].real) + assert pytest.approx(result[0][2].real) == 0.5172379 + assert pytest.approx(result[0][2].imag) == 0.1707373 + assert pytest.approx(result[0][3].real) == pytest.approx(-result[0][2].imag) + assert pytest.approx(result[0][3].imag) == pytest.approx(result[0][2].real) + + +def test_pi1_evaluation(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixPi1( + 'pi1', + ((parameter('p0'), parameter('p1')),), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([0.1, 0.2]) + assert pytest.approx(result[0].real) == -0.1101758 + assert pytest.approx(result[0].imag) == 0.2638717 + + +def test_pi1_gradient(): + manager = Manager() + res_mass = Mass([2, 3]) + amp = KopfKMatrixPi1( + 'pi1', + ((parameter('p0'), parameter('p1')),), + 1, + res_mass, + ) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient([0.1, 0.2]) + assert pytest.approx(result[0][0].real) == -14.7987174 + assert pytest.approx(result[0][0].imag) == -5.8430094 + assert pytest.approx(result[0][1].real) == pytest.approx(-result[0][0].imag) + assert pytest.approx(result[0][1].imag) == pytest.approx(result[0][0].real) diff --git a/python/tests/test_variables.py b/python/tests/test_variables.py new file mode 100644 index 0000000..a008160 --- /dev/null +++ 
b/python/tests/test_variables.py @@ -0,0 +1,132 @@ +from laddu import ( + Mass, + CosTheta, + Phi, + Angles, + PolMagnitude, + PolAngle, + Polarization, + Mandelstam, + Event, + Vector3, + Dataset, +) +import pytest + + +def make_test_event() -> Event: + return Event( + [ + Vector3(0.0, 0.0, 8.747).with_mass(0.0), + Vector3(0.119, 0.374, 0.222).with_mass(1.007), + Vector3(-0.112, 0.293, 3.081).with_mass(0.498), + Vector3(-0.007, -0.667, 5.446).with_mass(0.498), + ], + [Vector3(0.385, 0.022, 0.000)], + 0.48, + ) + + +def make_test_dataset() -> Dataset: + return Dataset([make_test_event()]) + + +def test_mass_single_particle(): + event = make_test_event() + mass = Mass([1]) + assert mass.value(event) == 1.007 + + +def test_mass_multiple_particles(): + event = make_test_event() + mass = Mass([2, 3]) + assert pytest.approx(mass.value(event)) == 1.3743786 + + +def test_costheta_helicity(): + event = make_test_event() + costheta = CosTheta(0, [1], [2], [2, 3], 'Helicity') + assert pytest.approx(costheta.value(event)) == -0.4611175 + + +def test_phi_helicity(): + event = make_test_event() + phi = Phi(0, [1], [2], [2, 3], 'Helicity') + assert pytest.approx(phi.value(event)) == -2.6574625 + + +def test_costheta_gottfried_jackson(): + event = make_test_event() + costheta = CosTheta(0, [1], [2], [2, 3], 'Gottfried-Jackson') + assert pytest.approx(costheta.value(event)) == 0.09198832 + + +def test_phi_gottfried_jackson(): + event = make_test_event() + phi = Phi(0, [1], [2], [2, 3], 'Gottfried-Jackson') + assert pytest.approx(phi.value(event)) == -2.7139131 + + +def test_angles(): + event = make_test_event() + angles = Angles(0, [1], [2], [2, 3], 'Helicity') + assert pytest.approx(angles.costheta.value(event)) == -0.4611175 + assert pytest.approx(angles.phi.value(event)) == -2.6574625 + + +def test_pol_angle(): + event = make_test_event() + pol_angle = PolAngle(0, [1]) + assert pytest.approx(pol_angle.value(event)) == 1.9359298 + + +def test_pol_magnitude(): + event = 
make_test_event() + pol_magnitude = PolMagnitude(0) + assert pytest.approx(pol_magnitude.value(event)) == 0.3856280 + + +def test_polarization(): + event = make_test_event() + polarization = Polarization(0, [1]) + assert pytest.approx(polarization.pol_angle.value(event)) == 1.9359298 + assert pytest.approx(polarization.pol_magnitude.value(event)) == 0.3856280 + + +def test_mandelstam(): + event = make_test_event() + s = Mandelstam([0], [], [2, 3], [1], 's') + t = Mandelstam([0], [], [2, 3], [1], 't') + u = Mandelstam([0], [], [2, 3], [1], 'u') + sp = Mandelstam([], [0], [1], [2, 3], 's') + tp = Mandelstam([], [0], [1], [2, 3], 't') + up = Mandelstam([], [0], [1], [2, 3], 'u') + assert pytest.approx(s.value(event)) == 18.504011 + assert pytest.approx(s.value(event)) == pytest.approx(sp.value(event)) + assert pytest.approx(t.value(event)) == -0.1922285 + assert pytest.approx(t.value(event)) == pytest.approx(tp.value(event)) + assert pytest.approx(u.value(event)) == -14.4041989 + assert pytest.approx(u.value(event)) == pytest.approx(up.value(event)) + m2_beam = event.get_p4_sum([0]).m2 + m2_recoil = event.get_p4_sum([1]).m2 + m2_res = event.get_p4_sum([2, 3]).m2 + assert ( + pytest.approx( + s.value(event) + + t.value(event) + + u.value(event) + - m2_beam + - m2_recoil + - m2_res, + abs=1e-2, + ) + == 1.0 + ) + + +def test_variable_value_on(): + dataset = make_test_dataset() + mass = Mass([2, 3]) + values = mass.value_on(dataset) + assert len(values) == 1 + assert pytest.approx(values[0]) == 1.3743786 diff --git a/python/tests/test_vectors.py b/python/tests/test_vectors.py new file mode 100644 index 0000000..c03ded0 --- /dev/null +++ b/python/tests/test_vectors.py @@ -0,0 +1,90 @@ +from laddu import Vector3, Vector4 +import numpy as np +import pytest + + +def test_three_to_four_momentum_conversion(): + p3 = Vector3(1.0, 2.0, 3.0) + target_p4 = Vector4(1.0, 2.0, 3.0, 10.0) + p4_from_mass = p3.with_mass(target_p4.m) + assert target_p4.e == p4_from_mass.e + assert 
target_p4.px == p4_from_mass.px + assert target_p4.py == p4_from_mass.py + assert target_p4.pz == p4_from_mass.pz + p4_from_energy = p3.with_energy(target_p4.e) + assert target_p4.e == p4_from_energy.e + assert target_p4.px == p4_from_energy.px + assert target_p4.py == p4_from_energy.py + assert target_p4.pz == p4_from_energy.pz + + +def test_four_momentum_basics(): + p = Vector4(3.0, 4.0, 5.0, 10.0) + assert p.e == 10.0 + assert p.px == 3.0 + assert p.py == 4.0 + assert p.pz == 5.0 + assert p.momentum.px == 3.0 + assert p.momentum.py == 4.0 + assert p.momentum.pz == 5.0 + assert p.beta.px == 0.3 + assert p.beta.py == 0.4 + assert p.beta.pz == 0.5 + assert p.m2 == 50.0 + assert p.m == np.sqrt(50.0) + assert repr(p) == '[e = 10.00000; p = (3.00000, 4.00000, 5.00000); m = 7.07107]' + + +def test_three_momentum_basics(): + p = Vector4(3.0, 4.0, 5.0, 10.0) + q = Vector4(1.2, -3.4, 7.6, 0.0) + p3_view = p.momentum + q3_view = q.momentum + assert p3_view.px == 3.0 + assert p3_view.py == 4.0 + assert p3_view.pz == 5.0 + assert p3_view.mag2 == 50.0 + assert p3_view.mag == np.sqrt(50.0) + assert p3_view.costheta == 5.0 / np.sqrt(50.0) + assert p3_view.theta == np.acos(5.0 / np.sqrt(50.0)) + assert p3_view.phi == np.atan2(4.0, 3.0) + assert pytest.approx(p3_view.unit.px) == 3.0 / np.sqrt(50.0) + assert pytest.approx(p3_view.unit.py) == 4.0 / np.sqrt(50.0) + assert pytest.approx(p3_view.unit.pz) == 5.0 / np.sqrt(50.0) + assert pytest.approx(p3_view.cross(q3_view).px) == 47.4 + assert pytest.approx(p3_view.cross(q3_view).py) == -16.8 + assert pytest.approx(p3_view.cross(q3_view).pz) == -15.0 + p3 = Vector3(3.0, 4.0, 5.0) + q3 = Vector3(1.2, -3.4, 7.6) + assert p3.px == 3.0 + assert p3.py == 4.0 + assert p3.pz == 5.0 + assert p3.mag2 == 50.0 + assert p3.mag == np.sqrt(50.0) + assert p3.costheta == 5.0 / np.sqrt(50.0) + assert p3.theta == np.acos(5.0 / np.sqrt(50.0)) + assert p3.phi == np.atan2(4.0, 3.0) + assert pytest.approx(p3.unit.px) == 3.0 / np.sqrt(50.0) + assert 
pytest.approx(p3.unit.py) == 4.0 / np.sqrt(50.0) + assert pytest.approx(p3.unit.pz) == 5.0 / np.sqrt(50.0) + assert pytest.approx(p3.cross(q3).px) == 47.4 + assert pytest.approx(p3.cross(q3).py) == -16.8 + assert pytest.approx(p3.cross(q3).pz) == -15.0 + + +def test_boost_com(): + p = Vector4(3.0, 4.0, 5.0, 10.0) + zero = p.boost(-p.beta) + assert zero.px == 0.0 + assert zero.py == 0.0 + assert zero.pz == 0.0 + + +def test_boost(): + p1 = Vector4(3.0, 4.0, 5.0, 10.0) + p2 = Vector4(3.4, 2.3, 1.2, 9.0) + p1_boosted = p1.boost(-p2.beta) + assert p1_boosted.e == 8.157632144622882 + assert p1_boosted.px == -0.6489200627053444 + assert p1_boosted.py == 1.5316128987581492 + assert p1_boosted.pz == 3.712145860221643 diff --git a/python/tests/test_ylm.py b/python/tests/test_ylm.py new file mode 100644 index 0000000..7ee3690 --- /dev/null +++ b/python/tests/test_ylm.py @@ -0,0 +1,44 @@ +from laddu import Event, Dataset, Vector3, Zlm, Angles, Manager, Polarization +import pytest + + +def make_test_event() -> Event: + return Event( + [ + Vector3(0.0, 0.0, 8.747).with_mass(0.0), + Vector3(0.119, 0.374, 0.222).with_mass(1.007), + Vector3(-0.112, 0.293, 3.081).with_mass(0.498), + Vector3(-0.007, -0.667, 5.446).with_mass(0.498), + ], + [Vector3(0.385, 0.022, 0.000)], + 0.48, + ) + + +def make_test_dataset() -> Dataset: + return Dataset([make_test_event()]) + + +def test_zlm_evaluation(): + manager = Manager() + angles = Angles(0, [1], [2], [2, 3], 'Helicity') + polarization = Polarization(0, [1]) + amp = Zlm('zlm', 1, 1, '+', angles, polarization) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([]) + assert pytest.approx(result[0].real) == 0.04284127 + assert pytest.approx(result[0].imag) == -0.2385963 + + +def test_ylm_gradient(): + manager = Manager() + angles = Angles(0, [1], [2], [2, 3], 'Helicity') + polarization = Polarization(0, [1]) + amp = Zlm('zlm', 1, 1, '+', angles, polarization) + 
aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient([]) + assert len(result[0]) == 0 # amplitude has no parameters diff --git a/python/tests/test_zlm.py b/python/tests/test_zlm.py new file mode 100644 index 0000000..01fe59d --- /dev/null +++ b/python/tests/test_zlm.py @@ -0,0 +1,42 @@ +from laddu import Event, Dataset, Vector3, Ylm, Angles, Manager +import pytest + + +def make_test_event() -> Event: + return Event( + [ + Vector3(0.0, 0.0, 8.747).with_mass(0.0), + Vector3(0.119, 0.374, 0.222).with_mass(1.007), + Vector3(-0.112, 0.293, 3.081).with_mass(0.498), + Vector3(-0.007, -0.667, 5.446).with_mass(0.498), + ], + [Vector3(0.385, 0.022, 0.000)], + 0.48, + ) + + +def make_test_dataset() -> Dataset: + return Dataset([make_test_event()]) + + +def test_ylm_evaluation(): + manager = Manager() + angles = Angles(0, [1], [2], [2, 3], 'Helicity') + amp = Ylm('ylm', 1, 1, angles) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate([]) + assert pytest.approx(result[0].real) == 0.2713394 + assert pytest.approx(result[0].imag) == 0.1426897 + + +def test_ylm_gradient(): + manager = Manager() + angles = Angles(0, [1], [2], [2, 3], 'Helicity') + amp = Ylm('ylm', 1, 1, angles) + aid = manager.register(amp) + dataset = make_test_dataset() + evaluator = manager.load(aid, dataset) + result = evaluator.evaluate_gradient([]) + assert len(result[0]) == 0 # amplitude has no parameters diff --git a/python_examples/example_1/example_1.py b/python_examples/example_1/example_1.py index ebb4da4..dee97b3 100755 --- a/python_examples/example_1/example_1.py +++ b/python_examples/example_1/example_1.py @@ -24,39 +24,52 @@ from uncertainties import ufloat -def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 +def main(bins: int, niters: int, nboot: int): script_dir = Path(os.path.realpath(__file__)).parent.resolve() - 
data_file = str(script_dir / "data_1.parquet") - accmc_file = str(script_dir / "accmc_1.parquet") - genmc_file = str(script_dir / "mc_1.parquet") + data_file = str(script_dir / 'data_1.parquet') + accmc_file = str(script_dir / 'accmc_1.parquet') + genmc_file = str(script_dir / 'mc_1.parquet') start = perf_counter() - logger.info("Opening Data file...") + logger.info('Opening Data file...') data_ds = ld.open(data_file) - logger.info("Opening AccMC file...") + logger.info('Opening AccMC file...') accmc_ds = ld.open(accmc_file) - logger.info("Opening GenMC file...") + logger.info('Opening GenMC file...') genmc_ds = ld.open(genmc_file) - (binned_tot, binned_tot_err), (binned_s0p, binned_s0p_err), (binned_d2p, binned_d2p_err), bin_edges = fit_binned( - bins, niters, nboot, data_ds, accmc_ds, genmc_ds - ) - tot_weights, s0p_weights, d2p_weights, status, boot_statuses, parameters = fit_unbinned( - niters, nboot, data_ds, accmc_ds, genmc_ds + ( + (binned_tot, binned_tot_err), + (binned_s0p, binned_s0p_err), + (binned_d2p, binned_d2p_err), + bin_edges, + ) = fit_binned(bins, niters, nboot, data_ds, accmc_ds, genmc_ds) + tot_weights, s0p_weights, d2p_weights, status, boot_statuses, parameters = ( + fit_unbinned(niters, nboot, data_ds, accmc_ds, genmc_ds) ) end = perf_counter() - logger.info(f"Total time: {end - start:.3f}s") - logger.info(f"\n\n{status}") - - f0_width = status.x[parameters.index("f0_width")] - f2_width = status.x[parameters.index("f2_width")] - f0_re = status.x[parameters.index("S0+ re")] - f2_re = status.x[parameters.index("D2+ re")] - f2_im = status.x[parameters.index("D2+ im")] - - f0_width_err = np.array([boot_status.x[parameters.index("f0_width")] for boot_status in boot_statuses]).std() - f2_width_err = np.array([boot_status.x[parameters.index("f2_width")] for boot_status in boot_statuses]).std() - f0_re_err = np.array([boot_status.x[parameters.index("S0+ re")] for boot_status in boot_statuses]).std() - f2_re_err = 
np.array([boot_status.x[parameters.index("D2+ re")] for boot_status in boot_statuses]).std() - f2_im_err = np.array([boot_status.x[parameters.index("D2+ im")] for boot_status in boot_statuses]).std() + logger.info(f'Total time: {end - start:.3f}s') + logger.info(f'\n\n{status}') + + f0_width = status.x[parameters.index('f0_width')] + f2_width = status.x[parameters.index('f2_width')] + f0_re = status.x[parameters.index('S0+ re')] + f2_re = status.x[parameters.index('D2+ re')] + f2_im = status.x[parameters.index('D2+ im')] + + f0_width_err = np.array( + [boot_status.x[parameters.index('f0_width')] for boot_status in boot_statuses] + ).std() + f2_width_err = np.array( + [boot_status.x[parameters.index('f2_width')] for boot_status in boot_statuses] + ).std() + f0_re_err = np.array( + [boot_status.x[parameters.index('S0+ re')] for boot_status in boot_statuses] + ).std() + f2_re_err = np.array( + [boot_status.x[parameters.index('D2+ re')] for boot_status in boot_statuses] + ).std() + f2_im_err = np.array( + [boot_status.x[parameters.index('D2+ im')] for boot_status in boot_statuses] + ).std() u_f0_re = ufloat(f0_re, f0_re_err) u_f2_re = ufloat(f2_re, f2_re_err) @@ -64,14 +77,20 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 sd_ratio = u_f0_re / unp.sqrt(unp.pow(u_f2_re, 2) + unp.pow(u_f2_im, 2)) # pyright: ignore sd_phase = unp.atan2(u_f2_im, u_f2_re) # pyright: ignore - table = Table("", "f0(1500) Width", "f2'(1525) Width", "S/D Ratio", "S-D Phase") - table.add_row("Truth", "0.11200", "0.08600", f"{100 / np.sqrt(50**2 + 50**2):.5f}", f"{np.atan2(50, 50):.5f}") + table = Table('', 'f0(1500) Width', "f2'(1525) Width", 'S/D Ratio', 'S-D Phase') + table.add_row( + 'Truth', + '0.11200', + '0.08600', + f'{100 / np.sqrt(50**2 + 50**2):.5f}', + f'{np.atan2(50, 50):.5f}', + ) table.add_row( - "Fit", - f"{f0_width:.5f} ± {f0_width_err:.5f}", - f"{f2_width:.5f} ± {f2_width_err:.5f}", - f"{sd_ratio.n:.5f} ± {sd_ratio.s:.5f}", - f"{sd_phase.n:.5f} ± 
{sd_phase.s:.5f}", + 'Fit', + f'{f0_width:.5f} ± {f0_width_err:.5f}', + f'{f2_width:.5f} ± {f2_width_err:.5f}', + f'{sd_ratio.n:.5f} ± {sd_ratio.s:.5f}', + f'{sd_phase.n:.5f} ± {sd_phase.s:.5f}', ) pprint(table) @@ -85,30 +104,34 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 m_acceptance = m_accmc_hist / m_genmc_hist m_data_corr_hist = m_data_hist / m_acceptance - font = {"family": "DejaVu Sans", "weight": "normal", "size": 22} - plt.rc("font", **font) - plt.rc("axes", titlesize=48) - plt.rc("legend", fontsize=24) - plt.rc("xtick", direction="in") - plt.rc("ytick", direction="in") - plt.rcParams["xtick.minor.visible"] = True - plt.rcParams["xtick.minor.size"] = 4 - plt.rcParams["xtick.minor.width"] = 1 - plt.rcParams["xtick.major.size"] = 8 - plt.rcParams["xtick.major.width"] = 1 - plt.rcParams["ytick.minor.visible"] = True - plt.rcParams["ytick.minor.size"] = 4 - plt.rcParams["ytick.minor.width"] = 1 - plt.rcParams["ytick.major.size"] = 8 - plt.rcParams["ytick.major.width"] = 1 - - red = "#EF3A47" - blue = "#007BC0" - black = "#000000" + font = {'family': 'DejaVu Sans', 'weight': 'normal', 'size': 22} + plt.rc('font', **font) + plt.rc('axes', titlesize=48) + plt.rc('legend', fontsize=24) + plt.rc('xtick', direction='in') + plt.rc('ytick', direction='in') + plt.rcParams['xtick.minor.visible'] = True + plt.rcParams['xtick.minor.size'] = 4 + plt.rcParams['xtick.minor.width'] = 1 + plt.rcParams['xtick.major.size'] = 8 + plt.rcParams['xtick.major.width'] = 1 + plt.rcParams['ytick.minor.visible'] = True + plt.rcParams['ytick.minor.size'] = 4 + plt.rcParams['ytick.minor.width'] = 1 + plt.rcParams['ytick.major.size'] = 8 + plt.rcParams['ytick.major.width'] = 1 + + red = '#EF3A47' + blue = '#007BC0' + black = '#000000' _, ax = plt.subplots(ncols=2, sharey=True, figsize=(22, 12)) - ax[0].hist(m_data, bins=bins, range=(1, 2), color=black, histtype="step", label="Data") - ax[1].hist(m_data, bins=bins, range=(1, 2), color=black, histtype="step", 
label="Data") + ax[0].hist( + m_data, bins=bins, range=(1, 2), color=black, histtype='step', label='Data' + ) + ax[1].hist( + m_data, bins=bins, range=(1, 2), color=black, histtype='step', label='Data' + ) ax[0].hist( m_accmc, weights=tot_weights[0], @@ -116,7 +139,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 range=(1, 2), color=black, alpha=0.1, - label="Fit (unbinned)", + label='Fit (unbinned)', ) ax[1].hist( m_accmc, @@ -125,7 +148,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 range=(1, 2), color=black, alpha=0.1, - label="Fit (unbinned)", + label='Fit (unbinned)', ) ax[0].hist( m_accmc, @@ -134,7 +157,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 range=(1, 2), color=blue, alpha=0.1, - label="$S_0^+$ (unbinned)", + label='$S_0^+$ (unbinned)', ) ax[1].hist( m_accmc, @@ -143,30 +166,58 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 range=(1, 2), color=red, alpha=0.1, - label="$D_2^+$ (unbinned)", + label='$D_2^+$ (unbinned)', ) centers = (bin_edges[1:] + bin_edges[:-1]) / 2 - ax[0].errorbar(centers, binned_tot[0], yerr=binned_tot_err[0], fmt=".", color=black, label="Fit (binned)") - ax[1].errorbar(centers, binned_tot[0], yerr=binned_tot_err[0], fmt=".", color=black, label="Fit (binned)") - ax[0].errorbar(centers, binned_s0p[0], yerr=binned_s0p_err[0], fmt=".", color=blue, label="$S_0^+$ (binned)") - ax[1].errorbar(centers, binned_d2p[0], yerr=binned_d2p_err[0], fmt=".", color=red, label="$D_2^+$ (binned)") + ax[0].errorbar( + centers, + binned_tot[0], + yerr=binned_tot_err[0], + fmt='.', + color=black, + label='Fit (binned)', + ) + ax[1].errorbar( + centers, + binned_tot[0], + yerr=binned_tot_err[0], + fmt='.', + color=black, + label='Fit (binned)', + ) + ax[0].errorbar( + centers, + binned_s0p[0], + yerr=binned_s0p_err[0], + fmt='.', + color=blue, + label='$S_0^+$ (binned)', + ) + ax[1].errorbar( + centers, + binned_d2p[0], + yerr=binned_d2p_err[0], + fmt='.', + color=red, + 
label='$D_2^+$ (binned)', + ) ax[0].legend() ax[1].legend() ax[0].set_ylim(0) ax[1].set_ylim(0) - ax[0].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)") - ax[1].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)") + ax[0].set_xlabel('Mass of $K_S^0 K_S^0$ (GeV/$c^2$)') + ax[1].set_xlabel('Mass of $K_S^0 K_S^0$ (GeV/$c^2$)') bin_width = int(1000 / bins) - ax[0].set_ylabel(f"Counts / {bin_width} MeV/$c^2$") - ax[1].set_ylabel(f"Counts / {bin_width} MeV/$c^2$") + ax[0].set_ylabel(f'Counts / {bin_width} MeV/$c^2$') + ax[1].set_ylabel(f'Counts / {bin_width} MeV/$c^2$') plt.tight_layout() - plt.savefig("example_1.svg") + plt.savefig('example_1.svg') plt.close() _, ax = plt.subplots(ncols=2, sharey=True, figsize=(22, 12)) - ax[0].stairs(m_data_corr_hist, mass_bins, color=black, label="Data (corrected)") - ax[1].stairs(m_data_corr_hist, mass_bins, color=black, label="Data (corrected)") + ax[0].stairs(m_data_corr_hist, mass_bins, color=black, label='Data (corrected)') + ax[1].stairs(m_data_corr_hist, mass_bins, color=black, label='Data (corrected)') ax[0].hist( m_genmc, weights=tot_weights[1], @@ -174,7 +225,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 range=(1, 2), color=black, alpha=0.1, - label="Fit (unbinned)", + label='Fit (unbinned)', ) ax[1].hist( m_genmc, @@ -183,7 +234,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 range=(1, 2), color=black, alpha=0.1, - label="Fit (unbinned)", + label='Fit (unbinned)', ) ax[0].hist( m_genmc, @@ -192,7 +243,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 range=(1, 2), color=blue, alpha=0.1, - label="$S_0^+$ (unbinned)", + label='$S_0^+$ (unbinned)', ) ax[1].hist( m_genmc, @@ -201,37 +252,70 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 range=(1, 2), color=red, alpha=0.1, - label="$D_2^+$ (unbinned)", + label='$D_2^+$ (unbinned)', ) centers = (bin_edges[1:] + bin_edges[:-1]) / 2 - ax[0].errorbar(centers, binned_tot[1], yerr=binned_tot_err[1], fmt=".", 
color=black, label="Fit (binned)") - ax[1].errorbar(centers, binned_tot[1], yerr=binned_tot_err[1], fmt=".", color=black, label="Fit (binned)") - ax[0].errorbar(centers, binned_s0p[1], yerr=binned_s0p_err[1], fmt=".", color=blue, label="$S_0^+$ (binned)") - ax[1].errorbar(centers, binned_d2p[1], yerr=binned_d2p_err[1], fmt=".", color=red, label="$D_2^+$ (binned)") + ax[0].errorbar( + centers, + binned_tot[1], + yerr=binned_tot_err[1], + fmt='.', + color=black, + label='Fit (binned)', + ) + ax[1].errorbar( + centers, + binned_tot[1], + yerr=binned_tot_err[1], + fmt='.', + color=black, + label='Fit (binned)', + ) + ax[0].errorbar( + centers, + binned_s0p[1], + yerr=binned_s0p_err[1], + fmt='.', + color=blue, + label='$S_0^+$ (binned)', + ) + ax[1].errorbar( + centers, + binned_d2p[1], + yerr=binned_d2p_err[1], + fmt='.', + color=red, + label='$D_2^+$ (binned)', + ) ax[0].legend() ax[1].legend() ax[0].set_ylim(0) ax[1].set_ylim(0) - ax[0].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)") - ax[1].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)") + ax[0].set_xlabel('Mass of $K_S^0 K_S^0$ (GeV/$c^2$)') + ax[1].set_xlabel('Mass of $K_S^0 K_S^0$ (GeV/$c^2$)') bin_width = int(1000 / bins) - ax[0].set_ylabel(f"Counts / {bin_width} MeV/$c^2$") - ax[1].set_ylabel(f"Counts / {bin_width} MeV/$c^2$") + ax[0].set_ylabel(f'Counts / {bin_width} MeV/$c^2$') + ax[1].set_ylabel(f'Counts / {bin_width} MeV/$c^2$') plt.tight_layout() - plt.savefig("example_1_corrected.svg") + plt.savefig('example_1_corrected.svg') plt.close() -def fit_binned( # noqa: PLR0915 - bins: int, niters: int, nboot: int, data_ds: ld.Dataset, accmc_ds: ld.Dataset, genmc_ds: ld.Dataset +def fit_binned( + bins: int, + niters: int, + nboot: int, + data_ds: ld.Dataset, + accmc_ds: ld.Dataset, + genmc_ds: ld.Dataset, ) -> tuple[ tuple[tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray]], tuple[tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray]], tuple[tuple[np.ndarray, np.ndarray], tuple[np.ndarray, 
np.ndarray]], np.ndarray, ]: - logger.info("Starting Binned Fit") + logger.info('Starting Binned Fit') res_mass = ld.Mass([2, 3]) angles = ld.Angles(0, [1], [2], [2, 3]) polarization = ld.Polarization(0, [1]) @@ -239,10 +323,12 @@ def fit_binned( # noqa: PLR0915 accmc_ds_binned = accmc_ds.bin_by(res_mass, bins, (1.0, 2.0)) genmc_ds_binned = genmc_ds.bin_by(res_mass, bins, (1.0, 2.0)) manager = ld.Manager() - z00p = manager.register(ld.Zlm("Z00+", 0, 0, "+", angles, polarization)) - z22p = manager.register(ld.Zlm("Z22+", 2, 2, "+", angles, polarization)) - s0p = manager.register(ld.Scalar("S0+", ld.parameter("S0+ re"))) - d2p = manager.register(ld.ComplexScalar("D2+", ld.parameter("D2+ re"), ld.parameter("D2+ im"))) + z00p = manager.register(ld.Zlm('Z00+', 0, 0, '+', angles, polarization)) + z22p = manager.register(ld.Zlm('Z22+', 2, 2, '+', angles, polarization)) + s0p = manager.register(ld.Scalar('S0+', ld.parameter('S0+ re'))) + d2p = manager.register( + ld.ComplexScalar('D2+', ld.parameter('D2+ re'), ld.parameter('D2+ im')) + ) pos_re = (s0p * z00p.real() + d2p * z22p.real()).norm_sqr() pos_im = (s0p * z00p.imag() + d2p * z22p.imag()).norm_sqr() model = pos_re + pos_im @@ -263,7 +349,7 @@ def fit_binned( # noqa: PLR0915 rng = np.random.default_rng(0) for ibin in range(bins): - logger.info(f"Fitting Bin #{ibin}") + logger.info(f'Fitting Bin #{ibin}') best_nll = np.inf best_status = None nll = ld.NLL(manager, model, data_ds_binned[ibin], accmc_ds_binned[ibin]) @@ -272,7 +358,7 @@ def fit_binned( # noqa: PLR0915 genmc_ds_binned[ibin], ) for iiter in range(niters): - logger.info(f"Fitting Iteration #{iiter}") + logger.info(f'Fitting Iteration #{iiter}') p0 = rng.uniform(-1000.0, 1000.0, len(nll.parameters)) status = nll.minimize(p0) if status.fx < best_nll: @@ -280,15 +366,19 @@ def fit_binned( # noqa: PLR0915 best_status = status if best_status is None: - logger.error(f"All fits for bin #{ibin} failed!") + logger.error(f'All fits for bin #{ibin} failed!') 
sys.exit(1) tot_res.append(nll.project(best_status.x).sum()) tot_res_acc.append(nll.project(best_status.x, mc_evaluator=gen_eval).sum()) - s0p_res.append(nll.project_with(best_status.x, ["Z00+", "S0+"]).sum()) - s0p_res_acc.append(nll.project_with(best_status.x, ["Z00+", "S0+"], mc_evaluator=gen_eval).sum()) - d2p_res.append(nll.project_with(best_status.x, ["Z22+", "D2+"]).sum()) - d2p_res_acc.append(nll.project_with(best_status.x, ["Z22+", "D2+"], mc_evaluator=gen_eval).sum()) + s0p_res.append(nll.project_with(best_status.x, ['Z00+', 'S0+']).sum()) + s0p_res_acc.append( + nll.project_with(best_status.x, ['Z00+', 'S0+'], mc_evaluator=gen_eval).sum() + ) + d2p_res.append(nll.project_with(best_status.x, ['Z22+', 'D2+']).sum()) + d2p_res_acc.append( + nll.project_with(best_status.x, ['Z22+', 'D2+'], mc_evaluator=gen_eval).sum() + ) nll.activate_all() gen_eval.activate_all() @@ -299,16 +389,31 @@ def fit_binned( # noqa: PLR0915 d2p_boot = [] d2p_boot_acc = [] for iboot in range(nboot): - logger.info(f"Running bootstrap fit #{iboot}") - boot_nll = ld.NLL(manager, model, data_ds_binned[ibin].bootstrap(iboot), accmc_ds_binned[ibin]) + logger.info(f'Running bootstrap fit #{iboot}') + boot_nll = ld.NLL( + manager, + model, + data_ds_binned[ibin].bootstrap(iboot), + accmc_ds_binned[ibin], + ) boot_status = boot_nll.minimize(best_status.x) tot_boot.append(boot_nll.project(boot_status.x).sum()) - tot_boot_acc.append(boot_nll.project(boot_status.x, mc_evaluator=gen_eval).sum()) - s0p_boot.append(boot_nll.project_with(boot_status.x, ["Z00+", "S0+"]).sum()) - s0p_boot_acc.append(boot_nll.project_with(boot_status.x, ["Z00+", "S0+"], mc_evaluator=gen_eval).sum()) - d2p_boot.append(boot_nll.project_with(boot_status.x, ["Z22+", "D2+"]).sum()) - d2p_boot_acc.append(boot_nll.project_with(boot_status.x, ["Z22+", "D2+"], mc_evaluator=gen_eval).sum()) + tot_boot_acc.append( + boot_nll.project(boot_status.x, mc_evaluator=gen_eval).sum() + ) + 
s0p_boot.append(boot_nll.project_with(boot_status.x, ['Z00+', 'S0+']).sum()) + s0p_boot_acc.append( + boot_nll.project_with( + boot_status.x, ['Z00+', 'S0+'], mc_evaluator=gen_eval + ).sum() + ) + d2p_boot.append(boot_nll.project_with(boot_status.x, ['Z22+', 'D2+']).sum()) + d2p_boot_acc.append( + boot_nll.project_with( + boot_status.x, ['Z22+', 'D2+'], mc_evaluator=gen_eval + ).sum() + ) boot_nll.activate_all() gen_eval.activate_all() tot_res_err.append(np.std(tot_boot)) @@ -319,15 +424,28 @@ def fit_binned( # noqa: PLR0915 d2p_res_acc_err.append(np.std(d2p_boot_acc)) return ( - ((np.array(tot_res), np.array(tot_res_acc)), (np.array(tot_res_err), np.array(tot_res_acc_err))), - ((np.array(s0p_res), np.array(s0p_res_acc)), (np.array(s0p_res_err), np.array(s0p_res_acc_err))), - ((np.array(d2p_res), np.array(d2p_res_acc)), (np.array(d2p_res_err), np.array(d2p_res_acc_err))), + ( + (np.array(tot_res), np.array(tot_res_acc)), + (np.array(tot_res_err), np.array(tot_res_acc_err)), + ), + ( + (np.array(s0p_res), np.array(s0p_res_acc)), + (np.array(s0p_res_err), np.array(s0p_res_acc_err)), + ), + ( + (np.array(d2p_res), np.array(d2p_res_acc)), + (np.array(d2p_res_err), np.array(d2p_res_acc_err)), + ), data_ds_binned.edges, ) def fit_unbinned( - niters: int, nboot: int, data_ds: ld.Dataset, accmc_ds: ld.Dataset, genmc_ds: ld.Dataset + niters: int, + nboot: int, + data_ds: ld.Dataset, + accmc_ds: ld.Dataset, + genmc_ds: ld.Dataset, ) -> tuple[ tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray], @@ -336,25 +454,39 @@ def fit_unbinned( list[ld.Status], list[str], ]: - logger.info("Starting Unbinned Fit") + logger.info('Starting Unbinned Fit') res_mass = ld.Mass([2, 3]) angles = ld.Angles(0, [1], [2], [2, 3]) polarization = ld.Polarization(0, [1]) manager = ld.Manager() - z00p = manager.register(ld.Zlm("Z00+", 0, 0, "+", angles, polarization)) - z22p = manager.register(ld.Zlm("Z22+", 2, 2, "+", angles, polarization)) + z00p = manager.register(ld.Zlm('Z00+', 0, 0, '+', 
angles, polarization)) + z22p = manager.register(ld.Zlm('Z22+', 2, 2, '+', angles, polarization)) bw_f01500 = manager.register( ld.BreitWigner( - "f0(1500)", ld.constant(1.506), ld.parameter("f0_width"), 0, ld.Mass([2]), ld.Mass([3]), res_mass + 'f0(1500)', + ld.constant(1.506), + ld.parameter('f0_width'), + 0, + ld.Mass([2]), + ld.Mass([3]), + res_mass, ) ) bw_f21525 = manager.register( ld.BreitWigner( - "f2(1525)", ld.constant(1.517), ld.parameter("f2_width"), 2, ld.Mass([2]), ld.Mass([3]), res_mass + 'f2(1525)', + ld.constant(1.517), + ld.parameter('f2_width'), + 2, + ld.Mass([2]), + ld.Mass([3]), + res_mass, ) ) - s0p = manager.register(ld.Scalar("S0+", ld.parameter("S0+ re"))) - d2p = manager.register(ld.ComplexScalar("D2+", ld.parameter("D2+ re"), ld.parameter("D2+ im"))) + s0p = manager.register(ld.Scalar('S0+', ld.parameter('S0+ re'))) + d2p = manager.register( + ld.ComplexScalar('D2+', ld.parameter('D2+ re'), ld.parameter('D2+ im')) + ) pos_re = (s0p * bw_f01500 * z00p.real() + d2p * bw_f21525 * z22p.real()).norm_sqr() pos_im = (s0p * bw_f01500 * z00p.imag() + d2p * bw_f21525 * z22p.imag()).norm_sqr() model = pos_re + pos_im @@ -376,7 +508,7 @@ def fit_unbinned( genmc_ds, ) for iiter in range(niters): - logger.info(f"Fitting Iteration #{iiter}") + logger.info(f'Fitting Iteration #{iiter}') p0 = rng.uniform(-1000.0, 1000.0, 3) p0 = np.append([0.8, 0.5], p0) status = nll.minimize(p0, bounds=bounds) @@ -385,20 +517,24 @@ def fit_unbinned( best_status = status if best_status is None: - logger.error("All unbinned fits failed!") + logger.error('All unbinned fits failed!') sys.exit(1) tot_weights = nll.project(best_status.x) tot_weights_acc = nll.project(best_status.x, mc_evaluator=gen_eval) - s0p_weights = nll.project_with(best_status.x, ["S0+", "Z00+", "f0(1500)"]) - s0p_weights_acc = nll.project_with(best_status.x, ["S0+", "Z00+", "f0(1500)"], mc_evaluator=gen_eval) - d2p_weights = nll.project_with(best_status.x, ["D2+", "Z22+", "f2(1525)"]) - d2p_weights_acc 
= nll.project_with(best_status.x, ["D2+", "Z22+", "f2(1525)"], mc_evaluator=gen_eval) + s0p_weights = nll.project_with(best_status.x, ['S0+', 'Z00+', 'f0(1500)']) + s0p_weights_acc = nll.project_with( + best_status.x, ['S0+', 'Z00+', 'f0(1500)'], mc_evaluator=gen_eval + ) + d2p_weights = nll.project_with(best_status.x, ['D2+', 'Z22+', 'f2(1525)']) + d2p_weights_acc = nll.project_with( + best_status.x, ['D2+', 'Z22+', 'f2(1525)'], mc_evaluator=gen_eval + ) nll.activate_all() boot_statuses = [] for iboot in range(nboot): - logger.info(f"Running bootstrap fit #{iboot}") + logger.info(f'Running bootstrap fit #{iboot}') boot_nll = ld.NLL(manager, model, data_ds.bootstrap(iboot), accmc_ds) boot_statuses.append(boot_nll.minimize(best_status.x)) @@ -412,6 +548,6 @@ def fit_unbinned( ) -if __name__ == "__main__": +if __name__ == '__main__': args = docopt(__doc__) # pyright: ignore - main(int(args["-n"]), int(args["-i"]), int(args["-b"])) + main(int(args['-n']), int(args['-i']), int(args['-b'])) diff --git a/src/amplitudes/breit_wigner.rs b/src/amplitudes/breit_wigner.rs index 4b11bc7..2040d53 100644 --- a/src/amplitudes/breit_wigner.rs +++ b/src/amplitudes/breit_wigner.rs @@ -109,7 +109,7 @@ mod tests { use std::sync::Arc; use super::*; - use crate::amplitudes::{constant, Manager}; + use crate::amplitudes::{parameter, Manager}; use crate::data::test_dataset; use crate::utils::variables::Mass; use approx::assert_relative_eq; @@ -119,8 +119,8 @@ mod tests { let mut manager = Manager::default(); let amp = BreitWigner::new( "bw", - constant(1.5), - constant(0.3), + parameter("mass"), + parameter("width"), 2, &Mass::new([2]), &Mass::new([3]), @@ -132,7 +132,7 @@ mod tests { let expr = aid.into(); let evaluator = manager.load(&expr, &dataset); - let result = evaluator.evaluate(&[]); + let result = evaluator.evaluate(&[1.5, 0.3]); assert_relative_eq!(result[0].re, 1.4585691, epsilon = 1e-7); assert_relative_eq!(result[0].im, 1.4107341, epsilon = 1e-7); @@ -143,8 +143,8 @@ mod 
tests { let mut manager = Manager::default(); let amp = BreitWigner::new( "bw", - constant(1.5), - constant(0.3), + parameter("mass"), + parameter("width"), 2, &Mass::new([2]), &Mass::new([3]), @@ -156,7 +156,10 @@ mod tests { let expr = aid.into(); let evaluator = manager.load(&expr, &dataset); - let result = evaluator.evaluate_gradient(&[]); - assert_eq!(result[0].len(), 0); // amplitude has no parameters + let result = evaluator.evaluate_gradient(&[1.5, 0.3]); + assert_relative_eq!(result[0][0].re, 1.3252039, epsilon = 1e-7); + assert_relative_eq!(result[0][0].im, -11.6827505, epsilon = 1e-7); + assert_relative_eq!(result[0][1].re, -2.2688852, epsilon = 1e-7); + assert_relative_eq!(result[0][1].im, 2.5079719, epsilon = 1e-7); } } diff --git a/src/amplitudes/common.rs b/src/amplitudes/common.rs index 227cb96..cd1b837 100644 --- a/src/amplitudes/common.rs +++ b/src/amplitudes/common.rs @@ -271,13 +271,15 @@ mod tests { let expr = aid.into(); // f(r,θ) = re^(iθ) let evaluator = manager.load(&expr, &dataset); - let params = vec![2.0, PI / 4.0]; // r and theta + let r = 2.0; + let theta = PI / 4.0; + let params = vec![r, theta]; let gradient = evaluator.evaluate_gradient(¶ms); // d/dr re^(iθ) = e^(iθ), d/dθ re^(iθ) = ire^(iθ) - assert_relative_eq!(gradient[0][0].re, f64::cos(PI / 4.0)); - assert_relative_eq!(gradient[0][0].im, f64::sin(PI / 4.0)); - assert_relative_eq!(gradient[0][1].re, -2.0 * f64::sin(PI / 4.0)); - assert_relative_eq!(gradient[0][1].im, 2.0 * f64::cos(PI / 4.0)); + assert_relative_eq!(gradient[0][0].re, f64::cos(theta)); + assert_relative_eq!(gradient[0][0].im, f64::sin(theta)); + assert_relative_eq!(gradient[0][1].re, -r * f64::sin(theta)); + assert_relative_eq!(gradient[0][1].im, r * f64::cos(theta)); } } diff --git a/src/amplitudes/mod.rs b/src/amplitudes/mod.rs index f2629a6..7a0df65 100644 --- a/src/amplitudes/mod.rs +++ b/src/amplitudes/mod.rs @@ -355,6 +355,10 @@ pub struct Manager { } impl Manager { + /// Get the list of parameter names in 
the order they appear in the [`Manager`]'s [`Resources`] field. + pub fn parameters(&self) -> Vec { + self.resources.parameters.iter().cloned().collect() + } /// Register the given [`Amplitude`] and return an [`AmplitudeID`] that can be used to build /// [`Expression`]s. /// @@ -764,7 +768,7 @@ mod tests { let amp = Scalar::new("parametric", parameter("test_param")); manager.register(amp).unwrap(); - let parameters = manager.resources.parameters.clone(); + let parameters = manager.parameters(); assert_eq!(parameters.len(), 1); assert_eq!(parameters[0], "test_param"); } diff --git a/src/data.rs b/src/data.rs index 45f4d59..08aaa49 100644 --- a/src/data.rs +++ b/src/data.rs @@ -534,7 +534,6 @@ mod tests { #[test] fn test_event_p4_sum() { let event = test_event(); - println!("{}", event); let sum = event.get_p4_sum([2, 3]); assert_relative_eq!(sum[0], event.p4s[2].px() + event.p4s[3].px()); assert_relative_eq!(sum[1], event.p4s[2].py() + event.p4s[3].py()); diff --git a/src/python.rs b/src/python.rs index 8791ea3..172dd3b 100644 --- a/src/python.rs +++ b/src/python.rs @@ -54,8 +54,8 @@ pub(crate) mod laddu { Self(nalgebra::Vector3::new(px, py, pz)) } fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult { - if let Ok(other_vec) = other.extract::>() { - Ok(Vector3(self.0 + other_vec.0)) + if let Ok(other_vec) = other.extract::>() { + Ok(Self(self.0 + other_vec.0)) } else if let Ok(other_int) = other.extract::() { if other_int == 0 { Ok(self.clone()) @@ -69,8 +69,8 @@ pub(crate) mod laddu { } } fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult { - if let Ok(other_vec) = other.extract::>() { - Ok(Vector3(other_vec.0 + self.0)) + if let Ok(other_vec) = other.extract::>() { + Ok(Self(other_vec.0 + self.0)) } else if let Ok(other_int) = other.extract::() { if other_int == 0 { Ok(self.clone()) @@ -83,6 +83,39 @@ pub(crate) mod laddu { Err(PyTypeError::new_err("Unsupported operand type for +")) } } + fn __sub__(&self, other: &Bound<'_, PyAny>) -> PyResult { + if let 
Ok(other_vec) = other.extract::>() { + Ok(Self(self.0 - other_vec.0)) + } else if let Ok(other_int) = other.extract::() { + if other_int == 0 { + Ok(self.clone()) + } else { + Err(PyTypeError::new_err( + "Subtraction with an integer for this type is only defined for 0", + )) + } + } else { + Err(PyTypeError::new_err("Unsupported operand type for -")) + } + } + fn __rsub__(&self, other: &Bound<'_, PyAny>) -> PyResult { + if let Ok(other_vec) = other.extract::>() { + Ok(Self(other_vec.0 - self.0)) + } else if let Ok(other_int) = other.extract::() { + if other_int == 0 { + Ok(self.clone()) + } else { + Err(PyTypeError::new_err( + "Subtraction with an integer for this type is only defined for 0", + )) + } + } else { + Err(PyTypeError::new_err("Unsupported operand type for -")) + } + } + fn __neg__(&self) -> PyResult { + Ok(Self(-self.0)) + } /// The dot product /// /// Calculates the dot product of two Vector3s. @@ -228,10 +261,30 @@ pub(crate) mod laddu { /// float /// The x-component /// + /// See Also + /// -------- + /// Vector3.x + /// #[getter] fn px(&self) -> Float { self.0.px() } + /// The x-component of this vector + /// + /// Returns + /// ------- + /// float + /// The x-component + /// + /// See Also + /// -------- + /// Vector3.px + /// + #[getter] + fn x(&self) -> Float { + self.0.x + } + /// The y-component of this vector /// /// Returns @@ -239,10 +292,29 @@ pub(crate) mod laddu { /// float /// The y-component /// + /// See Also + /// -------- + /// Vector3.y + /// #[getter] fn py(&self) -> Float { self.0.py() } + /// The y-component of this vector + /// + /// Returns + /// ------- + /// float + /// The y-component + /// + /// See Also + /// -------- + /// Vector3.py + /// + #[getter] + fn y(&self) -> Float { + self.0.y + } /// The z-component of this vector /// /// Returns @@ -250,10 +322,29 @@ pub(crate) mod laddu { /// float /// The z-component /// + /// See Also + /// -------- + /// Vector3.z + /// #[getter] fn pz(&self) -> Float { self.0.pz() } + 
/// The z-component of this vector + /// + /// Returns + /// ------- + /// float + /// The z-component + /// + /// See Also + /// -------- + /// Vector3.pz + /// + #[getter] + fn z(&self) -> Float { + self.0.z + } /// Convert a 3-vector momentum to a 4-momentum with the given mass /// /// The mass-energy equivalence is used to compute the energy of the 4-momentum: @@ -320,6 +411,12 @@ pub(crate) mod laddu { fn __str__(&self) -> String { format!("{}", self.0) } + fn __getitem__(&self, index: usize) -> PyResult { + self.0 + .get(index) + .ok_or(PyIndexError::new_err("index out of range")) + .copied() + } } /// A 4-momentum vector formed from energy and Cartesian 3-momentum components @@ -345,8 +442,8 @@ pub(crate) mod laddu { Self(nalgebra::Vector4::new(px, py, pz, e)) } fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult { - if let Ok(other_vec) = other.extract::>() { - Ok(Vector4(self.0 + other_vec.0)) + if let Ok(other_vec) = other.extract::>() { + Ok(Self(self.0 + other_vec.0)) } else if let Ok(other_int) = other.extract::() { if other_int == 0 { Ok(self.clone()) @@ -360,8 +457,8 @@ pub(crate) mod laddu { } } fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult { - if let Ok(other_vec) = other.extract::>() { - Ok(Vector4(other_vec.0 + self.0)) + if let Ok(other_vec) = other.extract::>() { + Ok(Self(other_vec.0 + self.0)) } else if let Ok(other_int) = other.extract::() { if other_int == 0 { Ok(self.clone()) @@ -374,6 +471,39 @@ pub(crate) mod laddu { Err(PyTypeError::new_err("Unsupported operand type for +")) } } + fn __sub__(&self, other: &Bound<'_, PyAny>) -> PyResult { + if let Ok(other_vec) = other.extract::>() { + Ok(Self(self.0 - other_vec.0)) + } else if let Ok(other_int) = other.extract::() { + if other_int == 0 { + Ok(self.clone()) + } else { + Err(PyTypeError::new_err( + "Subtraction with an integer for this type is only defined for 0", + )) + } + } else { + Err(PyTypeError::new_err("Unsupported operand type for -")) + } + } + fn __rsub__(&self, 
other: &Bound<'_, PyAny>) -> PyResult { + if let Ok(other_vec) = other.extract::>() { + Ok(Self(other_vec.0 - self.0)) + } else if let Ok(other_int) = other.extract::() { + if other_int == 0 { + Ok(self.clone()) + } else { + Err(PyTypeError::new_err( + "Subtraction with an integer for this type is only defined for 0", + )) + } + } else { + Err(PyTypeError::new_err("Unsupported operand type for -")) + } + } + fn __neg__(&self) -> PyResult { + Ok(Self(-self.0)) + } /// The magnitude of the 4-vector /// /// This is calculated as: @@ -461,7 +591,18 @@ pub(crate) mod laddu { /// #[getter] fn e(&self) -> Float { - self.0[0] + self.0.e() + } + /// The energy associated with this vector + /// + /// Returns + /// ------- + /// float + /// The energy + /// + #[getter] + fn w(&self) -> Float { + self.0.w } /// The x-component of this vector /// @@ -470,10 +611,30 @@ pub(crate) mod laddu { /// float /// The x-component /// + /// See Also + /// -------- + /// Vector4.x + /// #[getter] fn px(&self) -> Float { self.0.px() } + /// The x-component of this vector + /// + /// Returns + /// ------- + /// float + /// The x-component + /// + /// See Also + /// -------- + /// Vector4.px + /// + #[getter] + fn x(&self) -> Float { + self.0.x + } + /// The y-component of this vector /// /// Returns @@ -481,10 +642,29 @@ pub(crate) mod laddu { /// float /// The y-component /// + /// See Also + /// -------- + /// Vector4.y + /// #[getter] fn py(&self) -> Float { self.0.py() } + /// The y-component of this vector + /// + /// Returns + /// ------- + /// float + /// The y-component + /// + /// See Also + /// -------- + /// Vector4.py + /// + #[getter] + fn y(&self) -> Float { + self.0.y + } /// The z-component of this vector /// /// Returns @@ -492,10 +672,29 @@ pub(crate) mod laddu { /// float /// The z-component /// + /// See Also + /// -------- + /// Vector4.z + /// #[getter] fn pz(&self) -> Float { self.0.pz() } + /// The z-component of this vector + /// + /// Returns + /// ------- + /// 
float + /// The z-component + /// + /// See Also + /// -------- + /// Vector4.pz + /// + #[getter] + fn z(&self) -> Float { + self.0.z + } /// The 3-momentum part of this 4-momentum /// /// Returns @@ -621,6 +820,12 @@ pub(crate) mod laddu { fn __repr__(&self) -> String { self.0.to_p4_string() } + fn __getitem__(&self, index: usize) -> PyResult { + self.0 + .get(index) + .ok_or(PyIndexError::new_err("index out of range")) + .copied() + } } /// A single event @@ -675,6 +880,21 @@ pub(crate) mod laddu { pub(crate) fn get_weight(&self) -> Float { self.0.weight } + /// Get the sum of the four-momenta within the event at the given indices + /// + /// Parameters + /// ---------- + /// indices : list of int + /// The indices of the four-momenta to sum + /// + /// Returns + /// ------- + /// Vector4 + /// The result of summing the given four-momenta + /// + pub(crate) fn get_p4_sum(&self, indices: Vec) -> Vector4 { + Vector4(self.0.get_p4_sum(indices)) + } } /// A set of Events @@ -1682,6 +1902,17 @@ pub(crate) mod laddu { fn new() -> Self { Self(rust::amplitudes::Manager::default()) } + /// The free parameters used by the Manager + /// + /// Returns + /// ------- + /// parameters : list of str + /// The list of parameter names + /// + #[getter] + fn parameters(&self) -> Vec { + self.0.parameters() + } /// Register an Amplitude with the Manager /// /// Parameters @@ -1707,7 +1938,7 @@ pub(crate) mod laddu { /// /// Parameters /// ---------- - /// expression : Expression + /// expression : Expression or AmplitudeID /// The expression to use in precalculation /// dataset : Dataset /// The Dataset to use in precalculation @@ -1725,8 +1956,17 @@ pub(crate) mod laddu { /// expression. These parameters will have no effect on evaluation, but they must be /// included in function calls. 
/// - fn load(&self, expression: &Expression, dataset: &Dataset) -> Evaluator { - Evaluator(self.0.load(&expression.0, &dataset.0)) + fn load(&self, expression: &Bound<'_, PyAny>, dataset: &Dataset) -> PyResult<Evaluator> { + let expression = if let Ok(expression) = expression.extract::<Expression>() { + Ok(expression.0) + } else if let Ok(aid) = expression.extract::<AmplitudeID>() { + Ok(rust::amplitudes::Expression::Amp(aid.0)) + } else { + Err(PyTypeError::new_err( + "'expression' must either be an Expression or an AmplitudeID", + )) + }?; + Ok(Evaluator(self.0.load(&expression, &dataset.0))) + } } @@ -1861,6 +2101,34 @@ pub(crate) mod laddu { ) -> Bound<'py, PyArray1<Complex<Float>>> { PyArray1::from_slice_bound(py, &self.0.evaluate(&parameters)) } + /// Evaluate the gradient of the stored Expression over the stored Dataset + /// + /// Parameters + /// ---------- + /// parameters : list of float + /// The values to use for the free parameters + /// + /// Returns + /// ------- + /// result : array_like + /// A ``numpy`` 2D array of complex gradient values for each Event in the Dataset + /// + fn evaluate_gradient<'py>( + &self, + py: Python<'py>, + parameters: Vec<Float>, + ) -> Bound<'py, PyArray2<Complex<Float>>> { + PyArray2::from_vec2_bound( + py, + &self + .0 + .evaluate_gradient(&parameters) + .iter() + .map(|grad| grad.data.as_vec().to_vec()) + .collect::<Vec<Vec<Complex<Float>>>>(), + ) + .expect("Gradients do not have the same length (please make an issue report on GitHub)") + } } trait GetStrExtractObj { @@ -2034,14 +2302,12 @@ pub(crate) mod laddu { /// ---------- /// manager : Manager /// The Manager to use for precalculation + /// expression : Expression or AmplitudeID + /// The Expression to evaluate /// ds_data : Dataset /// A Dataset representing true signal data /// ds_accmc : Dataset /// A Dataset of physically flat accepted Monte Carlo data used for normalization - /// gen_len: int, optional - /// The size of the generated dataset (will use ``len(ds_accmc)`` if None) - /// expression : Expression - /// The Expression to evaluate /// #[pyclass]
#[derive(Clone)] @@ -2053,16 +2319,25 @@ pub(crate) mod laddu { #[pyo3(signature = (manager, expression, ds_data, ds_accmc))] fn new( manager: &Manager, - expression: &Expression, + expression: &Bound<'_, PyAny>, ds_data: &Dataset, ds_accmc: &Dataset, - ) -> Self { - Self(rust::likelihoods::NLL::new( + ) -> PyResult<Self> { + let expression = if let Ok(expression) = expression.extract::<Expression>() { + Ok(expression.0) + } else if let Ok(aid) = expression.extract::<AmplitudeID>() { + Ok(rust::amplitudes::Expression::Amp(aid.0)) + } else { + Err(PyTypeError::new_err( + "'expression' must either be an Expression or an AmplitudeID", + )) + }?; + Ok(Self(rust::likelihoods::NLL::new( &manager.0, - &expression.0, + &expression, &ds_data.0, &ds_accmc.0, - )) + ))) } /// The underlying signal dataset used in calculating the NLL /// @@ -2212,6 +2487,25 @@ pub(crate) mod laddu { fn evaluate(&self, parameters: Vec<Float>) -> Float { self.0.evaluate(&parameters) } + /// Evaluate the gradient of the negative log-likelihood over the stored Dataset + /// + /// Parameters + /// ---------- + /// parameters : list of float + /// The values to use for the free parameters + /// + /// Returns + /// ------- + /// result : array_like + /// A ``numpy`` array representing the gradient of the negative log-likelihood over each parameter + /// + fn evaluate_gradient<'py>( + &self, + py: Python<'py>, + parameters: Vec<Float>, + ) -> Bound<'py, PyArray1<Float>> { + PyArray1::from_slice_bound(py, self.0.evaluate_gradient(&parameters).as_slice()) + } /// Project the model over the Monte Carlo dataset with the given parameter values /// /// This is defined as