From 873853904bce8f3d5856d4c300f279c7f524a0df Mon Sep 17 00:00:00 2001
From: Felix Hekhorn
Date: Fri, 9 Aug 2024 11:01:11 +0300
Subject: [PATCH] Merge master
---
.github/workflows/crates.yml | 28 +
.github/workflows/maturin.yml | 123 +
.github/workflows/pypi.yml | 2 +-
.github/workflows/unittests-rust.yml | 21 +
.github/workflows/unittests.yml | 6 +-
.pre-commit-config.yaml | 36 +-
.readthedocs.yaml | 12 +-
crates/Cargo.lock => Cargo.lock | 52 +-
Cargo.toml | 22 +
README.md | 1 +
benchmarks/DSSV_bench.py | 1 +
benchmarks/NNPDF_bench.py | 1 +
benchmarks/apfel_bench.py | 2 +-
benchmarks/asv.conf.json | 2 +-
benchmarks/eko/benchmark_alphaem.py | 12 +-
benchmarks/eko/benchmark_evol_to_unity.py | 8 +-
benchmarks/eko/benchmark_inverse_matching.py | 9 +-
benchmarks/eko/benchmark_msbar_evolution.py | 8 +-
benchmarks/eko/benchmark_strong_coupling.py | 74 +-
benchmarks/ekobox/benchmark_evol_pdf.py | 10 +-
...k_ad.py => benchmark_pegasus_ad_us_as2.py} | 40 +-
benchmarks/ekore/benchmark_pegasus_mellin.py | 41 +
.../ekore/benchmark_pegasus_ome_ps_as2.py | 199 +
benchmarks/lha_paper_bench.py | 23 +-
crates/Cargo.toml | 3 -
crates/README.md | 18 +
crates/bump-versions.py | 40 +
crates/doc-header.html | 65 +
crates/eko/Cargo.toml | 19 +-
crates/eko/doc-header.html | 1 +
crates/eko/pyproject.toml | 4 +-
crates/eko/src/bib.rs | 1 +
crates/eko/src/lib.rs | 31 +-
crates/eko/src/mellin.rs | 20 +-
crates/ekore/Cargo.toml | 17 +-
crates/ekore/doc-header.html | 1 +
crates/ekore/refs.bib | 19 +
.../unpolarized/spacelike.rs | 33 +-
.../unpolarized/spacelike/as2.rs | 338 ++
.../unpolarized/spacelike/as3.rs | 455 +++
crates/ekore/src/bib.rs | 30 +-
crates/ekore/src/constants.rs | 26 +-
crates/ekore/src/harmonics.rs | 6 +-
crates/ekore/src/harmonics/cache.rs | 82 +-
crates/ekore/src/harmonics/g_functions.rs | 50 +
crates/ekore/src/harmonics/polygamma.rs | 15 +-
crates/ekore/src/harmonics/w1.rs | 3 +-
crates/ekore/src/harmonics/w2.rs | 13 +
crates/ekore/src/harmonics/w3.rs | 13 +
crates/ekore/src/harmonics/w4.rs | 13 +
crates/ekore/src/util.rs | 20 +
crates/make_bib.py | 23 +-
crates/parse-abbrev.py | 20 +
crates/release.json | 1 +
doc/source/code/genpdf.rst | 56 +-
doc/source/conf.py | 10 +-
doc/source/overview/tutorials/alpha_s.ipynb | 17 +-
doc/source/overview/tutorials/dglap.ipynb | 15 +-
doc/source/overview/tutorials/output.ipynb | 8 +-
doc/source/overview/tutorials/pdf.ipynb | 134 +-
doc/source/refs.bib | 63 +-
doc/source/theory/Matching.rst | 6 +-
doc/source/theory/N3LO_ad.rst | 5 +-
extras/lh_bench_23/.gitignore | 1 +
extras/lh_bench_23/cfg.py | 16 +-
extras/lh_bench_23/parse_to_latex.py | 179 +
extras/lh_bench_23/plot_bench.py | 49 -
extras/lh_bench_23/plot_bench_evol.py | 16 +-
extras/lh_bench_23/plot_bench_msht.py | 57 +-
extras/lh_bench_23/plot_trn_exa.py | 64 +
extras/lh_bench_23/run-n3lo.py | 2 +-
extras/lh_bench_23/run-nnlo.py | 3 +-
extras/lh_bench_23/run-trn_exa.py | 72 +
.../{run_fhmv.sh => run_fhmruvv.sh} | 8 +-
extras/lh_bench_23/tables/EKO.zip | Bin 0 -> 742394 bytes
extras/lh_bench_23/tables/MSHT.zip | Bin 0 -> 99712 bytes
extras/lh_bench_23/tables/table14-part1.csv | 12 +
extras/lh_bench_23/tables/table15-part1.csv | 12 +
.../tables/table_FFNS-1_iterate-exact.csv | 12 +
.../tables/table_FFNS-1_truncated.csv | 12 +
.../tables/table_FFNS-2_iterate-exact.csv | 12 +
.../tables/table_FFNS-2_truncated.csv | 12 +
.../tables/table_FFNS-3_iterate-exact.csv | 12 +
.../tables/table_FFNS-3_truncated.csv | 12 +
extras/lh_bench_23/utils.py | 84 +-
extras/n3lo_bench/plot_msht.py | 2 +-
extras/n3lo_bench/splitting_function_utils.py | 4 +-
flake.lock | 130 +-
flake.nix | 62 +-
poetry.lock | 3254 +++++++++--------
pyproject.toml | 24 +-
pyproject.toml.patch | 16 +-
rustify.sh | 8 +-
src/eko/__init__.py | 1 +
src/eko/couplings.py | 16 +-
src/eko/evolution_operator/__init__.py | 69 +-
src/eko/evolution_operator/__init__.py.patch | 70 +-
src/eko/evolution_operator/flavors.py | 1 +
src/eko/evolution_operator/grid.py | 49 +-
.../evolution_operator/matching_condition.py | 50 +-
.../operator_matrix_element.py | 74 +-
src/eko/evolution_operator/physical.py | 24 +-
src/eko/evolution_operator/quad_ker.py | 4 +-
src/eko/gamma.py | 1 +
src/eko/interpolation.py | 1 +
src/eko/io/bases.py | 117 -
src/eko/io/dictlike.py | 1 +
src/eko/io/exceptions.py | 1 +
src/eko/io/inventory.py | 5 +-
src/eko/io/items.py | 1 +
src/eko/io/legacy.py | 49 +-
src/eko/io/manipulate.py | 234 +-
src/eko/io/metadata.py | 9 +-
src/eko/io/paths.py | 1 +
src/eko/io/raw.py | 1 +
src/eko/io/runcards.py | 56 +-
src/eko/io/struct.py | 21 +-
src/eko/io/types.py | 9 +-
src/eko/kernels/__init__.py | 34 +
src/eko/kernels/as4_evolution_integrals.py | 4 +-
src/eko/kernels/non_singlet.py | 32 +-
src/eko/kernels/non_singlet_qed.py | 8 +-
src/eko/kernels/singlet.py | 26 +-
src/eko/kernels/singlet_qed.py | 8 +-
src/eko/kernels/utils.py | 26 -
src/eko/kernels/valence_qed.py | 8 +-
src/eko/matchings.py | 1 +
src/eko/mellin.py | 7 +-
src/eko/member.py | 1 +
src/eko/msbar_masses.py | 7 +-
src/eko/quantities/couplings.py | 15 +-
src/eko/quantities/heavy_quarks.py | 19 +-
src/eko/runner/__init__.py | 5 +-
src/eko/runner/commons.py | 3 +-
src/eko/runner/legacy.py | 11 +-
src/eko/runner/managed.py | 3 +-
src/eko/runner/operators.py | 5 +-
src/eko/runner/parts.py | 55 +-
src/eko/runner/recipes.py | 1 +
src/eko/scale_variations/__init__.py | 19 +-
src/eko/scale_variations/expanded.py | 8 +-
src/eko/scale_variations/exponentiated.py | 1 +
src/ekobox/apply.py | 65 +-
src/ekobox/cards.py | 10 +-
src/ekobox/cli/__init__.py | 1 +
src/ekobox/cli/base.py | 1 +
src/ekobox/cli/convert.py | 1 +
src/ekobox/cli/inspect.py | 3 +-
src/ekobox/cli/library.py | 1 +
src/ekobox/cli/log.py | 1 +
src/ekobox/cli/run.py | 15 +-
src/ekobox/cli/runcards.py | 7 +-
src/ekobox/evol_pdf.py | 36 +-
src/ekobox/genpdf/__init__.py | 13 +-
src/ekobox/genpdf/export.py | 1 +
src/ekobox/genpdf/flavors.py | 1 +
src/ekobox/genpdf/load.py | 1 +
src/ekobox/info_file.py | 77 +-
src/ekobox/mock.py | 1 +
src/ekobox/utils.py | 16 +-
src/ekomark/__init__.py | 1 +
src/ekomark/benchmark/external/LHA_utils.py | 1 +
src/ekomark/benchmark/external/apfel_utils.py | 1 +
.../benchmark/external/lhapdf_utils.py | 1 +
.../benchmark/external/pegasus_utils.py | 1 +
src/ekomark/benchmark/runner.py | 14 +-
src/ekomark/data/__init__.py | 29 +
src/ekomark/data/operators.py | 3 +-
src/ekomark/navigator/__init__.py | 1 +
src/ekomark/navigator/glob.py | 1 +
src/ekomark/navigator/navigator.py | 1 +
src/ekomark/plots.py | 50 +-
.../polarized/space_like/__init__.py | 1 +
.../polarized/space_like/as2.py | 16 +-
.../polarized/space_like/as3.py | 10 +-
.../unpolarized/space_like/__init__.py | 2 +
.../unpolarized/space_like/as2.py | 2 +-
.../unpolarized/space_like/as3.py | 1 +
.../unpolarized/space_like/as4/__init__.py | 1 +
.../space_like/as4/fhmruvv/__init__.py | 1 +
.../unpolarized/space_like/as4/fhmruvv/ggg.py | 1 +
.../unpolarized/space_like/as4/fhmruvv/ggq.py | 132 +-
.../space_like/as4/fhmruvv/gnsm.py | 1 +
.../space_like/as4/fhmruvv/gnsp.py | 1 +
.../space_like/as4/fhmruvv/gnsv.py | 1 +
.../unpolarized/space_like/as4/fhmruvv/gps.py | 1 +
.../unpolarized/space_like/as4/fhmruvv/gqg.py | 1 +
.../unpolarized/space_like/as4/ggq.py | 89 +-
.../unpolarized/space_like/as4/gnsm.py | 1 +
.../unpolarized/space_like/as4/gnsp.py | 1 +
.../unpolarized/space_like/as4/gnsv.py | 1 +
.../unpolarized/time_like/__init__.py | 2 +
.../unpolarized/time_like/as3.py | 2 +-
src/ekore/harmonics/__init__.py | 1 +
src/ekore/harmonics/cache.py | 1 +
src/ekore/harmonics/g_functions.py | 1 +
src/ekore/harmonics/log_functions.py | 1 +
src/ekore/harmonics/w3.py | 3 +-
src/ekore/harmonics/w4.py | 3 +-
src/ekore/harmonics/w5.py | 3 +-
.../polarized/space_like/as1.py | 1 +
.../polarized/space_like/as2.py | 36 +-
.../unpolarized/space_like/__init__.py | 4 +-
.../unpolarized/space_like/as1.py | 12 +-
.../unpolarized/space_like/as2.py | 37 +-
.../unpolarized/space_like/as3/__init__.py | 16 +-
.../unpolarized/space_like/as3/aHg.py | 5 +-
.../unpolarized/space_like/as3/aHg_param.py | 1178 ++----
.../unpolarized/space_like/as3/aHq.py | 3 +-
.../unpolarized/space_like/as3/agg.py | 99 +-
.../unpolarized/space_like/as3/agq.py | 3 +-
.../unpolarized/space_like/as3/aqg.py | 3 +-
.../unpolarized/space_like/as3/aqqNS.py | 5 +-
.../unpolarized/space_like/as3/aqqPS.py | 3 +-
tests/eko/evolution_operator/test_grid.py | 2 +-
tests/eko/evolution_operator/test_init.py | 53 +-
.../eko/evolution_operator/test_init.py.patch | 601 ---
.../test_matching_condition.py | 91 +-
tests/eko/evolution_operator/test_ome.py | 20 +-
tests/eko/evolution_operator/test_physical.py | 36 +-
tests/eko/io/test_bases.py | 83 -
tests/eko/io/test_manipulate.py | 274 +-
tests/eko/io/test_metadata.py | 6 +-
tests/eko/io/test_runcards.py | 90 -
tests/eko/kernels/test_init.py | 21 +
tests/eko/kernels/test_kernels_QEDns.py | 7 +-
tests/eko/kernels/test_kernels_QEDsinglet.py | 12 +-
tests/eko/kernels/test_kernels_QEDvalence.py | 16 +-
tests/eko/kernels/test_ns.py | 37 +-
tests/eko/kernels/test_s.py | 44 +-
tests/eko/kernels/test_utils.py | 12 -
tests/eko/quantities/test_couplings.py | 4 +-
tests/eko/runner/__init__.py | 10 +-
tests/eko/runner/conftest.py | 3 +-
tests/eko/runner/test_legacy.py | 2 +-
tests/eko/runner/test_operators.py | 6 +-
tests/eko/scale_variations/test_diff.py | 182 +
tests/eko/scale_variations/test_expanded.py | 206 +-
tests/eko/test_beta.py | 1 +
tests/eko/test_couplings.py | 39 +-
tests/eko/test_gamma.py | 1 +
tests/eko/test_matchings.py | 1 +
tests/eko/test_msbar_masses.py | 4 +-
tests/eko/test_quantities.py | 3 -
tests/ekobox/test_apply.py | 11 +-
tests/ekobox/test_cards.py | 8 +-
tests/ekobox/test_evol_pdf.py | 18 +-
tests/ekobox/test_info_file.py | 26 +-
tests/ekobox/test_utils.py | 7 +-
tests/ekomark/data/__init__.py | 0
tests/ekomark/data/test_init.py | 96 +
.../polarized/space_like/test_ad_as2.py | 8 +-
.../unpolarized/space_like/test_as4.py | 4 +-
.../unpolarized/space_like/test_as4_fhmv.py | 4 +-
.../polarized/space_like/test_nnlo.py | 20 +-
.../unpolarized/space_like/test_as3.py | 29 +-
256 files changed, 6317 insertions(+), 5568 deletions(-)
create mode 100644 .github/workflows/crates.yml
create mode 100644 .github/workflows/maturin.yml
create mode 100644 .github/workflows/unittests-rust.yml
rename crates/Cargo.lock => Cargo.lock (75%)
create mode 100644 Cargo.toml
rename benchmarks/ekore/{benchmark_ad.py => benchmark_pegasus_ad_us_as2.py} (87%)
create mode 100644 benchmarks/ekore/benchmark_pegasus_mellin.py
create mode 100644 benchmarks/ekore/benchmark_pegasus_ome_ps_as2.py
delete mode 100644 crates/Cargo.toml
create mode 100644 crates/README.md
create mode 100644 crates/bump-versions.py
create mode 100644 crates/doc-header.html
create mode 120000 crates/eko/doc-header.html
create mode 120000 crates/eko/src/bib.rs
create mode 120000 crates/ekore/doc-header.html
create mode 100644 crates/ekore/src/anomalous_dimensions/unpolarized/spacelike/as2.rs
create mode 100644 crates/ekore/src/anomalous_dimensions/unpolarized/spacelike/as3.rs
create mode 100644 crates/ekore/src/harmonics/g_functions.rs
create mode 100644 crates/ekore/src/harmonics/w2.rs
create mode 100644 crates/ekore/src/harmonics/w3.rs
create mode 100644 crates/ekore/src/harmonics/w4.rs
create mode 100644 crates/parse-abbrev.py
create mode 100644 crates/release.json
create mode 100644 extras/lh_bench_23/parse_to_latex.py
delete mode 100644 extras/lh_bench_23/plot_bench.py
create mode 100644 extras/lh_bench_23/plot_trn_exa.py
create mode 100644 extras/lh_bench_23/run-trn_exa.py
rename extras/lh_bench_23/{run_fhmv.sh => run_fhmruvv.sh} (76%)
create mode 100644 extras/lh_bench_23/tables/EKO.zip
create mode 100644 extras/lh_bench_23/tables/MSHT.zip
create mode 100644 extras/lh_bench_23/tables/table14-part1.csv
create mode 100644 extras/lh_bench_23/tables/table15-part1.csv
create mode 100644 extras/lh_bench_23/tables/table_FFNS-1_iterate-exact.csv
create mode 100644 extras/lh_bench_23/tables/table_FFNS-1_truncated.csv
create mode 100644 extras/lh_bench_23/tables/table_FFNS-2_iterate-exact.csv
create mode 100644 extras/lh_bench_23/tables/table_FFNS-2_truncated.csv
create mode 100644 extras/lh_bench_23/tables/table_FFNS-3_iterate-exact.csv
create mode 100644 extras/lh_bench_23/tables/table_FFNS-3_truncated.csv
delete mode 100644 src/eko/io/bases.py
delete mode 100644 src/eko/kernels/utils.py
delete mode 100644 tests/eko/evolution_operator/test_init.py.patch
delete mode 100644 tests/eko/io/test_bases.py
create mode 100644 tests/eko/kernels/test_init.py
delete mode 100644 tests/eko/kernels/test_utils.py
create mode 100644 tests/eko/scale_variations/test_diff.py
create mode 100644 tests/ekomark/data/__init__.py
create mode 100644 tests/ekomark/data/test_init.py
diff --git a/.github/workflows/crates.yml b/.github/workflows/crates.yml
new file mode 100644
index 000000000..935ce2aba
--- /dev/null
+++ b/.github/workflows/crates.yml
@@ -0,0 +1,28 @@
+name: Deploy Crates
+
+on:
+ push:
+ tags:
+ - "*"
+ workflow_dispatch:
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ - name: Install and configure Poetry
+ uses: snok/install-poetry@v1
+ - name: Install task runner
+ run: pip install poethepoet
+ - name: Bump versions
+ run: |
+ poetry install --only version
+ poe bump-version
+ - name: Publish crates
+ run: |
+ jq '.[]' crates/release.json | xargs -I _ cargo publish -p _ --allow-dirty
+ env:
+ CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
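
A rough Python equivalent of the "Publish crates" step above is sketched below for reference. It assumes that crates/release.json holds a plain JSON array of crate names in release order (dependencies first, e.g. ["ekore", "eko"]); the actual file content is not shown in this patch, so that ordering is an assumption based on the crates/README.md added further down.

```python
# Sketch of: jq '.[]' crates/release.json | xargs -I _ cargo publish -p _ --allow-dirty
# Assumes release.json is a JSON array of crate names, dependencies first.
import json
import subprocess
from pathlib import Path

crates = json.loads(Path("crates/release.json").read_text())
for name in crates:  # publish in the listed order
    subprocess.run(["cargo", "publish", "-p", name, "--allow-dirty"], check=True)
```
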
diff --git a/.github/workflows/maturin.yml b/.github/workflows/maturin.yml
new file mode 100644
index 000000000..4680d0316
--- /dev/null
+++ b/.github/workflows/maturin.yml
@@ -0,0 +1,123 @@
+name: Deploy Maturin wheels
+
+on:
+ push:
+ tags:
+ - "*"
+ workflow_dispatch:
+
+permissions:
+ contents: read
+
+jobs:
+ linux:
+ runs-on: ${{ matrix.platform.runner }}
+ strategy:
+ matrix:
+ platform:
+ - runner: ubuntu-latest
+ target: x86_64
+ - runner: ubuntu-latest
+ target: x86
+ - runner: ubuntu-latest
+ target: aarch64
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+ - name: Build wheels
+ uses: PyO3/maturin-action@v1
+ with:
+ target: ${{ matrix.platform.target }}
+ args: --release --out dist --find-interpreter -m crates/eko/Cargo.toml
+ sccache: "true"
+ manylinux: auto
+ - name: Upload wheels
+ uses: actions/upload-artifact@v4
+ with:
+ name: wheels-linux-${{ matrix.platform.target }}
+ path: dist
+
+ windows:
+ runs-on: ${{ matrix.platform.runner }}
+ strategy:
+ matrix:
+ platform:
+ - runner: windows-latest
+ target: x64
+ - runner: windows-latest
+ target: x86
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+ architecture: ${{ matrix.platform.target }}
+ - name: Build wheels
+ uses: PyO3/maturin-action@v1
+ with:
+ target: ${{ matrix.platform.target }}
+ args: --release --out dist --find-interpreter -m crates/eko/Cargo.toml
+ sccache: "true"
+ - name: Upload wheels
+ uses: actions/upload-artifact@v4
+ with:
+ name: wheels-windows-${{ matrix.platform.target }}
+ path: dist
+
+ macos:
+ runs-on: ${{ matrix.platform.runner }}
+ strategy:
+ matrix:
+ platform:
+ - runner: macos-latest
+ target: x86_64
+ - runner: macos-14
+ target: aarch64
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+ - name: Build wheels
+ uses: PyO3/maturin-action@v1
+ with:
+ target: ${{ matrix.platform.target }}
+ args: --release --out dist --find-interpreter -m crates/eko/Cargo.toml
+ sccache: "true"
+ - name: Upload wheels
+ uses: actions/upload-artifact@v4
+ with:
+ name: wheels-macos-${{ matrix.platform.target }}
+ path: dist
+
+ sdist:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Build sdist
+ uses: PyO3/maturin-action@v1
+ with:
+ command: sdist
+ args: --out dist -m crates/eko/Cargo.toml
+ - name: Upload sdist
+ uses: actions/upload-artifact@v4
+ with:
+ name: wheels-sdist
+ path: dist
+
+ release:
+ name: Release
+ runs-on: ubuntu-latest
+ if: "startsWith(github.ref, 'refs/tags/')"
+ needs: [linux, windows, macos, sdist]
+ steps:
+ - uses: actions/download-artifact@v4
+ - name: Publish to PyPI
+ uses: PyO3/maturin-action@v1
+ env:
+ MATURIN_PYPI_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
+ with:
+ command: upload
+ args: --non-interactive --skip-existing wheels-*/*
diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml
index 46040a1ae..45f89248b 100644
--- a/.github/workflows/pypi.yml
+++ b/.github/workflows/pypi.yml
@@ -7,7 +7,7 @@ on:
jobs:
publish:
- uses: N3PDF/workflows/.github/workflows/python-poetry-pypi.yml@v2
+ uses: NNPDF/workflows/.github/workflows/python-poetry-pypi.yml@v2
secrets:
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
with:
diff --git a/.github/workflows/unittests-rust.yml b/.github/workflows/unittests-rust.yml
new file mode 100644
index 000000000..2b05cb4d6
--- /dev/null
+++ b/.github/workflows/unittests-rust.yml
@@ -0,0 +1,21 @@
+name: Rust unit tests
+
+on: push
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ - name: Install task runner
+ run: pip install poethepoet
+ - name: Run fmt
+ run: |
+ poe fmtcheck
+ - name: Run clippy
+ run: |
+ poe clippy
+ - name: Run Rust unit tests
+ run: |
+ poe rtest
diff --git a/.github/workflows/unittests.yml b/.github/workflows/unittests.yml
index ef3f7fa3f..a21b8531e 100644
--- a/.github/workflows/unittests.yml
+++ b/.github/workflows/unittests.yml
@@ -1,4 +1,4 @@
-name: tests
+name: Python unit tests
on: push
@@ -6,10 +6,10 @@ jobs:
test:
strategy:
matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11"]
+ python-version: ["3.9", "3.10", "3.11", "3.12"]
fail-fast: false
- uses: N3PDF/workflows/.github/workflows/python-poetry-tests.yml@v2
+ uses: NNPDF/workflows/.github/workflows/python-poetry-tests.yml@v2
with:
python-version: ${{ matrix.python-version }}
poetry-extras: "-E mark -E box"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bee3e7adc..dfd0862f5 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,10 +2,10 @@
# See https://pre-commit.com/hooks.html for more hooks
ci:
autofix_prs: false
- skip: [fmt-eko, fmt-ekore]
+ skip: [fmt] # will be run by a separate CI
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.5.0
+ rev: v4.6.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
@@ -19,11 +19,11 @@ repos:
- id: pycln
args: [--config=pyproject.toml]
- repo: https://github.com/psf/black-pre-commit-mirror
- rev: 23.12.1
+ rev: 24.4.2
hooks:
- id: black
- repo: https://github.com/asottile/blacken-docs
- rev: 1.16.0
+ rev: 1.18.0
hooks:
- id: blacken-docs
- repo: https://github.com/pycqa/isort
@@ -32,7 +32,7 @@ repos:
- id: isort
args: ["--profile", "black"]
- repo: https://github.com/asottile/pyupgrade
- rev: v3.15.0
+ rev: v3.17.0
hooks:
- id: pyupgrade
- repo: https://github.com/pycqa/pydocstyle
@@ -43,25 +43,23 @@ repos:
args: ["--add-ignore=D107,D105"]
additional_dependencies:
- toml
- - repo: local
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v1.4.1
hooks:
- - id: fmt-eko
- name: fmt-eko
- description: Format eko files with cargo fmt.
- entry: cargo fmt --manifest-path crates/eko/Cargo.toml --
- language: system
- files: ^crates/eko/.*\.rs$
- args: []
+ - id: mypy
+ additional_dependencies: [types-PyYAML]
+ pass_filenames: false
+ args: ["--ignore-missing-imports", "src/"]
- repo: local
hooks:
- - id: fmt-ekore
- name: fmt-ekore
- description: Format ekore files with cargo fmt.
- entry: cargo fmt --manifest-path crates/ekore/Cargo.toml --
+ - id: fmt
+ name: fmt
+ description: Format Rust files with cargo fmt.
+ entry: cargo fmt --
language: system
- files: ^crates/ekore/.*\.rs$
+ files: ^crates/.*\.rs$
args: []
- repo: https://github.com/pre-commit/pre-commit
- rev: v3.6.0
+ rev: v3.8.0
hooks:
- id: validate_manifest
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 7b5c39c8d..e734d32c8 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -1,3 +1,7 @@
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+# for poetry see https://docs.readthedocs.io/en/stable/build-customization.html#install-dependencies-with-poetry
+
+# Required
version: 2
build:
@@ -9,17 +13,13 @@ build:
jobs:
post_create_environment:
- pip install poetry
- - poetry config virtualenvs.create false
post_install:
- - poetry install --with docs
+ - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with docs
+# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: doc/source/conf.py
-# Optionally build your docs in additional formats such as PDF
-# formats:
-# - pdf
-
python:
install:
- method: pip
diff --git a/crates/Cargo.lock b/Cargo.lock
similarity index 75%
rename from crates/Cargo.lock
rename to Cargo.lock
index b6d5d084d..80fa02400 100644
--- a/crates/Cargo.lock
+++ b/Cargo.lock
@@ -33,20 +33,18 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "eko"
-version = "0.1.0"
+version = "0.0.1"
dependencies = [
"ekore",
- "katexit",
"num",
]
[[package]]
name = "ekore"
-version = "0.1.0"
+version = "0.0.1"
dependencies = [
"float-cmp",
"hashbrown",
- "katexit",
"num",
]
@@ -69,17 +67,6 @@ dependencies = [
"allocator-api2",
]
-[[package]]
-name = "katexit"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb1304c448ce2c207c2298a34bc476ce7ae47f63c23fa2b498583b26be9bc88c"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
[[package]]
name = "num"
version = "0.4.1"
@@ -162,41 +149,6 @@ version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
-[[package]]
-name = "proc-macro2"
-version = "1.0.66"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
-dependencies = [
- "unicode-ident",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.32"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "syn"
-version = "1.0.109"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "unicode-ident"
-version = "1.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
-
[[package]]
name = "version_check"
version = "0.9.4"
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 000000000..f26518890
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,22 @@
+[workspace]
+members = ["crates/*"]
+resolver = "2"
+
+[workspace.package]
+authors = [
+ "A. Barontini ",
+ "A. Candido ",
+ "F. Hekhorn ",
+ "N. Laurenti ",
+ "G. Magni ",
+ "T. Sharma ",
+]
+description = "Evolution Kernel Operators"
+readme = "README.md"
+categories = ["science"]
+edition = "2021"
+keywords = ["physics"]
+license = "GPL-3.0-or-later"
+repository = "https://github.com/NNPDF/eko"
+rust-version = "1.60.0"
+version = "0.0.1"
diff --git a/README.md b/README.md
index 8344d4cb1..aef72ca2f 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,7 @@
+
diff --git a/benchmarks/DSSV_bench.py b/benchmarks/DSSV_bench.py
index 3df7af356..4654f67d7 100644
--- a/benchmarks/DSSV_bench.py
+++ b/benchmarks/DSSV_bench.py
@@ -3,6 +3,7 @@
Note that the PDF set is private, but can be obtained from the
authors upon request.
"""
+
from banana import register
from eko import interpolation
diff --git a/benchmarks/NNPDF_bench.py b/benchmarks/NNPDF_bench.py
index 0e939f9ea..444b452cd 100644
--- a/benchmarks/NNPDF_bench.py
+++ b/benchmarks/NNPDF_bench.py
@@ -1,6 +1,7 @@
"""
Benchmark NNPDF pdf family
"""
+
from banana import register
from eko import interpolation
diff --git a/benchmarks/apfel_bench.py b/benchmarks/apfel_bench.py
index ae3189119..d40c65894 100644
--- a/benchmarks/apfel_bench.py
+++ b/benchmarks/apfel_bench.py
@@ -1,6 +1,7 @@
"""
Benchmark EKO to Apfel
"""
+
import numpy as np
from banana import register
from banana.data import cartesian_product
@@ -20,7 +21,6 @@ def tolist(input_dict):
class ApfelBenchmark(Runner):
-
"""
Globally set the external program to Apfel
"""
diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json
index 628c6b460..eea67e10c 100644
--- a/benchmarks/asv.conf.json
+++ b/benchmarks/asv.conf.json
@@ -35,7 +35,7 @@
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
- "pythons": ["3.8", "3.9", "3.10"],
+ "pythons": ["3.9", "3.10"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
diff --git a/benchmarks/eko/benchmark_alphaem.py b/benchmarks/eko/benchmark_alphaem.py
index 16e56ac4a..9cc05536c 100644
--- a/benchmarks/eko/benchmark_alphaem.py
+++ b/benchmarks/eko/benchmark_alphaem.py
@@ -35,9 +35,7 @@ def test_alphaQED_high(self):
dict(
alphas=0.118,
alphaem=7.7553e-03,
- scale=91.2,
- num_flavs_ref=5,
- max_num_flavs=5,
+ ref=(91.2, 5),
em_running=True,
)
)
@@ -75,9 +73,7 @@ def test_alphaQED_low(self):
dict(
alphas=0.118,
alphaem=7.7553e-03,
- scale=91.2,
- num_flavs_ref=5,
- max_num_flavs=5,
+ ref=(91.2, 5),
em_running=True,
)
)
@@ -124,9 +120,7 @@ def test_validphys(self):
dict(
alphas=0.118,
alphaem=7.7553e-03,
- scale=91.2,
- num_flavs_ref=5,
- max_num_flavs=5,
+ ref=(91.2, 5),
em_running=True,
)
)
diff --git a/benchmarks/eko/benchmark_evol_to_unity.py b/benchmarks/eko/benchmark_evol_to_unity.py
index 41bc4b4d0..c466d00d8 100644
--- a/benchmarks/eko/benchmark_evol_to_unity.py
+++ b/benchmarks/eko/benchmark_evol_to_unity.py
@@ -19,16 +19,12 @@ def update_cards(theory: TheoryCard, operator: OperatorCard):
theory.couplings = CouplingsInfo(
alphas=0.35,
alphaem=0.007496,
- scale=float(np.sqrt(2)),
- max_num_flavs=6,
- num_flavs_ref=None,
+ ref=(float(np.sqrt(2)), 4),
)
- theory.heavy.num_flavs_init = 4
- theory.heavy.intrinsic_flavors = [4, 5]
theory.heavy.masses.c.value = 1.0
theory.heavy.masses.b.value = 4.75
theory.heavy.masses.t.value = 173.0
- operator.mu0 = float(np.sqrt(2))
+ operator.init = (float(np.sqrt(2)), 4)
operator.mugrid = [(10, 5)]
operator.xgrid = XGrid(np.linspace(1e-1, 1, 30))
operator.configs.interpolation_polynomial_degree = 1
diff --git a/benchmarks/eko/benchmark_inverse_matching.py b/benchmarks/eko/benchmark_inverse_matching.py
index d9c16b0b2..d63b776f0 100644
--- a/benchmarks/eko/benchmark_inverse_matching.py
+++ b/benchmarks/eko/benchmark_inverse_matching.py
@@ -20,14 +20,9 @@
couplings=dict(
alphas=0.118,
alphaem=0.007496252,
- scale=91.2,
- num_flavs_ref=5,
- max_num_flavs=6,
+ ref=(91.2, 5),
),
heavy=dict(
- num_flavs_init=4,
- num_flavs_max_pdf=6,
- intrinsic_flavors=[],
masses=[ReferenceRunning([mq, np.nan]) for mq in (MC, 4.92, 172.5)],
masses_scheme="POLE",
matching_ratios=[1.0, 1.0, np.inf],
@@ -40,7 +35,7 @@
# operator settings
op_raw = dict(
- mu0=1.65,
+ init=(1.65, 4),
xgrid=[0.0001, 0.001, 0.01, 0.1, 1],
mugrid=[(MC, 3), (MC, 4)],
configs=dict(
diff --git a/benchmarks/eko/benchmark_msbar_evolution.py b/benchmarks/eko/benchmark_msbar_evolution.py
index 6286e15ad..49dc0e952 100644
--- a/benchmarks/eko/benchmark_msbar_evolution.py
+++ b/benchmarks/eko/benchmark_msbar_evolution.py
@@ -1,4 +1,5 @@
"""This module benchmarks MSbar mass evolution against APFEL."""
+
import numpy as np
import pytest
@@ -20,9 +21,8 @@
def update_theory(theory: TheoryCard):
theory.order = (3, 0)
- theory.couplings.scale = 91
+ theory.couplings.ref = (91, 5)
theory.couplings.alphaem = 0.007496
- theory.couplings.num_flavs_ref = 5
theory.heavy.masses_scheme = QuarkMassScheme.MSBAR
theory.heavy.masses.c = QuarkMassRef([1.5, 18])
theory.heavy.masses.b = QuarkMassRef([4.1, 20])
@@ -149,7 +149,7 @@ def benchmark_APFEL_msbar_evolution(
apfel.SetTheory("QCD")
apfel.SetPerturbativeOrder(order - 1)
apfel.SetAlphaEvolution(method)
- apfel.SetAlphaQCDRef(coupl.alphas, coupl.scale)
+ apfel.SetAlphaQCDRef(coupl.alphas, coupl.ref[0])
apfel.SetVFNS()
apfel.SetMSbarMasses(
qmasses.c.value, qmasses.b.value, qmasses.t.value
@@ -217,7 +217,7 @@ def benchmark_APFEL_msbar_solution(
apfel.SetTheory("QCD")
apfel.SetPerturbativeOrder(order - 1)
apfel.SetAlphaEvolution("exact")
- apfel.SetAlphaQCDRef(coupl.alphas, coupl.scale)
+ apfel.SetAlphaQCDRef(coupl.alphas, coupl.ref[0])
apfel.SetVFNS()
apfel.SetMSbarMasses(qmasses.c.value, qmasses.b.value, qmasses.t.value)
apfel.SetMassScaleReference(
diff --git a/benchmarks/eko/benchmark_strong_coupling.py b/benchmarks/eko/benchmark_strong_coupling.py
index d13a264bd..d60bd28b0 100644
--- a/benchmarks/eko/benchmark_strong_coupling.py
+++ b/benchmarks/eko/benchmark_strong_coupling.py
@@ -8,7 +8,7 @@
from eko.couplings import Couplings
from eko.io.runcards import TheoryCard
from eko.quantities.couplings import CouplingEvolutionMethod, CouplingsInfo
-from eko.quantities.heavy_quarks import QuarkMassScheme
+from eko.quantities.heavy_quarks import FlavorsNumber, QuarkMassScheme
# try to load LHAPDF - if not available, we'll use the cached values
try:
@@ -35,16 +35,11 @@
use_PEGASUS = False
-def ref_couplings(
- ref_values,
- ref_scale: float,
-) -> CouplingsInfo:
+def ref_couplings(ref_values, ref_scale: float, ref_nf: FlavorsNumber) -> CouplingsInfo:
return CouplingsInfo(
alphas=ref_values[0],
alphaem=ref_values[1],
- scale=ref_scale,
- max_num_flavs=6,
- num_flavs_ref=None,
+ ref=(ref_scale, ref_nf),
)
@@ -54,11 +49,11 @@ def test_a_s(self):
"""Tests the value of alpha_s (for now only at LO)
for a given set of parameters
"""
- # TODO @JCM: we need a source for this!
+ # source: JCM
known_val = 0.0091807954
ref_mu2 = 90
ask_q2 = 125
- ref = ref_couplings([0.1181, 0.007496], np.sqrt(ref_mu2))
+ ref = ref_couplings([0.1181, 0.007496], np.sqrt(ref_mu2), 5)
as_FFNS_LO = Couplings(
couplings=ref,
order=(1, 0),
@@ -80,7 +75,7 @@ def benchmark_LHA_paper(self):
# LO - FFNS
# note that the LO-FFNS value reported in :cite:`Giele:2002hx`
# was corrected in :cite:`Dittmar:2005ed`
- coupling_ref = ref_couplings([0.35, 0.007496], np.sqrt(2))
+ coupling_ref = ref_couplings([0.35, 0.007496], np.sqrt(2), 4)
as_FFNS_LO = Couplings(
couplings=coupling_ref,
order=(1, 0),
@@ -141,7 +136,7 @@ def benchmark_APFEL_ffns(self):
threshold_holder = matchings.Atlas.ffns(nf, 0.0)
for order in [1, 2, 3]:
as_FFNS = Couplings(
- couplings=ref_couplings(coupling_ref, scale_ref),
+ couplings=ref_couplings(coupling_ref, scale_ref, nf),
order=(order, 0),
method=CouplingEvolutionMethod.EXPANDED,
masses=threshold_holder.walls[1:-1],
@@ -212,8 +207,7 @@ def benchmark_pegasus_ffns(self):
}
# collect my values
threshold_holder = matchings.Atlas.ffns(nf, 0.0)
- couplings = ref_couplings(coupling_ref, scale_ref)
- couplings.max_num_flavs = 4
+ couplings = ref_couplings(coupling_ref, scale_ref, nf)
for order in [1, 2, 3, 4]:
as_FFNS = Couplings(
couplings=couplings,
@@ -259,6 +253,7 @@ def benchmark_APFEL_vfns(self):
Q2s = [1, 2**2, 3**2, 90**2, 100**2]
coupling_ref = np.array([0.118, 0.007496])
scale_ref = 91.0
+ nf_ref = 5
threshold_list = np.power([2, 4, 175], 2)
apfel_vals_dict = {
1: np.array(
@@ -292,7 +287,7 @@ def benchmark_APFEL_vfns(self):
# collect my values
for order in [1, 2, 3]:
as_VFNS = Couplings(
- couplings=ref_couplings(coupling_ref, scale_ref),
+ couplings=ref_couplings(coupling_ref, scale_ref, nf_ref),
order=(order, 0),
method=CouplingEvolutionMethod.EXPANDED,
masses=threshold_list,
@@ -339,8 +334,9 @@ def benchmark_APFEL_vfns_fact_to_ren(self):
]
coupling_ref = np.array([0.118, 0.007496])
scale_ref = 91.0
+ nf_refs = (5, 6)
fact_to_ren_lin_list = [0.567, 2.34]
- threshold_list = np.power([2, 2 * 4, 2 * 92], 2)
+ masses_list = np.power([2, 2 * 4, 2 * 92], 2)
apfel_vals_dict_list = [
{
1: np.array(
@@ -431,16 +427,16 @@ def benchmark_APFEL_vfns_fact_to_ren(self):
),
},
]
- for fact_to_ren_lin, apfel_vals_dict in zip(
- fact_to_ren_lin_list, apfel_vals_dict_list
+ for fact_to_ren_lin, apfel_vals_dict, nf_ref in zip(
+ fact_to_ren_lin_list, apfel_vals_dict_list, nf_refs
):
# collect my values
for order in [1, 2, 3]:
as_VFNS = Couplings(
- couplings=ref_couplings(coupling_ref, scale_ref),
+ couplings=ref_couplings(coupling_ref, scale_ref, nf_ref),
order=(order, 0),
method=CouplingEvolutionMethod.EXACT,
- masses=threshold_list.tolist(),
+ masses=masses_list.tolist(),
hqm_scheme=QuarkMassScheme.POLE,
thresholds_ratios=np.array([1.0, 1.0, 1.0]) / fact_to_ren_lin**2,
)
@@ -457,7 +453,7 @@ def benchmark_APFEL_vfns_fact_to_ren(self):
apfel.SetAlphaEvolution("exact")
apfel.SetAlphaQCDRef(coupling_ref[0], scale_ref)
apfel.SetVFNS()
- apfel.SetPoleMasses(*np.sqrt(threshold_list))
+ apfel.SetPoleMasses(*np.sqrt(masses_list))
apfel.SetRenFacRatio(1.0 / fact_to_ren_lin)
apfel.InitializeAPFEL()
# collect a_s
@@ -468,12 +464,18 @@ def benchmark_APFEL_vfns_fact_to_ren(self):
)
np.testing.assert_allclose(apfel_vals, np.array(apfel_vals_cur))
# check myself to APFEL
- np.testing.assert_allclose(apfel_vals, np.array(my_vals), rtol=2.5e-5)
+ np.testing.assert_allclose(
+ apfel_vals,
+ np.array(my_vals),
+ rtol=2.5e-5,
+ err_msg=f"{order=},{fact_to_ren_lin=}",
+ )
def benchmark_APFEL_vfns_threshold(self):
Q2s = np.power([30, 96, 150], 2)
coupling_ref = np.array([0.118, 0.007496])
scale_ref = 91.0
+ nf_ref = 4
threshold_list = np.power([30, 95, 240], 2)
thresholds_ratios = [2.34**2, 1.0**2, 0.5**2]
apfel_vals_dict = {
@@ -487,7 +489,7 @@ def benchmark_APFEL_vfns_threshold(self):
# collect my values
for order in [2, 3]:
as_VFNS = Couplings(
- couplings=ref_couplings(coupling_ref, scale_ref),
+ couplings=ref_couplings(coupling_ref, scale_ref, nf_ref),
order=(order, 0),
method=CouplingEvolutionMethod.EXPANDED,
masses=threshold_list.tolist(),
@@ -496,7 +498,6 @@ def benchmark_APFEL_vfns_threshold(self):
)
my_vals = []
for Q2 in Q2s:
- print(Q2)
my_vals.append(as_VFNS.a(Q2)[0])
# get APFEL numbers - if available else use cache
apfel_vals = apfel_vals_dict[order]
@@ -516,7 +517,6 @@ def benchmark_APFEL_vfns_threshold(self):
apfel_vals_cur = []
for Q2 in Q2s:
apfel_vals_cur.append(apfel.AlphaQCD(np.sqrt(Q2)) / (4.0 * np.pi))
- print(apfel_vals_cur)
np.testing.assert_allclose(apfel_vals, np.array(apfel_vals_cur))
# check myself to APFEL
np.testing.assert_allclose(apfel_vals, np.array(my_vals))
@@ -525,6 +525,7 @@ def benchmark_APFEL_vfns_msbar(self):
Q2s = np.power([3, 96, 150], 2)
coupling_ref = np.array([0.118, 0.007496])
scale_ref = 91.0
+ nf_ref = 5
Q2m = np.power([2.0, 4.0, 175], 2)
m2s = np.power((1.4, 4.0, 175), 2)
apfel_vals_dict = {
@@ -538,7 +539,7 @@ def benchmark_APFEL_vfns_msbar(self):
# collect my values
for order in [2, 3]:
as_VFNS = Couplings(
- couplings=ref_couplings(coupling_ref, scale_ref),
+ couplings=ref_couplings(coupling_ref, scale_ref, nf_ref),
order=(order, 0),
method=CouplingEvolutionMethod.EXPANDED,
masses=m2s.tolist(),
@@ -586,7 +587,7 @@ def benchmark_lhapdf_ffns_lo(self):
# collect my values
threshold_holder = matchings.Atlas.ffns(nf, 0.0)
as_FFNS_LO = Couplings(
- couplings=ref_couplings(coupling_ref, scale_ref),
+ couplings=ref_couplings(coupling_ref, scale_ref, nf),
order=(1, 0),
method=CouplingEvolutionMethod.EXACT,
masses=threshold_holder.walls[1:-1],
@@ -629,8 +630,9 @@ def benchmark_apfel_exact(self):
Q2s = [1e1, 1e2, 1e3, 1e4]
coupling_ref = np.array([0.118, 0.007496])
scale_ref = 90
+ nf = 3
# collect my values
- threshold_holder = matchings.Atlas.ffns(3, 0.0)
+ threshold_holder = matchings.Atlas.ffns(nf, 0.0)
# LHAPDF cache
apfel_vals_dict = {
1: np.array(
@@ -660,7 +662,7 @@ def benchmark_apfel_exact(self):
}
for order in range(1, 3 + 1):
sc = Couplings(
- couplings=ref_couplings(coupling_ref, scale_ref),
+ couplings=ref_couplings(coupling_ref, scale_ref, nf),
order=(order, 0),
method=CouplingEvolutionMethod.EXACT,
masses=threshold_holder.walls[1:-1],
@@ -696,8 +698,9 @@ def benchmark_lhapdf_exact(self):
Q2s = [1e1, 1e2, 1e3, 1e4]
coupling_ref = np.array([0.118, 0.007496])
scale_ref = 90
+ nf = 3
# collect my values
- threshold_holder = matchings.Atlas.ffns(3, 0.0)
+ threshold_holder = matchings.Atlas.ffns(nf, 0.0)
# LHAPDF cache
lhapdf_vals_dict = {
1: np.array(
@@ -735,7 +738,7 @@ def benchmark_lhapdf_exact(self):
}
for order in range(1, 4 + 1):
sc = Couplings(
- couplings=ref_couplings(coupling_ref, scale_ref),
+ couplings=ref_couplings(coupling_ref, scale_ref, nf),
order=(order, 0),
method=CouplingEvolutionMethod.EXACT,
masses=threshold_holder.walls[1:-1],
@@ -766,6 +769,7 @@ def benchmark_lhapdf_zmvfns_lo(self):
Q2s = [1, 1e1, 1e2, 1e3, 1e4]
coupling_ref = np.array([0.118, 0.007496])
scale_ref = 900
+ nf_ref = 5
m2c = 2.0
m2b = 25.0
m2t = 1500.0
@@ -785,7 +789,7 @@ def benchmark_lhapdf_zmvfns_lo(self):
# collect my values
as_VFNS_LO = Couplings(
- couplings=ref_couplings(coupling_ref, np.sqrt(scale_ref)),
+ couplings=ref_couplings(coupling_ref, np.sqrt(scale_ref), nf_ref),
order=(1, 0),
method=CouplingEvolutionMethod.EXACT,
masses=threshold_list,
@@ -836,10 +840,8 @@ def benchmark_APFEL_fact_to_ren_lha_settings(self, theory_card: TheoryCard):
theory = theory_card
theory.order = (3, 0)
theory.couplings.alphas = 0.35
- theory.couplings.scale = float(np.sqrt(2))
theory.couplings.alphaem = 0.007496
- theory.couplings.num_flavs_ref = 4
- theory.heavy.num_flavs_init = 3
+ theory.couplings.ref = (float(np.sqrt(2)), 4)
theory.xif = np.sqrt(1.0 / 2.0)
theory.heavy.masses.c.value = np.sqrt(2.0)
theory.heavy.masses.b.value = 4.5
@@ -881,7 +883,7 @@ def benchmark_APFEL_fact_to_ren_lha_settings(self, theory_card: TheoryCard):
apfel.SetTheory("QCD")
apfel.SetPerturbativeOrder(theory.order[0] - 1)
apfel.SetAlphaEvolution("exact")
- apfel.SetAlphaQCDRef(theory.couplings.alphas, theory.couplings.scale)
+ apfel.SetAlphaQCDRef(theory.couplings.alphas, theory.couplings.ref[0])
apfel.SetVFNS()
apfel.SetPoleMasses(qmasses.c.value, qmasses.b.value, qmasses.t.value)
apfel.SetMassMatchingScales(
diff --git a/benchmarks/ekobox/benchmark_evol_pdf.py b/benchmarks/ekobox/benchmark_evol_pdf.py
index 3a9dc0f4a..a0ddcaac4 100644
--- a/benchmarks/ekobox/benchmark_evol_pdf.py
+++ b/benchmarks/ekobox/benchmark_evol_pdf.py
@@ -21,15 +21,13 @@ def benchmark_evolve_single_member(
theory = theory_card
theory.order = (1, 0)
theory.couplings.alphas = 0.118000
- theory.couplings.scale = 91.1876
+ theory.couplings.ref = (91.1876, 5)
theory.couplings.alphaem = 0.007496
- theory.couplings.max_num_flavs = 3
- theory.heavy.num_flavs_max_pdf = 3
theory.heavy.masses.c.value = 1.3
theory.heavy.masses.b.value = 4.75
theory.heavy.masses.t.value = 172
op = operator_card
- op.mu0 = 5.0
+ op.init = (5.0, 4)
op.mugrid = mugrid
# lhapdf import (maybe i have to dump with a x*), do plots)
with lhapdf_path(test_pdf):
@@ -49,7 +47,7 @@ def benchmark_evolve_single_member(
ev_pdf = lhapdf.mkPDF("EvPDF", 0)
assert info["XMin"] == op.xgrid.raw[0]
assert info["SetDesc"] == "MyEvolvedPDF"
- assert info["MZ"] == theory.couplings.scale
+ assert info["MZ"] == theory.couplings.ref[0]
assert info["Debug"] == "Debug"
xgrid = op.xgrid.raw
for idx, mu2 in enumerate(op.mu2grid):
@@ -76,7 +74,7 @@ def benchmark_evolve_more_members(
theory = theory_card
theory.order = (1, 0)
op = operator_card
- op.mu0 = 1.0
+ op.init = (1.0, 3)
op.mugrid = [(10.0, 5), (100.0, 5)]
op.xgrid = XGrid([1e-7, 0.01, 0.1, 0.2, 0.3])
with lhapdf_path(test_pdf):
diff --git a/benchmarks/ekore/benchmark_ad.py b/benchmarks/ekore/benchmark_pegasus_ad_us_as2.py
similarity index 87%
rename from benchmarks/ekore/benchmark_ad.py
rename to benchmarks/ekore/benchmark_pegasus_ad_us_as2.py
index c3c8de01e..ebc53e3ae 100644
--- a/benchmarks/ekore/benchmark_ad.py
+++ b/benchmarks/ekore/benchmark_pegasus_ad_us_as2.py
@@ -1,4 +1,8 @@
-"""Benchmark the NLO anomalous dimensions against PEGASUS"""
+"""Benchmark the unpolarized NLO anomalous dimensions against PEGASUS.
+
+Recall that we obtained our representation not from PEGASUS, but derived it
+ourselves (see comment there)."""
+
import numpy as np
import pytest
@@ -7,39 +11,6 @@
from eko.constants import CA, CF, TR, zeta2, zeta3
-@pytest.mark.isolated
-def benchmark_melling_g3_pegasus():
- for N in [1, 2, 3, 4, 1 + 1j, 1 - 1j, 2 + 1j, 3 + 1j]:
- check_melling_g3_pegasus(N)
-
-
-def check_melling_g3_pegasus(N):
- S1 = h.S1(N)
- N1 = N + 1.0
- N2 = N + 2.0
- N3 = N + 3.0
- N4 = N + 4.0
- N5 = N + 5.0
- N6 = N + 6.0
- S11 = S1 + 1.0 / N1
- S12 = S11 + 1.0 / N2
- S13 = S12 + 1.0 / N3
- S14 = S13 + 1.0 / N4
- S15 = S14 + 1.0 / N5
- S16 = S15 + 1.0 / N6
-
- SPMOM = (
- 1.0000 * (zeta2 - S1 / N) / N
- - 0.9992 * (zeta2 - S11 / N1) / N1
- + 0.9851 * (zeta2 - S12 / N2) / N2
- - 0.9005 * (zeta2 - S13 / N3) / N3
- + 0.6621 * (zeta2 - S14 / N4) / N4
- - 0.3174 * (zeta2 - S15 / N5) / N5
- + 0.0699 * (zeta2 - S16 / N6) / N6
- )
- np.testing.assert_allclose(h.g_functions.mellin_g3(N, S1), SPMOM)
-
-
@pytest.mark.isolated
def benchmark_gamma_ns_1_pegasus():
# remember that singlet has pole at N=1
@@ -53,7 +24,6 @@ def check_gamma_1_pegasus(N, NF):
ZETA2 = zeta2
ZETA3 = zeta3
- # N = np.random.rand(1) + np.random.rand(1) * 1j
S1 = h.S1(N)
S2 = h.S2(N)
diff --git a/benchmarks/ekore/benchmark_pegasus_mellin.py b/benchmarks/ekore/benchmark_pegasus_mellin.py
new file mode 100644
index 000000000..addefe421
--- /dev/null
+++ b/benchmarks/ekore/benchmark_pegasus_mellin.py
@@ -0,0 +1,41 @@
+"""Benchmark the Mellin transforms against PEGASUS."""
+
+import numpy as np
+import pytest
+
+import ekore.anomalous_dimensions.unpolarized.space_like.as2 as ad_as2
+import ekore.harmonics as h
+from eko.constants import CA, CF, TR, zeta2, zeta3
+
+
+@pytest.mark.isolated
+def benchmark_melling_g3_pegasus():
+ for N in [1, 2, 3, 4, 1 + 1j, 1 - 1j, 2 + 1j, 3 + 1j]:
+ check_melling_g3_pegasus(N)
+
+
+def check_melling_g3_pegasus(N):
+ S1 = h.S1(N)
+ N1 = N + 1.0
+ N2 = N + 2.0
+ N3 = N + 3.0
+ N4 = N + 4.0
+ N5 = N + 5.0
+ N6 = N + 6.0
+ S11 = S1 + 1.0 / N1
+ S12 = S11 + 1.0 / N2
+ S13 = S12 + 1.0 / N3
+ S14 = S13 + 1.0 / N4
+ S15 = S14 + 1.0 / N5
+ S16 = S15 + 1.0 / N6
+
+ SPMOM = (
+ 1.0000 * (zeta2 - S1 / N) / N
+ - 0.9992 * (zeta2 - S11 / N1) / N1
+ + 0.9851 * (zeta2 - S12 / N2) / N2
+ - 0.9005 * (zeta2 - S13 / N3) / N3
+ + 0.6621 * (zeta2 - S14 / N4) / N4
+ - 0.3174 * (zeta2 - S15 / N5) / N5
+ + 0.0699 * (zeta2 - S16 / N6) / N6
+ )
+ np.testing.assert_allclose(h.g_functions.mellin_g3(N, S1), SPMOM)
diff --git a/benchmarks/ekore/benchmark_pegasus_ome_ps_as2.py b/benchmarks/ekore/benchmark_pegasus_ome_ps_as2.py
new file mode 100644
index 000000000..66b471c57
--- /dev/null
+++ b/benchmarks/ekore/benchmark_pegasus_ome_ps_as2.py
@@ -0,0 +1,199 @@
+"""Benchmark the polarized NNLO OME against PEGASUS"""
+
+import numpy as np
+import pytest
+
+import ekore.harmonics as h
+import ekore.operator_matrix_elements.polarized.space_like.as2 as ome_as2
+from eko.constants import CA, CF, TR, zeta2, zeta3
+
+
+@pytest.mark.isolated
+def benchmark_pegasus_ome_ps_as2():
+ # remember that singlet has pole at N=1
+ for N in [2, 3, 4, +1j, -1j, +1j, +1j]:
+ for NF in [3, 4, 5]:
+ check_pegasus_ome_ps_as2_s(N, NF)
+
+
+def check_pegasus_ome_ps_as2_s(N, NF):
+ # Test against pegasus implementation by Ignacio Borsa
+ ZETA2 = zeta2
+ ZETA3 = zeta3
+
+ S1 = h.S1(N)
+ S2 = h.S2(N)
+ S3 = h.S3(N)
+ #
+ NM = N - 1.0
+ N1 = N + 1.0
+ N2 = N + 2.0
+ NI = 1.0 / N
+ NMI = 1.0 / NM
+ N1I = 1.0 / N1
+ N2I = 1.0 / N2
+ #
+ S1M = S1 - NI
+ S2M = S2 - NI * NI
+ S3M = S3 - NI**3
+ S11 = S1 + N1I
+ S21 = S2 + N1I * N1I
+ S31 = S3 + N1I**3
+ S22 = S21 + N2I * N2I
+ ACG3 = h.g_functions.mellin_g3(N1, S11)
+ #
+ # CALL BET(N1,V1)
+ # CALL BET1(N1,V2)
+ # CALL BET2(N1,V3)
+ # CALL BET3(N1,V4)
+ V1 = (
+ h.polygamma.cern_polygamma((N1 + 1.0) / 2.0, 0)
+ - h.polygamma.cern_polygamma(N1 / 2.0, 0)
+ ) / 2.0
+ V2 = (
+ h.polygamma.cern_polygamma((N1 + 1.0) / 2.0, 1)
+ - h.polygamma.cern_polygamma(N1 / 2.0, 1)
+ ) / 4.0
+ V3 = (
+ h.polygamma.cern_polygamma((N1 + 1.0) / 2.0, 2)
+ - h.polygamma.cern_polygamma(N1 / 2.0, 2)
+ ) / 8.0
+ #
+ #
+ # ..The moments of the OME's DA_Hq^{PS,(2)} and DA_Hg^{S,(2)} given in
+ # Eqs. (138) and (111) of BBDKS. Note that for the former, an
+ # additional finite renormalization is needed to go from the Larin
+ # to the M scheme
+ #
+ #
+ # ... Anomalous dimension terms
+ #
+ G0QG_HAT = -8 * TR * NM / N / N1
+ #
+ G0GQ = -4 * CF * N2 / N / N1
+ #
+ G0QQ = -CF * (2 * (2.0 + 3.0 * N + 3.0 * N**2) / N / N1 - 8.0 * S1)
+ #
+ # ... Polynomials in N
+ POL1 = (
+ 12 * N**8 + 52 * N**7 + 60 * N**6 - 25 * N**4 - 2 * N**3 + 3 * N**2 + 8 * N + 4
+ )
+ #
+ POL2 = (
+ 2.0 * N**8
+ + 10.0 * N**7
+ + 22.0 * N**6
+ + 36.0 * N**5
+ + 29.0 * N**4
+ + 4.0 * N**3
+ + 33.0 * N**2
+ + 12.0 * N
+ + 4
+ )
+ #
+ POLR3 = 15 * N**6 + 45 * N**5 + 374 * N**4 + 601 * N**3 + 161 * N**2 - 24 * N + 36
+ #
+ POLR8 = (
+ -15 * N**8
+ - 60 * N**7
+ - 82 * N**6
+ - 44 * N**5
+ - 15 * N**4
+ - 4 * N**2
+ - 12 * N
+ - 8
+ )
+ #
+ # ... Finite renormalization term from Larin to M scheme
+ #
+ ZQQPS = -CF * TR * 8 * N2 * (N**2 - N - 1.0) / N**3 / N1**3
+ #
+ A2HQ = (
+ -4
+ * CF
+ * TR
+ * N2
+ / N**2
+ / N1**2
+ * (NM * (2 * S2 + ZETA2) - (4 * N**3 - 4 * N**2 - 3 * N - 1) / N**2 / N1**2)
+ + ZETA2 / 8 * G0QG_HAT * G0GQ
+ )
+ #
+ #
+ A2HG = (
+ CF
+ * TR
+ * (
+ 4.0
+ / 3
+ * NM
+ / N
+ / N1
+ * (-4.0 * S3 + S1**3 + 3.0 * S1 * S2 + 6.0 * S1 * ZETA2)
+ - 4
+ * (N**4 + 17.0 * N**3 + 43.0 * N**2 + 33.0 * N + 2)
+ * S2
+ / N**2
+ / N1**2
+ / N2
+ - 4 * (3.0 * N**2 + 3.0 * N - 2) * S1**2 / N**2 / N1 / N2
+ - 2 * NM * (3.0 * N**2 + 3.0 * N + 2) * ZETA2 / N**2 / N1**2
+ - 4 * (N**3 - 2.0 * N**2 - 22.0 * N - 36) * S1 / N**2 / N1 / N2
+ - 2 * POL1 / N**4 / N1**4 / N2
+ )
+ + CA
+ * TR
+ * (
+ 4 * (N**2 + 4.0 * N + 5) * S1**2 / N / N1**2 / N2
+ + 4 * (7.0 * N**3 + 24.0 * N**2 + 15.0 * N - 16) * S2 / N**2 / N1**2 / N2
+ + 8 * NM * N2 * ZETA2 / N**2 / N1**2
+ + 4 * (N**4 + 4.0 * N**3 - N**2 - 10.0 * N + 2) * S1 / N / N1**3 / N2
+ - 4 * POL2 / N**4 / N1**4 / N2
+ - 16 * NM / N / N1**2 * V2
+ + 4
+ * NM
+ / 3.0
+ / N
+ / N1
+ * (
+ 12.0 * ACG3
+ + 3.0 * V3
+ - 8.0 * S3
+ - S1**3
+ - 9.0 * S1 * S2
+ - 12.0 * S1 * V2
+ - 12.0 * V1 * ZETA2
+ - 3.0 * ZETA3
+ )
+ )
+ # ... added simplified Gamma0_gg+2*beta0
+ + 1 / 8 * G0QG_HAT * (8 * CA * (-2.0 / N / N1 + S1) - G0QQ)
+ )
+ #
+ # ..The moments of the OME's DA_{gq,H}^{S,(2)} and DA_{gg,H}^{S,(2)}
+ # given in Eqs. (175) and (188) of Bierenblaum et al.
+ #
+ A2GQ = (
+ CF
+ * TR
+ * N2
+ * (
+ 8 * (22.0 + 41.0 * N + 28.0 * N**2) / 27.0 / N / N1**3
+ - 8 * (2.0 + 5.0 * N) * S1 / 9.0 / N / N1**2
+ + 4 * (S1**2 + S2) / 3.0 / N / N1
+ )
+ )
+ #
+ A2GG = (
+ CA
+ * TR
+ * (2 * POLR3 / 27.0 / N**3 / N1**3 - 4 * (47.0 + 56.0 * N) * S1 / 27.0 / N1)
+ + CF * TR * POLR8 / N**4 / N1**4
+ )
+
+ cache = h.cache.reset()
+ omeS2 = ome_as2.A_singlet(N, cache, 0.0, NF)
+ np.testing.assert_allclose(omeS2[0, 0], A2GG, err_msg=f"gg,{N=}")
+ np.testing.assert_allclose(omeS2[0, 1], A2GQ, err_msg=f"gq,{N=}")
+ np.testing.assert_allclose(omeS2[2, 1], A2HQ + NF * ZQQPS, err_msg=f"hq,{N=}")
+ np.testing.assert_allclose(omeS2[2, 0], A2HG, err_msg=f"hg,{N=}")
diff --git a/benchmarks/lha_paper_bench.py b/benchmarks/lha_paper_bench.py
index 9254307dc..bdc48ce59 100644
--- a/benchmarks/lha_paper_bench.py
+++ b/benchmarks/lha_paper_bench.py
@@ -1,6 +1,7 @@
"""
Benchmark to :cite:`Giele:2002hx` (LO + NLO) and :cite:`Dittmar:2005ed` (NNLO).
"""
+
import os
import numpy as np
@@ -12,19 +13,15 @@
register(__file__)
+_sqrt2 = float(np.sqrt(2.0))
+
base_theory = {
"ModEv": "EXA",
- "Q0": np.sqrt(
- 2.0
- ), # Eq. (30) :cite:`Giele:2002hx`, Eq. (4.53) :cite:`Dittmar:2005ed`
- "mc": np.sqrt(
- 2.0
- ), # Eq. (34) :cite:`Giele:2002hx`, Eq. (4.56) :cite:`Dittmar:2005ed`
+ "Q0": _sqrt2, # Eq. (30) :cite:`Giele:2002hx`, Eq. (4.53) :cite:`Dittmar:2005ed`
+ "mc": _sqrt2, # Eq. (34) :cite:`Giele:2002hx`, Eq. (4.56) :cite:`Dittmar:2005ed`
"mb": 4.5,
"mt": 175,
- "Qref": np.sqrt(
- 2.0
- ), # Eq. (32) :cite:`Giele:2002hx`,Eq. (4.53) :cite:`Dittmar:2005ed`
+ "Qref": _sqrt2, # Eq. (32) :cite:`Giele:2002hx`,Eq. (4.53) :cite:`Dittmar:2005ed`
"alphas": 0.35, # Eq. (4.55) :cite:`Dittmar:2005ed`
"alphaqed": 0.007496,
"QED": 0,
@@ -78,11 +75,11 @@ def sv_theories(self, pto):
"""
low = self.theory.copy()
low["PTO"] = pto
- low["XIF"] = np.sqrt(1.0 / 2.0)
+ low["XIF"] = 1.0 / _sqrt2
low["ModSV"] = "exponentiated"
high = self.theory.copy()
high["PTO"] = pto
- high["XIF"] = np.sqrt(2.0)
+ high["XIF"] = _sqrt2
high["ModSV"] = "exponentiated"
return [high, low]
@@ -301,7 +298,7 @@ def benchmark_sv(self, pto):
sv_theory["kcThr"] = 1.0 + 1e-15
sv_theory["nfref"] = 4
sv_theory["EScaleVar"] = 0
- low["XIR"] = np.sqrt(2.0)
- high["XIR"] = np.sqrt(0.5)
+ low["XIR"] = _sqrt2
+ high["XIR"] = 1.0 / _sqrt2
self.run_lha([low, high])
diff --git a/crates/Cargo.toml b/crates/Cargo.toml
deleted file mode 100644
index 8a69f7202..000000000
--- a/crates/Cargo.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-[workspace]
-
-members = ["eko", "ekore"]
diff --git a/crates/README.md b/crates/README.md
new file mode 100644
index 000000000..baa27a087
--- /dev/null
+++ b/crates/README.md
@@ -0,0 +1,18 @@
+# Welcome to the rusty side of EKO!
+
+Here, we develop the Rust components of the EKO library.
+
+## Crates
+
+- `ekore` contains the underlying collinear anomalous dimensions and the operator matrix elements
+- `eko` is the glue between the Python side and the `ekore` crate
+
+## Files
+
+- `release.json` defines the release order of the crates
+ - only listed crates will be released
+ - dependent crates should be listed after the crates they depend on
+- `doc-header.html` is an HTML snippet to be included in every docs page to inject
+ KaTeX support
+- `bump-versions.py` increases the Rust versions in all crates consistently
+- `make_bib.py` generates the Rust function stubs which serve as a fake bibliography system
diff --git a/crates/bump-versions.py b/crates/bump-versions.py
new file mode 100644
index 000000000..6b7220f7e
--- /dev/null
+++ b/crates/bump-versions.py
@@ -0,0 +1,40 @@
+import json
+import sys
+from pathlib import Path
+
+import tomlkit
+
+HERE = Path(__file__).parent
+CRATES = json.loads((HERE / "release.json").read_text())
+
+
+def workspace(manifest, version):
+ manifest["workspace"]["package"]["version"] = version
+ return manifest
+
+
+def crate(manifest, version):
+ internals = set(manifest["dependencies"].keys()).intersection(CRATES)
+ for dep in internals:
+ manifest["dependencies"][dep]["version"] = version
+ return manifest
+
+
+def update(path, version, edit):
+ path = HERE / Path(path) / "Cargo.toml"
+ manifest = tomlkit.parse(path.read_text())
+ manifest = edit(manifest, version)
+ path.write_text(tomlkit.dumps(manifest))
+
+
+def main(version):
+ update("..", version, workspace)
+ for name in CRATES:
+ update(name, version, crate)
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 2:
+ raise ValueError(f"Pass a version (e.g. v0.0.0) to {sys.argv[0]}")
+ # `git describe` starts with a `v` which we need to remove again
+ main(sys.argv[1][1:])
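
For reference, the script above is normally driven by `poe bump-version` in the Deploy Crates workflow (presumably fed by `git describe`, whose output starts with a "v" as noted in the comment). A minimal, hypothetical manual invocation would look like this:

```python
# Hypothetical manual invocation of crates/bump-versions.py (normally run via
# `poe bump-version`); the tag is taken from `git describe` and starts with "v".
import subprocess

tag = subprocess.run(
    ["git", "describe"], capture_output=True, text=True, check=True
).stdout.strip()  # e.g. "v0.0.1"
subprocess.run(["python", "crates/bump-versions.py", tag], check=True)
```
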
diff --git a/crates/doc-header.html b/crates/doc-header.html
new file mode 100644
index 000000000..3b2f9651b
--- /dev/null
+++ b/crates/doc-header.html
@@ -0,0 +1,65 @@
+
+
+
+
diff --git a/crates/eko/Cargo.toml b/crates/eko/Cargo.toml
index 50aee6504..0fffdea1a 100644
--- a/crates/eko/Cargo.toml
+++ b/crates/eko/Cargo.toml
@@ -1,7 +1,19 @@
[package]
name = "eko"
-version = "0.1.0"
-edition = "2021"
+
+authors.workspace = true
+description.workspace = true
+readme.workspace = true
+categories.workspace = true
+edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+version.workspace = true
+
+[package.metadata.docs.rs]
+rustdoc-args = ["--html-in-header", "doc-header.html"]
[lib]
name = "ekors"
@@ -9,5 +21,4 @@ crate-type = ["cdylib"]
[dependencies]
num = "0.4.1"
-katexit = "0.1.4"
-ekore = { version = "0.1.0", path = "../ekore" }
+ekore = { path = "../ekore", version = "0.0.1" }
diff --git a/crates/eko/doc-header.html b/crates/eko/doc-header.html
new file mode 120000
index 000000000..22282661d
--- /dev/null
+++ b/crates/eko/doc-header.html
@@ -0,0 +1 @@
+../doc-header.html
\ No newline at end of file
diff --git a/crates/eko/pyproject.toml b/crates/eko/pyproject.toml
index 2fff64504..7b2921e04 100644
--- a/crates/eko/pyproject.toml
+++ b/crates/eko/pyproject.toml
@@ -3,8 +3,8 @@ requires = ["maturin>=1.1,<2.0"]
build-backend = "maturin"
[project]
-name = "ekors"
-requires-python = ">=3.8"
+name = "eko-rs"
+requires-python = ">=3.9"
classifiers = [
"Programming Language :: Rust",
"Programming Language :: Python :: Implementation :: CPython",
diff --git a/crates/eko/src/bib.rs b/crates/eko/src/bib.rs
new file mode 120000
index 000000000..1e713280e
--- /dev/null
+++ b/crates/eko/src/bib.rs
@@ -0,0 +1 @@
+../../ekore/src/bib.rs
\ No newline at end of file
diff --git a/crates/eko/src/lib.rs b/crates/eko/src/lib.rs
index e6b3e44cc..67bf8956a 100644
--- a/crates/eko/src/lib.rs
+++ b/crates/eko/src/lib.rs
@@ -1,12 +1,15 @@
//! Interface to the eko Python package.
-use ekore;
use ekore::harmonics::cache::Cache;
use std::ffi::c_void;
-mod mellin;
+pub mod bib;
+pub mod mellin;
/// QCD integration kernel inside quad.
+///
+/// # Safety
+/// This is the connection from Python, so we don't know what is on the other side.
#[no_mangle]
pub unsafe extern "C" fn rust_quad_ker_qcd(u: f64, rargs: *mut c_void) -> f64 {
let args = *(rargs as *mut QuadQCDargs);
@@ -23,11 +26,11 @@ pub unsafe extern "C" fn rust_quad_ker_qcd(u: f64, rargs: *mut c_void) -> f64 {
&mut c,
args.nf,
);
- for k in 0..args.order_qcd {
- for l in 0..2 {
- for m in 0..2 {
- re.push(res[k][l][m].re);
- im.push(res[k][l][m].im);
+ for gamma_s in res.iter().take(args.order_qcd) {
+ for col in gamma_s.iter().take(2) {
+ for el in col.iter().take(2) {
+ re.push(el.re);
+ im.push(el.im);
}
}
}
@@ -38,9 +41,9 @@ pub unsafe extern "C" fn rust_quad_ker_qcd(u: f64, rargs: *mut c_void) -> f64 {
&mut c,
args.nf,
);
- for j in 0..args.order_qcd {
- re.push(res[j].re);
- im.push(res[j].im);
+ for el in res.iter().take(args.order_qcd) {
+ re.push(el.re);
+ im.push(el.im);
}
}
// pass on
@@ -127,7 +130,10 @@ pub struct QuadQCDargs {
pub is_threshold: bool,
}
-/// empty placeholder function for python callback
+/// Empty placeholder function for python callback.
+///
+/// # Safety
+/// This is the connection back to Python, so we don't know what is on the other side.
pub unsafe extern "C" fn my_py(
_re_gamma: *const f64,
_im_gamma: *const f64,
@@ -161,6 +167,9 @@ pub unsafe extern "C" fn my_py(
///
/// This is required to make the arguments part of the API, otherwise it won't be added to the compiled
/// package (since it does not appear in the signature of `quad_ker_qcd`).
+///
+/// # Safety
+/// This is the connection from and back to Python, so we don't know what is on the other side.
#[no_mangle]
pub unsafe extern "C" fn empty_qcd_args() -> QuadQCDargs {
QuadQCDargs {
diff --git a/crates/eko/src/mellin.rs b/crates/eko/src/mellin.rs
index 5afc96f5f..1322bbe08 100644
--- a/crates/eko/src/mellin.rs
+++ b/crates/eko/src/mellin.rs
@@ -6,14 +6,16 @@
use num::complex::Complex;
use std::f64::consts::PI;
-#[cfg_attr(doc, katexit::katexit)]
/// Talbot inversion path.
///
/// Implements the algorithm presented in [\[Abate\]](crate::bib::Abate).
-/// $p_{\text{Talbot}}(t) = o + r \cdot ( \theta \cot(\theta) + i\theta)$ with $\theta = \pi(2t-1)$
-/// The default values for the parameters $r,o$ are given by $r = 1/2, o = 0$ for
-/// the non-singlet integrals and by $r = \frac{2}{5} \frac{16}{1 - \ln(x)}, o = 1$
-/// for the singlet sector. Note that the non-singlet kernels evolve poles only up to
+///
+/// $$p_{\text{Talbot}}(t) = o + r \cdot ( \theta \cot(\theta) + i\theta) ~ \text{with}~ \theta = \pi(2t-1)$$
+///
+/// The default values for the parameters $r,o$ are given by
+/// $r = \frac{2}{5} \frac{16}{0.1 - \ln(x)}$ and $o = 0$ for
+/// the non-singlet integrals and $o = 1$ for the singlet sector.
+/// Note that the non-singlet kernels evolve poles only up to
/// $N=0$ whereas the singlet kernels have poles up to $N=1$.
pub struct TalbotPath {
/// integration variable
@@ -27,16 +29,20 @@ pub struct TalbotPath {
}
impl TalbotPath {
- /// Auxilary angle.
+ /// Auxiliary angle.
fn theta(&self) -> f64 {
PI * (2.0 * self.t - 1.0)
}
/// Constructor from parameters.
pub fn new(t: f64, logx: f64, is_singlet: bool) -> Self {
+ // The prescription suggested by Abate for r is 0.4 * M / ( - logx)
+ // with M the number of accurate digits; Maria Ubiali suggested in her thesis M = 16.
+ // However, this seems to yield unstable results for the OME in the large x region
+ // so we add an additional regularization, which makes the path less "edgy" there.
Self {
t,
- r: 0.4 * 16.0 / (1.0 - logx),
+ r: 0.4 * 16.0 / (0.1 - logx),
o: if is_singlet { 1. } else { 0. },
}
}
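As a rough sketch of how the documented path translates into numbers (the free function `talbot_point` below is purely illustrative and not part of the crate; it just re-evaluates $p_{\text{Talbot}}(t)$ with the $r$ and $o$ chosen in `TalbotPath::new`):

```rust
use num::complex::Complex;
use std::f64::consts::PI;

/// Illustrative sketch: p_Talbot(t) = o + r * (theta * cot(theta) + i * theta),
/// with theta = pi * (2t - 1) and r, o as set in `TalbotPath::new`.
fn talbot_point(t: f64, logx: f64, is_singlet: bool) -> Complex<f64> {
    let theta = PI * (2.0 * t - 1.0);
    let r = 0.4 * 16.0 / (0.1 - logx);
    let o = if is_singlet { 1.0 } else { 0.0 };
    // theta * cot(theta) -> 1 for t -> 1/2; that limit is not treated here.
    Complex::new(o + r * theta * (theta.cos() / theta.sin()), r * theta)
}

fn main() {
    // Close to t = 1/2 the path crosses the real axis near o + r.
    println!("{}", talbot_point(0.5 + 1e-6, -5.0, true));
}
```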
diff --git a/crates/ekore/Cargo.toml b/crates/ekore/Cargo.toml
index ef8499dba..419bc126a 100644
--- a/crates/ekore/Cargo.toml
+++ b/crates/ekore/Cargo.toml
@@ -1,14 +1,23 @@
[package]
name = "ekore"
-version = "0.1.0"
-edition = "2021"
description = "EKO expressions"
-license = "GPL-3.0-or-later"
+
+authors.workspace = true
+readme.workspace = true
+categories.workspace = true
+edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+version.workspace = true
+
+[package.metadata.docs.rs]
+rustdoc-args = ["--html-in-header", "doc-header.html"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
num = "0.4.1"
float-cmp = "0.9.0"
-katexit = "0.1.4"
hashbrown = "0.14"
diff --git a/crates/ekore/doc-header.html b/crates/ekore/doc-header.html
new file mode 120000
index 000000000..22282661d
--- /dev/null
+++ b/crates/ekore/doc-header.html
@@ -0,0 +1 @@
+../doc-header.html
\ No newline at end of file
diff --git a/crates/ekore/refs.bib b/crates/ekore/refs.bib
index 3c63b69e7..e8ef02c2b 100644
--- a/crates/ekore/refs.bib
+++ b/crates/ekore/refs.bib
@@ -50,3 +50,22 @@ @article{Abate
journal = {International Journal for Numerical Methods in Engineering},
doi = {10.1002/nme.995}
}
+@phdthesis{MuselliPhD,
+ author = "Muselli, Claudio",
+ title = "{Resummations of Transverse Momentum Distributions}",
+ doi = "10.13130/muselli-claudio_phd2017-12-01",
+ school = "Università degli studi di Milano, Dipartimento di Fisica",
+ year = "2017"
+}
+@article{Vogt2004ns,
+ author = "Vogt, A.",
+ archivePrefix = "arXiv",
+ doi = "10.1016/j.cpc.2005.03.103",
+ eprint = "hep-ph/0408244",
+ journal = "Comput. Phys. Commun.",
+ pages = "65--92",
+ reportNumber = "NIKHEF-04-011",
+ title = "{Efficient evolution of unpolarized and polarized parton distributions with QCD-PEGASUS}",
+ volume = "170",
+ year = "2005"
+}
diff --git a/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike.rs b/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike.rs
index c2d0fb0d7..dec73bf04 100644
--- a/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike.rs
+++ b/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike.rs
@@ -1,14 +1,37 @@
//! The unpolarized, space-like anomalous dimensions at various couplings power.
+use crate::constants::{PID_NSM, PID_NSP, PID_NSV};
use crate::harmonics::cache::Cache;
use num::complex::Complex;
use num::Zero;
pub mod as1;
+pub mod as2;
+pub mod as3;
/// Compute the tower of the non-singlet anomalous dimensions.
-pub fn gamma_ns_qcd(order_qcd: usize, _mode: u16, c: &mut Cache, nf: u8) -> Vec<Complex<f64>> {
+pub fn gamma_ns_qcd(order_qcd: usize, mode: u16, c: &mut Cache, nf: u8) -> Vec<Complex<f64>> {
let mut gamma_ns = vec![Complex::<f64>::zero(); order_qcd];
gamma_ns[0] = as1::gamma_ns(c, nf);
+ // NLO and beyond
+ if order_qcd >= 2 {
+ let gamma_ns_1 = match mode {
+ PID_NSP => as2::gamma_nsp(c, nf),
+ // To fill the full valence vector in NNLO we need to add gamma_ns^1 explicitly here
+ PID_NSM | PID_NSV => as2::gamma_nsm(c, nf),
+ _ => panic!("Unkown non-singlet sector element"),
+ };
+ gamma_ns[1] = gamma_ns_1
+ }
+ // NNLO and beyond
+ if order_qcd >= 3 {
+ let gamma_ns_2 = match mode {
+ PID_NSP => as3::gamma_nsp(c, nf),
+ PID_NSM => as3::gamma_nsm(c, nf),
+ PID_NSV => as3::gamma_nsv(c, nf),
+ _ => panic!("Unkown non-singlet sector element"),
+ };
+ gamma_ns[2] = gamma_ns_2
+ }
gamma_ns
}
@@ -22,5 +45,13 @@ pub fn gamma_singlet_qcd(order_qcd: usize, c: &mut Cache, nf: u8) -> Vec<[[Compl
order_qcd
];
gamma_S[0] = as1::gamma_singlet(c, nf);
+ // NLO and beyond
+ if order_qcd >= 2 {
+ gamma_S[1] = as2::gamma_singlet(c, nf);
+ }
+ // NNLO and beyond
+ if order_qcd >= 3 {
+ gamma_S[2] = as3::gamma_singlet(c, nf);
+ }
gamma_S
}
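A small usage sketch of the extended towers (assuming the `anomalous_dimensions`, `constants` and `harmonics` modules are public at the crate root, as the imports elsewhere in this patch suggest; the numbers are arbitrary):

```rust
use ekore::anomalous_dimensions::unpolarized::spacelike::{gamma_ns_qcd, gamma_singlet_qcd};
use ekore::constants::PID_NSM;
use ekore::harmonics::cache::Cache;
use num::complex::Complex;

fn main() {
    // One cache per Mellin N; here N = 2 with nf = 4 active flavors.
    let mut c = Cache::new(Complex::new(2.0, 0.0));

    // NNLO tower (order_qcd = 3) of the valence-like non-singlet sector,
    // ordered as [LO, NLO, NNLO].
    let gamma_ns = gamma_ns_qcd(3, PID_NSM, &mut c, 4);
    assert_eq!(gamma_ns.len(), 3);

    // The singlet tower collects one 2x2 matrix [[qq, qg], [gq, gg]] per order.
    let gamma_s = gamma_singlet_qcd(3, &mut c, 4);
    let _gamma_qg_nlo = gamma_s[1][0][1];
}
```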
diff --git a/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike/as2.rs b/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike/as2.rs
new file mode 100644
index 000000000..b2b16459c
--- /dev/null
+++ b/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike/as2.rs
@@ -0,0 +1,338 @@
+//! NLO QCD
+
+use ::num::complex::Complex;
+use std::f64::consts::LN_2;
+
+use crate::constants::{CA, CF, TR, ZETA2, ZETA3};
+use crate::harmonics::cache::{Cache, K};
+
+/// Compute the valence-like non-singlet anomalous dimension.
+///
+/// Implements Eq. (3.6) of [\[Moch:2004pa\]][crate::bib::Moch2004pa].
+pub fn gamma_nsm(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let Sp1m = c.get(K::S1mh);
+ let Sp2m = c.get(K::S2mh);
+ let Sp3m = c.get(K::S3mh);
+ let g3n = c.get(K::G3);
+ #[rustfmt::skip]
+ let gqq1m_cfca = 16.0 * g3n
+ - (144.0 + N * (1.0 + N) * (156.0 + N * (340.0 + N * (655.0 + 51.0 * N * (2.0 + N)))))
+ / (18.0 * N.powu(3) * (1. + N).powu(3))
+ + (-14.666666666666666 + 8.0 / N - 8.0 / (1.0 + N)) * S2
+ - (4.0 * Sp2m) / (N + N.powu(2))
+ + S1 * (29.77777777777778 + 16.0 / N.powu(2) - 16.0 * S2 + 8.0 * Sp2m)
+ + 2.0 * Sp3m
+ + 10.0 * ZETA3
+ + ZETA2 * (16.0 * S1 - 16.0 * Sp1m - (16.0 * (1. + N * LN_2)) / N);
+ #[rustfmt::skip]
+ let gqq1m_cfcf = -32. * g3n
+ + (24. - N * (-32. + 3. * N * (-8. + N * (3. + N) * (3. + N.powu(2)))))
+ / (2. * N.powu(3) * (1. + N).powu(3))
+ + (12. - 8. / N + 8. / (1. + N)) * S2
+ + S1 * (-24. / N.powu(2) - 8. / (1. + N).powu(2) + 16. * S2 - 16. * Sp2m)
+ + (8. * Sp2m) / (N + N.powu(2))
+ - 4. * Sp3m
+ - 20. * ZETA3
+ + ZETA2 * (-32. * S1 + 32. * Sp1m + 32. * (1. / N + LN_2));
+ #[rustfmt::skip]
+ let gqq1m_cfnf = (-12. + N * (20. + N * (47. + 3. * N * (2. + N))))
+ / (9. * N.powu(2) * (1. + N).powu(2))
+ - (40. * S1) / 9.
+ + (8. * S2) / 3.;
+
+ CF * (CA * gqq1m_cfca + CF * gqq1m_cfcf + 2.0 * TR * (nf as f64) * gqq1m_cfnf)
+}
+
+/// Compute the singlet-like non-singlet anomalous dimension.
+///
+/// Implements Eq. (3.5) of [\[Moch:2004pa\]][crate::bib::Moch2004pa].
+pub fn gamma_nsp(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let Sp1p = c.get(K::S1h);
+ let Sp2p = c.get(K::S2h);
+ let Sp3p = c.get(K::S3h);
+ let g3n = c.get(K::G3);
+ #[rustfmt::skip]
+ let gqq1p_cfca = -16. * g3n
+ + (132. - N * (340. + N * (655. + 51. * N * (2. + N))))
+ / (18. * N.powu(2) * (1. + N).powu(2))
+ + (-14.666666666666666 + 8. / N - 8. / (1. + N)) * S2
+ - (4. * Sp2p) / (N + N.powu(2))
+ + S1 * (29.77777777777778 - 16. / N.powu(2) - 16. * S2 + 8. * Sp2p)
+ + 2. * Sp3p
+ + 10. * ZETA3
+ + ZETA2 * (16. * S1 - 16. * Sp1p + 16. * (1. / N - LN_2));
+ #[rustfmt::skip]
+ let gqq1p_cfcf = 32. * g3n
+ - (8. + N * (32. + N * (40. + 3. * N * (3. + N) * (3. + N.powu(2)))))
+ / (2. * N.powu(3) * (1. + N).powu(3))
+ + (12. - 8. / N + 8. / (1. + N)) * S2
+ + S1 * (40. / N.powu(2) - 8. / (1. + N).powu(2) + 16. * S2 - 16. * Sp2p)
+ + (8. * Sp2p) / (N + N.powu(2))
+ - 4. * Sp3p
+ - 20. * ZETA3
+ + ZETA2 * (-32. * S1 + 32. * Sp1p + 32. * (-(1. / N) + LN_2));
+ #[rustfmt::skip]
+ let gqq1p_cfnf = (-12. + N * (20. + N * (47. + 3. * N * (2. + N))))
+ / (9. * N.powu(2) * (1. + N).powu(2))
+ - (40. * S1) / 9.
+ + (8. * S2) / 3.;
+
+ CF * (CA * gqq1p_cfca + CF * gqq1p_cfcf + 2.0 * TR * (nf as f64) * gqq1p_cfnf)
+}
+
+/// Compute the pure-singlet quark-quark anomalous dimension.
+///
+/// Implements Eq. (3.6) of [\[Vogt:2004mw\]][crate::bib::Vogt2004mw].
+pub fn gamma_ps(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let gqqps1_nfcf = (-4. * (2. + N * (5. + N)) * (4. + N * (4. + N * (7. + 5. * N))))
+ / ((-1. + N) * N.powu(3) * (1. + N).powu(3) * (2. + N).powu(2));
+ 2.0 * TR * (nf as f64) * CF * gqqps1_nfcf
+}
+
+/// Compute the quark-gluon singlet anomalous dimension.
+///
+/// Implements Eq. (3.7) of [\[Vogt:2004mw\]][crate::bib::Vogt2004mw].
+pub fn gamma_qg(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let Sp2p = c.get(K::S2h);
+ #[rustfmt::skip]
+ let gqg1_nfca = (-4.
+ * (16.
+ + N * (64.
+ + N * (104.
+ + N * (128. + N * (85. + N * (36. + N * (25. + N * (15. + N * (6. + N))))))))))
+ / ((-1. + N) * N.powu(3) * (1. + N).powu(3) * (2. + N).powu(3))
+ - (16. * (3. + 2. * N) * S1) / (2. + 3. * N + N.powu(2)).powu(2)
+ + (4. * (2. + N + N.powu(2)) * S1.powu(2)) / (N * (2. + 3. * N + N.powu(2)))
+ - (4. * (2. + N + N.powu(2)) * S2) / (N * (2. + 3. * N + N.powu(2)))
+ + (4. * (2. + N + N.powu(2)) * Sp2p) / (N * (2. + 3. * N + N.powu(2)));
+ #[rustfmt::skip]
+ let gqg1_nfcf = (-2. * (4. + N * (8. + N * (1. + N) * (25. + N * (26. + 5. * N * (2. + N))))))
+ / (N.powu(3) * (1. + N).powu(3) * (2. + N))
+ + (8. * S1) / N.powu(2)
+ - (4. * (2. + N + N.powu(2)) * S1.powu(2)) / (N * (2. + 3. * N + N.powu(2)))
+ + (4. * (2. + N + N.powu(2)) * S2) / (N * (2. + 3. * N + N.powu(2)));
+ 2.0 * TR * (nf as f64) * (CA * gqg1_nfca + CF * gqg1_nfcf)
+}
+
+/// Compute the gluon-quark singlet anomalous dimension.
+///
+/// Implements Eq. (3.8) of [\[Vogt:2004mw\]][crate::bib::Vogt2004mw].
+pub fn gamma_gq(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let Sp2p = c.get(K::S2h);
+ #[rustfmt::skip]
+ let ggq1_cfcf = (-8.
+ + 2. * N * (-12. + N * (-1. + N * (28. + N * (43. + 6. * N * (5. + 2. * N))))))
+ / ((-1. + N) * N.powu(3) * (1. + N).powu(3))
+ - (4. * (10. + N * (17. + N * (8. + 5. * N))) * S1) / ((-1. + N) * N * (1. + N).powu(2))
+ + (4. * (2. + N + N.powu(2)) * S1.powu(2)) / (N * (-1. + N.powu(2)))
+ + (4. * (2. + N + N.powu(2)) * S2) / (N * (-1. + N.powu(2)));
+ #[rustfmt::skip]
+ let ggq1_cfca = (-4.
+ * (144.
+ + N * (432.
+ + N * (-152.
+ + N * (-1304.
+ + N * (-1031.
+ + N * (695. + N * (1678. + N * (1400. + N * (621. + 109. * N))))))))))
+ / (9. * N.powu(3) * (1. + N).powu(3) * (-2. + N + N.powu(2)).powu(2))
+ + (4. * (-12. + N * (-22. + 41. * N + 17. * N.powu(3))) * S1)
+ / (3. * (-1. + N).powu(2) * N.powu(2) * (1. + N))
+ + ((8. + 4. * N + 4. * N.powu(2)) * S1.powu(2)) / (N - N.powu(3))
+ + ((8. + 4. * N + 4. * N.powu(2)) * S2) / (N - N.powu(3))
+ + (4. * (2. + N + N.powu(2)) * Sp2p) / (N * (-1. + N.powu(2)));
+ #[rustfmt::skip]
+ let ggq1_cfnf = (8. * (16. + N * (27. + N * (13. + 8. * N))))
+ / (9. * (-1. + N) * N * (1. + N).powu(2))
+ - (8. * (2. + N + N.powu(2)) * S1) / (3. * N * (-1. + N.powu(2)));
+ CF * (CA * ggq1_cfca + CF * ggq1_cfcf + 2.0 * TR * (nf as f64) * ggq1_cfnf)
+}
+
+/// Compute the gluon-gluon singlet anomalous dimension.
+///
+/// Implements Eq. (3.9) of [\[Vogt:2004mw\]][crate::bib::Vogt2004mw].
+pub fn gamma_gg(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let Sp1p = c.get(K::S1h);
+ let Sp2p = c.get(K::S2h);
+ let Sp3p = c.get(K::S3h);
+ let g3n = c.get(K::G3);
+ #[rustfmt::skip]
+ let ggg1_caca = 16. * g3n
+ - (2.
+ * (576.
+ + N * (1488.
+ + N * (560.
+ + N * (-1248.
+ + N * (-1384.
+ + N * (1663.
+ + N * (4514.
+ + N * (4744.
+ + N * (3030.
+ + N * (1225. + 48. * N * (7. + N))))))))))))
+ / (9. * (-1. + N).powu(2) * N.powu(3) * (1. + N).powu(3) * (2. + N).powu(3))
+ + S1 * (29.77777777777778 + 16. / (-1. + N).powu(2) + 16. / (1. + N).powu(2)
+ - 16. / (2. + N).powu(2)
+ - 8. * Sp2p)
+ + (16. * (1. + N + N.powu(2)) * Sp2p) / (N * (1. + N) * (-2. + N + N.powu(2)))
+ - 2. * Sp3p
+ - 10. * ZETA3
+ + ZETA2 * (-16. * S1 + 16. * Sp1p + 16. * (-(1. / N) + LN_2));
+ #[rustfmt::skip]
+ let ggg1_canf = (8. * (6. + N * (1. + N) * (28. + N * (1. + N) * (13. + 3. * N * (1. + N)))))
+ / (9. * N.powu(2) * (1. + N).powu(2) * (-2. + N + N.powu(2)))
+ - (40. * S1) / 9.;
+ #[rustfmt::skip]
+ let ggg1_cfnf = (2.
+ * (-8.
+ + N * (-8.
+ + N * (-10. + N * (-22. + N * (-3. + N * (6. + N * (8. + N * (4. + N)))))))))
+ / (N.powu(3) * (1. + N).powu(3) * (-2. + N + N.powu(2)));
+
+ CA * CA * ggg1_caca + 2.0 * TR * (nf as f64) * (CA * ggg1_canf + CF * ggg1_cfnf)
+}
+
+/// Compute the singlet anomalous dimension matrix.
+pub fn gamma_singlet(c: &mut Cache, nf: u8) -> [[Complex<f64>; 2]; 2] {
+ let gamma_qq = gamma_nsp(c, nf) + gamma_ps(c, nf);
+ [
+ [gamma_qq, gamma_qg(c, nf)],
+ [gamma_gq(c, nf), gamma_gg(c, nf)],
+ ]
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::cmplx;
+ use crate::{anomalous_dimensions::unpolarized::spacelike::as2::*, harmonics::cache::Cache};
+ use float_cmp::assert_approx_eq;
+ use num::complex::Complex;
+ use num::traits::Pow;
+ use std::f64::consts::PI;
+
+ const NF: u8 = 5;
+
+ #[test]
+ fn physical_constraints() {
+ // number conservation
+ let mut c = Cache::new(cmplx![1., 0.]);
+ assert_approx_eq!(f64, gamma_nsm(&mut c, NF).re, 0.0, epsilon = 2e-6);
+
+ // momentum conservation
+ let mut c = Cache::new(cmplx![2., 0.]);
+ let gS1 = gamma_singlet(&mut c, NF);
+
+ // gluon momentum conservation
+ assert_approx_eq!(f64, (gS1[0][1] + gS1[1][1]).re, 0.0, epsilon = 4e-5);
+ // quark momentum conservation
+ assert_approx_eq!(f64, (gS1[0][0] + gS1[1][0]).re, 0.0, epsilon = 2e-6);
+ }
+
+ #[test]
+ fn N2() {
+ // reference values are obtained from MMa
+ let mut c = Cache::new(cmplx![2., 0.]);
+
+ // ns+
+ assert_approx_eq!(
+ f64,
+ gamma_nsp(&mut c, NF).re,
+ (-112.0 * CF + 376.0 * CA - 64.0 * (NF as f64)) * CF / 27.0,
+ epsilon = 2e-6
+ );
+
+ // ns-
+ let check = ((34.0 / 27.0 * (-47.0 + 6. * PI.pow(2)) - 16.0 * ZETA3) * CF
+ + (373.0 / 9.0 - 34.0 * PI.pow(2) / 9.0 + 8.0 * ZETA3) * CA
+ - 64.0 * (NF as f64) / 27.0)
+ * CF;
+ assert_approx_eq!(f64, gamma_nsm(&mut c, NF).re, check, epsilon = 2e-6);
+
+ // singlet sector
+ let gS1 = gamma_singlet(&mut c, NF);
+ // ps
+ assert_approx_eq!(
+ f64,
+ gamma_ps(&mut c, NF).re,
+ -40.0 * CF * (NF as f64) / 27.0
+ );
+ // qg
+ assert_approx_eq!(
+ f64,
+ gS1[0][1].re,
+ (-74.0 * CF - 35.0 * CA) * (NF as f64) / 27.0
+ );
+ // gq
+ assert_approx_eq!(
+ f64,
+ gS1[1][0].re,
+ (112.0 * CF - 376.0 * CA + 104.0 * (NF as f64)) * CF / 27.0,
+ epsilon = 1e-13
+ );
+ }
+
+ #[test]
+ fn N3() {
+ let mut c = Cache::new(cmplx![3., 0.]);
+ // ns+
+ let check = ((-34487.0 / 432.0 + 86.0 * PI.pow(2) / 9.0 - 16.0 * ZETA3) * CF
+ + (459.0 / 8.0 - 43.0 * PI.pow(2) / 9.0 + 8.0 * ZETA3) * CA
+ - 415.0 * (NF as f64) / 108.0)
+ * CF;
+ assert_approx_eq!(f64, gamma_nsp(&mut c, NF).re, check, epsilon = 2e-6);
+
+ // singlet sector
+ let gS1 = gamma_singlet(&mut c, NF);
+ // ps
+ assert_approx_eq!(
+ f64,
+ gamma_ps(&mut c, NF).re,
+ -1391.0 * CF * (NF as f64) / 5400.0
+ );
+ // gq
+ assert_approx_eq!(
+ f64,
+ gS1[1][0].re,
+ (973.0 / 432.0 * CF
+ + (2801.0 / 5400.0 - 7.0 * PI.pow(2) / 9.0) * CA
+ + 61.0 / 54.0 * (NF as f64))
+ * CF
+ );
+ // gg
+ assert_approx_eq!(
+ f64,
+ gS1[1][1].re,
+ (-79909.0 / 3375.0 + 194.0 * PI.pow(2) / 45.0 - 8.0 * ZETA3) * CA.pow(2)
+ - 967.0 / 270.0 * CA * (NF as f64)
+ + 541.0 / 216.0 * CF * (NF as f64),
+ epsilon = 3e-5
+ );
+ }
+
+ #[test]
+ fn N4() {
+ let mut c = Cache::new(cmplx![4., 0.]);
+ // singlet sector
+ let gS1 = gamma_singlet(&mut c, NF);
+ // qg
+ assert_approx_eq!(
+ f64,
+ gS1[0][1].re,
+ (-56317.0 / 18000.0 * CF + 16387.0 / 9000.0 * CA) * (NF as f64),
+ epsilon = 1e-14
+ )
+ }
+}
diff --git a/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike/as3.rs b/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike/as3.rs
new file mode 100644
index 000000000..14354f736
--- /dev/null
+++ b/crates/ekore/src/anomalous_dimensions/unpolarized/spacelike/as3.rs
@@ -0,0 +1,455 @@
+//! NNLO QCD
+
+use ::num::complex::Complex;
+use num::traits::Pow;
+
+use crate::cmplx;
+use crate::constants::{ZETA2, ZETA3};
+use crate::harmonics::cache::{Cache, K};
+
+/// Compute the valence-like non-singlet anomalous dimension.
+///
+/// Implements Eq. (3.8) of [\[Moch:2004pa\]][crate::bib::Moch2004pa].
+pub fn gamma_nsm(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let S3 = c.get(K::S3);
+ let E1 = S1 / N.powu(2) + (S2 - ZETA2) / N;
+ let E2 = 2.0 * (-S1 / N.powu(3) + (ZETA2 - S2) / N.powu(2) - (S3 - ZETA3) / N);
+
+ #[rustfmt::skip]
+ let pm2 =
+ -1174.898 * (S1 - 1.0 / N)
+ + 1295.470
+ - 714.1 * S1 / N
+ - 433.2 / (N + 3.0)
+ + 297.0 / (N + 2.0)
+ - 3505.0 / (N + 1.0)
+ + 1860.2 / N
+ - 1465.2 / N.powu(2)
+ + 399.2 * 2.0 / N.powu(3)
+ - 320.0 / 9.0 * 6.0 / N.powu(4)
+ + 116.0 / 81.0 * 24.0 / N.powu(5)
+ + 684.0 * E1
+ + 251.2 * E2;
+
+ #[rustfmt::skip]
+ let pm2_nf =
+ 183.187 * (S1 - 1.0 / N)
+ - 173.933
+        + 5120. / 81.0 * S1 / N
+ + 34.76 / (N + 3.0)
+ + 77.89 / (N + 2.0)
+ + 406.5 / (N + 1.0)
+ - 216.62 / N
+ + 172.69 / N.powu(2)
+ - 3216.0 / 81.0 * 2.0 / N.powu(3)
+ + 256.0 / 81.0 * 6.0 / N.powu(4)
+ - 65.43 * E1
+ + 1.136 * 6.0 / (N + 1.0).powu(4);
+
+ #[rustfmt::skip]
+ let pf2_nfnf =
+ -(
+ 17.0 / 72.0
+ - 2.0 / 27.0 * S1
+ - 10.0 / 27.0 * S2
+ + 2.0 / 9.0 * S3
+ - (12.0 * N.powu(4) + 2.0 * N.powu(3) - 12.0 * N.powu(2) - 2.0 * N + 3.0)
+ / (27.0 * N.powu(3) * (N + 1.0).powu(3))
+        ) * 32.0 / 3.0;
+
+ let result = pm2 + (nf as f64) * pm2_nf + (nf as f64).pow(2) * pf2_nfnf;
+ -1. * result
+}
+
+/// Compute the singlet-like non-singlet anomalous dimension.
+///
+/// Implements Eq. (3.7) of [\[Moch:2004pa\]][crate::bib::Moch2004pa].
+pub fn gamma_nsp(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let S3 = c.get(K::S3);
+ let E1 = S1 / N.powu(2) + (S2 - ZETA2) / N;
+ let E2 = 2.0 * (-S1 / N.powu(3) + (ZETA2 - S2) / N.powu(2) - (S3 - ZETA3) / N);
+
+ #[rustfmt::skip]
+ let pp2 =
+ -1174.898 * (S1 - 1.0 / N)
+ + 1295.384
+ - 714.1 * S1 / N
+ - 522.1 / (N + 3.0)
+ + 243.6 / (N + 2.0)
+ - 3135.0 / (N + 1.0)
+ + 1641.1 / N
+ - 1258.0 / N.powu(2)
+ + 294.9 * 2.0 / N.powu(3)
+ - 800. / 27.0 * 6.0 / N.powu(4)
+ + 128. / 81.0 * 24.0 / N.powu(5)
+ + 563.9 * E1
+ + 256.8 * E2;
+
+ #[rustfmt::skip]
+ let pp2_nf =
+ 183.187 * (S1 - 1.0 / N)
+ - 173.924
+ + 5120. / 81.0 * S1 / N
+ + 44.79 / (N + 3.0)
+ + 72.94 / (N + 2.0)
+ + 381.1 / (N + 1.0)
+ - 197.0 / N
+ + 152.6 / N.powu(2)
+ - 2608.0 / 81.0 * 2.0 / N.powu(3)
+ + 192.0 / 81.0 * 6.0 / N.powu(4)
+ - 56.66 * E1
+ + 1.497 * 6.0 / (N + 1.0).powu(4);
+
+ #[rustfmt::skip]
+ let pf2_nfnf =
+ -(
+ 17.0 / 72.0
+ - 2.0 / 27.0 * S1
+ - 10.0 / 27.0 * S2
+ + 2.0 / 9.0 * S3
+ - (12.0 * N.powu(4) + 2.0 * N.powu(3) - 12.0 * N.powu(2) - 2.0 * N + 3.0)
+ / (27.0 * N.powu(3) * (N + 1.0).powu(3))
+        ) * 32.0 / 3.0;
+
+ let result = pp2 + (nf as f64) * pp2_nf + (nf as f64).pow(2) * pf2_nfnf;
+ -1. * result
+}
+
+/// Compute the valence non-singlet anomalous dimension.
+///
+/// Implements Eq. (3.9) of [\[Moch:2004pa\]][crate::bib::Moch2004pa].
+pub fn gamma_nsv(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let S3 = c.get(K::S3);
+ let E1 = S1 / N.powu(2) + (S2 - ZETA2) / N;
+ let E2 = 2.0 * (-S1 / N.powu(3) + (ZETA2 - S2) / N.powu(2) - (S3 - ZETA3) / N);
+ let B11 = -(S1 + 1.0 / (N + 1.0)) / (N + 1.0);
+ let B12 = -(S1 + 1.0 / (N + 1.0) + 1.0 / (N + 2.0)) / (N + 2.0);
+
+ let B1M = if N.im.abs() < 1.0e-5 && (N - 1.0).re.abs() < 1.0e-5 {
+ cmplx![-ZETA2, 0.]
+ } else {
+ -(S1 - 1.0 / N) / (N - 1.0)
+ };
+
+ #[rustfmt::skip]
+ let ps2 = -(
+ -163.9 * (B1M + S1 / N)
+ - 7.208 * (B11 - B12)
+ + 4.82 * (1.0 / (N + 3.0) - 1.0 / (N + 4.0))
+ - 43.12 * (1.0 / (N + 2.0) - 1.0 / (N + 3.0))
+ + 44.51 * (1.0 / (N + 1.0) - 1.0 / (N + 2.0))
+ + 151.49 * (1.0 / N - 1.0 / (N + 1.0))
+ - 178.04 / N.powu(2)
+ + 6.892 * 2.0 / N.powu(3)
+ - 40.0 / 27.0 * (-2.0 * 6.0 / N.powu(4) - 24.0 / N.powu(5))
+ - 173.1 * E1
+ + 46.18 * E2
+ );
+
+ gamma_nsm(c, nf) + (nf as f64) * ps2
+}
+
+/// Compute the pure-singlet quark-quark anomalous dimension.
+///
+/// Implements Eq. (3.10) of [\[Vogt:2004mw\]][crate::bib::Vogt2004mw].
+pub fn gamma_ps(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let S3 = c.get(K::S3);
+
+ let E1 = S1 / N.powu(2) + (S2 - ZETA2) / N;
+ let E11 = (S1 + 1.0 / (N + 1.0)) / (N + 1.0).powu(2)
+ + (S2 + 1.0 / (N + 1.0).powu(2) - ZETA2) / (N + 1.0);
+ let B21 = ((S1 + 1.0 / (N + 1.0)).powu(2) + S2 + 1.0 / (N + 1.0).powu(2)) / (N + 1.0);
+ let B3 = -(S1.powu(3) + 3.0 * S1 * S2 + 2.0 * S3) / N;
+
+ #[rustfmt::skip]
+ let B31 = -(
+ (S1 + 1.0 / (N + 1.0)).powu(3)
+ + 3.0 * (S1 + 1.0 / (N + 1.0)) * (S2 + 1.0 / (N + 1.0).powu(2))
+ + 2.0 * (S3 + 1.0 / (N + 1.0).powu(3))
+ ) / (N + 1.0);
+
+ #[rustfmt::skip]
+ let ps1 =
+ -3584.0 / 27.0 * (-1.0 / (N - 1.0).powu(2) + 1.0 / N.powu(2))
+ - 506.0 * (1.0 / (N - 1.0) - 1.0 / N)
+ + 160.0 / 27.0 * (24.0 / N.powu(5) - 24.0 / (N + 1.0).powu(5))
+ - 400.0 / 9.0 * (-6.0 / N.powu(4) + 6.0 / (N + 1.0).powu(4))
+ + 131.4 * (2.0 / N.powu(3) - 2.0 / (N + 1.0).powu(3))
+ - 661.6 * (-1.0 / N.powu(2) + 1.0 / (N + 1.0).powu(2))
+ - 5.926 * (B3 - B31)
+ - 9.751 * ((S1.powu(2) + S2) / N - B21)
+ - 72.11 * (-S1 / N + (S1 + 1.0 / (N + 1.0)) / (N + 1.0))
+ + 177.4 * (1.0 / N - 1.0 / (N + 1.0))
+ + 392.9 * (1.0 / (N + 1.0) - 1.0 / (N + 2.0))
+ - 101.4 * (1.0 / (N + 2.0) - 1.0 / (N + 3.0))
+ - 57.04 * (E1 - E11);
+
+ #[rustfmt::skip]
+ let ps2 =
+ 256.0 / 81.0 * (1.0 / (N - 1.0) - 1.0 / N)
+ + 32.0 / 27.0 * (-6.0 / N.powu(4) + 6.0 / (N + 1.0).powu(4))
+ + 17.89 * (2.0 / N.powu(3) - 2.0 / (N + 1.0).powu(3))
+ + 61.75 * (-1.0 / N.powu(2) + 1.0 / (N + 1.0).powu(2))
+ + 1.778 * ((S1.powu(2) + S2) / N - B21)
+ + 5.944 * (-S1 / N + (S1 + 1.0 / (N + 1.0)) / (N + 1.0))
+ + 100.1 * (1.0 / N - 1.0 / (N + 1.0))
+ - 125.2 * (1.0 / (N + 1.0) - 1.0 / (N + 2.0))
+ + 49.26 * (1.0 / (N + 2.0) - 1.0 / (N + 3.0))
+ - 12.59 * (1.0 / (N + 3.0) - 1.0 / (N + 4.0))
+ - 1.889 * (E1 - E11);
+
+ let result = (nf as f64) * ps1 + (nf as f64).pow(2) * ps2;
+ -1.0 * result
+}
+
+/// Compute the quark-gluon singlet anomalous dimension.
+///
+/// Implements Eq. (3.11) of [\[Vogt:2004mw\]][crate::bib::Vogt2004mw].
+pub fn gamma_qg(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let S3 = c.get(K::S3);
+ let S4 = c.get(K::S4);
+
+ let E1 = S1 / N.powu(2) + (S2 - ZETA2) / N;
+ let E2 = 2.0 * (-S1 / N.powu(3) + (ZETA2 - S2) / N.powu(2) - (S3 - ZETA3) / N);
+ let B3 = -(S1.powu(3) + 3.0 * S1 * S2 + 2.0 * S3) / N;
+ let B4 = (S1.powu(4) + 6.0 * S1.powu(2) * S2 + 8.0 * S1 * S3 + 3.0 * S2.powu(2) + 6.0 * S4) / N;
+
+ #[rustfmt::skip]
+ let qg1 =
+ 896.0 / 3.0 / (N - 1.0).powu(2)
+ - 1268.3 / (N - 1.0)
+ + 536.0 / 27.0 * 24.0 / N.powu(5)
+ + 44.0 / 3.0 * 6.0 / N.powu(4)
+ + 881.5 * 2.0 / N.powu(3)
+ - 424.9 / N.powu(2)
+ + 100.0 / 27.0 * B4
+ - 70.0 / 9.0 * B3
+ - 120.5 * (S1.powu(2) + S2) / N
+ - 104.42 * S1 / N
+ + 2522.0 / N
+ - 3316.0 / (N + 1.0)
+ + 2126.0 / (N + 2.0)
+ + 1823.0 * E1
+ - 25.22 * E2
+ + 252.5 * 6.0 / (N + 1.0).powu(4);
+
+ #[rustfmt::skip]
+ let qg2 =
+ 1112.0 / 243.0 / (N - 1.0)
+ - 16.0 / 9.0 * 24.0 / N.powu(5)
+ + 376.0 / 27.0 * 6.0 / N.powu(4)
+ - 90.8 * 2.0 / N.powu(3)
+ + 254.0 / N.powu(2)
+ + 20.0 / 27.0 * B3
+ + 200.0 / 27.0 * (S1.powu(2) + S2) / N
+ + 5.496 * S1 / N
+ - 252.0 / N
+ + 158.0 / (N + 1.0)
+ + 145.4 / (N + 2.0)
+ - 139.28 / (N + 3.0)
+ - 53.09 * E1
+ - 80.616 * E2
+ - 98.07 * 2.0 / (N + 1.0).powu(3)
+ - 11.70 * 6.0 / (N + 1.0).powu(4);
+
+ let result = (nf as f64) * qg1 + (nf as f64).pow(2) * qg2;
+ -1.0 * result
+}
+
+/// Compute the gluon-quark singlet anomalous dimension.
+///
+/// Implements Eq. (3.12) of [\[Vogt:2004mw\]][crate::bib::Vogt2004mw].
+pub fn gamma_gq(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let S3 = c.get(K::S3);
+ let S4 = c.get(K::S4);
+
+ let E1 = S1 / N.powu(2) + (S2 - ZETA2) / N;
+ let E2 = 2.0 * (-S1 / N.powu(3) + (ZETA2 - S2) / N.powu(2) - (S3 - ZETA3) / N);
+ let B21 = ((S1 + 1.0 / (N + 1.0)).powu(2) + S2 + 1.0 / (N + 1.0).powu(2)) / (N + 1.0);
+ let B3 = -(S1.powu(3) + 3.0 * S1 * S2 + 2.0 * S3) / N;
+ let B4 = (S1.powu(4) + 6.0 * S1.powu(2) * S2 + 8.0 * S1 * S3 + 3.0 * S2.powu(2) + 6.0 * S4) / N;
+
+ #[rustfmt::skip]
+ let gq0 =
+ -1189.3 * 1.0 / (N - 1.0).powu(2)
+ + 6163.1 / (N - 1.0)
+ - 4288.0 / 81.0 * 24.0 / N.powu(5)
+ - 1568.0 / 9.0 * 6.0 / N.powu(4)
+ - 1794.0 * 2.0 / N.powu(3)
+ - 4033.0 * 1.0 / N.powu(2)
+ + 400.0 / 81.0 * B4
+ + 2200.0 / 27.0 * B3
+ + 606.3 * (S1.powu(2) + S2) / N
+ - 2193.0 * S1 / N
+ - 4307.0 / N
+ + 489.3 / (N + 1.0)
+ + 1452.0 / (N + 2.0)
+ + 146.0 / (N + 3.0)
+ - 447.3 * E2
+ - 972.9 * 2.0 / (N + 1.0).powu(3);
+
+ #[rustfmt::skip]
+ let gq1 =
+ -71.082 / (N - 1.0).powu(2)
+ - 46.41 / (N - 1.0)
+ + 128.0 / 27.0 * 24.0 / N.powu(5)
+ - 704. / 81.0 * 6.0 / N.powu(4)
+ + 20.39 * 2.0 / N.powu(3)
+ - 174.8 * 1.0 / N.powu(2)
+ - 400.0 / 81.0 * B3
+ - 68.069 * (S1.powu(2) + S2) / N
+ + 296.7 * S1 / N
+ - 183.8 / N
+ + 33.35 / (N + 1.0)
+ - 277.9 / (N + 2.0)
+ + 108.6 * 2.0 / (N + 1.0).powu(3)
+ - 49.68 * E1;
+
+ #[rustfmt::skip]
+ let gq2 = (
+ 64.0 * (-1.0 / (N - 1.0) + 1.0 / N + 2.0 / (N + 1.0))
+ + 320.0
+ * (
+ -(S1 - 1.0 / N) / (N - 1.0)
+ + S1 / N
+ - 0.8 * (S1 + 1.0 / (N + 1.0)) / (N + 1.0)
+ )
+ + 96.0
+ * (
+ ((S1 - 1.0 / N).powu(2) + S2 - 1.0 / N.powu(2)) / (N - 1.0)
+ - (S1.powu(2) + S2) / N
+ + 0.5 * B21
+ )
+ ) / 27.0;
+
+ let result = gq0 + (nf as f64) * gq1 + (nf as f64).pow(2) * gq2;
+ -1.0 * result
+}
+
+/// Compute the gluon-gluon singlet anomalous dimension.
+///
+/// Implements Eq. (3.13) of [\[Vogt:2004mw\]][crate::bib::Vogt2004mw].
+pub fn gamma_gg(c: &mut Cache, nf: u8) -> Complex<f64> {
+ let N = c.n;
+ let S1 = c.get(K::S1);
+ let S2 = c.get(K::S2);
+ let S3 = c.get(K::S3);
+
+ let E1 = S1 / N.powu(2) + (S2 - ZETA2) / N;
+ let E11 = (S1 + 1.0 / (N + 1.0)) / (N + 1.0).powu(2)
+ + (S2 + 1.0 / (N + 1.0).powu(2) - ZETA2) / (N + 1.0);
+ let E2 = 2.0 * (-S1 / N.powu(3) + (ZETA2 - S2) / N.powu(2) - (S3 - ZETA3) / N);
+
+ #[rustfmt::skip]
+ let gg0 =
+ -2675.8 / (N - 1.0).powu(2)
+ + 14214.0 / (N - 1.0)
+ - 144.0 * 24.0 / N.powu(5)
+ - 72.0 * 6.0 / N.powu(4)
+ - 7471.0 * 2.0 / N.powu(3)
+ - 274.4 / N.powu(2)
+ - 20852.0 / N
+ + 3968.0 / (N + 1.0)
+ - 3363.0 / (N + 2.0)
+ + 4848.0 / (N + 3.0)
+ + 7305.0 * E1
+ + 8757.0 * E2
+ - 3589.0 * S1 / N
+ + 4425.894
+ - 2643.521 * (S1 - 1.0 / N);
+
+ #[rustfmt::skip]
+ let gg1 =
+ -157.27 / (N - 1.0).powu(2)
+ + 182.96 / (N - 1.0)
+ + 512.0 / 27.0 * 24.0 / N.powu(5)
+ - 832.0 / 9.0 * 6.0 / N.powu(4)
+ + 491.3 * 2.0 / N.powu(3)
+ - 1541.0 / N.powu(2)
+ - 350.2 / N
+ + 755.7 / (N + 1.0)
+ - 713.8 / (N + 2.0)
+ + 559.3 / (N + 3.0)
+ + 26.15 * E1
+ - 808.7 * E2
+ + 320.0 * S1 / N
+ - 528.723
+ + 412.172 * (S1 - 1.0 / N);
+
+ #[rustfmt::skip]
+ let gg2 =
+ -680.0 / 243.0 / (N - 1.0)
+ + 32.0 / 27.0 * 6.0 / N.powu(4)
+ + 9.680 * 2.0 / N.powu(3)
+ + 3.422 / N.powu(2)
+ - 13.878 / N
+ + 153.4 / (N + 1.0)
+ - 187.7 / (N + 2.0)
+ + 52.75 / (N + 3.0)
+ - 115.6 * E1
+ + 85.25 * E11
+ - 63.23 * E2
+ + 6.4630
+ + 16.0 / 9.0 * (S1 - 1.0 / N);
+
+ let result = gg0 + (nf as f64) * gg1 + (nf as f64).pow(2) * gg2;
+ -1.0 * result
+}
+
+/// Compute the singlet anomalous dimension matrix.
+pub fn gamma_singlet(c: &mut Cache, nf: u8) -> [[Complex<f64>; 2]; 2] {
+ let gamma_qq = gamma_nsp(c, nf) + gamma_ps(c, nf);
+ [
+ [gamma_qq, gamma_qg(c, nf)],
+ [gamma_gq(c, nf), gamma_gg(c, nf)],
+ ]
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::cmplx;
+ use crate::{anomalous_dimensions::unpolarized::spacelike::as3::*, harmonics::cache::Cache};
+ use float_cmp::assert_approx_eq;
+ use num::complex::Complex;
+
+ const NF: u8 = 5;
+
+ #[test]
+ fn physical_constraints() {
+ // number conservation
+ let mut c = Cache::new(cmplx![1., 0.]);
+ assert_approx_eq!(f64, gamma_nsv(&mut c, NF).re, -0.000960586, epsilon = 3e-7);
+ assert_approx_eq!(f64, gamma_nsm(&mut c, NF).re, 0.000594225, epsilon = 6e-7);
+
+ let mut c = Cache::new(cmplx![2., 0.]);
+ let gS2 = gamma_singlet(&mut c, NF);
+ // gluon momentum conservation
+ assert_approx_eq!(f64, (gS2[0][1] + gS2[1][1]).re, -0.00388726, epsilon = 2e-6);
+ // quark momentum conservation
+ assert_approx_eq!(f64, (gS2[0][0] + gS2[1][0]).re, 0.00169375, epsilon = 2e-6);
+ }
+
+ #[test]
+ fn N2() {
+ let mut c = Cache::new(cmplx![2., 0.]);
+ assert_approx_eq!(f64, gamma_nsv(&mut c, NF).re, 188.325593, epsilon = 3e-7);
+ }
+}
diff --git a/crates/ekore/src/bib.rs b/crates/ekore/src/bib.rs
index c62f16d02..75da707bb 100644
--- a/crates/ekore/src/bib.rs
+++ b/crates/ekore/src/bib.rs
@@ -1,5 +1,6 @@
-//! List of References (autogenerated on 2023-11-29T18:07:41.518255).
+//! List of References (autogenerated on 2024-05-30T12:57:15.459698).
+#[allow(non_snake_case)]
/// The Three loop splitting functions in QCD: The Nonsinglet case
///
/// Moch, S. and Vermaseren, J. A. M. and Vogt, A.
@@ -11,6 +12,7 @@
/// DOI: [10.1016/j.nuclphysb.2004.03.030](https:dx.doi.org/10.1016/j.nuclphysb.2004.03.030)
pub fn Moch2004pa() {}
+#[allow(non_snake_case)]
/// The Three-loop splitting functions in QCD: The Singlet case
///
/// Vogt, A. and Moch, S. and Vermaseren, J. A. M.
@@ -22,6 +24,7 @@ pub fn Moch2004pa() {}
/// DOI: [10.1016/j.nuclphysb.2004.04.024](https:dx.doi.org/10.1016/j.nuclphysb.2004.04.024)
pub fn Vogt2004mw() {}
+#[allow(non_snake_case)]
/// Programs for computing the logarithm of the gamma function, and the digamma function, for complex argument
///
/// K.S. Kölbig
@@ -33,6 +36,7 @@ pub fn Vogt2004mw() {}
/// DOI: [https://doi.org/10.1016/0010-4655(72)90012-4](https://doi.org/10.1016/0010-4655(72)90012-4)
pub fn KOLBIG1972221() {}
+#[allow(non_snake_case)]
/// Multi‐precision Laplace transform inversion
///
/// Abate, J. and Valkó, P.
@@ -43,3 +47,27 @@ pub fn KOLBIG1972221() {}
///
/// DOI: [10.1002/nme.995](https:dx.doi.org/10.1002/nme.995)
pub fn Abate() {}
+
+#[allow(non_snake_case)]
+/// Resummations of Transverse Momentum Distributions
+///
+/// Muselli, Claudio
+///
+/// Published as PhD thesis at Università degli studi di Milano, Dipartimento di Fisica (2017)
+///
+///
+///
+/// DOI: [10.13130/muselli-claudio_phd2017-12-01](https://dx.doi.org/10.13130/muselli-claudio_phd2017-12-01)
+pub fn MuselliPhD() {}
+
+#[allow(non_snake_case)]
+/// Efficient evolution of unpolarized and polarized parton distributions with QCD-PEGASUS
+///
+/// Vogt, A.
+///
+/// Published in: Comput. Phys. Commun. 170 (2005), 65--92
+///
+/// e-Print: [hep-ph/0408244](https://arxiv.org/abs/hep-ph/0408244)
+///
+/// DOI: [10.1016/j.cpc.2005.03.103](https://dx.doi.org/10.1016/j.cpc.2005.03.103)
+pub fn Vogt2004ns() {}
diff --git a/crates/ekore/src/constants.rs b/crates/ekore/src/constants.rs
index 3cdd13a15..c824bdf5a 100644
--- a/crates/ekore/src/constants.rs
+++ b/crates/ekore/src/constants.rs
@@ -1,25 +1,43 @@
//! Global constants.
-#[cfg_attr(doc, katexit::katexit)]
/// The number of colors.
///
/// Defaults to $N_C = 3$.
pub const NC: u8 = 3;
-#[cfg_attr(doc, katexit::katexit)]
/// The normalization of fundamental generators.
///
/// Defaults to $T_R = 1/2$.
pub const TR: f64 = 1.0 / 2.0;
-#[cfg_attr(doc, katexit::katexit)]
/// Second Casimir constant in the adjoint representation.
///
/// Defaults to $C_A = N_C = 3$.
pub const CA: f64 = NC as f64;
-#[cfg_attr(doc, katexit::katexit)]
/// Second Casimir constant in the fundamental representation.
///
/// Defaults to $C_F = \frac{N_C^2-1}{2N_C} = 4/3$.
pub const CF: f64 = ((NC * NC - 1) as f64) / ((2 * NC) as f64);
+
+/// Riemann zeta function at z = 2.
+///
+/// $\zeta(2) = \pi^2 / 6$.
+pub const ZETA2: f64 = 1.6449340668482264;
+
+/// Riemann zeta function at z = 3.
+pub const ZETA3: f64 = 1.2020569031595942;
+
+/// Riemann zeta function at z = 4.
+///
+/// $\zeta(4) = \pi^4 / 90$.
+pub const ZETA4: f64 = 1.082323233711138;
+
+/// singlet-like non-singlet PID
+pub const PID_NSP: u16 = 10101;
+
+/// valence-like non-singlet PID
+pub const PID_NSM: u16 = 10201;
+
+/// non-singlet all-valence PID
+pub const PID_NSV: u16 = 10200;
diff --git a/crates/ekore/src/harmonics.rs b/crates/ekore/src/harmonics.rs
index b829060fc..a8a3b3241 100644
--- a/crates/ekore/src/harmonics.rs
+++ b/crates/ekore/src/harmonics.rs
@@ -1,5 +1,9 @@
//! Tools to compute harmonic sums and related special functions.
pub mod cache;
+pub mod g_functions;
pub mod polygamma;
-mod w1;
+pub mod w1;
+pub mod w2;
+pub mod w3;
+pub mod w4;
diff --git a/crates/ekore/src/harmonics/cache.rs b/crates/ekore/src/harmonics/cache.rs
index 6ad72171d..1342f5175 100644
--- a/crates/ekore/src/harmonics/cache.rs
+++ b/crates/ekore/src/harmonics/cache.rs
@@ -1,16 +1,35 @@
//! Cache harmonic sums for given Mellin N.
use hashbrown::HashMap;
-use num::complex::Complex;
+use num::{complex::Complex, Zero};
-use crate::harmonics::w1;
+use crate::harmonics::{g_functions, w1, w2, w3, w4};
-#[cfg_attr(doc, katexit::katexit)]
/// List of available elements.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum K {
/// $S_1(N)$
S1,
+ /// $S_2(N)$
+ S2,
+ /// $S_3(N)$
+ S3,
+ /// $S_4(N)$
+ S4,
+ /// $S_1(N/2)$
+ S1h,
+ /// $S_2(N/2)$
+ S2h,
+ /// $S_3(N/2)$
+ S3h,
+ /// $S_1((N-1)/2)$
+ S1mh,
+ /// $S_2((N-1)/2)$
+ S2mh,
+ /// $S_3((N-1)/2)$
+ S3mh,
+ /// $g_3(N)$
+ G3,
}
/// Hold all cached values.
@@ -34,15 +53,68 @@ impl Cache {
pub fn get(&mut self, k: K) -> Complex<f64> {
let val = self.m.get(&k);
// already there?
- if val.is_some() {
- return *val.unwrap();
+ if let Some(value) = val {
+ return *value;
}
// compute new
let val = match k {
K::S1 => w1::S1(self.n),
+ K::S2 => w2::S2(self.n),
+ K::S3 => w3::S3(self.n),
+ K::S4 => w4::S4(self.n),
+ K::S1h => w1::S1(self.n / 2.),
+ K::S2h => w2::S2(self.n / 2.),
+ K::S3h => w3::S3(self.n / 2.),
+ K::S1mh => w1::S1((self.n - 1.) / 2.),
+ K::S2mh => w2::S2((self.n - 1.) / 2.),
+ K::S3mh => w3::S3((self.n - 1.) / 2.),
+ K::G3 => g_functions::g3(self.n, self.get(K::S1)),
};
// insert
self.m.insert(k, val);
val
}
}
+
+/// Recursive computation of harmonic sums.
+///
+/// Compute the harmonic sum $S_{w}(N+k)$ starting from the value $S_{w}(N)$ via the recurrence relations.
+pub fn recursive_harmonic_sum(
+    base_value: Complex<f64>,
+    n: Complex<f64>,
+    iterations: usize,
+    weight: u32,
+) -> Complex<f64> {
+ let mut fact = Complex::zero();
+ for i in 1..iterations + 1 {
+ fact += (1.0 / (n + (i as f64))).powu(weight);
+ }
+ base_value + fact
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::harmonics::cache::recursive_harmonic_sum;
+ use crate::harmonics::{w1, w2, w3, w4};
+ use crate::{assert_approx_eq_cmplx, cmplx};
+ use num::complex::Complex;
+
+ #[test]
+ fn test_recursive_harmonic_sum() {
+        const SX: [fn(Complex<f64>) -> Complex<f64>; 4] = [w1::S1, w2::S2, w3::S3, w4::S4];
+        const NS: [Complex<f64>; 2] = [cmplx![1.0, 0.0], cmplx![2.34, 3.45]];
+ const ITERS: [usize; 2] = [1, 2];
+ for sit in SX.iter().enumerate() {
+ for nit in NS.iter().enumerate() {
+ let n = *nit.1;
+ for iit in ITERS.iter().enumerate() {
+ let iterations = *iit.1;
+ let s_base = sit.1(n);
+ let s_test = sit.1(n + (iterations as f64));
+ let s_ref = recursive_harmonic_sum(s_base, n, iterations, 1 + (sit.0 as u32));
+ assert_approx_eq_cmplx!(f64, s_test, s_ref);
+ }
+ }
+ }
+ }
+}
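Spelled out, the recurrence implemented by `recursive_harmonic_sum` (and exercised in the test above) is

$$S_w(N+k) = S_w(N) + \sum_{j=1}^{k} \frac{1}{(N+j)^w},$$

which is exactly what the loop over `1..=iterations` accumulates before adding the result to `base_value`.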
diff --git a/crates/ekore/src/harmonics/g_functions.rs b/crates/ekore/src/harmonics/g_functions.rs
new file mode 100644
index 000000000..cfde9e9ea
--- /dev/null
+++ b/crates/ekore/src/harmonics/g_functions.rs
@@ -0,0 +1,50 @@
+//! Auxiliary functions for harmonic sums of weight 3 and 4.
+
+use crate::constants::ZETA2;
+use crate::harmonics::cache::recursive_harmonic_sum as s;
+use num::{complex::Complex, Zero};
+
+/// Compute the Mellin transform of $\text{Li}_2(x)/(1+x)$.
+///
+/// This function appears in the analytic continuation of the harmonic sum
+/// $S_{-2,1}(N)$ which in turn appears in the NLO anomalous dimension.
+///
+/// We use the name from [\[MuselliPhD\]](crate::bib::MuselliPhD), but not his implementation - rather we use the
+/// Pegasus [\[Vogt:2004ns\]](crate::bib::Vogt2004ns) implementation.
+pub fn g3(N: Complex<f64>, S1: Complex<f64>) -> Complex<f64> {
+ const CS: [f64; 7] = [
+ 1.0000e0, -0.9992e0, 0.9851e0, -0.9005e0, 0.6621e0, -0.3174e0, 0.0699e0,
+ ];
+ let mut g3 = Complex::zero();
+ for cit in CS.iter().enumerate() {
+ let Nj = N + (cit.0 as f64);
+ g3 += (*cit.1) * (ZETA2 - s(S1, N, cit.0, 1) / Nj) / Nj;
+ }
+ g3
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::harmonics::g_functions::g3;
+ use crate::harmonics::w1;
+ use crate::{assert_approx_eq_cmplx, cmplx};
+ use num::complex::Complex;
+
+ #[test]
+ fn test_mellin_g3() {
+        const NS: [Complex<f64>; 3] = [cmplx![1.0, 0.0], cmplx![2.0, 0.0], cmplx![1.0, 1.0]];
+        // NIntegrate[x^({1, 2, 1 + I} - 1) PolyLog[2, x]/(1 + x), {x, 0, 1}]
+        const REFVALS: [Complex<f64>; 3] = [
+ cmplx![0.3888958462, 0.],
+ cmplx![0.2560382207, 0.],
+ cmplx![0.3049381491, -0.1589060625],
+ ];
+ for it in NS.iter().enumerate() {
+ let n = *it.1;
+ let s1 = w1::S1(n);
+ let refval = REFVALS[it.0];
+ let g3 = g3(n, s1);
+ assert_approx_eq_cmplx![f64, g3, refval, epsilon = 1e-6];
+ }
+ }
+}
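For reference, the expression coded in `g3` corresponds to approximating $1/(1+x)$ on $[0,1]$ by the seven-term polynomial $\sum_{j=0}^{6} c_j x^j$ (the Pegasus coefficients stored in `CS`), so that

$$g_3(N) = \int_0^1 \mathrm{d}x\, x^{N-1}\,\frac{\operatorname{Li}_2(x)}{1+x} \approx \sum_{j=0}^{6} c_j \int_0^1 \mathrm{d}x\, x^{N+j-1}\,\operatorname{Li}_2(x) = \sum_{j=0}^{6} \frac{c_j}{N+j}\left(\zeta_2 - \frac{S_1(N+j)}{N+j}\right),$$

with the shifted $S_1(N+j)$ obtained via `recursive_harmonic_sum`.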
diff --git a/crates/ekore/src/harmonics/polygamma.rs b/crates/ekore/src/harmonics/polygamma.rs
index efbdae147..7415db88e 100644
--- a/crates/ekore/src/harmonics/polygamma.rs
+++ b/crates/ekore/src/harmonics/polygamma.rs
@@ -3,10 +3,11 @@
use num::{complex::Complex, Zero};
use std::f64::consts::PI;
-#[cfg_attr(doc, katexit::katexit)]
+#[allow(clippy::excessive_precision, clippy::assign_op_pattern)]
/// Compute the polygamma functions $\psi_k(z)$.
///
-/// Reimplementation of ``WPSIPG`` (C317) in [CERNlib](http://cernlib.web.cern.ch/cernlib/) given by [[KOLBIG1972221]][crate::bib::KOLBIG1972221].
+/// Reimplementation of ``WPSIPG`` (C317) in [CERNlib](http://cernlib.web.cern.ch/cernlib/)
+/// given by [\[KOLBIG1972221\]](crate::bib::KOLBIG1972221).
///
/// TODO: introduce back errors
pub fn cern_polygamma(Z: Complex<f64>, K: usize) -> Complex<f64> {
@@ -83,9 +84,7 @@ pub fn cern_polygamma(Z: Complex, K: usize) -> Complex {
}
let mut R = 1. / V.powu(2);
let mut P = R * C[K][6 - 1];
- for i in (1..=5).rev()
- // (int i = 5; i>1-1; i--)
- {
+ for i in (1..=5).rev() {
P = R * (C[K][i - 1] + P);
}
H = (SGN[K] as f64)
@@ -121,9 +120,8 @@ pub fn cern_polygamma(Z: Complex, K: usize) -> Complex {
#[cfg(test)]
mod tests {
- use crate::cmplx;
use crate::harmonics::polygamma::cern_polygamma;
- use float_cmp::assert_approx_eq;
+ use crate::{assert_approx_eq_cmplx, cmplx};
use num::complex::Complex;
#[test]
@@ -202,8 +200,7 @@ mod tests {
for zit in ZS.iter().enumerate() {
let fref = FORTRAN_REF[kit.0][zit.0];
let me = cern_polygamma(*zit.1, *kit.1);
- assert_approx_eq!(f64, me.re, fref.re, ulps = 32);
- assert_approx_eq!(f64, me.im, fref.im, ulps = 32);
+ assert_approx_eq_cmplx!(f64, me, fref, ulps = 32);
}
}
}
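As a quick orientation for `cern_polygamma` (only touched stylistically in this patch), a minimal sketch checking two textbook values; the tolerance is illustrative only:

```rust
use ekore::harmonics::polygamma::cern_polygamma;
use num::complex::Complex;

fn main() {
    let one = Complex::new(1.0, 0.0);
    // psi_0(1) = -gamma_E (Euler-Mascheroni constant)
    let psi0 = cern_polygamma(one, 0);
    assert!((psi0.re + 0.577_215_664_901_532_9).abs() < 1e-10);
    // psi_1(1) = zeta(2) = pi^2 / 6
    let psi1 = cern_polygamma(one, 1);
    assert!((psi1.re - 1.644_934_066_848_226_4).abs() < 1e-10);
}
```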
diff --git a/crates/ekore/src/harmonics/w1.rs b/crates/ekore/src/harmonics/w1.rs
index 2b861cb30..a1aa893d9 100644
--- a/crates/ekore/src/harmonics/w1.rs
+++ b/crates/ekore/src/harmonics/w1.rs
@@ -1,3 +1,4 @@
+//! Harmonic sums of weight 1.
use num::complex::Complex;
use crate::harmonics::polygamma::cern_polygamma;
@@ -7,5 +8,5 @@ use crate::harmonics::polygamma::cern_polygamma;
/// $$S_1(N) = \sum\limits_{j=1}^N \frac 1 j = \psi_0(N+1)+\gamma_E$$
/// with $\psi_0(N)$ the digamma function and $\gamma_E$ the Euler-Mascheroni constant.
pub fn S1(N: Complex<f64>) -> Complex<f64> {
- cern_polygamma(N + 1.0, 0) + 0.5772156649015328606065120
+ cern_polygamma(N + 1.0, 0) + 0.577_215_664_901_532_9
}
diff --git a/crates/ekore/src/harmonics/w2.rs b/crates/ekore/src/harmonics/w2.rs
new file mode 100644
index 000000000..d55a32679
--- /dev/null
+++ b/crates/ekore/src/harmonics/w2.rs
@@ -0,0 +1,13 @@
+//! Harmonic sums of weight 2.
+use num::complex::Complex;
+
+use crate::constants::ZETA2;
+use crate::harmonics::polygamma::cern_polygamma;
+
+/// Compute the harmonic sum $S_2(N)$.
+///
+/// $$S_2(N) = \sum\limits_{j=1}^N \frac 1 {j^2} = -\psi_1(N+1)+\zeta(2)$$
+/// with $\psi_1(N)$ the trigamma function and $\zeta$ the Riemann zeta function.
+pub fn S2(N: Complex<f64>) -> Complex<f64> {
+ -cern_polygamma(N + 1.0, 1) + ZETA2
+}
diff --git a/crates/ekore/src/harmonics/w3.rs b/crates/ekore/src/harmonics/w3.rs
new file mode 100644
index 000000000..7850c678d
--- /dev/null
+++ b/crates/ekore/src/harmonics/w3.rs
@@ -0,0 +1,13 @@
+//! Harmonic sums of weight 3.
+use num::complex::Complex;
+
+use crate::constants::ZETA3;
+use crate::harmonics::polygamma::cern_polygamma;
+
+/// Compute the harmonic sum $S_3(N)$.
+///
+/// $$S_3(N) = \sum\limits_{j=1}^N \frac 1 {j^3} = \frac 1 2 \psi_2(N+1)+\zeta(3)$$
+/// with $\psi_2(N)$ the 2nd polygamma function and $\zeta$ the Riemann zeta function.
+pub fn S3(N: Complex<f64>) -> Complex<f64> {
+ 0.5 * cern_polygamma(N + 1.0, 2) + ZETA3
+}
diff --git a/crates/ekore/src/harmonics/w4.rs b/crates/ekore/src/harmonics/w4.rs
new file mode 100644
index 000000000..66835ac5f
--- /dev/null
+++ b/crates/ekore/src/harmonics/w4.rs
@@ -0,0 +1,13 @@
+//! Harmonic sums of weight 4.
+use num::complex::Complex;
+
+use crate::constants::ZETA4;
+use crate::harmonics::polygamma::cern_polygamma;
+
+/// Compute the harmonic sum $S_4(N)$.
+///
+/// $$S_4(N) = \sum\limits_{j=1}^N \frac 1 {j^4} = - \frac 1 6 \psi_3(N+1)+\zeta(4)$$
+/// with $\psi_3(N)$ the 3rd polygamma function and $\zeta$ the Riemann zeta function.
+pub fn S4(N: Complex<f64>) -> Complex<f64> {
+ ZETA4 - 1.0 / 6.0 * cern_polygamma(N + 1.0, 3)
+}
diff --git a/crates/ekore/src/util.rs b/crates/ekore/src/util.rs
index 5c4e34f4d..55b1ab2bd 100644
--- a/crates/ekore/src/util.rs
+++ b/crates/ekore/src/util.rs
@@ -7,3 +7,23 @@ macro_rules! cmplx {
Complex::new($re, $im)
};
}
+
+/// Assert that two complex numbers are approximately equal, comparing real and imaginary parts separately.
+#[macro_export]
+macro_rules! assert_approx_eq_cmplx {
+ ($size:ty, $ref:expr, $target:expr) => {
+ use float_cmp::assert_approx_eq;
+ assert_approx_eq!($size, $ref.re, $target.re);
+ assert_approx_eq!($size, $ref.im, $target.im);
+ };
+ ($size:ty, $ref:expr, $target:expr, ulps=$ulps:expr) => {
+ use float_cmp::assert_approx_eq;
+ assert_approx_eq!($size, $ref.re, $target.re, ulps = $ulps);
+ assert_approx_eq!($size, $ref.im, $target.im, ulps = $ulps);
+ };
+ ($size:ty, $ref:expr, $target:expr, epsilon=$epsilon:expr) => {
+ use float_cmp::assert_approx_eq;
+ assert_approx_eq!($size, $ref.re, $target.re, epsilon = $epsilon);
+ assert_approx_eq!($size, $ref.im, $target.im, epsilon = $epsilon);
+ };
+}
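A minimal sketch of how the new macro is meant to be used inside the crate's own test modules (mirroring the polygamma and cache tests above; the numbers are arbitrary):

```rust
#[cfg(test)]
mod tests {
    use crate::{assert_approx_eq_cmplx, cmplx};
    use num::complex::Complex;

    #[test]
    fn compare_complex_numbers() {
        let a = cmplx![1.0, 2.0];
        let b = cmplx![1.0 + 1e-14, 2.0];
        // real and imaginary parts are compared separately
        assert_approx_eq_cmplx!(f64, a, b, epsilon = 1e-12);
    }
}
```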
diff --git a/crates/make_bib.py b/crates/make_bib.py
index 1b15d663d..e758c41ad 100644
--- a/crates/make_bib.py
+++ b/crates/make_bib.py
@@ -1,4 +1,5 @@
"""Parse bibtex file to rust crate."""
+
import datetime
import pathlib
import re
@@ -9,7 +10,8 @@
BIBFILE = pathlib.Path(__file__).parent / "ekore" / "refs.bib"
# A single entry
-ENTRY = """/// {title}
+ENTRY = """#[allow(non_snake_case)]
+/// {title}
///
/// {author}
///
@@ -20,6 +22,7 @@
/// {doi}"""
# Combine publication information
PUB = """Published in: {journal} {volume} ({year}), {pages}"""
+PHD = """Published as PhD thesis at {school} ({year})"""
def clean_nl(t: str) -> str:
@@ -38,12 +41,18 @@ def clean_nl(t: str) -> str:
for el in bib_database.entries:
title = re.sub(r"^\{(.+)\}$", r"\1", clean_nl(el.fields_dict["title"].value))
author = el.fields_dict["author"].value
- publication = PUB.format(
- journal=el.fields_dict["journal"].value,
- volume=el.fields_dict["volume"].value,
- year=el.fields_dict["year"].value,
- pages=el.fields_dict["pages"].value,
- )
+ if el.entry_type == "phdthesis":
+ publication = PHD.format(
+ school=el.fields_dict["school"].value,
+ year=el.fields_dict["year"].value,
+ )
+ else:
+ publication = PUB.format(
+ journal=el.fields_dict["journal"].value,
+ volume=el.fields_dict["volume"].value,
+ year=el.fields_dict["year"].value,
+ pages=el.fields_dict["pages"].value,
+ )
eprint = ""
if (
"eprint" in el.fields_dict
diff --git a/crates/parse-abbrev.py b/crates/parse-abbrev.py
new file mode 100644
index 000000000..b928ed03c
--- /dev/null
+++ b/crates/parse-abbrev.py
@@ -0,0 +1,20 @@
+"""Parse abbreviations from sphinx to Rust."""
+
+import pathlib
+import re
+
+SRC = (
+ pathlib.Path(__file__).parents[1]
+ / "doc"
+ / "source"
+ / "shared"
+ / "abbreviations.rst"
+)
+
+cnt = SRC.read_text("utf-8")
+for el in cnt.split(".."):
+ test = re.match(r"\s*(.+)\s+replace::\n\s+:abbr:`(.+?)\((.+)\)`", el.strip())
+ if test is None:
+ continue
+ # Print to terminal - the user can dump to the relevant file
+ print(f'"{test[2].strip()}": "{test[3].strip()}",')
diff --git a/crates/release.json b/crates/release.json
new file mode 100644
index 000000000..c2b9f6f54
--- /dev/null
+++ b/crates/release.json
@@ -0,0 +1 @@
+["ekore", "eko"]
diff --git a/doc/source/code/genpdf.rst b/doc/source/code/genpdf.rst
index f9d52bd47..ab3f94673 100644
--- a/doc/source/code/genpdf.rst
+++ b/doc/source/code/genpdf.rst
@@ -3,40 +3,46 @@ genpdf
We provide also a console script called ``genpdf`` that is able
to generate and install a custom |PDF| set in the `lhapdf` format.
-In particular, the command ``genpdf install [NAME]`` simply
-install the |PDF| called ``[NAME]`` in the lhapdf folder and
-the ``genpdf generate [NAME]`` command generates the custom |PDF|
+
+To use it, you have to install the additional `ekobox` dependencies via
+the pip extra `box`:
+
+.. code-block:: bash
+
+ $ pip install eko[box]
+
+The ``genpdf generate [NAME]`` command generates a custom |PDF|
and saves it as ``[NAME]`` in the current directory.
-Notice that the argument ``[NAME]`` is the only mandatory one.
+Afterwards, it can be conveniently installed using the ``genpdf install [NAME]`` command
+(which simply copies the generated folder into the LHAPDF folder).
+Note that the argument ``[NAME]`` is the only mandatory one.
-The custom |PDF| can be generated in three different ways which
-are accessible trough the option ``-p [PARENT]`` (the complete spelling
-is ``genpdf generate [NAME] -p [PARENT]``):
+A custom |PDF| can be generated in three different ways which
+are accessible through the option ``-p [PARENT]``:
1. If ``[PARENT]`` is the name of an available |PDF|, it is used as parent
- |PDF| and thus copied to generate the new custom PDF.
- 2. If ``[PARENT]`` is "toylh" or "toy", the **toy** |PDF| is used as parent.
+ |PDF| and thus copied to generate the new custom PDF.
+ 2. If ``[PARENT]`` is "toylh" or "toy", the **toy** |PDF| :cite:`Giele:2002hx,Dittmar:2005ed` is used as parent.
3. If the option ``-p [PARENT]`` is not used, the |PDF| is
- generated using **x(1-x)** for all the flavors.
+ generated using **x(1-x)** for all the flavors.
-Trough the use of the argument
-``[LABEL]`` (``genpdf generate [NAME] [LABEL] [OPTIONS]``) it is also possible
+Through the use of the argument
+``[LABEL]`` (``genpdf generate [NAME] [LABEL] [OPTIONS]``) it is possible
to specify a set of flavors (expressed in |pid| basis) or a set of
-**evolution basis components** on which filtering the custom |PDF|.
-In this way the specified set is kept in the final |PDF|, while the rest
-is discarded.
+evolution basis components on which to filter the new |PDF|.
+In this way the specified set is kept in the new |PDF|, while the rest
+is discarded, i.e. set to zero.
In the case of custom |PDF| generated starting from a parent |PDF|,
it is possible to generate all the members through the flag ``-m``. If this
flag is not used, only the *zero* member is generated (together with the *info*
-file of course). Using the flag ``-m`` when the custom |PDF| is generated
-either using the **toy** |PDF| or the **x(1-x)** function, has no effects.
+file of course). Using the flag ``-m`` when a custom |PDF| is generated
+either using the **toy** |PDF| or the **x(1-x)** function has no effect.
In order to automatically install the custom |PDF| in the lhapdf folder
at generation time (so without using ``genpdf install [NAME]`` after the
generation), it is possible to use the ``-i`` flag.
-
We also provide an API with some additional features and possibilities
such as generating a |PDF| with a custom function for every |pid|
(through a ``dict`` structure) and filtering custom combination of
@@ -49,7 +55,7 @@ Examples
$ genpdf generate gonly 21
-This will generate the custom PDF using the debug x(1-x) PDF as parent
+This will generate a custom PDF using the debug x(1-x) PDF as parent
and then it will keep only the gluon.
.. code-block:: bash
@@ -62,7 +68,7 @@ This will install the previous PDF in the lhapdf folder.
$ genpdf generate Sonly S -p toy -i
-This will generate the custom PDF using the toy PDF as parent and then
+This will generate a custom PDF using the toy PDF as parent and then
it will keep only the singlet combination. The generated PDF is also
automatically installed in the lhapdf folder.
@@ -70,6 +76,14 @@ automatically installed in the lhapdf folder.
$ genpdf generate Vonly V -p CT10 -m
-This will generate the custom PDF using the CT10 PDF set as parent
+This will generate a custom PDF using the CT10 PDF set as parent
(if available) and it will keep only the valence combination. Moreover
it will generate all the members of the parent PDF.
+
+.. code-block:: bash
+
+ $ genpdf generate NN40qqbar -p NNPDF40_nnlo_as_01180 -- -5 -4 -3 -2 -1 1 2 3 4 5
+
+This will generate a custom PDF with no gluon contribution, but with all quarks
+taken from NNPDF4.0. Note that (as is customary with CLIs) you have to add the explicit
+separator ``--`` before specifying anti-quark flavors.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index c271bd870..1f03599ac 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -27,7 +27,7 @@
# -- Project information -----------------------------------------------------
project = "EKO"
-copyright = "2019-2023, the NNPDF team" # pylint: disable=redefined-builtin
+copyright = "2019-2024, the NNPDF team" # pylint: disable=redefined-builtin
author = "NNPDF team"
# The short X.Y version
@@ -112,10 +112,10 @@
)
extlinks = {
- "yadism": ("https://n3pdf.github.io/yadism/%s", "yadism"),
- "banana": ("https://n3pdf.github.io/banana/%s", "banana"),
- "pineappl": ("https://n3pdf.github.io/pineappl/%s", "pineappl"),
- "pineko": ("https://github.com/N3PDF/pineko/%s", "pineko"),
+ "yadism": ("https://n3pdf.github.io/yadism/%s", "yadism%s"),
+ "banana": ("https://n3pdf.github.io/banana/%s", "banana%s"),
+ "pineappl": ("https://n3pdf.github.io/pineappl/%s", "pineappl%s"),
+ "pineko": ("https://github.com/N3PDF/pineko/%s", "pineko%s"),
}
# -- Options for HTML output -------------------------------------------------
diff --git a/doc/source/overview/tutorials/alpha_s.ipynb b/doc/source/overview/tutorials/alpha_s.ipynb
index a9d75c0ca..f9cc5a240 100644
--- a/doc/source/overview/tutorials/alpha_s.ipynb
+++ b/doc/source/overview/tutorials/alpha_s.ipynb
@@ -39,12 +39,10 @@
"from eko.quantities.heavy_quarks import MatchingScales, QuarkMassScheme\n",
"\n",
"# set the (alpha_s, alpha_em) reference values\n",
- "couplings_ref = CouplingsInfo(\n",
- " alphas=0.118, alphaem=0.007496252, scale=91.0, num_flavs_ref=None, max_num_flavs=5\n",
- ")\n",
+ "couplings_ref = CouplingsInfo(alphas=0.118, alphaem=0.007496252, ref=(91.0, 5))\n",
"\n",
"# set heavy quark masses and their threshold ratios\n",
- "heavy_quark_masses = np.power([1.51, 4.92, 172.0],2)\n",
+ "heavy_quark_masses = np.power([1.51, 4.92, 172.0], 2)\n",
"thresholds_ratios = np.array([1.0, 1.0, 1.0])\n",
"\n",
"# set (QCD,QED) perturbative order\n",
@@ -88,9 +86,9 @@
}
],
"source": [
- "target_scale = 10.0 ** 2\n",
+ "target_scale = 10.0**2\n",
"a_s = sc.a_s(target_scale)\n",
- "print(\"The value of alpha_s at Q^2=100 GeV^2 is: \", 4. * np.pi * a_s)"
+ "print(\"The value of alpha_s at Q^2=100 GeV^2 is: \", 4.0 * np.pi * a_s)"
]
},
{
@@ -105,13 +103,10 @@
}
],
"metadata": {
- "interpreter": {
- "hash": "0a84ba3ac8703c04e87bc503a7d00188dfd591ad56130da93c406115a1e4a408"
- },
"kernelspec": {
- "display_name": "eko-KkPVjVhh-py3.10",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
- "name": "eko-kkpvjvhh-py3.10"
+ "name": "python3"
},
"language_info": {
"codemirror_mode": {
diff --git a/doc/source/overview/tutorials/dglap.ipynb b/doc/source/overview/tutorials/dglap.ipynb
index 0e271a00c..98cb4a6e1 100644
--- a/doc/source/overview/tutorials/dglap.ipynb
+++ b/doc/source/overview/tutorials/dglap.ipynb
@@ -68,7 +68,7 @@
"th_card = example.theory()\n",
"op_card = example.operator()\n",
"# here we replace the grid with a very minimal one, to speed up the example\n",
- "op_card.xgrid = [1e-3, 1e-2, 1e-1, 5e-1, 1.]"
+ "op_card.xgrid = [1e-3, 1e-2, 1e-1, 5e-1, 1.0]"
]
},
{
@@ -91,14 +91,9 @@
"{'order': [1, 0],\n",
" 'couplings': {'alphas': 0.118,\n",
" 'alphaem': 0.007496252,\n",
- " 'scale': 91.2,\n",
- " 'max_num_flavs': 6,\n",
- " 'num_flavs_ref': 5,\n",
+ " 'ref': [91.2, 5],\n",
" 'em_running': False},\n",
- " 'heavy': {'num_flavs_init': 4,\n",
- " 'num_flavs_max_pdf': 6,\n",
- " 'intrinsic_flavors': [4],\n",
- " 'masses': [[2.0, nan], [4.5, nan], [173.07, nan]],\n",
+ " 'heavy': {'masses': [[2.0, nan], [4.5, nan], [173.07, nan]],\n",
" 'masses_scheme': 'pole',\n",
" 'matching_ratios': [1.0, 1.0, 1.0]},\n",
" 'xif': 1.0,\n",
@@ -123,7 +118,7 @@
{
"data": {
"text/plain": [
- "{'mu0': 1.65,\n",
+ "{'init': [1.65, 4],\n",
" 'mugrid': [[100.0, 5]],\n",
" 'xgrid': [0.001, 0.01, 0.1, 0.5, 1.0],\n",
" 'configs': {'evolution_method': 'iterate-exact',\n",
@@ -174,7 +169,7 @@
"id": "880aadcf-8f87-4918-a0bc-09581d0d3579",
"metadata": {},
"source": [
- "The actual result is a complicate EKO object, we will discuss it in a separate tutorial.\n",
+ "The actual result is a complicate EKO object, which we will discuss it in a separate tutorial.\n",
"\n",
"You have just run your first DGLAP calculation!"
]
diff --git a/doc/source/overview/tutorials/output.ipynb b/doc/source/overview/tutorials/output.ipynb
index 47345ac35..1cfe9f071 100644
--- a/doc/source/overview/tutorials/output.ipynb
+++ b/doc/source/overview/tutorials/output.ipynb
@@ -34,7 +34,7 @@
"id": "2f8f0666-c6ab-40f6-86f5-15773f205b51",
"metadata": {},
"source": [
- "We can access the operator, by using the `open` method (similar to python's `open`):"
+ "We can access the operator, by using the `read` method:"
]
},
{
@@ -76,8 +76,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "TheoryCard(order=(1, 0), couplings=CouplingsInfo(alphas=0.118, alphaem=0.007496252, scale=91.2, max_num_flavs=6, num_flavs_ref=5, em_running=False), heavy=HeavyInfo(num_flavs_init=4, num_flavs_max_pdf=6, intrinsic_flavors=[4, 5, 6], masses=[[2.0, nan], [4.5, nan], [173.07, nan]], masses_scheme=, matching_ratios=[1.0, 1.0, 1.0]), xif=1.0, n3lo_ad_variation=(0, 0, 0, 0))\n",
- "OperatorCard(mu0=1.65, mugrid=[(100.0, 5)], xgrid=, configs=Configs(evolution_method=, ev_op_max_order=(10, 0), ev_op_iterations=10, scvar_method=None, inversion_method=None, interpolation_polynomial_degree=4, interpolation_is_log=True, polarized=False, time_like=False, n_integration_cores=0), debug=Debug(skip_singlet=False, skip_non_singlet=False), eko_version='0.0.0')\n"
+ "TheoryCard(order=(1, 0), couplings=CouplingsInfo(alphas=0.118, alphaem=0.007496252, ref=(91.2, 5), em_running=False), heavy=HeavyInfo(masses=[[2.0, nan], [4.5, nan], [173.07, nan]], masses_scheme=, matching_ratios=[1.0, 1.0, 1.0]), xif=1.0, n3lo_ad_variation=(0, 0, 0, 0))\n",
+ "OperatorCard(init=(1.65, 4), mugrid=[(100.0, 5)], xgrid=, configs=Configs(evolution_method=, ev_op_max_order=(10, 0), ev_op_iterations=10, scvar_method=None, inversion_method=None, interpolation_polynomial_degree=4, interpolation_is_log=True, polarized=False, time_like=False, n_integration_cores=0), debug=Debug(skip_singlet=False, skip_non_singlet=False), eko_version='0.0.0')\n"
]
}
],
@@ -143,7 +143,7 @@
],
"source": [
"with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n",
- " with evolution_operator.operator((10000.,5)) as op:\n",
+ " with evolution_operator.operator((10000.0, 5)) as op:\n",
" print(f\"operator: {op.operator.shape}\")\n",
" print(f\"error: {op.error.shape}\")"
]
diff --git a/doc/source/overview/tutorials/pdf.ipynb b/doc/source/overview/tutorials/pdf.ipynb
index 4607ef891..bfc6ad3ad 100644
--- a/doc/source/overview/tutorials/pdf.ipynb
+++ b/doc/source/overview/tutorials/pdf.ipynb
@@ -13,9 +13,9 @@
"id": "3e0dcd0f",
"metadata": {},
"source": [
- "## Method 1: Using apply_pdf\n",
+ "## Method 1: Using `apply_pdf`\n",
"\n",
- "In this first part we will compute the eko and subsequently apply the initial PDF \"manually\" calling a dedicated function. "
+ "In this first part, we compute the eko and subsequently apply the initial PDF \"manually\" calling a dedicated function. "
]
},
{
@@ -52,6 +52,7 @@
"import pathlib\n",
"import eko\n",
"from banana import toy\n",
+ "\n",
"pdf = toy.mkPDF(\"\", 0)"
]
},
@@ -71,6 +72,7 @@
"outputs": [],
"source": [
"from ekobox.apply import apply_pdf\n",
+ "\n",
"with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n",
" evolved_pdfs = apply_pdf(evolution_operator, pdf)"
]
@@ -92,7 +94,7 @@
{
"data": {
"text/plain": [
- "dict_keys([10000.0])"
+ "dict_keys([(10000.0, 5)])"
]
},
"execution_count": 3,
@@ -134,7 +136,7 @@
}
],
"source": [
- "evolved_pdfs[10000.0][\"pdfs\"][21]"
+ "evolved_pdfs[(10000.0, 5)][\"pdfs\"][21]"
]
},
{
@@ -150,7 +152,7 @@
"id": "e925d2c9",
"metadata": {},
"source": [
- "## Method 2: Using evolve_pdfs\n",
+ "## Method 2: Using `evolve_pdfs`\n",
"\n",
"In this second part we illustrate how to get (and install) directly a LHAPDF set evolved with eko. "
]
@@ -171,6 +173,7 @@
"outputs": [],
"source": [
"from banana import toy\n",
+ "\n",
"pdf = toy.mkPDF(\"\", 0)"
]
},
@@ -195,10 +198,10 @@
"th_card = example.theory()\n",
"op_card = example.operator()\n",
"# here we replace the grid with a very minimal one, to speed up the example\n",
- "op_card.xgrid = eko.interpolation.XGrid([1e-3, 1e-2, 1e-1, 5e-1, 1.])\n",
- "op_card.mugrid = [(10.,5), (100.,5)]\n",
+ "op_card.xgrid = eko.interpolation.XGrid([1e-3, 1e-2, 1e-1, 5e-1, 1.0])\n",
+ "op_card.mugrid = [(10.0, 5), (100.0, 5)]\n",
"# set QCD LO evolution\n",
- "th_card.orders = (1,0)"
+ "th_card.orders = (1, 0)"
]
},
{
@@ -236,16 +239,10 @@
],
"source": [
"from ekobox.evol_pdf import evolve_pdfs\n",
+ "\n",
"path = pathlib.Path(\"./myeko2.tar\")\n",
"path.unlink(missing_ok=True)\n",
- "evolve_pdfs(\n",
- " [pdf],\n",
- " th_card,\n",
- " op_card,\n",
- " install=True,\n",
- " name=\"Evolved_PDF\",\n",
- " store_path=path\n",
- ")"
+ "evolve_pdfs([pdf], th_card, op_card, install=True, name=\"Evolved_PDF\", store_path=path)"
]
},
{
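
Method 2 above, gathered into a single sketch (same minimal grids and toy PDF as in the tutorial cells):

```python
import pathlib

import eko
from banana import toy
from ekobox.cards import example
from ekobox.evol_pdf import evolve_pdfs

pdf = toy.mkPDF("", 0)

# minimal runcards to keep the example fast
th_card = example.theory()
op_card = example.operator()
op_card.xgrid = eko.interpolation.XGrid([1e-3, 1e-2, 1e-1, 5e-1, 1.0])
op_card.mugrid = [(10.0, 5), (100.0, 5)]
th_card.orders = (1, 0)  # QCD LO

# compute the EKO, evolve the PDF and install it as an LHAPDF set
path = pathlib.Path("./myeko2.tar")
path.unlink(missing_ok=True)
evolve_pdfs([pdf], th_card, op_card, install=True, name="Evolved_PDF", store_path=path)
```
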
@@ -273,6 +270,7 @@
],
"source": [
"import lhapdf\n",
+ "\n",
"evolved_pdf = lhapdf.mkPDF(\"Evolved_PDF\", 0)"
]
},
@@ -300,12 +298,12 @@
}
],
"source": [
- "pid = 21 # gluon pid\n",
- "Q2 = 89.10 # Q^2 in Gev^2\n",
- "x = 0.01 # momentum fraction \n",
+ "pid = 21 # gluon pid\n",
+ "Q2 = 89.10 # Q^2 in Gev^2\n",
+ "x = 0.01 # momentum fraction\n",
"\n",
"# check that the particle is present\n",
- "print(\"has gluon?\",evolved_pdf.hasFlavor(pid))\n",
+ "print(\"has gluon?\", evolved_pdf.hasFlavor(pid))\n",
"# now do the lookup\n",
"xg = evolved_pdf.xfxQ2(pid, x, Q2)\n",
"print(f\"xg(x={x}, Q2={Q2}) = {xg}\")"
@@ -352,28 +350,28 @@
"import lhapdf\n",
"from ekobox.cards import example\n",
"from eko.interpolation import make_grid\n",
- "from eko.quantities.heavy_quarks import QuarkMassRef,HeavyQuarks\n",
+ "from eko.quantities.heavy_quarks import QuarkMassRef, HeavyQuarks\n",
"\n",
"# get the PDF object\n",
"ct14llo = lhapdf.mkPDF(\"CT14llo\")\n",
"\n",
"# setup the operator card\n",
"op_card = example.operator()\n",
- "op_card.xgrid = eko.interpolation.XGrid(make_grid(30, 30)) # x grid\n",
- "op_card.mugrid = [(float(q),5) for q in np.geomspace(5., 100, 5)] # Q2 grid\n",
- "op_card.mu0 = 1.295000 # starting point for the evolution \n",
+ "op_card.xgrid = eko.interpolation.XGrid(make_grid(30, 30)) # x grid\n",
+ "op_card.mugrid = [(float(q), 5) for q in np.geomspace(5.0, 100, 5)] # Q2 grid\n",
+ "op_card.init = (1.295000, 3) # starting point for the evolution\n",
"\n",
"# setup the theory card - this can be mostly inferred from the PDF's .info file\n",
- "\n",
"th_card = example.theory()\n",
- "th_card.orders = (1,0) # QCD LO\n",
- "th_card.heavy.masses = HeavyQuarks([QuarkMassRef([1.3,nan]), QuarkMassRef([4.75,nan]), QuarkMassRef([172.,nan])]) # quark mass\n",
- "th_card.couplings.alphas = 0.130000 # reference value of alpha_s\n",
- "th_card.couplings.scale = 91.1876 # the reference scale at which alpha_s is provided\n",
- "th_card.couplings.num_flavs_ref = 5 # the number of flavors active at the alpha_s reference scale\n",
- "th_card.couplings.max_num_flavs = 5 # the maximum number of flavors active in the alpha_s evolution\n",
- "th_card.couplings.num_flavs_init = 3 # the number of flavors active at the reference scale\n",
- "th_card.num_flavs_max_pdf = 5 # the maximum number of flavors active in the pdf evolution."
+ "th_card.orders = (1, 0) # QCD LO\n",
+ "th_card.heavy.masses = HeavyQuarks(\n",
+ " [QuarkMassRef([1.3, nan]), QuarkMassRef([4.75, nan]), QuarkMassRef([172.0, nan])]\n",
+ ") # quark mass\n",
+ "th_card.couplings.alphas = 0.130000 # reference value of alpha_s\n",
+ "th_card.couplings.ref = (\n",
+ " 91.1876,\n",
+ " 5,\n",
+ ") # the reference scale together with the number of flavors at which alpha_s is provided"
]
},
{
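
The cell above also illustrates the runcard interface updated by this patch; as far as this diff shows, the old scalar fields map onto tuples that carry the number of active flavors:

```python
# sketch of the field migration visible in the diff above
op_card.init = (1.295000, 3)          # was: op_card.mu0 = 1.295 plus couplings.num_flavs_init = 3
th_card.couplings.ref = (91.1876, 5)  # was: couplings.scale = 91.1876 plus couplings.num_flavs_ref = 5
```
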
@@ -407,15 +405,11 @@
],
"source": [
"from ekobox.evol_pdf import evolve_pdfs\n",
+ "\n",
"path = pathlib.Path(\"./myeko_ct14llo.tar\")\n",
"path.unlink(missing_ok=True)\n",
"evolve_pdfs(\n",
- " [ct14llo],\n",
- " th_card,\n",
- " op_card,\n",
- " install=True,\n",
- " name=\"my_ct14llo\",\n",
- " store_path=path\n",
+ " [ct14llo], th_card, op_card, install=True, name=\"my_ct14llo\", store_path=path\n",
")"
]
},
@@ -439,33 +433,33 @@
"output_type": "stream",
"text": [
"LHAPDF 6.4.0 loading /home/felix/local/share/LHAPDF/my_ct14llo/my_ct14llo_0000.dat\n",
+ "my_ct14llo PDF set, member #0, version 1\n",
" x Q2 ct14llo my_ct14llo relative_diff\n",
- "0 0.000010 25.000000 7.635785e+01 7.630461e+01 0.000697\n",
- "1 0.000173 25.000000 3.194273e+01 3.192092e+01 0.000683\n",
- "2 0.003000 25.000000 1.081843e+01 1.081086e+01 0.000701\n",
- "3 0.051962 25.000000 1.958956e+00 1.958632e+00 0.000166\n",
- "4 0.900000 25.000000 1.922415e-05 1.955026e-05 -0.016963\n",
- "5 0.000010 111.803399 1.333957e+02 1.332985e+02 0.000729\n",
- "6 0.000173 111.803399 4.777286e+01 4.773664e+01 0.000758\n",
- "7 0.003000 111.803399 1.341028e+01 1.339967e+01 0.000791\n",
- "8 0.051962 111.803399 1.978216e+00 1.978130e+00 0.000044\n",
- "9 0.900000 111.803399 6.644805e-06 6.753652e-06 -0.016381\n",
- "10 0.000010 500.000000 1.967032e+02 1.965456e+02 0.000801\n",
- "11 0.000173 500.000000 6.291393e+01 6.286095e+01 0.000842\n",
- "12 0.003000 500.000000 1.542347e+01 1.540996e+01 0.000876\n",
- "13 0.051962 500.000000 1.947465e+00 1.947391e+00 0.000038\n",
- "14 0.900000 500.000000 2.929060e-06 2.977306e-06 -0.016471\n",
- "15 0.000010 2236.067977 2.633266e+02 2.631109e+02 0.000819\n",
- "16 0.000173 2236.067977 7.708540e+01 7.701938e+01 0.000856\n",
- "17 0.003000 2236.067977 1.700410e+01 1.698928e+01 0.000872\n",
- "18 0.051962 2236.067977 1.893923e+00 1.893971e+00 -0.000025\n",
- "19 0.900000 2236.067977 1.544450e-06 1.570997e-06 -0.017189\n",
- "20 0.000010 10000.000000 3.314097e+02 3.311351e+02 0.000829\n",
- "21 0.000173 10000.000000 9.023010e+01 9.015279e+01 0.000857\n",
- "22 0.003000 10000.000000 1.825934e+01 1.824402e+01 0.000839\n",
- "23 0.051962 10000.000000 1.830992e+00 1.831183e+00 -0.000104\n",
- "24 0.900000 10000.000000 9.288458e-07 9.447927e-07 -0.017169\n",
- "my_ct14llo PDF set, member #0, version 1\n"
+ "0 0.000010 25.000000 7.635785e+01 7.630719e+01 0.000663\n",
+ "1 0.000173 25.000000 3.194273e+01 3.192239e+01 0.000637\n",
+ "2 0.003000 25.000000 1.081843e+01 1.081160e+01 0.000632\n",
+ "3 0.051962 25.000000 1.958956e+00 1.958820e+00 0.000069\n",
+ "4 0.900000 25.000000 1.922415e-05 1.955440e-05 -0.017179\n",
+ "5 0.000010 111.803399 1.333957e+02 1.333028e+02 0.000697\n",
+ "6 0.000173 111.803399 4.777286e+01 4.773855e+01 0.000718\n",
+ "7 0.003000 111.803399 1.341028e+01 1.340044e+01 0.000734\n",
+ "8 0.051962 111.803399 1.978216e+00 1.978292e+00 -0.000038\n",
+ "9 0.900000 111.803399 6.644805e-06 6.756354e-06 -0.016787\n",
+ "10 0.000010 500.000000 1.967032e+02 1.965517e+02 0.000770\n",
+ "11 0.000173 500.000000 6.291393e+01 6.286327e+01 0.000805\n",
+ "12 0.003000 500.000000 1.542347e+01 1.541073e+01 0.000826\n",
+ "13 0.051962 500.000000 1.947465e+00 1.947532e+00 -0.000034\n",
+ "14 0.900000 500.000000 2.929060e-06 2.979511e-06 -0.017224\n",
+ "15 0.000010 2236.067977 2.633266e+02 2.631189e+02 0.000789\n",
+ "16 0.000173 2236.067977 7.708540e+01 7.702204e+01 0.000822\n",
+ "17 0.003000 2236.067977 1.700410e+01 1.699004e+01 0.000827\n",
+ "18 0.051962 2236.067977 1.893923e+00 1.894094e+00 -0.000090\n",
+ "19 0.900000 2236.067977 1.544450e-06 1.572860e-06 -0.018395\n",
+ "20 0.000010 10000.000000 3.314097e+02 3.311450e+02 0.000799\n",
+ "21 0.000173 10000.000000 9.023010e+01 9.015576e+01 0.000824\n",
+ "22 0.003000 10000.000000 1.825934e+01 1.824477e+01 0.000798\n",
+ "23 0.051962 10000.000000 1.830992e+00 1.831291e+00 -0.000163\n",
+ "24 0.900000 10000.000000 9.288458e-07 9.463689e-07 -0.018866\n"
]
}
],
@@ -475,15 +469,15 @@
"# load evolved pdf\n",
"my_ct14llo = lhapdf.mkPDF(\"my_ct14llo\", 0)\n",
"\n",
- "pid = 21 # gluon pid\n",
+ "pid = 21 # gluon pid\n",
"\n",
"# collect data\n",
- "log = {\"x\": [], \"Q2\" : [], \"ct14llo\": [], \"my_ct14llo\": [], \"relative_diff\": []} \n",
- "for q in np.geomspace(5., 100, 5):\n",
- " q2 = q**2.\n",
+ "log = {\"x\": [], \"Q2\": [], \"ct14llo\": [], \"my_ct14llo\": [], \"relative_diff\": []}\n",
+ "for q in np.geomspace(5.0, 100, 5):\n",
+ " q2 = q**2.0\n",
" for x in np.geomspace(1e-5, 0.9, 5):\n",
" value = ct14llo.xfxQ2(pid, x, q2)\n",
- " my_value = my_ct14llo.xfxQ2(pid, x, q2)\n",
+ " my_value = my_ct14llo.xfxQ2(pid, x, q2)\n",
" log[\"x\"].append(x)\n",
" log[\"Q2\"].append(q2)\n",
" log[\"ct14llo\"].append(value)\n",
diff --git a/doc/source/refs.bib b/doc/source/refs.bib
index 16a662451..769f2e326 100644
--- a/doc/source/refs.bib
+++ b/doc/source/refs.bib
@@ -823,14 +823,18 @@ @article{Kawamura:2012cr
@article{Falcioni:2023luc,
author = "Falcioni, G. and Herzog, F. and Moch, S. and Vogt, A.",
- title = "{Four-loop splitting functions in QCD -- The quark-quark case}",
+ title = "{Four-loop splitting functions in QCD \textendash{} The quark-quark case}",
eprint = "2302.07593",
archivePrefix = "arXiv",
primaryClass = "hep-ph",
reportNumber = "DESY 23--022, LTH 1333",
- month = "2",
+ doi = "10.1016/j.physletb.2023.137944",
+ journal = "Phys. Lett. B",
+ volume = "842",
+ pages = "137944",
year = "2023"
}
+
@article{Gluck:1995yr,
author = "Gluck, M. and Reya, E. and Stratmann, M. and Vogelsang, W.",
title = "{Next-to-leading order radiative parton model analysis of polarized deep inelastic lepton - nucleon scattering}",
@@ -991,24 +995,30 @@ @article{Dokshitzer:2005bf
@article{Falcioni:2023vqq,
author = "Falcioni, G. and Herzog, F. and Moch, S. and Vogt, A.",
- title = "{Four-loop splitting functions in QCD -- The gluon-to-quark case}",
+ title = "{Four-loop splitting functions in QCD \textendash{} The gluon-to-quark case}",
eprint = "2307.04158",
archivePrefix = "arXiv",
primaryClass = "hep-ph",
reportNumber = "DESY 23-096, LTH 1345",
- month = "7",
+ doi = "10.1016/j.physletb.2023.138215",
+ journal = "Phys. Lett. B",
+ volume = "846",
+ pages = "138215",
year = "2023"
}
@article{Gehrmann:2023cqm,
author = "Gehrmann, Thomas and von Manteuffel, Andreas and Sotnikov, Vasily and Yang, Tong-Zhi",
- title = "{Complete $N_f^2$ contributions to four-loop pure-singlet splitting functions}",
+ title = "{Complete $ {N}_f^2 $ contributions to four-loop pure-singlet splitting functions}",
eprint = "2308.07958",
archivePrefix = "arXiv",
primaryClass = "hep-ph",
reportNumber = "MSUHEP-23-024, ZU-TH 43/23",
- month = "8",
- year = "2023"
+ doi = "10.1007/JHEP01(2024)029",
+ journal = "JHEP",
+ volume = "01",
+ pages = "029",
+ year = "2024"
}
@article{Falcioni:2023tzp,
@@ -1018,8 +1028,11 @@ @article{Falcioni:2023tzp
archivePrefix = "arXiv",
primaryClass = "hep-ph",
reportNumber = "ZU-TH 62/23, DESY 23-146, Nikhef 2023-015, LTH 1353",
- month = "10",
- year = "2023"
+ doi = "10.1016/j.physletb.2023.138351",
+ journal = "Phys. Lett. B",
+ volume = "848",
+ pages = "138351",
+ year = "2024"
}
@article{Moch:2023tdj,
@@ -1029,6 +1042,34 @@ @article{Moch:2023tdj
archivePrefix = "arXiv",
primaryClass = "hep-ph",
reportNumber = "DESY-23-150, Nikhef 23-016, LTH 1354",
- month = "10",
- year = "2023"
+ doi = "10.1016/j.physletb.2024.138468",
+ journal = "Phys. Lett. B",
+ volume = "849",
+ pages = "138468",
+ year = "2024"
+}
+
+@article{Ablinger:2024xtt,
+ author = {Ablinger, J. and Behring, A. and Bl\"umlein, J. and De Freitas, A. and von Manteuffel, A. and Schneider, C. and Sch\"onwald, K.},
+ title = "{The non-first-order-factorizable contributions to the three-loop single-mass operator matrix elements $A_{Qg}^{(3)}$ and $\Delta A_{Qg}^{(3)}$}",
+ eprint = "2403.00513",
+ archivePrefix = "arXiv",
+ primaryClass = "hep-ph",
+ reportNumber = "DO--TH 23/15. DESY 24--027, RISC Report series 24--02, ZU-TH 13/24,
+ CERN-TH-2024-30, DO-TH 23/15, RISC Report series 24--02, ZU-TH 13/24,
+ CERN-TH-2024-30, DESY-24-027",
+ month = "3",
+ year = "2024",
+ journal = ""
+}
+
+@article{Falcioni:2024xyt,
+ author = "Falcioni, G. and Herzog, F. and Moch, S. and Pelloni, A. and Vogt, A.",
+ title = "{Four-loop splitting functions in QCD -- The quark-to-gluon case}",
+ eprint = "2404.09701",
+ archivePrefix = "arXiv",
+ primaryClass = "hep-ph",
+ reportNumber = "ZU-TH 20/24, DESY-24-053, LTH 1367",
+ month = "4",
+ year = "2024"
}
diff --git a/doc/source/theory/Matching.rst b/doc/source/theory/Matching.rst
index 5089c0a95..fa84cab16 100644
--- a/doc/source/theory/Matching.rst
+++ b/doc/source/theory/Matching.rst
@@ -88,11 +88,11 @@ During the matching we use :math:`a_s^{(n_f+1)}`: in fact the :math:`a_s` decoup
:math:`\ln(\mu_{h}^2/m_{h}^2)`, which are cancelled by the OME's :math:`A_{kl,H}`.
|N3LO| matrix elements have been presented in :cite:`Bierenbaum:2009mv` and following publications
-:cite:`Ablinger:2010ty,Ablinger:2014vwa,Ablinger:2014uka,Behring:2014eya,Blumlein:2017wxd,Ablinger_2014,Ablinger_2015,Ablinger:2022wbb`.
+:cite:`Ablinger:2010ty,Ablinger:2014vwa,Ablinger:2014uka,Behring:2014eya,Blumlein:2017wxd,Ablinger_2014,Ablinger_2015,Ablinger:2022wbb,Ablinger:2024xtt`.
Parts proportional to :math:`\ln(\mu_{h}^2/m_{h}^2)` are also included up to |N3LO|.
-The contribution of :math:`A_{Hg}^{(3)}` is not yet fully known analytically and has been parameterized using the first 5 known
-moments :cite:`Bierenbaum:2009mv` and the |LL| small-x contribution :cite:`Kawamura:2012cr`
+All contributions are now known analytically. Due to the length and complexity of the expressions,
+some parts of :math:`A_{Hg}^{S,(3)}, A_{Hq}^{S,(3)}, A_{gg}^{S,(3)}, A_{qq}^{NS,(3)}` have been parameterized.
We remark that contributions of the heavy quark initiated diagrams at |NNLO| and |N3LO| have not been computed yet,
thus the elements :math:`A_{qH}^{(2)},A_{gH}^{(2)},A_{HH}^{(2)}` are not encoded in EKO despite being present.
diff --git a/doc/source/theory/N3LO_ad.rst b/doc/source/theory/N3LO_ad.rst
index 34030f7e7..1fb13a15e 100644
--- a/doc/source/theory/N3LO_ad.rst
+++ b/doc/source/theory/N3LO_ad.rst
@@ -264,7 +264,8 @@ The other parts are approximated using some known limits:
& \gamma_{qg}(2) + \gamma_{gg}(2) = 0 \\
& \gamma_{qq}(2) + \gamma_{gq}(2) = 0 \\
- For :math:`\gamma_{qq,ps}, \gamma_{qg}` other 5 additional moments are available :cite:`Falcioni:2023luc,Falcioni:2023vqq`.
+ For :math:`\gamma_{qq,ps}, \gamma_{qg}, \gamma_{gq}` 5 additional moments are available from
+ :cite:`Falcioni:2023luc,Falcioni:2023vqq,Falcioni:2024xyt`, respectively,
making the parametrization of this splitting function much more accurate.
The difference between the known moments and the known limits is parametrized
@@ -337,7 +338,7 @@ final reduced sets of candidates.
* - :math:`f_4(N)`
- :math:`\frac{1}{N^4},\ \frac{1}{N^3},\ \frac{1}{N^2},\ \frac{1}{(N+1)},\ \frac{1}{(N+2)},\ \mathcal{M}[\ln^2(1-x)],\ \mathcal{M}[\ln(1-x)]`
- Following :cite:`Moch:2023tdj` we have assumed no violation of the scaling with :math:`\gamma_{gg}`
+ Following :cite:`Moch:2023tdj,Falcioni:2024xyt` we have assumed no violation of the scaling with :math:`\gamma_{gg}`
also for the |NLL| small-x term, to help the convergence. We expect that any possible deviation can be parametrized as a shift in the |NNLL| terms
which are free to vary independently.
diff --git a/extras/lh_bench_23/.gitignore b/extras/lh_bench_23/.gitignore
index 2942ad98d..79e096ab2 100644
--- a/extras/lh_bench_23/.gitignore
+++ b/extras/lh_bench_23/.gitignore
@@ -1,3 +1,4 @@
*.tar
*.csv
*.dat
+*.tex
diff --git a/extras/lh_bench_23/cfg.py b/extras/lh_bench_23/cfg.py
index ef7c747ef..c3700687d 100644
--- a/extras/lh_bench_23/cfg.py
+++ b/extras/lh_bench_23/cfg.py
@@ -62,10 +62,12 @@ def vfns_theory(xif=1.0):
]
-def ffns_theory(xif=1.0):
+def ffns_theory(xif=1.0, pto=2):
"""Generate a VFNS theory card."""
tt = copy.deepcopy(_t_ffns)
tt["xif"] = xif
+ tt["order"] = (pto + 1, 0)
+ tt["matching_order"] = (pto, 0)
return runcards.TheoryCard.from_dict(tt)
@@ -107,9 +109,15 @@ def n3lo_theory(ad_variation, is_ffns, use_fhmruvv=False, xif=1.0):
)
vfns_operator = runcards.OperatorCard.from_dict(_o_vfns)
-_o_ffns = copy.deepcopy(_o_vfns)
-_o_ffns["mugrid"] = [(100.0, 4)]
-ffns_operator = runcards.OperatorCard.from_dict(_o_ffns)
+
+def ffns_operator(ev_method="iterate-exact"):
+ """Generate a FFNS theory card."""
+ op = copy.deepcopy(_o_vfns)
+ op["mugrid"] = [(100.0, 4)]
+ op["configs"]["evolution_method"] = ev_method
+ if ev_method == "truncated":
+ op["configs"]["ev_op_iterations"] = 1
+ return runcards.OperatorCard.from_dict(op)
# flavor rotations
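
A hypothetical usage sketch of the updated `cfg.py` helpers introduced above (`pto` is the perturbative order beyond LO, `ev_method` selects the evolution method):

```python
from cfg import ffns_operator, ffns_theory

th_nnlo = ffns_theory(xif=1.0, pto=2)          # sets order = (3, 0), matching_order = (2, 0)
op_exa = ffns_operator()                       # default "iterate-exact" evolution
op_trn = ffns_operator(ev_method="truncated")  # truncated evolution, ev_op_iterations forced to 1
```
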
diff --git a/extras/lh_bench_23/parse_to_latex.py b/extras/lh_bench_23/parse_to_latex.py
new file mode 100644
index 000000000..7f59ddc2b
--- /dev/null
+++ b/extras/lh_bench_23/parse_to_latex.py
@@ -0,0 +1,179 @@
+from cfg import here, table_dir, xgrid
+from utils import compute_n3lo_avg_err, load_n3lo_tables
+
+n3lo_table_dir = table_dir
+
+latex_tab = here / "latex_tab"
+latex_tab.mkdir(exist_ok=True)
+
+SVS = ["central", "up", "down"]
+
+MIDRULE1 = r"""
+\hline \hline
+\multicolumn{9}{||c||}{} \\[-3mm]
+\multicolumn{9}{||c||}{"""
+
+MIDRULE2 = r"""} \\
+\multicolumn{9}{||c||}{} \\[-0.3cm]
+\hline \hline
+ & & & & & & & \\[-0.3cm]
+"""
+
+BOTTOMRULE = r"""
+\hline \hline
+\end{tabular}
+\end{center}
+\end{table}
+"""
+
+VFNS_LABELS = r"""
+ \multicolumn{1}{c|} {$xu_v$} &
+ \multicolumn{1}{c|} {$xd_v$} &
+ \multicolumn{1}{c|} {$xL_-$} &
+ \multicolumn{1}{c|} {$xL_+$} &
+ \multicolumn{1}{c|} {$xs_+$} &
+ \multicolumn{1}{c|} {$xc_+$} &
+ \multicolumn{1}{c|} {$xb_+$} &
+ \multicolumn{1}{c||}{$xg$} \\[0.5mm]
+ """
+
+FFNS_LABELS = r"""
+ \multicolumn{1}{c|} {$xu_v$} &
+ \multicolumn{1}{c|} {$xd_v$} &
+ \multicolumn{1}{c|} {$xL_-$} &
+ \multicolumn{1}{c|} {$xL_+$} &
+ \multicolumn{1}{c|} {$xs_v$} &
+ \multicolumn{1}{c|} {$xs_+$} &
+ \multicolumn{1}{c|} {$xc_+$} &
+ \multicolumn{1}{c||}{$xg$}
+ """
+
+
+def insert_headrule(scheme, approx, caption):
+ """Insert the middle rule."""
+ label = r"\label{tab:" + f"n3lo_{scheme.lower()}_{approx.lower()}" + "}"
+ scheme_label = (
+ r", $\, N_{\rm f} = 3\ldots 5\,$,"
+ if scheme == "VFNS"
+ else r"$\, N_{\rm f} = 4$,"
+ )
+ HEADRULE = (
+ r"""
+ \begin{table}[htp]
+ \caption{"""
+ + caption
+ + r"""}
+ """
+ + label
+ + r"""
+ \begin{center}
+ \vspace{5mm}
+ \begin{tabular}{||c||r|r|r|r|r|r|r|r||}
+ \hline \hline
+ \multicolumn{9}{||c||}{} \\[-3mm]
+ \multicolumn{9}{||c||}{"""
+ # + r"""aN$^3$LO, """
+ + approx
+ + scheme_label
+ + r"""$\,\mu_{\rm f}^2 = 10^4 \mbox{ GeV}^2$} \\
+ \multicolumn{9}{||c||}{} \\[-0.3cm]
+ \hline \hline
+ \multicolumn{9}{||c||}{} \\[-3mm]
+ \multicolumn{1}{||c||}{$x$} &
+ """
+ )
+ HEADRULE += VFNS_LABELS if scheme == "VFNS" else FFNS_LABELS
+ HEADRULE += r"""\\[0.5mm]"""
+ return HEADRULE
+
+
+def insert_midrule(sv):
+ """Insert the middle rule."""
+ # TODO: is this mapping correct or the other way round ??
+ # xif2 = 2 -> up
+ # xif2 = 1/2 -> down
+ label = {
+ "central": r"$\mu_{\rm r}^2 = \ \mu_{\rm f}^2$",
+ "down": r"$\mu_{\rm r}^2 = 0.5 \ \mu_{\rm f}^2$",
+ "up": r"$\mu_{\rm r}^2 = 2 \ \mu_{\rm f}^2$",
+ }
+ return MIDRULE1 + label[sv] + MIDRULE2
+
+
+def format_float(values):
+ """Clean float format."""
+ values = values.replace("0000", "0")
+ values = values.replace("e-0", "$^{-")
+ values = values.replace("e-10", "$^{-10")
+ values = values.replace("e+0", "$^{+")
+ values = values.replace("&", "}$ &")
+ values = values.replace(r"\\", r"}$ \\")
+ return values
+
+
+def dump_table(scheme: str, approx: str, caption: str):
+ """Write a nice latex table."""
+ final_tab = insert_headrule(scheme, approx.replace("EKO", "NNPDF"), caption)
+ # loop on scales
+ for sv in SVS:
+ # load tables
+ dfs = load_n3lo_tables(n3lo_table_dir, scheme, sv=sv, approx=approx)
+
+ central, err = compute_n3lo_avg_err(dfs)
+
+ central.insert(0, "x", xgrid)
+ values = central.to_latex(float_format="{:.4e}".format, index=False)
+ values = "".join(e for e in values.split("\n")[4:-3])
+ final_tab += insert_midrule(sv) + format_float(values)
+
+ final_tab += BOTTOMRULE
+
+ # write
+ with open(
+ latex_tab / f"table-{scheme}-{approx.replace('EKO', 'NNPDF')}.tex",
+ "w",
+ encoding="utf-8",
+ ) as f:
+ f.writelines(final_tab)
+
+
+if __name__ == "__main__":
+ approx = "FHMRUVV"
+ scheme = "FFNS"
+ caption = r"""
+ Results for the FFNS aN$^3$LO evolution
+ for the initial conditions and the input parton distributions
+ given in Sec.~\ref{sec:toy_pdf},
+ with the FHMRUVV splitting functions approximation and the NNPDF code.
+ """
+ dump_table(scheme, approx, caption)
+
+ approx = "FHMRUVV"
+ scheme = "VFNS"
+ caption = r"""
+ Results for the VFNS aN$^3$LO evolution
+ for the initial conditions and the input parton distributions
+ given in Sec.~\ref{sec:toy_pdf},
+ with the FHMRUVV splitting functions approximation and the NNPDF code.
+ """
+ dump_table(scheme, approx, caption)
+
+ approx = "EKO"
+ scheme = "FFNS"
+ caption = r"""
+ Results for the FFNS aN$^3$LO evolution
+ for the initial conditions and the input parton distributions
+ given in Sec.~\ref{sec:toy_pdf},
+ with the NNPDF splitting functions approximation.
+ """
+ dump_table(scheme, approx, caption)
+
+ approx = "EKO"
+ scheme = "VFNS"
+ caption = r"""
+ Results for the VFNS aN$^3$LO evolution
+ for the initial conditions and the input parton distributions
+ given in Sec.~\ref{sec:toy_pdf},
+ with the NNPDF splitting functions approximation.
+ """
+ dump_table(scheme, approx, caption)
diff --git a/extras/lh_bench_23/plot_bench.py b/extras/lh_bench_23/plot_bench.py
deleted file mode 100644
index 6887e7ac3..000000000
--- a/extras/lh_bench_23/plot_bench.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from cfg import here, table_dir, xgrid
-from utils import (
- compute_n3lo_avg_err,
- compute_n3lo_nnlo_diff,
- load_n3lo_tables,
- load_nnlo_table,
- plot_diff_to_nnlo,
- plot_pdfs,
-)
-
-USE_LINX = True
-REL_DIFF = True
-SCHEME = "VFNS"
-SV = "central"
-
-plot_dir = here / "plots"
-n3lo_table_dir = table_dir # / SCHEME
-
-
-# load tables
-eko_dfs = load_n3lo_tables(n3lo_table_dir, SCHEME, approx="EKO")
-fhmv_dfs = load_n3lo_tables(n3lo_table_dir, SCHEME, approx="FHMV")
-nnlo_central = load_nnlo_table(table_dir, SCHEME, SV)
-
-# compute avg and std
-eko_res = compute_n3lo_avg_err(eko_dfs)
-fhmv_res = compute_n3lo_avg_err(fhmv_dfs)
-# eko_4mom_res = = compute_n3lo_avg_err(eko_dfs_4mom)
-
-n3lo_dfs = [
- (eko_res, "aN3LO EKO"),
- (fhmv_res, "aN3LO FHMV"),
- # (eko_4mom_res, "aN3LO EKO 4 mom"),
-]
-
-# PDFs plots
-plot_pdfs(xgrid, n3lo_dfs, nnlo_central, SCHEME, USE_LINX, plot_dir)
-
-# relative diff plots
-eko_diff = compute_n3lo_nnlo_diff(eko_res, nnlo_central, REL_DIFF)
-fhmv_diff = compute_n3lo_nnlo_diff(fhmv_res, nnlo_central, REL_DIFF)
-n3lo_dfs = [
- (eko_diff, "aN3LO EKO"),
- (fhmv_diff, "aN3LO FHMV"),
- # (eko_4mom_res, "aN3LO EKO 4 mom"),
-]
-
-# relative, absolute diff plots
-plot_diff_to_nnlo(xgrid, n3lo_dfs, SCHEME, USE_LINX, plot_dir, REL_DIFF)
diff --git a/extras/lh_bench_23/plot_bench_evol.py b/extras/lh_bench_23/plot_bench_evol.py
index 92972af14..af25f5c65 100644
--- a/extras/lh_bench_23/plot_bench_evol.py
+++ b/extras/lh_bench_23/plot_bench_evol.py
@@ -18,17 +18,21 @@
# load tables
-eko_dfs = load_n3lo_tables(n3lo_table_dir, SCHEME, approx="EKO", rotate_to_evol=True)
-fhmv_dfs = load_n3lo_tables(n3lo_table_dir, SCHEME, approx="FHMV", rotate_to_evol=True)
+eko_dfs = load_n3lo_tables(
+ n3lo_table_dir, SCHEME, sv="central", approx="EKO", rotate_to_evol=True
+)
+fhmruvv_dfs = load_n3lo_tables(
+ n3lo_table_dir, SCHEME, sv="central", approx="FHMRUVV", rotate_to_evol=True
+)
nnlo_central = load_nnlo_table(table_dir, SCHEME, SV, rotate_to_evol=True)
# compute avg and std
eko_res = compute_n3lo_avg_err(eko_dfs)
-fhmv_res = compute_n3lo_avg_err(fhmv_dfs)
+fhmruvv_res = compute_n3lo_avg_err(fhmruvv_dfs)
n3lo_dfs = [
(eko_res, "aN3LO EKO"),
- (fhmv_res, "aN3LO FHMV"),
+ (fhmruvv_res, "aN3LO FHMRUVV"),
]
# absolute plots
@@ -36,10 +40,10 @@
# relative, absolute diff plots
eko_diff = compute_n3lo_nnlo_diff(eko_res, nnlo_central, REL_DIFF)
-fhmv_diff = compute_n3lo_nnlo_diff(fhmv_res, nnlo_central, REL_DIFF)
+fhmruvv_diff = compute_n3lo_nnlo_diff(fhmruvv_res, nnlo_central, REL_DIFF)
n3lo_dfs = [
(eko_diff, "aN3LO EKO"),
- (fhmv_diff, "aN3LO FHMV"),
+ (fhmruvv_diff, "aN3LO FHMRUVV"),
]
plot_diff_to_nnlo(xgrid, n3lo_dfs, SCHEME, USE_LINX, plot_dir, REL_DIFF)
diff --git a/extras/lh_bench_23/plot_bench_msht.py b/extras/lh_bench_23/plot_bench_msht.py
index 0314082ee..f99c4c613 100644
--- a/extras/lh_bench_23/plot_bench_msht.py
+++ b/extras/lh_bench_23/plot_bench_msht.py
@@ -15,48 +15,51 @@
SV = "central"
plot_dir = here / "plots_msht"
-n3lo_table_dir = table_dir # / SCHEME
+n3lo_table_dir = table_dir
msht_table_dir = table_dir
# load tables
-eko_dfs = load_n3lo_tables(n3lo_table_dir, SCHEME, approx="EKO")
-fhmv_eko_dfs = load_n3lo_tables(n3lo_table_dir, SCHEME, approx="FHMV")
-msht_dfs = load_msht(msht_table_dir, SCHEME, approx="MSHT")
-fhmv_msht_dfs = load_msht(msht_table_dir, SCHEME, approx="FHMV")
+eko_dfs = load_n3lo_tables(n3lo_table_dir, SCHEME, SV, approx="EKO")
+
+fhmruvv_eko_dfs = load_n3lo_tables(n3lo_table_dir, SCHEME, SV, approx="FHMRUVV")
+fhmruvv_msht_dfs = load_msht(msht_table_dir, SCHEME, approx="FHMRUVV")
+
+msht_post_dfs = load_msht(msht_table_dir, SCHEME, approx="MSHTposterior")
+msht_prior_dfs = load_msht(msht_table_dir, SCHEME, approx="MSHTprior")
nnlo_central = load_nnlo_table(table_dir, SCHEME, SV)
# compute avg and std
eko_res = compute_n3lo_avg_err(eko_dfs)
-fhmv_eko_res = compute_n3lo_avg_err(fhmv_eko_dfs)
-msht_res = compute_n3lo_avg_err(msht_dfs)
-fhmv_msht_res = compute_n3lo_avg_err(fhmv_msht_dfs)
-# eko_4mom_res = = compute_n3lo_avg_err(eko_dfs_4mom)
+fhmruvv_eko_res = compute_n3lo_avg_err(fhmruvv_eko_dfs)
+fhmruvv_msht_res = compute_n3lo_avg_err(fhmruvv_msht_dfs)
+msht_post_res = compute_n3lo_avg_err(msht_post_dfs)
+msht_prior_res = compute_n3lo_avg_err(msht_prior_dfs)
-n3lo_dfs = [
- (eko_res, "EKO"),
- (fhmv_eko_res, "FHMV EKO"),
- (msht_res, "MSHT"),
- (fhmv_msht_res, "FHMV MSHT")
- # (eko_4mom_res, "aN3LO EKO 4 mom"),
-]
+# compute average of FHMRUVV
+fhmruvv_res = []
+for a, b in zip(fhmruvv_msht_res, fhmruvv_eko_res):
+ fhmruvv_res.append((a + b) / 2)
# PDFs plots
+n3lo_dfs = [
+ (fhmruvv_res, "FHMRUVV"),
+ (msht_prior_res, "MSHT (prior)"),
+ (msht_post_res, "MSHT (posterior)"),
+ (eko_res, "NNPDF"),
+]
plot_pdfs(xgrid, n3lo_dfs, nnlo_central, SCHEME, USE_LINX, plot_dir)
-# relative diff plots
+# relative, absolute diff plots
eko_diff = compute_n3lo_nnlo_diff(eko_res, nnlo_central, REL_DIFF)
-fhmv_eko_diff = compute_n3lo_nnlo_diff(fhmv_eko_res, nnlo_central, REL_DIFF)
-msht_diff = compute_n3lo_nnlo_diff(msht_res, nnlo_central, REL_DIFF)
-fhmv_msht_diff = compute_n3lo_nnlo_diff(fhmv_msht_res, nnlo_central, REL_DIFF)
+fhmruvv_diff = compute_n3lo_nnlo_diff(fhmruvv_res, nnlo_central, REL_DIFF)
+msht_prior_diff = compute_n3lo_nnlo_diff(msht_prior_res, nnlo_central, REL_DIFF)
+msht_post_diff = compute_n3lo_nnlo_diff(msht_post_res, nnlo_central, REL_DIFF)
n3lo_dfs = [
- (eko_diff, "EKO"),
- (fhmv_eko_diff, "FHMV EKO"),
- (msht_diff, "MSHT"),
- (fhmv_msht_diff, "FHMV MSHT")
- # (eko_4mom_res, "aN3LO EKO 4 mom"),
+ (fhmruvv_diff, "FHMRUVV"),
+ (msht_prior_diff, "MSHT (prior)"),
+ (msht_post_diff, "MSHT (posterior)"),
+ (eko_diff, "NNPDF"),
]
-
-# relative, absolute diff plots
plot_diff_to_nnlo(xgrid, n3lo_dfs, SCHEME, USE_LINX, plot_dir, REL_DIFF)
diff --git a/extras/lh_bench_23/plot_trn_exa.py b/extras/lh_bench_23/plot_trn_exa.py
new file mode 100644
index 000000000..7558cedab
--- /dev/null
+++ b/extras/lh_bench_23/plot_trn_exa.py
@@ -0,0 +1,64 @@
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+from cfg import table_dir, xgrid
+from utils import HERE, lha_labels
+
+from eko.io.types import EvolutionMethod
+
+plt.style.use(HERE / "plotstyle.mplstyle")
+plot_dir = HERE / "plots_trn_exa"
+
+PTOS = {1: "NLO", 2: "NNLO", 3: "N$^3$LO"}
+
+COLUMNS_TO_KEEP = ["L_m", "L_p", "g"]
+
+
+def load_table(method):
+ """Load tables."""
+ dfs = {}
+ for pto in PTOS:
+ with open(table_dir / f"table_FFNS-{pto}_{method}.csv", encoding="utf-8") as f:
+ dfs[pto] = pd.read_csv(f, index_col=0)
+ return dfs
+
+
+def plot_diff(xgrid, dfs_trn, dfs_exa):
+ cut_smallx = 0
+ cut_largex = -1
+ xgrid = xgrid[cut_smallx:cut_largex]
+
+ plot_dir.mkdir(exist_ok=True)
+
+ # loop on PDFs
+ for column in COLUMNS_TO_KEEP:
+ _, ax = plt.subplots(1, 1, figsize=(1 * 5, 1 * 3.5))
+ j = np.where(dfs_trn[1].columns == column)[0][0]
+
+ # loop on ptos
+ for pto, pto_label in PTOS.items():
+ diff = (dfs_trn[pto] - dfs_exa[pto]) / dfs_trn[pto] * 100
+
+ ax.plot(xgrid, diff.values[cut_smallx:cut_largex, j], label=pto_label)
+ ax.hlines(
+ 0,
+ xgrid.min() - xgrid.min() / 3,
+ 1,
+ linestyles="dotted",
+ color="black",
+ linewidth=0.5,
+ )
+ ax.set_xscale("log")
+ ax.set_xlabel("$x$")
+ ax.set_ylabel(f'${lha_labels("FFNS")[j]}$')
+ ax.set_xlim(xgrid.min() - xgrid.min() / 3, 1)
+
+ plt.legend()
+ plt.tight_layout()
+ plt.savefig(f"{plot_dir}/diff_trn_exa_{column}.pdf")
+
+
+if __name__ == "__main__":
+ dfs_trn = load_table(EvolutionMethod.TRUNCATED.value)
+ dfs_exa = load_table(EvolutionMethod.ITERATE_EXACT.value)
+ plot_diff(xgrid, dfs_trn, dfs_exa)
diff --git a/extras/lh_bench_23/run-n3lo.py b/extras/lh_bench_23/run-n3lo.py
index 4ee00d520..8098f28dc 100644
--- a/extras/lh_bench_23/run-n3lo.py
+++ b/extras/lh_bench_23/run-n3lo.py
@@ -76,7 +76,7 @@
use_fhmruvv=args.use_fhmruvv,
xif=xif,
)
- o = ffns_operator
+ o = ffns_operator()
tab = 14
lab = ffns_labels
rot = ffns_rotate_to_LHA
diff --git a/extras/lh_bench_23/run-nnlo.py b/extras/lh_bench_23/run-nnlo.py
index 85ab2f984..9f5c681cc 100644
--- a/extras/lh_bench_23/run-nnlo.py
+++ b/extras/lh_bench_23/run-nnlo.py
@@ -3,7 +3,6 @@
import pathlib
import sys
-import numpy as np
import pandas as pd
import yaml
from banana import toy
@@ -63,7 +62,7 @@
if args.scheme == "FFNS":
scheme = "FFNS"
t = ffns_theory(xif)
- o = ffns_operator
+ o = ffns_operator()
tab = 14
lab = ffns_labels
rot = ffns_rotate_to_LHA
diff --git a/extras/lh_bench_23/run-trn_exa.py b/extras/lh_bench_23/run-trn_exa.py
new file mode 100644
index 000000000..97d1a1cf5
--- /dev/null
+++ b/extras/lh_bench_23/run-trn_exa.py
@@ -0,0 +1,72 @@
+import logging
+import pathlib
+import sys
+
+import pandas as pd
+from banana import toy
+from cfg import (
+ ffns_labels,
+ ffns_operator,
+ ffns_rotate_to_LHA,
+ ffns_theory,
+ n3lo_theory,
+ table_dir,
+ xgrid,
+)
+
+import eko
+from eko.io.types import EvolutionMethod
+from eko.runner.managed import solve
+from ekobox import apply
+
+stdout_log = logging.StreamHandler(sys.stdout)
+logger = logging.getLogger("eko")
+logger.handlers = []
+logger.setLevel(logging.INFO)
+logger.addHandler(stdout_log)
+
+
+def compute(op_card, th_card):
+ rot = ffns_rotate_to_LHA
+ lab = ffns_labels
+
+ method = op_card.configs.evolution_method.value
+ pto = th_card.order[0] - 1
+ path = pathlib.Path(f"ekos/FFNS-{pto}_{method}.tar")
+ path.unlink(missing_ok=True)
+
+ solve(th_card, op_card, path)
+
+ # apply PDF
+ out = {}
+ with eko.EKO.read(path) as eko_:
+ pdf = apply.apply_pdf_flavor(eko_, toy.mkPDF("ToyLH", 0), xgrid, rot, lab)
+ for lab, f in list(pdf.values())[0]["pdfs"].items():
+ out[lab] = xgrid * f
+
+ # display result
+ pd.set_option("display.float_format", "{:.4e}".format)
+ me = pd.DataFrame(out)
+ print("EKO")
+ print(me)
+ # dump to file
+ table_dir.mkdir(exist_ok=True)
+ me.to_csv(f"{table_dir}/table_FFNS-{pto}_{method}.csv")
+
+
+if __name__ == "__main__":
+ # loop on ev methods
+ for ev_method in [EvolutionMethod.TRUNCATED, EvolutionMethod.ITERATE_EXACT]:
+ op_card = ffns_operator(ev_method=ev_method.value)
+ # loop on pto
+ for pto in [1, 2, 3]:
+ if pto == 3:
+ th_card = n3lo_theory(
+ ad_variation=(0, 0, 0, 0, 0, 0, 0),
+ is_ffns=True,
+ use_fhmruvv=True,
+ xif=1.0,
+ )
+ else:
+ th_card = ffns_theory(xif=1.0, pto=pto)
+ compute(op_card, th_card)
diff --git a/extras/lh_bench_23/run_fhmv.sh b/extras/lh_bench_23/run_fhmruvv.sh
similarity index 76%
rename from extras/lh_bench_23/run_fhmv.sh
rename to extras/lh_bench_23/run_fhmruvv.sh
index 45807ea4d..a9c572ea8 100755
--- a/extras/lh_bench_23/run_fhmv.sh
+++ b/extras/lh_bench_23/run_fhmruvv.sh
@@ -1,12 +1,12 @@
#!/bin/bash
-SCHEME="FFNS"
-SV="central"
+SCHEME="VFNS"
+SV="up"
AD_VAR=(0 0 0 0 0 0 0)
# run the central
-python run-n3lo.py $SCHEME $SV "${AD_VAR[@]}" "--use_fhmv"
+python run-n3lo.py $SCHEME $SV "${AD_VAR[@]}" "--use_fhmruvv"
# loop on gammas
for I in {0..6}
@@ -15,7 +15,7 @@ for I in {0..6}
for VAR in {1..2}
do
AD_VAR[$I]=$VAR
- python run-n3lo.py $SCHEME $SV "${AD_VAR[@]}" "--use_fhmv"
+ python run-n3lo.py $SCHEME $SV "${AD_VAR[@]}" "--use_fhmruvv"
AD_VAR[$I]=0
done
done
diff --git a/extras/lh_bench_23/tables/EKO.zip b/extras/lh_bench_23/tables/EKO.zip
new file mode 100644
index 0000000000000000000000000000000000000000..66a797e7af18ff15c7a337b8d79e08d3594a6acf
GIT binary patch
literal 742394