diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 67e901972..dfd0862f5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -32,7 +32,7 @@ repos: - id: isort args: ["--profile", "black"] - repo: https://github.com/asottile/pyupgrade - rev: v3.16.0 + rev: v3.17.0 hooks: - id: pyupgrade - repo: https://github.com/pycqa/pydocstyle @@ -43,6 +43,13 @@ repos: args: ["--add-ignore=D107,D105"] additional_dependencies: - toml + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.4.1 + hooks: + - id: mypy + additional_dependencies: [types-PyYAML] + pass_filenames: false + args: ["--ignore-missing-imports", "src/"] - repo: local hooks: - id: fmt @@ -53,6 +60,6 @@ repos: files: ^crates/.*\.rs$ args: [] - repo: https://github.com/pre-commit/pre-commit - rev: v3.7.1 + rev: v3.8.0 hooks: - id: validate_manifest diff --git a/benchmarks/eko/benchmark_alphaem.py b/benchmarks/eko/benchmark_alphaem.py index 16e56ac4a..9cc05536c 100644 --- a/benchmarks/eko/benchmark_alphaem.py +++ b/benchmarks/eko/benchmark_alphaem.py @@ -35,9 +35,7 @@ def test_alphaQED_high(self): dict( alphas=0.118, alphaem=7.7553e-03, - scale=91.2, - num_flavs_ref=5, - max_num_flavs=5, + ref=(91.2, 5), em_running=True, ) ) @@ -75,9 +73,7 @@ def test_alphaQED_low(self): dict( alphas=0.118, alphaem=7.7553e-03, - scale=91.2, - num_flavs_ref=5, - max_num_flavs=5, + ref=(91.2, 5), em_running=True, ) ) @@ -124,9 +120,7 @@ def test_validphys(self): dict( alphas=0.118, alphaem=7.7553e-03, - scale=91.2, - num_flavs_ref=5, - max_num_flavs=5, + ref=(91.2, 5), em_running=True, ) ) diff --git a/benchmarks/eko/benchmark_evol_to_unity.py b/benchmarks/eko/benchmark_evol_to_unity.py index 41bc4b4d0..c466d00d8 100644 --- a/benchmarks/eko/benchmark_evol_to_unity.py +++ b/benchmarks/eko/benchmark_evol_to_unity.py @@ -19,16 +19,12 @@ def update_cards(theory: TheoryCard, operator: OperatorCard): theory.couplings = CouplingsInfo( alphas=0.35, alphaem=0.007496, - scale=float(np.sqrt(2)), - max_num_flavs=6, - num_flavs_ref=None, + ref=(float(np.sqrt(2)), 4), ) - theory.heavy.num_flavs_init = 4 - theory.heavy.intrinsic_flavors = [4, 5] theory.heavy.masses.c.value = 1.0 theory.heavy.masses.b.value = 4.75 theory.heavy.masses.t.value = 173.0 - operator.mu0 = float(np.sqrt(2)) + operator.init = (float(np.sqrt(2)), 4) operator.mugrid = [(10, 5)] operator.xgrid = XGrid(np.linspace(1e-1, 1, 30)) operator.configs.interpolation_polynomial_degree = 1 diff --git a/benchmarks/eko/benchmark_inverse_matching.py b/benchmarks/eko/benchmark_inverse_matching.py index d9c16b0b2..d63b776f0 100644 --- a/benchmarks/eko/benchmark_inverse_matching.py +++ b/benchmarks/eko/benchmark_inverse_matching.py @@ -20,14 +20,9 @@ couplings=dict( alphas=0.118, alphaem=0.007496252, - scale=91.2, - num_flavs_ref=5, - max_num_flavs=6, + ref=(91.2, 5), ), heavy=dict( - num_flavs_init=4, - num_flavs_max_pdf=6, - intrinsic_flavors=[], masses=[ReferenceRunning([mq, np.nan]) for mq in (MC, 4.92, 172.5)], masses_scheme="POLE", matching_ratios=[1.0, 1.0, np.inf], @@ -40,7 +35,7 @@ # operator settings op_raw = dict( - mu0=1.65, + init=(1.65, 4), xgrid=[0.0001, 0.001, 0.01, 0.1, 1], mugrid=[(MC, 3), (MC, 4)], configs=dict( diff --git a/benchmarks/eko/benchmark_msbar_evolution.py b/benchmarks/eko/benchmark_msbar_evolution.py index b9e3bd252..49dc0e952 100644 --- a/benchmarks/eko/benchmark_msbar_evolution.py +++ b/benchmarks/eko/benchmark_msbar_evolution.py @@ -21,9 +21,8 @@ def update_theory(theory: TheoryCard): theory.order = (3, 0) - theory.couplings.scale = 91 + 
theory.couplings.ref = (91, 5) theory.couplings.alphaem = 0.007496 - theory.couplings.num_flavs_ref = 5 theory.heavy.masses_scheme = QuarkMassScheme.MSBAR theory.heavy.masses.c = QuarkMassRef([1.5, 18]) theory.heavy.masses.b = QuarkMassRef([4.1, 20]) @@ -150,7 +149,7 @@ def benchmark_APFEL_msbar_evolution( apfel.SetTheory("QCD") apfel.SetPerturbativeOrder(order - 1) apfel.SetAlphaEvolution(method) - apfel.SetAlphaQCDRef(coupl.alphas, coupl.scale) + apfel.SetAlphaQCDRef(coupl.alphas, coupl.ref[0]) apfel.SetVFNS() apfel.SetMSbarMasses( qmasses.c.value, qmasses.b.value, qmasses.t.value @@ -218,7 +217,7 @@ def benchmark_APFEL_msbar_solution( apfel.SetTheory("QCD") apfel.SetPerturbativeOrder(order - 1) apfel.SetAlphaEvolution("exact") - apfel.SetAlphaQCDRef(coupl.alphas, coupl.scale) + apfel.SetAlphaQCDRef(coupl.alphas, coupl.ref[0]) apfel.SetVFNS() apfel.SetMSbarMasses(qmasses.c.value, qmasses.b.value, qmasses.t.value) apfel.SetMassScaleReference( diff --git a/benchmarks/eko/benchmark_strong_coupling.py b/benchmarks/eko/benchmark_strong_coupling.py index d13a264bd..d60bd28b0 100644 --- a/benchmarks/eko/benchmark_strong_coupling.py +++ b/benchmarks/eko/benchmark_strong_coupling.py @@ -8,7 +8,7 @@ from eko.couplings import Couplings from eko.io.runcards import TheoryCard from eko.quantities.couplings import CouplingEvolutionMethod, CouplingsInfo -from eko.quantities.heavy_quarks import QuarkMassScheme +from eko.quantities.heavy_quarks import FlavorsNumber, QuarkMassScheme # try to load LHAPDF - if not available, we'll use the cached values try: @@ -35,16 +35,11 @@ use_PEGASUS = False -def ref_couplings( - ref_values, - ref_scale: float, -) -> CouplingsInfo: +def ref_couplings(ref_values, ref_scale: float, ref_nf: FlavorsNumber) -> CouplingsInfo: return CouplingsInfo( alphas=ref_values[0], alphaem=ref_values[1], - scale=ref_scale, - max_num_flavs=6, - num_flavs_ref=None, + ref=(ref_scale, ref_nf), ) @@ -54,11 +49,11 @@ def test_a_s(self): """Tests the value of alpha_s (for now only at LO) for a given set of parameters """ - # TODO @JCM: we need a source for this! 
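Note: the benchmarks in this file now build the coupling reference through the new single reference point `ref=(scale, nf)` on `CouplingsInfo`, which replaces the former `scale` / `num_flavs_ref` / `max_num_flavs` fields. For orientation, a minimal sketch of the new construction pattern follows; it only reuses calls that already appear in these benchmarks and in the alpha_s tutorial further down, and the numerical values are illustrative only.

    from eko import matchings
    from eko.couplings import Couplings
    from eko.quantities.couplings import CouplingEvolutionMethod, CouplingsInfo
    from eko.quantities.heavy_quarks import QuarkMassScheme

    # the reference point is now a (scale, nf) tuple
    ref = CouplingsInfo(alphas=0.118, alphaem=0.007496, ref=(91.0, 5))

    # fixed flavor number scheme with nf=5, i.e. no matching thresholds
    atlas = matchings.Atlas.ffns(5, 0.0)
    sc = Couplings(
        couplings=ref,
        order=(1, 0),  # LO QCD
        method=CouplingEvolutionMethod.EXACT,
        masses=atlas.walls[1:-1],
        hqm_scheme=QuarkMassScheme.POLE,
        thresholds_ratios=[1.0, 1.0, 1.0],
    )
    print(sc.a_s(100.0))  # a_s = alpha_s / (4 pi) at mu^2 = 100 GeV^2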
+ # source: JCM known_val = 0.0091807954 ref_mu2 = 90 ask_q2 = 125 - ref = ref_couplings([0.1181, 0.007496], np.sqrt(ref_mu2)) + ref = ref_couplings([0.1181, 0.007496], np.sqrt(ref_mu2), 5) as_FFNS_LO = Couplings( couplings=ref, order=(1, 0), @@ -80,7 +75,7 @@ def benchmark_LHA_paper(self): # LO - FFNS # note that the LO-FFNS value reported in :cite:`Giele:2002hx` # was corrected in :cite:`Dittmar:2005ed` - coupling_ref = ref_couplings([0.35, 0.007496], np.sqrt(2)) + coupling_ref = ref_couplings([0.35, 0.007496], np.sqrt(2), 4) as_FFNS_LO = Couplings( couplings=coupling_ref, order=(1, 0), @@ -141,7 +136,7 @@ def benchmark_APFEL_ffns(self): threshold_holder = matchings.Atlas.ffns(nf, 0.0) for order in [1, 2, 3]: as_FFNS = Couplings( - couplings=ref_couplings(coupling_ref, scale_ref), + couplings=ref_couplings(coupling_ref, scale_ref, nf), order=(order, 0), method=CouplingEvolutionMethod.EXPANDED, masses=threshold_holder.walls[1:-1], @@ -212,8 +207,7 @@ def benchmark_pegasus_ffns(self): } # collect my values threshold_holder = matchings.Atlas.ffns(nf, 0.0) - couplings = ref_couplings(coupling_ref, scale_ref) - couplings.max_num_flavs = 4 + couplings = ref_couplings(coupling_ref, scale_ref, nf) for order in [1, 2, 3, 4]: as_FFNS = Couplings( couplings=couplings, @@ -259,6 +253,7 @@ def benchmark_APFEL_vfns(self): Q2s = [1, 2**2, 3**2, 90**2, 100**2] coupling_ref = np.array([0.118, 0.007496]) scale_ref = 91.0 + nf_ref = 5 threshold_list = np.power([2, 4, 175], 2) apfel_vals_dict = { 1: np.array( @@ -292,7 +287,7 @@ def benchmark_APFEL_vfns(self): # collect my values for order in [1, 2, 3]: as_VFNS = Couplings( - couplings=ref_couplings(coupling_ref, scale_ref), + couplings=ref_couplings(coupling_ref, scale_ref, nf_ref), order=(order, 0), method=CouplingEvolutionMethod.EXPANDED, masses=threshold_list, @@ -339,8 +334,9 @@ def benchmark_APFEL_vfns_fact_to_ren(self): ] coupling_ref = np.array([0.118, 0.007496]) scale_ref = 91.0 + nf_refs = (5, 6) fact_to_ren_lin_list = [0.567, 2.34] - threshold_list = np.power([2, 2 * 4, 2 * 92], 2) + masses_list = np.power([2, 2 * 4, 2 * 92], 2) apfel_vals_dict_list = [ { 1: np.array( @@ -431,16 +427,16 @@ def benchmark_APFEL_vfns_fact_to_ren(self): ), }, ] - for fact_to_ren_lin, apfel_vals_dict in zip( - fact_to_ren_lin_list, apfel_vals_dict_list + for fact_to_ren_lin, apfel_vals_dict, nf_ref in zip( + fact_to_ren_lin_list, apfel_vals_dict_list, nf_refs ): # collect my values for order in [1, 2, 3]: as_VFNS = Couplings( - couplings=ref_couplings(coupling_ref, scale_ref), + couplings=ref_couplings(coupling_ref, scale_ref, nf_ref), order=(order, 0), method=CouplingEvolutionMethod.EXACT, - masses=threshold_list.tolist(), + masses=masses_list.tolist(), hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=np.array([1.0, 1.0, 1.0]) / fact_to_ren_lin**2, ) @@ -457,7 +453,7 @@ def benchmark_APFEL_vfns_fact_to_ren(self): apfel.SetAlphaEvolution("exact") apfel.SetAlphaQCDRef(coupling_ref[0], scale_ref) apfel.SetVFNS() - apfel.SetPoleMasses(*np.sqrt(threshold_list)) + apfel.SetPoleMasses(*np.sqrt(masses_list)) apfel.SetRenFacRatio(1.0 / fact_to_ren_lin) apfel.InitializeAPFEL() # collect a_s @@ -468,12 +464,18 @@ def benchmark_APFEL_vfns_fact_to_ren(self): ) np.testing.assert_allclose(apfel_vals, np.array(apfel_vals_cur)) # check myself to APFEL - np.testing.assert_allclose(apfel_vals, np.array(my_vals), rtol=2.5e-5) + np.testing.assert_allclose( + apfel_vals, + np.array(my_vals), + rtol=2.5e-5, + err_msg=f"{order=},{fact_to_ren_lin=}", + ) def 
benchmark_APFEL_vfns_threshold(self): Q2s = np.power([30, 96, 150], 2) coupling_ref = np.array([0.118, 0.007496]) scale_ref = 91.0 + nf_ref = 4 threshold_list = np.power([30, 95, 240], 2) thresholds_ratios = [2.34**2, 1.0**2, 0.5**2] apfel_vals_dict = { @@ -487,7 +489,7 @@ def benchmark_APFEL_vfns_threshold(self): # collect my values for order in [2, 3]: as_VFNS = Couplings( - couplings=ref_couplings(coupling_ref, scale_ref), + couplings=ref_couplings(coupling_ref, scale_ref, nf_ref), order=(order, 0), method=CouplingEvolutionMethod.EXPANDED, masses=threshold_list.tolist(), @@ -496,7 +498,6 @@ def benchmark_APFEL_vfns_threshold(self): ) my_vals = [] for Q2 in Q2s: - print(Q2) my_vals.append(as_VFNS.a(Q2)[0]) # get APFEL numbers - if available else use cache apfel_vals = apfel_vals_dict[order] @@ -516,7 +517,6 @@ def benchmark_APFEL_vfns_threshold(self): apfel_vals_cur = [] for Q2 in Q2s: apfel_vals_cur.append(apfel.AlphaQCD(np.sqrt(Q2)) / (4.0 * np.pi)) - print(apfel_vals_cur) np.testing.assert_allclose(apfel_vals, np.array(apfel_vals_cur)) # check myself to APFEL np.testing.assert_allclose(apfel_vals, np.array(my_vals)) @@ -525,6 +525,7 @@ def benchmark_APFEL_vfns_msbar(self): Q2s = np.power([3, 96, 150], 2) coupling_ref = np.array([0.118, 0.007496]) scale_ref = 91.0 + nf_ref = 5 Q2m = np.power([2.0, 4.0, 175], 2) m2s = np.power((1.4, 4.0, 175), 2) apfel_vals_dict = { @@ -538,7 +539,7 @@ def benchmark_APFEL_vfns_msbar(self): # collect my values for order in [2, 3]: as_VFNS = Couplings( - couplings=ref_couplings(coupling_ref, scale_ref), + couplings=ref_couplings(coupling_ref, scale_ref, nf_ref), order=(order, 0), method=CouplingEvolutionMethod.EXPANDED, masses=m2s.tolist(), @@ -586,7 +587,7 @@ def benchmark_lhapdf_ffns_lo(self): # collect my values threshold_holder = matchings.Atlas.ffns(nf, 0.0) as_FFNS_LO = Couplings( - couplings=ref_couplings(coupling_ref, scale_ref), + couplings=ref_couplings(coupling_ref, scale_ref, nf), order=(1, 0), method=CouplingEvolutionMethod.EXACT, masses=threshold_holder.walls[1:-1], @@ -629,8 +630,9 @@ def benchmark_apfel_exact(self): Q2s = [1e1, 1e2, 1e3, 1e4] coupling_ref = np.array([0.118, 0.007496]) scale_ref = 90 + nf = 3 # collect my values - threshold_holder = matchings.Atlas.ffns(3, 0.0) + threshold_holder = matchings.Atlas.ffns(nf, 0.0) # LHAPDF cache apfel_vals_dict = { 1: np.array( @@ -660,7 +662,7 @@ def benchmark_apfel_exact(self): } for order in range(1, 3 + 1): sc = Couplings( - couplings=ref_couplings(coupling_ref, scale_ref), + couplings=ref_couplings(coupling_ref, scale_ref, nf), order=(order, 0), method=CouplingEvolutionMethod.EXACT, masses=threshold_holder.walls[1:-1], @@ -696,8 +698,9 @@ def benchmark_lhapdf_exact(self): Q2s = [1e1, 1e2, 1e3, 1e4] coupling_ref = np.array([0.118, 0.007496]) scale_ref = 90 + nf = 3 # collect my values - threshold_holder = matchings.Atlas.ffns(3, 0.0) + threshold_holder = matchings.Atlas.ffns(nf, 0.0) # LHAPDF cache lhapdf_vals_dict = { 1: np.array( @@ -735,7 +738,7 @@ def benchmark_lhapdf_exact(self): } for order in range(1, 4 + 1): sc = Couplings( - couplings=ref_couplings(coupling_ref, scale_ref), + couplings=ref_couplings(coupling_ref, scale_ref, nf), order=(order, 0), method=CouplingEvolutionMethod.EXACT, masses=threshold_holder.walls[1:-1], @@ -766,6 +769,7 @@ def benchmark_lhapdf_zmvfns_lo(self): Q2s = [1, 1e1, 1e2, 1e3, 1e4] coupling_ref = np.array([0.118, 0.007496]) scale_ref = 900 + nf_ref = 5 m2c = 2.0 m2b = 25.0 m2t = 1500.0 @@ -785,7 +789,7 @@ def benchmark_lhapdf_zmvfns_lo(self): # collect my 
values as_VFNS_LO = Couplings( - couplings=ref_couplings(coupling_ref, np.sqrt(scale_ref)), + couplings=ref_couplings(coupling_ref, np.sqrt(scale_ref), nf_ref), order=(1, 0), method=CouplingEvolutionMethod.EXACT, masses=threshold_list, @@ -836,10 +840,8 @@ def benchmark_APFEL_fact_to_ren_lha_settings(self, theory_card: TheoryCard): theory = theory_card theory.order = (3, 0) theory.couplings.alphas = 0.35 - theory.couplings.scale = float(np.sqrt(2)) theory.couplings.alphaem = 0.007496 - theory.couplings.num_flavs_ref = 4 - theory.heavy.num_flavs_init = 3 + theory.couplings.ref = (float(np.sqrt(2)), 4) theory.xif = np.sqrt(1.0 / 2.0) theory.heavy.masses.c.value = np.sqrt(2.0) theory.heavy.masses.b.value = 4.5 @@ -881,7 +883,7 @@ def benchmark_APFEL_fact_to_ren_lha_settings(self, theory_card: TheoryCard): apfel.SetTheory("QCD") apfel.SetPerturbativeOrder(theory.order[0] - 1) apfel.SetAlphaEvolution("exact") - apfel.SetAlphaQCDRef(theory.couplings.alphas, theory.couplings.scale) + apfel.SetAlphaQCDRef(theory.couplings.alphas, theory.couplings.ref[0]) apfel.SetVFNS() apfel.SetPoleMasses(qmasses.c.value, qmasses.b.value, qmasses.t.value) apfel.SetMassMatchingScales( diff --git a/benchmarks/ekobox/benchmark_evol_pdf.py b/benchmarks/ekobox/benchmark_evol_pdf.py index 3a9dc0f4a..a0ddcaac4 100644 --- a/benchmarks/ekobox/benchmark_evol_pdf.py +++ b/benchmarks/ekobox/benchmark_evol_pdf.py @@ -21,15 +21,13 @@ def benchmark_evolve_single_member( theory = theory_card theory.order = (1, 0) theory.couplings.alphas = 0.118000 - theory.couplings.scale = 91.1876 + theory.couplings.ref = (91.1876, 5) theory.couplings.alphaem = 0.007496 - theory.couplings.max_num_flavs = 3 - theory.heavy.num_flavs_max_pdf = 3 theory.heavy.masses.c.value = 1.3 theory.heavy.masses.b.value = 4.75 theory.heavy.masses.t.value = 172 op = operator_card - op.mu0 = 5.0 + op.init = (5.0, 4) op.mugrid = mugrid # lhapdf import (maybe i have to dump with a x*), do plots) with lhapdf_path(test_pdf): @@ -49,7 +47,7 @@ def benchmark_evolve_single_member( ev_pdf = lhapdf.mkPDF("EvPDF", 0) assert info["XMin"] == op.xgrid.raw[0] assert info["SetDesc"] == "MyEvolvedPDF" - assert info["MZ"] == theory.couplings.scale + assert info["MZ"] == theory.couplings.ref[0] assert info["Debug"] == "Debug" xgrid = op.xgrid.raw for idx, mu2 in enumerate(op.mu2grid): @@ -76,7 +74,7 @@ def benchmark_evolve_more_members( theory = theory_card theory.order = (1, 0) op = operator_card - op.mu0 = 1.0 + op.init = (1.0, 3) op.mugrid = [(10.0, 5), (100.0, 5)] op.xgrid = XGrid([1e-7, 0.01, 0.1, 0.2, 0.3]) with lhapdf_path(test_pdf): diff --git a/benchmarks/lha_paper_bench.py b/benchmarks/lha_paper_bench.py index 137d8b3d7..bdc48ce59 100644 --- a/benchmarks/lha_paper_bench.py +++ b/benchmarks/lha_paper_bench.py @@ -13,19 +13,15 @@ register(__file__) +_sqrt2 = float(np.sqrt(2.0)) + base_theory = { "ModEv": "EXA", - "Q0": np.sqrt( - 2.0 - ), # Eq. (30) :cite:`Giele:2002hx`, Eq. (4.53) :cite:`Dittmar:2005ed` - "mc": np.sqrt( - 2.0 - ), # Eq. (34) :cite:`Giele:2002hx`, Eq. (4.56) :cite:`Dittmar:2005ed` + "Q0": _sqrt2, # Eq. (30) :cite:`Giele:2002hx`, Eq. (4.53) :cite:`Dittmar:2005ed` + "mc": _sqrt2, # Eq. (34) :cite:`Giele:2002hx`, Eq. (4.56) :cite:`Dittmar:2005ed` "mb": 4.5, "mt": 175, - "Qref": np.sqrt( - 2.0 - ), # Eq. (32) :cite:`Giele:2002hx`,Eq. (4.53) :cite:`Dittmar:2005ed` + "Qref": _sqrt2, # Eq. (32) :cite:`Giele:2002hx`,Eq. (4.53) :cite:`Dittmar:2005ed` "alphas": 0.35, # Eq. 
(4.55) :cite:`Dittmar:2005ed` "alphaqed": 0.007496, "QED": 0, @@ -79,11 +75,11 @@ def sv_theories(self, pto): """ low = self.theory.copy() low["PTO"] = pto - low["XIF"] = np.sqrt(1.0 / 2.0) + low["XIF"] = 1.0 / _sqrt2 low["ModSV"] = "exponentiated" high = self.theory.copy() high["PTO"] = pto - high["XIF"] = np.sqrt(2.0) + high["XIF"] = _sqrt2 high["ModSV"] = "exponentiated" return [high, low] @@ -302,7 +298,7 @@ def benchmark_sv(self, pto): sv_theory["kcThr"] = 1.0 + 1e-15 sv_theory["nfref"] = 4 sv_theory["EScaleVar"] = 0 - low["XIR"] = np.sqrt(2.0) - high["XIR"] = np.sqrt(0.5) + low["XIR"] = _sqrt2 + high["XIR"] = 1.0 / _sqrt2 self.run_lha([low, high]) diff --git a/crates/ekore/src/operator_matrix_elements/unpolarized/spacelike.rs b/crates/ekore/src/operator_matrix_elements/unpolarized/spacelike.rs index 50b69aeab..fc17da638 100644 --- a/crates/ekore/src/operator_matrix_elements/unpolarized/spacelike.rs +++ b/crates/ekore/src/operator_matrix_elements/unpolarized/spacelike.rs @@ -3,6 +3,7 @@ use crate::harmonics::cache::Cache; use num::complex::Complex; use num::Zero; pub mod as1; +pub mod as2; /// Compute the tower of the singlet |OME|. pub fn A_singlet( diff --git a/crates/ekore/src/operator_matrix_elements/unpolarized/spacelike/as2.rs b/crates/ekore/src/operator_matrix_elements/unpolarized/spacelike/as2.rs new file mode 100644 index 000000000..0e176c282 --- /dev/null +++ b/crates/ekore/src/operator_matrix_elements/unpolarized/spacelike/as2.rs @@ -0,0 +1,81 @@ +//! |NNLO| |QCD| + +use num::complex::Complex; +use num::traits::Pow; +use num::Zero; + +use crate::cmplx; +use crate::constants::{CF, TR, ZETA2, ZETA3}; +use crate::harmonics::cache::{Cache, K}; + +use crate::operator_matrix_elements::unpolarized::spacelike::as1; + +/// |NNLO| light-light non-singlet |OME|. +/// It is given in Eq.() of +pub fn A_qq_ns(c: &mut Cache, _nf: u8, L: f64) -> Complex { + let N = c.n; + let S1 = c.get(K::S1); + let S2 = c.get(K::S2); + let S3 = c.get(K::S3); + let S1m = c.get(K::S1) - 1. / N; + let S2m = c.get(K::S2) - 1. / N.powu(2); + + let a_qq_l0 = -224.0 / 27.0 * (S1 - 1.0 / N) - 8.0 / 3.0 * ZETA3 + + 40. / 9.0 * ZETA2 + + 73.0 / 18.0 + + 44.0 / 27.0 / N + - 268.0 / 27.0 / (N + 1.0) + + 8.0 / 3.0 * (-1.0 / N.powu(2) + 1.0 / (N + 1.0).powu(2)) + + 20.0 / 9.0 * (S2 - 1.0 / N.powu(2) - ZETA2 + S2 + 1.0 / (N + 1.0).powu(2) - ZETA2) + + 2.0 / 3.0 + * (-2.0 * (S3 - 1.0 / N.powu(3) - ZETA3) + - 2.0 * (S3 + 1.0 / (N + 1.0).powu(3) - ZETA3)); + + let a_qq_l1 = 2. * (-12. - 28. * N + 9. * N.powu(2) + 34. * N.powu(3) - 3. * N.powu(4)) + / (9. * (N * (N + 1.)).powu(2)) + + 80. / 9. * S1m + - 16. / 3. * S2m; + + let a_qq_l2 = -2. * ((2. + N - 3. * N.powu(2)) / (3. * N * (N + 1.)) + 4. / 3. 
* S1m); + + CF * TR * (a_qq_l2 * L.pow(2) + a_qq_l1 * L + a_qq_l0) +} + +/// |NNLO| heavy-light pure-singlet |OME| +/// It is given in Eq.() of +pub fn A_hq_ps(c: &mut Cache, _nf: u8, L: f64) -> Complex { + let N = c.n; + let S2 = c.get(K::S1); + let F1M = 1.0 / (N - 1.0) * (ZETA2 - (S2 - 1.0 / N.powu(2))); + let F11 = 1.0 / (N + 1.0) * (ZETA2 - (S2 + 1.0 / (N + 1.0).powu(2))); + let F12 = 1.0 / (N + 2.0) * (ZETA2 - (S2 + 1.0 / (N + 1.0).powu(2) + 1.0 / (N + 2.0).powu(2))); + let F21 = -F11 / (N + 1.0); + + let a_hq_l0 = -(32.0 / 3.0 / (N - 1.0) + 8.0 * (1.0 / N - 1.0 / (N + 1.0)) + - 32.0 / 3.0 * 1.0 / (N + 2.0)) + * ZETA2 + - 448.0 / 27.0 / (N - 1.0) + - 4.0 / 3.0 / N + - 124.0 / 3.0 * 1.0 / (N + 1.0) + + 1600.0 / 27.0 / (N + 2.0) + - 4.0 / 3.0 * (-6.0 / N.powu(4) - 6.0 / (N + 1.0).powu(4)) + + 2.0 * 2.0 / N.powu(3) + + 10.0 * 2.0 / (N + 1.0).powu(3) + + 16.0 / 3.0 * 2.0 / (N + 2.0).powu(3) + - 16.0 * ZETA2 * (-1.0 / N.powu(2) - 1.0 / (N + 1.0).powu(2)) + + 56.0 / 3.0 / N.powu(2) + + 88.0 / 3.0 / (N + 1.0).powu(2) + + 448.0 / 9.0 / (N + 2.0).powu(2) + + 32.0 / 3.0 * F1M + + 8.0 * ((ZETA2 - S2) / N - F11) + - 32.0 / 3.0 * F12 + + 16.0 * (-(ZETA2 - S2) / N.powu(2) + F21); + + let a_hq_l1 = 8. * (2. + N * (5. + N)) * (4. + N * (4. + N * (7. + 5. * N))) + / ((N - 1.) * (N + 2.).powu(2) * (N * (N + 1.)).powu(3)); + + let a_hq_l2 = + -4. * (2. + N + N.powu(2)).powu(2) / ((N - 1.) * (N + 2.) * (N * (N + 1.)).powu(2)); + + CF * TR * (a_hq_l2 * L.pow(2) + a_hq_l1 * L + a_hq_l0) +} diff --git a/doc/source/overview/tutorials/alpha_s.ipynb b/doc/source/overview/tutorials/alpha_s.ipynb index a9d75c0ca..f9cc5a240 100644 --- a/doc/source/overview/tutorials/alpha_s.ipynb +++ b/doc/source/overview/tutorials/alpha_s.ipynb @@ -39,12 +39,10 @@ "from eko.quantities.heavy_quarks import MatchingScales, QuarkMassScheme\n", "\n", "# set the (alpha_s, alpha_em) reference values\n", - "couplings_ref = CouplingsInfo(\n", - " alphas=0.118, alphaem=0.007496252, scale=91.0, num_flavs_ref=None, max_num_flavs=5\n", - ")\n", + "couplings_ref = CouplingsInfo(alphas=0.118, alphaem=0.007496252, ref=(91.0, 5))\n", "\n", "# set heavy quark masses and their threshold ratios\n", - "heavy_quark_masses = np.power([1.51, 4.92, 172.0],2)\n", + "heavy_quark_masses = np.power([1.51, 4.92, 172.0], 2)\n", "thresholds_ratios = np.array([1.0, 1.0, 1.0])\n", "\n", "# set (QCD,QED) perturbative order\n", @@ -88,9 +86,9 @@ } ], "source": [ - "target_scale = 10.0 ** 2\n", + "target_scale = 10.0**2\n", "a_s = sc.a_s(target_scale)\n", - "print(\"The value of alpha_s at Q^2=100 GeV^2 is: \", 4. 
* np.pi * a_s)" + "print(\"The value of alpha_s at Q^2=100 GeV^2 is: \", 4.0 * np.pi * a_s)" ] }, { @@ -105,13 +103,10 @@ } ], "metadata": { - "interpreter": { - "hash": "0a84ba3ac8703c04e87bc503a7d00188dfd591ad56130da93c406115a1e4a408" - }, "kernelspec": { - "display_name": "eko-KkPVjVhh-py3.10", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "eko-kkpvjvhh-py3.10" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/doc/source/overview/tutorials/dglap.ipynb b/doc/source/overview/tutorials/dglap.ipynb index 0e271a00c..98cb4a6e1 100644 --- a/doc/source/overview/tutorials/dglap.ipynb +++ b/doc/source/overview/tutorials/dglap.ipynb @@ -68,7 +68,7 @@ "th_card = example.theory()\n", "op_card = example.operator()\n", "# here we replace the grid with a very minimal one, to speed up the example\n", - "op_card.xgrid = [1e-3, 1e-2, 1e-1, 5e-1, 1.]" + "op_card.xgrid = [1e-3, 1e-2, 1e-1, 5e-1, 1.0]" ] }, { @@ -91,14 +91,9 @@ "{'order': [1, 0],\n", " 'couplings': {'alphas': 0.118,\n", " 'alphaem': 0.007496252,\n", - " 'scale': 91.2,\n", - " 'max_num_flavs': 6,\n", - " 'num_flavs_ref': 5,\n", + " 'ref': [91.2, 5],\n", " 'em_running': False},\n", - " 'heavy': {'num_flavs_init': 4,\n", - " 'num_flavs_max_pdf': 6,\n", - " 'intrinsic_flavors': [4],\n", - " 'masses': [[2.0, nan], [4.5, nan], [173.07, nan]],\n", + " 'heavy': {'masses': [[2.0, nan], [4.5, nan], [173.07, nan]],\n", " 'masses_scheme': 'pole',\n", " 'matching_ratios': [1.0, 1.0, 1.0]},\n", " 'xif': 1.0,\n", @@ -123,7 +118,7 @@ { "data": { "text/plain": [ - "{'mu0': 1.65,\n", + "{'init': [1.65, 4],\n", " 'mugrid': [[100.0, 5]],\n", " 'xgrid': [0.001, 0.01, 0.1, 0.5, 1.0],\n", " 'configs': {'evolution_method': 'iterate-exact',\n", @@ -174,7 +169,7 @@ "id": "880aadcf-8f87-4918-a0bc-09581d0d3579", "metadata": {}, "source": [ - "The actual result is a complicate EKO object, we will discuss it in a separate tutorial.\n", + "The actual result is a complicate EKO object, which we will discuss it in a separate tutorial.\n", "\n", "You have just run your first DGLAP calculation!" 
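Note: the card dumps above show the schema change on the I/O side: the theory card's couplings carry a single `ref` point and the operator card starts from an `init` evolution point instead of a bare `mu0`. A condensed sketch of adapting the example cards to the new field names, reusing the default values printed above (a sketch only, not part of the tutorial):

    from ekobox.cards import example

    th_card = example.theory()
    op_card = example.operator()

    # couplings: one (scale, nf) reference point replaces
    # scale / num_flavs_ref / max_num_flavs
    th_card.couplings.alphas = 0.118
    th_card.couplings.ref = (91.2, 5)

    # operator: evolution now starts from an evolution point (mu0, nf)
    # instead of the former scalar mu0 (plus heavy.num_flavs_init)
    op_card.init = (1.65, 4)
    op_card.mugrid = [(100.0, 5)]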
] diff --git a/doc/source/overview/tutorials/output.ipynb b/doc/source/overview/tutorials/output.ipynb index 47345ac35..1cfe9f071 100644 --- a/doc/source/overview/tutorials/output.ipynb +++ b/doc/source/overview/tutorials/output.ipynb @@ -34,7 +34,7 @@ "id": "2f8f0666-c6ab-40f6-86f5-15773f205b51", "metadata": {}, "source": [ - "We can access the operator, by using the `open` method (similar to python's `open`):" + "We can access the operator, by using the `read` method:" ] }, { @@ -76,8 +76,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "TheoryCard(order=(1, 0), couplings=CouplingsInfo(alphas=0.118, alphaem=0.007496252, scale=91.2, max_num_flavs=6, num_flavs_ref=5, em_running=False), heavy=HeavyInfo(num_flavs_init=4, num_flavs_max_pdf=6, intrinsic_flavors=[4, 5, 6], masses=[[2.0, nan], [4.5, nan], [173.07, nan]], masses_scheme=, matching_ratios=[1.0, 1.0, 1.0]), xif=1.0, n3lo_ad_variation=(0, 0, 0, 0))\n", - "OperatorCard(mu0=1.65, mugrid=[(100.0, 5)], xgrid=, configs=Configs(evolution_method=, ev_op_max_order=(10, 0), ev_op_iterations=10, scvar_method=None, inversion_method=None, interpolation_polynomial_degree=4, interpolation_is_log=True, polarized=False, time_like=False, n_integration_cores=0), debug=Debug(skip_singlet=False, skip_non_singlet=False), eko_version='0.0.0')\n" + "TheoryCard(order=(1, 0), couplings=CouplingsInfo(alphas=0.118, alphaem=0.007496252, ref=(91.2, 5), em_running=False), heavy=HeavyInfo(masses=[[2.0, nan], [4.5, nan], [173.07, nan]], masses_scheme=, matching_ratios=[1.0, 1.0, 1.0]), xif=1.0, n3lo_ad_variation=(0, 0, 0, 0))\n", + "OperatorCard(init=(1.65, 4), mugrid=[(100.0, 5)], xgrid=, configs=Configs(evolution_method=, ev_op_max_order=(10, 0), ev_op_iterations=10, scvar_method=None, inversion_method=None, interpolation_polynomial_degree=4, interpolation_is_log=True, polarized=False, time_like=False, n_integration_cores=0), debug=Debug(skip_singlet=False, skip_non_singlet=False), eko_version='0.0.0')\n" ] } ], @@ -143,7 +143,7 @@ ], "source": [ "with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n", - " with evolution_operator.operator((10000.,5)) as op:\n", + " with evolution_operator.operator((10000.0, 5)) as op:\n", " print(f\"operator: {op.operator.shape}\")\n", " print(f\"error: {op.error.shape}\")" ] diff --git a/doc/source/overview/tutorials/pdf.ipynb b/doc/source/overview/tutorials/pdf.ipynb index 4607ef891..bfc6ad3ad 100644 --- a/doc/source/overview/tutorials/pdf.ipynb +++ b/doc/source/overview/tutorials/pdf.ipynb @@ -13,9 +13,9 @@ "id": "3e0dcd0f", "metadata": {}, "source": [ - "## Method 1: Using apply_pdf\n", + "## Method 1: Using `apply_pdf`\n", "\n", - "In this first part we will compute the eko and subsequently apply the initial PDF \"manually\" calling a dedicated function. " + "In this first part, we compute the eko and subsequently apply the initial PDF \"manually\" calling a dedicated function. 
" ] }, { @@ -52,6 +52,7 @@ "import pathlib\n", "import eko\n", "from banana import toy\n", + "\n", "pdf = toy.mkPDF(\"\", 0)" ] }, @@ -71,6 +72,7 @@ "outputs": [], "source": [ "from ekobox.apply import apply_pdf\n", + "\n", "with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n", " evolved_pdfs = apply_pdf(evolution_operator, pdf)" ] @@ -92,7 +94,7 @@ { "data": { "text/plain": [ - "dict_keys([10000.0])" + "dict_keys([(10000.0, 5)])" ] }, "execution_count": 3, @@ -134,7 +136,7 @@ } ], "source": [ - "evolved_pdfs[10000.0][\"pdfs\"][21]" + "evolved_pdfs[(10000.0, 5)][\"pdfs\"][21]" ] }, { @@ -150,7 +152,7 @@ "id": "e925d2c9", "metadata": {}, "source": [ - "## Method 2: Using evolve_pdfs\n", + "## Method 2: Using `evolve_pdfs`\n", "\n", "In this second part we illustrate how to get (and install) directly a LHAPDF set evolved with eko. " ] @@ -171,6 +173,7 @@ "outputs": [], "source": [ "from banana import toy\n", + "\n", "pdf = toy.mkPDF(\"\", 0)" ] }, @@ -195,10 +198,10 @@ "th_card = example.theory()\n", "op_card = example.operator()\n", "# here we replace the grid with a very minimal one, to speed up the example\n", - "op_card.xgrid = eko.interpolation.XGrid([1e-3, 1e-2, 1e-1, 5e-1, 1.])\n", - "op_card.mugrid = [(10.,5), (100.,5)]\n", + "op_card.xgrid = eko.interpolation.XGrid([1e-3, 1e-2, 1e-1, 5e-1, 1.0])\n", + "op_card.mugrid = [(10.0, 5), (100.0, 5)]\n", "# set QCD LO evolution\n", - "th_card.orders = (1,0)" + "th_card.orders = (1, 0)" ] }, { @@ -236,16 +239,10 @@ ], "source": [ "from ekobox.evol_pdf import evolve_pdfs\n", + "\n", "path = pathlib.Path(\"./myeko2.tar\")\n", "path.unlink(missing_ok=True)\n", - "evolve_pdfs(\n", - " [pdf],\n", - " th_card,\n", - " op_card,\n", - " install=True,\n", - " name=\"Evolved_PDF\",\n", - " store_path=path\n", - ")" + "evolve_pdfs([pdf], th_card, op_card, install=True, name=\"Evolved_PDF\", store_path=path)" ] }, { @@ -273,6 +270,7 @@ ], "source": [ "import lhapdf\n", + "\n", "evolved_pdf = lhapdf.mkPDF(\"Evolved_PDF\", 0)" ] }, @@ -300,12 +298,12 @@ } ], "source": [ - "pid = 21 # gluon pid\n", - "Q2 = 89.10 # Q^2 in Gev^2\n", - "x = 0.01 # momentum fraction \n", + "pid = 21 # gluon pid\n", + "Q2 = 89.10 # Q^2 in Gev^2\n", + "x = 0.01 # momentum fraction\n", "\n", "# check that the particle is present\n", - "print(\"has gluon?\",evolved_pdf.hasFlavor(pid))\n", + "print(\"has gluon?\", evolved_pdf.hasFlavor(pid))\n", "# now do the lookup\n", "xg = evolved_pdf.xfxQ2(pid, x, Q2)\n", "print(f\"xg(x={x}, Q2={Q2}) = {xg}\")" @@ -352,28 +350,28 @@ "import lhapdf\n", "from ekobox.cards import example\n", "from eko.interpolation import make_grid\n", - "from eko.quantities.heavy_quarks import QuarkMassRef,HeavyQuarks\n", + "from eko.quantities.heavy_quarks import QuarkMassRef, HeavyQuarks\n", "\n", "# get the PDF object\n", "ct14llo = lhapdf.mkPDF(\"CT14llo\")\n", "\n", "# setup the operator card\n", "op_card = example.operator()\n", - "op_card.xgrid = eko.interpolation.XGrid(make_grid(30, 30)) # x grid\n", - "op_card.mugrid = [(float(q),5) for q in np.geomspace(5., 100, 5)] # Q2 grid\n", - "op_card.mu0 = 1.295000 # starting point for the evolution \n", + "op_card.xgrid = eko.interpolation.XGrid(make_grid(30, 30)) # x grid\n", + "op_card.mugrid = [(float(q), 5) for q in np.geomspace(5.0, 100, 5)] # Q2 grid\n", + "op_card.init = (1.295000, 3) # starting point for the evolution\n", "\n", "# setup the theory card - this can be mostly inferred from the PDF's .info file\n", - "\n", "th_card = example.theory()\n", - "th_card.orders = (1,0) # QCD LO\n", - 
"th_card.heavy.masses = HeavyQuarks([QuarkMassRef([1.3,nan]), QuarkMassRef([4.75,nan]), QuarkMassRef([172.,nan])]) # quark mass\n", - "th_card.couplings.alphas = 0.130000 # reference value of alpha_s\n", - "th_card.couplings.scale = 91.1876 # the reference scale at which alpha_s is provided\n", - "th_card.couplings.num_flavs_ref = 5 # the number of flavors active at the alpha_s reference scale\n", - "th_card.couplings.max_num_flavs = 5 # the maximum number of flavors active in the alpha_s evolution\n", - "th_card.couplings.num_flavs_init = 3 # the number of flavors active at the reference scale\n", - "th_card.num_flavs_max_pdf = 5 # the maximum number of flavors active in the pdf evolution." + "th_card.orders = (1, 0) # QCD LO\n", + "th_card.heavy.masses = HeavyQuarks(\n", + " [QuarkMassRef([1.3, nan]), QuarkMassRef([4.75, nan]), QuarkMassRef([172.0, nan])]\n", + ") # quark mass\n", + "th_card.couplings.alphas = 0.130000 # reference value of alpha_s\n", + "th_card.couplings.ref = (\n", + " 91.1876,\n", + " 5,\n", + ") # the reference scale together with the number of flavors at which alpha_s is provided" ] }, { @@ -407,15 +405,11 @@ ], "source": [ "from ekobox.evol_pdf import evolve_pdfs\n", + "\n", "path = pathlib.Path(\"./myeko_ct14llo.tar\")\n", "path.unlink(missing_ok=True)\n", "evolve_pdfs(\n", - " [ct14llo],\n", - " th_card,\n", - " op_card,\n", - " install=True,\n", - " name=\"my_ct14llo\",\n", - " store_path=path\n", + " [ct14llo], th_card, op_card, install=True, name=\"my_ct14llo\", store_path=path\n", ")" ] }, @@ -439,33 +433,33 @@ "output_type": "stream", "text": [ "LHAPDF 6.4.0 loading /home/felix/local/share/LHAPDF/my_ct14llo/my_ct14llo_0000.dat\n", + "my_ct14llo PDF set, member #0, version 1\n", " x Q2 ct14llo my_ct14llo relative_diff\n", - "0 0.000010 25.000000 7.635785e+01 7.630461e+01 0.000697\n", - "1 0.000173 25.000000 3.194273e+01 3.192092e+01 0.000683\n", - "2 0.003000 25.000000 1.081843e+01 1.081086e+01 0.000701\n", - "3 0.051962 25.000000 1.958956e+00 1.958632e+00 0.000166\n", - "4 0.900000 25.000000 1.922415e-05 1.955026e-05 -0.016963\n", - "5 0.000010 111.803399 1.333957e+02 1.332985e+02 0.000729\n", - "6 0.000173 111.803399 4.777286e+01 4.773664e+01 0.000758\n", - "7 0.003000 111.803399 1.341028e+01 1.339967e+01 0.000791\n", - "8 0.051962 111.803399 1.978216e+00 1.978130e+00 0.000044\n", - "9 0.900000 111.803399 6.644805e-06 6.753652e-06 -0.016381\n", - "10 0.000010 500.000000 1.967032e+02 1.965456e+02 0.000801\n", - "11 0.000173 500.000000 6.291393e+01 6.286095e+01 0.000842\n", - "12 0.003000 500.000000 1.542347e+01 1.540996e+01 0.000876\n", - "13 0.051962 500.000000 1.947465e+00 1.947391e+00 0.000038\n", - "14 0.900000 500.000000 2.929060e-06 2.977306e-06 -0.016471\n", - "15 0.000010 2236.067977 2.633266e+02 2.631109e+02 0.000819\n", - "16 0.000173 2236.067977 7.708540e+01 7.701938e+01 0.000856\n", - "17 0.003000 2236.067977 1.700410e+01 1.698928e+01 0.000872\n", - "18 0.051962 2236.067977 1.893923e+00 1.893971e+00 -0.000025\n", - "19 0.900000 2236.067977 1.544450e-06 1.570997e-06 -0.017189\n", - "20 0.000010 10000.000000 3.314097e+02 3.311351e+02 0.000829\n", - "21 0.000173 10000.000000 9.023010e+01 9.015279e+01 0.000857\n", - "22 0.003000 10000.000000 1.825934e+01 1.824402e+01 0.000839\n", - "23 0.051962 10000.000000 1.830992e+00 1.831183e+00 -0.000104\n", - "24 0.900000 10000.000000 9.288458e-07 9.447927e-07 -0.017169\n", - "my_ct14llo PDF set, member #0, version 1\n" + "0 0.000010 25.000000 7.635785e+01 7.630719e+01 0.000663\n", + "1 0.000173 25.000000 
3.194273e+01 3.192239e+01 0.000637\n", + "2 0.003000 25.000000 1.081843e+01 1.081160e+01 0.000632\n", + "3 0.051962 25.000000 1.958956e+00 1.958820e+00 0.000069\n", + "4 0.900000 25.000000 1.922415e-05 1.955440e-05 -0.017179\n", + "5 0.000010 111.803399 1.333957e+02 1.333028e+02 0.000697\n", + "6 0.000173 111.803399 4.777286e+01 4.773855e+01 0.000718\n", + "7 0.003000 111.803399 1.341028e+01 1.340044e+01 0.000734\n", + "8 0.051962 111.803399 1.978216e+00 1.978292e+00 -0.000038\n", + "9 0.900000 111.803399 6.644805e-06 6.756354e-06 -0.016787\n", + "10 0.000010 500.000000 1.967032e+02 1.965517e+02 0.000770\n", + "11 0.000173 500.000000 6.291393e+01 6.286327e+01 0.000805\n", + "12 0.003000 500.000000 1.542347e+01 1.541073e+01 0.000826\n", + "13 0.051962 500.000000 1.947465e+00 1.947532e+00 -0.000034\n", + "14 0.900000 500.000000 2.929060e-06 2.979511e-06 -0.017224\n", + "15 0.000010 2236.067977 2.633266e+02 2.631189e+02 0.000789\n", + "16 0.000173 2236.067977 7.708540e+01 7.702204e+01 0.000822\n", + "17 0.003000 2236.067977 1.700410e+01 1.699004e+01 0.000827\n", + "18 0.051962 2236.067977 1.893923e+00 1.894094e+00 -0.000090\n", + "19 0.900000 2236.067977 1.544450e-06 1.572860e-06 -0.018395\n", + "20 0.000010 10000.000000 3.314097e+02 3.311450e+02 0.000799\n", + "21 0.000173 10000.000000 9.023010e+01 9.015576e+01 0.000824\n", + "22 0.003000 10000.000000 1.825934e+01 1.824477e+01 0.000798\n", + "23 0.051962 10000.000000 1.830992e+00 1.831291e+00 -0.000163\n", + "24 0.900000 10000.000000 9.288458e-07 9.463689e-07 -0.018866\n" ] } ], @@ -475,15 +469,15 @@ "# load evolved pdf\n", "my_ct14llo = lhapdf.mkPDF(\"my_ct14llo\", 0)\n", "\n", - "pid = 21 # gluon pid\n", + "pid = 21 # gluon pid\n", "\n", "# collect data\n", - "log = {\"x\": [], \"Q2\" : [], \"ct14llo\": [], \"my_ct14llo\": [], \"relative_diff\": []} \n", - "for q in np.geomspace(5., 100, 5):\n", - " q2 = q**2.\n", + "log = {\"x\": [], \"Q2\": [], \"ct14llo\": [], \"my_ct14llo\": [], \"relative_diff\": []}\n", + "for q in np.geomspace(5.0, 100, 5):\n", + " q2 = q**2.0\n", " for x in np.geomspace(1e-5, 0.9, 5):\n", " value = ct14llo.xfxQ2(pid, x, q2)\n", - " my_value = my_ct14llo.xfxQ2(pid, x, q2)\n", + " my_value = my_ct14llo.xfxQ2(pid, x, q2)\n", " log[\"x\"].append(x)\n", " log[\"Q2\"].append(q2)\n", " log[\"ct14llo\"].append(value)\n", diff --git a/extras/matching/check-matching.py b/extras/matching/check-matching.py index e080883cb..f83cc714b 100644 --- a/extras/matching/check-matching.py +++ b/extras/matching/check-matching.py @@ -121,7 +121,7 @@ def collect_data(): data = {} pdf = toy.mkPDF("", 0) for id in th_updates.keys(): - with eko.EKO.open(f"./eko_{id}.tar") as evolution_operator: + with eko.EKO.read(f"./eko_{id}.tar") as evolution_operator: x = evolution_operator.metadata.rotations.targetgrid.raw data[id] = { mu2: el["pdfs"] diff --git a/extras/n3lo_bench/splitting_function_utils.py b/extras/n3lo_bench/splitting_function_utils.py index 6b32c86cc..66a516a5e 100644 --- a/extras/n3lo_bench/splitting_function_utils.py +++ b/extras/n3lo_bench/splitting_function_utils.py @@ -103,9 +103,7 @@ def compute_a_s(q2=None, xif2=1.0, nf=None, order=(4, 0)): ref = CouplingsInfo( alphas=0.1181, alphaem=0.007496, - scale=91.00, - max_num_flavs=6, - num_flavs_ref=5, + ref=(91.00, 5), ) sc = Couplings( couplings=ref, diff --git a/src/eko/couplings.py b/src/eko/couplings.py index 2b261afdb..502695b32 100644 --- a/src/eko/couplings.py +++ b/src/eko/couplings.py @@ -9,10 +9,11 @@ """ import logging -from typing import Iterable, List +from typing 
import Dict, Iterable, List, Tuple import numba as nb import numpy as np +import numpy.typing as npt import scipy from . import constants, matchings @@ -383,6 +384,10 @@ def couplings_expanded_fixed_alphaem(order, couplings_ref, nf, scale_from, scale return np.array([res_as, aem]) +_CouplingsCacheKey = Tuple[float, float, int, float, float] +"""Cache key containing (a0, a1, nf, scale_from, scale_to).""" + + class Couplings: r"""Compute the strong and electromagnetic coupling constants :math:`a_s, a_{em}`. @@ -436,7 +441,7 @@ def assert_positive(name, var): assert_positive("alpha_s_ref", couplings.alphas) assert_positive("alpha_em_ref", couplings.alphaem) - assert_positive("scale_ref", couplings.scale) + assert_positive("scale_ref", couplings.ref[0]) if order[0] not in [1, 2, 3, 4]: raise NotImplementedError( "QCD order has to be an integer between 1 (LO) and 4 (N3LO)" @@ -450,7 +455,7 @@ def assert_positive(name, var): raise ValueError(f"Unknown method {method.value}") self.method = method.value - nf_ref = couplings.num_flavs_ref + nf_ref = couplings.ref[1] scheme_name = hqm_scheme.name self.alphaem_running = couplings.em_running self.decoupled_running = False @@ -459,7 +464,7 @@ def assert_positive(name, var): self.a_ref = np.array(couplings.values) / 4.0 / np.pi # convert to a_s and a_em matching_scales = (np.array(masses) * np.array(thresholds_ratios)).tolist() self.thresholds_ratios = list(thresholds_ratios) - self.atlas = matchings.Atlas(matching_scales, (couplings.scale**2, nf_ref)) + self.atlas = matchings.Atlas(matching_scales, (couplings.ref[0] ** 2, nf_ref)) self.hqm_scheme = scheme_name logger.info( "Strong Coupling: a_s(µ_R^2=%f)%s=%f=%f/(4π)", @@ -480,7 +485,7 @@ def assert_positive(name, var): self.decoupled_running, ) # cache - self.cache = {} + self.cache: Dict[_CouplingsCacheKey, npt.NDArray] = {} @property def mu2_ref(self): diff --git a/src/eko/evolution_operator/__init__.py b/src/eko/evolution_operator/__init__.py index 1c759c5c5..fe07ade9e 100644 --- a/src/eko/evolution_operator/__init__.py +++ b/src/eko/evolution_operator/__init__.py @@ -8,6 +8,7 @@ import os import time from multiprocessing import Pool +from typing import Dict, Tuple import numba as nb import numpy as np @@ -20,6 +21,8 @@ from .. import basis_rotation as br from .. import interpolation, mellin from .. import scale_variations as sv +from ..io.types import EvolutionMethod, OperatorLabel +from ..kernels import ev_method from ..kernels import non_singlet as ns from ..kernels import non_singlet_qed as qed_ns from ..kernels import singlet as s @@ -600,7 +603,11 @@ def quad_ker_qed( return ker -class Operator(sv.ModeMixin): +OpMembers = Dict[OperatorLabel, OpMember] +"""Map of all operators.""" + + +class Operator(sv.ScaleVariationModeMixin): """Internal representation of a single EKO. The actual matrices are computed upon calling :meth:`compute`. @@ -625,8 +632,8 @@ class Operator(sv.ModeMixin): log_label = "Evolution" # complete list of possible evolution operators labels - full_labels = br.full_labels - full_labels_qed = br.full_unified_labels + full_labels: Tuple[OperatorLabel, ...] = br.full_labels + full_labels_qed: Tuple[OperatorLabel, ...] = br.full_unified_labels def __init__( self, config, managers, segment: Segment, mellin_cut=5e-2, is_threshold=False @@ -639,9 +646,9 @@ def __init__( # TODO make 'cut' external parameter? 
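Note: the imports added above (`EvolutionMethod`, `ev_method`) support the new `ev_method` property introduced just below: the method string stored in the runcard is parsed into the `EvolutionMethod` enum and then mapped onto the internal kernel flag, instead of threading the raw string through to the integration kernel. A tiny sketch of that conversion; the literal value is the one stored in the example operator card shown elsewhere in this diff:

    from eko.io.types import EvolutionMethod
    from eko.kernels import ev_method

    # "iterate-exact" is the string kept in the operator card;
    # the integration kernels only ever see the converted flag
    kernel_flag = ev_method(EvolutionMethod("iterate-exact"))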
self._mellin_cut = mellin_cut self.is_threshold = is_threshold - self.op_members = {} + self.op_members: OpMembers = {} self.order = tuple(config["order"]) - self.alphaem_running = self.managers["couplings"].alphaem_running + self.alphaem_running = self.managers.couplings.alphaem_running if self.log_label == "Evolution": self.a = self.compute_a() self.as_list, self.a_half_list = self.compute_aem_list() @@ -663,7 +670,7 @@ def xif2(self): @property def int_disp(self): """Return the interpolation dispatcher.""" - return self.managers["interpol_dispatcher"] + return self.managers.interpolator @property def grid_size(self): @@ -686,7 +693,7 @@ def mu2(self): def compute_a(self): """Return the computed values for :math:`a_s` and :math:`a_{em}`.""" - coupling = self.managers["couplings"] + coupling = self.managers.couplings a0 = coupling.a( self.mu2[0], nf_to=self.nf, @@ -722,7 +729,7 @@ def compute_aem_list(self): as_list = np.array([self.a_s[0], self.a_s[1]]) a_half = np.zeros((ev_op_iterations, 2)) else: - couplings = self.managers["couplings"] + couplings = self.managers.couplings mu2_steps = np.geomspace(self.q2_from, self.q2_to, 1 + ev_op_iterations) mu2_l = mu2_steps[0] as_list = np.array( @@ -780,6 +787,11 @@ def labels(self): labels.extend(br.singlet_unified_labels) return labels + @property + def ev_method(self): + """Return the evolution method.""" + return ev_method(EvolutionMethod(self.config["method"])) + def quad_ker(self, label, logx, areas): """Return partially initialized integrand function. @@ -803,7 +815,7 @@ def quad_ker(self, label, logx, areas): order=self.order, mode0=label[0], mode1=label[1], - method=self.config["method"], + method=self.ev_method, is_log=self.int_disp.log, logx=logx, areas=areas, diff --git a/src/eko/evolution_operator/__init__.py.patch b/src/eko/evolution_operator/__init__.py.patch index 10f75df85..47c6f252e 100644 --- a/src/eko/evolution_operator/__init__.py.patch +++ b/src/eko/evolution_operator/__init__.py.patch @@ -1,8 +1,8 @@ diff --git a/src/eko/evolution_operator/__init__.py b/src/eko/evolution_operator/__init__.py -index 1c759c5c..5ff2d867 100644 +index fe07ade9..0f58c9e5 100644 --- a/src/eko/evolution_operator/__init__.py +++ b/src/eko/evolution_operator/__init__.py -@@ -3,15 +3,15 @@ r"""Contains the central operator classes. +@@ -3,16 +3,16 @@ r"""Contains the central operator classes. See :doc:`Operator overview `. """ @@ -11,6 +11,7 @@ index 1c759c5c..5ff2d867 100644 import os import time from multiprocessing import Pool + from typing import Dict, Tuple +import ekors import numba as nb @@ -20,7 +21,7 @@ index 1c759c5c..5ff2d867 100644 import ekore.anomalous_dimensions.polarized.space_like as ad_ps import ekore.anomalous_dimensions.unpolarized.space_like as ad_us -@@ -27,92 +27,10 @@ from ..kernels import singlet_qed as qed_s +@@ -30,92 +30,10 @@ from ..kernels import singlet_qed as qed_s from ..kernels import valence_qed as qed_v from ..matchings import Segment, lepton_number from ..member import OpMember @@ -114,7 +115,7 @@ index 1c759c5c..5ff2d867 100644 spec = [ ("is_singlet", nb.boolean), ("is_QEDsinglet", nb.boolean), -@@ -184,422 +102,6 @@ class QuadKerBase: +@@ -187,421 +105,6 @@ class QuadKerBase: return self.path.prefactor * pj * self.path.jac @@ -533,13 +534,12 @@ index 1c759c5c..5ff2d867 100644 - ) - return ker - -- - class Operator(sv.ModeMixin): - """Internal representation of a single EKO. 
-@@ -780,50 +282,6 @@ class Operator(sv.ModeMixin): - labels.extend(br.singlet_unified_labels) - return labels + OpMembers = Dict[OperatorLabel, OpMember] + """Map of all operators.""" +@@ -792,50 +295,6 @@ class Operator(sv.ScaleVariationModeMixin): + """Return the evolution method.""" + return ev_method(EvolutionMethod(self.config["method"])) - def quad_ker(self, label, logx, areas): - """Return partially initialized integrand function. @@ -564,7 +564,7 @@ index 1c759c5c..5ff2d867 100644 - order=self.order, - mode0=label[0], - mode1=label[1], -- method=self.config["method"], +- method=self.ev_method, - is_log=self.int_disp.log, - logx=logx, - areas=areas, @@ -588,7 +588,7 @@ index 1c759c5c..5ff2d867 100644 def initialize_op_members(self): """Init all operators with the identity or zeros.""" eye = OpMember( -@@ -846,10 +304,13 @@ class Operator(sv.ModeMixin): +@@ -858,10 +317,13 @@ class Operator(sv.ScaleVariationModeMixin): else: self.op_members[n] = zero.copy() @@ -606,7 +606,7 @@ index 1c759c5c..5ff2d867 100644 """Run the integration for each grid point. Parameters -@@ -864,18 +325,54 @@ class Operator(sv.ModeMixin): +@@ -876,18 +332,54 @@ class Operator(sv.ScaleVariationModeMixin): """ column = [] k, logx = log_grid diff --git a/src/eko/evolution_operator/grid.py b/src/eko/evolution_operator/grid.py index e64885418..969750389 100644 --- a/src/eko/evolution_operator/grid.py +++ b/src/eko/evolution_operator/grid.py @@ -6,8 +6,8 @@ """ import logging -from dataclasses import astuple -from typing import Dict, List, Optional +from dataclasses import dataclass +from typing import Any, Dict, List, Optional import numpy as np import numpy.typing as npt @@ -18,9 +18,9 @@ from ..interpolation import InterpolatorDispatcher from ..io.runcards import Configs, Debug from ..io.types import EvolutionPoint as EPoint -from ..io.types import Order +from ..io.types import Order, SquaredScale from ..matchings import Atlas, Segment, flavor_shift, is_downward_path -from . import Operator, flavors, matching_condition, physical +from . import Operator, OpMembers, flavors, matching_condition, physical from .operator_matrix_element import OperatorMatrixElement logger = logging.getLogger(__name__) @@ -29,7 +29,16 @@ """In particular, only the ``operator`` and ``error`` fields are expected.""" -class OperatorGrid(sv.ModeMixin): +@dataclass(frozen=True) +class Managers: + """Set of steering objects.""" + + atlas: Atlas + couplings: Couplings + interpolator: InterpolatorDispatcher + + +class OperatorGrid(sv.ScaleVariationModeMixin): """Collection of evolution operators for several scales. The operator grid is the driver class of the evolution. 
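Note: the frozen `Managers` dataclass added above replaces the anonymous dict that used to be threaded through the operators, so downstream code now uses attribute access instead of string keys (see the `Operator` and `OperatorMatrixElement` changes elsewhere in this diff). A small, hypothetical helper to illustrate the new access pattern; the function name and arguments are made up for the example, while the attributes and methods are the ones used in this diff:

    from eko.evolution_operator.grid import Managers

    def couplings_at(managers: Managers, q2: float, nf: int):
        """Hypothetical helper: attribute access replaces the old dict keys."""
        a = managers.couplings.a(q2, nf_to=nf)  # was managers["couplings"].a(...)
        path = managers.atlas.path(q2)  # was managers["thresholds_config"].path(q2)
        interp = managers.interpolator  # was managers["interpol_dispatcher"]
        return a, path, interp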
@@ -52,7 +61,6 @@ def __init__( masses: List[float], mass_scheme, thresholds_ratios: List[float], - intrinsic_flavors: list, xif: float, n3lo_ad_variation: tuple, matching_order: Order, @@ -64,9 +72,8 @@ def __init__( use_fhmruvv: bool, ): # check - config = {} + config: Dict[str, Any] = {} config["order"] = order - config["intrinsic_range"] = intrinsic_flavors config["xif2"] = xif**2 config["HQ"] = mass_scheme config["ModSV"] = configs.scvar_method @@ -95,13 +102,13 @@ def __init__( self.config = config self.q2_grid = mu2grid - self.managers = dict( - thresholds_config=atlas, + self.managers = Managers( + atlas=atlas, couplings=couplings, - interpol_dispatcher=interpol_dispatcher, + interpolator=interpol_dispatcher, ) - self._threshold_operators = {} - self._matching_operators = {} + self._threshold_operators: Dict[Segment, Operator] = {} + self._matching_operators: Dict[SquaredScale, OpMembers] = {} def get_threshold_operators(self, path: List[Segment]) -> List[Operator]: """Generate the threshold operators. @@ -123,7 +130,6 @@ def get_threshold_operators(self, path: List[Segment]) -> List[Operator]: is_downward = is_downward_path(path) shift = flavor_shift(is_downward) for seg in path[:-1]: - new_op_key = astuple(seg) kthr = self.config["thresholds_ratios"][seg.nf - shift] ome = OperatorMatrixElement( self.config, @@ -134,13 +140,13 @@ def get_threshold_operators(self, path: List[Segment]) -> List[Operator]: np.log(kthr), self.config["HQ"] == "MSBAR", ) - if new_op_key not in self._threshold_operators: + if seg not in self._threshold_operators: # Compute the operator and store it logger.info("Prepare threshold operator") op_th = Operator(self.config, self.managers, seg, is_threshold=True) op_th.compute() - self._threshold_operators[new_op_key] = op_th - thr_ops.append(self._threshold_operators[new_op_key]) + self._threshold_operators[seg] = op_th + thr_ops.append(self._threshold_operators[seg]) # Compute the matching conditions and store it if seg.target not in self._matching_operators: @@ -159,7 +165,7 @@ def generate(self, q2: EPoint) -> OpDict: """ # The lists of areas as produced by the thresholds - path = self.managers["thresholds_config"].path(q2) + path = self.managers.atlas.path(q2) # Prepare the path for the composition of the operator thr_ops = self.get_threshold_operators(path) # we start composing with the highest operator ... 
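Note: the hunks that follow, together with the `matching_condition.py` and `physical.py` changes below, drop the `intrinsic_range` argument from the operator composition; intrinsic heavy quarks are now always carried along (with identity evolution above the active flavors). The resulting public signature can be checked directly, as in this small sketch:

    import inspect

    from eko.evolution_operator import physical

    # qed is now an optional flag and intrinsic_range is gone
    print(inspect.signature(physical.PhysicalOperator.ad_to_evol_map))
    # prints: (op_members, nf, q2_final, qed=False)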
@@ -167,16 +173,15 @@ def generate(self, q2: EPoint) -> OpDict: operator.compute() is_downward = is_downward_path(path) - intrinsic_range = [4, 5, 6] if is_downward else self.config["intrinsic_range"] qed = self.config["order"][1] > 0 final_op = physical.PhysicalOperator.ad_to_evol_map( - operator.op_members, operator.nf, operator.q2_to, intrinsic_range, qed + operator.op_members, operator.nf, operator.q2_to, qed ) # and multiply the lower ones from the right for op in reversed(list(thr_ops)): phys_op = physical.PhysicalOperator.ad_to_evol_map( - op.op_members, op.nf, op.q2_to, intrinsic_range, qed + op.op_members, op.nf, op.q2_to, qed ) # join with the basis rotation, since matching requires c+ (or likewise) @@ -185,7 +190,6 @@ def generate(self, q2: EPoint) -> OpDict: self._matching_operators[op.q2_to], nf_match, op.q2_to, - intrinsic_range=intrinsic_range, qed=qed, ) if is_downward: diff --git a/src/eko/evolution_operator/matching_condition.py b/src/eko/evolution_operator/matching_condition.py index 8df4775ff..e84ef4d2e 100644 --- a/src/eko/evolution_operator/matching_condition.py +++ b/src/eko/evolution_operator/matching_condition.py @@ -20,8 +20,7 @@ def split_ad_to_evol_map( op_members, nf, q2_thr, - intrinsic_range, - qed, + qed=False, ): """ Create the instance from the |OME|. @@ -35,8 +34,6 @@ def split_ad_to_evol_map( number of active flavors *below* the threshold q2_thr: float threshold value - intrinsic_range : list - list of intrinsic quark pids qed : bool activate qed """ @@ -78,27 +75,26 @@ def split_ad_to_evol_map( ) # intrinsic matching - if len(intrinsic_range) != 0: - op_id = member.OpMember.id_like(op_members[(200, 200)]) - for intr_fl in intrinsic_range: - ihq = br.quark_names[intr_fl - 1] # find name - if intr_fl > nf + 1: - # keep the higher quarks as they are - m[f"{ihq}+.{ihq}+"] = op_id.copy() - m[f"{ihq}-.{ihq}-"] = op_id.copy() - elif intr_fl == nf + 1: - # match the missing contribution from h+ and h- - m.update( - { - f"{ihq}+.{ihq}+": op_members[ - (br.matching_hplus_pid, br.matching_hplus_pid) - ], - f"S.{ihq}+": op_members[(100, br.matching_hplus_pid)], - f"g.{ihq}+": op_members[(21, br.matching_hplus_pid)], - f"{ihq}-.{ihq}-": op_members[ - (br.matching_hminus_pid, br.matching_hminus_pid) - ], - # f"V.{ihq}-": op_members[(200, br.matching_hminus_pid)], - } - ) + op_id = member.OpMember.id_like(op_members[(200, 200)]) + for intr_fl in [4, 5, 6]: + ihq = br.quark_names[intr_fl - 1] # find name + if intr_fl > nf + 1: + # keep the higher quarks as they are + m[f"{ihq}+.{ihq}+"] = op_id.copy() + m[f"{ihq}-.{ihq}-"] = op_id.copy() + elif intr_fl == nf + 1: + # match the missing contribution from h+ and h- + m.update( + { + f"{ihq}+.{ihq}+": op_members[ + (br.matching_hplus_pid, br.matching_hplus_pid) + ], + f"S.{ihq}+": op_members[(100, br.matching_hplus_pid)], + f"g.{ihq}+": op_members[(21, br.matching_hplus_pid)], + f"{ihq}-.{ihq}-": op_members[ + (br.matching_hminus_pid, br.matching_hminus_pid) + ], + # f"V.{ihq}-": op_members[(200, br.matching_hminus_pid)], + } + ) return cls.promote_names(m, q2_thr) diff --git a/src/eko/evolution_operator/operator_matrix_element.py b/src/eko/evolution_operator/operator_matrix_element.py index 202fd18d9..022f80730 100644 --- a/src/eko/evolution_operator/operator_matrix_element.py +++ b/src/eko/evolution_operator/operator_matrix_element.py @@ -1,6 +1,7 @@ """The |OME| for the non-trivial matching conditions in the |VFNS| evolution.""" import copy +import enum import functools import logging @@ -20,6 +21,33 @@ logger = 
logging.getLogger(__name__) +class MatchingMethods(enum.IntEnum): + """Enumerate matching methods.""" + + FORWARD = enum.auto() + BACKWARD_EXACT = enum.auto() + BACKWARD_EXPANDED = enum.auto() + + +def matching_method(s: InversionMethod) -> MatchingMethods: + """Return the matching method. + + Parameters + ---------- + s : + string representation + + Returns + ------- + i : + int representation + + """ + if s is not None: + return MatchingMethods["BACKWARD_" + s.value.upper()] + return MatchingMethods.FORWARD + + @nb.njit(cache=True) def build_ome(A, matching_order, a_s, backward_method): r"""Construct the matching expansion in :math:`a_s` with the appropriate method. @@ -32,7 +60,7 @@ def build_ome(A, matching_order, a_s, backward_method): perturbation matching order a_s : float strong coupling, needed only for the exact inverse - backward_method : InversionMethod or None + backward_method : MatchingMethods empty or method for inverting the matching condition (exact or expanded) Returns @@ -51,7 +79,7 @@ def build_ome(A, matching_order, a_s, backward_method): ome = np.eye(len(A[0]), dtype=np.complex_) A = A[:, :, :] A = np.ascontiguousarray(A) - if backward_method is InversionMethod.EXPANDED: + if backward_method is MatchingMethods.BACKWARD_EXPANDED: # expended inverse if matching_order[0] >= 1: ome -= a_s * A[0] @@ -68,7 +96,7 @@ def build_ome(A, matching_order, a_s, backward_method): if matching_order[0] >= 3: ome += a_s**3 * A[2] # need inverse exact ? so add the missing pieces - if backward_method is InversionMethod.EXACT: + if backward_method is MatchingMethods.BACKWARD_EXACT: ome = np.linalg.inv(ome) return ome @@ -199,7 +227,7 @@ class OperatorMatrixElement(Operator): log_label = "Matching" # complete list of possible matching operators labels - full_labels = [ + full_labels = ( *br.singlet_labels, (br.matching_hplus_pid, 21), (br.matching_hplus_pid, 100), @@ -210,17 +238,15 @@ class OperatorMatrixElement(Operator): (200, br.matching_hminus_pid), (br.matching_hminus_pid, 200), (br.matching_hminus_pid, br.matching_hminus_pid), - ] + ) # still valid in QED since Sdelta and Vdelta matchings are diagonal full_labels_qed = copy.deepcopy(full_labels) def __init__(self, config, managers, nf, q2, is_backward, L, is_msbar): super().__init__(config, managers, Segment(q2, q2, nf)) - self.backward_method = config["backward_inversion"] if is_backward else None - if is_backward: - self.is_intrinsic = True - else: - self.is_intrinsic = bool(len(config["intrinsic_range"]) != 0) + self.backward_method = matching_method( + config["backward_inversion"] if is_backward else None + ) self.L = L self.is_msbar = is_msbar # Note for the moment only QCD matching is implemented @@ -241,11 +267,10 @@ def labels(self): logger.warning("%s: skipping non-singlet sector", self.log_label) else: labels.append((200, 200)) - if self.is_intrinsic or self.backward_method is not None: - # intrinsic labels, which are not zero at NLO - labels.append((br.matching_hminus_pid, br.matching_hminus_pid)) - # These contributions are always 0 for the moment - # labels.extend([(200, br.matching_hminus_pid), (br.matching_hminus_pid, 200)]) + # intrinsic labels, which are not zero at NLO + labels.append((br.matching_hminus_pid, br.matching_hminus_pid)) + # These contributions are always 0 for the moment + # labels.extend([(200, br.matching_hminus_pid), (br.matching_hminus_pid, 200)]) # same for singlet if self.config["debug_skip_singlet"]: logger.warning("%s: skipping singlet sector", self.log_label) @@ -257,14 +282,13 @@ def 
labels(self): (br.matching_hplus_pid, 100), ] ) - if self.is_intrinsic or self.backward_method is not None: - labels.extend( - [ - (21, br.matching_hplus_pid), - (100, br.matching_hplus_pid), - (br.matching_hplus_pid, br.matching_hplus_pid), - ] - ) + labels.extend( + [ + (21, br.matching_hplus_pid), + (100, br.matching_hplus_pid), + (br.matching_hplus_pid, br.matching_hplus_pid), + ] + ) return labels def quad_ker(self, label, logx, areas): @@ -309,7 +333,7 @@ def a_s(self): Note that here you need to use :math:`a_s^{n_f+1}` """ - sc = self.managers["couplings"] + sc = self.managers.couplings return sc.a_s( self.q2_from * (self.xif2 if self.sv_mode == sv.Modes.exponentiated else 1.0), @@ -329,7 +353,7 @@ def compute(self): self.log_label, self.order[0], self.order[1], - self.backward_method, + MatchingMethods(self.backward_method).name, ) self.integrate() diff --git a/src/eko/evolution_operator/operator_matrix_element.py.patch b/src/eko/evolution_operator/operator_matrix_element.py.patch index d00feb1e4..22a8c3e6c 100644 --- a/src/eko/evolution_operator/operator_matrix_element.py.patch +++ b/src/eko/evolution_operator/operator_matrix_element.py.patch @@ -1,14 +1,13 @@ diff --git a/src/eko/evolution_operator/operator_matrix_element.py b/src/eko/evolution_operator/operator_matrix_element.py -index 202fd18d..5f53858d 100644 +index 022f8073..017ec73d 100644 --- a/src/eko/evolution_operator/operator_matrix_element.py +++ b/src/eko/evolution_operator/operator_matrix_element.py -@@ -1,11 +1,13 @@ - """The |OME| for the non-trivial matching conditions in the |VFNS| evolution.""" +@@ -2,11 +2,12 @@ import copy + import enum -import functools import logging -+import time +import ekors import numba as nb @@ -17,7 +16,7 @@ index 202fd18d..5f53858d 100644 import ekore.operator_matrix_elements.polarized.space_like as ome_ps import ekore.operator_matrix_elements.unpolarized.space_like as ome_us -@@ -15,7 +17,8 @@ from .. import basis_rotation as br +@@ -16,7 +17,8 @@ from .. import basis_rotation as br from .. import scale_variations as sv from ..io.types import InversionMethod from ..matchings import Segment @@ -27,16 +26,16 @@ index 202fd18d..5f53858d 100644 logger = logging.getLogger(__name__) -@@ -49,8 +52,6 @@ def build_ome(A, matching_order, a_s, backward_method): +@@ -77,8 +79,6 @@ def build_ome(A, matching_order, a_s, backward_method): # Print; # .end ome = np.eye(len(A[0]), dtype=np.complex_) - A = A[:, :, :] - A = np.ascontiguousarray(A) - if backward_method is InversionMethod.EXPANDED: + if backward_method is MatchingMethods.BACKWARD_EXPANDED: # expended inverse if matching_order[0] >= 1: -@@ -73,106 +74,6 @@ def build_ome(A, matching_order, a_s, backward_method): +@@ -101,106 +101,6 @@ def build_ome(A, matching_order, a_s, backward_method): return ome @@ -143,8 +142,8 @@ index 202fd18d..5f53858d 100644 class OperatorMatrixElement(Operator): r""" Internal representation of a single |OME|. -@@ -267,41 +168,13 @@ class OperatorMatrixElement(Operator): - ) +@@ -291,41 +191,13 @@ class OperatorMatrixElement(Operator): + ) return labels - def quad_ker(self, label, logx, areas): @@ -188,7 +187,8 @@ index 202fd18d..5f53858d 100644 + cfg.py = ekors.ffi.cast("void *", cb_quad_ker_ome.address) + cfg.L = self.L + cfg.as1 = self.a_s -+ cfg.as0 = 0. 
++ cfg.as0 = 0.0 ++ cfg.Lsv = np.log(self.xif2) @property def a_s(self): diff --git a/src/eko/evolution_operator/physical.py b/src/eko/evolution_operator/physical.py index ca01c4ea4..f8a3545d8 100644 --- a/src/eko/evolution_operator/physical.py +++ b/src/eko/evolution_operator/physical.py @@ -23,7 +23,7 @@ class PhysicalOperator(member.OperatorBase): """ @classmethod - def ad_to_evol_map(cls, op_members, nf, q2_final, intrinsic_range, qed): + def ad_to_evol_map(cls, op_members, nf, q2_final, qed=False): """ Obtain map between the 3-dimensional anomalous dimension basis and the 4-dimensional evolution basis. @@ -35,8 +35,6 @@ def ad_to_evol_map(cls, op_members, nf, q2_final, intrinsic_range, qed): operator members in anomalous dimension basis nf : int number of active light flavors - intrinsic_range : sequence - intrinsic heavy flavors qed : bool activate qed @@ -94,15 +92,14 @@ def ad_to_evol_map(cls, op_members, nf, q2_final, intrinsic_range, qed): m["Tu8.Tu8"] = op_members[(br.non_singlet_pids_map["ns+u"], 0)] m["Vu8.Vu8"] = op_members[(br.non_singlet_pids_map["ns-u"], 0)] # deal with intrinsic heavy quark PDFs - if intrinsic_range is not None: - hqfl = "cbt" - op_id = member.OpMember.id_like(op_members[(21, 21)]) - for intr_fl in intrinsic_range: - if intr_fl <= nf: # light quarks are not intrinsic - continue - hq = hqfl[intr_fl - 4] # find name - # intrinsic means no evolution, i.e. they are evolving with the identity - m[f"{hq}+.{hq}+"] = op_id.copy() - m[f"{hq}-.{hq}-"] = op_id.copy() + hqfl = "cbt" + op_id = member.OpMember.id_like(op_members[(21, 21)]) + for intr_fl in [4, 5, 6]: + if intr_fl <= nf: # light quarks are not intrinsic + continue + hq = hqfl[intr_fl - 4] # find name + # intrinsic means no evolution, i.e. they are evolving with the identity + m[f"{hq}+.{hq}+"] = op_id.copy() + m[f"{hq}-.{hq}-"] = op_id.copy() # map key to MemberName return cls.promote_names(m, q2_final) diff --git a/src/eko/io/access.py b/src/eko/io/access.py index b8dec50a8..5a4d631c0 100644 --- a/src/eko/io/access.py +++ b/src/eko/io/access.py @@ -43,7 +43,7 @@ class ClosedOperator(RuntimeError, exceptions.OutputError): class AccessConfigs: """Configurations specified during opening of an EKO.""" - path: Path + path: Optional[Path] """The path to the permanent object.""" readonly: bool "Read-only flag" diff --git a/src/eko/io/bases.py b/src/eko/io/bases.py deleted file mode 100644 index 50ee74c64..000000000 --- a/src/eko/io/bases.py +++ /dev/null @@ -1,118 +0,0 @@ -"""Operators bases.""" - -from dataclasses import dataclass, fields -from typing import Optional - -import numpy as np -import numpy.typing as npt - -from .. import basis_rotation as br -from .. import interpolation -from .dictlike import DictLike - - -@dataclass -class Bases(DictLike): - """Rotations related configurations. - - Here "Rotation" is intended in a broad sense: it includes both rotations in - flavor space (labeled with suffix `pids`) and in :math:`x`-space (labeled - with suffix `grid`). - Rotations in :math:`x`-space correspond to reinterpolate the result on a - different basis of polynomials. 
- - """ - - xgrid: interpolation.XGrid - """Internal momentum fraction grid.""" - _targetgrid: Optional[interpolation.XGrid] = None - _inputgrid: Optional[interpolation.XGrid] = None - _targetpids: Optional[npt.NDArray] = None - _inputpids: Optional[npt.NDArray] = None - - def __post_init__(self): - """Adjust types when loaded from serialized object.""" - for attr in ("xgrid", "_inputgrid", "_targetgrid"): - value = getattr(self, attr) - if value is None: - continue - if isinstance(value, (np.ndarray, list)): - setattr(self, attr, interpolation.XGrid(value)) - elif not isinstance(value, interpolation.XGrid): - setattr(self, attr, interpolation.XGrid.load(value)) - - @property - def pids(self): - """Internal flavor basis, used for computation.""" - return np.array(br.flavor_basis_pids) - - @property - def inputpids(self) -> npt.NDArray: - """Provide pids expected on the input PDF.""" - if self._inputpids is None: - return self.pids - return self._inputpids - - @inputpids.setter - def inputpids(self, value): - self._inputpids = value - - @property - def targetpids(self) -> npt.NDArray: - """Provide pids corresponding to the output PDF.""" - if self._targetpids is None: - return self.pids - return self._targetpids - - @targetpids.setter - def targetpids(self, value): - self._targetpids = value - - @property - def inputgrid(self) -> interpolation.XGrid: - """Provide :math:`x`-grid expected on the input PDF.""" - if self._inputgrid is None: - return self.xgrid - return self._inputgrid - - @inputgrid.setter - def inputgrid(self, value: interpolation.XGrid): - self._inputgrid = value - - @property - def targetgrid(self) -> interpolation.XGrid: - """Provide :math:`x`-grid corresponding to the output PDF.""" - if self._targetgrid is None: - return self.xgrid - return self._targetgrid - - @targetgrid.setter - def targetgrid(self, value: interpolation.XGrid): - self._targetgrid = value - - @classmethod - def from_dict(cls, dictionary: dict): - """Deserialize rotation. - - Load from full state, but with public names. - - """ - d = dictionary.copy() - for f in fields(cls): - if f.name.startswith("_"): - d[f.name] = d.pop(f.name[1:]) - return cls._from_dict(d) - - @property - def raw(self): - """Serialize rotation. - - Pass through interfaces, access internal values but with a public name. 
- - """ - d = self._raw() - for key in d.copy(): - if key.startswith("_"): - d[key[1:]] = d.pop(key) - - return d diff --git a/src/eko/io/inventory.py b/src/eko/io/inventory.py index 5bb98842f..9b25ad75d 100644 --- a/src/eko/io/inventory.py +++ b/src/eko/io/inventory.py @@ -3,7 +3,7 @@ import base64 from dataclasses import asdict, dataclass, field from pathlib import Path -from typing import Dict, Generic, Optional, Type, TypeVar +from typing import Dict, Generic, Literal, Optional, Type, TypeVar import yaml @@ -11,7 +11,7 @@ from .items import Header, Operator NBYTES = 8 -ENDIANNESS = "little" +ENDIANNESS: Literal["little", "big"] = "little" HEADER_EXT = ".yaml" ARRAY_EXT = [".npy", ".npz"] diff --git a/src/eko/io/legacy.py b/src/eko/io/legacy.py index c05d150ec..41dc8af85 100644 --- a/src/eko/io/legacy.py +++ b/src/eko/io/legacy.py @@ -2,7 +2,6 @@ import dataclasses import io -import os import pathlib import tarfile import tempfile @@ -13,18 +12,26 @@ import numpy as np import yaml -from eko.interpolation import XGrid -from eko.io.runcards import flavored_mugrid -from eko.quantities.heavy_quarks import HeavyInfo, HeavyQuarkMasses, MatchingRatios - +from ..interpolation import XGrid +from ..io.runcards import flavored_mugrid +from ..quantities.heavy_quarks import ( + HeavyInfo, + HeavyQuarkMasses, + MatchingRatios, + QuarkMassScheme, +) from . import raw from .dictlike import DictLike from .struct import EKO, Operator from .types import EvolutionPoint as EPoint -from .types import RawCard +from .types import RawCard, ReferenceRunning + +_MC = 1.51 +_MB = 4.92 +_MT = 172.5 -def load_tar(source: os.PathLike, dest: os.PathLike, errors: bool = False): +def load_tar(source: pathlib.Path, dest: pathlib.Path, errors: bool = False): """Load tar representation from file. Compliant with :meth:`dump_tar` output. 
@@ -39,8 +46,8 @@ def load_tar(source: os.PathLike, dest: os.PathLike, errors: bool = False): whether to load also errors (default ``False``) """ - with tempfile.TemporaryDirectory() as tmpdir: - tmpdir = pathlib.Path(tmpdir) + with tempfile.TemporaryDirectory() as tmpdirr: + tmpdir = pathlib.Path(tmpdirr) with tarfile.open(source, "r") as tar: raw.safe_extractall(tar, tmpdir) @@ -60,7 +67,7 @@ def load_tar(source: os.PathLike, dest: os.PathLike, errors: bool = False): if op5 is None: op5 = metaold["mu2grid"] grid = op5to4( - flavored_mugrid(op5, theory.heavy.masses, theory.heavy.matching_ratios), arrays + flavored_mugrid(op5, [_MC, _MB, _MT], theory.heavy.matching_ratios), arrays ) with EKO.create(dest) as builder: @@ -92,11 +99,14 @@ class PseudoTheory(DictLike): def from_old(cls, old: RawCard): """Load from old metadata.""" heavy = HeavyInfo( - num_flavs_init=4, - num_flavs_max_pdf=None, - intrinsic_flavors=None, - masses=HeavyQuarkMasses([1.51, 4.92, 172.5]), - masses_scheme=None, + masses=HeavyQuarkMasses( + [ + ReferenceRunning([_MC, np.inf]), + ReferenceRunning([_MB, np.inf]), + ReferenceRunning([_MT, np.inf]), + ] + ), + masses_scheme=QuarkMassScheme.POLE, matching_ratios=MatchingRatios([1.0, 1.0, 1.0]), ) return cls(heavy=heavy) @@ -111,7 +121,7 @@ class PseudoOperator(DictLike): """ - mu20: float + init: EPoint evolgrid: List[EPoint] xgrid: XGrid configs: dict @@ -119,13 +129,13 @@ class PseudoOperator(DictLike): @classmethod def from_old(cls, old: RawCard): """Load from old metadata.""" - mu20 = float(old["q2_ref"]) + mu0 = float(np.sqrt(float(old["q2_ref"]))) mu2list = old.get("Q2grid") if mu2list is None: mu2list = old["mu2grid"] mu2grid = np.array(mu2list) evolgrid = flavored_mugrid( - np.sqrt(mu2grid).tolist(), [1.51, 4.92, 172.5], [1, 1, 1] + np.sqrt(mu2grid).tolist(), [_MC, _MB, _MT], [1, 1, 1] ) xgrid = XGrid(old["interpolation_xgrid"]) @@ -135,7 +145,7 @@ def from_old(cls, old: RawCard): interpolation_is_log=old.get("interpolation_is_log"), ) - return cls(mu20=mu20, evolgrid=evolgrid, xgrid=xgrid, configs=configs) + return cls(init=(mu0, 4), evolgrid=evolgrid, xgrid=xgrid, configs=configs) ARRAY_SUFFIX = ".npy.lz4" diff --git a/src/eko/io/manipulate.py b/src/eko/io/manipulate.py index d2bf0ff76..362c8d77d 100644 --- a/src/eko/io/manipulate.py +++ b/src/eko/io/manipulate.py @@ -1,8 +1,9 @@ """Manipulate output generate by EKO.""" +import copy import logging import warnings -from typing import Callable, Optional, Union +from typing import Callable, Optional import numpy as np import numpy.typing as npt @@ -10,7 +11,7 @@ from .. import basis_rotation as br from .. import interpolation from ..interpolation import XGrid -from .struct import EKO, Operator +from .struct import Operator logger = logging.getLogger(__name__) @@ -19,13 +20,13 @@ SIMGRID_ROTATION = "ij,ajbk,kl->aibl" """Simultaneous grid rotation contraction indices.""" -Basis = Union[XGrid, npt.NDArray] - -def rotation(new: Optional[Basis], old: Basis, check: Callable, compute: Callable): +def rotation( + new: Optional[XGrid], old: XGrid, check: Callable, compute: Callable +) -> npt.NDArray: """Define grid rotation. - This function returns the new grid to be assigned and the rotation computed, + This function returns the necessary rotation, if the checks for a non-trivial new grid are passed. 
However, the check and the computation are delegated respectively to the @@ -33,21 +34,23 @@ def rotation(new: Optional[Basis], old: Basis, check: Callable, compute: Callabl """ if new is None: - return old, None + return None if check(new, old): warnings.warn("The new grid is close to the current one") - return old, None + return None - return new, compute(new, old) + return compute(new, old) -def xgrid_check(new: Optional[XGrid], old: XGrid): +def xgrid_check(new: Optional[XGrid], old: XGrid) -> bool: """Check validity of new xgrid.""" return new is not None and len(new) == len(old) and np.allclose(new.raw, old.raw) -def xgrid_compute_rotation(new: XGrid, old: XGrid, interpdeg: int, swap: bool = False): +def xgrid_compute_rotation( + new: XGrid, old: XGrid, interpdeg: int, swap: bool = False +) -> npt.NDArray: """Compute rotation from old to new xgrid. By default, the roation is computed for a target xgrid. Whether the function @@ -62,81 +65,66 @@ def xgrid_compute_rotation(new: XGrid, old: XGrid, interpdeg: int, swap: bool = def xgrid_reshape( - eko: EKO, + elem: Operator, + xgrid: XGrid, + interpdeg: int, targetgrid: Optional[XGrid] = None, inputgrid: Optional[XGrid] = None, -): - """Reinterpolate operators on output and/or input grids. +) -> Operator: + """Reinterpolate the operator on output and/or input grid(s). Target corresponds to the output PDF. - - The operation is in-place. - """ - eko.assert_permissions(write=True) - # calling with no arguments is an error if targetgrid is None and inputgrid is None: raise ValueError("Nor inputgrid nor targetgrid was given") - interpdeg = eko.operator_card.configs.interpolation_polynomial_degree check = xgrid_check crot = xgrid_compute_rotation # construct matrices - newtarget, targetrot = rotation( + targetrot = rotation( targetgrid, - eko.bases.targetgrid, + xgrid, check, lambda new, old: crot(new, old, interpdeg), ) - newinput, inputrot = rotation( + inputrot = rotation( inputgrid, - eko.bases.inputgrid, + xgrid, check, lambda new, old: crot(new, old, interpdeg, swap=True), ) - # after the checks: if there is still nothing to do, skip if targetrot is None and inputrot is None: logger.debug("Nothing done.") - return - # if no rotation is done, the grids are not modified - if targetrot is not None: - eko.bases.targetgrid = newtarget - if inputrot is not None: - eko.bases.inputgrid = newinput + return copy.deepcopy(elem) # build new grid - for ep, elem in eko.items(): - assert elem is not None - - operands = [elem.operator] - operands_errs = [elem.error] - - if targetrot is not None and inputrot is None: - contraction = TARGETGRID_ROTATION - elif inputrot is not None and targetrot is None: - contraction = INPUTGRID_ROTATION - else: - contraction = SIMGRID_ROTATION + operands = [elem.operator] + operands_errs = [elem.error] - if targetrot is not None: - operands.insert(0, targetrot) - operands_errs.insert(0, targetrot) - if inputrot is not None: - operands.append(inputrot) - operands_errs.append(inputrot) + if targetrot is not None and inputrot is None: + contraction = TARGETGRID_ROTATION + elif inputrot is not None and targetrot is None: + contraction = INPUTGRID_ROTATION + else: + contraction = SIMGRID_ROTATION - new_operator = np.einsum(contraction, *operands, optimize="optimal") - if elem.error is not None: - new_error = np.einsum(contraction, *operands_errs, optimize="optimal") - else: - new_error = None + if targetrot is not None: + operands.insert(0, targetrot) + operands_errs.insert(0, targetrot) + if inputrot is not None: + 
operands.append(inputrot) + operands_errs.append(inputrot) - eko[ep] = Operator(operator=new_operator, error=new_error) + new_operator = np.einsum(contraction, *operands, optimize="optimal") + if elem.error is not None: + new_error = np.einsum(contraction, *operands_errs, optimize="optimal") + else: + new_error = None - eko.update() + return Operator(operator=new_operator, error=new_error) TARGETPIDS_ROTATION = "ca,ajbk->cjbk" @@ -146,47 +134,38 @@ def xgrid_reshape( def flavor_reshape( - eko: EKO, + elem: Operator, targetpids: Optional[npt.NDArray] = None, inputpids: Optional[npt.NDArray] = None, - update: bool = True, -): - """Change the operators to have in the output targetpids and/or in the input inputpids. - - The operation is in-place. +) -> Operator: + """Change the operator to have in the output targetpids and/or in the input inputpids. Parameters ---------- - eko : + elem : the operator to be rotated targetpids : target rotation specified in the flavor basis inputpids : input rotation specified in the flavor basis - update : - update :class:`~eko.io.struct.EKO` metadata after writing """ - eko.assert_permissions(write=True) - # calling with no arguments is an error if targetpids is None and inputpids is None: raise ValueError("Nor inputpids nor targetpids was given") # now check to the current status if targetpids is not None and np.allclose( - targetpids, np.eye(len(eko.bases.targetpids)) + targetpids, np.eye(elem.operator.shape[0]) ): targetpids = None warnings.warn("The new targetpids is close to current basis") - if inputpids is not None and np.allclose( - inputpids, np.eye(len(eko.bases.inputpids)) - ): + if inputpids is not None and np.allclose(inputpids, np.eye(elem.operator.shape[2])): inputpids = None warnings.warn("The new inputpids is close to current basis") # after the checks: if there is still nothing to do, skip if targetpids is None and inputpids is None: logger.debug("Nothing done.") - return + return copy.deepcopy(elem) # flip input around inv_inputpids = np.zeros_like(inputpids) @@ -194,60 +173,47 @@ def flavor_reshape( inv_inputpids = np.linalg.inv(inputpids) # build new grid - for q2, elem in eko.items(): - ops = elem.operator - errs = elem.error - if targetpids is not None and inputpids is None: - ops = np.einsum(TARGETPIDS_ROTATION, targetpids, ops, optimize="optimal") - errs = ( - np.einsum(TARGETPIDS_ROTATION, targetpids, errs, optimize="optimal") - if errs is not None - else None - ) - elif inputpids is not None and targetpids is None: - ops = np.einsum(INPUTPIDS_ROTATION, ops, inv_inputpids, optimize="optimal") - errs = ( - np.einsum(INPUTPIDS_ROTATION, errs, inv_inputpids, optimize="optimal") - if errs is not None - else None - ) - else: - ops = np.einsum( - SIMPIDS_ROTATION, targetpids, ops, inv_inputpids, optimize="optimal" - ) - errs = ( - np.einsum( - SIMPIDS_ROTATION, - targetpids, - errs, - inv_inputpids, - optimize="optimal", - ) - if errs is not None - else None + ops = elem.operator + errs = elem.error + if targetpids is not None and inputpids is None: + ops = np.einsum(TARGETPIDS_ROTATION, targetpids, ops, optimize="optimal") + errs = ( + np.einsum(TARGETPIDS_ROTATION, targetpids, errs, optimize="optimal") + if errs is not None + else None + ) + elif inputpids is not None and targetpids is None: + ops = np.einsum(INPUTPIDS_ROTATION, ops, inv_inputpids, optimize="optimal") + errs = ( + np.einsum(INPUTPIDS_ROTATION, errs, inv_inputpids, optimize="optimal") + if errs is not None + else None + ) + else: + ops = np.einsum( + SIMPIDS_ROTATION, 
targetpids, ops, inv_inputpids, optimize="optimal" + ) + errs = ( + np.einsum( + SIMPIDS_ROTATION, + targetpids, + errs, + inv_inputpids, + optimize="optimal", ) + if errs is not None + else None + ) - eko[q2] = Operator(operator=ops, error=errs) + return Operator(operator=ops, error=errs) - # drop PIDs - keeping them int nevertheless - # there is no meaningful way to set them in general, after rotation - if inputpids is not None: - eko.bases.inputpids = np.array([0] * len(eko.bases.inputpids)) - if targetpids is not None: - eko.bases.targetpids = np.array([0] * len(eko.bases.targetpids)) - if update: - eko.update() - - -def to_evol(eko: EKO, source: bool = True, target: bool = False): +def to_evol(elem: Operator, source: bool = True, target: bool = False) -> Operator: """Rotate the operator into evolution basis. - This assigns also the pids. The operation is in-place. - Parameters ---------- - eko : + elem : the operator to be rotated source : rotate on the input tensor @@ -258,27 +224,15 @@ def to_evol(eko: EKO, source: bool = True, target: bool = False): # rotate inputpids = br.rotate_flavor_to_evolution if source else None targetpids = br.rotate_flavor_to_evolution if target else None - # prevent metadata update, since flavor_reshape has not enough information - # to determine inpupids and targetpids, and they will be updated after the - # call - flavor_reshape(eko, inputpids=inputpids, targetpids=targetpids, update=False) - # assign pids - if source: - eko.bases.inputpids = inputpids - if target: - eko.bases.targetpids = targetpids + return flavor_reshape(elem, inputpids=inputpids, targetpids=targetpids) - eko.update() - -def to_uni_evol(eko: EKO, source: bool = True, target: bool = False): +def to_uni_evol(elem: Operator, source: bool = True, target: bool = False) -> Operator: """Rotate the operator into evolution basis. - This assigns also the pids. The operation is in-place. - Parameters ---------- - eko : + elem : the operator to be rotated source : rotate on the input tensor @@ -289,14 +243,4 @@ def to_uni_evol(eko: EKO, source: bool = True, target: bool = False): # rotate inputpids = br.rotate_flavor_to_unified_evolution if source else None targetpids = br.rotate_flavor_to_unified_evolution if target else None - # prevent metadata update, since flavor_reshape has not enough information - # to determine inpupids and targetpids, and they will be updated after the - # call - flavor_reshape(eko, inputpids=inputpids, targetpids=targetpids, update=False) - # assign pids - if source: - eko.bases.inputpids = inputpids - if target: - eko.bases.targetpids = targetpids - - eko.update() + return flavor_reshape(elem, inputpids=inputpids, targetpids=targetpids) diff --git a/src/eko/io/metadata.py b/src/eko/io/metadata.py index 94cfad613..c95bbfcb5 100644 --- a/src/eko/io/metadata.py +++ b/src/eko/io/metadata.py @@ -9,7 +9,7 @@ import yaml from .. import version as vmod -from .bases import Bases +from ..interpolation import XGrid from .dictlike import DictLike from .paths import InternalPaths from .types import EvolutionPoint as EPoint @@ -35,13 +35,11 @@ class Metadata(DictLike): origin: EPoint """Inital scale.""" - bases: Bases - """Manipulation information, describing the current status of the EKO (e.g. - `inputgrid` and `targetgrid`). 
- """ + xgrid: XGrid + """Interpolation grid""" # tagging information _path: Optional[pathlib.Path] = None - """Path to temporary dir.""" + """Path to the open dir.""" version: str = vmod.__version__ """Library version used to create the corresponding file.""" data_version: int = vmod.__data_version__ diff --git a/src/eko/io/runcards.py b/src/eko/io/runcards.py index e48437dc3..e9fac3893 100644 --- a/src/eko/io/runcards.py +++ b/src/eko/io/runcards.py @@ -105,7 +105,7 @@ class Configs(DictLike): class OperatorCard(DictLike): """Operator Card info.""" - mu0: float + init: EPoint """Initial scale.""" mugrid: List[EPoint] xgrid: interpolation.XGrid @@ -126,7 +126,7 @@ class OperatorCard(DictLike): @property def mu20(self): """Squared value of initial scale.""" - return self.mu0**2 + return self.init[0] ** 2 @property def mu2grid(self) -> npt.NDArray: @@ -194,25 +194,12 @@ def new_theory(self): alphas=old["alphas"], alphaem=alphaem, em_running=em_running, - scale=old["Qref"], - num_flavs_ref=old["nfref"], - max_num_flavs=old["MaxNfAs"], + ref=(old["Qref"], old["nfref"]), ) new["heavy"] = { - "num_flavs_init": ( - nf_default(old["Q0"] ** 2.0, default_atlas(ms, ks)) - if old["nf0"] is None - else old["nf0"] - ), - "num_flavs_max_pdf": old["MaxNfPdf"], "matching_ratios": self.heavies("k%sThr", old), "masses_scheme": old["HQ"], } - intrinsic = [] - for idx, q in enumerate(hq.FLAVORS): - if old.get(f"i{q}".upper()) == 1: - intrinsic.append(idx + 4) - new["heavy"]["intrinsic_flavors"] = intrinsic if old["HQ"] == "POLE": new["heavy"]["masses"] = [[m, nan] for m in ms] elif old["HQ"] == "MSBAR": @@ -236,17 +223,22 @@ def new_operator(self): old_th = self.theory new = {} - new["mu0"] = old_th["Q0"] + ms = self.heavies("m%s", old_th) + ks = self.heavies("k%sThr", old_th) + new["init"] = ( + old_th["Q0"], + ( + nf_default(old_th["Q0"] ** 2.0, default_atlas(ms, ks)) + if old_th["nf0"] is None + else old_th["nf0"] + ), + ) if "mugrid" in old: mugrid = old["mugrid"] else: mu2grid = old["Q2grid"] if "Q2grid" in old else old["mu2grid"] mugrid = np.sqrt(mu2grid).tolist() - new["mugrid"] = flavored_mugrid( - mugrid, - list(self.heavies("m%s", old_th)), - list(self.heavies("k%sThr", old_th)), - ) + new["mugrid"] = flavored_mugrid(mugrid, list(ms), list(ks)) new["configs"] = {} evmod = old_th["ModEv"] @@ -278,27 +270,6 @@ def new_operator(self): return OperatorCard.from_dict(new) -def update(theory: Union[RawCard, TheoryCard], operator: Union[RawCard, OperatorCard]): - """Update legacy runcards. - - This function is mainly defined for compatibility with the old interface. - Prefer direct usage of :class:`Legacy` in new code. - - Consecutive applications of this function yield identical results:: - - cards = update(theory, operator) - assert update(*cards) == cards - """ - if isinstance(theory, TheoryCard) or isinstance(operator, OperatorCard): - # if one is not a dict, both have to be new cards - assert isinstance(theory, TheoryCard) - assert isinstance(operator, OperatorCard) - return theory, operator - - cards = Legacy(theory, operator) - return cards.new_theory, cards.new_operator - - def default_atlas(masses: list, matching_ratios: list): r"""Create default landscape. 
diff --git a/src/eko/io/struct.py b/src/eko/io/struct.py index 57d2ae9df..9ee1b73f9 100644 --- a/src/eko/io/struct.py +++ b/src/eko/io/struct.py @@ -3,12 +3,11 @@ import contextlib import copy import logging -import os -import pathlib import shutil import tarfile import tempfile from dataclasses import dataclass +from pathlib import Path from typing import List, Optional import numpy as np @@ -17,7 +16,6 @@ from .. import interpolation from . import exceptions, raw from .access import AccessConfigs -from .bases import Bases from .inventory import Inventory from .items import Evolution, Matching, Operator, Recipe, Target from .metadata import Metadata @@ -31,7 +29,7 @@ TEMP_PREFIX = "eko-" -def inventories(path: pathlib.Path, access: AccessConfigs) -> dict: +def inventories(path: Path, access: AccessConfigs) -> dict: """Set up empty inventories for object initialization.""" paths = InternalPaths(path) return dict( @@ -106,20 +104,15 @@ def paths(self) -> InternalPaths: """Accessor for internal paths.""" return InternalPaths(self.metadata.path) - @property - def bases(self) -> Bases: - """Bases information.""" - return self.metadata.bases - @property def xgrid(self) -> interpolation.XGrid: """Momentum fraction internal grid.""" - return self.bases.xgrid + return self.metadata.xgrid @xgrid.setter def xgrid(self, value: interpolation.XGrid): """Set `xgrid` value.""" - self.bases.xgrid = value + self.metadata.xgrid = value self.update() @property @@ -273,7 +266,7 @@ def approx( Raises ------ ValueError - if multiple values are find in the neighbourhood + if multiple values are found in the neighbourhood """ eps = np.array([ep_ for ep_ in self if ep_[1] == ep[1]]) @@ -281,7 +274,9 @@ def approx( close = eps[np.isclose(ep[0], mu2s, rtol=rtol, atol=atol)] if len(close) == 1: - return tuple(close[0]) + found = close[0] + assert isinstance(found[0], float) + return (found[0], int(found[1])) if len(close) == 0: return None raise ValueError(f"Multiple values of Q2 have been found close to {ep}") @@ -294,7 +289,7 @@ def unload(self): # operator management # ------------------- - def deepcopy(self, path: os.PathLike): + def deepcopy(self, path: Path): """Create a deep copy of current instance. The managed on-disk object is copied as well, to the new ``path`` @@ -328,115 +323,101 @@ def deepcopy(self, path: os.PathLike): self.unload() new = copy.deepcopy(self) - new.access.path = pathlib.Path(path) + new.access.path = path new.access.readonly = False new.access.open = True - tmpdir = pathlib.Path(tempfile.mkdtemp(prefix=TEMP_PREFIX)) + tmpdir = Path(tempfile.mkdtemp(prefix=TEMP_PREFIX)) new.metadata.path = tmpdir # copy old dir to new dir tmpdir.rmdir() shutil.copytree(self.paths.root, new.paths.root) new.close() - @staticmethod - def load(tarpath: os.PathLike, dest: os.PathLike): - """Load the content of archive in a target directory. + @classmethod + def load(cls, path: Path): + """Load the EKO from disk information. - Parameters - ---------- - tarpath: os.PathLike - the archive to extract - tmppath: os.PathLike - the destination directory + Note + ---- + No archive path is assigned to the :class:`EKO` object, setting its + :attr:`EKO.access.path` to `None`. + If you want to properly load from an archive, use the :meth:`read` + constructor. 
""" - try: - with tarfile.open(tarpath) as tar: - raw.safe_extractall(tar, dest) - except tarfile.ReadError: - raise exceptions.OutputNotTar(f"Not a valid tar archive: '{tarpath}'") + access = AccessConfigs(None, readonly=True, open=True) - @classmethod - def open(cls, path: os.PathLike, mode="r"): - """Open EKO object in the specified mode.""" - path = pathlib.Path(path) - access = AccessConfigs(path, readonly=False, open=True) - load = False - if mode == "r": - load = True - access.readonly = True - elif mode in "w": - pass - elif mode in "a": - load = True - else: - raise ValueError(f"Unknown file mode: {mode}") - - tmpdir = pathlib.Path(tempfile.mkdtemp(prefix=TEMP_PREFIX)) - if load: - cls.load(path, tmpdir) - metadata = Metadata.load(tmpdir) - opened = cls( - **inventories(tmpdir, access), - metadata=metadata, - access=access, - ) - opened.operators.sync() - else: - opened = Builder(path=tmpdir, access=access) + metadata = Metadata.load(path) + loaded = cls( + **inventories(path, access), + metadata=metadata, + access=access, + ) + loaded.operators.sync() - return opened + return loaded @classmethod - def read(cls, path: os.PathLike): - """Read the content of an EKO. + def read( + cls, + path: Path, + extract: bool = True, + dest: Optional[Path] = None, + readonly: bool = True, + ): + """Load an existing EKO. + + If the `extract` attribute is `True` the EKO is loaded from its archived + format. Otherwise, the `path` is interpreted as the location of an + already extracted folder. - Type-safe alias for:: - EKO.open(... , "r") """ - eko = cls.open(path, "r") - assert isinstance(eko, EKO) - return eko + if extract: + dir_ = Path(tempfile.mkdtemp(prefix=TEMP_PREFIX)) if dest is None else dest + with tarfile.open(path) as tar: + raw.safe_extractall(tar, dir_) + else: + dir_ = path - @classmethod - def create(cls, path: os.PathLike): - """Create a new EKO. + loaded = cls.load(dir_) - Type-safe alias for:: + loaded.access.readonly = readonly + if extract: + loaded.access.path = path - EKO.open(... , "w") + return loaded - """ - builder = cls.open(path, "w") - assert isinstance(builder, Builder) + @classmethod + def create(cls, path: Path): + """Create a new EKO.""" + access = AccessConfigs(path, readonly=False, open=True) + builder = Builder( + path=Path(tempfile.mkdtemp(prefix=TEMP_PREFIX)), access=access + ) return builder @classmethod - def edit(cls, path: os.PathLike): + def edit(cls, *args, **kwargs): """Read from and write on existing EKO. - Type-safe alias for:: - - EKO.open(... , "a") + Alias of `EKO.read(..., readonly=False)`, see :meth:`read`. """ - eko = cls.open(path, "a") - assert isinstance(eko, EKO) - return eko + return cls.read(*args, readonly=False, **kwargs) def __enter__(self): """Allow EKO to be used in :obj:`with` statements.""" return self - def dump(self, archive: Optional[os.PathLike] = None): + def dump(self, archive: Optional[Path] = None): """Dump the current content to archive. 
Parameters ---------- - archive: os.PathLike or None + archive: Path or None path to archive, in general you should keep the default, that will make use of the registered path (default: ``None``) @@ -475,7 +456,8 @@ def __exit__(self, exc_type: type, _exc_value, _traceback): if exc_type is not None: return - self.close() + if self.access.path is not None: + self.close() @property def raw(self) -> dict: @@ -495,8 +477,8 @@ def raw(self) -> dict: class Builder: """Build EKO instances.""" - path: pathlib.Path - """Path on disk to .""" + path: Path + """Path on disk to the EKO.""" access: AccessConfigs """Access related configurations.""" @@ -551,8 +533,8 @@ def build(self) -> EKO: self.access.open = True metadata = Metadata( _path=self.path, - origin=(self.operator.mu20, self.theory.heavy.num_flavs_init), - bases=Bases(xgrid=self.operator.xgrid), + origin=(self.operator.init[0] ** 2, self.operator.init[1]), + xgrid=self.operator.xgrid, ) InternalPaths(self.path).bootstrap( theory=self.theory.raw, diff --git a/src/eko/io/types.py b/src/eko/io/types.py index d8377a4e6..20ac52db5 100644 --- a/src/eko/io/types.py +++ b/src/eko/io/types.py @@ -1,8 +1,7 @@ """Common type definitions, only used for static analysis.""" import enum -import typing -from typing import Any, Dict, Generic, Tuple, TypeVar +from typing import Any, Dict, Generic, List, Tuple, TypeVar # Energy scales # ------------- @@ -23,8 +22,9 @@ Order = Tuple[int, int] FlavorsNumber = int FlavorIndex = int -IntrinsicFlavors = typing.List[FlavorIndex] -N3LOAdVariation = typing.Tuple[int, int, int, int] +IntrinsicFlavors = List[FlavorIndex] +N3LOAdVariation = Tuple[int, int, int, int] +OperatorLabel = Tuple[int, int] # Evolution coordinates # --------------------- diff --git a/src/eko/kernels/__init__.py b/src/eko/kernels/__init__.py index 7640727b5..594fbf1fa 100644 --- a/src/eko/kernels/__init__.py +++ b/src/eko/kernels/__init__.py @@ -1 +1,35 @@ """The solutions to the |DGLAP| equations.""" + +import enum + +from ..io.types import EvolutionMethod + + +class EvoMethods(enum.IntEnum): + """Enumerate evolution methods.""" + + ITERATE_EXACT = enum.auto() + ITERATE_EXPANDED = enum.auto() + PERTURBATIVE_EXACT = enum.auto() + PERTURBATIVE_EXPANDED = enum.auto() + TRUNCATED = enum.auto() + ORDERED_TRUNCATED = enum.auto() + DECOMPOSE_EXACT = enum.auto() + DECOMPOSE_EXPANDED = enum.auto() + + +def ev_method(s: EvolutionMethod) -> EvoMethods: + """Return the evolution method. + + Parameters + ---------- + s : + string representation + + Returns + ------- + i : + int representation + + """ + return EvoMethods[s.value.upper().replace("-", "_")] diff --git a/src/eko/kernels/non_singlet.py b/src/eko/kernels/non_singlet.py index bd5bd10f0..15b913438 100644 --- a/src/eko/kernels/non_singlet.py +++ b/src/eko/kernels/non_singlet.py @@ -4,6 +4,7 @@ import numpy as np from .. import beta +from . import EvoMethods from . import as4_evolution_integrals as as4_ei from . 
import evolution_integrals as ei @@ -355,7 +356,7 @@ def dispatcher( ---------- order : tuple(int,int) perturbation order - method : str + method : int method gamma_ns : numpy.ndarray non-singlet anomalous dimensions @@ -378,38 +379,38 @@ def dispatcher( # use always exact in LO if order[0] == 1: return lo_exact(gamma_ns, a1, a0, betalist) - if method == "ordered-truncated": + if method is EvoMethods.ORDERED_TRUNCATED: return eko_ordered_truncated( gamma_ns, a1, a0, betalist, order, ev_op_iterations ) - if method == "truncated": + if method is EvoMethods.TRUNCATED: return eko_truncated(gamma_ns, a1, a0, betalist, order, ev_op_iterations) # NLO if order[0] == 2: if method in [ - "iterate-expanded", - "decompose-expanded", - "perturbative-expanded", + EvoMethods.ITERATE_EXPANDED, + EvoMethods.DECOMPOSE_EXPANDED, + EvoMethods.PERTURBATIVE_EXPANDED, ]: return nlo_expanded(gamma_ns, a1, a0, betalist) - # if method in ["iterate-exact", "decompose-exact", "perturbative-exact"]: + # exact is left return nlo_exact(gamma_ns, a1, a0, betalist) # NNLO if order[0] == 3: if method in [ - "iterate-expanded", - "decompose-expanded", - "perturbative-expanded", + EvoMethods.ITERATE_EXPANDED, + EvoMethods.DECOMPOSE_EXPANDED, + EvoMethods.PERTURBATIVE_EXPANDED, ]: return nnlo_expanded(gamma_ns, a1, a0, betalist) return nnlo_exact(gamma_ns, a1, a0, betalist) # N3LO if order[0] == 4: if method in [ - "iterate-expanded", - "decompose-expanded", - "perturbative-expanded", + EvoMethods.ITERATE_EXPANDED, + EvoMethods.DECOMPOSE_EXPANDED, + EvoMethods.PERTURBATIVE_EXPANDED, ]: return n3lo_expanded(gamma_ns, a1, a0, betalist) return n3lo_exact(gamma_ns, a1, a0, betalist) diff --git a/src/eko/kernels/non_singlet_qed.py b/src/eko/kernels/non_singlet_qed.py index 7a2de816a..d81d5076b 100644 --- a/src/eko/kernels/non_singlet_qed.py +++ b/src/eko/kernels/non_singlet_qed.py @@ -146,7 +146,7 @@ def as4_exact(gamma_ns, a1, a0, beta): @nb.njit(cache=True) def dispatcher( order, - method, + _method, gamma_ns, as_list, aem_half, @@ -164,7 +164,7 @@ def dispatcher( ---------- order : tuple(int,int) perturbation order - method : str + method : int method gamma_ns : numpy.ndarray non-singlet anomalous dimensions diff --git a/src/eko/kernels/singlet.py b/src/eko/kernels/singlet.py index 5983c0c67..e637af046 100644 --- a/src/eko/kernels/singlet.py +++ b/src/eko/kernels/singlet.py @@ -6,6 +6,7 @@ from ekore import anomalous_dimensions as ad from .. import beta +from . import EvoMethods from . import as4_evolution_integrals as as4_ei from . 
import evolution_integrals as ei @@ -579,7 +580,7 @@ def dispatcher( # pylint: disable=too-many-return-statements ---------- order : tuple(int,int) perturbative order - method : str + method : int method gamma_singlet : numpy.ndarray singlet anomalous dimensions matrices @@ -609,9 +610,9 @@ def dispatcher( # pylint: disable=too-many-return-statements return lo_exact(gamma_singlet, a1, a0, betalist) # Common method for NLO and NNLO - if method in ["iterate-exact", "iterate-expanded"]: + if method in [EvoMethods.ITERATE_EXACT, EvoMethods.ITERATE_EXPANDED]: return eko_iterate(gamma_singlet, a1, a0, betalist, order, ev_op_iterations) - if method == "perturbative-exact": + if method is EvoMethods.PERTURBATIVE_EXACT: return eko_perturbative( gamma_singlet, a1, @@ -622,7 +623,7 @@ def dispatcher( # pylint: disable=too-many-return-statements ev_op_max_order, True, ) - if method == "perturbative-expanded": + if method is EvoMethods.PERTURBATIVE_EXPANDED: return eko_perturbative( gamma_singlet, a1, @@ -633,19 +634,19 @@ def dispatcher( # pylint: disable=too-many-return-statements ev_op_max_order, False, ) - if method in ["truncated", "ordered-truncated"]: + if method in [EvoMethods.TRUNCATED, EvoMethods.ORDERED_TRUNCATED]: return eko_truncated(gamma_singlet, a1, a0, betalist, order, ev_op_iterations) # These methods are scattered for nlo and nnlo - if method == "decompose-exact": + if method is EvoMethods.DECOMPOSE_EXACT: if order[0] == 2: return nlo_decompose_exact(gamma_singlet, a1, a0, betalist) - elif order[0] == 3: + if order[0] == 3: return nnlo_decompose_exact(gamma_singlet, a1, a0, betalist) return n3lo_decompose_exact(gamma_singlet, a1, a0, nf) - if method == "decompose-expanded": + if method is EvoMethods.DECOMPOSE_EXPANDED: if order[0] == 2: return nlo_decompose_expanded(gamma_singlet, a1, a0, betalist) - elif order[0] == 3: + if order[0] == 3: return nnlo_decompose_expanded(gamma_singlet, a1, a0, betalist) return n3lo_decompose_expanded(gamma_singlet, a1, a0, nf) raise NotImplementedError("Selected method is not implemented") diff --git a/src/eko/kernels/singlet_qed.py b/src/eko/kernels/singlet_qed.py index c13dee95c..d63a2f1ac 100644 --- a/src/eko/kernels/singlet_qed.py +++ b/src/eko/kernels/singlet_qed.py @@ -6,6 +6,7 @@ from ekore import anomalous_dimensions as ad from .. import beta +from . import EvoMethods @nb.njit(cache=True) @@ -66,7 +67,7 @@ def dispatcher( a_half, nf, ev_op_iterations, - ev_op_max_order, + _ev_op_max_order, ): """Determine used kernel and call it. @@ -74,7 +75,7 @@ def dispatcher( ---------- order : tuple(int,int) perturbative order - method : str + method : int method gamma_singlet : numpy.ndarray singlet anomalous dimensions matrices @@ -96,7 +97,7 @@ def dispatcher( e_s : numpy.ndarray singlet EKO """ - if method in ["iterate-exact", "iterate-expanded"]: + if method is EvoMethods.ITERATE_EXACT: return eko_iterate( gamma_singlet, as_list, a_half, nf, order, ev_op_iterations, 4 ) diff --git a/src/eko/kernels/valence_qed.py b/src/eko/kernels/valence_qed.py index 0d5b747c3..186c70219 100644 --- a/src/eko/kernels/valence_qed.py +++ b/src/eko/kernels/valence_qed.py @@ -2,6 +2,7 @@ import numba as nb +from . import EvoMethods from .singlet_qed import eko_iterate @@ -14,7 +15,7 @@ def dispatcher( a_half, nf, ev_op_iterations, - ev_op_max_order, + _ev_op_max_order, ): """ Determine used kernel and call it. 
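# A small sketch of the string-to-enum translation now used by the kernel
# dispatchers, assuming `EvolutionMethod` in eko.io.types carries the usual
# string values such as "iterate-exact".
from eko.io.types import EvolutionMethod
from eko.kernels import EvoMethods, ev_method

method = ev_method(EvolutionMethod("iterate-exact"))
assert method is EvoMethods.ITERATE_EXACT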
@@ -23,7 +24,7 @@ def dispatcher( ---------- order : tuple(int,int) perturbative order - method : str + method : int method gamma_singlet : numpy.ndarray singlet anomalous dimensions matrices @@ -45,7 +46,7 @@ def dispatcher( e_v : numpy.ndarray singlet EKO """ - if method in ["iterate-exact", "iterate-expanded"]: + if method is EvoMethods.ITERATE_EXACT: return eko_iterate( gamma_valence, as_list, a_half, nf, order, ev_op_iterations, 2 ) diff --git a/src/eko/msbar_masses.py b/src/eko/msbar_masses.py index 26fb9874e..63e1b72d1 100644 --- a/src/eko/msbar_masses.py +++ b/src/eko/msbar_masses.py @@ -378,8 +378,8 @@ def compute( """ # TODO: sketch in the docs how the MSbar computation works with a figure. - mu2_ref = couplings.scale**2 - nf_ref: FlavorsNumber = couplings.num_flavs_ref + mu2_ref = couplings.ref[0] ** 2 + nf_ref: FlavorsNumber = couplings.ref[1] masses = np.concatenate((np.zeros(nf_ref - 3), np.full(6 - nf_ref, np.inf))) def sc(thr_masses): @@ -396,7 +396,7 @@ def sc(thr_masses): heavy_quarks = quark_names[3:] hq_idxs = np.arange(0, 3) if nf_ref > 4: - heavy_quarks = reversed(heavy_quarks) + heavy_quarks = "".join(reversed(heavy_quarks)) hq_idxs = reversed(hq_idxs) # loop on heavy quarks and compute the msbar masses diff --git a/src/eko/quantities/couplings.py b/src/eko/quantities/couplings.py index aec1d083f..cbeb40a62 100644 --- a/src/eko/quantities/couplings.py +++ b/src/eko/quantities/couplings.py @@ -2,10 +2,10 @@ import dataclasses import enum -from typing import Optional from ..io.dictlike import DictLike -from ..io.types import FlavorsNumber, LinearScale, ReferenceRunning, Scalar +from ..io.types import EvolutionPoint as EPoint +from ..io.types import LinearScale, ReferenceRunning, Scalar Coupling = Scalar CouplingRef = ReferenceRunning[Coupling] @@ -21,15 +21,7 @@ class CouplingsInfo(DictLike): alphas: Coupling alphaem: Coupling - scale: LinearScale - max_num_flavs: FlavorsNumber - num_flavs_ref: Optional[FlavorsNumber] - r"""Number of active flavors at strong coupling reference scale. - - I.e. :math:`n_{f,\text{ref}}(\mu^2_{\text{ref}})`, formerly called - ``nfref``. - - """ + ref: EPoint em_running: bool = False @property diff --git a/src/eko/quantities/heavy_quarks.py b/src/eko/quantities/heavy_quarks.py index 8db084ed4..7328f0f12 100644 --- a/src/eko/quantities/heavy_quarks.py +++ b/src/eko/quantities/heavy_quarks.py @@ -7,13 +7,7 @@ import numpy as np from ..io.dictlike import DictLike -from ..io.types import ( - FlavorsNumber, - IntrinsicFlavors, - LinearScale, - ReferenceRunning, - SquaredScale, -) +from ..io.types import FlavorsNumber, LinearScale, ReferenceRunning, SquaredScale FLAVORS = "cbt" @@ -91,16 +85,6 @@ class HeavyInfo(DictLike): """ - num_flavs_init: FlavorsNumber - r"""Number of active flavors at fitting scale. - - I.e. :math:`n_{f,\text{ref}}(\mu^2_0)`, formerly called ``nf0``. 
- - """ - num_flavs_max_pdf: FlavorsNumber - """Maximum number of quark PDFs.""" - intrinsic_flavors: IntrinsicFlavors - """List of intrinsic quark PDFs.""" masses: HeavyQuarkMasses """List of heavy quark masses.""" masses_scheme: QuarkMassScheme diff --git a/src/eko/runner/__init__.py b/src/eko/runner/__init__.py index 5f1036c7b..1c331d17b 100644 --- a/src/eko/runner/__init__.py +++ b/src/eko/runner/__init__.py @@ -1,6 +1,6 @@ """Manage steps to DGLAP solution, and operator creation.""" -import os +from pathlib import Path from typing import Union from ..io.runcards import OperatorCard, TheoryCard @@ -15,7 +15,7 @@ def solve( theory_card: Union[RawCard, TheoryCard], operators_card: Union[RawCard, OperatorCard], - path: os.PathLike, + path: Path, ): r"""Solve DGLAP equations in terms of evolution kernel operators (EKO). diff --git a/src/eko/runner/commons.py b/src/eko/runner/commons.py index 543fb927c..af547b9a6 100644 --- a/src/eko/runner/commons.py +++ b/src/eko/runner/commons.py @@ -33,7 +33,7 @@ def atlas(theory: TheoryCard, operator: OperatorCard) -> Atlas: # TODO: cache result masses = runcards.masses(theory, operator.configs.evolution_method) matching_scales = np.power(theory.heavy.matching_ratios, 2.0) * np.array(masses) - return Atlas(matching_scales.tolist(), (operator.mu20, theory.heavy.num_flavs_init)) + return Atlas(matching_scales.tolist(), (operator.mu20, operator.init[1])) def couplings(theory: TheoryCard, operator: OperatorCard) -> Couplings: diff --git a/src/eko/runner/legacy.py b/src/eko/runner/legacy.py index 9ce925a9f..44f91670b 100644 --- a/src/eko/runner/legacy.py +++ b/src/eko/runner/legacy.py @@ -1,9 +1,11 @@ """Main application class of eko.""" import logging -import os +from pathlib import Path from typing import Union +from ekomark.data import update_runcards + from ..evolution_operator.grid import OperatorGrid from ..io import EKO, Operator, runcards from ..io.types import RawCard @@ -30,7 +32,7 @@ def __init__( self, theory_card: Union[RawCard, runcards.TheoryCard], operators_card: Union[RawCard, runcards.OperatorCard], - path: os.PathLike, + path: Path, ): """Initialize runner. @@ -44,8 +46,7 @@ def __init__( path where to store the computed operator """ - new_theory, new_operator = runcards.update(theory_card, operators_card) - new_theory.heavy.intrinsic_flavors = [4, 5, 6] + new_theory, new_operator = update_runcards(theory_card, operators_card) # Store inputs self.path = path @@ -70,7 +71,6 @@ def __init__( masses=masses, mass_scheme=new_theory.heavy.masses_scheme.value, thresholds_ratios=new_theory.heavy.squared_ratios, - intrinsic_flavors=new_theory.heavy.intrinsic_flavors, xif=new_theory.xif, configs=new_operator.configs, debug=new_operator.debug, diff --git a/src/eko/runner/managed.py b/src/eko/runner/managed.py index 486c8035c..c50012ffd 100644 --- a/src/eko/runner/managed.py +++ b/src/eko/runner/managed.py @@ -13,41 +13,31 @@ from pathlib import Path -from ..io.items import Evolution, Matching, Target +from ..io.items import Target from ..io.runcards import OperatorCard, TheoryCard from ..io.struct import EKO -from . import commons, operators, parts, recipes +from . 
import operators, parts, recipes def solve(theory: TheoryCard, operator: OperatorCard, path: Path): """Solve DGLAP equations in terms of evolution kernel operators (EKO).""" - theory.heavy.intrinsic_flavors = [4, 5, 6] - with EKO.create(path) as builder: eko = builder.load_cards(theory, operator).build() # pylint: disable=E1101 - - atlas = commons.atlas(eko.theory_card, eko.operator_card) - - recs = recipes.create(eko.operator_card.evolgrid, atlas) - eko.load_recipes(recs) + recipes.create(eko) for recipe in eko.recipes: - assert isinstance(recipe, Evolution) eko.parts[recipe] = parts.evolve(eko, recipe) # flush the memory del eko.parts[recipe] for recipe in eko.recipes_matching: - assert isinstance(recipe, Matching) eko.parts_matching[recipe] = parts.match(eko, recipe) # flush the memory del eko.parts_matching[recipe] for ep in operator.evolgrid: - headers = recipes.elements(ep, atlas) - parts_ = operators.retrieve(headers, eko.parts, eko.parts_matching) + components = operators.retrieve(ep, eko) target = Target.from_ep(ep) - eko.operators[target] = operators.join(parts_) + eko.operators[target] = operators.join(components) # flush the memory del eko.parts - del eko.parts_matching del eko.operators[target] diff --git a/src/eko/runner/operators.py b/src/eko/runner/operators.py index 07ac34093..a315f6964 100644 --- a/src/eko/runner/operators.py +++ b/src/eko/runner/operators.py @@ -8,21 +8,37 @@ from ..io.inventory import Inventory from ..io.items import Evolution, Operator, Recipe +from ..io.struct import EKO +from ..io.types import EvolutionPoint +from . import commons, recipes -def retrieve( +def _retrieve( headers: List[Recipe], parts: Inventory, parts_matching: Inventory ) -> List[Operator]: """Retrieve parts to be joined.""" elements = [] for head in headers: inv = parts if isinstance(head, Evolution) else parts_matching - elements.append(inv[head]) + op = inv[head] + assert op is not None + elements.append(op) return elements -def dot4(op1: npt.NDArray, op2: npt.NDArray) -> npt.NDArray: +def _parts(ep: EvolutionPoint, eko: EKO) -> List[Recipe]: + """Determine parts required for the given evolution point operator.""" + atlas = commons.atlas(eko.theory_card, eko.operator_card) + return recipes._elements(ep, atlas) + + +def retrieve(ep: EvolutionPoint, eko: EKO) -> List[Operator]: + """Retrieve parts required for the given evolution point operator.""" + return _retrieve(_parts(ep, eko), eko.parts, eko.parts_matching) + + +def _dot4(op1: npt.NDArray, op2: npt.NDArray) -> npt.NDArray: """Dot product between rank 4 objects. The product is performed considering them as matrices indexed by pairs, so @@ -32,10 +48,10 @@ def dot4(op1: npt.NDArray, op2: npt.NDArray) -> npt.NDArray: return np.einsum("aibj,bjck->aick", op1, op2) -def dotop(op1: Operator, op2: Operator) -> Operator: +def _dotop(op1: Operator, op2: Operator) -> Operator: r"""Dot product between two operators. - Essentially a wrapper of :func:`dot4`, applying linear error propagation, + Essentially a wrapper of :func:`_dot4`, applying linear error propagation, if applicable. 
Note @@ -65,10 +81,10 @@ def dotop(op1: Operator, op2: Operator) -> Operator: |da_i| \cdot |b_i| + |a_i| \cdot |db_i| + \mathcal{O}(d^2) """ - val = dot4(op1.operator, op2.operator) + val = _dot4(op1.operator, op2.operator) if op1.error is not None and op2.error is not None: - err = dot4(np.abs(op1.operator), np.abs(op2.error)) + dot4( + err = _dot4(np.abs(op1.operator), np.abs(op2.error)) + _dot4( np.abs(op1.error), np.abs(op2.operator) ) else: @@ -91,4 +107,4 @@ def join(elements: List[Operator]) -> Operator: consider if reversing the path... """ - return reduce(dotop, reversed(elements)) + return reduce(_dotop, reversed(elements)) diff --git a/src/eko/runner/parts.py b/src/eko/runner/parts.py index f9379bf76..75c4d8db8 100644 --- a/src/eko/runner/parts.py +++ b/src/eko/runner/parts.py @@ -16,13 +16,14 @@ from ..evolution_operator import matching_condition from ..evolution_operator import operator_matrix_element as ome from ..evolution_operator import physical +from ..evolution_operator.grid import Managers from ..io import EKO from ..io.items import Evolution, Matching, Operator from ..quantities.heavy_quarks import QuarkMassScheme from . import commons -def managers(eko: EKO) -> dict: +def _managers(eko: EKO) -> Managers: """Collect managers for operator computation. .. todo:: @@ -31,39 +32,14 @@ def managers(eko: EKO) -> dict: """ tcard = eko.theory_card ocard = eko.operator_card - return dict( - thresholds_config=commons.atlas(tcard, ocard), + return Managers( + atlas=commons.atlas(tcard, ocard), couplings=commons.couplings(tcard, ocard), - interpol_dispatcher=commons.interpolator(ocard), + interpolator=commons.interpolator(ocard), ) -def blowup_info(eko: EKO) -> dict: - """Prepare common information to blow up to flavor basis. - - Note - ---- - ``intrinsic_range`` is a fully deprecated feature, here and anywhere else, - since a full range is already always used for backward evolution, and it is - not harmful to use it also for forward. - - Indeed, the only feature of non-intrinsic evolution is to absorb a - non-trivial boundary condition when an intrinsic PDF is defined. - But to achieve this, is sufficient to not specify any intrinsic boundary - condition at all, while if something is there, it is intuitive enough that - it will be consistently evolved. - - Moreover, since two different behavior are applied for the forward and - backward evolution, the intrinsic range is a "non-local" function, since it - does not depend only on the evolution segment, but also on the previous - evolution history (to determine if evolution is backward in flavor, - irrespectively of happening for an increasing or decreasing interval in - scale at fixed flavor). - """ - return dict(intrinsic_range=[4, 5, 6], qed=eko.theory_card.order[1] > 0) - - -def evolve_configs(eko: EKO) -> dict: +def _evolve_configs(eko: EKO) -> dict: """Create configs for :class:`Operator`. .. 
todo:: @@ -94,19 +70,22 @@ def evolve_configs(eko: EKO) -> dict: def evolve(eko: EKO, recipe: Evolution) -> Operator: """Compute evolution in isolation.""" op = evop.Operator( - evolve_configs(eko), managers(eko), recipe.as_atlas, is_threshold=recipe.cliff + _evolve_configs(eko), + _managers(eko), + recipe.as_atlas, + is_threshold=recipe.cliff, ) op.compute() - binfo = blowup_info(eko) + qed = eko.theory_card.order[1] > 0 res, err = physical.PhysicalOperator.ad_to_evol_map( - op.op_members, op.nf, op.q2_to, **binfo - ).to_flavor_basis_tensor(qed=binfo["qed"]) + op.op_members, op.nf, op.q2_to, qed + ).to_flavor_basis_tensor(qed) return Operator(res, err) -def matching_configs(eko: EKO) -> dict: +def _matching_configs(eko: EKO) -> dict: """Create configs for :class:`OperatorMatrixElement`. .. todo:: @@ -116,9 +95,8 @@ def matching_configs(eko: EKO) -> dict: tcard = eko.theory_card ocard = eko.operator_card return dict( - **evolve_configs(eko), + **_evolve_configs(eko), backward_inversion=ocard.configs.inversion_method, - intrinsic_range=tcard.heavy.intrinsic_flavors, ) @@ -136,8 +114,8 @@ def match(eko: EKO, recipe: Matching) -> Operator: """ kthr = eko.theory_card.heavy.squared_ratios[recipe.hq - 4] op = ome.OperatorMatrixElement( - matching_configs(eko), - managers(eko), + _matching_configs(eko), + _managers(eko), recipe.hq - 1, recipe.scale, recipe.inverse, @@ -145,10 +123,9 @@ def match(eko: EKO, recipe: Matching) -> Operator: eko.theory_card.heavy.masses_scheme is QuarkMassScheme.MSBAR, ) op.compute() - - binfo = blowup_info(eko) + qed = eko.theory_card.order[1] > 0 res, err = matching_condition.MatchingCondition.split_ad_to_evol_map( - op.op_members, op.nf, recipe.scale, **binfo - ).to_flavor_basis_tensor(qed=binfo["qed"]) + op.op_members, op.nf, recipe.scale, qed + ).to_flavor_basis_tensor(qed) return Operator(res, err) diff --git a/src/eko/runner/recipes.py b/src/eko/runner/recipes.py index f340b9dce..1f83e75dd 100644 --- a/src/eko/runner/recipes.py +++ b/src/eko/runner/recipes.py @@ -3,11 +3,13 @@ from typing import List from ..io.items import Evolution, Matching, Recipe +from ..io.struct import EKO from ..io.types import EvolutionPoint as EPoint from ..matchings import Atlas, Segment +from . import commons -def elements(ep: EPoint, atlas: Atlas) -> List[Recipe]: +def _elements(ep: EPoint, atlas: Atlas) -> List[Recipe]: """Determine recipes to compute a given operator.""" recipes = [] @@ -27,10 +29,17 @@ def elements(ep: EPoint, atlas: Atlas) -> List[Recipe]: return recipes -def create(evolgrid: List[EPoint], atlas: Atlas) -> List[Recipe]: +def _create(evolgrid: List[EPoint], atlas: Atlas) -> List[Recipe]: """Create all associated recipes.""" recipes = [] for ep in evolgrid: - recipes.extend(elements(ep, atlas)) + recipes.extend(_elements(ep, atlas)) return list(set(recipes)) + + +def create(eko: EKO): + """Create and load all associated recipes.""" + atlas = commons.atlas(eko.theory_card, eko.operator_card) + recs = _create(eko.operator_card.evolgrid, atlas) + eko.load_recipes(recs) diff --git a/src/eko/scale_variations/__init__.py b/src/eko/scale_variations/__init__.py index 4af80592b..8d3f01556 100644 --- a/src/eko/scale_variations/__init__.py +++ b/src/eko/scale_variations/__init__.py @@ -5,30 +5,32 @@ """ import enum +from typing import Any, Dict +from ..io.types import ScaleVariationsMethod from . 
import expanded, exponentiated class Modes(enum.IntEnum): - """Enumerate scale Variation modes.""" + """Enumerate scale variation modes.""" unvaried = enum.auto() exponentiated = enum.auto() expanded = enum.auto() -def sv_mode(s): +def sv_mode(s: ScaleVariationsMethod) -> Modes: """Return the scale variation mode. Parameters ---------- - s : str + s : string representation Returns ------- - enum.IntEnum - enum representation + i : + int representation """ if s is not None: @@ -36,10 +38,12 @@ def sv_mode(s): return Modes.unvaried -class ModeMixin: +class ScaleVariationModeMixin: """Mixin to cast scale variation mode.""" + config: Dict[str, Any] + @property - def sv_mode(self): + def sv_mode(self) -> Modes: """Return the scale variation mode.""" return sv_mode(self.config["ModSV"]) diff --git a/src/ekobox/apply.py b/src/ekobox/apply.py index 336900798..bf9f2083d 100644 --- a/src/ekobox/apply.py +++ b/src/ekobox/apply.py @@ -1,10 +1,14 @@ """Apply operator evolution to PDF set.""" +from dataclasses import dataclass +from typing import Dict, Optional, Union + import numpy as np from eko import basis_rotation as br from eko import interpolation from eko.io import EKO +from eko.io.types import EvolutionPoint def apply_pdf( @@ -49,6 +53,17 @@ def apply_pdf( CONTRACTION = "ajbk,bk" +_PdfLabel = Union[int, str] +"""PDF identifier: either PID or label.""" + + +@dataclass +class PdfResult: + """Helper class to collect PDF results.""" + + pdfs: Dict[_PdfLabel, float] + errors: Optional[Dict[_PdfLabel, float]] = None + def apply_pdf_flavor( eko: EKO, lhapdf_like, targetgrid=None, flavor_rotation=None, labels=None @@ -76,60 +91,60 @@ def apply_pdf_flavor( output PDFs and their associated errors for the computed mu2grid """ # create pdfs - pdfs = np.zeros((len(eko.bases.inputpids), len(eko.bases.inputgrid))) - for j, pid in enumerate(eko.bases.inputpids): + pdfs = np.zeros((len(br.flavor_basis_pids), len(eko.xgrid))) + for j, pid in enumerate(br.flavor_basis_pids): if not lhapdf_like.hasFlavor(pid): continue pdfs[j] = np.array( - [lhapdf_like.xfxQ2(pid, x, eko.mu20) / x for x in eko.bases.inputgrid.raw] + [lhapdf_like.xfxQ2(pid, x, eko.mu20) / x for x in eko.xgrid.raw] ) # build output - out_grid = {} + out_grid: Dict[EvolutionPoint, PdfResult] = {} for ep, elem in eko.items(): pdf_final = np.einsum(CONTRACTION, elem.operator, pdfs, optimize="optimal") if elem.error is not None: error_final = np.einsum(CONTRACTION, elem.error, pdfs, optimize="optimal") else: error_final = None - out_grid[ep] = { - "pdfs": dict(zip(eko.bases.targetpids, pdf_final)), - "errors": None, - } + out_grid[ep] = PdfResult(dict(zip(br.flavor_basis_pids, pdf_final))) if error_final is not None: - out_grid[ep]["errors"] = dict(zip(eko.bases.targetpids, error_final)) + out_grid[ep].errors = dict(zip(br.flavor_basis_pids, error_final)) qed = eko.theory_card.order[1] > 0 # rotate to evolution basis if flavor_rotation is not None: for q2, op in out_grid.items(): pdf = flavor_rotation @ np.array( - [op["pdfs"][pid] for pid in br.flavor_basis_pids] + [op.pdfs[pid] for pid in br.flavor_basis_pids] ) - if labels is None: - labels = list(range(flavor_rotation.shape[0])) - op["pdfs"] = dict(zip(labels, pdf)) - if op["errors"] is not None: + if not qed: + evol_basis = br.evol_basis + else: + evol_basis = br.unified_evol_basis + op.pdfs = dict(zip(evol_basis, pdf)) + if op.errors is not None: errors = flavor_rotation @ np.array( - [op["errors"][pid] for pid in br.flavor_basis_pids] + [op.errors[pid] for pid in br.flavor_basis_pids] ) - 
op["errors"] = dict(zip(labels, errors)) + op.errors = dict(zip(evol_basis, errors)) # rotate/interpolate to target grid if targetgrid is not None: b = interpolation.InterpolatorDispatcher( - xgrid=eko.bases.targetgrid, + xgrid=eko.xgrid, polynomial_degree=eko.operator_card.configs.interpolation_polynomial_degree, mode_N=False, ) rot = b.get_interpolation(targetgrid) for evpdf in out_grid.values(): - for pdf_label in evpdf["pdfs"]: - evpdf["pdfs"][pdf_label] = np.matmul(rot, evpdf["pdfs"][pdf_label]) - if evpdf["errors"] is not None: - evpdf["errors"][pdf_label] = np.matmul( - rot, evpdf["errors"][pdf_label] - ) - - return out_grid + for pdf_label in evpdf.pdfs: + evpdf.pdfs[pdf_label] = np.matmul(rot, evpdf.pdfs[pdf_label]) + if evpdf.errors is not None: + evpdf.errors[pdf_label] = np.matmul(rot, evpdf.errors[pdf_label]) + # cast back to be backward compatible + real_out_grid = {} + for ep, res in out_grid.items(): + real_out_grid[ep] = {"pdfs": res.pdfs, "errors": res.errors} + return real_out_grid diff --git a/src/ekobox/cards.py b/src/ekobox/cards.py index 9476bb77a..86233c0df 100644 --- a/src/ekobox/cards.py +++ b/src/ekobox/cards.py @@ -14,14 +14,9 @@ couplings=dict( alphas=0.118, alphaem=0.007496252, - scale=91.2, - num_flavs_ref=5, - max_num_flavs=6, + ref=(91.2, 5), ), heavy=dict( - num_flavs_init=4, - num_flavs_max_pdf=6, - intrinsic_flavors=[4], masses=[ReferenceRunning([mq, nan]) for mq in (2.0, 4.5, 173.07)], masses_scheme="POLE", matching_ratios=[1.0, 1.0, 1.0], @@ -33,7 +28,7 @@ ) _operator = dict( - mu0=1.65, + init=(1.65, 4), mugrid=[(100.0, 5)], xgrid=np.geomspace(1e-7, 1.0, 50).tolist(), configs=dict( diff --git a/src/ekobox/cli/inspect.py b/src/ekobox/cli/inspect.py index 5787c5f1e..c355fce61 100644 --- a/src/ekobox/cli/inspect.py +++ b/src/ekobox/cli/inspect.py @@ -28,7 +28,7 @@ def subcommand(ctx, path: pathlib.Path): @click.pass_obj def sub_mu2(operator: EKO): """Check operator's mu2grid.""" - rich.print_json(data=operator.mu2grid.tolist()) + rich.print_json(data=operator.mu2grid) @subcommand.command("cards") diff --git a/src/ekobox/cli/run.py b/src/ekobox/cli/run.py index 9a91e36ab..44cfe216e 100644 --- a/src/ekobox/cli/run.py +++ b/src/ekobox/cli/run.py @@ -53,11 +53,11 @@ def subcommand(paths: Sequence[pathlib.Path]): else: output = operator.parent / OUTPUT - theory = yaml.safe_load(theory.read_text(encoding="utf-8")) - if "order" in theory: - theory = TheoryCard.from_dict(theory) - operator = yaml.safe_load(operator.read_text(encoding="utf-8")) - if "configs" in operator: - operator = OperatorCard.from_dict(operator) - - eko.solve(theory, operator, path=output) + tc = yaml.safe_load(theory.read_text(encoding="utf-8")) + if "order" in tc: + tc = TheoryCard.from_dict(tc) + oc = yaml.safe_load(operator.read_text(encoding="utf-8")) + if "configs" in oc: + oc = OperatorCard.from_dict(oc) + + eko.solve(tc, oc, path=output) diff --git a/src/ekobox/cli/runcards.py b/src/ekobox/cli/runcards.py index 020a58917..99db353bd 100644 --- a/src/ekobox/cli/runcards.py +++ b/src/ekobox/cli/runcards.py @@ -3,6 +3,8 @@ import logging import pathlib +import numpy as np + from .. import cards from . 
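Sketch of how the reworked ``apply_pdf`` output is consumed: results are keyed by evolution point and, for backward compatibility, still exposed as plain ``{"pdfs": ..., "errors": ...}`` dictionaries in the flavor basis. The EKO path and the LHAPDF set name are placeholders::

    import pathlib

    import lhapdf

    from eko.io import EKO
    from ekobox import apply

    pdf = lhapdf.mkPDF("CT18NNLO", 0)  # any LHAPDF-like object works
    with EKO.read(pathlib.Path("eko.tar")) as evolution_operator:
        evolved = apply.apply_pdf(evolution_operator, pdf)
    for ep, result in evolved.items():
        gluon = result["pdfs"][21]  # PDF values on the x-grid, flavor basis PIDs as keys
        print(ep, gluon)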
import library as lib from .base import command @@ -34,7 +36,7 @@ def sub_example(destination: pathlib.Path): theory.order = (1, 0) cards.dump(theory.raw, path=destination / "theory.yaml") operator = cards.example.operator() - operator.mu0 = 1.65 - operator.mu2grid = [1e5] + operator.init = (1.65, 4) + operator.mugrid = [(np.sqrt(1e5), 5)] cards.dump(operator.raw, path=destination / "operator.yaml") _logger.info(f"Runcards generated to '{destination}'") diff --git a/src/ekobox/evol_pdf.py b/src/ekobox/evol_pdf.py index abdb2ec6a..9da6dda65 100644 --- a/src/ekobox/evol_pdf.py +++ b/src/ekobox/evol_pdf.py @@ -73,12 +73,16 @@ def evolve_pdfs( # apply PDF to eko evolved_PDF_list = [] + q2block_per_nf = {} with EKO.read(eko_path) as eko_output: for initial_PDF in initial_PDF_list: evolved_PDF_list.append( apply.apply_pdf(eko_output, initial_PDF, targetgrid) ) + # separate by nf the evolgrid (and order per nf/q) + q2block_per_nf = regroup_evolgrid(eko_output.evolgrid) # pylint: disable=E1101 + # update info file if targetgrid is None: targetgrid = operators_card.xgrid @@ -93,8 +97,6 @@ def evolve_pdfs( info_update=info_update, ) - # in the eko scales are squared - q2block_per_nf = {nf: np.power(q2s, 2) for nf, q2s in q2block_per_nf.items()} # write all replicas all_member_blocks = [] for evolved_PDF in evolved_PDF_list: @@ -134,7 +136,7 @@ def pdf_xq2(pid, x, Q2): pdf_xq2, xgrid=xgrid, sorted_q2grid=q2grid, - pids=br.flavor_basis_pids, + pids=np.array(br.flavor_basis_pids), ) all_blocks.append(block) return all_blocks diff --git a/src/ekobox/genpdf/__init__.py b/src/ekobox/genpdf/__init__.py index 4666891bc..d6c427989 100644 --- a/src/ekobox/genpdf/__init__.py +++ b/src/ekobox/genpdf/__init__.py @@ -17,7 +17,7 @@ def take_data( - parent_pdf_set: Optional[Union[str, dict]] = None, + parent_pdf_set=None, members: bool = False, xgrid: Optional[List[float]] = None, evolgrid: Optional[List[EPoint]] = None, @@ -63,7 +63,7 @@ def take_data( all_blocks.append( [ generate_block( - toylh.xfxQ2, xgrid, sorted_q2grid, br.flavor_basis_pids + toylh.xfxQ2, xgrid, sorted_q2grid, list(br.flavor_basis_pids) ) ] ) @@ -87,7 +87,7 @@ def take_data( ), xgrid, sorted_q2grid, - br.flavor_basis_pids, + list(br.flavor_basis_pids), ) ] ) diff --git a/src/ekobox/info_file.py b/src/ekobox/info_file.py index fc5c17f1d..16dde0197 100644 --- a/src/ekobox/info_file.py +++ b/src/ekobox/info_file.py @@ -2,6 +2,7 @@ import copy import math +from typing import Any, Dict import numpy as np @@ -37,7 +38,9 @@ def build( info file in lhapdf format """ template_info = copy.deepcopy(load.template_info) - template_info["SetDesc"] = "Evolved PDF from " + str(operators_card.mu0) + " GeV" + template_info["SetDesc"] = ( + "Evolved PDF from " + str(operators_card.init[0]) + " GeV" + ) template_info["Authors"] = "" template_info["FlavorScheme"] = "variable" template_info.update(info_update) @@ -50,7 +53,7 @@ def build( template_info["OrderQCD"] = theory_card.order[0] - 1 template_info["QMin"] = round(math.sqrt(operators_card.mu2grid[0]), 4) template_info["QMax"] = round(math.sqrt(operators_card.mu2grid[-1]), 4) - template_info["MZ"] = theory_card.couplings.scale + template_info["MZ"] = theory_card.couplings.ref[0] template_info["MUp"] = 0.0 template_info["MDown"] = 0.0 template_info["MStrange"] = 0.0 @@ -81,7 +84,7 @@ def build_alphas( info file section in lhapdf format """ # start with meta stuff - template_info = {} + template_info: Dict[str, Any] = {} template_info["AlphaS_MZ"] = theory_card.couplings.alphas template_info["AlphaS_OrderQCD"] 
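Sketch of the new runcard layout used here: the single ``init`` and ``ref`` tuples replace the former ``mu0``/``num_flavs_init`` and ``scale``/``num_flavs_ref`` pairs. It assumes ``cards.example.theory()`` mirrors the ``cards.example.operator()`` accessor shown above::

    from ekobox import cards

    theory = cards.example.theory()
    theory.couplings.ref = (91.2, 5)  # (reference scale in GeV, nf at that scale)

    operator = cards.example.operator()
    operator.init = (1.65, 4)  # (mu0, nf0): evolution starting point
    operator.mugrid = [(100.0, 5)]  # target (mu, nf) points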
= theory_card.order[0] - 1 # prepare diff --git a/src/ekobox/utils.py b/src/ekobox/utils.py index 091cde15e..5a00314e7 100644 --- a/src/ekobox/utils.py +++ b/src/ekobox/utils.py @@ -1,7 +1,7 @@ """Generic utilities to work with EKOs.""" -import os from collections import defaultdict +from pathlib import Path from typing import Optional import numpy as np @@ -16,7 +16,7 @@ def ekos_product( eko_fin: EKO, rtol: float = 1e-6, atol: float = 1e-10, - path: Optional[os.PathLike] = None, + path: Optional[Path] = None, ): """Compute the product of two ekos. @@ -39,7 +39,7 @@ def ekos_product( # another kind of output which includes the theory and operator runcards) ep_match = eko_ini.approx( - (eko_fin.operator_card.mu0**2, eko_fin.theory_card.heavy.num_flavs_init), + (eko_fin.operator_card.init[0] ** 2, eko_fin.operator_card.init[1]), rtol=rtol, atol=atol, ) diff --git a/src/ekomark/benchmark/runner.py b/src/ekomark/benchmark/runner.py index 8ac48f5d3..2b003e1b3 100644 --- a/src/ekomark/benchmark/runner.py +++ b/src/ekomark/benchmark/runner.py @@ -14,7 +14,6 @@ import eko from eko import EKO from eko import basis_rotation as br -from eko.io import manipulate from ekobox import apply from .. import pdfname @@ -117,22 +116,14 @@ def run_me(self, theory, ocard, _pdf): os.makedirs(output_path) # rotating to evolution basis if requested with EKO.edit(path) as out_copy: - change_lab = False - if self.rotate_to_evolution_basis: - qed = theory["QED"] > 0 - if not qed: - manipulate.to_evol(out_copy, source=True, target=True) - else: - manipulate.to_uni_evol(out_copy, source=True, target=True) - change_lab = True - save_operators_to_pdf( output_path, theory, ocard, out_copy, self.skip_pdfs(theory), - change_lab, + self.rotate_to_evolution_basis, + theory["QED"] > 0, ) else: # else we always rerun diff --git a/src/ekomark/data/__init__.py b/src/ekomark/data/__init__.py index 797409204..89fb6377b 100644 --- a/src/ekomark/data/__init__.py +++ b/src/ekomark/data/__init__.py @@ -1 +1,30 @@ """EKO database configuration.""" + +from typing import Union + +from eko.io.runcards import Legacy, OperatorCard, TheoryCard +from eko.io.types import RawCard + + +def update_runcards( + theory: Union[RawCard, TheoryCard], operator: Union[RawCard, OperatorCard] +): + """Update legacy runcards. + + This function is mainly defined for compatibility with the old interface. + Prefer direct usage of :class:`Legacy` in new code. 
+ + Consecutive applications of this function yield identical results:: + + cards = update(theory, operator) + assert update(*cards) == cards + + """ + if isinstance(theory, TheoryCard) or isinstance(operator, OperatorCard): + # if one is not a dict, both have to be new cards + assert isinstance(theory, TheoryCard) + assert isinstance(operator, OperatorCard) + return theory, operator + + cards = Legacy(theory, operator) + return cards.new_theory, cards.new_operator diff --git a/src/ekomark/data/operators.py b/src/ekomark/data/operators.py index ad928574d..03ad4d349 100644 --- a/src/ekomark/data/operators.py +++ b/src/ekomark/data/operators.py @@ -15,7 +15,7 @@ ev_op_max_order=10, ev_op_iterations=10, backward_inversion="expanded", - n_integration_cores=0, + n_integration_cores=1, debug_skip_non_singlet=False, debug_skip_singlet=False, mugrid=[10], diff --git a/src/ekomark/plots.py b/src/ekomark/plots.py index 0395bd03d..5e2bee420 100644 --- a/src/ekomark/plots.py +++ b/src/ekomark/plots.py @@ -2,6 +2,7 @@ import io import pprint +from typing import Dict, List import matplotlib.pyplot as plt import numpy as np @@ -10,6 +11,7 @@ from matplotlib.colors import LogNorm from eko import basis_rotation as br +from eko.io import manipulate from eko.io.struct import EKO @@ -210,7 +212,15 @@ def plot_operator(var_name, op, op_err, log_operator=True, abs_operator=True): return fig -def save_operators_to_pdf(path, theory, ops, me: EKO, skip_pdfs, change_lab=False): +def save_operators_to_pdf( + path, + theory, + ops, + me: EKO, + skip_pdfs, + rotate_to_evolution_basis: bool, + qed: bool = False, +): """Output all operator heatmaps to PDF. Parameters @@ -225,15 +235,12 @@ def save_operators_to_pdf(path, theory, ops, me: EKO, skip_pdfs, change_lab=Fals DGLAP result skip_pdfs : list PDF to skip - change_lab : bool - set whether to rename the labels - + rotate_to_evolution_basis : bool + plot operators in evolution basis + qed : bool + plot operators in unified evolution basis, if the former is active """ - ops_names = list(me.bases.targetpids) - if np.allclose(ops_names, br.rotate_flavor_to_evolution): - ops_names = br.evol_basis_pids - else: - raise ValueError("Can not reconstruct PDF names") + ops_names = br.evol_basis_pids if rotate_to_evolution_basis else br.evol_basis_pids ops_id = f"o{ops['hash'][:6]}_t{theory['hash'][:6]}" path = f"{path}/{ops_id}.pdf" print(f"Plotting operators plots to {path}") @@ -245,16 +252,21 @@ def save_operators_to_pdf(path, theory, ops, me: EKO, skip_pdfs, change_lab=Fals # plot the operators # it's necessary to reshuffle the eko output - for mu2 in me.mu2grid: - results = me[mu2].operator - errors = me[mu2].error + for ep, op in me.items(): + if rotate_to_evolution_basis: + if not qed: + op = manipulate.to_evol(op, source=True, target=True) + else: + op = manipulate.to_uni_evol(op, source=True, target=True) + results = op.operator + errors = op.error # loop on pids for label_out, res, res_err in zip(ops_names, results, errors): if label_out in skip_pdfs: continue - new_op = {} - new_op_err = {} + new_op: Dict[int, List[float]] = {} + new_op_err: Dict[int, List[float]] = {} # loop on xgrid point for j in range(len(me.xgrid)): # loop on pid in @@ -270,17 +282,10 @@ def save_operators_to_pdf(path, theory, ops, me: EKO, skip_pdfs, change_lab=Fals for label_in in ops_names: if label_in in skip_pdfs: continue - lab_in = label_in - lab_out = label_out - if change_lab: - index_in = br.evol_basis_pids.index(label_in) - index_out = br.evol_basis_pids.index(label_out) - lab_in = 
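Sketch of the per-operator rotation now used for plotting: ``manipulate.to_evol`` acts on a single ``Operator`` and returns the rotated copy. The identity operator below is dummy data just for illustration::

    import numpy as np

    from eko import basis_rotation as br
    from eko.io import manipulate
    from eko.io.items import Operator

    lpids, lx = len(br.flavor_basis_pids), 10
    op = Operator(operator=np.eye(lpids * lx).reshape(lpids, lx, lpids, lx))
    # rotate both input and output legs from the flavor to the evolution basis
    rotated = manipulate.to_evol(op, source=True, target=True)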
br.evol_basis[index_in] - lab_out = br.evol_basis[index_out] fig = None try: fig = plot_operator( - f"Operator ({lab_in};{lab_out}) µ_F^2 = {mu2} GeV^2", + f"Operator ({label_in};{label_out}) µ_F^2 = {ep[0]} GeV^2, nf = {ep[1]}", new_op[label_in], new_op_err[label_in], ) diff --git a/tests/eko/evolution_operator/test_grid.py b/tests/eko/evolution_operator/test_grid.py index 804388db6..01754578a 100644 --- a/tests/eko/evolution_operator/test_grid.py +++ b/tests/eko/evolution_operator/test_grid.py @@ -72,7 +72,7 @@ def test_mod_expanded(theory_card, theory_ffns, operator_card, tmp_path: pathlib else: theory = theory_card theory.order = (1, 0) - theory.heavy.num_flavs_init = nf0 + operator_card.init = (operator_card.init[0], nf0) path.unlink(missing_ok=True) opgrid = legacy.Runner(theory, operator_card, path=path).op_grid opg = opgrid.compute() diff --git a/tests/eko/evolution_operator/test_init.py b/tests/eko/evolution_operator/test_init.py index 597f645f8..ed003f925 100644 --- a/tests/eko/evolution_operator/test_init.py +++ b/tests/eko/evolution_operator/test_init.py @@ -1,4 +1,5 @@ import os +from dataclasses import dataclass import numpy as np import pytest @@ -11,6 +12,7 @@ from eko.evolution_operator import Operator, quad_ker from eko.interpolation import InterpolatorDispatcher from eko.io.runcards import OperatorCard, ScaleVariationsMethod, TheoryCard +from eko.kernels import EvoMethods from eko.kernels import non_singlet as ns from eko.kernels import non_singlet_qed as qed_ns from eko.kernels import singlet as s @@ -25,7 +27,7 @@ def test_quad_ker_errors(): order=(1, 0), mode0=mode0, mode1=0, - method="", + method="iterate-exact", is_log=True, logx=np.log(0.1), areas=[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], @@ -60,15 +62,29 @@ def test_quad_ker(monkeypatch): monkeypatch.setattr(qed_ns, "dispatcher", lambda *args: 1.0) monkeypatch.setattr(s, "dispatcher", lambda *args: np.identity(2)) params = [ - ((1, 0), br.non_singlet_pids_map["ns+"], 0, "", 0.0, 0.0), - ((1, 0), br.non_singlet_pids_map["ns+"], 0, "", 0.123, 1.0), - ((3, 1), br.non_singlet_pids_map["ns+u"], 0, "", 0.0, 0.0), - ((1, 0), 100, 100, "", 0.123, 1.0), - ((1, 0), 100, 21, "", 0.0, 0.0), - ((1, 1), 100, 100, "iterate-exact", 0.123, 1.0), - ((1, 1), 100, 21, "iterate-exact", 0.123, 0.0), - ((1, 1), 10200, 10200, "iterate-exact", 0.123, 1.0), - ((1, 1), 10200, 10204, "iterate-exact", 0.123, 0.0), + ((1, 0), br.non_singlet_pids_map["ns+"], 0, EvoMethods.ITERATE_EXACT, 0.0, 0.0), + ( + (1, 0), + br.non_singlet_pids_map["ns+"], + 0, + EvoMethods.ITERATE_EXACT, + 0.123, + 1.0, + ), + ( + (3, 1), + br.non_singlet_pids_map["ns+u"], + 0, + EvoMethods.ITERATE_EXACT, + 0.0, + 0.0, + ), + ((1, 0), 100, 100, EvoMethods.ITERATE_EXACT, 0.123, 1.0), + ((1, 0), 100, 21, EvoMethods.ITERATE_EXACT, 0.0, 0.0), + ((1, 1), 100, 100, EvoMethods.ITERATE_EXACT, 0.123, 1.0), + ((1, 1), 100, 21, EvoMethods.ITERATE_EXACT, 0.123, 0.0), + ((1, 1), 10200, 10200, EvoMethods.ITERATE_EXACT, 0.123, 1.0), + ((1, 1), 10200, 10204, EvoMethods.ITERATE_EXACT, 0.123, 0.0), ] for order, mode0, mode1, method, logx, res in params: for is_log in [True, False]: @@ -107,7 +123,7 @@ def test_quad_ker(monkeypatch): order=(1, 0), mode0=label[0], mode1=label[1], - method="", + method=EvoMethods.ITERATE_EXACT, is_log=True, logx=0.123, areas=np.zeros(3), @@ -143,7 +159,7 @@ def test_quad_ker(monkeypatch): order=(1, 1), mode0=label[0], mode1=label[1], - method="iterate-exact", + method=EvoMethods.ITERATE_EXACT, is_log=True, logx=0.123, areas=np.zeros(3), @@ -171,7 +187,7 @@ def 
test_quad_ker(monkeypatch): order=(1, 0), mode0=br.non_singlet_pids_map["ns+"], mode1=0, - method="", + method=EvoMethods.ITERATE_EXACT, is_log=True, logx=0.0, areas=np.zeros(3), @@ -212,7 +228,12 @@ def compute(self, a_ref, nf, scale_from, scale_to): return a_ref -fake_managers = {"couplings": FakeCoupling()} +@dataclass(frozen=True) +class FakeManagers: + couplings: FakeCoupling + + +fake_managers = FakeManagers(couplings=FakeCoupling()) class TestOperator: @@ -436,7 +457,7 @@ def quad_ker_pegasus( order = (2, 0) mode0 = br.non_singlet_pids_map["ns+"] mode1 = 0 - method = "" + method = EvoMethods.ITERATE_EXACT logxs = np.log(int_disp.xgrid.raw) a1 = 1 a0 = 2 diff --git a/tests/eko/evolution_operator/test_matching_condition.py b/tests/eko/evolution_operator/test_matching_condition.py index c854fb90c..bb6aeebbc 100644 --- a/tests/eko/evolution_operator/test_matching_condition.py +++ b/tests/eko/evolution_operator/test_matching_condition.py @@ -22,12 +22,6 @@ def mkOME(self): (br.matching_hplus_pid, 21), (200, 200), (br.matching_hminus_pid, 200), - ]: - ome.update({key: mkOM(self.shape)}) - return ome - - def update_intrinsic_OME(self, ome): - for key in [ (br.matching_hplus_pid, br.matching_hplus_pid), (br.matching_hminus_pid, br.matching_hminus_pid), (200, br.matching_hminus_pid), @@ -35,10 +29,11 @@ def update_intrinsic_OME(self, ome): (21, br.matching_hplus_pid), ]: ome.update({key: mkOM(self.shape)}) + return ome def test_split_ad_to_evol_map(self): ome = self.mkOME() - a = MatchingCondition.split_ad_to_evol_map(ome, 3, 1, [], False) + a = MatchingCondition.split_ad_to_evol_map(ome, 3, 1) triv_keys = [ "V.V", "T3.T3", @@ -55,6 +50,14 @@ def test_split_ad_to_evol_map(self): "c+.S", "c+.g", # "c-.V", + "S.c+", + "g.c+", + "c+.c+", + "c-.c-", + "b+.b+", + "b-.b-", + "t+.t+", + "t-.t-", ] assert sorted(str(k) for k in a.op_members.keys()) == sorted( [*triv_keys, *keys3] @@ -63,36 +66,8 @@ def test_split_ad_to_evol_map(self): a.op_members[member.MemberName("V.V")].value, ome[(200, 200)].value, ) - # # if alpha is zero, nothing non-trivial should happen - b = MatchingCondition.split_ad_to_evol_map(ome, 3, 1, [], False) - assert sorted(str(k) for k in b.op_members.keys()) == sorted( - [*triv_keys, *keys3] - ) - # assert_almost_equal( - # b.op_members[member.MemberName("V.V")].value, - # np.eye(self.shape[0]), - # ) - # nf=3 + IC - self.update_intrinsic_OME(ome) - c = MatchingCondition.split_ad_to_evol_map(ome, 3, 1, [4], False) - assert sorted(str(k) for k in c.op_members.keys()) == sorted( - [*triv_keys, *keys3, "S.c+", "g.c+", "c+.c+", "c-.c-"] - ) - assert_almost_equal( - c.op_members[member.MemberName("V.V")].value, - b.op_members[member.MemberName("V.V")].value, - ) - # nf=3 + IB - d = MatchingCondition.split_ad_to_evol_map(ome, 3, 1, [5], False) - assert sorted(str(k) for k in d.op_members.keys()) == sorted( - [*triv_keys, *keys3, "b+.b+", "b-.b-"] - ) - assert_almost_equal( - d.op_members[member.MemberName("b+.b+")].value, - np.eye(self.shape[0]), - ) # nf=4 + IB - d = MatchingCondition.split_ad_to_evol_map(ome, 4, 1, [5], False) + d = MatchingCondition.split_ad_to_evol_map(ome, 4, 1) assert sorted(str(k) for k in d.op_members.keys()) == sorted( [ *triv_keys, @@ -106,6 +81,8 @@ def test_split_ad_to_evol_map(self): "b+.b+", # "b-.V", "b-.b-", + "t+.t+", + "t-.t-", ] ) assert_almost_equal( @@ -127,7 +104,7 @@ def test_split_ad_to_evol_map(self): def test_split_ad_to_evol_map_qed(self): ome = self.mkOME() - a = MatchingCondition.split_ad_to_evol_map(ome, 3, 1, [], qed=True) + a = 
MatchingCondition.split_ad_to_evol_map(ome, 3, 1, qed=True) triv_keys = [ "ph.ph", "S.S", @@ -144,7 +121,15 @@ def test_split_ad_to_evol_map_qed(self): keys3 = [ "c+.S", "c+.g", + "S.c+", + "g.c+", + "c+.c+", + "c-.c-", # "c-.V", + "b+.b+", + "b-.b-", + "t+.t+", + "t-.t-", ] assert sorted(str(k) for k in a.op_members.keys()) == sorted( [*triv_keys, *keys3] @@ -153,36 +138,8 @@ def test_split_ad_to_evol_map_qed(self): a.op_members[member.MemberName("V.V")].value, ome[(200, 200)].value, ) - # # if alpha is zero, nothing non-trivial should happen - b = MatchingCondition.split_ad_to_evol_map(ome, 3, 1, [], qed=True) - assert sorted(str(k) for k in b.op_members.keys()) == sorted( - [*triv_keys, *keys3] - ) - # assert_almost_equal( - # b.op_members[member.MemberName("V.V")].value, - # np.eye(self.shape[0]), - # ) - # nf=3 + IC - self.update_intrinsic_OME(ome) - c = MatchingCondition.split_ad_to_evol_map(ome, 3, 1, [4], qed=True) - assert sorted(str(k) for k in c.op_members.keys()) == sorted( - [*triv_keys, *keys3, "S.c+", "g.c+", "c+.c+", "c-.c-"] - ) - assert_almost_equal( - c.op_members[member.MemberName("V.V")].value, - b.op_members[member.MemberName("V.V")].value, - ) - # nf=3 + IB - d = MatchingCondition.split_ad_to_evol_map(ome, 3, 1, [5], qed=True) - assert sorted(str(k) for k in d.op_members.keys()) == sorted( - [*triv_keys, *keys3, "b+.b+", "b-.b-"] - ) - assert_almost_equal( - d.op_members[member.MemberName("b+.b+")].value, - np.eye(self.shape[0]), - ) # nf=4 + IB - d = MatchingCondition.split_ad_to_evol_map(ome, 4, 1, [5], qed=True) + d = MatchingCondition.split_ad_to_evol_map(ome, 4, 1, qed=True) assert sorted(str(k) for k in d.op_members.keys()) == sorted( [ *triv_keys, @@ -196,6 +153,8 @@ def test_split_ad_to_evol_map_qed(self): "b+.b+", # "b-.V", "b-.b-", + "t+.t+", + "t-.t-", ] ) assert_almost_equal( diff --git a/tests/eko/evolution_operator/test_ome.py b/tests/eko/evolution_operator/test_ome.py index b844290c9..e807a587b 100644 --- a/tests/eko/evolution_operator/test_ome.py +++ b/tests/eko/evolution_operator/test_ome.py @@ -8,6 +8,7 @@ from eko import interpolation, mellin from eko import scale_variations as sv from eko.evolution_operator.operator_matrix_element import ( + MatchingMethods, OperatorMatrixElement, build_ome, quad_ker, @@ -33,7 +34,7 @@ def test_build_ome_as(): aS = A_singlet((o, 0), N, nf, L, is_msbar) for a in [aNS, aS]: - for method in [None, InversionMethod.EXPANDED, InversionMethod.EXACT]: + for method in MatchingMethods: dim = len(a[0]) if o != 1: assert len(a) == o @@ -53,7 +54,7 @@ def test_build_ome_nlo(): aNSi = A_non_singlet((1, 0), N, nf, L) aSi = A_singlet((1, 0), N, nf, L, is_msbar) for a in [aNSi, aSi]: - for method in [None, InversionMethod.EXPANDED, InversionMethod.EXACT]: + for method in MatchingMethods: dim = len(a[0]) # hh assert a[0, -1, -1] != 0.0 @@ -86,7 +87,7 @@ def test_quad_ker_errors(): is_log=True, logx=0.123, areas=[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], - backward_method=None, + backward_method=MatchingMethods.FORWARD, a_s=0.0, nf=3, L=0.0, @@ -119,7 +120,7 @@ def test_quad_ker(monkeypatch): is_log=is_log, logx=0.123, areas=np.zeros(3), - backward_method=None, + backward_method=MatchingMethods.FORWARD, a_s=0.0, nf=3, L=0.0, @@ -138,7 +139,7 @@ def test_quad_ker(monkeypatch): is_log=is_log, logx=0.123, areas=np.zeros(3), - backward_method=None, + backward_method=MatchingMethods.FORWARD, a_s=0.0, nf=3, L=0.0, @@ -157,7 +158,7 @@ def test_quad_ker(monkeypatch): is_log=is_log, logx=0.0, areas=np.zeros(3), - backward_method=None, + 
backward_method=MatchingMethods.FORWARD, a_s=0.0, nf=3, L=0.0, @@ -191,7 +192,7 @@ def test_quad_ker(monkeypatch): is_log=True, logx=0.123, areas=np.zeros(3), - backward_method=InversionMethod.EXPANDED, + backward_method=MatchingMethods.BACKWARD_EXPANDED, a_s=0.0, nf=3, L=0.0, @@ -228,7 +229,7 @@ def test_quad_ker(monkeypatch): is_log=True, logx=0.123, areas=np.zeros(3), - backward_method=InversionMethod.EXACT, + backward_method=MatchingMethods.BACKWARD_EXACT, a_s=0.0, nf=3, L=0.0, @@ -252,7 +253,7 @@ def test_quad_ker(monkeypatch): is_log=True, logx=0.0, areas=np.array([0.01, 0.1, 1.0]), - backward_method=None, + backward_method=MatchingMethods.FORWARD, a_s=0.0, nf=3, L=0.0, @@ -338,6 +339,7 @@ def test_labels(self, theory_ffns, operator_card, tmp_path: pathlib.Path): path = tmp_path / "eko.tar" for skip_singlet in [True, False]: for skip_ns in [True, False]: + operator_card.configs.inversion_method = InversionMethod.EXACT operator_card.debug.skip_singlet = skip_singlet operator_card.debug.skip_non_singlet = skip_ns path.unlink(missing_ok=True) diff --git a/tests/eko/evolution_operator/test_physical.py b/tests/eko/evolution_operator/test_physical.py index e9882de5b..0ba41d91f 100644 --- a/tests/eko/evolution_operator/test_physical.py +++ b/tests/eko/evolution_operator/test_physical.py @@ -221,27 +221,21 @@ def mk_op_members(shape=(2, 2), qed=False): return om -def get_ad_to_evol_map(nf, intrinsic_range=None, qed=False): +def get_ad_to_evol_map(nf, qed=False): oms = mk_op_members(qed=qed) - m = PhysicalOperator.ad_to_evol_map(oms, nf, 1, intrinsic_range, qed) + m = PhysicalOperator.ad_to_evol_map(oms, nf, 1, qed) return sorted(map(str, m.op_members.keys())) def test_ad_to_evol_map(): triv_ops = ("S.S", "S.g", "g.S", "g.g", "V.V", "V3.V3", "T3.T3", "V8.V8", "T8.T8") # nf=3 - assert sorted(triv_ops) == get_ad_to_evol_map(3) - # nf=3 + IC - assert sorted([*triv_ops, "c+.c+", "c-.c-"]) == get_ad_to_evol_map(3, [4]) - # nf=3 + IC + IB assert sorted( - [*triv_ops, "c+.c+", "c-.c-", "b+.b+", "b-.b-"] - ) == get_ad_to_evol_map(3, [4, 5]) - # nf=4 + IC(non-existant) + IB - ks = sorted([*triv_ops, "V15.V15", "T15.T15", "b+.b+", "b-.b-"]) - assert ks == get_ad_to_evol_map(4, [4, 5]) - # nf=4 + IB - assert ks == get_ad_to_evol_map(4, [5]) + [*triv_ops, "c+.c+", "c-.c-", "b+.b+", "b-.b-", "t+.t+", "t-.t-"] + ) == get_ad_to_evol_map(3) + # nf=4 + ks = sorted([*triv_ops, "V15.V15", "T15.T15", "b+.b+", "b-.b-", "t+.t+", "t-.t-"]) + assert ks == get_ad_to_evol_map(4) # nf=6 assert sorted( [*triv_ops, "T15.T15", "V15.V15", "T24.T24", "V24.V24", "T35.T35", "V35.V35"] @@ -274,18 +268,12 @@ def test_ad_to_evol_map_qed(): "Td3.Td3", ) # nf=3 - assert sorted(triv_ops) == get_ad_to_evol_map(3, qed=True) - # nf=3 + IC - assert sorted([*triv_ops, "c+.c+", "c-.c-"]) == get_ad_to_evol_map(3, [4], qed=True) - # nf=3 + IC + IB assert sorted( - [*triv_ops, "c+.c+", "c-.c-", "b+.b+", "b-.b-"] - ) == get_ad_to_evol_map(3, [4, 5], qed=True) - # nf=4 + IC(non-existant) + IB - ks = sorted([*triv_ops, "Vu3.Vu3", "Tu3.Tu3", "b+.b+", "b-.b-"]) - assert ks == get_ad_to_evol_map(4, [4, 5], qed=True) - # nf=4 + IB - assert ks == get_ad_to_evol_map(4, [5], qed=True) + [*triv_ops, "c+.c+", "c-.c-", "b+.b+", "b-.b-", "t+.t+", "t-.t-"] + ) == get_ad_to_evol_map(3, True) + # nf=4 + ks = sorted([*triv_ops, "Vu3.Vu3", "Tu3.Tu3", "b+.b+", "b-.b-", "t+.t+", "t-.t-"]) + assert ks == get_ad_to_evol_map(4, True) # nf=6 assert sorted( [*triv_ops, "Tu3.Tu3", "Vu3.Vu3", "Td8.Td8", "Vd8.Vd8", "Tu8.Tu8", "Vu8.Vu8"] diff --git 
a/tests/eko/io/test_bases.py b/tests/eko/io/test_bases.py deleted file mode 100644 index a0165243f..000000000 --- a/tests/eko/io/test_bases.py +++ /dev/null @@ -1,83 +0,0 @@ -from dataclasses import fields - -import numpy as np - -from eko import basis_rotation as br -from eko import interpolation -from eko.io.bases import Bases - - -class TestBases: - XGRID_TEST = [1e-3, 1e-2, 1e-1, 1.0] - - def test_serialization(self): - rot = Bases(interpolation.XGrid(self.XGRID_TEST)) - - d = rot.raw - rot1 = rot.from_dict(d) - - for f in fields(Bases): - assert getattr(rot, f.name) == getattr(rot1, f.name) - - assert d["targetgrid"] is None - assert "_targetgrid" not in d - - def test_pids(self): - rot = Bases(interpolation.XGrid(self.XGRID_TEST)) - - # no check on correctness of value set - rot.inputpids = [0, 1] - # but the internal grid is unmodified - assert len(rot.pids) == 14 - # and fallback implemented for unset external bases - assert np.all(rot.targetpids == rot.pids) - - def test_grids(self): - rot = Bases(interpolation.XGrid(self.XGRID_TEST)) - - # no check on correctness of value set - rot.inputgrid = interpolation.XGrid([0.1, 1]) - # but the internal grid is unmodified - assert len(rot.xgrid) == len(self.XGRID_TEST) - # and fallback implemented for unset external grids - assert np.all(rot.targetgrid == rot.xgrid) - - def test_fallback(self): - xg = interpolation.XGrid([0.1, 1.0]) - r = Bases(xgrid=xg) - np.testing.assert_allclose(r.targetpids, r.pids) - np.testing.assert_allclose(r.inputpids, r.pids) - assert r.xgrid == xg - assert r.targetgrid == xg - assert r.inputgrid == xg - - def test_overwrite(self): - tpids = np.array([3, 4] + list(br.flavor_basis_pids[2:])) - ipids = np.array([5, 6] + list(br.flavor_basis_pids[2:])) - xg = interpolation.XGrid([0.1, 1.0]) - txg = interpolation.XGrid([0.2, 1.0]) - ixg = interpolation.XGrid([0.3, 1.0]) - r = Bases( - xgrid=xg, - _targetgrid=txg, - _inputgrid=ixg, - _targetpids=tpids, - _inputpids=ipids, - ) - np.testing.assert_allclose(r.targetpids, tpids) - np.testing.assert_allclose(r.inputpids, ipids) - assert r.xgrid == xg - assert r.targetgrid == txg - assert r.inputgrid == ixg - - def test_init(self): - xg = interpolation.XGrid([0.1, 1.0]) - txg = np.array([0.2, 1.0]) - ixg = {"grid": [0.3, 1.0], "log": True} - r = Bases(xgrid=xg, _targetgrid=txg, _inputgrid=ixg) - assert isinstance(r.xgrid, interpolation.XGrid) - assert isinstance(r.targetgrid, interpolation.XGrid) - assert isinstance(r.inputgrid, interpolation.XGrid) - assert r.xgrid == xg - assert r.targetgrid == interpolation.XGrid(txg) - assert r.inputgrid == interpolation.XGrid.load(ixg) diff --git a/tests/eko/io/test_manipulate.py b/tests/eko/io/test_manipulate.py index 3431d93ce..56ceafc0f 100644 --- a/tests/eko/io/test_manipulate.py +++ b/tests/eko/io/test_manipulate.py @@ -1,5 +1,3 @@ -import pathlib - import numpy as np import pytest @@ -21,235 +19,125 @@ def chk_keys(a, b): class TestManipulate: - def test_xgrid_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): + def test_xgrid_reshape(self): # create object - muout = 10.0 - mu2out = muout**2 - epout = (mu2out, 5) + interpdeg = 1 xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) - eko_factory.operator.mugrid = [(muout, 5)] - eko_factory.operator.xgrid = xg - o1 = eko_factory.get() + xgp = interpolation.XGrid(np.geomspace(1e-5, 1.0, 11)) lpids = 2 - o1[epout] = eko.io.Operator( + o1 = eko.io.Operator( operator=eko_identity([1, lpids, len(xg), lpids, len(xg)])[0] ) - xgp = interpolation.XGrid(np.geomspace(1e-5, 
1.0, 11)) # only target - otpath = tmp_path / "ot.tar" - o1.deepcopy(otpath) - with EKO.edit(otpath) as ot: - manipulate.xgrid_reshape(ot, xgp) - chk_keys(o1.raw, ot.raw) - assert ot[epout].operator.shape == (lpids, len(xgp), lpids, len(xg)) - ottpath = tmp_path / "ott.tar" - o1.deepcopy(ottpath) - with EKO.edit(ottpath) as ott: - with pytest.warns(Warning): - manipulate.xgrid_reshape(ott, xg) - chk_keys(o1.raw, ott.raw) - np.testing.assert_allclose(ott[epout].operator, o1[epout].operator) + ot = manipulate.xgrid_reshape(o1, xg, interpdeg, xgp) + assert ot.operator.shape == (lpids, len(xgp), lpids, len(xg)) + ott = manipulate.xgrid_reshape(ot, xgp, interpdeg, xg) + # when blowing up again a line 0 ... 0 0 1 0 0 ... 0 becomes + # 0 ... 0 0.5 0 0.5 0 ... 0 instead + np.testing.assert_allclose( + np.sum(ott.operator, axis=3), np.sum(o1.operator, axis=3) + ) # only input - oipath = tmp_path / "oi.tar" - o1.deepcopy(oipath) - with EKO.edit(oipath) as oi: - manipulate.xgrid_reshape(oi, inputgrid=xgp) - assert oi[epout].operator.shape == (lpids, len(xg), lpids, len(xgp)) - chk_keys(o1.raw, oi.raw) - oiipath = tmp_path / "oii.tar" - o1.deepcopy(oiipath) - with EKO.edit(oiipath) as oii: - with pytest.warns(Warning): - manipulate.xgrid_reshape(oii, inputgrid=xg) - chk_keys(o1.raw, oii.raw) - np.testing.assert_allclose(oii[epout].operator, o1[epout].operator) + oi = manipulate.xgrid_reshape(o1, xg, interpdeg, inputgrid=xgp) + assert oi.operator.shape == (lpids, len(xg), lpids, len(xgp)) + oii = manipulate.xgrid_reshape(oi, xgp, interpdeg, inputgrid=xg) + np.testing.assert_allclose( + np.sum(oii.operator, axis=3), np.sum(o1.operator, axis=3) + ) + with pytest.warns(Warning): + oiii = manipulate.xgrid_reshape(oii, xg, interpdeg, inputgrid=xg) + np.testing.assert_allclose(oiii.operator, oii.operator) # both - oitpath = tmp_path / "oit.tar" - o1.deepcopy(oitpath) - with EKO.edit(oitpath) as oit: - manipulate.xgrid_reshape(oit, xgp, xgp) - chk_keys(o1.raw, oit.raw) - op = eko_identity([1, 2, len(xgp), 2, len(xgp)]) - np.testing.assert_allclose(oit[epout].operator, op[0], atol=1e-10) - + oit = manipulate.xgrid_reshape(o1, xg, interpdeg, xgp, xgp) + op = eko_identity([1, 2, len(xgp), 2, len(xgp)]) + np.testing.assert_allclose(oit.operator, op[0], atol=1e-10) # op error handling - ep2 = (25, 5) - o1[ep2] = eko.io.Operator( + o1e = eko.io.Operator( operator=eko_identity([1, lpids, len(xg), lpids, len(xg)])[0], error=0.1 * eko_identity([1, lpids, len(xg), lpids, len(xg)])[0], ) - ot2path = tmp_path / "ot2.tar" - o1.deepcopy(ot2path) - with EKO.edit(ot2path) as ot2: - manipulate.xgrid_reshape(ot2, xgp) - chk_keys(o1.raw, ot2.raw) - assert ot2[ep2].operator.shape == (lpids, len(xgp), lpids, len(xg)) - assert ot2[epout].error is None - assert ot2[ep2].error is not None + assert ot.error is None + assert oi.error is None + ot2 = manipulate.xgrid_reshape(o1e, xg, interpdeg, xgp) + assert ot2.error is not None # Python error - with pytest.raises(ValueError): - manipulate.xgrid_reshape(o1) + with pytest.raises(ValueError, match="Nor inputgrid nor targetgrid"): + manipulate.xgrid_reshape(o1, xg, interpdeg) - def test_reshape_io(self, eko_factory: EKOFactory, tmp_path): - eko_factory.path = tmp_path / "eko.tar" - eko_factory.operator.configs.interpolation_polynomial_degree = 1 - # create object - o1 = eko_factory.get() - lpids = len(o1.bases.pids) - path_copy = tmp_path / "eko_copy.tar" - o1.deepcopy(path_copy) - newxgrid = interpolation.XGrid([0.1, 1.0]) - inputpids = np.eye(lpids) - inputpids[:2, :2] = np.array([[1, 
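Sketch of the operator-level reshape exercised in this test: ``xgrid_reshape`` now takes the operator, its current grid and the interpolation degree explicitly, instead of mutating a whole EKO in place. The identity operator is dummy data::

    import numpy as np

    from eko import interpolation
    from eko.io import manipulate
    from eko.io.items import Operator

    xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21))
    xgp = interpolation.XGrid(np.geomspace(1e-5, 1.0, 11))
    lpids = 2
    op = Operator(operator=np.eye(lpids * len(xg)).reshape(lpids, len(xg), lpids, len(xg)))
    # interpolate only the target (output) grid, with linear interpolation
    ot = manipulate.xgrid_reshape(op, xg, 1, xgp)
    assert ot.operator.shape == (lpids, len(xgp), lpids, len(xg))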
-1], [1, 1]]) - with EKO.edit(path_copy) as o2: - manipulate.xgrid_reshape(o2, newxgrid, newxgrid) - manipulate.flavor_reshape(o2, inputpids=inputpids) - # reload - with EKO.read(path_copy) as o3: - chk_keys(o1.raw, o3.raw) - np.testing.assert_allclose(o3.bases.inputgrid.raw, newxgrid.raw) - np.testing.assert_allclose(o3.bases.targetgrid.raw, newxgrid.raw) - # since we use a general rotation, the inputpids are erased, - # leaving just as many zeros as PIDs, as placeholders for missing - # values - np.testing.assert_allclose(o3.bases.inputpids, [0] * len(o3.bases.pids)) - # these has to be unchanged - np.testing.assert_allclose(o3.bases.targetpids, o3.bases.pids) - - def test_flavor_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): + def test_flavor_reshape(self): # create object xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) - muout = 10.0 - mu2out = muout**2 - epout = (mu2out, 5) - eko_factory.operator.xgrid = xg - eko_factory.operator.mugrid = [(muout, 5)] - o1 = eko_factory.get() - lpids = len(o1.bases.pids) + lpids = len(br.flavor_basis_pids) lx = len(xg) - o1[epout] = eko.io.Operator( + o1 = eko.io.Operator( operator=eko_identity([1, lpids, lx, lpids, lx])[0], error=None, ) - # only target - target_r = np.eye(lpids) - target_r[:2, :2] = np.array([[1, -1], [1, 1]]) - tpath = tmp_path / "ot.tar" - ttpath = tmp_path / "ott.tar" - o1.deepcopy(tpath) - with EKO.edit(tpath) as ot: - manipulate.flavor_reshape(ot, target_r) - chk_keys(o1.raw, ot.raw) - assert ot[epout].operator.shape == (lpids, len(xg), lpids, len(xg)) - ot.deepcopy(ttpath) - with EKO.edit(ttpath) as ott: - manipulate.flavor_reshape(ott, np.linalg.inv(target_r)) - np.testing.assert_allclose(ott[epout].operator, o1[epout].operator) - with pytest.warns(Warning): - manipulate.flavor_reshape(ott, np.eye(lpids)) - chk_keys(o1.raw, ott.raw) - np.testing.assert_allclose(ott[epout].operator, o1[epout].operator) # only input input_r = np.eye(lpids) input_r[:2, :2] = np.array([[1, -1], [1, 1]]) - ipath = tmp_path / "oi.tar" - iipath = tmp_path / "oii.tar" - o1.deepcopy(ipath) - with EKO.edit(ipath) as oi: - manipulate.flavor_reshape(oi, inputpids=input_r) - chk_keys(o1.raw, oi.raw) - assert oi[epout].operator.shape == (lpids, len(xg), lpids, len(xg)) - oi.deepcopy(iipath) - with EKO.edit(iipath) as oii: - manipulate.flavor_reshape(oii, inputpids=np.linalg.inv(input_r)) - np.testing.assert_allclose(oii[epout].operator, o1[epout].operator) - with pytest.warns(Warning): - manipulate.flavor_reshape(oii, inputpids=np.eye(lpids)) - chk_keys(o1.raw, oii.raw) - np.testing.assert_allclose(oii[epout].operator, o1[epout].operator) + oi = manipulate.flavor_reshape(o1, inputpids=input_r) + assert oi.operator.shape == (lpids, len(xg), lpids, len(xg)) + oii = manipulate.flavor_reshape(oi, inputpids=np.linalg.inv(input_r)) + np.testing.assert_allclose(oii.operator, o1.operator) + with pytest.warns(Warning): + oiii = manipulate.flavor_reshape(oii, inputpids=np.eye(lpids)) + np.testing.assert_allclose(oiii.operator, oii.operator) + + # only target + target_r = np.eye(lpids) + target_r[:2, :2] = np.array([[1, -1], [1, 1]]) + ot = manipulate.flavor_reshape(o1, target_r) + assert ot.operator.shape == (lpids, len(xg), lpids, len(xg)) + ott = manipulate.flavor_reshape(ot, np.linalg.inv(target_r)) + np.testing.assert_allclose(ott.operator, o1.operator) + with pytest.warns(Warning): + ottt = manipulate.flavor_reshape(ott, np.eye(lpids)) + np.testing.assert_allclose(ottt.operator, ott.operator) # both - itpath = tmp_path / "oit.tar" - 
o1.deepcopy(itpath) - with EKO.edit(itpath) as oit: - manipulate.flavor_reshape(oit, target_r, input_r) - chk_keys(o1.raw, oit.raw) - op = eko_identity([1, lpids, len(xg), lpids, len(xg)]).copy() - np.testing.assert_allclose(oit[epout].operator, op[0], atol=1e-10) + oit = manipulate.flavor_reshape(o1, target_r, input_r) + op = eko_identity([1, lpids, len(xg), lpids, len(xg)]).copy() + np.testing.assert_allclose(oit.operator, op[0], atol=1e-10) # error - fpath = tmp_path / "fail.tar" - o1.deepcopy(fpath) - with pytest.raises(ValueError): - with EKO.edit(fpath) as of: - manipulate.flavor_reshape(of) + with pytest.raises(ValueError, match="Nor inputpids nor targetpids"): + manipulate.flavor_reshape(o1) - def test_to_evol(self, eko_factory: EKOFactory, tmp_path): + def test_to_evol(self): self._test_to_all_evol( - eko_factory, - tmp_path, manipulate.to_evol, br.rotate_flavor_to_evolution, - br.flavor_basis_pids, ) - def test_to_uni_evol(self, eko_factory: EKOFactory, tmp_path): + def test_to_uni_evol(self): self._test_to_all_evol( - eko_factory, - tmp_path, manipulate.to_uni_evol, br.rotate_flavor_to_unified_evolution, - br.flavor_basis_pids, ) - def _test_to_all_evol( - self, eko_factory: EKOFactory, tmp_path, to_evol_fnc, rot_matrix, pids - ): - xgrid = interpolation.XGrid([0.5, 1.0]) - mu_out = 2.0 - mu2_out = mu_out**2 - nfout = 4 - epout = (mu2_out, nfout) - eko_factory.operator.mu0 = float(np.sqrt(1.0)) - eko_factory.operator.mugrid = [(mu_out, nfout)] - eko_factory.operator.xgrid = xgrid - eko_factory.operator.configs.interpolation_polynomial_degree = 1 - eko_factory.operator.configs.interpolation_is_log = False - eko_factory.operator.configs.ev_op_max_order = (2, 0) - eko_factory.operator.configs.ev_op_iterations = 1 - eko_factory.operator.configs.inversion_method = runcards.InversionMethod.EXACT - o00 = eko_factory.get() - o01_path = tmp_path / "o01.tar" - o00.deepcopy(o01_path) - with EKO.edit(o01_path) as o01: - to_evol_fnc(o01) - o10_path = tmp_path / "o10.tar" - o00.deepcopy(o10_path) - with EKO.edit(o10_path) as o10: - to_evol_fnc(o10, False, True) - o11_path = tmp_path / "o11.tar" - o00.deepcopy(o11_path) - with EKO.edit(o11_path) as o11: - to_evol_fnc(o11, True, True) - chk_keys(o00.raw, o11.raw) - - with EKO.edit(o01_path) as o01: - with EKO.edit(o10_path) as o10: - with EKO.read(o11_path) as o11: - # check the input rotated one - np.testing.assert_allclose(o01.bases.inputpids, rot_matrix) - np.testing.assert_allclose(o01.bases.targetpids, pids) - # rotate also target - to_evol_fnc(o01, False, True) - np.testing.assert_allclose(o01[epout].operator, o11[epout].operator) - chk_keys(o00.raw, o01.raw) - # check the target rotated one - np.testing.assert_allclose(o10.bases.inputpids, pids) - np.testing.assert_allclose(o10.bases.targetpids, rot_matrix) - # rotate also input - to_evol_fnc(o10) - np.testing.assert_allclose(o10[epout].operator, o11[epout].operator) - chk_keys(o00.raw, o10.raw) + def _test_to_all_evol(self, to_evol_fnc, rot_matrix): + # create object + xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) + lpids = len(br.flavor_basis_pids) + lx = len(xg) + o = eko.io.Operator( + operator=eko_identity([1, lpids, lx, lpids, lx])[0], + error=None, + ) + + # do it once + o01 = to_evol_fnc(o, True, False) + o10 = to_evol_fnc(o, False, True) + o11 = to_evol_fnc(o, True, True) + + # do also the other one + np.testing.assert_allclose( + to_evol_fnc(o01, False, True).operator, o11.operator, atol=1e-15 + ) + np.testing.assert_allclose( + to_evol_fnc(o10, True, False).operator, 
o11.operator, atol=1e-15 + ) diff --git a/tests/eko/io/test_metadata.py b/tests/eko/io/test_metadata.py index a5651e0c8..fcfa8dd09 100644 --- a/tests/eko/io/test_metadata.py +++ b/tests/eko/io/test_metadata.py @@ -3,11 +3,11 @@ import pytest import yaml -from eko.io import bases, metadata, paths +from eko.io import metadata, paths def test_metadata(tmp_path, caplog): - m = metadata.Metadata((1.0, 3), bases.Bases([0.1, 1.0])) + m = metadata.Metadata(origin=(1.0, 3), xgrid=[0.1, 1.0]) # errors with caplog.at_level(logging.INFO): m.update() @@ -26,7 +26,7 @@ def test_metadata(tmp_path, caplog): m.version = "0.0.0-a1~really1.0.0" m.update() # if I read back the thing should be what I set - mn = metadata.Metadata((1.0, 3), bases.Bases([0.1, 1.0])) + mn = metadata.Metadata(origin=(1.0, 3), xgrid=[0.1, 1.0]) mm = metadata.Metadata.load(tmp_path) assert m.path == tmp_path assert mm.version != mn.version diff --git a/tests/eko/io/test_runcards.py b/tests/eko/io/test_runcards.py index 6af37edcb..4fe9a8e4b 100644 --- a/tests/eko/io/test_runcards.py +++ b/tests/eko/io/test_runcards.py @@ -1,14 +1,10 @@ -import copy from io import StringIO import numpy as np import pytest import yaml -from banana.data.theories import default_card as theory_card from eko.io import runcards as rc -from eko.io.bases import Bases -from ekomark.data.operators import default_card as operator_card def test_flavored_mu2grid(): @@ -31,48 +27,6 @@ def test_flavored_mu2grid(): assert t == tuple(l) -def test_runcards_opcard(): - # check conversion - tc = copy.deepcopy(theory_card) - oc = copy.deepcopy(operator_card) - tc["Q0"] = 2.0 - # mugrid - oc["mugrid"] = [2.0, 10.0] - _nt, no = rc.update(tc, oc) - assert isinstance(no, rc.OperatorCard) - assert len(no.evolgrid) == len(oc["mugrid"]) - assert len(no.mu2grid) == len(no.evolgrid) - assert no.evolgrid[0][-1] == 4 - assert no.evolgrid[1][-1] == 5 - np.testing.assert_allclose(no.mu0, tc["Q0"]) - np.testing.assert_allclose(no.mu20, tc["Q0"] ** 2.0) - assert len(no.pids) == 14 - check_dumpable(no) - del oc["mugrid"] - # or mu2grid - oc["mu2grid"] = [9.0, 30.0, 32.0] - _nt, no = rc.update(tc, oc) - assert isinstance(no, rc.OperatorCard) - assert len(no.evolgrid) == len(oc["mu2grid"]) - assert len(no.mu2grid) == len(no.evolgrid) - assert no.evolgrid[0][-1] == 4 - assert no.evolgrid[1][-1] == 5 - assert no.evolgrid[2][-1] == 5 - check_dumpable(no) - del oc["mu2grid"] - # or Q2grid - oc["Q2grid"] = [15.0, 130.0, 140.0, 1e5] - _nt, no = rc.update(tc, oc) - assert isinstance(no, rc.OperatorCard) - assert len(no.evolgrid) == len(oc["Q2grid"]) - assert len(no.mu2grid) == len(no.evolgrid) - assert no.evolgrid[0][-1] == 4 - assert no.evolgrid[1][-1] == 5 - assert no.evolgrid[2][-1] == 5 - assert no.evolgrid[3][-1] == 6 - check_dumpable(no) - - def check_dumpable(no): """Check we can write and read to yaml.""" so = StringIO() @@ -81,50 +35,6 @@ def check_dumpable(no): noo = yaml.safe_load(so) -def test_runcards_ekomark(): - # check conversion - tc = copy.deepcopy(theory_card) - oc = copy.deepcopy(operator_card) - nt, no = rc.update(tc, oc) - assert isinstance(nt, rc.TheoryCard) - assert isinstance(no, rc.OperatorCard) - # check is idempotent - nnt, nno = rc.update(nt, no) - assert nnt == nt - assert nno == no - - -def test_runcards_quarkmass(): - tc = copy.deepcopy(theory_card) - tc["nfref"] = 5 - tc["IC"] = 1 - oc = copy.deepcopy(operator_card) - nt, no = rc.update(tc, oc) - assert nt.heavy.intrinsic_flavors == [4] - for _, scale in nt.heavy.masses: - assert np.isnan(scale) - m2s = 
rc.masses(nt, no.configs.evolution_method) - raw = rc.Legacy.heavies("m%s", tc) - raw2 = np.power(raw, 2.0) - np.testing.assert_allclose(m2s, raw2) - tc["HQ"] = "MSBAR" - tc["Qmc"] = raw[0] * 1.1 - tc["Qmb"] = raw[1] * 1.1 - tc["Qmt"] = raw[2] * 0.9 - nt, no = rc.update(tc, oc) - for _, scale in nt.heavy.masses: - assert not np.isnan(scale) - m2s = rc.masses(nt, no.configs.evolution_method) - for m1, m2 in zip(m2s, raw2): - assert not np.isclose(m1, m2) - tc["HQ"] = "Blub" - with pytest.raises(ValueError, match="mass scheme"): - _nt, _no = rc.update(tc, oc) - nt.heavy.masses_scheme = "Bla" - with pytest.raises(ValueError, match="mass scheme"): - _ms = rc.masses(nt, no.configs.evolution_method) - - def test_legacy_fallback(): assert rc.Legacy.fallback(1, 2, 3) == 1 assert rc.Legacy.fallback(None, 2, 3) == 2 diff --git a/tests/eko/io/test_struct.py b/tests/eko/io/test_struct.py index b7ff9877b..1370ccdb6 100644 --- a/tests/eko/io/test_struct.py +++ b/tests/eko/io/test_struct.py @@ -28,16 +28,14 @@ def test_new_error(self, tmp_path: pathlib.Path, theory_card, operator_card): for args in [(None, None), (theory_card, None), (None, operator_card)]: with pytest.raises(RuntimeError, match="missing"): with struct.EKO.create(tmp_path / "Blub2.tar") as builder: - eko = builder.load_cards(*args).build() + _ = builder.load_cards(*args).build() def test_load_error(self, tmp_path): # try to read from a non-tar path no_tar_path = tmp_path / "Blub.tar" no_tar_path.write_text("Blub", encoding="utf-8") - with pytest.raises(ValueError, match="tar"): + with pytest.raises(tarfile.ReadError): struct.EKO.read(no_tar_path) - with pytest.raises(ValueError, match="file mode"): - struct.EKO.open(no_tar_path, "ü") def test_properties(self, eko_factory: EKOFactory): mu = 10.0 @@ -184,3 +182,16 @@ def test_context_operator(self, eko_factory: EKOFactory): assert isinstance(op, struct.Operator) assert eko.operators.cache[Target.from_ep(ep)] is None + + def test_load_opened(self, tmp_path: pathlib.Path, eko_factory: EKOFactory): + """Test the loading of an already opened EKO.""" + eko = eko_factory.get() + eko.close() + # drop from cache to avoid double close by the fixture + eko_factory.cache = None + + assert eko.access.path is not None + read_closed = EKO.read(eko.access.path, dest=tmp_path) + read_opened = EKO.read(tmp_path, extract=False) + + assert read_closed.metadata == read_opened.metadata diff --git a/tests/eko/kernels/test_init.py b/tests/eko/kernels/test_init.py new file mode 100644 index 000000000..3717de275 --- /dev/null +++ b/tests/eko/kernels/test_init.py @@ -0,0 +1,21 @@ +from eko.io.types import EvolutionMethod +from eko.kernels import EvoMethods, ev_method + + +def test_ev_method(): + methods = { + "iterate-expanded": EvoMethods.ITERATE_EXPANDED, + "decompose-expanded": EvoMethods.DECOMPOSE_EXPANDED, + "perturbative-expanded": EvoMethods.PERTURBATIVE_EXPANDED, + "truncated": EvoMethods.TRUNCATED, + "ordered-truncated": EvoMethods.ORDERED_TRUNCATED, + "iterate-exact": EvoMethods.ITERATE_EXACT, + "decompose-exact": EvoMethods.DECOMPOSE_EXACT, + "perturbative-exact": EvoMethods.PERTURBATIVE_EXACT, + } + assert len(methods.keys()) == len(EvolutionMethod) + assert len(methods.keys()) == len(EvoMethods) + for s, i in methods.items(): + j = ev_method(EvolutionMethod(s)) + assert j == i + assert isinstance(j, int) diff --git a/tests/eko/kernels/test_kernels_QEDns.py b/tests/eko/kernels/test_kernels_QEDns.py index bb18c3e17..bfa655a81 100644 --- a/tests/eko/kernels/test_kernels_QEDns.py +++ 
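The new kernels enumeration, exercised by ``tests/eko/kernels/test_init.py``, maps each runcard string onto an integer member once, so the jitted kernels no longer compare strings. A short sketch::

    from eko.io.types import EvolutionMethod
    from eko.kernels import EvoMethods, ev_method

    method = ev_method(EvolutionMethod("iterate-exact"))
    assert method == EvoMethods.ITERATE_EXACT
    assert isinstance(method, int)  # EvoMethods is an integer enumeration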
b/tests/eko/kernels/test_kernels_QEDns.py @@ -3,6 +3,7 @@ from eko import basis_rotation as br from eko.couplings import Couplings +from eko.kernels import EvoMethods from eko.kernels import non_singlet_qed as ns from eko.quantities.couplings import CouplingEvolutionMethod, CouplingsInfo from eko.quantities.heavy_quarks import QuarkMassScheme @@ -14,7 +15,7 @@ # "perturbative-expanded", # "truncated", # "ordered-truncated", - "iterate-exact", + EvoMethods.ITERATE_EXACT, # "decompose-exact", # "perturbative-exact", ] @@ -119,9 +120,7 @@ def test_zero_true_gamma(): dict( alphas=alpharef[0], alphaem=alpharef[1], - scale=muref, - num_flavs_ref=5, - max_num_flavs=6, + ref=(muref, 5), ) ) evmod = CouplingEvolutionMethod.EXACT diff --git a/tests/eko/kernels/test_kernels_QEDsinglet.py b/tests/eko/kernels/test_kernels_QEDsinglet.py index b953c053c..2fee4e17f 100644 --- a/tests/eko/kernels/test_kernels_QEDsinglet.py +++ b/tests/eko/kernels/test_kernels_QEDsinglet.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from eko.kernels import EvoMethods from eko.kernels import singlet_qed as s from ekore.anomalous_dimensions.unpolarized import space_like as ad @@ -10,7 +11,7 @@ # "perturbative-expanded", # "truncated", # "ordered-truncated", - "iterate-exact", + EvoMethods.ITERATE_EXACT, # "decompose-exact", # "perturbative-exact", ] @@ -125,5 +126,12 @@ def test_zero_true_gamma(monkeypatch): def test_error(): with pytest.raises(NotImplementedError): s.dispatcher( - (3, 2), "AAA", np.random.rand(4, 3, 2, 2), [0.2, 0.1], [0.01], 3, 10, 10 + (3, 2), + "iterate-exact", + np.random.rand(4, 3, 2, 2), + [0.2, 0.1], + [0.01], + 3, + 10, + 10, ) diff --git a/tests/eko/kernels/test_kernels_QEDvalence.py b/tests/eko/kernels/test_kernels_QEDvalence.py index 29f0f3d7a..4b12a492b 100644 --- a/tests/eko/kernels/test_kernels_QEDvalence.py +++ b/tests/eko/kernels/test_kernels_QEDvalence.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from eko.kernels import EvoMethods from eko.kernels import valence_qed as val from ekore import anomalous_dimensions @@ -10,13 +11,13 @@ # "perturbative-expanded", # "truncated", # "ordered-truncated", - "iterate-exact", + EvoMethods.ITERATE_EXACT, # "decompose-exact", # "perturbative-exact", ] -def test_zero(monkeypatch): +def test_zero(): """No evolution results in exp(0)""" nf = 3 ev_op_iterations = 2 @@ -57,7 +58,7 @@ def test_zero(monkeypatch): ) -def test_zero_true_gamma(monkeypatch): +def test_zero_true_gamma(): """No evolution results in exp(0)""" nf = 3 ev_op_iterations = 2 @@ -101,5 +102,12 @@ def test_zero_true_gamma(monkeypatch): def test_error(): with pytest.raises(NotImplementedError): val.dispatcher( - (3, 2), "AAA", np.random.rand(4, 3, 2, 2), [0.2, 0.1], [0.01], 3, 10, 10 + (3, 2), + "iterate-exact", + np.random.rand(4, 3, 2, 2), + [0.2, 0.1], + [0.01], + 3, + 10, + 10, ) diff --git a/tests/eko/kernels/test_ns.py b/tests/eko/kernels/test_ns.py index f78282142..70e7faf8d 100644 --- a/tests/eko/kernels/test_ns.py +++ b/tests/eko/kernels/test_ns.py @@ -5,19 +5,9 @@ import ekore.anomalous_dimensions.unpolarized.space_like as ad from eko import beta +from eko.kernels import EvoMethods from eko.kernels import non_singlet as ns -methods = [ - "iterate-expanded", - "decompose-expanded", - "perturbative-expanded", - "truncated", - "ordered-truncated", - "iterate-exact", - "decompose-exact", - "perturbative-exact", -] - def test_zero(): """No evolution results in exp(0)""" @@ -25,7 +15,7 @@ def test_zero(): ev_op_iterations = 2 gamma_ns = np.array([1 + 0.0j, 1 + 0j, 1 + 0j, 1 + 
0j]) for order in [1, 2, 3, 4]: - for method in methods: + for method in EvoMethods: np.testing.assert_allclose( ns.dispatcher( (order, 0), method, gamma_ns, 1.0, 1.0, nf, ev_op_iterations @@ -54,7 +44,7 @@ def test_ode_lo(): a0 = 0.3 for a1 in [0.1, 0.2]: r = a1 * gamma_ns / (beta.beta_qcd((2, 0), nf) * a1**2) - for method in methods: + for method in EvoMethods: rhs = r * ns.dispatcher( (1, 0), method, gamma_ns, a1, a0, nf, ev_op_iterations ) @@ -91,7 +81,7 @@ def test_ode_nlo(): r = (a1 * gamma_ns[0] + a1**2 * gamma_ns[1]) / ( beta.beta_qcd((2, 0), nf) * a1**2 + beta.beta_qcd((3, 0), nf) * a1**3 ) - for method in ["iterate-exact"]: + for method in [EvoMethods.ITERATE_EXACT]: rhs = r * ns.dispatcher( (2, 0), method, gamma_ns, a1, a0, nf, ev_op_iterations ) @@ -130,7 +120,7 @@ def test_ode_nnlo(): + beta.beta_qcd((3, 0), nf) * a1**2 + beta.beta_qcd((4, 0), nf) * a1**3 ) - for method in ["iterate-exact"]: + for method in [EvoMethods.ITERATE_EXACT]: rhs = r * ns.dispatcher( (3, 0), method, gamma_ns, a1, a0, nf, ev_op_iterations ) @@ -172,7 +162,7 @@ def test_ode_n3lo(): + beta.beta_qcd((4, 0), nf) * a1**3 + beta.beta_qcd((5, 0), nf) * a1**4 ) - for method in ["iterate-exact"]: + for method in [EvoMethods.ITERATE_EXACT]: rhs = r * ns.dispatcher( (4, 0), method, gamma_ns, a1, a0, nf, ev_op_iterations ) @@ -202,7 +192,9 @@ def test_ode_n3lo(): def test_error(monkeypatch): monkeypatch.setattr("eko.beta.beta_qcd", lambda *_args: 1.0) with pytest.raises(NotImplementedError, match="order is not implemented"): - ns.dispatcher((5, 0), "iterate-exact", np.random.rand(3) + 0j, 0.2, 0.1, 3, 10) + ns.dispatcher( + (5, 0), EvoMethods.ITERATE_EXACT, np.random.rand(3) + 0j, 0.2, 0.1, 3, 10 + ) with pytest.raises(NotImplementedError): ad.gamma_ns((2, 0), 10202, 1, (0, 0, 0, 0, 0, 0, 0), 3) @@ -216,7 +208,7 @@ def test_gamma_usage(): gamma_ns = np.full(4, np.nan) for order in range(1, 5): gamma_ns[order - 1] = np.random.rand() - for method in methods: + for method in EvoMethods: r = ns.dispatcher( (order, 0), method, gamma_ns, a1, a0, nf, ev_op_iterations ) @@ -225,8 +217,8 @@ def test_gamma_usage(): for order in range(1, 5): gamma_ns = np.random.rand(order) gamma_ns[order - 1] = np.nan - for method in methods: - if method == "ordered-truncated": + for method in EvoMethods: + if method is EvoMethods.ORDERED_TRUNCATED: # we are actually dividing by a np.nan, # since the sum of U vec is nan warnings.simplefilter("ignore", RuntimeWarning) diff --git a/tests/eko/kernels/test_s.py b/tests/eko/kernels/test_s.py index 5380ba319..ff4fdfc0d 100644 --- a/tests/eko/kernels/test_s.py +++ b/tests/eko/kernels/test_s.py @@ -3,20 +3,10 @@ import numpy as np import pytest +from eko.kernels import EvoMethods from eko.kernels import singlet as s from ekore import anomalous_dimensions as ad -methods = [ - "iterate-expanded", - "decompose-expanded", - "perturbative-expanded", - "truncated", - "ordered-truncated", - "iterate-exact", - "decompose-exact", - "perturbative-exact", -] - def test_zero_lo(monkeypatch): """No evolution results in exp(0)""" @@ -35,7 +25,7 @@ def test_zero_lo(monkeypatch): np.array([[0, 0], [0, 1]]), ), ) - for method in methods: + for method in EvoMethods: np.testing.assert_allclose( s.dispatcher( (1, 0), method, gamma_s, 1, 1, nf, ev_op_iterations, ev_op_max_order @@ -75,8 +65,8 @@ def test_zero_nlo_decompose(monkeypatch): ), ) for method in [ - "decompose-expanded", - "decompose-exact", + EvoMethods.DECOMPOSE_EXPANDED, + EvoMethods.DECOMPOSE_EXACT, ]: np.testing.assert_allclose( s.dispatcher( @@ -117,8 
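Sketch of calling the non-singlet kernel dispatcher with the enum, mirroring the ``test_zero`` check in this file: with equal couplings at both ends no evolution takes place and the kernel reduces to exp(0) = 1::

    import numpy as np

    from eko.kernels import EvoMethods
    from eko.kernels import non_singlet as ns

    gamma_ns = np.array([1 + 0j, 1 + 0j, 1 + 0j, 1 + 0j])
    kernel = ns.dispatcher(
        (1, 0), EvoMethods.ITERATE_EXACT, gamma_ns, 1.0, 1.0, 3, 2
    )
    np.testing.assert_allclose(kernel, 1.0)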
+107,8 @@ def test_zero_nnlo_decompose(monkeypatch): ), ) for method in [ - "decompose-expanded", - "decompose-exact", + EvoMethods.DECOMPOSE_EXPANDED, + EvoMethods.DECOMPOSE_EXACT, ]: np.testing.assert_allclose( s.dispatcher( @@ -159,8 +149,8 @@ def test_zero_n3lo_decompose(monkeypatch): ), ) for method in [ - "decompose-expanded", - "decompose-exact", + EvoMethods.DECOMPOSE_EXPANDED, + EvoMethods.DECOMPOSE_EXACT, ]: np.testing.assert_allclose( s.dispatcher( @@ -200,7 +190,7 @@ def test_similarity(): ]: ref = s.dispatcher( order, - "decompose-exact", + EvoMethods.DECOMPOSE_EXACT, gamma_s, a1, a0, @@ -208,7 +198,7 @@ def test_similarity(): ev_op_iterations, ev_op_max_order, ) - for method in methods: + for method in EvoMethods: np.testing.assert_allclose( s.dispatcher( order, @@ -227,7 +217,9 @@ def test_similarity(): def test_error(): with pytest.raises(NotImplementedError): - s.dispatcher((4, 0), "AAA", np.random.rand(3, 2, 2), 0.2, 0.1, 3, 10, 10) + s.dispatcher( + (4, 0), "iterate-exact", np.random.rand(3, 2, 2), 0.2, 0.1, 3, 10, 10 + ) def mk_almost_diag_matrix(n, max_ang=np.pi / 8.0): @@ -247,7 +239,7 @@ def test_gamma_usage(): gamma_s = np.full((4, 2, 2), np.nan) for order in range(1, 5): gamma_s[order - 1] = mk_almost_diag_matrix(1) - for method in methods: + for method in EvoMethods: r = s.dispatcher( (order, 0), method, @@ -263,8 +255,8 @@ def test_gamma_usage(): for order in range(1, 5): gamma_s = mk_almost_diag_matrix(4) gamma_s[order - 1] = np.full((2, 2), np.nan) - for method in methods: - if "iterate" in method: + for method in EvoMethods: + if method in [EvoMethods.ITERATE_EXACT, EvoMethods.ITERATE_EXPANDED]: # we are actually dividing by the determinant of # matrix full of np.nan warnings.simplefilter("ignore", RuntimeWarning) @@ -287,8 +279,8 @@ def test_singlet_back(): nf = 4 a1 = 3.0 a0 = 4.0 - s10 = s.dispatcher(order, "iterate-exact", gamma_s, a1, a0, nf, 15, 1) + s10 = s.dispatcher(order, EvoMethods.ITERATE_EXACT, gamma_s, a1, a0, nf, 15, 1) np.testing.assert_allclose( np.linalg.inv(s10), - s.dispatcher(order, "iterate-exact", gamma_s, a0, a1, nf, 15, 1), + s.dispatcher(order, EvoMethods.ITERATE_EXACT, gamma_s, a0, a1, nf, 15, 1), ) diff --git a/tests/eko/quantities/test_couplings.py b/tests/eko/quantities/test_couplings.py index 0a3b97415..e4d194d5b 100644 --- a/tests/eko/quantities/test_couplings.py +++ b/tests/eko/quantities/test_couplings.py @@ -3,7 +3,7 @@ def test_couplings_ref(): scale = 90.0 - d = dict(alphas=0.1, alphaem=0.01, scale=scale, max_num_flavs=6, num_flavs_ref=None) + d = dict(alphas=0.1, alphaem=0.01, ref=(scale, 5)) couplings = CouplingsInfo.from_dict(d) - assert couplings.scale == scale + assert couplings.ref[0] == scale assert not couplings.em_running diff --git a/tests/eko/runner/__init__.py b/tests/eko/runner/__init__.py index c8ec5b854..0e3ea68c0 100644 --- a/tests/eko/runner/__init__.py +++ b/tests/eko/runner/__init__.py @@ -1,19 +1,21 @@ import numpy as np +from eko import basis_rotation as br + def check_shapes(o, txs, ixs, theory_card, operators_card): - tpids = len(o.bases.targetpids) - ipids = len(o.bases.inputpids) + tpids = len(br.flavor_basis_pids) + ipids = len(br.flavor_basis_pids) op_shape = (tpids, len(txs), ipids, len(ixs)) # check output = input np.testing.assert_allclose(o.xgrid.raw, operators_card.xgrid.raw) # targetgrid and inputgrid in the opcard are now ignored, we are testing this np.testing.assert_allclose( - o.bases.targetgrid.raw, + o.xgrid.raw, txs.raw, ) - np.testing.assert_allclose(o.bases.inputgrid.raw, ixs.raw) + 
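Sketch of the condensed couplings reference tested here: the ``ref`` pair replaces the former ``scale``, ``num_flavs_ref`` and ``max_num_flavs`` fields::

    from eko.quantities.couplings import CouplingsInfo

    couplings = CouplingsInfo.from_dict(
        dict(alphas=0.118, alphaem=0.007496252, ref=(91.2, 5))
    )
    assert couplings.ref[0] == 91.2  # reference scale in GeV
    assert couplings.ref[1] == 5  # number of active flavors at that scale
    assert not couplings.em_running  # QED running is off by default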
np.testing.assert_allclose(o.xgrid.raw, ixs.raw) np.testing.assert_allclose(o.mu20, operators_card.mu20) # check available operators ~o.operators diff --git a/tests/eko/runner/conftest.py b/tests/eko/runner/conftest.py index a1516b2a7..3b3bac0b3 100644 --- a/tests/eko/runner/conftest.py +++ b/tests/eko/runner/conftest.py @@ -4,6 +4,7 @@ import pytest from eko import EKO +from eko import basis_rotation as br from eko.io.items import Operator from eko.io.runcards import OperatorCard, TheoryCard from eko.runner import commons, recipes @@ -21,14 +22,14 @@ def neweko(theory_card: TheoryCard, operator_card: OperatorCard, tmp_path: Path) @pytest.fixture def identity(neweko: EKO): xs = len(neweko.xgrid.raw) - flavs = len(neweko.bases.pids) + flavs = len(br.flavor_basis_pids) return Operator(operator=np.eye(xs * flavs).reshape((xs, flavs, xs, flavs))) @pytest.fixture def ekoparts(neweko: EKO, identity: Operator): atlas = commons.atlas(neweko.theory_card, neweko.operator_card) - neweko.load_recipes(recipes.create(neweko.operator_card.evolgrid, atlas)) + neweko.load_recipes(recipes._create(neweko.operator_card.evolgrid, atlas)) for rec in neweko.recipes: neweko.parts[rec] = identity diff --git a/tests/eko/runner/test_legacy.py b/tests/eko/runner/test_legacy.py index 60395acc5..1828285c3 100644 --- a/tests/eko/runner/test_legacy.py +++ b/tests/eko/runner/test_legacy.py @@ -36,7 +36,7 @@ class FakeEM(enum.Enum): eko.runner.legacy.Runner(theory_card, operator_card, path=path) # MSbar scheme theory_card.heavy.masses_scheme = QuarkMassScheme.MSBAR - theory_card.couplings.num_flavs_ref = 5 + theory_card.couplings.ref = (91.0, 5) theory_card.heavy.masses.c.scale = 2 theory_card.heavy.masses.b.scale = 4.5 theory_card.heavy.masses.t.scale = 173.07 diff --git a/tests/eko/runner/test_operators.py b/tests/eko/runner/test_operators.py index b69066da2..7a8b4cdb7 100644 --- a/tests/eko/runner/test_operators.py +++ b/tests/eko/runner/test_operators.py @@ -2,18 +2,18 @@ from eko.io.items import Operator from eko.io.struct import EKO -from eko.runner.operators import join, retrieve +from eko.runner.operators import _retrieve, join def test_retrieve(ekoparts: EKO): evhead, _evop = next(iter(ekoparts.parts.cache.items())) matchhead, _matchop = next(iter(ekoparts.parts_matching.cache.items())) - els = retrieve([evhead] * 5, ekoparts.parts, ekoparts.parts_matching) + els = _retrieve([evhead] * 5, ekoparts.parts, ekoparts.parts_matching) assert len(els) == 5 assert all(isinstance(el, Operator) for el in els) - els = retrieve( + els = _retrieve( [evhead, matchhead, matchhead], ekoparts.parts, ekoparts.parts_matching ) assert len(els) == 3 diff --git a/tests/eko/runner/test_parts.py b/tests/eko/runner/test_parts.py index 2ec217dd7..1c7dd105b 100644 --- a/tests/eko/runner/test_parts.py +++ b/tests/eko/runner/test_parts.py @@ -5,18 +5,18 @@ def test_evolve_configs(eko_factory): # QCD@LO e10 = eko_factory.get() assert e10.theory_card.order == (1, 0) - p10 = parts.evolve_configs(e10) + p10 = parts._evolve_configs(e10) assert p10["matching_order"] == (0, 0) # QCD@N3LO + QED@N2LO w/o matching_order eko_factory.theory.order = (4, 3) eko_factory.theory.matching_order = None e43 = eko_factory.get({}) assert e43.theory_card.order == (4, 3) - p43 = parts.evolve_configs(e43) + p43 = parts._evolve_configs(e43) assert p43["matching_order"] == (3, 0) # QCD@N3LO + QED@N2LO w/ matching_order eko_factory.theory.matching_order = (3, 0) e43b = eko_factory.get({}) assert e43b.theory_card.order == (4, 3) - p43b = parts.evolve_configs(e43b) + p43b 
= parts._evolve_configs(e43b) assert p43b["matching_order"] == (3, 0) diff --git a/tests/eko/runner/test_recipes.py b/tests/eko/runner/test_recipes.py index 27bf4e860..7b93ab4c4 100644 --- a/tests/eko/runner/test_recipes.py +++ b/tests/eko/runner/test_recipes.py @@ -4,26 +4,26 @@ from eko.io.types import EvolutionPoint from eko.matchings import Atlas from eko.quantities.heavy_quarks import MatchingScales -from eko.runner.recipes import create, elements +from eko.runner.recipes import _create, _elements SCALES = MatchingScales([10.0, 20.0, 30.0]) ATLAS = Atlas(SCALES, (50.0, 5)) def test_elements(): - onestep = elements((60.0, 5), ATLAS) + onestep = _elements((60.0, 5), ATLAS) assert len(onestep) == 1 assert isinstance(onestep[0], Evolution) assert not onestep[0].cliff - backandforth = elements((60.0, 6), ATLAS) + backandforth = _elements((60.0, 6), ATLAS) assert len(backandforth) == 3 assert isinstance(backandforth[0], Evolution) assert backandforth[0].cliff assert isinstance(backandforth[1], Matching) assert not backandforth[1].inverse - down = elements((5.0, 3), ATLAS) + down = _elements((5.0, 3), ATLAS) assert all([isinstance(el, Evolution) for i, el in enumerate(down) if i % 2 == 0]) assert all([isinstance(el, Matching) for i, el in enumerate(down) if i % 2 == 1]) assert all([el.inverse for i, el in enumerate(down) if i % 2 == 1]) @@ -32,13 +32,13 @@ def test_elements(): def test_create(): evolgrid: List[EvolutionPoint] = [(60.0, 5)] - recs = create(evolgrid, ATLAS) + recs = _create(evolgrid, ATLAS) assert len(recs) == 1 evolgrid.append((60.0, 6)) - recs = create(evolgrid, ATLAS) + recs = _create(evolgrid, ATLAS) assert len(recs) == 1 + 3 evolgrid.append((70.0, 6)) - recs = create(evolgrid, ATLAS) + recs = _create(evolgrid, ATLAS) assert len(recs) == 1 + 3 + 1 diff --git a/tests/eko/scale_variations/test_diff.py b/tests/eko/scale_variations/test_diff.py index c0dd67236..13cc4938f 100644 --- a/tests/eko/scale_variations/test_diff.py +++ b/tests/eko/scale_variations/test_diff.py @@ -11,7 +11,7 @@ from eko import basis_rotation as br from eko.beta import beta_qcd_as2, beta_qcd_as3 from eko.couplings import CouplingEvolutionMethod, Couplings, CouplingsInfo -from eko.kernels import non_singlet, singlet +from eko.kernels import EvoMethods, non_singlet, singlet from eko.quantities.heavy_quarks import QuarkMassScheme from eko.scale_variations import expanded, exponentiated from ekore.anomalous_dimensions.unpolarized.space_like import gamma_ns, gamma_singlet @@ -19,7 +19,7 @@ NF = 4 Q02 = 1.65**2 Q12 = 100**2 -EV_METHOD = "truncated" +EV_METHOD = EvoMethods.TRUNCATED def compute_a_s(q2, order): @@ -27,9 +27,7 @@ def compute_a_s(q2, order): couplings=CouplingsInfo( alphas=0.1181, alphaem=0.007496, - scale=91.00, - max_num_flavs=4, - num_flavs_ref=4, + ref=(91.00, 4), ), order=order, method=CouplingEvolutionMethod.EXPANDED, diff --git a/tests/eko/scale_variations/test_expanded.py b/tests/eko/scale_variations/test_expanded.py index 6af22dd51..309a87597 100644 --- a/tests/eko/scale_variations/test_expanded.py +++ b/tests/eko/scale_variations/test_expanded.py @@ -2,7 +2,7 @@ from eko import basis_rotation as br from eko.couplings import CouplingEvolutionMethod, Couplings, CouplingsInfo -from eko.kernels import non_singlet, singlet +from eko.kernels import EvoMethods, non_singlet, singlet from eko.quantities.heavy_quarks import QuarkMassScheme from eko.scale_variations import Modes, expanded from ekore.anomalous_dimensions.unpolarized.space_like import gamma_ns, gamma_singlet @@ -10,7 +10,7 @@ NF = 4 
Q02 = 1.65**2 Q12 = 100**2 -EV_METHOD = "truncated" +EV_METHOD = EvoMethods.TRUNCATED def compute_a_s(q2, order): @@ -18,9 +18,7 @@ def compute_a_s(q2, order): couplings=CouplingsInfo( alphas=0.1181, alphaem=0.007496, - scale=91.00, - max_num_flavs=4, - num_flavs_ref=4, + ref=(91.00, 4), ), order=order, method=CouplingEvolutionMethod.EXPANDED, diff --git a/tests/eko/test_couplings.py b/tests/eko/test_couplings.py index ef32fb0d0..9b68d308b 100644 --- a/tests/eko/test_couplings.py +++ b/tests/eko/test_couplings.py @@ -54,9 +54,7 @@ def test_init(self): dict( alphas=alpharef[0], alphaem=alpharef[1], - scale=muref, - num_flavs_ref=None, - max_num_flavs=6, + ref=(muref, 5), ) ) order = (1, 0) @@ -109,7 +107,7 @@ def test_init(self): ) with pytest.raises(ValueError): coup3 = copy.deepcopy(couplings) - coup3.scale = 0 + coup3.ref = (0.0, 5) Couplings( coup3, order, @@ -153,18 +151,17 @@ def test_ref(self): (0, np.inf, np.inf), (2, 4, 175), ] + nfrefs = (3, 4, 5) alpharef = (0.118, 0.00781) muref = 91.0 - couplings = CouplingsInfo.from_dict( - dict( - alphas=alpharef[0], - alphaem=alpharef[1], - scale=muref, - num_flavs_ref=None, - max_num_flavs=6, + for thresh_setup, nfref in zip(thresh_setups, nfrefs): + couplings = CouplingsInfo.from_dict( + dict( + alphas=alpharef[0], + alphaem=alpharef[1], + ref=(muref, nfref), + ) ) - ) - for thresh_setup in thresh_setups: for order_s in [1, 2, 3, 4]: for order_em in [0, 1, 2]: for evmod in CouplingEvolutionMethod: @@ -195,9 +192,7 @@ def test_ref_copy_e841b0dfdee2f31d9ccc1ecee4d9d1a6f6624313(self): dict( alphas=alpharef[0], alphaem=alpharef[1], - scale=muref, - num_flavs_ref=3, # reference nf is needed to force the matching - max_num_flavs=6, + ref=(muref, 3), # reference nf is needed to force the matching ) ) sc = Couplings( @@ -221,9 +216,10 @@ def test_exact(self): (0, np.inf, np.inf), (2, 4, 175), ] + nfrefs = (3, 4, 5) alpharef = np.array([0.118, 0.00781]) muref = 91.0 - for thresh_setup in thresh_setups: + for thresh_setup, nfref in zip(thresh_setups, nfrefs): for qcd in range(1, 4 + 1): for qed in range(2 + 1): for em_running in [ @@ -235,9 +231,7 @@ def test_exact(self): dict( alphas=alpharef[0], alphaem=alpharef[1], - scale=muref, - num_flavs_ref=None, - max_num_flavs=6, + ref=(muref, nfref), em_running=em_running, ) ) @@ -298,9 +292,7 @@ def benchmark_expanded_n3lo(self): couplings = CouplingsInfo( alphas=alpharef[0], alphaem=alpharef[1], - scale=muref, - num_flavs_ref=None, - max_num_flavs=6, + ref=(muref, 5), ) m2c = 2 m2b = 25 diff --git a/tests/eko/test_msbar_masses.py b/tests/eko/test_msbar_masses.py index 12cb88f91..25718e962 100644 --- a/tests/eko/test_msbar_masses.py +++ b/tests/eko/test_msbar_masses.py @@ -16,9 +16,8 @@ def theory_card(theory_card: TheoryCard): th = theory_card th.order = (3, 0) th.couplings.alphas = 0.1180 - th.couplings.scale = 91.0 th.couplings.alphaem = 0.00781 - th.couplings.num_flavs_ref = 5 + th.couplings.ref = (91.0, 5) th.heavy.masses = HeavyQuarkMasses( [QuarkMassRef(val) for val in [(2.0, 2.1), (4.0, 4.1), (175.0, 174.9)]] ) diff --git a/tests/eko/test_quantities.py b/tests/eko/test_quantities.py index 0c2628b30..d3c412a25 100644 --- a/tests/eko/test_quantities.py +++ b/tests/eko/test_quantities.py @@ -33,9 +33,6 @@ def test_HeavyQuarks(): def test_HeavyInfo(): i = hq.HeavyInfo( - num_flavs_init=4, - num_flavs_max_pdf=6, - intrinsic_flavors=[4, 5], masses=hq.HeavyQuarkMasses( [ hq.QuarkMassRef([2.0, nan]), diff --git a/tests/ekobox/test_apply.py b/tests/ekobox/test_apply.py index 8fa23548f..70eb74adc 100644 
--- a/tests/ekobox/test_apply.py +++ b/tests/ekobox/test_apply.py @@ -1,5 +1,6 @@ import numpy as np +from eko import basis_rotation as br from ekobox import apply from tests.conftest import EKOFactory @@ -12,13 +13,13 @@ def test_apply(self, eko_factory: EKOFactory, fake_pdf): pdf_grid = apply.apply_pdf(eko, fake_pdf) assert len(pdf_grid) == len(eko.evolgrid) pdfs = pdf_grid[ep_out]["pdfs"] - assert list(pdfs.keys()) == list(eko.bases.targetpids) + assert list(pdfs.keys()) == list(br.flavor_basis_pids) # rotate to target_grid target_grid = [0.75] pdf_grid = apply.apply_pdf(eko, fake_pdf, target_grid) assert len(pdf_grid) == 1 pdfs = pdf_grid[ep_out]["pdfs"] - assert list(pdfs.keys()) == list(eko.bases.targetpids) + assert list(pdfs.keys()) == list(br.flavor_basis_pids) def test_apply_flavor(self, eko_factory: EKOFactory, fake_pdf, monkeypatch): eko = eko_factory.get() @@ -27,12 +28,8 @@ def test_apply_flavor(self, eko_factory: EKOFactory, fake_pdf, monkeypatch): monkeypatch.setattr( "eko.basis_rotation.rotate_flavor_to_evolution", np.ones((14, 14)) ) - monkeypatch.setattr( - "eko.basis_rotation.flavor_basis_pids", - eko.bases.targetpids, - ) fake_evol_basis = tuple( - ["a", "b"] + [str(x) for x in range(len(eko.bases.pids) - 2)] + ["a", "b"] + [str(x) for x in range(len(br.flavor_basis_pids) - 2)] ) monkeypatch.setattr("eko.basis_rotation.evol_basis", fake_evol_basis) pdf_grid = apply.apply_pdf(eko, fake_pdf, rotate_to_evolution_basis=True) diff --git a/tests/ekobox/test_cards.py b/tests/ekobox/test_cards.py index 1a35e137e..00ef0e4c3 100644 --- a/tests/ekobox/test_cards.py +++ b/tests/ekobox/test_cards.py @@ -10,14 +10,14 @@ def test_generate_ocard(): mu0 = 1.65 mugrid = [(10.0, 6), (100.0, 5)] op = cards.example.operator() - op.mu0 = mu0 + op.init = (mu0, 4) op.mugrid = mugrid assert pytest.approx(op.mugrid) == mugrid assert pytest.approx(op.mu2grid) == np.array([mu**2 for mu, _ in mugrid]) assert op.configs.interpolation_polynomial_degree == 4 mugrid1 = [100.0, 5] op = cards.example.operator() - op.mu0 = mu0 + op.init = (mu0, 4) op.mugrid = mugrid1 op.configs.interpolation_polynomial_degree = 2 op.configs.interpolation_is_log = False @@ -35,7 +35,7 @@ def test_dump_load_op_card(tmp_path, cd): path1 = tmp_path / "debug_op.yaml" path2 = tmp_path / "debug_op_two.yaml" op = cards.example.operator() - op.mu0 = mu0 + op.init = (mu0, 4) cards.dump(op.raw, path1) cards.dump(op.raw, path2) op_loaded = cards.load(path1) @@ -52,7 +52,7 @@ def test_generate_theory_card(): assert theory.order[0] == 2 rawt = cards.example.raw_theory() assert isinstance(rawt, dict) - assert theory.heavy.num_flavs_init == rawt["heavy"]["num_flavs_init"] + assert theory.heavy.masses.c[0] == rawt["heavy"]["masses"][0][0] def containsnan(obj) -> bool: diff --git a/tests/ekobox/test_evol_pdf.py b/tests/ekobox/test_evol_pdf.py index 141787b49..7d788277b 100644 --- a/tests/ekobox/test_evol_pdf.py +++ b/tests/ekobox/test_evol_pdf.py @@ -12,7 +12,7 @@ def init_cards(): op = cards.example.operator() - op.mu0 = 1.65 + op.init = (1.65, 4) op.xgrid = XGrid([0.1, 0.5, 1.0]) op.configs.interpolation_polynomial_degree = 1 theory = cards.example.theory() diff --git a/tests/ekobox/test_info_file.py b/tests/ekobox/test_info_file.py index acf3f7272..62df7d739 100644 --- a/tests/ekobox/test_info_file.py +++ b/tests/ekobox/test_info_file.py @@ -10,7 +10,7 @@ def test_build(): theory.order = (2, 0) theory.couplings.alphas = 0.2 op = cards.example.operator() - op.mu0 = 1.0 + op.init = (1.0, 3) op.mugrid = [(10.0, 5), (100.0, 5)] info = 
info_file.build( theory, op, 4, info_update={"SetDesc": "Prova", "NewArg": 15.3, "MTop": 1.0} diff --git a/tests/ekobox/test_utils.py b/tests/ekobox/test_utils.py index 557db679c..1233d1d91 100644 --- a/tests/ekobox/test_utils.py +++ b/tests/ekobox/test_utils.py @@ -16,10 +16,9 @@ def test_ekos_product(tmp_path): theory = cards.example.theory() theory.order = (1, 0) - theory.heavy.num_flavs_init = 5 op1 = cards.example.operator() - op1.mu0 = mu01 + op1.init = (mu01, 5) op1.mugrid = mugrid1 op1.xgrid = xgrid op1.configs.interpolation_polynomial_degree = 1 @@ -28,13 +27,13 @@ def test_ekos_product(tmp_path): mugrid2 = [(8.0, 5), (10.0, 5), (12.0, 5)] op2 = cards.example.operator() - op2.mu0 = mu0 + op2.init = (mu0, 5) op2.mugrid = mugrid2 op2.xgrid = xgrid op2.configs.interpolation_polynomial_degree = 1 op_err = copy.deepcopy(op2) - op_err.mu0 = mu01 + op_err.init = (mu01, 5) mu2first = (mugrid2[0][0] ** 2, mugrid2[0][1]) diff --git a/tests/ekomark/data/__init__.py b/tests/ekomark/data/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/ekomark/data/test_init.py b/tests/ekomark/data/test_init.py new file mode 100644 index 000000000..278a06181 --- /dev/null +++ b/tests/ekomark/data/test_init.py @@ -0,0 +1,96 @@ +import copy + +import numpy as np +import pytest +from banana.data.theories import default_card as theory_card + +from eko.io import runcards as rc +from ekomark.data import update_runcards +from ekomark.data.operators import default_card as operator_card + +from ...eko.io.test_runcards import check_dumpable + + +def test_runcards_opcard(): + # check conversion + tc = copy.deepcopy(theory_card) + oc = copy.deepcopy(operator_card) + tc["Q0"] = 2.0 + # mugrid + oc["mugrid"] = [2.0, 10.0] + _nt, no = update_runcards(tc, oc) + assert isinstance(no, rc.OperatorCard) + assert len(no.evolgrid) == len(oc["mugrid"]) + assert len(no.mu2grid) == len(no.evolgrid) + assert no.evolgrid[0][-1] == 4 + assert no.evolgrid[1][-1] == 5 + np.testing.assert_allclose(no.init[0], tc["Q0"]) + np.testing.assert_allclose(no.mu20, tc["Q0"] ** 2.0) + assert len(no.pids) == 14 + check_dumpable(no) + del oc["mugrid"] + # or mu2grid + oc["mu2grid"] = [9.0, 30.0, 32.0] + _nt, no = update_runcards(tc, oc) + assert isinstance(no, rc.OperatorCard) + assert len(no.evolgrid) == len(oc["mu2grid"]) + assert len(no.mu2grid) == len(no.evolgrid) + assert no.evolgrid[0][-1] == 4 + assert no.evolgrid[1][-1] == 5 + assert no.evolgrid[2][-1] == 5 + check_dumpable(no) + del oc["mu2grid"] + # or Q2grid + oc["Q2grid"] = [15.0, 130.0, 140.0, 1e5] + _nt, no = update_runcards(tc, oc) + assert isinstance(no, rc.OperatorCard) + assert len(no.evolgrid) == len(oc["Q2grid"]) + assert len(no.mu2grid) == len(no.evolgrid) + assert no.evolgrid[0][-1] == 4 + assert no.evolgrid[1][-1] == 5 + assert no.evolgrid[2][-1] == 5 + assert no.evolgrid[3][-1] == 6 + check_dumpable(no) + + +def test_runcards_ekomark(): + # check conversion + tc = copy.deepcopy(theory_card) + oc = copy.deepcopy(operator_card) + nt, no = update_runcards(tc, oc) + assert isinstance(nt, rc.TheoryCard) + assert isinstance(no, rc.OperatorCard) + # check is idempotent + nnt, nno = update_runcards(nt, no) + assert nnt == nt + assert nno == no + + +def test_runcards_quarkmass(): + tc = copy.deepcopy(theory_card) + tc["nfref"] = 5 + tc["IC"] = 1 + oc = copy.deepcopy(operator_card) + nt, no = update_runcards(tc, oc) + for _, scale in nt.heavy.masses: + assert np.isnan(scale) + m2s = rc.masses(nt, no.configs.evolution_method) + raw = rc.Legacy.heavies("m%s", 
tc) + raw2 = np.power(raw, 2.0) + np.testing.assert_allclose(m2s, raw2) + tc["HQ"] = "MSBAR" + tc["Qmc"] = raw[0] * 1.1 + tc["Qmb"] = raw[1] * 1.1 + tc["Qmt"] = raw[2] * 0.9 + nt, no = update_runcards(tc, oc) + for _, scale in nt.heavy.masses: + assert not np.isnan(scale) + m2s = rc.masses(nt, no.configs.evolution_method) + for m1, m2 in zip(m2s, raw2): + assert not np.isclose(m1, m2) + tc["HQ"] = "Blub" + with pytest.raises(ValueError, match="mass scheme"): + _nt, _no = update_runcards(tc, oc) + nt.heavy.masses_scheme = "Bla" + with pytest.raises(ValueError, match="mass scheme"): + _ms = rc.masses(nt, no.configs.evolution_method)
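
The recurring change across these test updates is the couplings reference: the separate `scale`, `num_flavs_ref` and `max_num_flavs` fields of `CouplingsInfo` are collapsed into a single `ref=(scale, nf)` tuple. A minimal sketch of the new construction, mirroring `tests/eko/quantities/test_couplings.py` (the numerical values are only illustrative):

```python
from eko.quantities.couplings import CouplingsInfo

scale = 90.0
# one (scale, nf) tuple replaces the old scale / num_flavs_ref / max_num_flavs trio
couplings = CouplingsInfo.from_dict(dict(alphas=0.1, alphaem=0.01, ref=(scale, 5)))

assert couplings.ref[0] == scale  # reference scale
assert couplings.ref[1] == 5      # number of active flavors at that scale
assert not couplings.em_running   # alpha_em running still defaults to off
```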
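
The same convention propagates to the run cards: the operator card's scalar `mu0` becomes an `init=(mu0, nf)` evolution point, and `heavy.num_flavs_init` / `intrinsic_flavors` drop out of the theory card, since the initial flavor number now lives in `init`. A short sketch with the ekobox example cards, assuming the usual `from ekobox import cards` import (the values are illustrative):

```python
from ekobox import cards

op = cards.example.operator()
op.init = (1.65, 4)                  # (initial scale, nf) replaces op.mu0
op.mugrid = [(10.0, 6), (100.0, 5)]  # target (scale, nf) points, unchanged

theory = cards.example.theory()
theory.couplings.ref = (91.0, 5)     # same (scale, nf) convention as CouplingsInfo
```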
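
Finally, the kernel tests replace the bare evolution-method strings with the `EvoMethods` enum from `eko.kernels`. A hedged sketch of the new usage; it assumes the `s` module in those tests is `eko.kernels.singlet`, which matches `test_singlet_back` but is not visible in the hunks themselves:

```python
import numpy as np
import pytest

from eko.kernels import EvoMethods, singlet

# enum members replace the old method strings
EV_METHOD = EvoMethods.TRUNCATED  # was: EV_METHOD = "truncated"

# the enum is directly iterable, so loops no longer need a hand-written list
methods = list(EvoMethods)
assert EvoMethods.ITERATE_EXACT in methods

# passing an old-style string is rejected, as exercised in test_error above
with pytest.raises(NotImplementedError):
    singlet.dispatcher(
        (4, 0), "iterate-exact", np.random.rand(3, 2, 2), 0.2, 0.1, 3, 10, 10
    )
```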