diff --git a/README.md b/README.md index c474ac5..5ddb16b 100644 --- a/README.md +++ b/README.md @@ -197,7 +197,7 @@ def main(): s0p = manager.register(ld.Scalar("s0p", parameter("s0p"))) s0n = manager.register(ld.Scalar("s0n", parameter("s0n"))) - d2p = manager.register(ld.ComplexScalar("d2", parameter("d2 re"), parameter("d2 im"))) + d2p = manager.register(ld.ComplexScalar("d2p", parameter("d2 re"), parameter("d2 im"))) pos_re = (s0p * z00p.real() + d2p * z22p.real()).norm_sqr() pos_im = (s0p * z00p.imag() + d2p * z22p.imag()).norm_sqr() @@ -205,15 +205,21 @@ def main(): neg_im = (s0n * z00n.imag()).norm_sqr() model = pos_re + pos_im + neg_re + neg_im - nll = ld.NLL(manager, ds_data, ds_mc) - status = nll.minimize(model, [1.0] * len(nll.parameters)) + nll = ld.NLL(manager, model, ds_data, ds_mc) + status = nll.minimize([1.0] * len(nll.parameters)) print(status) - fit_weights = nll.project(status.x, model) + fit_weights = nll.project(status.x) + s0p_weights = nll.project_with(status.x, ["z00p", "s0p"]) + s0n_weights = nll.project_with(status.x, ["z00n", "s0n"]) + d2p_weights = nll.project_with(status.x, ["z22p", "d2p"]) masses_mc = res_mass.value_on(ds_mc) masses_data = res_mass.value_on(ds_data) weights_data = ds_data.weights plt.hist(masses_data, weights=weights_data, bins=80, range=(1.0, 2.0), label="Data", histtype="step") plt.hist(masses_mc, weights=fit_weights, bins=80, range=(1.0, 2.0), label="Fit", histtype="step") + plt.hist(masses_mc, weights=s0p_weights, bins=80, range=(1.0, 2.0), label="$S_0^+$", histtype="step") + plt.hist(masses_mc, weights=s0n_weights, bins=80, range=(1.0, 2.0), label="$S_0^-$", histtype="step") + plt.hist(masses_mc, weights=d2p_weights, bins=80, range=(1.0, 2.0), label="$D_2^+$", histtype="step") plt.legend() plt.savefig("demo.svg") diff --git a/benches/kmatrix_benchmark.rs b/benches/kmatrix_benchmark.rs index 9c7011c..acfa16d 100644 --- a/benches/kmatrix_benchmark.rs +++ b/benches/kmatrix_benchmark.rs @@ -139,7 +139,7 @@ fn kmatrix_nll_benchmark(c: &mut Criterion) { let neg_re = (&s0n * z00n.real()).norm_sqr(); let neg_im = (&s0n * z00n.imag()).norm_sqr(); let model = pos_re + pos_im + neg_re + neg_im; - let nll = NLL::new(&manager, &ds_data, &ds_mc, &model); + let nll = NLL::new(&manager, &model, &ds_data, &ds_mc); let mut rng = rand::thread_rng(); let range = Uniform::new(-100.0, 100.0); c.bench_function("kmatrix benchmark (nll)", |b| { diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index dee52b7..0000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -laddu -sphinx==8.1.3 -sphinx-rtd-theme==3.0.1 diff --git a/docs/source/conf.py b/docs/source/conf.py index c71ca5c..8a3ed03 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -17,7 +17,7 @@ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration -extensions = ["sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.napoleon"] +extensions = ["sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.napoleon", "sphinx_copybutton"] autodoc_default_options = { "members": True, diff --git a/docs/source/index.rst b/docs/source/index.rst index 9a054a8..a0c55b4 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,13 +1,13 @@ laddu ===== -``laddu`` is an library for analyzing particle physics data. It is written in Rust with bindings to Python which are documented here. 
+`laddu `_ is a library for analyzing particle physics data. It is written in Rust with bindings to Python which are documented here.
-Right now, this page just documents the basic Python API of the library, but the plan is to eventually write a tutorial here for general use. See the `GitHub Repository `_ for more details.
 
 .. toctree::
-   :caption: API Reference
-   :maxdepth: 2
+   :caption: Contents
+   :maxdepth: 1
    :name: sec-api
-
+
+   tutorials/index
    modules/index
diff --git a/docs/source/modules/index.rst b/docs/source/modules/index.rst
index 49e2e1d..c83e01a 100644
--- a/docs/source/modules/index.rst
+++ b/docs/source/modules/index.rst
@@ -1,5 +1,7 @@
-Submodules
-==========
+API Reference
+=============
+
+This is the main API documentation for ``laddu``.
 
 .. toctree::
    :maxdepth: 1
diff --git a/docs/source/modules/laddu/amplitudes/index.rst b/docs/source/modules/laddu/amplitudes/index.rst
index 49304f3..5eea678 100644
--- a/docs/source/modules/laddu/amplitudes/index.rst
+++ b/docs/source/modules/laddu/amplitudes/index.rst
@@ -1,4 +1,4 @@
-Amplitudes
+amplitudes
 ==========
 
 .. automodule:: laddu.amplitudes
diff --git a/docs/source/modules/laddu/data.rst b/docs/source/modules/laddu/data.rst
index e39671f..61867b7 100644
--- a/docs/source/modules/laddu/data.rst
+++ b/docs/source/modules/laddu/data.rst
@@ -1,4 +1,4 @@
-Data
+data
 ====
 
 .. automodule:: laddu.data
diff --git a/docs/source/modules/laddu/likelihoods.rst b/docs/source/modules/laddu/likelihoods.rst
index 51559c4..ee98a00 100644
--- a/docs/source/modules/laddu/likelihoods.rst
+++ b/docs/source/modules/laddu/likelihoods.rst
@@ -1,4 +1,4 @@
-Likelihoods
+likelihoods
 ===========
 
 .. automodule:: laddu.likelihoods
diff --git a/docs/source/modules/laddu/utils/index.rst b/docs/source/modules/laddu/utils/index.rst
index d93e072..4ddb476 100644
--- a/docs/source/modules/laddu/utils/index.rst
+++ b/docs/source/modules/laddu/utils/index.rst
@@ -1,4 +1,4 @@
-Utilities
+utils
 =========
 
 .. toctree::
diff --git a/docs/source/tutorials/binned_fit.rst b/docs/source/tutorials/binned_fit.rst
new file mode 100644
index 0000000..14ae92c
--- /dev/null
+++ b/docs/source/tutorials/binned_fit.rst
@@ -0,0 +1,4 @@
+Binned Fitting Tutorial
+=======================
+
+.. note:: Tutorial under construction!
diff --git a/docs/source/tutorials/index.rst b/docs/source/tutorials/index.rst
new file mode 100644
index 0000000..1145c73
--- /dev/null
+++ b/docs/source/tutorials/index.rst
@@ -0,0 +1,11 @@
+Tutorials
+=========
+
+To get started with ``laddu``, please refer to the following tutorials for the basic operation of the library, and to the API reference for more details.
+
+.. toctree::
+   :maxdepth: 1
+
+   unbinned_fit
+   binned_fit
+   live_plotting
diff --git a/docs/source/tutorials/live_plotting.rst b/docs/source/tutorials/live_plotting.rst
new file mode 100644
index 0000000..9af407d
--- /dev/null
+++ b/docs/source/tutorials/live_plotting.rst
@@ -0,0 +1,4 @@
+Live Plotting
+=============
+
+.. note:: Tutorial under construction!
diff --git a/docs/source/tutorials/unbinned_fit.rst b/docs/source/tutorials/unbinned_fit.rst
new file mode 100644
index 0000000..7a92aea
--- /dev/null
+++ b/docs/source/tutorials/unbinned_fit.rst
@@ -0,0 +1,325 @@
+Unbinned Fitting Tutorial
+=========================
+
+Theory
+------
+
+Perhaps the simplest kind of fitting we can do with ``laddu`` is one which involves fitting all of the data simultaneously to one model. Suppose we have some data representing events from a particular reaction.
+These data might contain resonances from intermediate particles which can be reconstructed from the four-momenta we've recorded, and those resonances have their own observable properties like angular momentum and parity. We can construct a model of these resonances in both mass :math:`m` and angular space :math:`\Omega` and define :math:`p(x; m, \Omega)` to be the probability of observing an event :math:`x` at the given point in phase space. Since we also observe events themselves in a probabilistic manner, we must also consider the probability of observing :math:`N` events from such a process. This can be done with an extended maximum likelihood, following the derivation by [Barlow]_. First, we admit that while we defined :math:`p(x; m, \Omega)`, we assumed it would have unit normalization. However, we will now consider replacing this with a function whose normalization is not so constrained, called the intensity:
+
+.. math:: \int \mathcal{I}(x; m, \Omega) \text{d}x = \mathcal{N}(m, \Omega)
+
+We do this because our observed number of events :math:`N` will deviate from the expected number predicted by our model (:math:`\mathcal{N}(m, \Omega)`) according to Poisson statistics, since the observation of events is a random process and is also subject to the efficiency of the detector (which we will discuss later).
+
+Of course, we now have the problem of maximizing the resultant likelihood from this unnormalized distribution. We can write the likelihood using a Poisson probability distribution multiplied by the original product of probabilities over each observed event:
+
+.. math::
+
+   \mathcal{L} &= e^{-\mathcal{N}}\frac{\mathcal{N}^N}{N!} \prod_{i=1}^{N} p(x_i; m, \Omega) \\
+   &= \frac{e^{-\mathcal{N}}}{N!} \prod_{i=1}^{N} \mathcal{I}(x_i; m, \Omega)
+
+given that the extended probability is related to the standard one by :math:`\mathcal{I} = \mathcal{N} p`. Next, we will consider that the efficiency of the detector can be modeled with a function :math:`\eta(x)`. In reality, we generally cannot know this function to any level where it would be useful in such a minimization, but we can approximate it through a finite sample of simulated events passed through a simulation of the detector used in the experiment. We will then say that
+
+.. math:: \mathcal{N}'(m,\Omega) = \int \mathcal{I}(x; m, \Omega)\eta(x)\text{d}x
+
+gives the predicted number of events with efficiency incorporated, so
+
+.. math:: \mathcal{L} = \frac{e^{-\mathcal{N}'}}{N!}\prod_{i=1}^{N}\mathcal{I}(x_i; m, \Omega)
+
+While we could mathematically maximize the likelihood given above, a large product of terms between zero and one (or floating point values in general) is numerically unstable. Instead, we rephrase the problem from maximizing the likelihood to maximizing the natural log of the likelihood, since the logarithm is monotonic and the log of a product is just the sum of the logs of the terms. Furthermore, since most optimization algorithms prefer to minimize functions rather than maximize them, we can just flip the sign. The negative log of the extended likelihood (times two for error estimation purposes) is given by
+
+.. math::
+
+   -2\ln\mathcal{L} &= -2\left(\ln\left[\prod_{i=1}^{N}\mathcal{I}(x_i; m, \Omega)\right] - \mathcal{N}' - \ln N! \right) \\
+   &= -2\left(\ln\left[\prod_{i=1}^{N}\mathcal{I}(x_i; m, \Omega)\right] - \left[\int \mathcal{I}(x; m, \Omega)\eta(x)\text{d}x \right] - \ln N! \right) \\
+   &= -2\left(\left[\sum_{i=1}^{N}\ln \mathcal{I}(x_i; m, \Omega)\right] - \left[\int \mathcal{I}(x; m, \Omega)\eta(x)\text{d}x \right] - \ln N! \right)
+
+As mentioned, we don't actually know the analytical form of :math:`\eta(x)`, but we can approximate it using Monte Carlo data. Assume we generate some data without any explicit physics model other than the phase space of the channel and pass it through a simulation of the detector. We will call these the "generated" and "accepted" datasets respectively. We can approximate this integral as a finite sum over this simulated data:
+
+.. math:: \int \mathcal{I}(x; m, \Omega)\eta(x)\text{d}x \approx \frac{1}{\mathbb{P} N_g} \sum_{i=1}^{N_a} \mathcal{I}(x_i; m, \Omega)
+
+where :math:`N_g` and :math:`N_a` are the sizes of the generated and accepted datasets respectively, the sum is over accepted events only, and :math:`\mathbb{P}` is the area of the integration region. This last term is another unknown, but in practice, we can consider that :math:`\mathcal{I}` could be rescaled by this factor, and the multiplicative factor in the first part of the negative log-likelihood would be extracted as the additive term :math:`N\ln\mathbb{P}`, which is a constant in parameter space and therefore doesn't affect the overall minimization.
+
+Removing all such constants, we obtain the following form for the negative log-likelihood:
+
+.. math:: -2\ln\mathcal{L} = -2\left(\left[\sum_{i=1}^{N}\ln \mathcal{I}(x_i; m, \Omega)\right] - \left[ \frac{1}{N_a} \sum_{i=1}^{N_a} \mathcal{I}(x_i; m, \Omega) \right]\right)
+
+Next, consider that events in both the data and in the Monte Carlo might have weights associated with them. We can easily adjust the negative log-likelihood to account for weighted events:
+
+.. math:: -2\ln\mathcal{L} = -2\left(\left[\sum_{i=1}^{N} w_i \ln \mathcal{I}(x_i; m, \Omega)\right] - \left[ \frac{1}{N_a} \sum_{i=1}^{N_a} w_i \mathcal{I}(x_i; m, \Omega) \right]\right)
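+To make the bookkeeping concrete, below is a minimal NumPy sketch of this final expression. The per-event intensities are taken as precomputed inputs here (``laddu`` computes them internally from the model), so this is only an illustration of the arithmetic:
+
+.. code:: python
+
+   import numpy as np
+
+   def two_nll(data_weights, data_intensities, mc_weights, mc_intensities):
+       """Weighted extended -2 ln(L) from per-event intensities (illustration only)."""
+       n_accmc = len(mc_weights)
+       # sum_i w_i ln I(x_i) over data events
+       data_term = np.sum(data_weights * np.log(data_intensities))
+       # (1/N_a) sum_i w_i I(x_i) over accepted Monte Carlo events
+       mc_term = np.sum(mc_weights * mc_intensities) / n_accmc
+       return -2.0 * (data_term - mc_term)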
+To visualize the result after minimization, we can weight each accepted Monte Carlo event by :math:`w \mathcal{I}(\text{event}) / N_a` to see the result without acceptance correction, or we can weight each generated Monte Carlo event by :math:`\mathcal{I}(\text{event}) / N_a` (generally the generated Monte Carlo is not weighted) to obtain the result corrected for the efficiency of the detector.
+
+Example
+-------
+
+``laddu`` takes care of most of the math above and requires the user to provide data and the intensity function :math:`\mathcal{I}(\text{event};\text{parameters})`. For the rest of this tutorial, this function will be referred to as the "model". In ``laddu``, we construct the model out of modular "amplitudes" (:math:`A(e; \vec{p})`) which can be added and multiplied together. Additionally, since these amplitudes are complex-valued functions (:math:`\mathbb{R}^n \to \mathbb{C}`), we can also take their real part, imaginary part, or the square of their norm (:math:`AA^*`). Generally, when building a model, we compartmentalize amplitude terms into coherent summations, where coherent just means we take the norm-squared of the sum. Since the likelihood should be strictly real, the real part of the model's evaluation is taken to calculate the intensity, so imaginary terms (which should be zero in practice) are discarded.
+
+For a simple unbinned fit, we must first obtain some data. ``laddu`` does not currently have a built-in event generator, so it is recommended that users use other methods of generating Monte Carlo data. For the sake of this tutorial, we will assume that these data files are readily available as Parquet files.
+
+.. note:: The Parquet file format is not common in particle physics but is ubiquitous in data science. The structure that ``laddu`` requires is specified in the API reference and can be generated via `pandas `_, `polars `_ or most other data libraries. The only difficulty is translating existing data (which is likely in the ROOT format) into this representation. For this process, `uproot `_ is recommended to avoid using ROOT directly. There is also an executable ``amptools-to-laddu``, installed alongside the Python package, which can convert directly from ROOT files in the AmpTools format to the equivalent ``laddu`` Parquet files. The Python API also exposes the underlying conversion method in its ``convert`` submodule.
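+As a rough sketch of what writing such a file could look like with ``pandas``, the column names below are illustrative assumptions (a block of four-momentum components per particle, optional polarization vectors, and a weight); consult the API reference for the authoritative schema:
+
+.. code:: python
+
+   import numpy as np
+   import pandas as pd
+
+   n = 1000  # dummy events, just to make this sketch runnable
+   rng = np.random.default_rng(0)
+   df = pd.DataFrame(
+       {
+           # four-momentum of particle 0 (the beam photon in this tutorial)
+           "p4_0_E": rng.uniform(8.0, 9.0, n),
+           "p4_0_Px": np.zeros(n),
+           "p4_0_Py": np.zeros(n),
+           "p4_0_Pz": rng.uniform(8.0, 9.0, n),
+           # ... similar columns for each remaining particle ...
+           # polarization vector associated with particle 0
+           "eps_0_x": np.full(n, 0.35),
+           "eps_0_y": np.zeros(n),
+           "eps_0_z": np.zeros(n),
+           # per-event weights
+           "weight": np.ones(n),
+       }
+   )
+   df.to_parquet("data.parquet")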
+Reading data with ``laddu`` is as simple as using the ``laddu.open`` function. It takes the path to the data file as its argument:
+
+.. code-block:: python
+
+   import laddu as ld
+
+   data_ds = ld.open("data.parquet")
+   accmc_ds = ld.open("accmc.parquet")
+   genmc_ds = ld.open("genmc.parquet")
+
+Next, we need to construct a model. Let's assume that the dataset contains events from the channel :math:`\gamma p \to K_S^0 K_S^0 p'` and that the measured particles in the data files are :math:`[\gamma, p', K_{S,1}^0, K_{S,2}^0]`. This setup mimics the GlueX experiment at Jefferson Lab (the momentum of the initial proton target is not measured and can be reasonably assumed to be close to zero in magnitude). Furthermore, because GlueX uses a polarized beam, we will assume the polarization fraction and angle are stored in the data files.
+
+.. note:: The four-momenta in the datasets need to be in the center-of-momentum frame, which is the only frame that can be considered invariant between different experiments. Some of the amplitudes used will boost particles from the center-of-momentum frame to some new frame, and this is a distinct transformation from boosting directly from a lab frame to the same target frame!
+
+Let's further assume that there are only two resonances present in our data, an :math:`f_0(1500)` and an :math:`f_2'(1525)` [#f1]_. We will assume that the data were generated via two relativistic Breit-Wigner distributions with masses of :math:`1506\text{ MeV}/c^2` and :math:`1517\text{ MeV}/c^2` and widths of :math:`112\text{ MeV}/c^2` and :math:`86\text{ MeV}/c^2` respectively (these values come from the PDG). These resonances also have spin, so we can look at their decay angles as well as the overall mass distribution. Additionally, they have either positive or negative reflectivity, which is related to the parity of the exchanged particle and can be measured in polarized production (we will assume both particles are generated with positive reflectivity). These variables are all defined by ``laddu`` as helper classes:
+
+.. code:: python
+
+   # the mass of the combination of particles 2 and 3, the kaons
+   res_mass = ld.Mass([2, 3])
+
+   # the decay angles in the helicity frame
+   angles = ld.Angles(0, [1], [2], [2, 3])
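+These variable objects also plug into other parts of the API. For example (as used heavily in the binned fitting tutorial), a dataset can be split into bins of any variable, or resampled with replacement for bootstrapping:
+
+.. code:: python
+
+   # 50 bins of K_S^0 K_S^0 invariant mass between 1 and 2 GeV/c^2
+   data_ds_binned = data_ds.bin_by(res_mass, 50, (1.0, 2.0))
+
+   # a bootstrap resample of the dataset (the argument seeds the resampling)
+   data_ds_boot = data_ds.bootstrap(0)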
+So far, these objects just represent particles in a generic dataset by index and provide an appropriate method to calculate the corresponding observable. Before we fit anything, we might want to just see what the dataset looks like:
+
+.. code:: python
+
+   import matplotlib.pyplot as plt
+
+   m_data = res_mass.value_on(data_ds)
+   costheta_data = angles.costheta.value_on(data_ds)
+   phi_data = angles.phi.value_on(data_ds)
+
+   fig, ax = plt.subplots(ncols=2)
+   ax[0].hist(m_data, bins=100)
+   ax[0].set_xlabel('Mass of $K_SK_S$ in GeV/$c^2$')
+   ax[1].hist2d(costheta_data, phi_data, bins=(100, 100))
+   ax[1].set_xlabel(r'$\cos(\theta_{HX})$')
+   ax[1].set_ylabel(r'$\varphi_{HX}$')
+   plt.tight_layout()
+   plt.show()
+
+.. image:: ./unbinned_fit_mass_angle_plot.png
+   :width: 800
+   :alt: Data for unbinned fit
+
+Next, let's come up with a model. ``laddu`` models are formed by combining individual amplitudes after they are registered with a manager. The manager keeps track of all of the free parameters and of the caching performed when a dataset is pre-computed. ``laddu`` has the amplitudes that we need already built in. We will use a relativistic Breit-Wigner to describe the mass dependence and a :math:`Z_{L}^{M}` amplitude described by [Mathieu]_ to fit the angular distributions with beam polarization in mind. The angular part of this model requires two coherent sums for each reflectivity, and assuming just positive reflectivity, we can write the entire model as follows:
+
+.. math::
+
+   I(m, \theta, \varphi, P_{\gamma}, \Phi) \propto &\left[ [f_0(1500)] BW_0(m; m_{f_0}, \Gamma_{f_0}) \Re\left[Z_{0}^{0(+)}(\theta, \varphi, P_\gamma, \Phi)\right]\right.\\
+   &\left. + [f_2'(1525)] BW_2(m; m_{f_2'}, \Gamma_{f_2'}) \Re\left[Z_{2}^{2(+)}(\theta, \varphi, P_\gamma, \Phi)\right]\right]^2 \\
+   + &\left[ [f_0(1500)] BW_0(m; m_{f_0}, \Gamma_{f_0}) \Im\left[Z_{0}^{0(+)}(\theta, \varphi, P_\gamma, \Phi)\right]\right.\\
+   &\left. + [f_2'(1525)] BW_2(m; m_{f_2'}, \Gamma_{f_2'}) \Im\left[Z_{2}^{2(+)}(\theta, \varphi, P_\gamma, \Phi)\right]\right]^2
+
+where :math:`BW_{L}(m; m_\alpha, \Gamma_\alpha)` is the Breit-Wigner amplitude for a spin-:math:`L` particle with mass :math:`m_\alpha` and width :math:`\Gamma_\alpha`, and :math:`Z_{L}^{M}(\theta, \varphi, P_\gamma, \Phi)` describes the angular distribution of a spin-:math:`L` particle with decay angles :math:`\theta` and :math:`\varphi`, photoproduction polarization fraction :math:`P_\gamma` and angle :math:`\Phi`, and angular momentum projection :math:`M`. The terms with particle names in square brackets represent the production coefficients. While these are technically both allowed to be complex values, in practice we fix one to be real in each sum, since the norm-squared of a coherent sum is invariant under an overall phase. The exact form of these amplitudes is not important for this tutorial. Instead, we will demonstrate how they can be created and combined with simple operations. First, we create a manager and a ``Polarization`` object which grabs polarization information from the dataset, using the indices of the beam and recoil proton to form the production plane:
+
+.. code:: python
+
+   manager = ld.Manager()
+   polarization = ld.Polarization(0, [1])
+
+Next, we can create ``Zlm`` amplitudes:
+
+.. code:: python
+
+   z00p = manager.register(ld.Zlm("Z00+", 0, 0, "+", angles, polarization))
+   z22p = manager.register(ld.Zlm("Z22+", 2, 2, "+", angles, polarization))
+
+The ``z00p`` and ``z22p`` objects are just pointers to these amplitudes' registrations within the ``manager``, but they can be combined with other amplitudes using basic math operations. The first argument to ``Zlm`` is the name by which we will refer to the amplitude when we project the fit results onto the Monte Carlo later.
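+For instance, an expression built from the ``Zlm`` amplitudes alone is perfectly valid but contains nothing to fit (a quick sketch):
+
+.. code:: python
+
+   # a valid expression, but it has no free parameters
+   fixed_expr = (z00p + z22p).norm_sqr()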
+Since there are no free parameters in the ``Zlm`` amplitudes, if we just built a model with these amplitudes alone, we wouldn't have anything to minimize. Let's now construct some amplitudes which have free parameters, namely our production coefficients. These are the simplest amplitudes, just scalar values which are either purely real or complex. We can use the ``parameter`` function to create a named parameter in our model:
+
+.. code:: python
+
+   f0_1500 = manager.register(ld.Scalar("[f_0(1500)]", ld.parameter("Re[f_0(1500)]")))
+   f2_1525 = manager.register(ld.ComplexScalar("[f_2'(1525)]", ld.parameter("Re[f_2'(1525)]"), ld.parameter("Im[f_2'(1525)]")))
+
+Finally, we can register the Breit-Wigners. These have two free parameters, the mass and width of the resonance. For the sake of demonstration, let's fix the mass by passing in a ``constant`` and let the width float with a ``parameter``. These two functions create the same object, so we could just as easily write this with both values fixed or free in the fit:
+
+.. code:: python
+
+   bw0 = manager.register(ld.BreitWigner("BW_0", ld.constant(1.506), ld.parameter("f_0 width"), 0, ld.Mass([2]), ld.Mass([3]), res_mass))
+   bw2 = manager.register(ld.BreitWigner("BW_2", ld.constant(1.517), ld.parameter("f_2 width"), 2, ld.Mass([2]), ld.Mass([3]), res_mass))
+
+As you can see, these amplitudes also take additional arguments, like the angular momentum of the decay and the masses of each decay product.
+
+Next, we combine these together according to our model. For these amplitude pointers (``AmplitudeID``), we can use the ``+`` and ``*`` operators as well as ``real()`` and ``imag()`` to take the real or imaginary part of the amplitude and ``norm_sqr()`` to take the square of the magnitude (for the coherent sums). These operations can also be applied to the results of previous operations, so we can form the entire expression given above:
+
+.. code:: python
+
+   positive_real_sum = (f0_1500 * bw0 * z00p.real() + f2_1525 * bw2 * z22p.real()).norm_sqr()
+   positive_imag_sum = (f0_1500 * bw0 * z00p.imag() + f2_1525 * bw2 * z22p.imag()).norm_sqr()
+   model = positive_real_sum + positive_imag_sum
+
+Now that we have the model, we want to fit the free parameters, which in this case are the complex photocouplings and the widths of each Breit-Wigner. We can do this by creating an ``NLL`` object which uses the data and accepted Monte Carlo datasets to calculate the negative log-likelihood described earlier.
+
+.. code:: python
+
+   nll = ld.NLL(manager, model, data_ds, accmc_ds)
+   print(nll.parameters)
+   # ['Re[f_0(1500)]', "Re[f_2'(1525)]", "Im[f_2'(1525)]", 'f_0 width', 'f_2 width']
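+The negative log-likelihood can also be evaluated at any point in parameter space before we minimize it (the order of values matches ``nll.parameters``); for example, with some arbitrary starting values:
+
+.. code:: python
+
+   # -2 ln(L) at a trial point: three coupling components and two widths
+   print(nll.evaluate([100.0, 100.0, 100.0, 0.100, 0.100]))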
+Finally, let's run the fit. By default, ``laddu`` uses the L-BFGS-B algorithm, which supports bounds. We should provide bounds for the widths, since zero or negative widths are unphysical (and might cause mathematical issues). The first argument is just a starting position for the fit.
+
+.. code:: python
+
+   status = nll.minimize([100.0, 100.0, 100.0, 0.100, 0.100], bounds=[(None, None), (None, None), (None, None), (0.001, 0.4), (0.001, 0.4)])
+
+The ``status`` object contains a lot of information about the fit result. In particular, we can check ``status.converged`` to see if the fit was successful, ``status.x`` for the best-fit position, ``status.err`` for uncertainties, and ``status.fx`` for the negative log-likelihood at the minimum. We can also print it all out at once:
+
+.. code:: python
+
+   print(status)
+
+.. code::
+
+   ╒══════════════════════════════════════════════════════════════════════════════════════════════╕
+   │                                          FIT RESULTS                                          │
+   ╞════════════════════════════════════════════╤════════════════════╤═════════════╤══════════════╡
+   │ Status: Converged                          │ fval: -2.350E6     │ #fcn: 277   │ #grad: 277   │
+   ├────────────────────────────────────────────┴────────────────────┴─────────────┴──────────────┤
+   │ Message: F_EVAL CONVERGED                                                                     │
+   ├───────╥──────────────┬──────────────╥──────────────┬──────────────┬──────────────┬───────────┤
+   │ Par # ║    Value     │ Uncertainty  ║   Initial    │    -Bound    │    +Bound    │ At Limit? │
+   ├───────╫──────────────┼──────────────╫──────────────┼──────────────┼──────────────┼───────────┤
+   │   0   ║   +9.928E2   │   +5.214E0   ║   +1.000E2   │     -inf     │     +inf     │           │
+   │   1   ║   +1.150E3   │   +1.174E1   ║   +1.000E2   │     -inf     │     +inf     │           │
+   │   2   ║   +1.231E3   │   +8.226E0   ║   +1.000E2   │     -inf     │     +inf     │           │
+   │   3   ║  +1.122E-1   │  +9.437E-4   ║  +1.000E-1   │  +1.000E-3   │  +4.000E-1   │           │
+   │   4   ║  +7.931E-2   │  +3.144E-4   ║  +1.000E-1   │  +1.000E-3   │  +4.000E-1   │           │
+   └───────╨──────────────┴──────────────╨──────────────┴──────────────┴──────────────┴───────────┘
+
+Now that we have the fitted free parameters, we can plot the result by calculating weights for the accepted Monte Carlo. This will be done using the ``NLL.project`` and ``NLL.project_with`` methods. Every amplitude in the model is either activated or deactivated. Deactivated amplitudes act like zeros in the model, so we can deactivate certain amplitudes to isolate others. The ``NLL.project_with`` method provides a shorthand way to do this, isolating the given amplitudes for just a single calculation before reverting the ``NLL`` back to its prior state. ``NLL.project`` just calculates weights given all currently active amplitudes, and if we don't deactivate any, this will give us the total fit result.
+
+.. code:: python
+
+   tot_weights = nll.project(status.x)
+   f0_weights = nll.project_with(status.x, ["[f_0(1500)]", "BW_0", "Z00+"])
+   f2_weights = nll.project_with(status.x, ["[f_2'(1525)]", "BW_2", "Z22+"])
+
+   fig, ax = plt.subplots(ncols=2, sharey=True)
+   # Plot the data on both axes
+   ax[0].hist(m_data, bins=100, range=(1.0, 2.0), color="k", histtype="step", label="Data")
+   ax[1].hist(m_data, bins=100, range=(1.0, 2.0), color="k", histtype="step", label="Data")
+
+   m_accmc = res_mass.value_on(accmc_ds)
+   # Plot the total fit on both axes
+   ax[0].hist(m_accmc, weights=tot_weights, bins=100, range=(1.0, 2.0), color="k", alpha=0.1, label="Fit")
+   ax[1].hist(m_accmc, weights=tot_weights, bins=100, range=(1.0, 2.0), color="k", alpha=0.1, label="Fit")
+
+   # Plot the f_0(1500) on the left
+   ax[0].hist(m_accmc, weights=f0_weights, bins=100, range=(1.0, 2.0), color="r", alpha=0.1, label="$f_0(1500)$")
+
+   # Plot the f_2'(1525) on the right
+   ax[1].hist(m_accmc, weights=f2_weights, bins=100, range=(1.0, 2.0), color="r", alpha=0.1, label="$f_2'(1525)$")
+
+   ax[0].legend()
+   ax[1].legend()
+   ax[0].set_ylim(0)
+   ax[1].set_ylim(0)
+   ax[0].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)")
+   ax[1].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)")
+   ax[0].set_ylabel("Counts / 10 MeV/$c^2$")
+   ax[1].set_ylabel("Counts / 10 MeV/$c^2$")
+   plt.tight_layout()
+   plt.show()
+
+.. image:: ./unbinned_fit_result.png
+   :width: 800
+   :alt: Fit result
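+Under the hood, ``NLL.project_with`` is a convenience wrapper around the activation methods; a sketch of the equivalent manual version:
+
+.. code:: python
+
+   # isolate the f_0(1500) term, project, then restore the full model
+   nll.isolate(["[f_0(1500)]", "BW_0", "Z00+"])
+   f0_weights_manual = nll.project(status.x)
+   nll.activate_all()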
+Notice that we have not yet used the generated Monte Carlo. We always assume that the generated Monte Carlo is distributed evenly in phase space, without any "physics" like resonances or spin. We can quickly plot the mass distributions for the Monte Carlo as well as the "efficiency" of the reconstruction per bin of mass [#f2]_:
+
+.. code:: python
+
+   import numpy as np
+
+   m_genmc = res_mass.value_on(genmc_ds)
+   m_accmc_hist, mass_bins = np.histogram(m_accmc, bins=100, range=(1.0, 2.0))
+   m_genmc_hist, _ = np.histogram(m_genmc, bins=100, range=(1.0, 2.0))
+   m_efficiency = m_accmc_hist / m_genmc_hist
+
+   fig, ax = plt.subplots(ncols=2)
+   ax[0].stairs(m_accmc_hist, mass_bins, color="b", fill=True, alpha=0.1, label="Accepted")
+   ax[0].stairs(m_genmc_hist, mass_bins, color="k", label="Generated")
+   ax[1].stairs(m_efficiency, mass_bins, color="r", label="Efficiency")
+
+   ax[0].legend()
+   ax[1].legend()
+   ax[0].set_ylim(0)
+   ax[1].set_ylim(0)
+   ax[0].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)")
+   ax[1].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)")
+   ax[0].set_ylabel("Counts / 10 MeV/$c^2$")
+   ax[1].set_ylabel("Counts / 10 MeV/$c^2$")
+   plt.tight_layout()
+   plt.show()
+
+.. image:: ./unbinned_fit_efficiency.png
+   :width: 800
+   :alt: Efficiency plot
+
+To project the fit result onto the generated Monte Carlo, we need to create an evaluator specifically for the generated Monte Carlo data. The reason this is done separately is that generated Monte Carlo datasets usually contain many events, so it's sometimes more efficient to do the fit without loading this data at all, save the fit results, and make the acceptance-corrected plots in a separate step, minimizing overall memory impact.
+
+To create an ``Evaluator`` object, we just need to load up the manager with the model and dataset we want to use. All of these operations create efficient copies of the manager, so we don't need to worry about duplicating resources or events.
+
+.. code:: python
+
+   gen_eval = manager.load(model, genmc_ds)
+   tot_weights_acc = nll.project(status.x, mc_evaluator=gen_eval)
+   f0_weights_acc = nll.project_with(status.x, ["[f_0(1500)]", "BW_0", "Z00+"], mc_evaluator=gen_eval)
+   f2_weights_acc = nll.project_with(status.x, ["[f_2'(1525)]", "BW_2", "Z22+"], mc_evaluator=gen_eval)
+
+   # acceptance-correct the data distribution
+   m_data_hist, _ = np.histogram(m_data, bins=100, range=(1.0, 2.0))
+   m_data_acc_hist = m_data_hist / m_efficiency
+
+   fig, ax = plt.subplots(ncols=2, sharey=True)
+   # Plot the data on both axes
+   ax[0].stairs(m_data_acc_hist, mass_bins, color="k", label="Data")
+   ax[1].stairs(m_data_acc_hist, mass_bins, color="k", label="Data")
+
+   # Plot the total fit on both axes
+   ax[0].hist(m_genmc, weights=tot_weights_acc, bins=100, range=(1.0, 2.0), color="k", alpha=0.1, label="Fit")
+   ax[1].hist(m_genmc, weights=tot_weights_acc, bins=100, range=(1.0, 2.0), color="k", alpha=0.1, label="Fit")
+
+   # Plot the f_0(1500) on the left
+   ax[0].hist(m_genmc, weights=f0_weights_acc, bins=100, range=(1.0, 2.0), color="b", alpha=0.1, label="$f_0(1500)$")
+
+   # Plot the f_2'(1525) on the right
+   ax[1].hist(m_genmc, weights=f2_weights_acc, bins=100, range=(1.0, 2.0), color="b", alpha=0.1, label="$f_2'(1525)$")
+
+   ax[0].legend()
+   ax[1].legend()
+   ax[0].set_ylim(0)
+   ax[1].set_ylim(0)
+   ax[0].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)")
+   ax[1].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)")
+   ax[0].set_ylabel("Counts / 10 MeV/$c^2$")
+   ax[1].set_ylabel("Counts / 10 MeV/$c^2$")
+   plt.tight_layout()
+   plt.show()
+
+.. image:: ./unbinned_fit_result_acceptance_corrected.png
+   :width: 800
+   :alt: Fit result with efficiency correction
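+The summed projection weights also estimate component yields, which is exactly what the binned fits in the next tutorial record per bin; for example:
+
+.. code:: python
+
+   # acceptance-corrected yield estimates for each component
+   print(f"Total: {tot_weights_acc.sum():.0f} events")
+   print(f"f_0(1500): {f0_weights_acc.sum():.0f} events")
+   print(f"f_2'(1525): {f2_weights_acc.sum():.0f} events")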
+Finally, we might want to save this fit result and refer back to it in the future. While ``Status`` objects directly support Python ``pickle`` serialization, there's a shorthand method built in:
+
+.. code:: python
+
+   status.save_as("fit_result.pkl")
+   # This saves the status to a file called "fit_result.pkl"
+
+   saved_status = ld.Status.load("fit_result.pkl")
+   # Now we've loaded that fit result again
+
+This will create a rather small file, since the fit is not very complex, but saving multiple fits to a single file will become very useful when doing binned fits, which are the subject of the next tutorial.
+
+.. rubric:: Footnotes
+
+.. [#f1] In reality, there are many more resonances present in this channel, and the model we are about to construct technically doesn't preserve unitarity, but this is just a simple example to demonstrate the mechanics of ``laddu``.
+
+.. [#f2] In this toy example, the accepted Monte Carlo is just a random subset of the generated Monte Carlo, so the acceptance is approximately a constant 30% by construction. In reality, acceptance effects can be much more important than an overall scaling factor.
+
+.. [Barlow] Barlow, R. (1990). Extended maximum likelihood. Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment, 297(3), 496–506. doi:10.1016/0168-9002(90)91334-8
+
+.. [Mathieu] Mathieu, V., et al. (2019). Moments of angular distribution and beam asymmetries in :math:`\eta\pi^0` photoproduction at GlueX. Physical Review D, 100(5), 054017. doi:10.1103/PhysRevD.100.054017
diff --git a/docs/source/tutorials/unbinned_fit_efficiency.png b/docs/source/tutorials/unbinned_fit_efficiency.png
new file mode 100644
index 0000000..72ea3f1
Binary files /dev/null and b/docs/source/tutorials/unbinned_fit_efficiency.png differ
diff --git a/docs/source/tutorials/unbinned_fit_mass_angle_plot.png b/docs/source/tutorials/unbinned_fit_mass_angle_plot.png
new file mode 100644
index 0000000..b7bbaf4
Binary files /dev/null and b/docs/source/tutorials/unbinned_fit_mass_angle_plot.png differ
diff --git a/docs/source/tutorials/unbinned_fit_result.png b/docs/source/tutorials/unbinned_fit_result.png
new file mode 100644
index 0000000..0361dfa
Binary files /dev/null and b/docs/source/tutorials/unbinned_fit_result.png differ
diff --git a/docs/source/tutorials/unbinned_fit_result_acceptance_corrected.png b/docs/source/tutorials/unbinned_fit_result_acceptance_corrected.png
new file mode 100644
index 0000000..efdd226
Binary files /dev/null and b/docs/source/tutorials/unbinned_fit_result_acceptance_corrected.png differ
diff --git a/pyproject.toml b/pyproject.toml
index 87932f0..5db0b5f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,7 +25,7 @@ amptools-to-laddu = "laddu:convert.run"
 
 [project.optional-dependencies]
 tests = ["pytest"]
-docs = ["sphinx", "sphinx-rtd-theme"]
+docs = ["sphinx", "sphinx-rtd-theme", "sphinx-copybutton"]
 
 [tool.maturin]
 python-source = "python"
diff --git a/python/laddu/amplitudes/__init__.pyi b/python/laddu/amplitudes/__init__.pyi
index f57fca0..de8c64c 100644
--- a/python/laddu/amplitudes/__init__.pyi
+++ b/python/laddu/amplitudes/__init__.pyi
@@ -32,7 +32,7 @@ class Amplitude: ...
 class Manager:
     def __init__(self) -> None: ...
     def register(self, amplitude: Amplitude) -> AmplitudeID: ...
-    def load(self, dataset: Dataset, expression: Expression) -> Evaluator: ...
+    def load(self, expression: Expression, dataset: Dataset) -> Evaluator: ...
class Evaluator: parameters: list[str] diff --git a/python/laddu/likelihoods/__init__.pyi b/python/laddu/likelihoods/__init__.pyi index d0d0f79..620fa82 100644 --- a/python/laddu/likelihoods/__init__.pyi +++ b/python/laddu/likelihoods/__init__.pyi @@ -4,7 +4,7 @@ from typing import Any, Literal import numpy as np import numpy.typing as npt -from laddu.amplitudes import Expression, Manager +from laddu.amplitudes import Evaluator, Expression, Manager from laddu.data import Dataset class LikelihoodID: @@ -43,8 +43,14 @@ class LikelihoodEvaluator: class NLL: parameters: list[str] data: Dataset - mc: Dataset - def __init__(self, manager: Manager, ds_data: Dataset, ds_mc: Dataset, expression: Expression) -> None: ... + accmc: Dataset + def __init__( + self, + manager: Manager, + expression: Expression, + ds_data: Dataset, + ds_accmc: Dataset, + ) -> None: ... def as_term(self) -> LikelihoodTerm: ... def activate(self, name: str | list[str]) -> None: ... def activate_all(self) -> None: ... @@ -52,9 +58,15 @@ class NLL: def deactivate_all(self) -> None: ... def isolate(self, name: str | list[str]) -> None: ... def evaluate(self, parameters: list[float] | npt.NDArray[np.float64]) -> float: ... - def project(self, parameters: list[float] | npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: ... + def project( + self, parameters: list[float] | npt.NDArray[np.float64], *, mc_evaluator: Evaluator | None = None + ) -> npt.NDArray[np.float64]: ... def project_with( - self, parameters: list[float] | npt.NDArray[np.float64], name: str | list[str] + self, + parameters: list[float] | npt.NDArray[np.float64], + name: str | list[str], + *, + mc_evaluator: Evaluator | None = None, ) -> npt.NDArray[np.float64]: ... def minimize( self, diff --git a/python_examples/example_1/accmc_1.parquet b/python_examples/example_1/accmc_1.parquet new file mode 100644 index 0000000..48edaef Binary files /dev/null and b/python_examples/example_1/accmc_1.parquet differ diff --git a/python_examples/example_1/example_1.py b/python_examples/example_1/example_1.py index 43788d6..ebb4da4 100755 --- a/python_examples/example_1/example_1.py +++ b/python_examples/example_1/example_1.py @@ -27,17 +27,20 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 script_dir = Path(os.path.realpath(__file__)).parent.resolve() data_file = str(script_dir / "data_1.parquet") - mc_file = str(script_dir / "mc_1.parquet") + accmc_file = str(script_dir / "accmc_1.parquet") + genmc_file = str(script_dir / "mc_1.parquet") start = perf_counter() logger.info("Opening Data file...") data_ds = ld.open(data_file) - logger.info("Opening MC file...") - accmc_ds = ld.open(mc_file) - binned_tot_res, binned_tot_err, binned_s0p_res, binned_s0p_err, binned_d2p_res, binned_d2p_err, bin_edges = ( - fit_binned(bins, niters, nboot, data_ds, accmc_ds) + logger.info("Opening AccMC file...") + accmc_ds = ld.open(accmc_file) + logger.info("Opening GenMC file...") + genmc_ds = ld.open(genmc_file) + (binned_tot, binned_tot_err), (binned_s0p, binned_s0p_err), (binned_d2p, binned_d2p_err), bin_edges = fit_binned( + bins, niters, nboot, data_ds, accmc_ds, genmc_ds ) tot_weights, s0p_weights, d2p_weights, status, boot_statuses, parameters = fit_unbinned( - niters, nboot, data_ds, accmc_ds + niters, nboot, data_ds, accmc_ds, genmc_ds ) end = perf_counter() logger.info(f"Total time: {end - start:.3f}s") @@ -75,6 +78,12 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 res_mass = ld.Mass([2, 3]) m_data = res_mass.value_on(data_ds) m_accmc = 
res_mass.value_on(accmc_ds) + m_genmc = res_mass.value_on(genmc_ds) + m_data_hist, mass_bins = np.histogram(m_data, bins=bins, range=(1, 2)) + m_accmc_hist, _ = np.histogram(m_accmc, bins=bins, range=(1, 2)) + m_genmc_hist, _ = np.histogram(m_genmc, bins=bins, range=(1, 2)) + m_acceptance = m_accmc_hist / m_genmc_hist + m_data_corr_hist = m_data_hist / m_acceptance font = {"family": "DejaVu Sans", "weight": "normal", "size": 22} plt.rc("font", **font) @@ -102,7 +111,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 ax[1].hist(m_data, bins=bins, range=(1, 2), color=black, histtype="step", label="Data") ax[0].hist( m_accmc, - weights=tot_weights, + weights=tot_weights[0], bins=bins, range=(1, 2), color=black, @@ -111,7 +120,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 ) ax[1].hist( m_accmc, - weights=tot_weights, + weights=tot_weights[0], bins=bins, range=(1, 2), color=black, @@ -120,7 +129,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 ) ax[0].hist( m_accmc, - weights=s0p_weights, + weights=s0p_weights[0], bins=bins, range=(1, 2), color=blue, @@ -129,7 +138,7 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 ) ax[1].hist( m_accmc, - weights=d2p_weights, + weights=d2p_weights[0], bins=bins, range=(1, 2), color=red, @@ -137,10 +146,10 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 label="$D_2^+$ (unbinned)", ) centers = (bin_edges[1:] + bin_edges[:-1]) / 2 - ax[0].errorbar(centers, binned_tot_res, yerr=binned_tot_err, fmt=".", color=black, label="Fit (binned)") - ax[1].errorbar(centers, binned_tot_res, yerr=binned_tot_err, fmt=".", color=black, label="Fit (binned)") - ax[0].errorbar(centers, binned_s0p_res, yerr=binned_s0p_err, fmt=".", color=blue, label="$S_0^+$ (binned)") - ax[1].errorbar(centers, binned_d2p_res, yerr=binned_d2p_err, fmt=".", color=red, label="$D_2^+$ (binned)") + ax[0].errorbar(centers, binned_tot[0], yerr=binned_tot_err[0], fmt=".", color=black, label="Fit (binned)") + ax[1].errorbar(centers, binned_tot[0], yerr=binned_tot_err[0], fmt=".", color=black, label="Fit (binned)") + ax[0].errorbar(centers, binned_s0p[0], yerr=binned_s0p_err[0], fmt=".", color=blue, label="$S_0^+$ (binned)") + ax[1].errorbar(centers, binned_d2p[0], yerr=binned_d2p_err[0], fmt=".", color=red, label="$D_2^+$ (binned)") ax[0].legend() ax[1].legend() @@ -153,15 +162,82 @@ def main(bins: int, niters: int, nboot: int): # noqa: PLR0915 ax[1].set_ylabel(f"Counts / {bin_width} MeV/$c^2$") plt.tight_layout() plt.savefig("example_1.svg") + plt.close() + _, ax = plt.subplots(ncols=2, sharey=True, figsize=(22, 12)) + ax[0].stairs(m_data_corr_hist, mass_bins, color=black, label="Data (corrected)") + ax[1].stairs(m_data_corr_hist, mass_bins, color=black, label="Data (corrected)") + ax[0].hist( + m_genmc, + weights=tot_weights[1], + bins=bins, + range=(1, 2), + color=black, + alpha=0.1, + label="Fit (unbinned)", + ) + ax[1].hist( + m_genmc, + weights=tot_weights[1], + bins=bins, + range=(1, 2), + color=black, + alpha=0.1, + label="Fit (unbinned)", + ) + ax[0].hist( + m_genmc, + weights=s0p_weights[1], + bins=bins, + range=(1, 2), + color=blue, + alpha=0.1, + label="$S_0^+$ (unbinned)", + ) + ax[1].hist( + m_genmc, + weights=d2p_weights[1], + bins=bins, + range=(1, 2), + color=red, + alpha=0.1, + label="$D_2^+$ (unbinned)", + ) + centers = (bin_edges[1:] + bin_edges[:-1]) / 2 + ax[0].errorbar(centers, binned_tot[1], yerr=binned_tot_err[1], fmt=".", color=black, label="Fit (binned)") + ax[1].errorbar(centers, binned_tot[1], 
yerr=binned_tot_err[1], fmt=".", color=black, label="Fit (binned)") + ax[0].errorbar(centers, binned_s0p[1], yerr=binned_s0p_err[1], fmt=".", color=blue, label="$S_0^+$ (binned)") + ax[1].errorbar(centers, binned_d2p[1], yerr=binned_d2p_err[1], fmt=".", color=red, label="$D_2^+$ (binned)") -def fit_binned(bins: int, niters: int, nboot: int, data_ds: ld.Dataset, accmc_ds: ld.Dataset): + ax[0].legend() + ax[1].legend() + ax[0].set_ylim(0) + ax[1].set_ylim(0) + ax[0].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)") + ax[1].set_xlabel("Mass of $K_S^0 K_S^0$ (GeV/$c^2$)") + bin_width = int(1000 / bins) + ax[0].set_ylabel(f"Counts / {bin_width} MeV/$c^2$") + ax[1].set_ylabel(f"Counts / {bin_width} MeV/$c^2$") + plt.tight_layout() + plt.savefig("example_1_corrected.svg") + plt.close() + + +def fit_binned( # noqa: PLR0915 + bins: int, niters: int, nboot: int, data_ds: ld.Dataset, accmc_ds: ld.Dataset, genmc_ds: ld.Dataset +) -> tuple[ + tuple[tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray]], + tuple[tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray]], + tuple[tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray]], + np.ndarray, +]: logger.info("Starting Binned Fit") res_mass = ld.Mass([2, 3]) angles = ld.Angles(0, [1], [2], [2, 3]) polarization = ld.Polarization(0, [1]) data_ds_binned = data_ds.bin_by(res_mass, bins, (1.0, 2.0)) accmc_ds_binned = accmc_ds.bin_by(res_mass, bins, (1.0, 2.0)) + genmc_ds_binned = genmc_ds.bin_by(res_mass, bins, (1.0, 2.0)) manager = ld.Manager() z00p = manager.register(ld.Zlm("Z00+", 0, 0, "+", angles, polarization)) z22p = manager.register(ld.Zlm("Z22+", 2, 2, "+", angles, polarization)) @@ -172,19 +248,29 @@ def fit_binned(bins: int, niters: int, nboot: int, data_ds: ld.Dataset, accmc_ds model = pos_re + pos_im tot_res = [] + tot_res_acc = [] tot_res_err = [] + tot_res_acc_err = [] s0p_res = [] + s0p_res_acc = [] s0p_res_err = [] + s0p_res_acc_err = [] d2p_res = [] + d2p_res_acc = [] d2p_res_err = [] + d2p_res_acc_err = [] - rng = np.random.default_rng() + rng = np.random.default_rng(0) for ibin in range(bins): logger.info(f"Fitting Bin #{ibin}") best_nll = np.inf best_status = None - nll = ld.NLL(manager, data_ds_binned[ibin], accmc_ds_binned[ibin], model) + nll = ld.NLL(manager, model, data_ds_binned[ibin], accmc_ds_binned[ibin]) + gen_eval = manager.load( + model, + genmc_ds_binned[ibin], + ) for iiter in range(niters): logger.info(f"Fitting Iteration #{iiter}") p0 = rng.uniform(-1000.0, 1000.0, len(nll.parameters)) @@ -198,38 +284,58 @@ def fit_binned(bins: int, niters: int, nboot: int, data_ds: ld.Dataset, accmc_ds sys.exit(1) tot_res.append(nll.project(best_status.x).sum()) - nll.isolate(["Z00+", "S0+"]) - s0p_res.append(nll.project(best_status.x).sum()) - nll.isolate(["Z22+", "D2+"]) - d2p_res.append(nll.project(best_status.x).sum()) + tot_res_acc.append(nll.project(best_status.x, mc_evaluator=gen_eval).sum()) + s0p_res.append(nll.project_with(best_status.x, ["Z00+", "S0+"]).sum()) + s0p_res_acc.append(nll.project_with(best_status.x, ["Z00+", "S0+"], mc_evaluator=gen_eval).sum()) + d2p_res.append(nll.project_with(best_status.x, ["Z22+", "D2+"]).sum()) + d2p_res_acc.append(nll.project_with(best_status.x, ["Z22+", "D2+"], mc_evaluator=gen_eval).sum()) nll.activate_all() + gen_eval.activate_all() tot_boot = [] + tot_boot_acc = [] s0p_boot = [] + s0p_boot_acc = [] d2p_boot = [] + d2p_boot_acc = [] for iboot in range(nboot): logger.info(f"Running bootstrap fit #{iboot}") - boot_nll = ld.NLL( - manager, 
data_ds_binned[ibin].bootstrap(iboot), accmc_ds_binned[ibin].bootstrap(iboot), model - ) + boot_nll = ld.NLL(manager, model, data_ds_binned[ibin].bootstrap(iboot), accmc_ds_binned[ibin]) boot_status = boot_nll.minimize(best_status.x) tot_boot.append(boot_nll.project(boot_status.x).sum()) - boot_nll.isolate(["Z00+", "S0+"]) - s0p_boot.append(boot_nll.project(boot_status.x).sum()) - boot_nll.isolate(["Z22+", "D2+"]) - d2p_boot.append(boot_nll.project(boot_status.x).sum()) + tot_boot_acc.append(boot_nll.project(boot_status.x, mc_evaluator=gen_eval).sum()) + s0p_boot.append(boot_nll.project_with(boot_status.x, ["Z00+", "S0+"]).sum()) + s0p_boot_acc.append(boot_nll.project_with(boot_status.x, ["Z00+", "S0+"], mc_evaluator=gen_eval).sum()) + d2p_boot.append(boot_nll.project_with(boot_status.x, ["Z22+", "D2+"]).sum()) + d2p_boot_acc.append(boot_nll.project_with(boot_status.x, ["Z22+", "D2+"], mc_evaluator=gen_eval).sum()) boot_nll.activate_all() + gen_eval.activate_all() tot_res_err.append(np.std(tot_boot)) + tot_res_acc_err.append(np.std(tot_boot_acc)) s0p_res_err.append(np.std(s0p_boot)) + s0p_res_acc_err.append(np.std(s0p_boot_acc)) d2p_res_err.append(np.std(d2p_boot)) + d2p_res_acc_err.append(np.std(d2p_boot_acc)) - return (tot_res, tot_res_err, s0p_res, s0p_res_err, d2p_res, d2p_res_err, data_ds_binned.edges) + return ( + ((np.array(tot_res), np.array(tot_res_acc)), (np.array(tot_res_err), np.array(tot_res_acc_err))), + ((np.array(s0p_res), np.array(s0p_res_acc)), (np.array(s0p_res_err), np.array(s0p_res_acc_err))), + ((np.array(d2p_res), np.array(d2p_res_acc)), (np.array(d2p_res_err), np.array(d2p_res_acc_err))), + data_ds_binned.edges, + ) def fit_unbinned( - niters: int, nboot: int, data_ds: ld.Dataset, accmc_ds: ld.Dataset -) -> tuple[np.ndarray, np.ndarray, np.ndarray, ld.Status, list[ld.Status], list[str]]: + niters: int, nboot: int, data_ds: ld.Dataset, accmc_ds: ld.Dataset, genmc_ds: ld.Dataset +) -> tuple[ + tuple[np.ndarray, np.ndarray], + tuple[np.ndarray, np.ndarray], + tuple[np.ndarray, np.ndarray], + ld.Status, + list[ld.Status], + list[str], +]: logger.info("Starting Unbinned Fit") res_mass = ld.Mass([2, 3]) angles = ld.Angles(0, [1], [2], [2, 3]) @@ -253,7 +359,7 @@ def fit_unbinned( pos_im = (s0p * bw_f01500 * z00p.imag() + d2p * bw_f21525 * z22p.imag()).norm_sqr() model = pos_re + pos_im - rng = np.random.default_rng() + rng = np.random.default_rng(0) best_nll = np.inf best_status = None @@ -264,7 +370,11 @@ def fit_unbinned( (-1000.0, 1000.0), (-1000.0, 1000.0), ] - nll = ld.NLL(manager, data_ds, accmc_ds, model) + nll = ld.NLL(manager, model, data_ds, accmc_ds) + gen_eval = manager.load( + model, + genmc_ds, + ) for iiter in range(niters): logger.info(f"Fitting Iteration #{iiter}") p0 = rng.uniform(-1000.0, 1000.0, 3) @@ -279,19 +389,27 @@ def fit_unbinned( sys.exit(1) tot_weights = nll.project(best_status.x) - nll.isolate(["S0+", "Z00+", "f0(1500)"]) - s0p_weights = nll.project(best_status.x) - nll.isolate(["D2+", "Z22+", "f2(1525)"]) - d2p_weights = nll.project(best_status.x) + tot_weights_acc = nll.project(best_status.x, mc_evaluator=gen_eval) + s0p_weights = nll.project_with(best_status.x, ["S0+", "Z00+", "f0(1500)"]) + s0p_weights_acc = nll.project_with(best_status.x, ["S0+", "Z00+", "f0(1500)"], mc_evaluator=gen_eval) + d2p_weights = nll.project_with(best_status.x, ["D2+", "Z22+", "f2(1525)"]) + d2p_weights_acc = nll.project_with(best_status.x, ["D2+", "Z22+", "f2(1525)"], mc_evaluator=gen_eval) nll.activate_all() boot_statuses = [] for iboot in range(nboot): 
         logger.info(f"Running bootstrap fit #{iboot}")
-        boot_nll = ld.NLL(manager, data_ds.bootstrap(iboot), accmc_ds.bootstrap(iboot), model)
+        boot_nll = ld.NLL(manager, model, data_ds.bootstrap(iboot), accmc_ds)
         boot_statuses.append(boot_nll.minimize(best_status.x))
 
-    return (tot_weights, s0p_weights, d2p_weights, best_status, boot_statuses, nll.parameters)
+    return (
+        (tot_weights, tot_weights_acc),
+        (s0p_weights, s0p_weights_acc),
+        (d2p_weights, d2p_weights_acc),
+        best_status,
+        boot_statuses,
+        nll.parameters,
+    )
 
 
 if __name__ == "__main__":
diff --git a/python_examples/example_1/example_1.svg b/python_examples/example_1/example_1.svg
index f08d2b6..cfe21f6 100644
--- a/python_examples/example_1/example_1.svg
+++ b/python_examples/example_1/example_1.svg
@@ -6,7 +6,7 @@
-    2024-11-03T15:14:47.292893
+    2024-11-19T14:32:00.680303
[remainder of the SVG diff omitted: only regenerated plot path data and clip-path identifiers changed]
#ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> +" clip-path="url(#pdd87805a17)" style="fill: #ef3a47; opacity: 0.1"/> - + @@ -3607,7 +3607,7 @@ z - + @@ -3622,7 +3622,7 @@ z - + @@ -3637,7 +3637,7 @@ z - + @@ -3652,7 +3652,7 @@ z - + @@ -3667,7 +3667,7 @@ z - + @@ -3682,119 +3682,119 @@ z - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -3831,224 +3831,224 @@ z - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -4090,68 +4090,68 @@ L 951.631136 777.995623 L 964.179773 777.995623 L 964.179773 777.136296 L 976.728409 777.136296 -L 976.728409 777.231777 -L 989.277045 777.231777 +L 976.728409 777.231776 +L 989.277045 777.231776 L 989.277045 774.701535 L 1001.825682 774.701535 -L 1001.825682 773.937689 -L 1014.374318 773.937689 +L 1001.825682 773.937688 +L 1014.374318 773.937688 L 1014.374318 772.266774 L 1026.922955 772.266774 L 1026.922955 769.163648 L 1039.471591 769.163648 L 1039.471591 768.113359 L 1052.020227 768.113359 -L 1052.020227 764.914752 -L 1064.568864 764.914752 -L 1064.568864 760.188452 -L 1077.1175 760.188452 +L 1052.020227 764.914751 +L 1064.568864 764.914751 +L 1064.568864 760.188451 +L 1077.1175 760.188451 L 1077.1175 756.942104 L 1089.666136 756.942104 -L 1089.666136 751.404217 -L 1102.214773 751.404217 +L 1089.666136 751.404216 +L 1102.214773 751.404216 L 1102.214773 744.386377 L 1114.763409 744.386377 -L 1114.763409 733.692527 -L 1127.312045 733.692527 -L 1127.312045 718.033674 -L 1139.860682 718.033674 -L 1139.860682 697.266597 -L 1152.409318 697.266597 -L 1152.409318 666.378555 -L 1164.957955 666.378555 -L 
1164.957955 619.688438 -L 1177.506591 619.688438 -L 1177.506591 537.383978 -L 1190.055227 537.383978 -L 1190.055227 402.803775 -L 1202.603864 402.803775 -L 1202.603864 213.417585 -L 1215.1525 213.417585 -L 1215.1525 59.884441 -L 1227.701136 59.884441 -L 1227.701136 143.048227 -L 1240.249773 143.048227 -L 1240.249773 336.922015 -L 1252.798409 336.922015 -L 1252.798409 493.653767 -L 1265.347045 493.653767 -L 1265.347045 592.715109 -L 1277.895682 592.715109 -L 1277.895682 647.377873 -L 1290.444318 647.377873 -L 1290.444318 691.537748 -L 1302.992955 691.537748 -L 1302.992955 708.962996 -L 1315.541591 708.962996 -L 1315.541591 728.25012 -L 1328.090227 728.25012 -L 1328.090227 739.803299 -L 1340.638864 739.803299 -L 1340.638864 746.295994 -L 1353.1875 746.295994 -L 1353.1875 754.459603 -L 1365.736136 754.459603 +L 1114.763409 733.692525 +L 1127.312045 733.692525 +L 1127.312045 718.033672 +L 1139.860682 718.033672 +L 1139.860682 697.266595 +L 1152.409318 697.266595 +L 1152.409318 666.378552 +L 1164.957955 666.378552 +L 1164.957955 619.688434 +L 1177.506591 619.688434 +L 1177.506591 537.383972 +L 1190.055227 537.383972 +L 1190.055227 402.803765 +L 1202.603864 402.803765 +L 1202.603864 213.417571 +L 1215.1525 213.417571 +L 1215.1525 59.884423 +L 1227.701136 59.884423 +L 1227.701136 143.048212 +L 1240.249773 143.048212 +L 1240.249773 336.922004 +L 1252.798409 336.922004 +L 1252.798409 493.65376 +L 1265.347045 493.65376 +L 1265.347045 592.715104 +L 1277.895682 592.715104 +L 1277.895682 647.37787 +L 1290.444318 647.37787 +L 1290.444318 691.537746 +L 1302.992955 691.537746 +L 1302.992955 708.962995 +L 1315.541591 708.962995 +L 1315.541591 728.250119 +L 1328.090227 728.250119 +L 1328.090227 739.803297 +L 1340.638864 739.803297 +L 1340.638864 746.295993 +L 1353.1875 746.295993 +L 1353.1875 754.459602 +L 1365.736136 754.459602 L 1365.736136 761.047779 L 1378.284773 761.047779 L 1378.284773 762.575472 @@ -4170,378 +4170,378 @@ L 1453.576591 773.269323 L 1466.125227 773.269323 L 1466.125227 774.462833 L 1478.673864 774.462833 -L 1478.673864 775.704084 -L 1491.2225 775.704084 +L 1478.673864 775.704083 +L 1491.2225 775.704083 L 1491.2225 775.560862 L 1503.771136 775.560862 -L 1503.771136 776.993075 -L 1516.319773 776.993075 +L 1503.771136 776.993074 +L 1516.319773 776.993074 L 1516.319773 778.186585 L 1528.868409 778.186585 L 1528.868409 782.34 -" clip-path="url(#p6180711b8f)" style="fill: none; stroke: #000000; stroke-linejoin: miter"/> +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-linejoin: miter"/> - - +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> + + +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> +L 983.002727 777.231775 +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> - + - + +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> - + +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> +L 1058.294545 764.914749 +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> - - - - - - + + + + + + - - - - - - - - - - - - - +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> + + + + + + + + + + + + + - - - - + + + + - - - - + + + + - - + + 
- + - + - - + + +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> - +" clip-path="url(#pdd87805a17)" style="fill: none; stroke: #000000; stroke-width: 1.5"/> + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -4743,7 +4743,7 @@ L 1314.72 152.29125 - + @@ -4771,7 +4771,7 @@ L 1314.72 189.2025 - + @@ -4795,10 +4795,10 @@ L 1314.72 189.2025 - + - + diff --git a/python_examples/example_1/example_1.txt b/python_examples/example_1/example_1.txt index 872e63e..686d3b8 100644 --- a/python_examples/example_1/example_1.txt +++ b/python_examples/example_1/example_1.txt @@ -2,5 +2,5 @@ ┃ ┃ f0(1500) Width ┃ f2'(1525) Width ┃ S/D Ratio ┃ S-D Phase ┃ ┡━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩ │ Truth │ 0.11200 │ 0.08600 │ 1.41421 │ 0.78540 │ -│ Fit │ 0.11252 ± 0.00104 │ 0.08656 ± 0.00031 │ 1.44949 ± 0.00771 │ 0.80024 ± 0.00369 │ +│ Fit │ 0.11405 ± 0.00083 │ 0.08663 ± 0.00027 │ 1.44653 ± 0.00708 │ 0.79962 ± 0.00449 │ └───────┴───────────────────┴───────────────────┴───────────────────┴───────────────────┘ diff --git a/python_examples/example_1/example_1_corrected.svg b/python_examples/example_1/example_1_corrected.svg new file mode 100644 index 0000000..65b9431 --- /dev/null +++ b/python_examples/example_1/example_1_corrected.svg @@ -0,0 +1,4834 @@ + + + + + + + + 2024-11-19T14:32:01.165768 + image/svg+xml + + + Matplotlib v3.9.2, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/src/amplitudes/breit_wigner.rs b/src/amplitudes/breit_wigner.rs
index cfc1f32..4b11bc7 100644
--- a/src/amplitudes/breit_wigner.rs
+++ b/src/amplitudes/breit_wigner.rs
@@ -130,7 +130,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
 
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[]);
 
@@ -154,7 +154,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
 
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[]);
         assert_eq!(result[0].len(), 0); // amplitude has no parameters
diff --git a/src/amplitudes/common.rs b/src/amplitudes/common.rs
index a23f312..227cb96 100644
--- a/src/amplitudes/common.rs
+++ b/src/amplitudes/common.rs
@@ -175,7 +175,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into(); // Direct amplitude evaluation
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let params = vec![2.5];
         let result = evaluator.evaluate(&params);
@@ -192,7 +192,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.norm_sqr(); // |f(x)|^2
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let params = vec![2.0];
         let gradient = evaluator.evaluate_gradient(&params);
@@ -210,7 +210,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let params = vec![1.5, 2.5]; // Real and imaginary parts
         let result = evaluator.evaluate(&params);
@@ -227,7 +227,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.norm_sqr(); // |f(x + iy)|^2
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let params = vec![3.0, 4.0]; // Real and imaginary parts
         let gradient = evaluator.evaluate_gradient(&params);
@@ -248,7 +248,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let r = 2.0;
         let theta = PI / 4.0;
@@ -269,7 +269,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into(); // f(r,θ) = re^(iθ)
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let params = vec![2.0, PI / 4.0]; // r and theta
         let gradient = evaluator.evaluate_gradient(&params);
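All of these test updates are the same mechanical rewrite: `Manager::load` now takes the expression before the dataset. A minimal sketch of updating a call site (the surrounding setup is hypothetical and module paths are assumed from this repository's layout):

```rust
use std::sync::Arc;

use laddu::amplitudes::{common::Scalar, parameter, Manager};
use laddu::data::Dataset;

// Hypothetical call site: only the `load` argument order changes.
fn sketch(mut manager: Manager, dataset: Arc<Dataset>) {
    let aid = manager
        .register(Scalar::new("mag", parameter("magnitude")))
        .unwrap();
    let expr = aid.norm_sqr();
    // Before: manager.load(&dataset, &expr);
    let evaluator = manager.load(&expr, &dataset); // expression first, dataset second
    let result = evaluator.evaluate(&[2.0]);
    println!("|f|^2 for the first event: {}", result[0].re);
}
```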
diff --git a/src/amplitudes/kmatrix.rs b/src/amplitudes/kmatrix.rs
index 692d23f..546c311 100644
--- a/src/amplitudes/kmatrix.rs
+++ b/src/amplitudes/kmatrix.rs
@@ -963,7 +963,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]);
 
@@ -991,7 +991,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]);
 
@@ -1037,7 +1037,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]);
 
@@ -1064,7 +1064,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]);
 
@@ -1103,7 +1103,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[0.1, 0.2, 0.3, 0.4]);
 
@@ -1128,7 +1128,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[0.1, 0.2, 0.3, 0.4]);
 
@@ -1159,7 +1159,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[0.1, 0.2, 0.3, 0.4]);
 
@@ -1184,7 +1184,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[0.1, 0.2, 0.3, 0.4]);
 
@@ -1215,7 +1215,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[0.1, 0.2, 0.3, 0.4]);
 
@@ -1240,7 +1240,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[0.1, 0.2, 0.3, 0.4]);
 
@@ -1263,7 +1263,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[0.1, 0.2]);
 
@@ -1280,7 +1280,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[0.1, 0.2]);
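The K-matrix tests repeat that one-line change for every parameterization; nothing about evaluation itself changes. For instance, value and gradient checks keep their existing call shape (a sketch under the same assumptions as above; the parameter values are placeholders):

```rust
use std::sync::Arc;

use laddu::amplitudes::{Expression, Manager};
use laddu::data::Dataset;

// Hypothetical helper: `expr` and `dataset` are assumed to be built elsewhere.
fn values_and_gradients(manager: &Manager, expr: &Expression, dataset: &Arc<Dataset>) {
    let evaluator = manager.load(expr, dataset); // new argument order
    let values = evaluator.evaluate(&[0.1, 0.2, 0.3, 0.4]);
    let gradients = evaluator.evaluate_gradient(&[0.1, 0.2, 0.3, 0.4]);
    assert_eq!(values.len(), gradients.len()); // one value and one gradient row per event
}
```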
diff --git a/src/amplitudes/mod.rs b/src/amplitudes/mod.rs
index 92170fe..f2629a6 100644
--- a/src/amplitudes/mod.rs
+++ b/src/amplitudes/mod.rs
@@ -371,7 +371,7 @@ impl Manager {
     /// Create an [`Evaluator`] which can compute the result of the given [`Expression`] built on
     /// registered [`Amplitude`]s over the given [`Dataset`]. This method precomputes any relevant
     /// information over the [`Event`]s in the [`Dataset`].
-    pub fn load(&self, dataset: &Arc<Dataset>, expression: &Expression) -> Evaluator {
+    pub fn load(&self, expression: &Expression, dataset: &Arc<Dataset>) -> Evaluator {
         let loaded_resources = Arc::new(RwLock::new(self.resources.clone()));
         loaded_resources.write().reserve_cache(dataset.len());
         for amplitude in &self.amplitudes {
@@ -616,7 +616,7 @@ mod tests {
             events: vec![Arc::new(test_event())],
         });
         let expr = Expression::Amp(aid);
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
         let result = evaluator.evaluate(&[]);
         assert_eq!(result[0], Complex::from(2.0));
     }
@@ -628,7 +628,7 @@ mod tests {
         let aid = manager.register(amp).unwrap();
         let dataset = Arc::new(test_dataset());
         let expr = Expression::Amp(aid);
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
         let result = evaluator.evaluate(&[3.0]);
         assert_eq!(result[0], Complex::from(3.0));
     }
@@ -648,61 +648,61 @@ mod tests {
 
         // Test (amp) addition
         let expr_add = &aid1 + &aid2;
-        let eval_add = manager.load(&dataset, &expr_add);
+        let eval_add = manager.load(&expr_add, &dataset);
         let result_add = eval_add.evaluate(&[]);
         assert_eq!(result_add[0], Complex::new(2.0, 1.0));
 
         // Test (amp) multiplication
         let expr_mul = &aid1 * &aid2;
-        let eval_mul = manager.load(&dataset, &expr_mul);
+        let eval_mul = manager.load(&expr_mul, &dataset);
         let result_mul = eval_mul.evaluate(&[]);
         assert_eq!(result_mul[0], Complex::new(0.0, 2.0));
 
         // Test (expr) addition
         let expr_add2 = &expr_add + &expr_mul;
-        let eval_add2 = manager.load(&dataset, &expr_add2);
+        let eval_add2 = manager.load(&expr_add2, &dataset);
         let result_add2 = eval_add2.evaluate(&[]);
         assert_eq!(result_add2[0], Complex::new(2.0, 3.0));
 
         // Test (expr) multiplication
         let expr_mul2 = &expr_add * &expr_mul;
-        let eval_mul2 = manager.load(&dataset, &expr_mul2);
+        let eval_mul2 = manager.load(&expr_mul2, &dataset);
         let result_mul2 = eval_mul2.evaluate(&[]);
         assert_eq!(result_mul2[0], Complex::new(-2.0, 4.0));
 
         // Test (amp) real
         let expr_real = aid3.real();
-        let eval_real = manager.load(&dataset, &expr_real);
+        let eval_real = manager.load(&expr_real, &dataset);
         let result_real = eval_real.evaluate(&[]);
         assert_eq!(result_real[0], Complex::new(3.0, 0.0));
 
         // Test (expr) real
         let expr_mul2_real = expr_mul2.real();
-        let eval_mul2_real = manager.load(&dataset, &expr_mul2_real);
+        let eval_mul2_real = manager.load(&expr_mul2_real, &dataset);
         let result_mul2_real = eval_mul2_real.evaluate(&[]);
         assert_eq!(result_mul2_real[0], Complex::new(-2.0, 0.0));
 
         // Test (expr) imag
         let expr_mul2_imag = expr_mul2.imag();
-        let eval_mul2_imag = manager.load(&dataset, &expr_mul2_imag);
+        let eval_mul2_imag = manager.load(&expr_mul2_imag, &dataset);
         let result_mul2_imag = eval_mul2_imag.evaluate(&[]);
         assert_eq!(result_mul2_imag[0], Complex::new(4.0, 0.0));
 
         // Test (amp) imag
         let expr_imag = aid3.imag();
-        let eval_imag = manager.load(&dataset, &expr_imag);
+        let eval_imag = manager.load(&expr_imag, &dataset);
         let result_imag = eval_imag.evaluate(&[]);
         assert_eq!(result_imag[0], Complex::new(4.0, 0.0));
 
         // Test (amp) norm_sqr
         let expr_norm = aid1.norm_sqr();
-        let eval_norm = manager.load(&dataset, &expr_norm);
+        let eval_norm = manager.load(&expr_norm, &dataset);
         let result_norm = eval_norm.evaluate(&[]);
         assert_eq!(result_norm[0], Complex::new(4.0, 0.0));
 
         // Test (expr) norm_sqr
         let expr_mul2_norm = expr_mul2.norm_sqr();
-        let eval_mul2_norm = manager.load(&dataset, &expr_mul2_norm);
+        let eval_mul2_norm = manager.load(&expr_mul2_norm, &dataset);
         let result_mul2_norm = eval_mul2_norm.evaluate(&[]);
         assert_eq!(result_mul2_norm[0], Complex::new(20.0, 0.0));
     }
@@ -718,7 +718,7 @@ mod tests {
 
         let dataset = Arc::new(test_dataset());
         let expr = &aid1 + &aid2;
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         // Test initial state (all active)
         let result = evaluator.evaluate(&[]);
@@ -748,7 +748,7 @@ mod tests {
         let aid = manager.register(amp).unwrap();
         let dataset = Arc::new(test_dataset());
         let expr = aid.norm_sqr();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let params = vec![2.0];
         let gradient = evaluator.evaluate_gradient(&params);
diff --git a/src/amplitudes/ylm.rs b/src/amplitudes/ylm.rs
index 7114f42..bbed1eb 100644
--- a/src/amplitudes/ylm.rs
+++ b/src/amplitudes/ylm.rs
@@ -91,7 +91,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[]);
 
@@ -108,7 +108,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[]);
         assert_eq!(result[0].len(), 0); // amplitude has no parameters
diff --git a/src/amplitudes/zlm.rs b/src/amplitudes/zlm.rs
index a489ea6..39f994c 100644
--- a/src/amplitudes/zlm.rs
+++ b/src/amplitudes/zlm.rs
@@ -122,7 +122,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate(&[]);
 
@@ -140,7 +140,7 @@ mod tests {
         let dataset = Arc::new(test_dataset());
         let expr = aid.into();
-        let evaluator = manager.load(&dataset, &expr);
+        let evaluator = manager.load(&expr, &dataset);
 
         let result = evaluator.evaluate_gradient(&[]);
         assert_eq!(result[0].len(), 0); // amplitude has no parameters
diff --git a/src/lib.rs b/src/lib.rs
index 2067839..c8cd751 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -205,7 +205,7 @@
 //! let mag = manager.register(Scalar::new("mag", parameter("magnitude"))).unwrap();
 //! let model = (mag * bw).norm_sqr();
 //!
-//! let nll = NLL::new(&manager, &ds_data, &ds_mc, &model);
+//! let nll = NLL::new(&manager, &model, &ds_data, &ds_mc);
 //! println!("Parameters names and order: {:?}", nll.parameters());
 //! let result = nll.evaluate(&[1.27, 0.120, 100.0]);
 //! println!("The extended negative log-likelihood is {}", result);
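`NLL::new` adopts the same convention, taking the model expression immediately after the manager and before the two datasets. A sketch of the updated construction (a hypothetical wrapper; the manager, model, and datasets are assumed to be built as in the crate-level example above, and module paths are assumed from this repository's layout):

```rust
use std::sync::Arc;

use laddu::amplitudes::{Expression, Manager};
use laddu::data::Dataset;
use laddu::likelihoods::NLL;

// Hypothetical wrapper around the new constructor order.
fn build_nll(
    manager: &Manager,
    model: &Expression,
    ds_data: &Arc<Dataset>,
    ds_accmc: &Arc<Dataset>,
) -> Box<NLL> {
    // Before: NLL::new(&manager, &ds_data, &ds_accmc, &model)
    NLL::new(manager, model, ds_data, ds_accmc)
}
```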
diff --git a/src/likelihoods.rs b/src/likelihoods.rs
index f82d92f..a92eb32 100644
--- a/src/likelihoods.rs
+++ b/src/likelihoods.rs
@@ -92,7 +92,7 @@ dyn_clone::clone_trait_object!(LikelihoodTerm);
 #[derive(Clone)]
 pub struct NLL {
     pub(crate) data_evaluator: Evaluator,
-    pub(crate) mc_evaluator: Evaluator,
+    pub(crate) accmc_evaluator: Evaluator,
 }
 
 impl NLL {
@@ -101,55 +101,55 @@ impl NLL {
     /// but for two [`Dataset`]s and a different method of evaluation.
     pub fn new(
         manager: &Manager,
-        ds_data: &Arc<Dataset>,
-        ds_mc: &Arc<Dataset>,
         expression: &Expression,
+        ds_data: &Arc<Dataset>,
+        ds_accmc: &Arc<Dataset>,
     ) -> Box<Self> {
         Self {
-            data_evaluator: manager.clone().load(ds_data, expression),
-            mc_evaluator: manager.clone().load(ds_mc, expression),
+            data_evaluator: manager.clone().load(expression, ds_data),
+            accmc_evaluator: manager.clone().load(expression, ds_accmc),
         }
         .into()
     }
 
     /// Activate an [`Amplitude`](`crate::amplitudes::Amplitude`) by name.
     pub fn activate<T: AsRef<str>>(&self, name: T) {
         self.data_evaluator.activate(&name);
-        self.mc_evaluator.activate(name);
+        self.accmc_evaluator.activate(&name);
     }
     /// Activate several [`Amplitude`](`crate::amplitudes::Amplitude`)s by name.
     pub fn activate_many<T: AsRef<str>>(&self, names: &[T]) {
         self.data_evaluator.activate_many(names);
-        self.mc_evaluator.activate_many(names);
+        self.accmc_evaluator.activate_many(names);
     }
     /// Activate all registered [`Amplitude`](`crate::amplitudes::Amplitude`)s.
     pub fn activate_all(&self) {
         self.data_evaluator.activate_all();
-        self.mc_evaluator.activate_all();
+        self.accmc_evaluator.activate_all();
     }
     /// Dectivate an [`Amplitude`](`crate::amplitudes::Amplitude`) by name.
     pub fn deactivate<T: AsRef<str>>(&self, name: T) {
         self.data_evaluator.deactivate(&name);
-        self.mc_evaluator.deactivate(name);
+        self.accmc_evaluator.deactivate(&name);
     }
     /// Deactivate several [`Amplitude`](`crate::amplitudes::Amplitude`)s by name.
     pub fn deactivate_many<T: AsRef<str>>(&self, names: &[T]) {
         self.data_evaluator.deactivate_many(names);
-        self.mc_evaluator.deactivate_many(names);
+        self.accmc_evaluator.deactivate_many(names);
     }
     /// Deactivate all registered [`Amplitude`](`crate::amplitudes::Amplitude`)s.
     pub fn deactivate_all(&self) {
         self.data_evaluator.deactivate_all();
-        self.mc_evaluator.deactivate_all();
+        self.accmc_evaluator.deactivate_all();
     }
     /// Isolate an [`Amplitude`](`crate::amplitudes::Amplitude`) by name (deactivate the rest).
     pub fn isolate<T: AsRef<str>>(&self, name: T) {
         self.data_evaluator.isolate(&name);
-        self.mc_evaluator.isolate(name);
+        self.accmc_evaluator.isolate(&name);
     }
     /// Isolate several [`Amplitude`](`crate::amplitudes::Amplitude`)s by name (deactivate the rest).
     pub fn isolate_many<T: AsRef<str>>(&self, names: &[T]) {
         self.data_evaluator.isolate_many(names);
-        self.mc_evaluator.isolate_many(names);
+        self.accmc_evaluator.isolate_many(names);
     }
 
     /// Project the stored [`Expression`] over the events in the [`Dataset`] stored by the
@@ -162,13 +162,26 @@ impl NLL {
     /// ```math
     /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
     /// ```
+    ///
+    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
+    /// regardless of the `mc_evaluator`.
     #[cfg(feature = "rayon")]
-    pub fn project(&self, parameters: &[Float]) -> Vec<Float> {
-        let mc_result = self.mc_evaluator.evaluate(parameters);
-        let n_mc = self.mc_evaluator.dataset.len() as Float;
-        mc_result
+    pub fn project(&self, parameters: &[Float], mc_evaluator: Option<Evaluator>) -> Vec<Float> {
+        let (events, result) = if let Some(mc_evaluator) = mc_evaluator {
+            (
+                &mc_evaluator.dataset.clone(),
+                mc_evaluator.evaluate(parameters),
+            )
+        } else {
+            (
+                &self.accmc_evaluator.dataset,
+                self.accmc_evaluator.evaluate(parameters),
+            )
+        };
+        let n_mc = self.accmc_evaluator.dataset.len() as Float;
+        result
             .par_iter()
-            .zip(self.mc_evaluator.dataset.par_iter())
+            .zip(events.par_iter())
             .map(|(l, e)| e.weight * l.re / n_mc)
             .collect()
     }
@@ -183,13 +196,26 @@ impl NLL {
     /// ```math
     /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) \frac{1}{N_{\text{MC}}}
     /// ```
+    ///
+    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
+    /// regardless of the `mc_evaluator`.
     #[cfg(not(feature = "rayon"))]
-    pub fn project(&self, parameters: &[Float]) -> Vec<Float> {
-        let mc_result = self.mc_evaluator.evaluate(parameters);
-        let n_mc = self.mc_evaluator.dataset.len() as Float;
-        mc_result
+    pub fn project(&self, parameters: &[Float], mc_evaluator: Option<Evaluator>) -> Vec<Float> {
+        let (events, result) = if let Some(mc_evaluator) = mc_evaluator {
+            (
+                &mc_evaluator.dataset.clone(),
+                mc_evaluator.evaluate(parameters),
+            )
+        } else {
+            (
+                &self.accmc_evaluator.dataset,
+                self.accmc_evaluator.evaluate(parameters),
+            )
+        };
+        let n_mc = self.accmc_evaluator.dataset.len() as Float;
+        result
             .iter()
-            .zip(self.mc_evaluator.dataset.iter())
+            .zip(events.iter())
             .map(|(l, e)| e.weight * l.re / n_mc)
             .collect()
     }
 
     /// Project the stored [`Expression`] over the events in the [`Dataset`] stored by the
@@ -208,21 +234,45 @@ impl NLL {
     /// ```math
     /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
     /// ```
+    ///
+    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
+    /// regardless of the `mc_evaluator`.
     #[cfg(feature = "rayon")]
-    pub fn project_with<T: AsRef<str>>(&self, parameters: &[Float], names: &[T]) -> Vec<Float> {
-        let current_active_data = self.data_evaluator.resources.read().active.clone();
-        let current_active_mc = self.mc_evaluator.resources.read().active.clone();
-        self.isolate_many(names);
-        let mc_result = self.mc_evaluator.evaluate(parameters);
-        let n_mc = self.mc_evaluator.dataset.len() as Float;
-        let res = mc_result
-            .par_iter()
-            .zip(self.mc_evaluator.dataset.par_iter())
-            .map(|(l, e)| e.weight * l.re / n_mc)
-            .collect();
-        self.data_evaluator.resources.write().active = current_active_data;
-        self.mc_evaluator.resources.write().active = current_active_mc;
-        res
+    pub fn project_with<T: AsRef<str>>(
+        &self,
+        parameters: &[Float],
+        names: &[T],
+        mc_evaluator: Option<Evaluator>,
+    ) -> Vec<Float> {
+        if let Some(mc_evaluator) = &mc_evaluator {
+            let current_active_mc = mc_evaluator.resources.read().active.clone();
+            mc_evaluator.isolate_many(names);
+            let events = mc_evaluator.dataset.clone();
+            let result = mc_evaluator.evaluate(parameters);
+            let n_mc = self.accmc_evaluator.dataset.len() as Float;
+            let res = result
+                .par_iter()
+                .zip(events.par_iter())
+                .map(|(l, e)| e.weight * l.re / n_mc)
+                .collect();
+            mc_evaluator.resources.write().active = current_active_mc;
+            res
+        } else {
+            let current_active_data = self.data_evaluator.resources.read().active.clone();
+            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
+            self.isolate_many(names);
+            let events = &self.accmc_evaluator.dataset;
+            let result = self.accmc_evaluator.evaluate(parameters);
+            let n_mc = self.accmc_evaluator.dataset.len() as Float;
+            let res = result
+                .par_iter()
+                .zip(events.par_iter())
+                .map(|(l, e)| e.weight * l.re / n_mc)
+                .collect();
+            self.data_evaluator.resources.write().active = current_active_data;
+            self.accmc_evaluator.resources.write().active = current_active_accmc;
+            res
+        }
     }
 
     /// Project the stored [`Expression`] over the events in the [`Dataset`] stored by the
@@ -239,21 +289,45 @@ impl NLL {
     /// ```math
     /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
     /// ```
+    ///
+    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
+    /// regardless of the `mc_evaluator`.
     #[cfg(not(feature = "rayon"))]
-    pub fn project_with<T: AsRef<str>>(&self, parameters: &[Float], names: &[T]) -> Vec<Float> {
-        let current_active_data = self.data_evaluator.resources.read().active.clone();
-        let current_active_mc = self.mc_evaluator.resources.read().active.clone();
-        self.isolate_many(names);
-        let mc_result = self.mc_evaluator.evaluate(parameters);
-        let n_mc = self.mc_evaluator.dataset.len() as Float;
-        let res = mc_result
-            .iter()
-            .zip(self.mc_evaluator.dataset.iter())
-            .map(|(l, e)| e.weight * l.re / n_mc)
-            .collect();
-        self.data_evaluator.resources.write().active = current_active_data;
-        self.mc_evaluator.resources.write().active = current_active_mc;
-        res
+    pub fn project_with<T: AsRef<str>>(
+        &self,
+        parameters: &[Float],
+        names: &[T],
+        mc_evaluator: Option<Evaluator>,
+    ) -> Vec<Float> {
+        if let Some(mc_evaluator) = &mc_evaluator {
+            let current_active_mc = mc_evaluator.resources.read().active.clone();
+            mc_evaluator.isolate_many(names);
+            let events = mc_evaluator.dataset.clone();
+            let result = mc_evaluator.evaluate(parameters);
+            let n_mc = self.accmc_evaluator.dataset.len() as Float;
+            let res = result
+                .iter()
+                .zip(events.iter())
+                .map(|(l, e)| e.weight * l.re / n_mc)
+                .collect();
+            mc_evaluator.resources.write().active = current_active_mc;
+            res
+        } else {
+            let current_active_data = self.data_evaluator.resources.read().active.clone();
+            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
+            self.isolate_many(names);
+            let events = &self.accmc_evaluator.dataset;
+            let result = self.accmc_evaluator.evaluate(parameters);
+            let n_mc = self.accmc_evaluator.dataset.len() as Float;
+            let res = result
+                .iter()
+                .zip(events.iter())
+                .map(|(l, e)| e.weight * l.re / n_mc)
+                .collect();
+            self.data_evaluator.resources.write().active = current_active_data;
+            self.accmc_evaluator.resources.write().active = current_active_accmc;
+            res
+        }
     }
 }
 
@@ -276,22 +350,21 @@ impl LikelihoodTerm for NLL {
    /// result is given by the following formula:
    ///
    /// ```math
-    /// NLL(\vec{p}) = -2 \left(\sum_{e \in \text{Data}} \text{weight}(e) \ln(\mathcal{L}(e) / N_{\text{DATA}}) - \frac{1}{N_{\text{MC}}} \sum_{e \in \text{MC}} \text{weight}(e) \mathcal{L}(e) \right)
+    /// NLL(\vec{p}) = -2 \left(\sum_{e \in \text{Data}} \text{weight}(e) \ln(\mathcal{L}(e)) - \frac{1}{N_{\text{MC}_A}} \sum_{e \in \text{MC}_A} \text{weight}(e) \mathcal{L}(e) \right)
     /// ```
     #[cfg(feature = "rayon")]
     fn evaluate(&self, parameters: &[Float]) -> Float {
         let data_result = self.data_evaluator.evaluate(parameters);
-        let n_data = self.data_evaluator.dataset.len() as Float;
-        let mc_result = self.mc_evaluator.evaluate(parameters);
-        let n_mc = self.mc_evaluator.dataset.len() as Float;
+        let mc_result = self.accmc_evaluator.evaluate(parameters);
+        let n_mc = self.accmc_evaluator.dataset.len() as Float;
         let data_term: Float = data_result
             .par_iter()
             .zip(self.data_evaluator.dataset.par_iter())
-            .map(|(l, e)| e.weight * Float::ln(l.re / n_data))
+            .map(|(l, e)| e.weight * Float::ln(l.re))
             .parallel_sum_with_accumulator::<Klein<Float>>();
         let mc_term: Float = mc_result
             .par_iter()
-            .zip(self.mc_evaluator.dataset.par_iter())
+            .zip(self.accmc_evaluator.dataset.par_iter())
             .map(|(l, e)| e.weight * l.re)
             .parallel_sum_with_accumulator::<Klein<Float>>();
         -2.0 * (data_term - mc_term / n_mc)
@@ -304,18 +377,18 @@ impl LikelihoodTerm for NLL {
     /// result is given by the following formula:
     ///
     /// ```math
-    /// NLL(\vec{p}) = -2 \left(\sum_{e \in \text{Data}} \text{weight}(e) \ln(\mathcal{L}(e) / N_{\text{DATA}}) - \frac{1}{N_{\text{MC}}} \sum_{e \in \text{MC}} \text{weight}(e) \mathcal{L}(e) \right)
+    /// NLL(\vec{p}) = -2 \left(\sum_{e \in \text{Data}} \text{weight}(e) \ln(\mathcal{L}(e)) - \frac{1}{N_{\text{MC}_A}} \sum_{e \in \text{MC}_A} \text{weight}(e) \mathcal{L}(e) \right)
     /// ```
     #[cfg(not(feature = "rayon"))]
     fn evaluate(&self, parameters: &[Float]) -> Float {
         let data_result = self.data_evaluator.evaluate(parameters);
         let n_data = self.data_evaluator.dataset.len() as Float;
-        let mc_result = self.mc_evaluator.evaluate(parameters);
-        let n_mc = self.mc_evaluator.dataset.len() as Float;
+        let mc_result = self.accmc_evaluator.evaluate(parameters);
+        let n_mc = self.accmc_evaluator.dataset.len() as Float;
         let data_term: Float = data_result
             .iter()
             .zip(self.data_evaluator.dataset.iter())
-            .map(|(l, e)| e.weight * Float::ln(l.re / n_data))
+            .map(|(l, e)| e.weight * Float::ln(l.re))
             .sum_with_accumulator::<Klein<Float>>();
         let mc_term: Float = mc_result
             .iter()
@@ -333,9 +406,9 @@ impl LikelihoodTerm for NLL {
     fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
         let data_resources = self.data_evaluator.resources.read();
         let data_parameters = Parameters::new(parameters, &data_resources.constants);
-        let mc_resources = self.mc_evaluator.resources.read();
+        let mc_resources = self.accmc_evaluator.resources.read();
         let mc_parameters = Parameters::new(parameters, &mc_resources.constants);
-        let n_mc = self.mc_evaluator.dataset.len() as Float;
+        let n_mc = self.accmc_evaluator.dataset.len() as Float;
         let data_term: DVector<Float> = self
             .data_evaluator
             .dataset
@@ -388,14 +461,14 @@ impl LikelihoodTerm for NLL {
             .sum(); // TODO: replace with custom implementation of accurate crate's trait
         let mc_term: DVector<Float> = self
-            .mc_evaluator
+            .accmc_evaluator
             .dataset
             .par_iter()
             .zip(mc_resources.caches.par_iter())
             .map(|(event, cache)| {
                 let mut gradient_values =
-                    vec![DVector::zeros(parameters.len()); self.mc_evaluator.amplitudes.len()];
-                self.mc_evaluator
+                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
+                self.accmc_evaluator
                     .amplitudes
                     .iter()
                     .zip(mc_resources.active.iter())
@@ -408,7 +481,7 @@ impl LikelihoodTerm for NLL {
                 (
                     event.weight,
                     AmplitudeValues(
-                        self.mc_evaluator
+                        self.accmc_evaluator
                             .amplitudes
                             .iter()
                             .zip(mc_resources.active.iter())
@@ -427,7 +500,7 @@ impl LikelihoodTerm for NLL {
             .map(|(weight, amp_vals, grad_vals)| {
                 (
                     weight,
-                    self.mc_evaluator
+                    self.accmc_evaluator
                         .expression
                         .evaluate_gradient(&amp_vals, &grad_vals),
                 )
@@ -449,9 +522,9 @@ impl LikelihoodTerm for NLL {
         let data_resources = self.data_evaluator.resources.read();
         let data_parameters = Parameters::new(parameters, &data_resources.constants);
         let n_data = self.data_evaluator.dataset.weighted_len();
-        let mc_resources = self.mc_evaluator.resources.read();
+        let mc_resources = self.accmc_evaluator.resources.read();
         let mc_parameters = Parameters::new(parameters, &mc_resources.constants);
-        let n_mc = self.mc_evaluator.dataset.len() as Float;
+        let n_mc = self.accmc_evaluator.dataset.len() as Float;
         let data_term: DVector<Float> = self
             .data_evaluator
             .dataset
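On the Rust side, both projection methods now accept an optional `Evaluator`, so a fit can be projected onto a different Monte Carlo sample (e.g. generated rather than accepted MC), while `None` keeps the old behavior of projecting onto the stored accepted MC; the normalization is always taken from the accepted-MC dataset. A sketch of the new calls (the fitted parameters, the extra evaluator, and the amplitude names are hypothetical; module paths are assumed from this repository's layout):

```rust
use laddu::amplitudes::Evaluator;
use laddu::likelihoods::NLL;
use laddu::Float;

// Hypothetical usage: `nll` comes from a finished fit, `genmc_eval` was loaded
// separately over a generated Monte Carlo dataset with the same model.
fn project_fit(nll: &NLL, best_fit: &[Float], genmc_eval: &Evaluator) {
    // `None` keeps the old behavior: project onto the stored accepted MC.
    let fit_weights = nll.project(best_fit, None);
    // Project onto the generated sample instead (still divided by N_accmc).
    let gen_weights = nll.project(best_fit, Some(genmc_eval.clone()));
    // Isolate a subset of amplitudes first ("amp_a"/"amp_b" are placeholder names).
    let partial_weights = nll.project_with(best_fit, &["amp_a", "amp_b"], None);
    println!(
        "{} accepted-MC weights, {} generated-MC weights, {} isolated weights",
        fit_weights.len(),
        gen_weights.len(),
        partial_weights.len()
    );
}
```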
diff --git a/src/python.rs b/src/python.rs
index 43d128c..e0704ee 100644
--- a/src/python.rs
+++ b/src/python.rs
@@ -1690,10 +1690,10 @@ pub(crate) mod laddu {
        ///
        /// Parameters
        /// ----------
-        /// dataset : Dataset
-        ///     The Dataset to use in precalculation
        /// expression : Expression
        ///     The expression to use in precalculation
+        /// dataset : Dataset
+        ///     The Dataset to use in precalculation
        ///
        /// Returns
        /// -------
@@ -1708,8 +1708,8 @@ pub(crate) mod laddu {
        ///     expression. These parameters will have no effect on evaluation, but they must be
        ///     included in function calls.
        ///
-        fn load(&self, dataset: &Dataset, expression: &Expression) -> Evaluator {
-            Evaluator(self.0.load(&dataset.0, &expression.0))
+        fn load(&self, expression: &Expression, dataset: &Dataset) -> Evaluator {
+            Evaluator(self.0.load(&expression.0, &dataset.0))
        }
    }
 
@@ -2019,8 +2019,10 @@ pub(crate) mod laddu {
    ///     The Manager to use for precalculation
    /// ds_data : Dataset
    ///     A Dataset representing true signal data
-    /// ds_mc : Dataset
-    ///     A Dataset of physically flat Monte Carlo data used for normalization
+    /// ds_accmc : Dataset
+    ///     A Dataset of physically flat accepted Monte Carlo data used for normalization
+    /// gen_len: int, optional
+    ///     The size of the generated dataset (will use ``len(ds_accmc)`` if None)
    /// expression : Expression
    ///     The Expression to evaluate
    ///
@@ -2031,17 +2033,18 @@ pub(crate) mod laddu {
    #[pymethods]
    impl NLL {
        #[new]
+        #[pyo3(signature = (manager, expression, ds_data, ds_accmc))]
        fn new(
            manager: &Manager,
-            ds_data: &Dataset,
-            ds_mc: &Dataset,
            expression: &Expression,
+            ds_data: &Dataset,
+            ds_accmc: &Dataset,
        ) -> Self {
            Self(rust::likelihoods::NLL::new(
                &manager.0,
-                &ds_data.0,
-                &ds_mc.0,
                &expression.0,
+                &ds_data.0,
+                &ds_accmc.0,
            ))
        }
        /// The underlying signal dataset used in calculating the NLL
@@ -2054,15 +2057,15 @@ pub(crate) mod laddu {
        fn data(&self) -> Dataset {
            Dataset(self.0.data_evaluator.dataset.clone())
        }
-        /// The underlying Monte Carlo dataset used in calculating the NLL
+        /// The underlying accepted Monte Carlo dataset used in calculating the NLL
        ///
        /// Returns
        /// -------
        /// Dataset
        ///
        #[getter]
-        fn mc(&self) -> Dataset {
-            Dataset(self.0.mc_evaluator.dataset.clone())
+        fn accmc(&self) -> Dataset {
+            Dataset(self.0.accmc_evaluator.dataset.clone())
        }
        /// Turn an ``NLL`` into a term that can be used by a ``LikelihoodManager``
        ///
@@ -2177,7 +2180,7 @@ pub(crate) mod laddu {
        ///
        /// This is defined as
        ///
-        /// .. math:: NLL(\vec{p}; D, MC) = -2 \left( \sum_{e \in D} (e_w \log(\mathcal{L}(e) / N_D)) - \frac{1}{N_{MC}} \sum_{e \in MC} (e_w \mathcal{L}(e)) \right)
+        /// .. math:: NLL(\vec{p}; D, MC) = -2 \left( \sum_{e \in D} (e_w \log(\mathcal{L}(e))) - \frac{1}{N_{MC}} \sum_{e \in MC} (e_w \mathcal{L}(e)) \right)
        ///
        /// Parameters
        /// ----------
@@ -2202,18 +2205,27 @@ pub(crate) mod laddu {
        /// ----------
        /// parameters : list of float
        ///     The values to use for the free parameters
+        /// mc_evaluator: Evaluator, optional
+        ///     Project using the given Evaluator or use the stored ``accmc`` if None
        ///
        /// Returns
        /// -------
        /// result : array_like
        ///     Weights for every Monte Carlo event which represent the fit to data
        ///
+        #[pyo3(signature = (parameters, *, mc_evaluator = None))]
        fn project<'py>(
            &self,
            py: Python<'py>,
            parameters: Vec<Float>,
+            mc_evaluator: Option<Evaluator>,
        ) -> Bound<'py, PyArray1<Float>> {
-            PyArray1::from_slice_bound(py, &self.0.project(&parameters))
+            PyArray1::from_slice_bound(
+                py,
+                &self
+                    .0
+                    .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone())),
+            )
        }
 
        /// Project the model over the Monte Carlo dataset with the given parameter values, first
@@ -2230,6 +2242,8 @@ pub(crate) mod laddu {
        ///     The values to use for the free parameters
        /// arg : str or list of str
        ///     Names of Amplitudes to be isolated
+        /// mc_evaluator: Evaluator, optional
+        ///     Project using the given Evaluator or use the stored ``accmc`` if None
        ///
        /// Returns
        /// -------
@@ -2241,11 +2255,13 @@ pub(crate) mod laddu {
        /// TypeError
        ///     If `arg` is not a str or list of str
        ///
+        #[pyo3(signature = (parameters, arg, *, mc_evaluator = None))]
        fn project_with<'py>(
            &self,
            py: Python<'py>,
            parameters: Vec<Float>,
            arg: &Bound<'_, PyAny>,
+            mc_evaluator: Option<Evaluator>,
        ) -> PyResult<Bound<'py, PyArray1<Float>>> {
            let names = if let Ok(string_arg) = arg.extract::<String>() {
                vec![string_arg]
            } else {
                ...
            };
            Ok(PyArray1::from_slice_bound(
                py,
-                &self.0.project_with(&parameters, &names),
+                &self.0.project_with(
+                    &parameters,
+                    &names,
+                    mc_evaluator.map(|pyeval| pyeval.0.clone()),
+                ),
            ))
        }