diff --git a/pineappl/src/convolutions.rs b/pineappl/src/convolutions.rs
index 98a2b594..235c8d6e 100644
--- a/pineappl/src/convolutions.rs
+++ b/pineappl/src/convolutions.rs
@@ -52,11 +52,6 @@ impl<'a> ConvolutionCache<'a> {
     }
 
     pub(crate) fn setup(&mut self, grid: &Grid, xi: &[(f64, f64, f64)]) -> Result<(), ()> {
-        let convolutions = grid.convolutions();
-
-        // TODO: the following code only works with exactly two convolutions
-        assert_eq!(convolutions.len(), 2);
-
         self.perm = grid
             .convolutions()
             .iter()
diff --git a/pineappl/src/empty_subgrid.rs b/pineappl/src/empty_subgrid.rs
index 191aadd1..0238d262 100644
--- a/pineappl/src/empty_subgrid.rs
+++ b/pineappl/src/empty_subgrid.rs
@@ -81,7 +81,7 @@ mod tests {
     #[should_panic(expected = "EmptySubgridV1 doesn't support the fill operation")]
     fn fill() {
         let mut subgrid = EmptySubgridV1;
-        subgrid.fill(&v0::default_interps(), &[0.0; 3], 0.0);
+        subgrid.fill(&v0::default_interps(2), &[0.0; 3], 0.0);
     }
 
     #[test]
diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs
index 71cec721..1bd41827 100644
--- a/pineappl/src/evolution.rs
+++ b/pineappl/src/evolution.rs
@@ -12,7 +12,7 @@ use float_cmp::approx_eq;
 use itertools::izip;
 use itertools::Itertools;
 use ndarray::linalg;
-use ndarray::{s, Array1, Array2, Array3, ArrayD, ArrayView1, ArrayView4, Axis, Ix2};
+use ndarray::{s, Array1, Array2, Array3, ArrayD, ArrayView1, ArrayView4, Axis, Ix1, Ix2};
 use std::iter;
 
 /// Number of ULPS used to de-duplicate grid values in [`Grid::evolve_info`].
@@ -980,8 +980,6 @@ pub(crate) fn evolve_slice_with_many(
     let dim: Vec<_> = infos.iter().map(|info| info.x0.len()).collect();
 
     for subgrids_oc in grid.subgrids().axis_iter(Axis(1)) {
-        assert_eq!(infos[0].x0.len(), infos[1].x0.len());
-
         let mut tables = vec![ArrayD::zeros(dim.clone()); channels0.len()];
 
         for (subgrids_o, channel1) in subgrids_oc.axis_iter(Axis(1)).zip(grid.channels()) {
@@ -1020,19 +1018,13 @@
             }
         }
 
-        let mut tmp = Array2::zeros((last_x1[0].len(), infos[1].x0.len()));
-
-        for (pids1, factor) in channel1
-            .entry()
-            .iter()
-            .map(|(pids1, factor)| ([pids1[0], pids1[1]], factor))
-        {
+        for (pids1, factor) in channel1.entry() {
             for (fk_table, ops) in channels0
                 .iter()
                 .zip(tables.iter_mut())
                 .filter_map(|(pids0, fk_table)| {
-                    izip!(pids0, &pids1, &pids01, &eko_slices)
+                    izip!(pids0, pids1, &pids01, &eko_slices)
                         .map(|(&pid0, &pid1, pids, slices)| {
                             pids.iter().zip(slices).find_map(|(&(p0, p1), op)| {
                                 ((p0 == pid0) && (p1 == pid1)).then_some(op)
@@ -1043,20 +1035,22 @@
                     .map(|ops| (fk_table, ops))
                 })
             {
-                general_tensor_mul(*factor, &array, &ops, &mut tmp, fk_table);
+                general_tensor_mul(*factor, &array, &ops, fk_table);
             }
         }
     }
 
+    // TODO: generalize this for arbitrary scales and x values
+    let mut node_values = vec![NodeValues::UseThese(vec![infos[0].fac0])];
+
+    for info in infos {
+        node_values.push(NodeValues::UseThese(info.x0.clone()));
+    }
+
     sub_fk_tables.extend(tables.into_iter().map(|table| {
         PackedQ1X2SubgridV1::new(
             PackedArray::from(table.insert_axis(Axis(0)).view()),
-            // TODO: generalize this for arbitrary scales and x values
-            vec![
-                NodeValues::UseThese(vec![infos[0].fac0]),
-                NodeValues::UseThese(infos[0].x0.clone()),
-                NodeValues::UseThese(infos[1].x0.clone()),
-            ],
+            node_values.clone(),
        )
        .into()
    }));
@@ -1067,8 +1061,8 @@
             .into_shape((1, grid.bin_info().bins(), channels0.len()))
             .unwrap(),
         channels0
-            .iter()
-            .map(|c| channel![c[0], c[1], 1.0])
+            .into_iter()
+            .map(|c| Channel::new(vec![(c, 1.0)]))
             .collect(),
     ))
 }
@@ -1077,16 +1071,25 @@ fn general_tensor_mul(
     factor: f64,
     array: &ArrayD<f64>,
     ops: &[&Array2<f64>],
-    tmp: &mut Array2<f64>,
     fk_table: &mut ArrayD<f64>,
 ) {
-    // TODO: generalize this to n dimensions
-    assert_eq!(array.shape().len(), 2);
-    let array = array.view().into_dimensionality::<Ix2>().unwrap();
-    let mut fk_table = fk_table.view_mut().into_dimensionality::<Ix2>().unwrap();
-
-    // tmp = array * ops[1]^T
-    linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, tmp);
-    // fk_table += factor * ops[0] * tmp
-    linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, &mut fk_table);
+    match array.shape().len() {
+        1 => {
+            let array = array.view().into_dimensionality::<Ix1>().unwrap();
+            let mut fk_table = fk_table.view_mut().into_dimensionality::<Ix1>().unwrap();
+            fk_table.scaled_add(factor, &ops[0].dot(&array));
+        }
+        2 => {
+            let array = array.view().into_dimensionality::<Ix2>().unwrap();
+            let mut fk_table = fk_table.view_mut().into_dimensionality::<Ix2>().unwrap();
+
+            let mut tmp = Array2::zeros((array.shape()[0], ops[1].shape()[0]));
+            // tmp = array * ops[1]^T
+            linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, &mut tmp);
+            // fk_table += factor * ops[0] * tmp
+            linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, &mut fk_table);
+        }
+        // TODO: generalize this to n dimensions
+        _ => unimplemented!(),
+    }
 }
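The reworked general_tensor_mul dispatches on the dimensionality of the subgrid array: with one convolution it contracts a vector with a single evolution operator, with two convolutions it keeps the previous double matrix product. The following is a minimal standalone sketch of the same contraction pattern written against plain ndarray; the function names, argument names and shapes are chosen for illustration only and are not part of the patch.

    use ndarray::{linalg, Array1, Array2};

    // fk += factor * op0 . array   -- the one-convolution (1-D) case
    // op0: (n_x0, n_x1), array: (n_x1,), fk: (n_x0,)
    fn contract_1d(factor: f64, op0: &Array2<f64>, array: &Array1<f64>, fk: &mut Array1<f64>) {
        fk.scaled_add(factor, &op0.dot(array));
    }

    // fk += factor * op0 . array . op1^T   -- the two-convolution (2-D) case
    // op0: (m, a), array: (a, b), op1: (c, b), fk: (m, c)
    fn contract_2d(
        factor: f64,
        op0: &Array2<f64>,
        op1: &Array2<f64>,
        array: &Array2<f64>,
        fk: &mut Array2<f64>,
    ) {
        let mut tmp = Array2::<f64>::zeros((array.nrows(), op1.nrows()));
        // tmp = array * op1^T
        linalg::general_mat_mul(1.0, array, &op1.t(), 0.0, &mut tmp);
        // fk += factor * op0 * tmp
        linalg::general_mat_mul(factor, op0, &tmp, 1.0, fk);
    }

Allocating tmp inside the 2-D branch (instead of passing it in, as before) trades a small per-call allocation for a signature that no longer depends on the number of convolutions.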
diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs
index bae8becf..3c8e881b 100644
--- a/pineappl/src/grid.rs
+++ b/pineappl/src/grid.rs
@@ -1275,15 +1275,16 @@ impl Grid {
             operator.dim(),
         );
 
-        let view = operator.view();
-
-        let (subgrids, channels) = if self.convolutions()[0] != Convolution::None
-            && self.convolutions()[1] != Convolution::None
-        {
-            evolution::evolve_slice_with_two(self, &view, &info, order_mask, xi, alphas_table)
-        } else {
-            evolution::evolve_slice_with_one(self, &view, &info, order_mask, xi, alphas_table)
-        }?;
+        let views = vec![operator.view(); self.convolutions().len()];
+        let infos = vec![info.clone(); self.convolutions().len()];
+        let (subgrids, channels) = evolution::evolve_slice_with_many(
+            self,
+            &views,
+            &infos,
+            order_mask,
+            (xi.0, xi.1, 1.0),
+            alphas_table,
+        )?;
 
         let rhs = Self {
             subgrids,
@@ -1734,7 +1735,7 @@ mod tests {
             vec![Order::new(0, 2, 0, 0, 0)],
             vec![0.0, 1.0],
             vec![Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1755,7 +1756,7 @@ mod tests {
             vec![Order::new(0, 2, 0, 0, 0)],
             vec![0.0, 0.25, 0.5, 0.75, 1.0],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1778,7 +1779,7 @@ mod tests {
             vec![Order::new(1, 2, 0, 0, 0), Order::new(1, 2, 0, 1, 0)],
             vec![0.0, 0.25, 0.5, 0.75, 1.0],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1806,7 +1807,7 @@ mod tests {
             vec![Order::new(0, 2, 0, 0, 0)],
             vec![0.0, 0.25, 0.5, 0.75, 1.0],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1832,7 +1833,7 @@ mod tests {
             ],
             vec![0.0, 0.25, 0.5, 0.75, 1.0],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1865,7 +1866,7 @@ mod tests {
             vec![Order::new(0, 2, 0, 0, 0)],
             vec![0.0, 0.25, 0.5, 0.75, 1.0],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1884,7 +1885,7 @@ mod tests {
             vec![Order::new(0, 2, 0, 0, 0)],
             vec![0.0, 0.25, 0.5, 0.75, 1.0],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1914,7 +1915,7 @@ mod tests {
             vec![Order::new(0, 2, 0, 0, 0)],
             vec![0.0, 0.25, 0.5],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1937,7 +1938,7 @@ mod tests {
             vec![Order::new(0, 2, 0, 0, 0)],
             vec![0.5, 0.75, 1.0],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
@@ -1972,7 +1973,7 @@ mod tests {
             }],
             vec![0.0, 1.0],
             vec![Convolution::UnpolPDF(2212); 2],
-            v0::default_interps(),
+            v0::default_interps(2),
             vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
             Scales {
                 ren: ScaleFuncForm::Scale(0),
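The evolve path now builds one operator view and one info object per convolution and always calls evolve_slice_with_many. Replicating the view is cheap because an ndarray view only borrows the operator data. A tiny standalone sketch of that replication (the shape is invented for the illustration):

    use ndarray::Array4;

    fn main() {
        // shape chosen only for the illustration
        let operator = Array4::<f64>::zeros((2, 3, 2, 3));
        // one borrowed view per convolution; cloning a view never copies the operator data
        let views = vec![operator.view(); 2];
        assert_eq!(views.len(), 2);
    }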
diff --git a/pineappl/src/lagrange_subgrid.rs b/pineappl/src/lagrange_subgrid.rs
index feb5e137..3abbfeac 100644
--- a/pineappl/src/lagrange_subgrid.rs
+++ b/pineappl/src/lagrange_subgrid.rs
@@ -127,7 +127,7 @@ mod tests {
 
     #[test]
     fn fill_zero() {
-        let interps = v0::default_interps();
+        let interps = v0::default_interps(2);
         let mut subgrid = LagrangeSubgridV2::new(&interps);
 
         subgrid.fill(&interps, &[1000.0, 0.5, 0.5], 0.0);
@@ -148,7 +148,7 @@ mod tests {
 
     #[test]
     fn fill_outside_range() {
-        let interps = v0::default_interps();
+        let interps = v0::default_interps(2);
         let mut subgrid = LagrangeSubgridV2::new(&interps);
 
         subgrid.fill(&interps, &[1000.0, 1e-10, 0.5], 0.0);
@@ -169,7 +169,7 @@ mod tests {
 
     #[test]
     fn fill() {
-        let interps = v0::default_interps();
+        let interps = v0::default_interps(2);
         let mut subgrid = LagrangeSubgridV2::new(&interps);
 
         subgrid.fill(&interps, &[1000.0, 0.5, 0.5], 1.0);
diff --git a/pineappl/src/packed_subgrid.rs b/pineappl/src/packed_subgrid.rs
index 4b604580..1a0a4acb 100644
--- a/pineappl/src/packed_subgrid.rs
+++ b/pineappl/src/packed_subgrid.rs
@@ -191,7 +191,7 @@ mod tests {
             PackedArray::new(vec![0, 0, 0]),
             vec![NodeValues::UseThese(Vec::new()); 3],
         );
-        subgrid.fill(&v0::default_interps(), &[0.0; 3], 0.0);
+        subgrid.fill(&v0::default_interps(2), &[0.0; 3], 0.0);
     }
 
     #[test]
diff --git a/pineappl/src/v0.rs b/pineappl/src/v0.rs
index 6c65ab43..1bf6e0e4 100644
--- a/pineappl/src/v0.rs
+++ b/pineappl/src/v0.rs
@@ -13,27 +13,19 @@ use pineappl_v0::grid::Grid as GridV0;
 use std::io::BufRead;
 use std::iter;
 
-pub fn default_interps() -> Vec<Interp> {
-    vec![
-        Interp::new(
-            1e2,
-            1e8,
-            40,
-            3,
-            ReweightMeth::NoReweight,
-            Map::ApplGridH0,
-            InterpMeth::Lagrange,
-        ),
-        Interp::new(
-            2e-7,
-            1.0,
-            50,
-            3,
-            ReweightMeth::ApplGridX,
-            Map::ApplGridF2,
-            InterpMeth::Lagrange,
-        ),
-        Interp::new(
+pub fn default_interps(convolutions: usize) -> Vec<Interp> {
+    let mut interps = vec![Interp::new(
+        1e2,
+        1e8,
+        40,
+        3,
+        ReweightMeth::NoReweight,
+        Map::ApplGridH0,
+        InterpMeth::Lagrange,
+    )];
+
+    for _ in 0..convolutions {
+        interps.push(Interp::new(
             2e-7,
             1.0,
             50,
@@ -41,16 +33,26 @@ pub fn default_interps() -> Vec<Interp> {
             ReweightMeth::ApplGridX,
             Map::ApplGridF2,
             InterpMeth::Lagrange,
-        ),
-    ]
+        ));
+    }
+
+    interps
 }
 
 pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result<Grid, GridError> {
     use pineappl_v0::subgrid::Subgrid as _;
 
     let grid = GridV0::read(&mut reader).map_err(|err| GridError::Other(err.into()))?;
+    let convolutions = read_convolutions_from_metadata(&grid);
+
+    // TODO: read in flexible-scale grids properly
+    let mut kinematics = vec![Kinematics::Scale(0), Kinematics::X(0)];
+    if convolutions[0].is_some() && convolutions[1].is_some() {
+        kinematics.push(Kinematics::X(1));
+    }
+
+    assert_eq!(convolutions.len(), 2);
 
-    // TODO: convert differently if grid only has one convolution
     let result = Grid {
         subgrids: Array3::from_shape_vec(
             grid.subgrids().dim(),
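With the new parameter, default_interps returns one scale interpolation plus one x interpolation per convolution, so the two callers above always get the familiar three-object layout for two convolutions. A quick illustration of the expected lengths; this test is hypothetical (not part of the patch) and assumes it lives next to default_interps in pineappl/src/v0.rs:

    #[test]
    fn default_interps_counts() {
        // one convolution (e.g. a DIS-like grid): scale + x
        assert_eq!(default_interps(1).len(), 2);
        // two convolutions (hadron-hadron): scale + x1 + x2
        assert_eq!(default_interps(2).len(), 3);
    }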
@@ -69,24 +71,40 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result<Grid, GridError> {
                                frg: -1.0,
                            })
                            .collect();
-                    let x1_grid = subgrid.x1_grid().into_owned();
-                    let x2_grid = subgrid.x2_grid().into_owned();
-                    let mut array =
-                        PackedArray::new(vec![mu2_grid.len(), x1_grid.len(), x2_grid.len()]);
-                    for (index, v) in subgrid.indexed_iter() {
-                        array[<[usize; 3]>::from(index)] = v;
+
+                    let mut dim = vec![mu2_grid.len()];
+                    if convolutions[0].is_some() {
+                        dim.push(subgrid.x1_grid().len());
                     }
-                    PackedQ1X2SubgridV1::new(
-                        array,
-                        vec![
-                            NodeValues::UseThese(
-                                mu2_grid.iter().map(|&Mu2 { ren, .. }| ren).collect(),
-                            ),
-                            NodeValues::UseThese(x1_grid),
-                            NodeValues::UseThese(x2_grid),
-                        ],
-                    )
-                    .into()
+                    if convolutions[1].is_some() {
+                        dim.push(subgrid.x2_grid().len());
+                    }
+                    let mut array = PackedArray::new(dim);
+
+                    if convolutions[0].is_none() {
+                        for (index, v) in subgrid.indexed_iter() {
+                            array[[index.0, index.2]] = v;
+                        }
+                    } else if convolutions[1].is_none() {
+                        for (index, v) in subgrid.indexed_iter() {
+                            array[[index.0, index.1]] = v;
+                        }
+                    } else {
+                        for (index, v) in subgrid.indexed_iter() {
+                            array[<[usize; 3]>::from(index)] = v;
+                        }
+                    }
+
+                    let mut node_values = vec![NodeValues::UseThese(
+                        mu2_grid.iter().map(|&Mu2 { ren, .. }| ren).collect(),
+                    )];
+                    if convolutions[0].is_some() {
+                        node_values.push(NodeValues::UseThese(subgrid.x1_grid().into_owned()));
+                    }
+                    if convolutions[1].is_some() {
+                        node_values.push(NodeValues::UseThese(subgrid.x2_grid().into_owned()));
+                    }
+                    PackedQ1X2SubgridV1::new(array, node_values).into()
                 }
            })
            .collect(),
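The v0 reader now sizes the packed array by the convolutions that are actually present and drops the x index that belongs to a missing one when copying values over. The mapping it implements can be spelled out as follows; this helper is purely illustrative and does not exist in the patch:

    // map a v0 (scale, x1, x2) index to the index of the new, possibly smaller array
    fn packed_index(index: (usize, usize, usize), has_conv: [bool; 2]) -> Vec<usize> {
        match has_conv {
            // only the second convolution exists: keep (scale, x2)
            [false, true] => vec![index.0, index.2],
            // only the first convolution exists: keep (scale, x1)
            [true, false] => vec![index.0, index.1],
            // both exist: keep the full (scale, x1, x2) index
            [true, true] => vec![index.0, index.1, index.2],
            // v0 grids always carry at least one convolution
            [false, false] => unreachable!(),
        }
    }

The node_values vector is built with the same conditions, so the array dimensions and their node values always stay in sync.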
@@ -96,7 +114,23 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result<Grid, GridError> {
         channels: grid
             .channels()
             .iter()
-            .map(|c| Channel::new(c.entry().iter().map(|&(a, b, f)| (vec![a, b], f)).collect()))
+            .map(|c| {
+                Channel::new(
+                    c.entry()
+                        .iter()
+                        .map(|&(a, b, f)| {
+                            let mut pids = Vec::new();
+                            if convolutions[0].is_some() {
+                                pids.push(a);
+                            }
+                            if convolutions[1].is_some() {
+                                pids.push(b);
+                            };
+                            (pids, f)
+                        })
+                        .collect(),
+                )
+            })
             .collect(),
         // TODO: change this member to something much easier to handle
         bin_limits: BinLimits::new(if grid.remapper().is_none() {
@@ -127,7 +161,12 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result<Grid, GridError> {
             .unwrap_or_default()
             .into_iter()
             .collect(),
-        convolutions: read_convolutions_from_metadata(&grid),
+        interps: default_interps(convolutions.len()),
+        convolutions: convolutions
+            .into_iter()
+            //.map(|conv| conv.unwrap_or(Convolution::None))
+            .filter_map(|conv| conv)
+            .collect(),
         pid_basis: grid
             .key_values()
             .and_then(|kv| kv.get("lumi_id_types"))
@@ -144,9 +183,7 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result<Grid, GridError> {
             // and limits we should be able to do the same without error
             BinRemapper::new(r.normalizations().to_vec(), r.limits().to_vec()).unwrap()
         }),
-        // TODO: read in flexible-scale grids properly
-        kinematics: vec![Kinematics::Scale(0), Kinematics::X1, Kinematics::X2],
-        interps: default_interps(),
+        kinematics,
         scales: Scales {
             // TODO: read in flexible-scale grids properly
             ren: ScaleFuncForm::Scale(0),
@@ -160,10 +197,10 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result<Grid, GridError> {
     Ok(result)
 }
 
-fn read_convolutions_from_metadata(grid: &GridV0) -> Vec<Convolution> {
+fn read_convolutions_from_metadata(grid: &GridV0) -> Vec<Option<Convolution>> {
     grid.key_values().map_or_else(
         // if there isn't any metadata, we assume two unpolarized proton-PDFs are used
-        || vec![Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)],
+        || vec![Some(Convolution::UnpolPDF(2212)); 2],
         |kv| {
             // file format v0 only supports exactly two convolutions
             (1..=2)
@@ -177,11 +214,11 @@ fn read_convolutions_from_metadata(grid: &GridV0) -> Vec<Convolution> {
                     kv.get(&format!("convolution_type_{index}"))
                         .map(String::as_str),
                 ) {
-                    (_, Some("None")) => Convolution::None,
-                    (Some(Ok(pid)), Some("UnpolPDF")) => Convolution::UnpolPDF(pid),
-                    (Some(Ok(pid)), Some("PolPDF")) => Convolution::PolPDF(pid),
-                    (Some(Ok(pid)), Some("UnpolFF")) => Convolution::UnpolFF(pid),
-                    (Some(Ok(pid)), Some("PolFF")) => Convolution::PolFF(pid),
+                    (_, Some("None")) => None,
+                    (Some(Ok(pid)), Some("UnpolPDF")) => Some(Convolution::UnpolPDF(pid)),
+                    (Some(Ok(pid)), Some("PolPDF")) => Some(Convolution::PolPDF(pid)),
+                    (Some(Ok(pid)), Some("UnpolFF")) => Some(Convolution::UnpolFF(pid)),
+                    (Some(Ok(pid)), Some("PolFF")) => Some(Convolution::PolFF(pid)),
                     (None, None) => {
                         // if these key-value pairs are missing use the old metadata
                         match kv
@@ -199,13 +236,9 @@ fn read_convolutions_from_metadata(grid: &GridV0) -> Vec<Convolution> {
                                 )
                             });
 
-                            if condition {
-                                Convolution::UnpolPDF(pid)
-                            } else {
-                                Convolution::None
-                            }
+                            condition.then_some(Convolution::UnpolPDF(pid))
                         }
-                        None => Convolution::UnpolPDF(2212),
+                        None => Some(Convolution::UnpolPDF(2212)),
                        Some(Err(err)) => panic!(
                            "metadata 'initial_state_{index}' could not be parsed: {err}"
                        ),
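read_convolutions_from_metadata now reports a missing convolution as None instead of Convolution::None, and the grid keeps only the entries that are actually present. A small sketch of the filter_map(|conv| conv) step used above, assuming the crate's Convolution enum is in scope (the helper itself is not part of the patch):

    fn kept_convolutions(read: Vec<Option<Convolution>>) -> Vec<Convolution> {
        // `filter_map(|conv| conv)` simply drops the `None` entries
        read.into_iter().filter_map(|conv| conv).collect()
    }

    // kept_convolutions(vec![Some(Convolution::UnpolPDF(2212)), None]).len() == 1

As a consequence, a v0 grid declaring one "None" convolution is converted into a v1 grid with a single convolution, matching the one-x-dimension subgrids built earlier.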
diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs
index fbe86470..9554470e 100644
--- a/pineappl_cli/src/export/applgrid.rs
+++ b/pineappl_cli/src/export/applgrid.rs
@@ -124,14 +124,8 @@ pub fn convert_into_applgrid(
     }
 
     let lumis = grid.channels().len();
-    let has_pdf1 = grid.convolutions()[0] != Convolution::None;
-    let has_pdf2 = grid.convolutions()[1] != Convolution::None;
-    // let (has_pdf1, has_pdf2) = match (grid.convolutions()[0].clone(), grid.convolutions()[1].clone()) {
-    //     (Convolution::None, Convolution::None) => unreachable!(),
-    //     (Convolution::None, _) => (false, true),
-    //     (_, Convolution::None) => (true, false),
-    //     _ => (true, true),
-    // };
+    let has_pdf1 = grid.convolutions().get(0).is_some();
+    let has_pdf2 = grid.convolutions().get(1).is_some();
 
     // TODO: check that PDG MC IDs are used
 
@@ -378,9 +372,9 @@ pub fn convert_into_applgrid(
             let mut weightgrid = ffi::igrid_weightgrid(igrid.pin_mut(), channel);
 
             for (indices, value) in subgrid.indexed_iter() {
-                let &[iq2, ix1, ix2] = indices.as_slice() else {
-                    unimplemented!()
-                };
+                // TODO: here we assume that all X are consecutive starting from the second
+                // element and are in ascending order
+                let iq2 = indices[0];
                 let appl_q2_idx = appl_q2_idx[iq2];
 
                 if appl_q2_idx == -1 {
@@ -406,9 +400,9 @@ pub fn convert_into_applgrid(
                     ffi::sparse_matrix_set(
                         weightgrid.as_mut(),
                         appl_q2_idx,
-                        appl_x1_idx[ix1],
+                        appl_x1_idx[indices[1]],
                         if has_pdf1 && has_pdf2 {
-                            appl_x2_idx[ix2]
+                            appl_x2_idx[indices[2]]
                        } else {
                            0
                        },
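The exporter no longer destructures a fixed three-element index; it relies on the layout documented in the TODO above, namely that the scale index comes first and any x indices follow it. Spelled out as a hypothetical helper (not part of the patch):

    // split a subgrid index into the scale index and the trailing x indices
    fn split_scale_and_x(indices: &[usize]) -> (usize, &[usize]) {
        (indices[0], &indices[1..])
    }

    // two convolutions: [iq2, ix1, ix2] -> (iq2, [ix1, ix2])
    // one convolution:  [iq2, ix]       -> (iq2, [ix])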