From 649588c4a900a778e0972e3b7a2dc04c33eae907 Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Thu, 18 Jul 2024 12:57:25 -0230 Subject: [PATCH 01/12] lint kernels --- sarracen/kernels/base_kernel.py | 14 +-- sarracen/kernels/cubic_spline.py | 8 +- sarracen/kernels/cubic_spline_exact.py | 137 ++++++++++++++++--------- sarracen/kernels/quartic_spline.py | 12 +-- sarracen/kernels/quintic_spline.py | 12 +-- 5 files changed, 115 insertions(+), 68 deletions(-) diff --git a/sarracen/kernels/base_kernel.py b/sarracen/kernels/base_kernel.py index 28e099c..e7f5b1c 100644 --- a/sarracen/kernels/base_kernel.py +++ b/sarracen/kernels/base_kernel.py @@ -36,7 +36,7 @@ def w(q: float, dim: int) -> float: return 1 def get_column_kernel(self, samples: int = 1000) -> np.ndarray: - """ Generate a 2D column kernel approximation, by integrating a given 3D kernel over the z-axis. + """ Integrate a given 3D kernel over the z-axis. Parameters ---------- @@ -50,7 +50,9 @@ def get_column_kernel(self, samples: int = 1000) -> np.ndarray: Examples -------- Use np.linspace and np.interp to use this column kernel approximation: - np.interp(q, np.linspace(0, kernel.get_radius(), samples), column_kernel) + np.interp(q, + np.linspace(0, kernel.get_radius(), samples), + column_kernel) """ if samples == 1000 and self._column_cache is not None: return self._column_cache @@ -65,8 +67,8 @@ def get_column_kernel(self, samples: int = 1000) -> np.ndarray: def get_column_kernel_func(self, samples): """ Generate a numba-accelerated column kernel function. - Creates a numba-accelerated function for column kernel weights. This function - can be utilized similarly to kernel.w. + Creates a numba-accelerated function for column kernel weights. This + function can be utilized similarly to kernel.w(). 
Parameters ---------- @@ -84,8 +86,8 @@ def get_column_kernel_func(self, samples): @njit(fastmath=True) def func(q, dim): - # using np.linspace() would break compatibility with the GPU backend, - # so the calculation here is performed manually. + # using np.linspace() would break compatibility with the GPU + # backend, so the calculation here is performed manually. wab_index = q * (samples - 1) / radius index = min(max(0, int(math.floor(wab_index))), samples - 1) index1 = min(max(0, int(math.ceil(wab_index))), samples - 1) diff --git a/sarracen/kernels/cubic_spline.py b/sarracen/kernels/cubic_spline.py index a8ad0ea..ca6c940 100644 --- a/sarracen/kernels/cubic_spline.py +++ b/sarracen/kernels/cubic_spline.py @@ -14,7 +14,9 @@ def get_radius() -> float: @staticmethod @njit(fastmath=True) def w(q: float, ndim: int): - norm = 2 / 3 if (ndim == 1) else 10 / (7 * np.pi) if (ndim == 2) else 1 / np.pi + norm = 2 / 3 if (ndim == 1) \ + else 10 / (7 * np.pi) if (ndim == 2) \ + else 1 / np.pi - return norm * ((1 - (3. / 2.) * q ** 2 + (3. / 4.) * q ** 3) * (0 <= q) * (q < 1) - + (1. / 4.) * (2 - q) ** 3 * (1 <= q) * (q < 2)) + return norm * ((1 - 1.5 * q**2 + 0.75 * q**3) * (0 <= q) * (q < 1) + + 0.25 * (2 - q)**3 * (1 <= q) * (q < 2)) diff --git a/sarracen/kernels/cubic_spline_exact.py b/sarracen/kernels/cubic_spline_exact.py index 839a8a5..84238c1 100644 --- a/sarracen/kernels/cubic_spline_exact.py +++ b/sarracen/kernels/cubic_spline_exact.py @@ -33,7 +33,8 @@ def line_int(r0, d1, d2, h): q0 = ar0 / h - # Determine the angle between q0 and the endpoints of the line, relative to the contributing particle. + # Determine the angle between q0 and the endpoints of the line, + # relative to the contributing particle. phi1 = math.atan(abs(d1) / ar0) phi2 = math.atan(abs(d2) / ar0) @@ -41,10 +42,12 @@ def line_int(r0, d1, d2, h): # Both line endpoints are on opposite sides of r0. 
result = result * (_full_2d_mod(phi1, q0) + _full_2d_mod(phi2, q0)) elif abs(d1) < abs(d2): - # Both line endpoints are on the same side of r0, with d2 having a larger magnitude. + # Both line endpoints are on the same side of r0, + # with d2 having a larger magnitude. result = result * (_full_2d_mod(phi2, q0) - _full_2d_mod(phi1, q0)) else: - # Both line endpoints are on the same side of r0, with d1 having a larger magnitude. + # Both line endpoints are on the same side of r0, + # with d1 having a larger magnitude. result = result * (_full_2d_mod(phi1, q0) - _full_2d_mod(phi2, q0)) return result @@ -59,9 +62,11 @@ def _full_2d_mod(phi, q0): Parameters ---------- phi: float - Angle between `q0` and the endpoint of the line, relative to the contributing particle. + Angle between `q0` and the endpoint of the line, relative to the + contributing particle. q0: float - The distance between the contributing particle and the line, scaled by the smoothing length of the particle. + The distance between the contributing particle and the line, scaled by + the smoothing length of the particle. Returns ------- @@ -77,19 +82,25 @@ def _full_2d_mod(phi, q0): elif q <= 2.0: # The line lies partly in 0 < q <= 1 and partly in 1 < q <= 2. - # Angle between q0 and the line region endpoint within 0 < q <= 1, relative to the contributing particle. + # Angle between q0 and the line region endpoint within 0 < q <= 1, + # relative to the contributing particle. phi1 = math.acos(q0) return _f2_2d(phi, q0) - _f2_2d(phi1, q0) + _f1_2d(phi1, q0) else: - # The line spans all three possible regions, 0 < q <= 1, 1 < q <= 2, and q > 2. + # The line spans all three possible regions, 0 < q <= 1, + # 1 < q <= 2, and q > 2. - # Angle between q0 and the line region endpoint within 0 < q <= 1, relative to the contributing particle. + # Angle between q0 and the line region endpoint within 0 < q <= 1, + # relative to the contributing particle. 
phi1 = math.acos(q0) - # Angle between q0 and the line region endpoint within 1 < q <= 2, relative to the contributing particle. + # Angle between q0 and the line region endpoint within 1 < q <= 2, + # relative to the contributing particle. phi2 = math.acos(0.5 * q0) - return _f3_2d(phi) - _f3_2d(phi2) + _f2_2d(phi2, q0) - _f2_2d(phi1, q0) + _f1_2d(phi1, q0) + return _f3_2d(phi) - _f3_2d(phi2) + _f2_2d(phi2, q0) \ + - _f2_2d(phi1, q0) + _f1_2d(phi1, q0) elif q0 <= 2.0: - # No part of the line lies within 0 < q <= 1, but it does lie within 1 < q <= 2. + # No part of the line lies within 0 < q <= 1, but it does lie within + # 1 < q <= 2. q = q0 / math.cos(phi) if q <= 2.0: @@ -98,7 +109,8 @@ def _full_2d_mod(phi, q0): else: # The line lies partly in 1 < q <= 2 and q > 2. - # Angle between q0 and the line region endpoint within 1 < q <= 2, relative to the contributing particle. + # Angle between q0 and the line region endpoint within 1 < q <= 2, + # relative to the contributing particle. phi2 = math.acos(0.5 * q0) return _f3_2d(phi) - _f3_2d(phi2) + _f2_2d(phi2, q0) else: @@ -110,15 +122,18 @@ def _full_2d_mod(phi, q0): def _f1_2d(phi, q0): """ Calculate an exact 2D line integral over the cubic spline kernel. - Assumes that one endpoint of the line is at the end of `q0`. Only valid for 0 < q <= 1. + Assumes that one endpoint of the line is at the end of `q0`. Only valid + for 0 < q <= 1. Used in _full_2d_mod. Parameters ---------- phi: float - Angle between `q0` and the endpoint of the line segment, relative to the contributing particle. + Angle between `q0` and the endpoint of the line segment, relative to + the contributing particle. q0: float - The distance between the contributing particle and the line, scaled by the smoothing length of the particle. + The distance between the contributing particle and the line, scaled by + the smoothing length of the particle. Returns ------- @@ -129,24 +144,28 @@ def _f1_2d(phi, q0): i2 = math.tan(phi) i4 = 1. / 3. 
* math.tan(phi) * (2. + 1. / cphi2) - i5 = 1. / 16. * (0.5 * (11. * math.sin(phi) + 3. * math.sin(3. * phi)) / cphi2 / cphi2 + 6. * logs) + i5 = 1. / 16. * (0.5 * (11. * math.sin(phi) + 3. * math.sin(3. * phi)) + / cphi2 / cphi2 + 6. * logs) - return 5. / 7. * q0 ** 2 / math.pi * (i2 - 3. / 4. * q0 ** 2 * i4 + 0.3 * q0 ** 3 * i5) + return 5. / 7. * q0**2 / math.pi \ + * (i2 - 0.75 * q0**2 * i4 + 0.3 * q0**3 * i5) @njit def _f2_2d(phi, q0): """ Calculate an exact 2D line integral over the cubic spline kernel. - Assumes that one endpoint of the line is at the end of `q0`. Only valid for 1 < q <= 2. - Used in _full_2d_mod. + Assumes that one endpoint of the line is at the end of `q0`. Only valid + for 1 < q <= 2. Used in _full_2d_mod. Parameters ---------- phi: float - Angle between `q0` and the endpoint of the line segment, relative to the contributing particle. + Angle between `q0` and the endpoint of the line segment, relative to + the contributing particle. q0: float - The distance between the contributing particle and the line, scaled by the smoothing length of the particle. + The distance between the contributing particle and the line, scaled by + the smoothing length of the particle. Returns ------- @@ -163,23 +182,27 @@ def _f2_2d(phi, q0): i2 = math.tan(phi) i3 = 1. / 2. * (math.tan(phi) / math.cos(phi) + logs) i4 = 1. / 3. * math.tan(phi) * (2. + 1. / cphi2) - i5 = 1. / 16. * (0.5 * (11. * math.sin(phi) + 3. * math.sin(3. * phi)) / cphi2 / cphi2 + 6. * logs) + i5 = 1. / 16. * (0.5 * (11. * math.sin(phi) + + 3. * math.sin(3. * phi)) / cphi2 / cphi2 + + 6. * logs) return 5. / 7. * q02 / math.pi * ( - 2. * i2 - 2. * q0 * i3 + 3. / 4. * q02 * i4 - 1. / 10. * q03 * i5 - 1. / 10. / q02 * i0) + 2. * i2 - 2. * q0 * i3 + 3. / 4. * q02 * i4 - 1. / 10. * q03 * i5 + - 1. / 10. / q02 * i0) @njit def _f3_2d(phi): """ Calculate an exact 2D line integral over the cubic spline kernel. - Assumes that one endpoint of the line is at the end of `q0`. Only valid for q > 2. 
- Used in _full_2d_mod. + Assumes that one endpoint of the line is at the end of `q0`. Only valid + for q > 2. Used in _full_2d_mod. Parameters ---------- phi: float - Angle pointing towards the endpoint of the line segment, relative to the contributing particle. + Angle pointing towards the endpoint of the line segment, relative to + the contributing particle. Returns ------- @@ -192,7 +215,8 @@ def _f3_2d(phi): def surface_int(r0, x1, y1, x2, y2, wx, wy, h): """ Calculate an exact 3D surface integral over the cubic spline kernel. - Used to exactly calculating the contribution of a particle to a pixel's volume in 3D space. + Used to exactly calculating the contribution of a particle to a pixel's + volume in 3D space. Parameters ---------- @@ -213,7 +237,8 @@ def surface_int(r0, x1, y1, x2, y2, wx, wy, h): dx = x2 - x1 dy = y2 - y1 - # Calculate the exact value of this surface by summing the comprising line integrals. + # Calculate the exact value of this surface by summing the comprising line + # integrals. # Bottom boundary r1 = 0.5 * wy + dy @@ -278,7 +303,8 @@ def _line_int3d(r0, r1, d1, d2, h): result = -result ar1 = -r1 - # Split this line integral into two separate line integrals, where one end point is at the endpoint of r1, + # Split this line integral into two separate line integrals, + # where one end point is at the endpoint of r1, # and the other end point is d1 or d2 respectively. int1 = _full_integral_3d(d1, ar0, ar1, h) int2 = _full_integral_3d(d2, ar0, ar1, h) @@ -294,12 +320,14 @@ def _line_int3d(r0, r1, d1, d2, h): if int1 + int2 < 0: print('Error: int1 + int2 < 0') elif abs(d1) < abs(d2): - # Both line endpoints are on the same side of r1, with d2 having a larger magnitude. + # Both line endpoints are on the same side of r1, + # with d2 having a larger magnitude. 
result = result * (int2 - int1) if int2 - int1 < 0: print('Error: int2 - int1 < 0: ', int1, int2, '(', d1, d2, ')') else: - # Both line endpoints are on the same side of r1, with d1 having a larger magnitude. + # Both line endpoints are on the same side of r1, + # with d1 having a larger magnitude. result = result * (int1 - int2) if int1 - int2 < 0: print('Error: int1 - int2 < 0: ', int1, int2, '(', d1, d2, ')') @@ -311,7 +339,8 @@ def _line_int3d(r0, r1, d1, d2, h): def _full_integral_3d(d, r0, r1, h): """ Calculate an exact 3D line integral over the cubic spline kernel. - Assumes that one endpoint of the line is at the end of `r1`. Used in _pint3d. + Assumes that one endpoint of the line is at the end of `r1`. + Used in _pint3d. Parameters ---------- @@ -329,7 +358,8 @@ def _full_integral_3d(d, r0, r1, h): float: The exact value of this line integral. """ r0h = r0 / h - # Angle between the end of the line and the end of r1, relative to the start of r1. + # Angle between the end of the line and the end of r1, + # relative to the start of r1. phi = math.atan(abs(d) / r1) if abs(r0h) == 0 or abs(r1 / h) == 0 or abs(phi) == 0: @@ -349,12 +379,14 @@ def _full_integral_3d(d, r0, r1, h): b3 = 0.25 * h2 * h elif r0 > h: # A part of the surface lies in the region h < r <= 2h. - b3 = 0.25 * r03 * (-4. / 3. + r0h - 0.3 * r0h2 + 1. / 30. * r0h3 - 1. / 15. * r0h_3 + 8. / 5. * r0h_2) - b2 = 0.25 * r03 * (-4. / 3. + r0h - 0.3 * r0h2 + 1. / 30. * r0h3 - 1. / 15. * r0h_3) + b3 = 0.25 * r03 * (-4. / 3. + r0h - 0.3 * r0h2 + 1. / 30. * r0h3 + - 1. / 15. * r0h_3 + 8. / 5. * r0h_2) + b2 = 0.25 * r03 * (-4. / 3. + r0h - 0.3 * r0h2 + 1. / 30. * r0h3 + - 1. / 15. * r0h_3) else: # A part of the surface lies in the region 0 < r <= h. - b3 = 0.25 * r03 * (-2. / 3. + 0.3 * r0h2 - 0.1 * r0h3 + 7. / 5. * r0h_2) - b2 = 0.25 * r03 * (-2. / 3. + 0.3 * r0h2 - 0.1 * r0h3 - 1. / 5. * r0h_2) + b3 = 0.25 * r03 * (-2. / 3. + 0.3 * r0h2 - 0.1 * r0h3 + 1.4 * r0h_2) + b2 = 0.25 * r03 * (-2. / 3. 
+ 0.3 * r0h2 - 0.1 * r0h3 - 0.2 * r0h_2) b1 = 0.25 * r03 * (-2. / 3. + 0.3 * r0h2 - 0.1 * r0h3) a = r1 / r0 @@ -364,7 +396,8 @@ def _full_integral_3d(d, r0, r1, h): linedist2 = r0 * r0 + r1 * r1 # Distance between the end of r1 and the end of the line. r_ = r1 / math.cos(phi) - # Squared distance between the contributing particle and the end of the line. + # Squared distance between the contributing particle and the end of the + # line. r2 = (r0 * r0 + r_ * r_) d2 = 0.0 @@ -374,26 +407,34 @@ def _full_integral_3d(d, r0, r1, h): # A portion of the line lies within 0 < r < h. i = get_I_terms(r1 / math.sqrt(h2 - r0 * r0), a2, a) - d2 = -1. / 6. * i[2] + 0.25 * r0h * i[3] - 0.15 * r0h2 * i[4] + 1. / 30. * r0h3 * i[5] - 1. / 60. * r0h_3\ - * i[1] + (b1 - b2) / r03 * i[0] + d2 = -1. / 6. * i[2] + 0.25 * r0h * i[3] - 0.15 * r0h2 * i[4] + d2 += 1. / 30. * r0h3 * i[5] - 1. / 60. * r0h_3 * i[1] + d2 += (b1 - b2) / r03 * i[0] + if linedist2 < 4. * h2: # A portion of the line lies within 0 < r < 2h. i = get_I_terms(r1 / math.sqrt(4.0 * h2 - r0 * r0), a2, a) - d3 = 1. / 3. * i[2] - 0.25 * r0h * i[3] + 3. / 40. * r0h2 * i[4] - 1. / 120. * r0h3 * i[5] + 4. / 15. * r0h_3\ - * i[1] + (b2 - b3) / r03 * i[0] + d2 + d3 = 1. / 3. * i[2] - 0.25 * r0h * i[3] + 3. / 40. * r0h2 * i[4] + d3 += -1. / 120. * r0h3 * i[5] + 4. / 15. * r0h_3 * i[1] + d3 += (b2 - b3) / r03 * i[0] + d2 i = get_I_terms(math.cos(phi), a2, a) if r2 <= h2: # The entire line lies within 0 < r <= h. - return r0h3 / math.pi * (1. / 6. * i[2] - 3. / 40. * r0h2 * i[4] + 1. / 40. * r0h3 * i[5] + b1 / r03 * i[0]) + return r0h3 / math.pi * (1. / 6. * i[2] - 3. / 40. * r0h2 * i[4] + + 1. / 40. * r0h3 * i[5] + b1 / r03 * i[0]) elif r2 <= 4. * h2: # The entire line lies within 0 < r <= 2h. - return r0h3 / math.pi * (0.25 * (4. / 3. * i[2] - (r0 / h) * i[3] + 0.3 * r0h2 * i[4] - 1. / 30. * r0h3 * i[5] + - 1. / 15. * r0h_3 * i[1]) + b2 / r03 * i[0] + d2) + return r0h3 / math.pi * (0.25 * (4. / 3. 
* i[2] - (r0 / h) * i[3] + + 0.3 * r0h2 * i[4] + - 1. / 30. * r0h3 * i[5] + + 1. / 15. * r0h_3 * i[1]) + + b2 / r03 * i[0] + d2) else: - # The line lies in all possible regions, 0 < r <= h, 0 < r <= 2h, and r > 2h. + # The line lies in all possible regions, 0 < r <= h, 0 < r <= 2h, + # and r > 2h. return r0h3 / math.pi * (-0.25 * r0h_3 * i[1] + b3 / r03 * i[0] + d3) @@ -417,6 +458,8 @@ def get_I_terms(cosp, a2, a): fac = 1. / (1. - u2) I_1 = 0.5 * a * logs + I1 I_3 = I_1 + a * 0.25 * (1. + a2) * (2. * u * fac + logs) - I_5 = I_3 + a * (1. + a2) * (1. + a2) / 16. * ((10. * u - 6. * u * u2) * fac * fac + 3. * logs) + + I_5 = I_3 + (a * (1. + a2)**2 / 16. * + ((10. * u - 6. * u * u2) * fac**2 + 3. * logs)) return I0, I1, I_2, I_3, I_4, I_5 diff --git a/sarracen/kernels/quartic_spline.py b/sarracen/kernels/quartic_spline.py index 1fd8252..2974682 100644 --- a/sarracen/kernels/quartic_spline.py +++ b/sarracen/kernels/quartic_spline.py @@ -14,10 +14,10 @@ def get_radius() -> float: @staticmethod @njit(fastmath=True) def w(q: float, ndim: int): - norm = 1 / 24 if (ndim == 1) else \ - 96 / (1199 * np.pi) if (ndim == 2) else \ - 1 / (20 * np.pi) + norm = 1 / 24 if (ndim == 1) \ + else 96 / (1199 * np.pi) if (ndim == 2) \ + else 1 / (20 * np.pi) - return norm * (((5 / 2) - q) ** 4 * (q < 2.5) - - 5 * ((3 / 2) - q) ** 4 * (q < 1.5) - + 10 * ((1 / 2) - q) ** 4 * ( q < 0.5)) * (0 <= q) + return norm * ((2.5 - q)**4 * (q < 2.5) + - 5 * (1.5 - q)**4 * (q < 1.5) + + 10 * (0.5 - q)**4 * (q < 0.5)) * (0 <= q) diff --git a/sarracen/kernels/quintic_spline.py b/sarracen/kernels/quintic_spline.py index 406447f..e37abd8 100644 --- a/sarracen/kernels/quintic_spline.py +++ b/sarracen/kernels/quintic_spline.py @@ -14,10 +14,10 @@ def get_radius() -> float: @staticmethod @njit(fastmath=True) def w(q: float, ndim: int): - norm = 1 / 120 if (ndim == 1) else \ - 7 / (478 * np.pi) if (ndim == 2) else \ - 1 / (120 * np.pi) + norm = 1 / 120 if (ndim == 1) \ + else 7 / (478 * np.pi) if (ndim == 2) 
\ + else 1 / (120 * np.pi) - return norm * ((3 - q) ** 5 * (q < 3) - - 6 * (2 - q) ** 5 * (q < 2) - + 15 * (1 - q) ** 5 * (q < 1)) * (0 <= q) + return norm * ((3 - q)**5 * (q < 3) + - 6 * (2 - q)**5 * (q < 2) + + 15 * (1 - q)**5 * (q < 1)) * (0 <= q) From 06627402b0a7bbdb48c39755ee7c1040cc09e60d Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Thu, 18 Jul 2024 14:21:20 -0230 Subject: [PATCH 02/12] lint readers --- sarracen/readers/__init__.py | 1 - sarracen/readers/read_csv.py | 6 +- sarracen/readers/read_gasoline.py | 252 +++++++------ sarracen/readers/read_gradsph.py | 2 +- sarracen/readers/read_marisa.py | 601 +++++++++++++++--------------- sarracen/readers/read_phantom.py | 138 ++++--- sarracen/readers/read_shamrock.py | 15 +- 7 files changed, 526 insertions(+), 489 deletions(-) diff --git a/sarracen/readers/__init__.py b/sarracen/readers/__init__.py index 8b13789..e69de29 100644 --- a/sarracen/readers/__init__.py +++ b/sarracen/readers/__init__.py @@ -1 +0,0 @@ - diff --git a/sarracen/readers/read_csv.py b/sarracen/readers/read_csv.py index b96d1a3..2982286 100644 --- a/sarracen/readers/read_csv.py +++ b/sarracen/readers/read_csv.py @@ -23,11 +23,9 @@ def read_csv(*args, **kwargs) -> SarracenDataFrame: return df - - def _get_units(columns: pd.Series) -> pd.Series: - return columns.str.extract(r'((?<=\[).+(?=\]))')[0] + return columns.str.extract(r'((?<=\[).+(?=\]))')[0] def _get_labels(columns: pd.Series) -> pd.Series: - return columns.str.extract(r'(^[^\[]*[^\s\[])')[0] + return columns.str.extract(r'(^[^\[]*[^\s\[])')[0] diff --git a/sarracen/readers/read_gasoline.py b/sarracen/readers/read_gasoline.py index 1567fee..0536ad9 100644 --- a/sarracen/readers/read_gasoline.py +++ b/sarracen/readers/read_gasoline.py @@ -5,14 +5,6 @@ from ..sarracen_dataframe import SarracenDataFrame -#This is a formal notification that Nicholas Owens BSci of McMaster University is the greatest -'''Read Data from Gasoline style tipsy file - Note: This portion of the code is 
based on "PyTipsy" by Ben Keller. - Copyright as folllows: - GNU General Public License, Version 3, 29 June 2007 - Appropriate attribution should be given to Ben Keller based on: - https://github.com/bwkeller/pytipsy -''' def read_gasoline(filename: str, outtype: str = "sarracen"): @@ -54,12 +46,12 @@ def read_gasoline(filename: str, >>> header, catg, catd, cats = sarracen.read_gasoline('dumpfile', outtype='dict') """ - if outtype.lower() in ["dic","dict","dictionary"]: + if outtype.lower() in ["dic", "dict", "dictionary"]: dictcheck = 1 else: dictcheck = 0 - #----------Get all relevant files------------------------- + # Get all relevant files filin = filename.split("/")[-1] dirin = filename[0:len(filename)-len(filin)] if len(dirin) > 0: @@ -79,82 +71,89 @@ def read_gasoline(filename: str, if var != filin: varlist[ii] = var[len(filin)+1:] ii += 1 - #---------------------From PyTipsy---------------------- - try: - fp = open("{}/{}".format(dirin,filin),'rb') - except: - print("Tipsy ERROR: File won't open") - return 1 + + # From PyTipsy + fp = open("{}/{}".format(dirin, filin), 'rb') fs = len(fp.read()) fp.seek(0) - #Take in the Header - t, n, ndim, ng, nd, ns = struct.unpack(" 3): endianswap = True fp.seek(0) - t,n,ndim,ng,nd,ns = struct.unpack(">diiiii",fp.read(28)) - #Catch for 4 byte padding + t, n, ndim, ng, nd, ns = struct.unpack(">diiiii", fp.read(28)) + + # Catch for 4 byte padding if (fs == 32+48*ng+36*nd+44*ns): fp.read(4) - #File is borked if this is true + # File is borked if this is true elif (fs != 28+48*ng+36*nd+44*ns): print("Tipsy ERROR: Header and file size inconsistend") print("Estimates: Header bytes: 28 or 32 (either is OK)") - print(" ngas: ",ng," bytes:",48*ng) - print(" ndark: ",nd," bytes:",38*nd) - print(" nstar: ",ns," bytes:",44*ns) - print("Actual File bytes:",fs," does not work") + print(" ngas: ", ng, " bytes:", 48*ng) + print(" ndark: ", nd, " bytes:", 38*nd) + print(" nstar: ", ns, " bytes:", 44*ns) + print("Actual File bytes:", fs, " 
does not work") fp.close() return 1 - #--------------Make dicitonaries for data----------------- - catg = {'mass':np.zeros(ng), 'pos':np.zeros((ng,3)), 'vel':np.zeros((ng,3)), 'rho':np.zeros(ng), - 'tempg':np.zeros(ng), 'h_gas':np.zeros(ng), 'zmetal':np.zeros(ng), 'phi':np.zeros(ng)} - catd = {'mass':np.zeros(nd), 'pos':np.zeros((nd,3)), 'vel':np.zeros((nd,3)), - 'eps':np.zeros(nd), 'phi':np.zeros(nd)} - cats = {'mass':np.zeros(ns), 'pos':np.zeros((ns,3)), 'vel':np.zeros((ns,3)), - 'metals':np.zeros(ns), 'tform':np.zeros(ns), 'eps':np.zeros(ns), 'phi':np.zeros(ns)} - for cat in ['g','d','s']: + + # Make dicitonaries for data + catg = {'mass': np.zeros(ng), 'pos': np.zeros((ng, 3)), + 'vel': np.zeros((ng, 3)), 'rho': np.zeros(ng), + 'tempg': np.zeros(ng), 'h_gas': np.zeros(ng), + 'zmetal': np.zeros(ng), 'phi': np.zeros(ng)} + catd = {'mass': np.zeros(nd), 'pos': np.zeros((nd, 3)), + 'vel': np.zeros((nd, 3)), 'eps': np.zeros(nd), + 'phi': np.zeros(nd)} + cats = {'mass': np.zeros(ns), 'pos': np.zeros((ns, 3)), + 'vel': np.zeros((ns, 3)), 'metals': np.zeros(ns), + 'tform': np.zeros(ns), 'eps': np.zeros(ns), + 'phi': np.zeros(ns)} + for cat in ['g', 'd', 's']: j = 0 - for qty in ['x','y','z']: - locals()['cat'+cat][qty] = locals()['cat'+cat]['pos'][:,j] - locals()['cat'+cat]['v'+qty] = locals()['cat'+cat]['vel'][:,j] + for qty in ['x', 'y', 'z']: + locals()['cat'+cat][qty] = locals()['cat'+cat]['pos'][:, j] + locals()['cat'+cat]['v'+qty] = locals()['cat'+cat]['vel'][:, j] j += 1 - #---------Read in additional variables------------------- + + # Read in additional variables for var in varlist: - fvar = open("{}.{}".format(filename,var),"rb") + fvar = open("{}.{}".format(filename, var), "rb") nfvar = len(fvar.read()) fvar.seek(0) fvar.read(4) - if (int((nfvar-4)/4)==(ng+ns+nd)): + if int((nfvar-4)/4) == (ng+ns+nd): catg[var] = np.zeros(ng) catd[var] = np.zeros(nd) cats[var] = np.zeros(ns) - if (ng >0): + if ng > 0: for i in range(ng): if endianswap: - catg[var][i], = 
struct.unpack(">f",fvar.read(4)) + catg[var][i], = struct.unpack(">f", fvar.read(4)) else: - catg[var][i], = struct.unpack("0): + catg[var][i], = struct.unpack(" 0: for i in range(ns): if endianswap: - cats[var][i], = struct.unpack(">f",fvar.read(4)) + cats[var][i], = struct.unpack(">f", fvar.read(4)) else: - cats[var][i], = struct.unpack("0): + cats[var][i], = struct.unpack(" 0: for i in range(nd): if endianswap: - catd[var][i], = struct.unpack(">f",fvar.read(4)) + catd[var][i], = struct.unpack(">f", fvar.read(4)) else: - catd[var][i], = struct.unpack("0): + if (ng > 0): for i in range(ng): if endianswap: - catg[varx][i] = struct.unpack(">f",fvar.read(4)) - catg[vary][i] = struct.unpack(">f",fvar.read(4)) - catg[varz][i] = struct.unpack(">f",fvar.read(4)) + catg[varx][i] = struct.unpack(">f", fvar.read(4)) + catg[vary][i] = struct.unpack(">f", fvar.read(4)) + catg[varz][i] = struct.unpack(">f", fvar.read(4)) else: - catg[varx][i] = struct.unpack("0): + catg[varx][i] = struct.unpack(" 0): for i in range(ns): if endianswap: - cats[varx][i] = struct.unpack(">f",fvar.read(4)) - cats[vary][i] = struct.unpack(">f",fvar.read(4)) - cats[varz][i] = struct.unpack(">f",fvar.read(4)) + cats[varx][i] = struct.unpack(">f", fvar.read(4)) + cats[vary][i] = struct.unpack(">f", fvar.read(4)) + cats[varz][i] = struct.unpack(">f", fvar.read(4)) else: - cats[varx][i] = struct.unpack("0): + cats[varx][i] = struct.unpack(" 0): for i in range(nd): if endianswap: - catd[varx][i] = struct.unpack(">f",fvar.read(4)) - catd[vary][i] = struct.unpack(">f",fvar.read(4)) - catd[varz][i] = struct.unpack(">f",fvar.read(4)) + catd[varx][i] = struct.unpack(">f", fvar.read(4)) + catd[vary][i] = struct.unpack(">f", fvar.read(4)) + catd[varz][i] = struct.unpack(">f", fvar.read(4)) else: - catd[varx][i] = struct.unpack("0): + if (ng > 0): for i in range(ng): if endianswap: - catg[varx][i] = struct.unpack(">f",fvar.read(4)) - catg[vary][i] = struct.unpack(">f",fvar.read(4)) - catg[varz][i] = 
struct.unpack(">f",fvar.read(4)) + catg[varx][i] = struct.unpack(">f", fvar.read(4)) + catg[vary][i] = struct.unpack(">f", fvar.read(4)) + catg[varz][i] = struct.unpack(">f", fvar.read(4)) else: - catg[varx][i] = struct.unpack("0): + catg[varx][i] = struct.unpack(" 0): for i in range(ns): if endianswap: - cats[varx][i] = struct.unpack(">f",fvar.read(4)) - cats[vary][i] = struct.unpack(">f",fvar.read(4)) - cats[varz][i] = struct.unpack(">f",fvar.read(4)) + cats[varx][i] = struct.unpack(">f", fvar.read(4)) + cats[vary][i] = struct.unpack(">f", fvar.read(4)) + cats[varz][i] = struct.unpack(">f", fvar.read(4)) else: - cats[varx][i] = struct.unpack("0): + cats[varx][i] = struct.unpack(" 0): for i in range(nd): if endianswap: - catd[varx][i] = struct.unpack(">f",fvar.read(4)) - catd[vary][i] = struct.unpack(">f",fvar.read(4)) - catd[varz][i] = struct.unpack(">f",fvar.read(4)) + catd[varx][i] = struct.unpack(">f", fvar.read(4)) + catd[vary][i] = struct.unpack(">f", fvar.read(4)) + catd[varz][i] = struct.unpack(">f", fvar.read(4)) else: - catd[varx][i] = struct.unpack(" 0): for i in range(ng): if endianswap: - mass, x, y, z, vx, vy, vz, dens, tempg, h, zmetal, phi = struct.unpack(">ffffffffffff", fp.read(48)) + data = struct.unpack(">ffffffffffff", fp.read(48)) else: - mass, x, y, z, vx, vy, vz, dens, tempg, h, zmetal, phi = struct.unpack(" 0): for i in range(nd): if endianswap: - mass, x, y, z, vx, vy, vz, eps, phi = struct.unpack(">fffffffff", fp.read(36)) + data = struct.unpack(">fffffffff", fp.read(36)) else: - mass, x, y, z, vx, vy, vz, eps, phi = struct.unpack(" 0): for i in range(ns): if endianswap: - mass, x, y, z, vx, vy, vz, metals, tform, eps, phi = struct.unpack(">fffffffffff", fp.read(44)) + data = struct.unpack(">fffffffffff", fp.read(44)) else: - mass, x, y, z, vx, vy, vz, metals, tform, eps, phi = struct.unpack(" SarracenDataFrame: - """ Read data from a Marisa dump file. - - Parameters - ---------- - filename : str - Name of the file to be loaded. 
- slicenumber : int, default=0 - The time slice to read from the data file. - - Returns - ------- - SarracenDataFrame - """ - fp = open(filename, "rb") - ntags = 0 - tags = 0 - sizes = 0 - offsets = 0 - tags, offsets = _marisa_parse_tags(fp) - Ns = _marisa_count_slices(fp, tags) - - if slicenumber < 0: - slicenumber = Ns + slicenumber - - if (slicenumber < 0 or slicenumber >= Ns): - raise ValueError("Invalid slice number") - - verbose = False - - if verbose: - print("seeking slice number: " + str(slicenumber)) - - # find slice - - slicecounter = -1 - slicetagID = -1 - startheaderread = False - endheaderread = False - - for i in range(len(tags)): - - if tags[i] == MARISAIO_TAGS.startslice: - - slicecounter = slicecounter + 1 - - if (slicecounter == slicenumber): - slicetagID = i - - if verbose: - print("found slice: ") - print(" slicecounter: " + str(slicecounter)) - print(" slicetagID: " + str(slicetagID)) - - # all the header bit we will ignore - - if (slicetagID == -1 or slicecounter < slicenumber): - raise ValueError("Slice number not found") - - columns = [] - - done = False - endsliceread = False - i = slicetagID - n = 0 - df = pd.DataFrame() - params = dict() - while (not done): - fp.seek(offsets[i], 0) - - tag = tags[i] - - if (tag == MARISAIO_TAGS.endslice): - endsliceread = True - done = True - i = len(tags) + 1 - elif (tag == MARISAIO_TAGS.n): - params['n'] = np.frombuffer(_marisa_read_data(fp), dtype=np.int32)[0] - elif (tag == MARISAIO_TAGS.t): - params['t'] = np.frombuffer(_marisa_read_data(fp), dtype=np.float64)[0] - elif (tag == MARISAIO_TAGS.totalge): - params['totalge'] = np.frombuffer(_marisa_read_data(fp), dtype=np.float64)[0] - elif (tag == MARISAIO_TAGS.totalke): - params['totalke'] = np.frombuffer(_marisa_read_data(fp), dtype=np.float64)[0] - elif (tag == MARISAIO_TAGS.totalue): - params['totalue'] = np.frombuffer(_marisa_read_data(fp), dtype=np.float64)[0] - elif (tag == MARISAIO_TAGS.totalbe): - params['totalbe'] = 
np.frombuffer(_marisa_read_data(fp), dtype=np.float64)[0] - elif (tag == MARISAIO_TAGS.totalpsie): - params['totalpsie'] = np.frombuffer(_marisa_read_data(fp), dtype=np.float64)[0] - elif (tag == MARISAIO_TAGS.totalmomentum): - params['totalmomentum'] = np.frombuffer(_marisa_read_data(fp), dtype=np.float64)[0] - - if (tag == MARISAIO_TAGS.rx): - df[MARISAIO_TAGS.rx.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.ry): - df[MARISAIO_TAGS.ry.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.rz): - df[MARISAIO_TAGS.rz.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.vx): - df[MARISAIO_TAGS.vx.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.vy): - df[MARISAIO_TAGS.vy.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.vz): - df[MARISAIO_TAGS.vz.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - - if (tag == MARISAIO_TAGS.bx): - df[MARISAIO_TAGS.bx.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.by): - df[MARISAIO_TAGS.by.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.bz): - df[MARISAIO_TAGS.bz.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.psi): - df[MARISAIO_TAGS.psi.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - - if (tag == MARISAIO_TAGS.euleralpha): - df[MARISAIO_TAGS.rx.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.ax): - df[MARISAIO_TAGS.ax.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.ay): - df[MARISAIO_TAGS.ay.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.az): - df[MARISAIO_TAGS.az.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - - - if (tag == MARISAIO_TAGS.m): - 
df[MARISAIO_TAGS.m.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.h): - df[MARISAIO_TAGS.h.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.rho): - df[MARISAIO_TAGS.rho.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.P): - df[MARISAIO_TAGS.P.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.ue): - df[MARISAIO_TAGS.ue.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.ke): - df[MARISAIO_TAGS.ke.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.s): - df[MARISAIO_TAGS.s.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - - if (tag == MARISAIO_TAGS.alpha): - df[MARISAIO_TAGS.alpha.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.alphamag): - df[MARISAIO_TAGS.alphamag.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.alphau): - df[MARISAIO_TAGS.alphau.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - - if (tag == MARISAIO_TAGS.divv): - df[MARISAIO_TAGS.divv.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.divb): - df[MARISAIO_TAGS.divb.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.divbsymm): - df[MARISAIO_TAGS.divbsymm.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.curlb): - df[MARISAIO_TAGS.curlb.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.dustfrac): - df[MARISAIO_TAGS.dustfrac.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - if (tag == MARISAIO_TAGS.colour): - df[MARISAIO_TAGS.colour.name] = np.frombuffer(_marisa_read_data(fp), dtype=np.double) - - i = i + 1 - - if (not endsliceread): - raise AssertionError("Did not find end of slice tag") - - 
return SarracenDataFrame(df, params=params) + """ Read data from a Marisa dump file. + + Parameters + ---------- + filename : str + Name of the file to be loaded. + slicenumber : int, default=0 + The time slice to read from the data file. + + Returns + ------- + SarracenDataFrame + """ + fp = open(filename, "rb") + tags, offsets = _marisa_parse_tags(fp) + Ns = _marisa_count_slices(fp, tags) + + if slicenumber < 0: + slicenumber = Ns + slicenumber + + if (slicenumber < 0 or slicenumber >= Ns): + raise ValueError("Invalid slice number") + + verbose = False + + if verbose: + print("seeking slice number: " + str(slicenumber)) + + # find slice + + slicecounter = -1 + slicetagID = -1 + + for i in range(len(tags)): + + if tags[i] == MARISAIO_TAGS.startslice: + + slicecounter = slicecounter + 1 + + if (slicecounter == slicenumber): + slicetagID = i + + if verbose: + print("found slice: ") + print(" slicecounter: " + str(slicecounter)) + print(" slicetagID: " + str(slicetagID)) + + # all the header bit we will ignore + + if (slicetagID == -1 or slicecounter < slicenumber): + raise ValueError("Slice number not found") + + done = False + endsliceread = False + i = slicetagID + df = pd.DataFrame() + params = dict() + while not done: + fp.seek(offsets[i], 0) + + tag = tags[i] + + if (tag == MARISAIO_TAGS.endslice): + endsliceread = True + done = True + i = len(tags) + 1 + elif (tag == MARISAIO_TAGS.n): + params['n'] = _marisa_read_data(fp, np.int32)[0] + elif (tag == MARISAIO_TAGS.t): + params['t'] = _marisa_read_data(fp, np.float64)[0] + elif (tag == MARISAIO_TAGS.totalge): + params['totalge'] = _marisa_read_data(fp, np.float64)[0] + elif (tag == MARISAIO_TAGS.totalke): + params['totalke'] = _marisa_read_data(fp, np.float64)[0] + elif (tag == MARISAIO_TAGS.totalue): + params['totalue'] = _marisa_read_data(fp, np.float64)[0] + elif (tag == MARISAIO_TAGS.totalbe): + params['totalbe'] = _marisa_read_data(fp, np.float64)[0] + elif (tag == MARISAIO_TAGS.totalpsie): + 
params['totalpsie'] = _marisa_read_data(fp, np.float64)[0] + elif (tag == MARISAIO_TAGS.totalmomentum): + params['totalmomentum'] = _marisa_read_data(fp, np.float64)[0] + + if (tag == MARISAIO_TAGS.rx): + df[MARISAIO_TAGS.rx.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.ry): + df[MARISAIO_TAGS.ry.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.rz): + df[MARISAIO_TAGS.rz.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.vx): + df[MARISAIO_TAGS.vx.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.vy): + df[MARISAIO_TAGS.vy.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.vz): + df[MARISAIO_TAGS.vz.name] = _marisa_read_data(fp, np.double) + + if (tag == MARISAIO_TAGS.bx): + df[MARISAIO_TAGS.bx.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.by): + df[MARISAIO_TAGS.by.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.bz): + df[MARISAIO_TAGS.bz.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.psi): + df[MARISAIO_TAGS.psi.name] = _marisa_read_data(fp, np.double) + + if (tag == MARISAIO_TAGS.euleralpha): + df[MARISAIO_TAGS.rx.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.ax): + df[MARISAIO_TAGS.ax.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.ay): + df[MARISAIO_TAGS.ay.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.az): + df[MARISAIO_TAGS.az.name] = _marisa_read_data(fp, np.double) + + if (tag == MARISAIO_TAGS.m): + df[MARISAIO_TAGS.m.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.h): + df[MARISAIO_TAGS.h.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.rho): + df[MARISAIO_TAGS.rho.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.P): + df[MARISAIO_TAGS.P.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.ue): + df[MARISAIO_TAGS.ue.name] = _marisa_read_data(fp, np.double) + if (tag 
== MARISAIO_TAGS.ke): + df[MARISAIO_TAGS.ke.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.s): + df[MARISAIO_TAGS.s.name] = _marisa_read_data(fp, np.double) + + if (tag == MARISAIO_TAGS.alpha): + df[MARISAIO_TAGS.alpha.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.alphamag): + df[MARISAIO_TAGS.alphamag.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.alphau): + df[MARISAIO_TAGS.alphau.name] = _marisa_read_data(fp, np.double) + + if (tag == MARISAIO_TAGS.divv): + df[MARISAIO_TAGS.divv.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.divb): + df[MARISAIO_TAGS.divb.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.divbsymm): + df[MARISAIO_TAGS.divbsymm.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.curlb): + df[MARISAIO_TAGS.curlb.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.dustfrac): + df[MARISAIO_TAGS.dustfrac.name] = _marisa_read_data(fp, np.double) + if (tag == MARISAIO_TAGS.colour): + df[MARISAIO_TAGS.colour.name] = _marisa_read_data(fp, np.double) + + i = i + 1 + + if (not endsliceread): + raise AssertionError("Did not find end of slice tag") + + return SarracenDataFrame(df, params=params) diff --git a/sarracen/readers/read_phantom.py b/sarracen/readers/read_phantom.py index 974b5b5..69126a3 100644 --- a/sarracen/readers/read_phantom.py +++ b/sarracen/readers/read_phantom.py @@ -1,12 +1,17 @@ -from typing import Union - import numpy as np import pandas as pd from ..sarracen_dataframe import SarracenDataFrame + def _read_fortran_block(fp, bytesize): - """ Helper function to read Fortran data, which is also buffered before and after by 4 bytes.""" + """ Helper function to read Fortran-written data. + + Fortran will add a 4-byte tag before and after any data writes. The value + of this tag is equal to the number of bytes written. 
In our case, we do a + simple sanity check that the start and end tag are consistent, but not + validate the value of the tag with the size of the data read. + """ start_tag = fp.read(4) data = fp.read(bytesize) end_tag = fp.read(4) @@ -31,42 +36,57 @@ def _read_capture_pattern(fp): def_int_dtype, def_real_dtype = def_types[0] for def_int_dtype, def_real_dtype in def_types: - i1 = np.frombuffer(fp.read(def_int_dtype().itemsize), count=1, dtype=def_int_dtype)[0] - r1 = np.frombuffer(fp.read(def_real_dtype().itemsize), count=1, dtype=def_real_dtype)[0] - i2 = np.frombuffer(fp.read(def_int_dtype().itemsize), count=1, dtype=def_int_dtype)[0] + i1 = fp.read(def_int_dtype().itemsize) + r1 = fp.read(def_real_dtype().itemsize) + i2 = fp.read(def_int_dtype().itemsize) + + i1 = np.frombuffer(i1, count=1, dtype=def_int_dtype)[0] + r1 = np.frombuffer(r1, count=1, dtype=def_real_dtype)[0] + i2 = np.frombuffer(i2, count=1, dtype=def_int_dtype)[0] - if i1 == def_int_dtype(60769) and i2 == def_int_dtype(60878) and r1 == def_real_dtype(i2): + if (i1 == def_int_dtype(60769) + and i2 == def_int_dtype(60878) + and r1 == def_real_dtype(i2)): break else: # rewind and try again fp.seek(-def_int_dtype().itemsize, 1) fp.seek(-def_real_dtype().itemsize, 1) fp.seek(-def_int_dtype().itemsize, 1) - if i1 != def_int_dtype(60769) or i2 != def_int_dtype(60878) or r1 != def_real_dtype(i2): - raise AssertionError("Could not determine default int or float precision (i1, r1, i2 mismatch). Is this a Phantom data file?") + if (i1 != def_int_dtype(60769) + or i2 != def_int_dtype(60878) + or r1 != def_real_dtype(i2)): + raise AssertionError("Could not determine default int or float " + "precision (i1, r1, i2 mismatch). 
" + "Is this a Phantom data file?") # iversion -- we don't actually check this - iversion = np.frombuffer(fp.read(def_int_dtype().itemsize), count=1, dtype=def_int_dtype)[0] + iversion = fp.read(def_int_dtype().itemsize) + iversion = np.frombuffer(iversion, count=1, dtype=def_int_dtype)[0] # integer 3 == 690706 - i3 = np.frombuffer(fp.read(def_int_dtype().itemsize), count=1, dtype=def_int_dtype)[0] + i3 = fp.read(def_int_dtype().itemsize) + i3 = np.frombuffer(i3, count=1, dtype=def_int_dtype)[0] if i3 != def_int_dtype(690706): - raise AssertionError("Capture pattern error. i3 mismatch. Is this a Phantom data file?") + raise AssertionError("Capture pattern error. i3 mismatch. " + "Is this a Phantom data file?") end_tag = fp.read(4) # 4-byte Fortran tag # assert tags equal if (start_tag != end_tag): - raise AssertionError("Capture pattern error. Fortran tags mismatch. Is this a Phantom data file?") - - return def_int_dtype, def_real_dtype + raise AssertionError("Capture pattern error. Fortran tags mismatch. " + "Is this a Phantom data file?") + return def_int_dtype, def_real_dtype, iversion def _read_file_identifier(fp): - """ Read the 100 character file identifier. Contains code version and date information. """ - return _read_fortran_block(fp, 100).decode('ascii').strip() + """ Read the 100 character file identifier. + The file identifier contains code version and date information. + """ + return _read_fortran_block(fp, 100).decode('ascii').strip() def _rename_duplicates(keys): @@ -103,7 +123,7 @@ def _read_global_header(fp, def_int_dtype, def_real_dtype): """ Read global variables. 
""" dtypes = [def_int_dtype, np.int8, np.int16, np.int32, np.int64, - def_real_dtype, np.float32, np.float64] + def_real_dtype, np.float32, np.float64] keys = [] data = [] @@ -140,11 +160,13 @@ def _read_array_block(fp, df, n, nums, def_int_dtype, def_real_dtype): count += 1 tag = original_tag + f"_{count}" - data = np.frombuffer(_read_fortran_block(fp, dtype().itemsize * n), dtype=dtype) + data = _read_fortran_block(fp, dtype().itemsize * n) + data = np.frombuffer(data, dtype=dtype) df[tag] = data return df + def _read_array_blocks(fp, def_int_dtype, def_real_dtype): """ Read particle data. Block 2 is always for sink particles?""" nblocks = np.frombuffer(_read_fortran_block(fp, 4), dtype=np.int32)[0] @@ -165,12 +187,14 @@ def _read_array_blocks(fp, def_int_dtype, def_real_dtype): df_sinks = pd.DataFrame() for i in range(0, nblocks): # This assumes the second block is only for sink particles. - # I believe this is a valid assumption as I think this is what splash assumes. + # I believe this is a valid assumption as this is what splash assumes. # For now we will just append sinks to the end of the data frame. 
if i == 1: - df_sinks = _read_array_block(fp, df_sinks, n[i], nums[i], def_int_dtype, def_real_dtype) + df_sinks = _read_array_block(fp, df_sinks, n[i], nums[i], + def_int_dtype, def_real_dtype) else: - df = _read_array_block(fp, df, n[i], nums[i], def_int_dtype, def_real_dtype) + df = _read_array_block(fp, df, n[i], nums[i], def_int_dtype, + def_real_dtype) return df, df_sinks @@ -183,30 +207,37 @@ def _create_mass_column(df, header_vars): df['mass'] = header_vars['massoftype'] for itype in df['itype'].unique(): if itype > 1: - df.loc[df.itype == itype, 'mass'] = header_vars[f'massoftype_{itype}'] + mass = header_vars[f'massoftype_{itype}'] + df.loc[df.itype == itype, 'mass'] = mass return df -def read_phantom(filename: str, separate_types: str = 'sinks', ignore_inactive: bool = True): + +def read_phantom(filename: str, + separate_types: str = 'sinks', + ignore_inactive: bool = True): """ Read data from a Phantom dump file. - This reads the native binary format of Phantom dump files, which in turn were derived from the binary file format - used by sphNG. + This reads the native binary format of Phantom dump files, which in turn + were derived from the binary file format used by sphNG. - Global values stored in the dump file (time step, initial momentum, hfact, Courant factor, etc) are stored within the - data frame in the dictionary ``params``. + Global values stored in the dump file (time step, initial momentum, hfact, + Courant factor, etc) are stored within the data frame in the dictionary + ``params``. Parameters ---------- filename : str Name of the file to be loaded. separate_types : {None, 'sinks', 'all'}, default='sinks' - Whether to separate different particle types into several dataframes. ``None`` returns all particle types in one - data frame. '`sinks`' separates sink particles into a second dataframe, and '`all`' returns all particle types in - different dataframes. + Whether to separate different particle types into several dataframes. 
+ ``None`` returns all particle types in one data frame. '`sinks`' + separates sink particles into a second dataframe, and '`all`' returns + all particle types in different dataframes. ignore_inactive : {True, False}, default=True - If True, particles with negative smoothing length will not be read on import. These are - typically particles that have been accreted onto a sink particle or are otherwise inactive. + If True, particles with negative smoothing length will not be read on + import. These are typically particles that have been accreted onto a + sink particle or are otherwise inactive. Returns ------- @@ -214,26 +245,30 @@ def read_phantom(filename: str, separate_types: str = 'sinks', ignore_inactive: Notes ----- - See the `Phantom documentation `_ for a full description - of the Phantom binary file format. + See the `Phantom documentation + `_ for a full + description of the Phantom binary file format. Examples -------- - By default, SPH particles are grouped into one data frame and sink particles into a second data frame. + By default, SPH particles are grouped into one data frame and sink + particles into a second data frame. >>> sdf, sdf_sinks = sarracen.read_phantom('dumpfile_00000') - A dump file containing multiple particle types, say gas + dust + sinks, can separated into their own data frames - by specifying ``separate_types='all'``. + A dump file containing multiple particle types, say gas + dust + sinks, + can be separated into their own data frames by specifying + ``separate_types='all'``. 
- >>> sdf_gas, sdf_dust, sdf_sinks = sarracen.read_phantom('multiple_types_00000', separate_types='all') + >>> sdf_gas, sdf_dust, sdf_sinks = sarracen.read_phantom('dumpfile_00000', separate_types='all') """ with open(filename, 'rb') as fp: - def_int_dtype, def_real_dtype = _read_capture_pattern(fp) + def_int_dtype, def_real_dtype, iversion = _read_capture_pattern(fp) file_identifier = _read_file_identifier(fp) header_vars = _read_global_header(fp, def_int_dtype, def_real_dtype) header_vars['file_identifier'] = file_identifier + header_vars['iversion'] = iversion df, df_sinks = _read_array_blocks(fp, def_int_dtype, def_real_dtype) @@ -241,7 +276,9 @@ def read_phantom(filename: str, separate_types: str = 'sinks', ignore_inactive: df = df[df['h'] > 0] # create mass column if multiple species in single dataframe - if separate_types != 'all' and 'itype' in df and df['itype'].nunique() > 1: + if (separate_types != 'all' + and 'itype' in df + and df['itype'].nunique() > 1): df = _create_mass_column(df, header_vars) else: # create global mass parameter header_vars['mass'] = header_vars['massoftype'] @@ -251,24 +288,31 @@ def read_phantom(filename: str, separate_types: str = 'sinks', ignore_inactive: if 'itype' in df and df['itype'].nunique() > 1: for _, group in df.groupby('itype'): itype = int(group["itype"].iloc[0]) - mass_key = 'massoftype' if itype == 1 else f'massoftype_{itype}' + mass_key = 'massoftype' if itype == 1 \ + else f'massoftype_{itype}' + params = {**header_vars, **{"mass": header_vars[mass_key]}} df_list.append(SarracenDataFrame(group.dropna(axis=1), - params={**header_vars, **{"mass": header_vars[mass_key]}})) + params=params)) else: df_list = [SarracenDataFrame(df, params=header_vars)] if not df_sinks.empty: + params = {key: value for key, value in header_vars.items() + if key != 'mass'} df_list.append(SarracenDataFrame(df_sinks, - params={key: value for key, value in header_vars.items() if key != 'mass'})) + params=params)) elif separate_types == 
'sinks': df_list = [SarracenDataFrame(df, params=header_vars)] if not df_sinks.empty: + params = {key: value for key, value in header_vars.items() + if key != 'mass'} df_list.append(SarracenDataFrame(df_sinks, - params={key: value for key, value in header_vars.items() if key != 'mass'})) + params=params)) else: - df_list = [SarracenDataFrame(pd.concat([df, df_sinks], ignore_index=True), - params=header_vars)] + df_list = [SarracenDataFrame(pd.concat([df, df_sinks], + ignore_index=True), + params=header_vars)] df_list = df_list[0] if len(df_list) == 1 else df_list diff --git a/sarracen/readers/read_shamrock.py b/sarracen/readers/read_shamrock.py index f7931c2..cea7211 100644 --- a/sarracen/readers/read_shamrock.py +++ b/sarracen/readers/read_shamrock.py @@ -1,5 +1,3 @@ -from typing import Union - import numpy as np import pandas as pd @@ -8,6 +6,7 @@ from ..sarracen_dataframe import SarracenDataFrame + def read_shamrock(filename, pmass): """ Read data from a SHAMROCK vtk file ('big' simulation current format). @@ -17,8 +16,8 @@ def read_shamrock(filename, pmass): filename : str Name of the file to be loaded. pmass : float - Mass of particles in the simulation (for now, it is assumed all particles) - have the same mass). + Mass of particles in the simulation (for now, it is assumed all + particles have the same mass). 
Returns ------- @@ -42,10 +41,8 @@ def read_shamrock(filename, pmass): numpy_array = vtk_to_numpy(vtk_array) ndim = numpy_array.ndim - if ndim==1: - print(array_name) + if ndim == 1: df[array_name] = numpy_array - else: df[array_name + 'x'] = numpy_array[:, 0] df[array_name + 'y'] = numpy_array[:, 1] @@ -58,7 +55,7 @@ def read_shamrock(filename, pmass): df['By'] = df['B/rhoy'] * df['rho'] if 'B/rhoz' in df.columns: df['Bz'] = df['B/rhoz'] * df['rho'] - + # now add position columns points = vtk_data.GetPoints() numpy_points = vtk_to_numpy(points.GetData()) @@ -69,4 +66,4 @@ def read_shamrock(filename, pmass): # finish by adding mass df['mass'] = pmass * np.ones_like(numpy_points[:, 0]) - return sarracen.SarracenDataFrame(df) + return SarracenDataFrame(df) From bee5e6321474e7b58294e47bd8a404ec0f4b05d2 Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Fri, 19 Jul 2024 11:10:34 -0230 Subject: [PATCH 03/12] lint unit tests --- sarracen/tests/disc/test_angular_momentum.py | 22 +- sarracen/tests/disc/test_surface_density.py | 1 - .../tests/interpolate/test_interpolate.py | 1892 +++++++++++------ sarracen/tests/interpolate/test_rotation.py | 519 +++-- sarracen/tests/readers/test_read_csv.py | 26 +- sarracen/tests/readers/test_read_phantom.py | 412 ++-- sarracen/tests/test_kernels.py | 89 +- sarracen/tests/test_render.py | 151 +- sarracen/tests/test_sarracen_dataframe.py | 73 +- 9 files changed, 1971 insertions(+), 1214 deletions(-) diff --git a/sarracen/tests/disc/test_angular_momentum.py b/sarracen/tests/disc/test_angular_momentum.py index 6d3cd45..0ad5c0e 100644 --- a/sarracen/tests/disc/test_angular_momentum.py +++ b/sarracen/tests/disc/test_angular_momentum.py @@ -1,9 +1,7 @@ -import pandas as pd import numpy as np -from numpy.testing import assert_array_equal, assert_allclose +from numpy.testing import assert_array_equal from sarracen import SarracenDataFrame from sarracen.disc import angular_momentum -import pytest def test_mass_equivalency(): @@ -51,12 +49,12 @@ 
def test_parts_vs_whole(): 'vx': vx, 'vy': vy, 'vz': vz, 'mass': mass}) Lx_in, Ly_in, Lz_in = angular_momentum(sdf, r_in=0.0, r_out=0.5, bins=100) - Lx_out, Ly_out, Lz_out = angular_momentum(sdf, r_in=0.5, r_out=1.0, bins=100) - Lx_all, Ly_all, Lz_all = angular_momentum(sdf, r_in=0.0, r_out=1.0, bins=200) - - assert_array_equal(Lx_in, Lx_all[:100]) - assert_array_equal(Lx_out, Lx_all[100:]) - assert_array_equal(Ly_in, Ly_all[:100]) - assert_array_equal(Ly_out, Ly_all[100:]) - assert_array_equal(Lz_in, Lz_all[:100]) - assert_array_equal(Lz_out, Lz_all[100:]) \ No newline at end of file + Lx_ex, Ly_ex, Lz_ex = angular_momentum(sdf, r_in=0.5, r_out=1.0, bins=100) + Lx, Ly, Lz = angular_momentum(sdf, r_in=0.0, r_out=1.0, bins=200) + + assert_array_equal(Lx_in, Lx[:100]) + assert_array_equal(Ly_in, Ly[:100]) + assert_array_equal(Lz_in, Lz[:100]) + assert_array_equal(Lx_ex, Lx[100:]) + assert_array_equal(Ly_ex, Ly[100:]) + assert_array_equal(Lz_ex, Lz[100:]) diff --git a/sarracen/tests/disc/test_surface_density.py b/sarracen/tests/disc/test_surface_density.py index 2ff02a8..f77de94 100644 --- a/sarracen/tests/disc/test_surface_density.py +++ b/sarracen/tests/disc/test_surface_density.py @@ -1,4 +1,3 @@ -import pandas as pd import numpy as np from numpy.testing import assert_array_equal, assert_allclose from sarracen import SarracenDataFrame diff --git a/sarracen/tests/interpolate/test_interpolate.py b/sarracen/tests/interpolate/test_interpolate.py index bf086d5..d7aa316 100644 --- a/sarracen/tests/interpolate/test_interpolate.py +++ b/sarracen/tests/interpolate/test_interpolate.py @@ -8,10 +8,12 @@ from pytest import approx, raises, mark from sarracen import SarracenDataFrame -from sarracen.kernels import CubicSplineKernel, QuarticSplineKernel, QuinticSplineKernel -from sarracen.interpolate import interpolate_2d, interpolate_2d_line, interpolate_3d_cross, interpolate_3d_proj, \ - interpolate_2d_vec, interpolate_3d_vec, interpolate_3d_cross_vec, interpolate_3d_grid, 
interpolate_3d_line - +from sarracen.kernels import CubicSplineKernel, QuarticSplineKernel, \ + QuinticSplineKernel +from sarracen.interpolate import interpolate_2d, interpolate_2d_line, \ + interpolate_3d_cross, interpolate_3d_proj, interpolate_2d_vec, \ + interpolate_3d_vec, interpolate_3d_cross_vec, interpolate_3d_grid, \ + interpolate_3d_line backends = ['cpu'] if cuda.is_available(): @@ -21,39 +23,49 @@ @mark.parametrize("backend", backends) def test_single_particle(backend): """ - The result of interpolation over a single particle should be equal to scaled kernel - values at each point of the image. + The result of interpolation over a single particle should be equal to + scaled kernel values at each point of the image. """ - df = pd.DataFrame({'x': [0], 'y': [0], 'A': [4], 'B': [5], 'h': [0.9], 'rho': [0.4], 'm': [0.03]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [0], 'y': [0], 'A': [4], 'B': [5], + 'h': [0.9], 'rho': [0.4], 'm': [0.03]} + sdf = SarracenDataFrame(data, params=dict()) kernel = CubicSplineKernel() + kernel_rad = kernel.get_radius() + bounds = (-kernel_rad, kernel_rad) + sdf.kernel = kernel sdf.backend = backend # Weight for 2D interpolation & 3D column interpolation. - w = sdf['m'] / (sdf['rho'] * sdf['h'] ** 2) + weight = sdf['m'] / (sdf['rho'] * sdf['h'] ** 2) # A mapping of pixel indices to x / y values in particle space. 
- real = -kernel.get_radius() + (np.arange(0, 25) + 0.5) * (2 * kernel.get_radius() / 25) - - image = interpolate_2d(sdf, 'A', x_pixels=25, y_pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) - image_vec = interpolate_2d_vec(sdf, 'A', 'B', x_pixels=25, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + real = -kernel_rad + (np.arange(0, 25) + 0.5) * (2 * kernel_rad / 25) + + img = interpolate_2d(sdf, 'A', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + normalize=False, hmin=False) + img_vec = interpolate_2d_vec(sdf, 'A', 'B', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert image[y][x] ==\ - approx(w[0] * sdf['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[0][y][x] ==\ - approx(w[0] * sdf['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[1][y][x] == \ - approx(w[0] * sdf['B'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - - image = interpolate_2d_line(sdf, 'A', pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + r = np.sqrt(real[x] ** 2 + real[y] ** 2) + w = kernel.w(r / sdf['h'][0], 2) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) + + img = interpolate_2d_line(sdf, 'A', + pixels=25, + xlim=bounds, ylim=bounds, + normalize=False, hmin=False) for x in range(25): - assert image[x] == approx(w[0] * sdf['A'][0] * kernel.w(np.sqrt(2) * np.abs(real[x]) / sdf['h'][0], 2)) + r = np.sqrt(2) * np.abs(real[x]) + w = kernel.w(r / sdf['h'][0], 2) + assert 
img[x] == approx(weight[0] * sdf['A'][0] * w) # Convert the previous 2D dataframe to a 3D dataframe. sdf['z'] = -0.5 @@ -62,100 +74,121 @@ def test_single_particle(backend): column_func = kernel.get_column_kernel_func(1000) - image = interpolate_3d_proj(sdf, 'A', x_pixels=25, y_pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), dens_weight=False, normalize=False, hmin=False) - image_vec = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=25, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), dens_weight=False, normalize=False, hmin=False) + img = interpolate_3d_proj(sdf, 'A', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + dens_weight=False, + normalize=False, hmin=False) + img_vec = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + dens_weight=False, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert image[y][x] ==\ - approx(w[0] * sdf['A'][0] * column_func(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[0][y][x] == \ - approx(w[0] * sdf['A'][0] * column_func(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[1][y][x] == \ - approx(w[0] * sdf['B'][0] * column_func(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) + r = np.sqrt(real[x] ** 2 + real[y] ** 2) + w = column_func(r / sdf['h'][0], 2) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) # Weight for 3D cross-sections. 
- w = sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) - - image = interpolate_3d_cross(sdf, 'A', z_slice=0, x_pixels=25, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) - image_vec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x_pixels=25, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + weight = sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) + + img = interpolate_3d_cross(sdf, 'A', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + z_slice=0, + normalize=False, hmin=False) + img_vec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + z_slice=0, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert image[y][x] == approx(w[0] * sdf['A'][0] * - kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 0.5 ** 2) / sdf['h'][0], 3)) - assert image_vec[0][y][x] == approx(w[0] * sdf['A'][0] * - kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 0.5 ** 2) - / sdf['h'][0], 3)) - assert image_vec[1][y][x] == approx(w[0] * sdf['B'][0] * - kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 0.5 ** 2) - / sdf['h'][0], 3)) - - bounds = (-kernel.get_radius(), kernel.get_radius()) - image = interpolate_3d_grid(sdf, 'A', x_pixels=25, y_pixels=25, z_pixels=25, xlim=bounds, ylim=bounds, - zlim=bounds, normalize=False, hmin=False) + r = np.sqrt(real[x] ** 2 + real[y] ** 2 + 0.5 ** 2) + w = kernel.w(r / sdf['h'][0], 3) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) + + img = interpolate_3d_grid(sdf, 'A', + x_pixels=25, y_pixels=25, z_pixels=25, + xlim=bounds, ylim=bounds, zlim=bounds, + normalize=False, hmin=False) for z in range(25): for y in range(25): for x in range(25): - assert image[z][y][x] == 
approx(w[0] * sdf['A'][0] * - kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + (real[z] + 0.5) ** 2) - / sdf['h'][0], 3)) - - image = interpolate_3d_line(sdf, 'A', pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), - zlim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + r = np.sqrt(real[x] ** 2 + real[y] ** 2 + (real[z] + 0.5) ** 2) + w = kernel.w(r / sdf['h'][0], 3) + assert img[z][y][x] == approx(weight[0] * sdf['A'][0] * w) + + img = interpolate_3d_line(sdf, 'A', + pixels=25, + xlim=bounds, ylim=bounds, zlim=bounds, + normalize=False, hmin=False) for x in range(25): - assert image[x] == approx(w[0] * sdf['A'][0] * - kernel.w(np.sqrt(2 * real[x] ** 2 + (real[x] + 0.5) ** 2) / sdf['h'][0], 3)) + r = np.sqrt(2 * real[x] ** 2 + (real[x] + 0.5) ** 2) + w = kernel.w(r / sdf['h'][0], 3) + assert img[x] == approx(weight[0] * sdf['A'][0] * w) @mark.parametrize("backend", backends) def test_single_repeated_particle(backend): """ - The result of interpolation over a single particle repeated several times should be equal to scaled kernel - values at each point of the image multiplied by the number of particles. + The result of interpolation over a single particle repeated several times + should be equal to scaled kernel values at each point of the image + multiplied by the number of particles. - If this test fails, it is likely that there is a race condition issue in the interpolation implementation. + If this test fails, it is likely that there is a race condition issue in + the interpolation implementation. 
""" repetitions = 10000 - df = pd.concat([pd.DataFrame({'x': [0], 'y': [0], 'A': [4], 'B': [5], 'h': [0.9], 'rho': [0.4], - 'm': [0.03]})] * repetitions, ignore_index=True) + df = pd.concat([pd.DataFrame({'x': [0], 'y': [0], 'A': [4], 'B': [5], + 'h': [0.9], 'rho': [0.4], + 'm': [0.03]})] * repetitions, + ignore_index=True) sdf = SarracenDataFrame(df, params=dict()) kernel = CubicSplineKernel() + kernel_rad = kernel.get_radius() + bounds = (-kernel_rad, kernel_rad) + sdf.kernel = kernel sdf.backend = backend # Multiplying by repetitions here is done for ease of use. - w = repetitions * sdf['m'] / (sdf['rho'] * sdf['h'] ** 2) + weight = repetitions * sdf['m'] / (sdf['rho'] * sdf['h'] ** 2) # A mapping of pixel indices to x / y values in particle space. - real = -kernel.get_radius() + (np.arange(0, 25) + 0.5) * (2 * kernel.get_radius() / 25) - - image = interpolate_2d(sdf, 'A', x_pixels=25, y_pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) - image_vec = interpolate_2d_vec(sdf, 'A', 'B', x_pixels=25, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + real = -kernel_rad + (np.arange(0, 25) + 0.5) * (2 * kernel_rad / 25) + + img = interpolate_2d(sdf, 'A', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + normalize=False, hmin=False) + img_vec = interpolate_2d_vec(sdf, 'A', 'B', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert image[y][x] == \ - approx(w[0] * sdf['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[0][y][x] == \ - approx(w[0] * sdf['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[1][y][x] == \ - approx(w[0] * sdf['B'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - 
- image = interpolate_2d_line(sdf, 'A', pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + r = np.sqrt(real[x] ** 2 + real[y] ** 2) + w = kernel.w(r / sdf['h'][0], 2) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) + + img = interpolate_2d_line(sdf, 'A', + pixels=25, + xlim=bounds, ylim=bounds, + normalize=False, hmin=False) for x in range(25): - assert image[x] == approx(w[0] * sdf['A'][0] * kernel.w(np.sqrt(2) * np.abs(real[x]) / sdf['h'][0], 2)) + r = np.sqrt(2) * np.abs(real[x]) + w = kernel.w(r / sdf['h'][0], 2) + assert img[x] == approx(weight[0] * sdf['A'][0] * w) # Convert the previous 2D dataframe to a 3D dataframe. sdf['z'] = -0.5 @@ -164,83 +197,99 @@ def test_single_repeated_particle(backend): column_func = kernel.get_column_kernel_func(1000) - image = interpolate_3d_proj(sdf, 'A', x_pixels=25, y_pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), dens_weight=False, normalize=False, hmin=False) - image_vec = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=25, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), dens_weight=False, normalize=False, hmin=False) + img = interpolate_3d_proj(sdf, 'A', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + dens_weight=False, + normalize=False, hmin=False) + img_vec = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + dens_weight=False, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert image[y][x] == \ - approx(w[0] * sdf['A'][0] * column_func(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[0][y][x] == \ - approx(w[0] * sdf['A'][0] * 
column_func(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[1][y][x] == \ - approx(w[0] * sdf['B'][0] * column_func(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf['h'][0], 2)) + r = np.sqrt(real[x] ** 2 + real[y] ** 2) + w = column_func(r / sdf['h'][0], 2) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) # Weight for 3D cross-sections - w = repetitions * sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) - - image = interpolate_3d_cross(sdf, 'A', z_slice=0, x_pixels=25, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) - image_vec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x_pixels=25, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + weight = repetitions * sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) + + img = interpolate_3d_cross(sdf, 'A', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + z_slice=0, + normalize=False, hmin=False) + img_vec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=25, y_pixels=25, + xlim=bounds, ylim=bounds, + z_slice=0, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert image[y][x] == approx(w[0] * sdf['A'][0] * - kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 0.5 ** 2) / sdf['h'][0], 3)) - assert image_vec[0][y][x] == approx(w[0] * sdf['A'][0] * - kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 0.5 ** 2) - / sdf['h'][0], 3)) - assert image_vec[1][y][x] == approx(w[0] * sdf['B'][0] * - kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 0.5 ** 2) - / sdf['h'][0], 3)) - - bounds = (-kernel.get_radius(), kernel.get_radius()) - image = interpolate_3d_grid(sdf, 'A', x_pixels=25, y_pixels=25, z_pixels=25, xlim=bounds, ylim=bounds, zlim=bounds, - normalize=False, 
hmin=False) + r = np.sqrt(real[x] ** 2 + real[y] ** 2 + 0.5 ** 2) + w = kernel.w(r / sdf['h'][0], 3) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) + + img = interpolate_3d_grid(sdf, 'A', + x_pixels=25, y_pixels=25, z_pixels=25, + xlim=bounds, ylim=bounds, zlim=bounds, + normalize=False, hmin=False) for z in range(25): for y in range(25): for x in range(25): - assert image[z][y][x] == approx(w[0] * sdf['A'][0] * - kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + (real[z] + 0.5) ** 2) - / sdf['h'][0], 3)) - - image = interpolate_3d_line(sdf, 'A', pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), - zlim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + r = np.sqrt(real[x] ** 2 + real[y] ** 2 + (real[z] + 0.5) ** 2) + w = kernel.w(r / sdf['h'][0], 3) + assert img[z][y][x] == approx(weight[0] * sdf['A'][0] * w) + + img = interpolate_3d_line(sdf, 'A', + pixels=25, + xlim=bounds, ylim=bounds, zlim=bounds, + normalize=False, hmin=False) for x in range(25): - assert image[x] == approx(w[0] * sdf['A'][0] * - kernel.w(np.sqrt(2 * real[x] ** 2 + (real[x] + 0.5) ** 2) / sdf['h'][0], 3)) + r = np.sqrt(2 * real[x] ** 2 + (real[x] + 0.5) ** 2) + w = kernel.w(r / sdf['h'][0], 3) + assert img[x] == approx(weight[0] * sdf['A'][0] * w) @mark.parametrize("backend", backends) def test_dimension_check(backend): """ - Passing a dataframe with invalid dimensions should raise a TypeError for all interpolation functions. + Passing a dataframe with invalid dimensions should raise a TypeError for + all interpolation functions. """ # First, test a basic 2D dataframe passed to 3D interpolation functions. 
- df = pd.DataFrame({'x': [0, 1], 'y': [0, 1], 'P': [1, 1], 'Ax': [1, 1], 'Ay': [1, 1], 'h': [1, 1], - 'rho': [1, 1], 'm': [1, 1]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [0, 1], 'y': [0, 1], 'P': [1, 1], + 'Ax': [1, 1], 'Ay': [1, 1], 'h': [1, 1], + 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data, params=dict()) sdf.backend = backend - for func in [interpolate_3d_proj, interpolate_3d_cross]: + for func in [interpolate_3d_proj, + interpolate_3d_cross]: with raises(TypeError): func(sdf, 'P', normalize=False, hmin=False) - for func in [interpolate_3d_vec, interpolate_3d_cross_vec, interpolate_3d_grid]: + for func in [interpolate_3d_vec, + interpolate_3d_cross_vec, + interpolate_3d_grid]: with raises(TypeError): func(sdf, 'Ax', 'Ay', 'Az', normalize=False, hmin=False) # Next, test a basic 3D dataframe passed to 2D interpolation functions. - df = pd.DataFrame({'x': [0, 1], 'y': [0, 1], 'z': [0, 1], 'P': [1, 1], 'Ax': [1, 1], 'Ay': [1, 1], 'Az': [1, 1], - 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [0, 1], 'y': [0, 1], 'z': [0, 1], 'P': [1, 1], + 'Ax': [1, 1], 'Ay': [1, 1], 'Az': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data, params=dict()) sdf.backend = backend - for func in [interpolate_2d, interpolate_2d_line, interpolate_3d_line]: + for func in [interpolate_2d, + interpolate_2d_line, + interpolate_3d_line]: with raises(TypeError): func(sdf, 'P', normalize=False, hmin=False) with raises(TypeError): @@ -250,223 +299,316 @@ def test_dimension_check(backend): @mark.parametrize("backend", backends) def test_3d_xsec_equivalency(backend): """ - A single 3D column integration of a dataframe should be equivalent to the average of several evenly spaced 3D - cross-sections. + A single 3D column integration of a dataframe should be equivalent to the + average of several evenly spaced 3D cross-sections. 
""" - df = pd.DataFrame({'x': [0], 'y': [0], 'z': [0], 'A': [4], 'B': [6], 'C': [2], 'h': [0.9], 'rho': [0.4], - 'm': [0.03]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [0], 'y': [0], 'z': [0], + 'A': [4], 'B': [6], 'C': [2], + 'h': [0.9], 'rho': [0.4], 'm': [0.03]} + sdf = SarracenDataFrame(data, params=dict()) kernel = CubicSplineKernel() sdf.kernel = kernel sdf.backend = backend samples = 250 - column_image = interpolate_3d_proj(sdf, 'A', x_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=False, hmin=False) - column_image_vec = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=False, hmin=False) - - xsec_image = np.zeros((50, 50)) - xsec_image_vec = [np.zeros((50, 50)), np.zeros((50, 50))] + column_img = interpolate_3d_proj(sdf, 'A', + x_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=False, hmin=False) + column_img_vec = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=False, hmin=False) + + xsec_img = np.zeros((50, 50)) + xsec_img_vec = [np.zeros((50, 50)), np.zeros((50, 50))] for z in np.linspace(0, kernel.get_radius() * sdf['h'][0], samples): - xsec_image += interpolate_3d_cross(sdf, 'A', z_slice=z, x_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - normalize=False, hmin=False) - - vec_sample = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', z, x_pixels=50, xlim=(-1, 1), ylim=(-1, 1), + xsec_img += interpolate_3d_cross(sdf, 'A', + x_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=z, + normalize=False, hmin=False) + + vec_sample = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=z, normalize=False, hmin=False) - xsec_image_vec[0] += vec_sample[0] - xsec_image_vec[1] += vec_sample[1] + xsec_img_vec[0] += vec_sample[0] + xsec_img_vec[1] += vec_sample[1] - # Scale each cross-section summation to be equivalent to the column 
integration. - xsec_image *= kernel.get_radius() * sdf['h'][0] * 2 / samples - xsec_image_vec[0] *= kernel.get_radius() * sdf['h'][0] * 2 / samples - xsec_image_vec[1] *= kernel.get_radius() * sdf['h'][0] * 2 / samples + # Scale each cross-section sum to be equivalent to the column integration. + xsec_img *= kernel.get_radius() * sdf['h'][0] * 2 / samples + xsec_img_vec[0] *= kernel.get_radius() * sdf['h'][0] * 2 / samples + xsec_img_vec[1] *= kernel.get_radius() * sdf['h'][0] * 2 / samples - # The tolerances are lower here to accommodate for the relatively low sample size. A larger number of samples - # would result in an unacceptable test time for the GPU backend (which already doesn't perform well with repeated - # interpolation of just one particle) - assert_allclose(xsec_image, column_image, rtol=1e-3, atol=1e-4) - assert_allclose(xsec_image_vec[0], column_image_vec[0], rtol=1e-3, atol=1e-4) - assert_allclose(xsec_image_vec[1], column_image_vec[1], rtol=1e-3, atol=1e-4) + # The tolerances are lower here to accommodate for the relatively low + # sample size. A larger number of samples would result in an unacceptable + # test time for the GPU backend (which already doesn't perform well with + # repeated interpolation of just one particle) + assert_allclose(xsec_img, column_img, rtol=1e-3, atol=1e-4) + assert_allclose(xsec_img_vec[0], column_img_vec[0], rtol=1e-3, atol=1e-4) + assert_allclose(xsec_img_vec[1], column_img_vec[1], rtol=1e-3, atol=1e-4) @mark.parametrize("backend", backends) def test_2d_xsec_equivalency(backend): """ - A single 2D interpolation should be equivalent to several combined 2D cross-sections. + A single 2D interpolation should be equivalent to several combined + 2D cross-sections. """ - # This test currently fails on both backends, since a vertical 2D cross-section currently - # returns zero for an unknown reason. 
- df = pd.DataFrame({'x': [0], 'y': [0], 'A': [4], 'h': [0.9], 'rho': [0.4], 'm': [0.03]}) - sdf = SarracenDataFrame(df, params=dict()) + # This test currently fails on both backends, since a vertical 2D + # cross-section currently returns zero for an unknown reason. + data = {'x': [0], 'y': [0], 'A': [4], + 'h': [0.9], 'rho': [0.4], 'm': [0.03]} + sdf = SarracenDataFrame(data, params=dict()) kernel = CubicSplineKernel() sdf.kernel = kernel sdf.backend = backend - true_image = interpolate_2d(sdf, 'A', x_pixels=50, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) + true_img = interpolate_2d(sdf, 'A', + x_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) # A mapping of pixel indices to x & y values in particle space. real = -1 + (np.arange(0, 50) + 0.5) * (2 / 50) - reconstructed_image = np.zeros((50, 50)) + recon_img = np.zeros((50, 50)) for y in range(50): - reconstructed_image[y, :] = interpolate_2d_line(sdf, 'A', pixels=50, xlim=(-1, 1), ylim=(real[y], real[y]), normalize=False, hmin=False) - assert_allclose(reconstructed_image, true_image) + recon_img[y, :] = interpolate_2d_line(sdf, 'A', + pixels=50, + xlim=(-1, 1), + ylim=(real[y], real[y]), + normalize=False, hmin=False) + assert_allclose(recon_img, true_img) - # reconstructed_image = np.zeros((50, 50)) + # reconstructed_img = np.zeros((50, 50)) # for x in range(50): - # reconstructed_image[:, x] = interpolate_2d_line(sdf, 'A', pixels=50, xlim=(real[x], real[x]), ylim=(-1, 1)) - # assert_allclose(reconstructed_image, true_image) + # reconstructed_img[:, x] = interpolate_2d_line(sdf, 'A', + # pixels=50, xlim=(real[x], real[x]), ylim=(-1, 1)) + # assert_allclose(reconstructed_img, true_img) @mark.parametrize("backend", backends) def test_corner_particles(backend): """ - Interpolation over a dataset with two particles should be equal to the sum of contributions at each point. 
+ Interpolation over a dataset with two particles should be equal to the sum + of contributions at each point. """ kernel = CubicSplineKernel() - df_2 = pd.DataFrame({'x': [-1, 1], 'y': [-1, 1], 'A': [2, 1.5], 'B': [5, 2.3], 'h': [1.1, 1.3], 'rho': [0.55, 0.45], - 'm': [0.04, 0.05]}) - sdf_2 = SarracenDataFrame(df_2, params=dict()) + data_2 = {'x': [-1, 1], 'y': [-1, 1], + 'A': [2, 1.5], 'B': [5, 2.3], + 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) sdf_2.kernel = kernel sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [-1, 1], 'y': [-1, 1], 'z': [-1, 1], 'A': [2, 1.5], 'B': [2, 1], 'C': [7, 8], - 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + data_3 = {'x': [-1, 1], 'y': [-1, 1], 'z': [-1, 1], + 'A': [2, 1.5], 'B': [2, 1], 'C': [7, 8], + 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) sdf_3.kernel = kernel sdf_3.backend = backend # Weight for 2D interpolation, and 3D column interpolation. - w = sdf_2['m'] / (sdf_2['rho'] * sdf_2['h'] ** 2) + weight = sdf_2['m'] / (sdf_2['rho'] * sdf_2['h'] ** 2) # A mapping of pixel indices to x / y values in particle space. 
real = (np.arange(0, 25) + 0.5) * (2 / 25) - image = interpolate_2d(sdf_2, 'A', x_pixels=25, y_pixels=25, normalize=False, hmin=False) - image_vec = interpolate_2d_vec(sdf_2, 'A', 'B', x_pixels=25, y_pixels=25, normalize=False, hmin=False) + img = interpolate_2d(sdf_2, 'A', + x_pixels=25, y_pixels=25, + normalize=False, hmin=False) + img_vec = interpolate_2d_vec(sdf_2, 'A', 'B', + x_pixels=25, y_pixels=25, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert approx(w[0] * sdf_2['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf_2['h'][0], 2) - + w[1] * sdf_2['A'][1] * kernel.w(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2) - / sdf_2['h'][1], 2)) == image[y][x] - assert approx(w[0] * sdf_2['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf_2['h'][0], 2) - + w[1] * sdf_2['A'][1] * kernel.w(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2) - / sdf_2['h'][1], 2)) == image_vec[0][y][x] - assert approx(w[0] * sdf_2['B'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf_2['h'][0], 2) - + w[1] * sdf_2['B'][1] * kernel.w(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2) - / sdf_2['h'][1], 2)) == image_vec[1][y][x] - - image = interpolate_2d_line(sdf_2, 'A', pixels=25, normalize=False, hmin=False) + r = np.sqrt(real[x] ** 2 + real[y] ** 2) + w0 = kernel.w(r / sdf_2['h'][0], 2) + + r = np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2) + w1 = kernel.w(r / sdf_2['h'][1], 2) + assert img[y][x] == approx(weight[0] * sdf_2['A'][0] * w0 + + weight[1] * sdf_2['A'][1] * w1) + assert img_vec[0][y][x] == approx(weight[0] * sdf_2['A'][0] * w0 + + weight[1] * sdf_2['A'][1] * w1) + assert img_vec[1][y][x] == approx(weight[0] * sdf_2['B'][0] * w0 + + weight[1] * sdf_2['B'][1] * w1) + + img = interpolate_2d_line(sdf_2, 'A', + pixels=25, + normalize=False, hmin=False) for x in range(25): - assert approx(w[0] * sdf_2['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[x] ** 2) / sdf_2['h'][0], 2) - + w[1] * sdf_2['A'][1] * kernel.w(np.sqrt(real[24 - 
x] ** 2 + real[24 - x] ** 2) - / sdf_2['h'][1], 2)) == image[x] + r = np.sqrt(real[x] ** 2 + real[x] ** 2) + w0 = kernel.w(r / sdf_2['h'][0], 2) + + r = np.sqrt(real[24 - x] ** 2 + real[24 - x] ** 2) + w1 = kernel.w(r / sdf_2['h'][1], 2) + assert img[x] == approx(weight[0] * sdf_2['A'][0] * w0 + + weight[1] * sdf_2['A'][1] * w1) c_kernel = kernel.get_column_kernel_func(1000) - image = interpolate_3d_proj(sdf_3, 'A', x_pixels=25, y_pixels=25, - dens_weight=False, normalize=False, hmin=False) - image_vec = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', x_pixels=25, y_pixels=25, - dens_weight=False, normalize=False, hmin=False) + img = interpolate_3d_proj(sdf_3, 'A', + x_pixels=25, y_pixels=25, + dens_weight=False, + normalize=False, hmin=False) + img_vec = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', + x_pixels=25, y_pixels=25, + dens_weight=False, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert approx(w[0] * sdf_3['A'][0] * c_kernel(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf_3['h'][0], 2) - + w[1] * sdf_3['A'][1] * c_kernel(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2) - / sdf_3['h'][1], 2)) == image[y][x] - assert approx(w[0] * sdf_3['A'][0] * c_kernel(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf_3['h'][0], 2) - + w[1] * sdf_3['A'][1] * c_kernel(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2) - / sdf_3['h'][1], 2)) == image_vec[0][y][x] - assert approx(w[0] * sdf_3['B'][0] * c_kernel(np.sqrt(real[x] ** 2 + real[y] ** 2) / sdf_3['h'][0], 2) - + w[1] * sdf_3['B'][1] * c_kernel(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2) - / sdf_3['h'][1], 2)) == image_vec[1][y][x] + r = np.sqrt(real[x] ** 2 + real[y] ** 2) + w0 = c_kernel(r / sdf_3['h'][0], 2) + + r = np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2) + w1 = c_kernel(r / sdf_3['h'][1], 2) + assert img[y][x] == approx(weight[0] * sdf_3['A'][0] * w0 + + weight[1] * sdf_3['A'][1] * w1) + assert img_vec[0][y][x] == approx(weight[0] * sdf_3['A'][0] * w0 + + weight[1] * sdf_3['A'][1] * w1) + assert 
img_vec[1][y][x] == approx(weight[0] * sdf_3['B'][0] * w0 + + weight[1] * sdf_3['B'][1] * w1) # Weight for 3D cross-section interpolation. - w = sdf_3['m'] / (sdf_3['rho'] * sdf_3['h'] ** 3) + weight = sdf_3['m'] / (sdf_3['rho'] * sdf_3['h'] ** 3) - image = interpolate_3d_cross(sdf_3, 'A', x_pixels=25, y_pixels=25, normalize=False, hmin=False) - image_vec = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', x_pixels=25, y_pixels=25, normalize=False, hmin=False) + img = interpolate_3d_cross(sdf_3, 'A', + x_pixels=25, y_pixels=25, + normalize=False, hmin=False) + img_vec = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', + x_pixels=25, y_pixels=25, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert approx(w[0] * sdf_3['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 1) / sdf_3['h'][0], 3) - + w[1] * sdf_3['A'][1] * kernel.w(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2 + 1) - / sdf_3['h'][1], 3)) == image[y][x] - assert approx(w[0] * sdf_3['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 1) / sdf_3['h'][0], 3) - + w[1] * sdf_3['A'][1] * kernel.w(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2 + 1) - / sdf_3['h'][1], 3)) == image_vec[0][y][x] - assert approx(w[0] * sdf_3['B'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + 1) / sdf_3['h'][0], 3) - + w[1] * sdf_3['B'][1] * kernel.w(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2 + 1) - / sdf_3['h'][1], 3)) == image_vec[1][y][x] - - image = interpolate_3d_grid(sdf_3, 'A', x_pixels=25, y_pixels=25, z_pixels=25, normalize=False, hmin=False) + r = np.sqrt(real[x] ** 2 + real[y] ** 2 + 1) + w0 = kernel.w(r / sdf_3['h'][0], 3) + + r = np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2 + 1) + w1 = kernel.w(r / sdf_3['h'][1], 3) + assert img[y][x] == approx(weight[0] * sdf_3['A'][0] * w0 + + weight[1] * sdf_3['A'][1] * w1) + assert img_vec[0][y][x] == approx(weight[0] * sdf_3['A'][0] * w0 + + weight[1] * sdf_3['A'][1] * w1) + assert img_vec[1][y][x] == approx(weight[0] * sdf_3['B'][0] * w0 + + 
weight[1] * sdf_3['B'][1] * w1) + + img = interpolate_3d_grid(sdf_3, 'A', + x_pixels=25, y_pixels=25, z_pixels=25, + normalize=False, hmin=False) for z in range(25): for y in range(25): for x in range(25): - assert approx( - w[0] * sdf_3['A'][0] * kernel.w(np.sqrt(real[x] ** 2 + real[y] ** 2 + real[z] ** 2) - / sdf_3['h'][0], 3) + - w[1] * sdf_3['A'][1] * kernel.w(np.sqrt(real[24 - x] ** 2 + real[24 - y] ** 2 + real[24 - z] ** 2) - / sdf_3['h'][1], 3)) == image[z][y][x] + r = np.sqrt(real[x] ** 2 + real[y] ** 2 + real[z] ** 2) + w0 = kernel.w(r / sdf_3['h'][0], 3) + + r = np.sqrt(real[24-x]**2 + real[24-y]**2 + real[24-z]**2) + w1 = kernel.w(r / sdf_3['h'][1], 3) + assert img[z][y][x] == approx(weight[0] * sdf_3['A'][0] * w0 + + weight[1] * sdf_3['A'][1] * w1) @mark.parametrize("backend", backends) def test_image_transpose(backend): """ - Interpolation with flipped x & y axes should be equivalent to the transpose of regular interpolation. + Interpolation with flipped x & y axes should be equivalent to the transpose + of regular interpolation. 
""" - df = pd.DataFrame({'x': [-1, 1], 'y': [1, -1], 'A': [2, 1.5], 'B': [5, 4], 'h': [1.1, 1.3], 'rho': [0.55, 0.45], - 'm': [0.04, 0.05]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [-1, 1], 'y': [1, -1], 'A': [2, 1.5], 'B': [5, 4], + 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]} + sdf = SarracenDataFrame(data, params=dict()) sdf.backend = backend - image1 = interpolate_2d(sdf, 'A', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - image2 = interpolate_2d(sdf, 'A', x='y', y='x', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - assert_allclose(image1, image2.T) - - image1 = interpolate_2d_vec(sdf, 'A', 'B', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - image2 = interpolate_2d_vec(sdf, 'A', 'B', x='y', y='x', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - assert_allclose(image1[0], image2[0].T) - assert_allclose(image1[1], image2[1].T) - - df = pd.DataFrame({'x': [-1, 1], 'y': [1, -1], 'z': [-1, 1], 'A': [2, 1.5], 'B': [5, 4], 'C': [2.5, 3], - 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]}) - sdf = SarracenDataFrame(df, params=dict()) - - image1 = interpolate_3d_proj(sdf, 'A', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - image2 = interpolate_3d_proj(sdf, 'A', x='y', y='x', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - assert_allclose(image1, image2.T) - - image1 = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=50, y_pixels=50, normalize=False, hmin=False) - image2 = interpolate_3d_vec(sdf, 'A', 'B', 'C', x='y', y='x', x_pixels=50, y_pixels=50, normalize=False, hmin=False) - assert_allclose(image1[0], image2[0].T) - assert_allclose(image1[1], image2[1].T) - - image1 = interpolate_3d_cross(sdf, 'A', x_pixels=50, y_pixels=50, normalize=False, hmin=False) - image2 = interpolate_3d_cross(sdf, 'A', x='y', y='x', x_pixels=50, y_pixels=50, normalize=False, hmin=False) - assert_allclose(image1, image2.T) - - image1 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', x_pixels=20, 
y_pixels=20, normalize=False, hmin=False) - image2 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', x='y', y='x', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - assert_allclose(image1[0], image2[0].T) - assert_allclose(image1[1], image2[1].T) - - image1 = interpolate_3d_grid(sdf, 'A', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - image2 = interpolate_3d_grid(sdf, 'A', x='y', y='x', x_pixels=20, y_pixels=20, normalize=False, hmin=False) - assert_allclose(image1, image2.transpose(0, 2, 1)) + img1 = interpolate_2d(sdf, 'A', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + img2 = interpolate_2d(sdf, 'A', x='y', y='x', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + assert_allclose(img1, img2.T) + + img1 = interpolate_2d_vec(sdf, 'A', 'B', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + img2 = interpolate_2d_vec(sdf, 'A', 'B', x='y', y='x', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + assert_allclose(img1[0], img2[0].T) + assert_allclose(img1[1], img2[1].T) + + data = {'x': [-1, 1], 'y': [1, -1], 'z': [-1, 1], + 'A': [2, 1.5], 'B': [5, 4], 'C': [2.5, 3], + 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]} + sdf = SarracenDataFrame(data, params=dict()) + + img1 = interpolate_3d_proj(sdf, 'A', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + img2 = interpolate_3d_proj(sdf, 'A', + x='y', y='x', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + assert_allclose(img1, img2.T) + + img1 = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + normalize=False, hmin=False) + img2 = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x='y', y='x', + x_pixels=50, y_pixels=50, + normalize=False, hmin=False) + assert_allclose(img1[0], img2[0].T) + assert_allclose(img1[1], img2[1].T) + + img1 = interpolate_3d_cross(sdf, 'A', + x_pixels=50, y_pixels=50, + normalize=False, hmin=False) + img2 = interpolate_3d_cross(sdf, 'A', + x='y', y='x', + x_pixels=50, y_pixels=50, + 
normalize=False, hmin=False) + assert_allclose(img1, img2.T) + + img1 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + img2 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x='y', y='x', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + assert_allclose(img1[0], img2[0].T) + assert_allclose(img1[1], img2[1].T) + + img1 = interpolate_3d_grid(sdf, 'A', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + img2 = interpolate_3d_grid(sdf, 'A', + x='y', y='x', + x_pixels=20, y_pixels=20, + normalize=False, hmin=False) + assert_allclose(img1, img2.transpose(0, 2, 1)) @mark.parametrize("backend", backends) def test_default_kernel(backend): """ - Interpolation should use the kernel supplied to the function. If no kernel is supplied, the kernel attached to the - dataframe should be used. + Interpolation should use the kernel supplied to the function. If no kernel + is supplied, the kernel attached to the dataframe should be used. """ - df_2 = pd.DataFrame({'x': [0], 'y': [0], 'A': [1], 'B': [1], 'h': [1], 'rho': [1], 'm': [1]}) - sdf_2 = SarracenDataFrame(df_2, params=dict()) - df_3 = pd.DataFrame({'x': [0], 'y': [0], 'z': [0], 'A': [1], 'B': [1], 'C': [1], 'h': [1], 'rho': [1], 'm': [1]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + data_2 = {'x': [0], 'y': [0], 'A': [1], 'B': [1], + 'h': [1], 'rho': [1], 'm': [1]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) + data_3 = {'x': [0], 'y': [0], 'z': [0], + 'A': [1], 'B': [1], 'C': [1], + 'h': [1], 'rho': [1], 'm': [1]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) kernel = QuarticSplineKernel() sdf_2.kernel = kernel @@ -474,232 +616,383 @@ def test_default_kernel(backend): sdf_2.backend = backend sdf_3.backend = backend - # First, test that the dataframe kernel is used in cases with no kernel supplied. - - # Each interpolation is performed over one pixel, offering an easy way to check the kernel used by the function. 
- image = interpolate_2d(sdf_2, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image == kernel.w(0, 2) - image = interpolate_2d_vec(sdf_2, 'A', 'B', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image[0] == kernel.w(0, 2) - assert image[1] == kernel.w(0, 2) - - image = interpolate_2d_line(sdf_2, 'A', pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image == kernel.w(0, 2) - - image = interpolate_3d_proj(sdf_3, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image == kernel.get_column_kernel()[0] - image = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image[0] == kernel.get_column_kernel()[0] - assert image[1] == kernel.get_column_kernel()[0] - - image = interpolate_3d_cross(sdf_3, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image == kernel.w(0, 3) - image = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image[0] == kernel.w(0, 3) - assert image[1] == kernel.w(0, 3) - - image = interpolate_3d_grid(sdf_3, 'A', x_pixels=1, y_pixels=1, z_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - zlim=(-1, 1), normalize=False, hmin=False) - assert image == kernel.w(0, 3) - - image = interpolate_3d_line(sdf_3, 'A', pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image == kernel.w(0, 3) + # First, test that the dataframe kernel is used when no kernel is supplied. + + # Each interpolation is performed over one pixel, offering an easy way to + # check the kernel used by the function. 
+ img = interpolate_2d(sdf_2, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + assert img == kernel.w(0, 2) + img = interpolate_2d_vec(sdf_2, 'A', 'B', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + assert img[0] == kernel.w(0, 2) + assert img[1] == kernel.w(0, 2) + + img = interpolate_2d_line(sdf_2, 'A', + pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + assert img == kernel.w(0, 2) + + img = interpolate_3d_proj(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + assert img == kernel.get_column_kernel()[0] + img = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + assert img[0] == kernel.get_column_kernel()[0] + assert img[1] == kernel.get_column_kernel()[0] + + img = interpolate_3d_cross(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + assert img == kernel.w(0, 3) + img = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + assert img[0] == kernel.w(0, 3) + assert img[1] == kernel.w(0, 3) + + img = interpolate_3d_grid(sdf_3, 'A', + x_pixels=1, y_pixels=1, z_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + normalize=False, hmin=False) + assert img == kernel.w(0, 3) + + img = interpolate_3d_line(sdf_3, 'A', + pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + assert img == kernel.w(0, 3) # Next, test that the kernel supplied to the function is actually used. 
kernel = QuinticSplineKernel() - image = interpolate_2d(sdf_2, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), kernel=kernel, normalize=False, hmin=False) - assert image == kernel.w(0, 2) - image = interpolate_2d_vec(sdf_2, 'A', 'B', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), kernel=kernel, normalize=False, hmin=False) - assert image[0] == kernel.w(0, 2) - assert image[1] == kernel.w(0, 2) - - image = interpolate_2d_line(sdf_2, 'A', pixels=1, xlim=(-1, 1), ylim=(-1, 1), kernel=kernel, normalize=False, hmin=False) - assert image == kernel.w(0, 2) - - image = interpolate_3d_proj(sdf_3, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), kernel=kernel, normalize=False, hmin=False) - assert image == kernel.get_column_kernel()[0] - image = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), kernel=kernel, normalize=False, hmin=False) - assert image[0] == kernel.get_column_kernel()[0] - assert image[1] == kernel.get_column_kernel()[0] - - image = interpolate_3d_cross(sdf_3, 'A', kernel=kernel, x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert image == kernel.w(0, 3) - image = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - kernel=kernel, normalize=False, hmin=False) - assert image[0] == kernel.w(0, 3) - assert image[1] == kernel.w(0, 3) - - image = interpolate_3d_grid(sdf_3, 'A', x_pixels=1, y_pixels=1, z_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - zlim=(-1, 1), kernel=kernel, normalize=False, hmin=False) - assert image == kernel.w(0, 3) - - image = interpolate_3d_line(sdf_3, 'A', pixels=1, xlim=(-1, 1), ylim=(-1, 1), kernel=kernel, normalize=False, hmin=False) - assert image == kernel.w(0, 3) + img = interpolate_2d(sdf_2, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img == kernel.w(0, 2) + img = interpolate_2d_vec(sdf_2, 'A', 'B', + x_pixels=1, 
y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img[0] == kernel.w(0, 2) + assert img[1] == kernel.w(0, 2) + + img = interpolate_2d_line(sdf_2, 'A', + pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img == kernel.w(0, 2) + + img = interpolate_3d_proj(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img == kernel.get_column_kernel()[0] + img = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img[0] == kernel.get_column_kernel()[0] + assert img[1] == kernel.get_column_kernel()[0] + + img = interpolate_3d_cross(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img == kernel.w(0, 3) + img = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img[0] == kernel.w(0, 3) + assert img[1] == kernel.w(0, 3) + + img = interpolate_3d_grid(sdf_3, 'A', + x_pixels=1, y_pixels=1, z_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img == kernel.w(0, 3) + + img = interpolate_3d_line(sdf_3, 'A', + pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + kernel=kernel, + normalize=False, hmin=False) + assert img == kernel.w(0, 3) @mark.parametrize("backend", backends) def test_column_samples(backend): """ - 3D column interpolation should use the number of integral samples supplied as an argument. + 3D column interpolation should use the number of integral samples supplied + as an argument. 
""" - df_3 = pd.DataFrame({'x': [0], 'y': [0], 'z': [0], 'A': [1], 'h': [1], 'rho': [1], 'm': [1]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + data_3 = {'x': [0], 'y': [0], 'z': [0], + 'A': [1], 'h': [1], 'rho': [1], 'm': [1]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) kernel = QuinticSplineKernel() sdf_3.kernel = kernel sdf_3.backend = backend - # 2 samples is used here, since a column kernel with 2 samples will be drastically different than the - # default kernel of 1000 samples. - image = interpolate_3d_proj(sdf_3, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - integral_samples=2, normalize=False, hmin=False) - assert image == kernel.get_column_kernel(2)[0] + # 2 samples is used here, since a column kernel with 2 samples will be + # drastically different than the default kernel of 1000 samples. + img = interpolate_3d_proj(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + integral_samples=2, + normalize=False, hmin=False) + assert img == kernel.get_column_kernel(2)[0] -# this test is incredibly slow on the GPU backend (30min+) so it only runs on the CPU -# backend for now. -#@mark.parametrize("backend", backends) +# this test is incredibly slow on the GPU backend (30min+) so it only runs on +# the CPU backend for now. +# @mark.parametrize("backend", backends) def test_pixel_arguments(): """ - Default interpolation pixel counts should be selected to preserve the aspect ratio of the data. + Default interpolation pixel counts should be selected to preserve the + aspect ratio of the data. 
""" backend = 'cpu' - df_2 = pd.DataFrame({'x': [-2, 4], 'y': [3, 7], 'A': [1, 1], 'B': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf_2 = SarracenDataFrame(df_2, params=dict()) + data_2 = {'x': [-2, 4], 'y': [3, 7], 'A': [1, 1], 'B': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [-2, 4], 'y': [3, 7], 'z': [6, -2], 'A': [1, 1], 'B': [1, 1], 'C': [1, 1], 'h': [1, 1], - 'rho': [1, 1], 'm': [1, 1]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + data_3 = {'x': [-2, 4], 'y': [3, 7], 'z': [6, -2], + 'A': [1, 1], 'B': [1, 1], 'C': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) sdf_3.backend = backend default_pixels = 12 # 3D grid interpolation - for axes in [('x', 'y', 'z'), ('x', 'z', 'y'), ('y', 'z', 'x'), ('y', 'x', 'z'), ('z', 'x', 'y'), ('z', 'y', 'x')]: - ratio01 = np.abs(df_3[axes[0]][1] - df_3[axes[0]][0]) / np.abs(df_3[axes[1]][1] - df_3[axes[1]][0]) - ratio02 = np.abs(df_3[axes[0]][1] - df_3[axes[0]][0]) / np.abs(df_3[axes[2]][1] - df_3[axes[2]][0]) - ratio12 = np.abs(df_3[axes[1]][1] - df_3[axes[1]][0]) / np.abs(df_3[axes[2]][1] - df_3[axes[2]][0]) - - image = interpolate_3d_grid(sdf_3, 'A', x=axes[0], y=axes[1], z=axes[2], normalize=False, hmin=False) - assert image.shape[2] / image.shape[1] == approx(ratio01, rel=1e-2) - assert image.shape[1] / image.shape[0] == approx(ratio12, rel=1e-2) - assert image.shape[2] / image.shape[0] == approx(ratio02, rel=1e-2) - - image = interpolate_3d_grid(sdf_3, 'A', x=axes[0], y=axes[1], z=axes[2], x_pixels=default_pixels, normalize=False, hmin=False) - assert image.shape == (round(default_pixels / ratio02), round(default_pixels / ratio01), default_pixels) - - image = interpolate_3d_grid(sdf_3, 'A', x=axes[0], y=axes[1], z=axes[2], y_pixels=default_pixels, normalize=False, hmin=False) - assert image.shape == (round(default_pixels / ratio12), 
default_pixels, round(default_pixels * ratio01)) - - image = interpolate_3d_grid(sdf_3, 'A', x=axes[0], y=axes[1], z=axes[2], x_pixels=default_pixels, - y_pixels=default_pixels, z_pixels=default_pixels, normalize=False, hmin=False) - assert image.shape == (default_pixels, default_pixels, default_pixels) + for axes in [('x', 'y', 'z'), + ('x', 'z', 'y'), + ('y', 'z', 'x'), + ('y', 'x', 'z'), + ('z', 'x', 'y'), + ('z', 'y', 'x')]: + diff_0 = np.abs(sdf_3[axes[0]][1] - sdf_3[axes[0]][0]) + diff_1 = np.abs(sdf_3[axes[1]][1] - sdf_3[axes[1]][0]) + diff_2 = np.abs(sdf_3[axes[2]][1] - sdf_3[axes[2]][0]) + + ratio01 = diff_0 / diff_1 + ratio02 = diff_0 / diff_2 + ratio12 = diff_1 / diff_2 + + img = interpolate_3d_grid(sdf_3, 'A', + x=axes[0], y=axes[1], z=axes[2], + normalize=False, hmin=False) + assert img.shape[2] / img.shape[1] == approx(ratio01, rel=1e-2) + assert img.shape[1] / img.shape[0] == approx(ratio12, rel=1e-2) + assert img.shape[2] / img.shape[0] == approx(ratio02, rel=1e-2) + + img = interpolate_3d_grid(sdf_3, 'A', + x=axes[0], y=axes[1], z=axes[2], + x_pixels=default_pixels, + normalize=False, hmin=False) + assert img.shape == (round(default_pixels / ratio02), + round(default_pixels / ratio01), default_pixels) + + img = interpolate_3d_grid(sdf_3, 'A', + x=axes[0], y=axes[1], z=axes[2], + y_pixels=default_pixels, + normalize=False, hmin=False) + assert img.shape == (round(default_pixels / ratio12), + default_pixels, + round(default_pixels * ratio01)) + + img = interpolate_3d_grid(sdf_3, 'A', + x=axes[0], y=axes[1], z=axes[2], + x_pixels=default_pixels, + y_pixels=default_pixels, + z_pixels=default_pixels, + normalize=False, hmin=False) + assert img.shape == (default_pixels, default_pixels, default_pixels) # Non-vector functions for func in [interpolate_2d, interpolate_3d_proj, interpolate_3d_cross]: - for axes in [('x', 'y'), ('x', 'z'), ('y', 'z'), ('y', 'x'), ('z', 'x'), ('z', 'y')]: - # The ratio of distance between particles in the second axis versus the 
distance between particles in - # the first axis. - ratio = np.abs(df_3[axes[1]][1] - df_3[axes[1]][0]) / np.abs(df_3[axes[0]][1] - df_3[axes[0]][0]) - - # Avoids passing a z-axis argument to interpolate_2d, which would result in an error. + for axes in [('x', 'y'), + ('x', 'z'), + ('y', 'z'), + ('y', 'x'), + ('z', 'x'), + ('z', 'y')]: + # The ratio of distance between particles in the second axis versus + # the distance between particles in the first axis. + ratio = np.abs(sdf_3[axes[1]][1] - sdf_3[axes[1]][0]) \ + / np.abs(sdf_3[axes[0]][1] - sdf_3[axes[0]][0]) + + # Avoids passing a z-axis argument to interpolate_2d, which would + # result in an error. if (axes[0] == 'z' or axes[1] == 'z') and func is interpolate_2d: continue - # The dataframe is selected to ensure the correct number of dimensions. + # Dataframe is selected to ensure the correct number of dimensions. sdf = sdf_2 if func is interpolate_2d else sdf_3 - # With no pixels specified, the pixels in the image will match the ratio of the data. - # The loose tolerance here accounts for the integer rounding. - image = func(sdf, 'A', x=axes[0], y=axes[1], normalize=False, hmin=False) - assert image.shape[0] / image.shape[1] == approx(ratio, rel=1e-2) - - # With one axis specified, the pixels in the other axis will be selected to match the ratio of the data. - image = func(sdf, 'A', x=axes[0], y=axes[1], x_pixels=default_pixels, normalize=False, hmin=False) - assert image.shape == (round(default_pixels * ratio), default_pixels) - - image = func(sdf, 'A', x=axes[0], y=axes[1], y_pixels=default_pixels, normalize=False, hmin=False) - assert image.shape == (default_pixels, round(default_pixels / ratio)) - - # With both axes specified, the pixels will simply match the specified counts. 
- image = func(sdf, 'A', x_pixels=default_pixels * 2, y_pixels=default_pixels, normalize=False, hmin=False) - assert image.shape == (default_pixels, default_pixels * 2) + # With no pixels specified, the pixels in the image will match the + # ratio of the data. The loose tolerance here accounts for the + # integer rounding. + img = func(sdf, 'A', + x=axes[0], y=axes[1], + normalize=False, hmin=False) + assert img.shape[0] / img.shape[1] == approx(ratio, rel=1e-2) + + # With one axis specified, the pixels in the other axis will be + # selected to match the ratio of the data. + img = func(sdf, 'A', + x=axes[0], y=axes[1], + x_pixels=default_pixels, + normalize=False, hmin=False) + assert img.shape == (round(default_pixels * ratio), default_pixels) + + img = func(sdf, 'A', + x=axes[0], y=axes[1], + y_pixels=default_pixels, + normalize=False, hmin=False) + assert img.shape == (default_pixels, round(default_pixels / ratio)) + + # With both axes specified, the pixels will simply match the + # specified counts. + img = func(sdf, 'A', + x_pixels=default_pixels * 2, y_pixels=default_pixels, + normalize=False, hmin=False) + assert img.shape == (default_pixels, default_pixels * 2) # 3D Vector-based functions for func in [interpolate_3d_vec, interpolate_3d_cross_vec]: - for axes in [('x', 'y'), ('x', 'z'), ('y', 'z'), ('y', 'x'), ('z', 'x'), ('z', 'y')]: - ratio = np.abs(df_3[axes[1]][1] - df_3[axes[1]][0]) / np.abs(df_3[axes[0]][1] - df_3[axes[0]][0]) + for axes in [('x', 'y'), + ('x', 'z'), + ('y', 'z'), + ('y', 'x'), + ('z', 'x'), + ('z', 'y')]: + ratio = np.abs(sdf_3[axes[1]][1] - sdf_3[axes[1]][0]) \ + / np.abs(sdf_3[axes[0]][1] - sdf_3[axes[0]][0]) # Here, the tests are performed for both vector directions. 
- image = func(sdf_3, 'A', 'B', 'C', x=axes[0], y=axes[1], normalize=False, hmin=False) - assert image[0].shape[0] / image[0].shape[1] == approx(ratio, rel=1e-2) - assert image[1].shape[0] / image[1].shape[1] == approx(ratio, rel=1e-2) - - image = func(sdf_3, 'A', 'B', 'C', x=axes[0], y=axes[1], x_pixels=default_pixels, normalize=False, hmin=False) - assert image[0].shape == (round(default_pixels * ratio), default_pixels) - assert image[1].shape == (round(default_pixels * ratio), default_pixels) - - image = func(sdf_3, 'A', 'B', 'C', x=axes[0], y=axes[1], y_pixels=default_pixels, normalize=False, hmin=False) - assert image[0].shape == (default_pixels, round(default_pixels / ratio)) - assert image[1].shape == (default_pixels, round(default_pixels / ratio)) - - image = func(sdf_3, 'A', 'B', 'C', x_pixels=default_pixels * 2, y_pixels=default_pixels, normalize=False, hmin=False) - assert image[0].shape == (default_pixels, default_pixels * 2) - assert image[1].shape == (default_pixels, default_pixels * 2) + img = func(sdf_3, 'A', 'B', 'C', + x=axes[0], y=axes[1], + normalize=False, hmin=False) + assert img[0].shape[0] / img[0].shape[1] == approx(ratio, rel=1e-2) + assert img[1].shape[0] / img[1].shape[1] == approx(ratio, rel=1e-2) + + img = func(sdf_3, 'A', 'B', 'C', + x=axes[0], y=axes[1], + x_pixels=default_pixels, + normalize=False, hmin=False) + assert img[0].shape == (round(default_pixels * ratio), + default_pixels) + assert img[1].shape == (round(default_pixels * ratio), + default_pixels) + + img = func(sdf_3, 'A', 'B', 'C', + x=axes[0], y=axes[1], + y_pixels=default_pixels, + normalize=False, hmin=False) + assert img[0].shape == (default_pixels, + round(default_pixels / ratio)) + assert img[1].shape == (default_pixels, + round(default_pixels / ratio)) + + img = func(sdf_3, 'A', 'B', 'C', + x_pixels=default_pixels * 2, y_pixels=default_pixels, + normalize=False, hmin=False) + assert img[0].shape == (default_pixels, default_pixels * 2) + assert img[1].shape == 
(default_pixels, default_pixels * 2) # 2D vector interpolation for axes in [('x', 'y'), ('y', 'x')]: - ratio = np.abs(df_3[axes[1]][1] - df_3[axes[1]][0]) / np.abs(df_3[axes[0]][1] - df_3[axes[0]][0]) - - image = interpolate_2d_vec(sdf_2, 'A', 'B', x=axes[0], y=axes[1], normalize=False, hmin=False) - assert image[0].shape[0] / image[0].shape[1] == approx(ratio, rel=1e-2) - assert image[1].shape[0] / image[1].shape[1] == approx(ratio, rel=1e-2) - - image = interpolate_2d_vec(sdf_2, 'A', 'B', x=axes[0], y=axes[1], x_pixels=default_pixels, normalize=False, hmin=False) - assert image[0].shape == (round(default_pixels * ratio), default_pixels) - assert image[1].shape == (round(default_pixels * ratio), default_pixels) - - image = interpolate_2d_vec(sdf_2, 'A', 'B', x=axes[0], y=axes[1], y_pixels=default_pixels, normalize=False, hmin=False) - assert image[0].shape == (default_pixels, round(default_pixels / ratio)) - assert image[1].shape == (default_pixels, round(default_pixels / ratio)) - - image = interpolate_2d_vec(sdf_2, 'A', 'B', x_pixels=default_pixels * 2, y_pixels=default_pixels, normalize=False, hmin=False) - assert image[0].shape == (default_pixels, default_pixels * 2) - assert image[1].shape == (default_pixels, default_pixels * 2) + ratio = np.abs(sdf_3[axes[1]][1] - sdf_3[axes[1]][0]) \ + / np.abs(sdf_3[axes[0]][1] - sdf_3[axes[0]][0]) + + img = interpolate_2d_vec(sdf_2, 'A', 'B', + x=axes[0], y=axes[1], + normalize=False, hmin=False) + assert img[0].shape[0] / img[0].shape[1] == approx(ratio, rel=1e-2) + assert img[1].shape[0] / img[1].shape[1] == approx(ratio, rel=1e-2) + + img = interpolate_2d_vec(sdf_2, 'A', 'B', + x=axes[0], y=axes[1], + x_pixels=default_pixels, + normalize=False, hmin=False) + assert img[0].shape == (round(default_pixels * ratio), default_pixels) + assert img[1].shape == (round(default_pixels * ratio), default_pixels) + + img = interpolate_2d_vec(sdf_2, 'A', 'B', + x=axes[0], y=axes[1], + y_pixels=default_pixels, + normalize=False, 
hmin=False) + assert img[0].shape == (default_pixels, round(default_pixels / ratio)) + assert img[1].shape == (default_pixels, round(default_pixels / ratio)) + + img = interpolate_2d_vec(sdf_2, 'A', 'B', + x_pixels=default_pixels * 2, + y_pixels=default_pixels, + normalize=False, hmin=False) + assert img[0].shape == (default_pixels, default_pixels * 2) + assert img[1].shape == (default_pixels, default_pixels * 2) @mark.parametrize("backend", backends) def test_irregular_bounds(backend): """ - When the aspect ratio of pixels is different than the aspect ratio in particle space, the interpolation functions - should still correctly interpolate to the skewed grid. + When the aspect ratio of pixels is different than the aspect ratio in + particle space, the interpolation functions should still correctly + interpolate to the skewed grid. """ - df = pd.DataFrame({'x': [0], 'y': [0], 'A': [4], 'B': [7], 'h': [0.9], 'rho': [0.4], 'm': [0.03]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [0], 'y': [0], 'A': [4], 'B': [7], + 'h': [0.9], 'rho': [0.4], 'm': [0.03]} + sdf = SarracenDataFrame(data, params=dict()) kernel = CubicSplineKernel() + kernel_rad = kernel.get_radius() + bounds = (-kernel_rad, kernel_rad) sdf.kernel = kernel sdf.backend = backend # Weight for 2D interpolation and 3D column interpolation. - w = sdf['m'] / (sdf['rho'] * sdf['h'] ** 2) + weight = sdf['m'] / (sdf['rho'] * sdf['h'] ** 2) # A mapping of pixel indices to x / y values in particle space. 
- real_x = -kernel.get_radius() + (np.arange(0, 50) + 0.5) * (2 * kernel.get_radius() / 50) - real_y = -kernel.get_radius() + (np.arange(0, 25) + 0.5) * (2 * kernel.get_radius() / 25) - - image = interpolate_2d(sdf, 'A', x_pixels=50, y_pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) - image_vec = interpolate_2d_vec(sdf, 'A', 'B', x_pixels=50, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + real_x = -kernel_rad + (np.arange(0, 50) + 0.5) * (2 * kernel_rad / 50) + real_y = -kernel_rad + (np.arange(0, 25) + 0.5) * (2 * kernel_rad / 25) + + img = interpolate_2d(sdf, 'A', + x_pixels=50, y_pixels=25, + xlim=bounds, ylim=bounds, + normalize=False, hmin=False) + img_vec = interpolate_2d_vec(sdf, 'A', 'B', + x_pixels=50, y_pixels=25, + xlim=bounds, ylim=bounds, + normalize=False, hmin=False) for y in range(25): for x in range(50): - assert image[y][x] == approx( - w[0] * sdf['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[0][y][x] == approx( - w[0] * sdf['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[1][y][x] == approx( - w[0] * sdf['B'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf['h'][0], 2)) + r = np.sqrt(real_x[x]**2 + real_y[y]**2) + w = kernel.w(r / sdf['h'][0], 2) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) # Convert the existing 2D dataframe to a 3D dataframe. 
sdf['C'] = 5 @@ -708,127 +1001,164 @@ def test_irregular_bounds(backend): column_func = kernel.get_column_kernel_func(1000) - image = interpolate_3d_proj(sdf, 'A', x_pixels=50, y_pixels=25, xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), dens_weight=False, normalize=False, hmin=False) - image_vec = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=50, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), dens_weight=False, normalize=False, hmin=False) + img = interpolate_3d_proj(sdf, 'A', + x_pixels=50, y_pixels=25, + xlim=bounds, ylim=bounds, + dens_weight=False, + normalize=False, hmin=False) + img_vec = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=25, + xlim=bounds, ylim=bounds, + dens_weight=False, + normalize=False, hmin=False) for y in range(25): for x in range(50): - assert image[y][x] == approx( - w[0] * sdf['A'][0] * column_func(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[0][y][x] == approx( - w[0] * sdf['A'][0] * column_func(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf['h'][0], 2)) - assert image_vec[1][y][x] == approx( - w[0] * sdf['B'][0] * column_func(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf['h'][0], 2)) + r = np.sqrt(real_x[x]**2 + real_y[y]**2) + w = column_func(r / sdf['h'][0], 2) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) # Weight for 3D cross-section interpolation. 
- w = sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) - - image = interpolate_3d_cross(sdf, 'A', z_slice=0, x_pixels=50, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) - image_vec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x_pixels=50, y_pixels=25, - xlim=(-kernel.get_radius(), kernel.get_radius()), - ylim=(-kernel.get_radius(), kernel.get_radius()), normalize=False, hmin=False) + weight = sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) + + img = interpolate_3d_cross(sdf, 'A', + x_pixels=50, y_pixels=25, + xlim=bounds, ylim=bounds, + z_slice=0, + normalize=False, hmin=False) + img_vec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=25, + xlim=bounds, ylim=bounds, + z_slice=0, + normalize=False, hmin=False) for y in range(25): for x in range(50): - assert image[y][x] == approx( - w[0] * sdf['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2 + 0.5 ** 2) / sdf['h'][0], 3)) - assert image_vec[0][y][x] == approx( - w[0] * sdf['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2 + 0.5 ** 2) / sdf['h'][0], 3)) - assert image_vec[1][y][x] == approx( - w[0] * sdf['B'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2 + 0.5 ** 2) / sdf['h'][0], 3)) - - real_z = -kernel.get_radius() + 0.5 + (np.arange(0, 15) + 0.5) * (2 * kernel.get_radius() / 15) - limit = -kernel.get_radius(), kernel.get_radius() - - image = interpolate_3d_grid(sdf, 'A', x_pixels=50, y_pixels=25, z_pixels=15, xlim=limit, ylim=limit, zlim=limit, normalize=False, hmin=False) + r = np.sqrt(real_x[x]**2 + real_y[y]**2 + 0.5**2) + w = kernel.w(r / sdf['h'][0], 3) + assert img[y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf['B'][0] * w) + + real_z = -kernel_rad + 0.5 + (np.arange(0, 15) + 0.5) * 2 * kernel_rad / 15 + + img = interpolate_3d_grid(sdf, 'A', + x_pixels=50, 
y_pixels=25, z_pixels=15, + xlim=bounds, ylim=bounds, zlim=bounds, + normalize=False, hmin=False) for z in range(15): for y in range(25): for x in range(50): - assert image[z][y][x] == approx( - w[0] * sdf['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2 + real_z[z] ** 2) / sdf['h'][0], 3)) + r = np.sqrt(real_x[x]**2 + real_y[y]**2 + real_z[z]**2) + w = kernel.w(r / sdf['h'][0], 3) + assert img[z][y][x] == approx(weight[0] * sdf['A'][0] * w) @mark.parametrize("backend", backends) def test_oob_particles(backend): """ - Particles outside the bounds of an interpolation operation should be included in the result. + Particles outside the bounds of an interpolation operation should be + included in the result. """ kernel = CubicSplineKernel() - df_2 = pd.DataFrame({'x': [0], 'y': [0], 'A': [4], 'B': [3], 'h': [1.9], 'rho': [0.4], 'm': [0.03]}) - sdf_2 = SarracenDataFrame(df_2, params=dict()) + data_2 = {'x': [0], 'y': [0], 'A': [4], 'B': [3], + 'h': [1.9], 'rho': [0.4], 'm': [0.03]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) sdf_2.kernel = kernel sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [0], 'y': [0], 'z': [0.5], 'A': [4], 'B': [3], 'C': [2], 'h': [1.9], 'rho': [0.4], - 'm': [0.03]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + data_3 = {'x': [0], 'y': [0], 'z': [0.5], + 'A': [4], 'B': [3], 'C': [2], + 'h': [1.9], 'rho': [0.4], 'm': [0.03]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) sdf_3.kernel = kernel sdf_3.backend = backend # Weight for 2D interpolation, and 3D column interpolation. - w = sdf_2['m'] / (sdf_2['rho'] * sdf_2['h'] ** 2) + weight = sdf_2['m'] / (sdf_2['rho'] * sdf_2['h'] ** 2) # A mapping of pixel indices to x / y values in particle space. 
real_x = 1 + (np.arange(0, 25) + 0.5) * (1 / 25) real_y = 1 + (np.arange(0, 25) + 0.5) * (1 / 25) - image = interpolate_2d(sdf_2, 'A', x_pixels=25, y_pixels=25, xlim=(1, 2), ylim=(1, 2), normalize=False, hmin=False) - image_vec = interpolate_2d_vec(sdf_2, 'A', 'B', x_pixels=25, y_pixels=25, xlim=(1, 2), ylim=(1, 2), normalize=False, hmin=False) - line = interpolate_2d_line(sdf_2, 'A', pixels=25, xlim=(1, 2), ylim=(1, 2), normalize=False, hmin=False) + img = interpolate_2d(sdf_2, 'A', + x_pixels=25, y_pixels=25, + xlim=(1, 2), ylim=(1, 2), + normalize=False, hmin=False) + img_vec = interpolate_2d_vec(sdf_2, 'A', 'B', + x_pixels=25, y_pixels=25, + xlim=(1, 2), ylim=(1, 2), + normalize=False, hmin=False) + line = interpolate_2d_line(sdf_2, 'A', + pixels=25, + xlim=(1, 2), ylim=(1, 2), + normalize=False, hmin=False) for y in range(25): - assert line[y] == approx( - w[0] * sdf_2['A'][0] * kernel.w(np.sqrt(real_x[y] ** 2 + real_y[y] ** 2) / sdf_2['h'][0], 2)) + r = np.sqrt(real_x[y]**2 + real_y[y]**2) + w = kernel.w(r / sdf_2['h'][0], 2) + assert line[y] == approx(weight[0] * sdf_2['A'][0] * w) for x in range(25): - assert image[y][x] == approx( - w[0] * sdf_2['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf_2['h'][0], 2)) - assert image_vec[0][y][x] == approx( - w[0] * sdf_2['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf_2['h'][0], 2)) - assert image_vec[1][y][x] == approx( - w[0] * sdf_2['B'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf_2['h'][0], 2)) + r = np.sqrt(real_x[x]**2 + real_y[y]**2) + w = kernel.w(r / sdf_2['h'][0], 2) + assert img[y][x] == approx(weight[0] * sdf_2['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf_2['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf_2['B'][0] * w) column_func = kernel.get_column_kernel_func(1000) - image = interpolate_3d_proj(sdf_3, 'A', x_pixels=25, y_pixels=25, xlim=(1, 2), ylim=(1, 2), - dens_weight=False, normalize=False, hmin=False) - 
image_vec = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', x_pixels=25, y_pixels=25, xlim=(1, 2), ylim=(1, 2), - dens_weight=False, normalize=False, hmin=False) + img = interpolate_3d_proj(sdf_3, 'A', + x_pixels=25, y_pixels=25, + xlim=(1, 2), ylim=(1, 2), + dens_weight=False, + normalize=False, hmin=False) + img_vec = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', + x_pixels=25, y_pixels=25, + xlim=(1, 2), ylim=(1, 2), + dens_weight=False, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert image[y][x] == approx( - w[0] * sdf_3['A'][0] * column_func(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf_3['h'][0], 2)) - assert image_vec[0][y][x] == approx( - w[0] * sdf_3['A'][0] * column_func(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf_3['h'][0], 2)) - assert image_vec[1][y][x] == approx( - w[0] * sdf_3['B'][0] * column_func(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2) / sdf_3['h'][0], 2)) + r = np.sqrt(real_x[x]**2 + real_y[y]**2) + w = column_func(r / sdf_3['h'][0], 2) + assert img[y][x] == approx(weight[0] * sdf_3['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf_3['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf_3['B'][0] * w) # Weight for 3D cross-sections. 
- w = sdf_3['m'] / (sdf_3['rho'] * sdf_3['h'] ** 3) + weight = sdf_3['m'] / (sdf_3['rho'] * sdf_3['h'] ** 3) - image = interpolate_3d_cross(sdf_3, 'A', z_slice=0, x_pixels=25, y_pixels=25, xlim=(1, 2), ylim=(1, 2), normalize=False, hmin=False) - image_vec = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', 0, x_pixels=25, y_pixels=25, xlim=(1, 2), ylim=(1, 2), normalize=False, hmin=False) + img = interpolate_3d_cross(sdf_3, 'A', + x_pixels=25, y_pixels=25, + xlim=(1, 2), ylim=(1, 2), + z_slice=0, + normalize=False, hmin=False) + img_vec = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', + x_pixels=25, y_pixels=25, + xlim=(1, 2), ylim=(1, 2), + z_slice=0, + normalize=False, hmin=False) for y in range(25): for x in range(25): - assert image[y][x] == approx( - w[0] * sdf_3['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2 + 0.5 ** 2) / sdf_3['h'][0], 3)) - assert image_vec[0][y][x] == approx( - w[0] * sdf_3['A'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2 + 0.5 ** 2) / sdf_3['h'][0], 3)) - assert image_vec[1][y][x] == approx( - w[0] * sdf_3['B'][0] * kernel.w(np.sqrt(real_x[x] ** 2 + real_y[y] ** 2 + 0.5 ** 2) / sdf_3['h'][0], 3)) + r = np.sqrt(real_x[x]**2 + real_y[y]**2 + 0.5**2) + w = kernel.w(r / sdf_3['h'][0], 3) + assert img[y][x] == approx(weight[0] * sdf_3['A'][0] * w) + assert img_vec[0][y][x] == approx(weight[0] * sdf_3['A'][0] * w) + assert img_vec[1][y][x] == approx(weight[0] * sdf_3['B'][0] * w) real_z = 0.5 + (np.arange(0, 25) + 0.5) * (1 / 25) - image = interpolate_3d_grid(sdf_3, 'A', x_pixels=25, y_pixels=25, z_pixels=25, xlim=(1, 2), ylim=(1, 2), - zlim=(1, 2), normalize=False, hmin=False) + img = interpolate_3d_grid(sdf_3, 'A', + x_pixels=25, y_pixels=25, z_pixels=25, + xlim=(1, 2), ylim=(1, 2), zlim=(1, 2), + normalize=False, hmin=False) for z in range(25): for y in range(25): for x in range(25): - assert image[z][y][x] == approx( - w[0] * sdf_3['A'][0] * kernel.w(np.sqrt(real_x[x]**2 + real_y[y]**2 + real_z[z]**2) / sdf_3['h'][0], 3)) 
+ r = np.sqrt(real_x[x]**2 + real_y[y]**2 + real_z[z]**2) + w = kernel.w(r / sdf_3['h'][0], 3) + assert img[z][y][x] == approx(weight[0] * sdf_3['A'][0] * w) @mark.parametrize("backend", backends) @@ -836,54 +1166,83 @@ def test_invalid_region(backend): """ Interpolation with invalid bounds should raise a ValueError. """ - df_2 = pd.DataFrame({'x': [0], 'y': [0], 'A': [4], 'B': [3], 'C': [2.5], 'h': [0.9], 'rho': [0.4], 'm': [0.03]}) - sdf_2 = SarracenDataFrame(df_2, params=dict()) - df_3 = pd.DataFrame({'x': [0], 'y': [0], 'z': [-0.5], 'A': [4], 'B': [3], 'C': [2.5], 'h': [0.9], 'rho': [0.4], - 'm': [0.03]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + data_2 = {'x': [0], 'y': [0], 'A': [4], 'B': [3], 'C': [2.5], + 'h': [0.9], 'rho': [0.4], 'm': [0.03]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) + data_3 = {'x': [0], 'y': [0], 'z': [-0.5], + 'A': [4], 'B': [3], 'C': [2.5], + 'h': [0.9], 'rho': [0.4], 'm': [0.03]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) sdf_2.backend = backend sdf_3.backend = backend - for b in [(-3, 3, 3, -3, 20, 20), (3, 3, 3, 3, 20, 20), (-3, 3, -3, 3, 0, 0)]: + for b in [(-3, 3, 3, -3, 20, 20), + (3, 3, 3, 3, 20, 20), + (-3, 3, -3, 3, 0, 0)]: with raises(ValueError): - interpolate_2d(sdf_2, 'A', xlim=(b[0], b[1]), ylim=(b[2], b[3]), x_pixels=b[4], y_pixels=b[5], normalize=False, hmin=False) + interpolate_2d(sdf_2, 'A', + x_pixels=b[4], y_pixels=b[5], + xlim=(b[0], b[1]), ylim=(b[2], b[3]), + normalize=False, hmin=False) with raises(ValueError): - interpolate_2d_vec(sdf_2, 'A', 'B', 'C', xlim=(b[0], b[1]), ylim=(b[2], b[3]), x_pixels=b[4], - y_pixels=b[5], normalize=False, hmin=False) + interpolate_2d_vec(sdf_2, 'A', 'B', 'C', + x_pixels=b[4], y_pixels=b[5], + xlim=(b[0], b[1]), ylim=(b[2], b[3]), + normalize=False, hmin=False) # the first case will not fail for this type of interpolation. 
if not b[0] == -3 and not b[3] == -3: with raises(ValueError): - interpolate_2d_line(sdf_2, 'A', xlim=(b[0], b[1]), ylim=(b[2], b[3]), pixels=b[4], normalize=False, hmin=False) + interpolate_2d_line(sdf_2, 'A', + pixels=b[4], + xlim=(b[0], b[1]), ylim=(b[2], b[3]), + normalize=False, hmin=False) with raises(ValueError): - interpolate_3d_proj(sdf_3, 'A', xlim=(b[0], b[1]), ylim=(b[2], b[3]), x_pixels=b[4], y_pixels=b[5], normalize=False, hmin=False) + interpolate_3d_proj(sdf_3, 'A', + x_pixels=b[4], y_pixels=b[5], + xlim=(b[0], b[1]), ylim=(b[2], b[3]), + normalize=False, hmin=False) with raises(ValueError): - interpolate_3d_vec(sdf_3, 'A', 'B', 'C', xlim=(b[0], b[1]), ylim=(b[2], b[3]), x_pixels=b[4], - y_pixels=b[5], normalize=False, hmin=False) + interpolate_3d_vec(sdf_3, 'A', 'B', 'C', + xlim=(b[0], b[1]), ylim=(b[2], b[3]), + x_pixels=b[4], y_pixels=b[5], + normalize=False, hmin=False) with raises(ValueError): - interpolate_3d_cross(sdf_3, 'A', z_slice=0, x_pixels=b[4], y_pixels=b[5], xlim=(b[0], b[1]), - ylim=(b[2], b[3]), normalize=False, hmin=False) + interpolate_3d_cross(sdf_3, 'A', + x_pixels=b[4], y_pixels=b[5], + xlim=(b[0], b[1]), ylim=(b[2], b[3]), + z_slice=0, + normalize=False, hmin=False) with raises(ValueError): - interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', 0, xlim=(b[0], b[1]), ylim=(b[2], b[3]), x_pixels=b[4], - y_pixels=b[5], normalize=False, hmin=False) + interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', + xlim=(b[0], b[1]), ylim=(b[2], b[3]), + x_pixels=b[4], y_pixels=b[5], + z_slice=0, + normalize=False, hmin=False) with raises(ValueError): - interpolate_3d_grid(sdf_3, 'A', xlim=(b[0], b[1]), ylim=(b[2], b[3]), zlim=(-3, 3), x_pixels=b[4], - y_pixels=b[5], z_pixels=10, normalize=False, hmin=False) + interpolate_3d_grid(sdf_3, 'A', + x_pixels=b[4], y_pixels=b[5], z_pixels=10, + xlim=(b[0], b[1]), + ylim=(b[2], b[3]), + zlim=(-3, 3), + normalize=False, hmin=False) @mark.parametrize("backend", backends) def test_required_columns(backend): """ - 
Interpolation without one of the required columns will result in a KeyError. + Interpolation without one of the required columns results in a KeyError. """ - # This test is currently expected to fail on both backends, since dropping a column from a SarracenDataFrame - # returns a DataFrame. - df_2 = pd.DataFrame({'x': [-1, 1], 'y': [1, -1], 'A': [2, 1.5], 'B': [5, 4], 'C': [3, 2], 'h': [1.1, 1.3], - 'rho': [0.55, 0.45], 'm': [0.04, 0.05]}) - sdf_2 = SarracenDataFrame(df_2, params=dict()) - df_3 = pd.DataFrame({'x': [-1, 1], 'y': [1, -1], 'z': [1, -1], 'A': [2, 1.5], 'B': [5, 4], 'C': [3, 2], - 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + # This test is currently expected to fail on both backends, since dropping + # a column from a SarracenDataFrame returns a DataFrame. + data_2 = {'x': [-1, 1], 'y': [1, -1], + 'A': [2, 1.5], 'B': [5, 4], 'C': [3, 2], + 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) + data_3 = {'x': [-1, 1], 'y': [1, -1], 'z': [1, -1], + 'A': [2, 1.5], 'B': [5, 4], 'C': [3, 2], + 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) sdf_2.backend = backend sdf_3.backend = backend @@ -891,48 +1250,60 @@ def test_required_columns(backend): for column in ['m', 'h']: sdf_dropped = sdf_2.drop(column, axis=1) with raises(KeyError): - interpolate_2d(sdf_dropped, 'A', normalize=False, hmin=False) + interpolate_2d(sdf_dropped, 'A') with raises(KeyError): - interpolate_2d_line(sdf_dropped, 'A', normalize=False, hmin=False) + interpolate_2d_line(sdf_dropped, 'A') with raises(KeyError): - interpolate_2d_vec(sdf_dropped, 'A', 'B', normalize=False, hmin=False) + interpolate_2d_vec(sdf_dropped, 'A', 'B') sdf_dropped = sdf_3.drop(column, axis=1) with raises(KeyError): - interpolate_3d_proj(sdf_dropped, 'A', normalize=False, hmin=False) + interpolate_3d_proj(sdf_dropped, 'A') with 
raises(KeyError): - interpolate_3d_cross(sdf_dropped, 'A', normalize=False, hmin=False) + interpolate_3d_cross(sdf_dropped, 'A') with raises(KeyError): - interpolate_3d_vec(sdf_dropped, 'A', 'B', 'C', normalize=False, hmin=False) + interpolate_3d_vec(sdf_dropped, 'A', 'B', 'C') with raises(KeyError): - interpolate_3d_cross_vec(sdf_dropped, 'A', 'B', 'C', normalize=False, hmin=False) + interpolate_3d_cross_vec(sdf_dropped, 'A', 'B', 'C') with raises(KeyError): - interpolate_3d_grid(sdf_dropped, 'A', normalize=False, hmin=False) + interpolate_3d_grid(sdf_dropped, 'A') @mark.parametrize("backend", backends) def test_exact_interpolation(backend): """ - Exact interpolation over the entire effective area of a kernel should return 1 over the particle bounds, multiplied by the weight. + Exact interpolation over the entire effective area of a kernel should + return 1 over the particle bounds, multiplied by the weight. """ - df_2 = pd.DataFrame({'x': [0], 'y': [0], 'A': [2], 'h': [1.1], 'rho': [0.55], 'm': [0.04]}) - sdf_2 = SarracenDataFrame(df_2, params=dict()) + data_2 = {'x': [0], 'y': [0], 'A': [2], + 'h': [1.1], 'rho': [0.55], 'm': [0.04]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [0], 'y': [0], 'z': [1], 'A': [2], 'h': [1.1], 'rho': [0.55], 'm': [0.04]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + data_3 = {'x': [0], 'y': [0], 'z': [1], 'A': [2], + 'h': [1.1], 'rho': [0.55], 'm': [0.04]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) sdf_3.backend = backend kernel = CubicSplineKernel() w = sdf_2['m'] * sdf_2['A'] / (sdf_2['rho'] * sdf_2['h'] ** 2) bound = kernel.get_radius() * sdf_2['h'][0] - image = interpolate_2d(sdf_2, 'A', xlim=(-bound, bound), ylim=(-bound, bound), x_pixels=1, exact=True, normalize=False, hmin=False) + img = interpolate_2d(sdf_2, 'A', + x_pixels=1, + xlim=(-bound, bound), ylim=(-bound, bound), + exact=True, + normalize=False, hmin=False) - assert image.sum() == approx(w[0] * 
sdf_2['h'][0] ** 2 / (4 * bound ** 2)) + assert img.sum() == approx(w[0] * sdf_2['h'][0] ** 2 / (4 * bound ** 2)) - image = interpolate_3d_proj(sdf_3, 'A', xlim=(-bound, bound), ylim=(-bound, bound), x_pixels=1, exact=True, dens_weight=False, normalize=False, hmin=False) + img = interpolate_3d_proj(sdf_3, 'A', + x_pixels=1, + xlim=(-bound, bound), ylim=(-bound, bound), + exact=True, + dens_weight=False, + normalize=False, hmin=False) - assert image.sum() == approx(w[0] * sdf_2['h'][0] ** 2 / (4 * bound ** 2)) + assert img.sum() == approx(w[0] * sdf_2['h'][0] ** 2 / (4 * bound ** 2)) @mark.parametrize("backend", backends) @@ -940,16 +1311,22 @@ def test_density_weighted(backend): """ Enabling density weighted interpolation will change the resultant image """ - df_2 = pd.DataFrame({'x': [0], 'y': [0], 'A': [2], 'B': [3], 'h': [0.5], 'rho': [0.25], 'm': [0.75]}) - sdf_2 = SarracenDataFrame(df_2, params=dict()) - df_3 = pd.DataFrame({'x': [0], 'y': [0], 'z': [0], 'A': [2], 'B': [3], 'C': [4], 'h': [0.5], 'rho': [0.25], - 'm': [0.75]}) - sdf_3 = SarracenDataFrame(df_3, params=dict()) + data_2 = {'x': [0], 'y': [0], 'A': [2], 'B': [3], + 'h': [0.5], 'rho': [0.25], 'm': [0.75]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) + data_3 = {'x': [0], 'y': [0], 'z': [0], + 'A': [2], 'B': [3], 'C': [4], + 'h': [0.5], 'rho': [0.25], 'm': [0.75]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) kernel = CubicSplineKernel() sdf_2.backend = backend sdf_3.backend = backend + w_2 = kernel.w(0, 2) + w_3 = kernel.w(0, 3) + column_w = kernel.get_column_kernel()[0] + for dens_weight in [True, False]: if dens_weight: weight2d = sdf_2['m'][0] / (sdf_2['h'][0] ** 2) @@ -958,49 +1335,90 @@ def test_density_weighted(backend): weight2d = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 2) weight3d = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 3) - image = interpolate_2d(sdf_2, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), dens_weight=dens_weight, normalize=False, 
hmin=False) - assert image == weight2d * sdf_2['A'][0] * kernel.w(0, 2) - image = interpolate_2d_vec(sdf_2, 'A', 'B', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), dens_weight=dens_weight, normalize=False, hmin=False) - assert image[0] == weight2d * sdf_2['A'][0] * kernel.w(0, 2) - assert image[1] == weight2d * sdf_2['B'][0] * kernel.w(0, 2) - - image = interpolate_2d_line(sdf_2, 'A', pixels=1, xlim=(-1, 1), ylim=(-1, 1), dens_weight=dens_weight, normalize=False, hmin=False) - assert image[0] == weight2d * sdf_2['A'][0] * kernel.w(0, 2) - - image = interpolate_3d_proj(sdf_3, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), dens_weight=dens_weight, normalize=False, hmin=False) - assert image[0] == weight2d * sdf_2['A'][0] * kernel.get_column_kernel()[0] - image = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), dens_weight=dens_weight, normalize=False, hmin=False) - assert image[0] == weight2d * sdf_2['A'][0] * kernel.get_column_kernel()[0] - assert image[1] == weight2d * sdf_2['B'][0] * kernel.get_column_kernel()[0] - - image = interpolate_3d_cross(sdf_3, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), dens_weight=dens_weight, normalize=False, hmin=False) - assert image[0] == weight3d * sdf_2['A'][0] * kernel.w(0, 3) - image = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), dens_weight=dens_weight, normalize=False, hmin=False) - assert image[0] == weight3d * sdf_2['A'][0] * kernel.w(0, 3) - assert image[1] == weight3d * sdf_2['B'][0] * kernel.w(0, 3) - - image = interpolate_3d_grid(sdf_3, 'A', x_pixels=1, y_pixels=1, z_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - zlim=(-1, 1), dens_weight=dens_weight, normalize=False, hmin=False) - assert image[0] == weight3d * sdf_2['A'][0] * kernel.w(0, 3) - - image = interpolate_3d_line(sdf_3, 'A', pixels=1, xlim=(-1, 1), ylim=(-1, 1), dens_weight=dens_weight, normalize=False, hmin=False) - assert image[0] == weight3d 
* sdf_2['A'][0] * kernel.w(0, 3) + img = interpolate_2d(sdf_2, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=dens_weight, + normalize=False, hmin=False) + assert img == weight2d * sdf_2['A'][0] * w_2 + img = interpolate_2d_vec(sdf_2, 'A', 'B', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=dens_weight, + normalize=False, hmin=False) + assert img[0] == weight2d * sdf_2['A'][0] * w_2 + assert img[1] == weight2d * sdf_2['B'][0] * w_2 + + img = interpolate_2d_line(sdf_2, 'A', + pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=dens_weight, + normalize=False, hmin=False) + assert img[0] == weight2d * sdf_2['A'][0] * w_2 + + img = interpolate_3d_proj(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=dens_weight, + normalize=False, hmin=False) + assert img[0] == weight2d * sdf_2['A'][0] * column_w + img = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=dens_weight, + normalize=False, hmin=False) + assert img[0] == weight2d * sdf_2['A'][0] * column_w + assert img[1] == weight2d * sdf_2['B'][0] * column_w + + img = interpolate_3d_cross(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=dens_weight, + normalize=False, hmin=False) + assert img[0] == weight3d * sdf_2['A'][0] * w_3 + img = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=dens_weight, + normalize=False, hmin=False) + assert img[0] == weight3d * sdf_2['A'][0] * w_3 + assert img[1] == weight3d * sdf_2['B'][0] * w_3 + + img = interpolate_3d_grid(sdf_3, 'A', + x_pixels=1, y_pixels=1, z_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + dens_weight=dens_weight, + normalize=False, hmin=False) + assert img[0] == weight3d * sdf_2['A'][0] * w_3 + + img = interpolate_3d_line(sdf_3, 'A', + pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=dens_weight, + 
normalize=False, hmin=False) + assert img[0] == weight3d * sdf_2['A'][0] * w_3 @mark.parametrize("backend", backends) def test_normalize_interpolation(backend): - sdf_2 = SarracenDataFrame({'x': [0], 'y': [0], 'A': [2], 'B': [3], 'h': [0.5], 'rho': [0.25], 'm': [0.75]}, - params=dict()) - sdf_3 = SarracenDataFrame({'x': [0], 'y': [0], 'z': [0], 'A': [2], 'B': [3], 'C': [4], 'h': [0.5], 'rho': [0.25], - 'm': [0.75]}, params=dict()) + data_2 = {'x': [0], 'y': [0], + 'A': [2], 'B': [3], + 'h': [0.5], 'rho': [0.25], 'm': [0.75]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) + + data_3 = {'x': [0], 'y': [0], 'z': [0], + 'A': [2], 'B': [3], 'C': [4], + 'h': [0.5], 'rho': [0.25], 'm': [0.75]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) kernel = CubicSplineKernel() sdf_2.backend = backend sdf_3.backend = backend - weight2d = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 2) * kernel.w(0, 2) - weight3d = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 3) * kernel.w(0, 3) - weight3d_column = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 2) * kernel.get_column_kernel()[0] + weight = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 2) + weight2d = weight * kernel.w(0, 2) + weight3d = weight / sdf_2['h'][0] * kernel.w(0, 3) + weight3d_column = weight * kernel.get_column_kernel()[0] for normalize in [True, False]: @@ -1008,61 +1426,99 @@ def test_normalize_interpolation(backend): norm3d = 1.0 norm3d_column = 1.0 if normalize: - norm2d = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 2) * kernel.w(0, 2) - norm3d = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 3) * kernel.w(0, 3) - norm3d_column = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 2) * kernel.get_column_kernel()[0] - - image = interpolate_2d(sdf_2, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=normalize) - assert image == weight2d * sdf_2['A'][0] / norm2d - - image = interpolate_2d_vec(sdf_2, 'A', 'B', x_pixels=1, y_pixels=1, 
xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=normalize) - assert image[0] == weight2d * sdf_2['A'][0] / norm2d - assert image[1] == weight2d * sdf_2['B'][0] / norm2d - - image = interpolate_2d_line(sdf_2, 'A', pixels=1, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=normalize) - assert image[0] == weight2d * sdf_2['A'][0] / norm2d - - image = interpolate_3d_proj(sdf_3, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=normalize) - assert image[0] == weight3d_column * sdf_2['A'][0] / norm3d_column - image = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=normalize) - assert image[0] == weight3d_column * sdf_2['A'][0] / norm3d_column - assert image[1] == weight3d_column * sdf_2['B'][0] / norm3d_column - - image = interpolate_3d_cross(sdf_3, 'A', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=normalize) - assert image[0] == weight3d * sdf_2['A'][0] / norm3d - image = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', x_pixels=1, y_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=normalize) - assert image[0] == weight3d * sdf_2['A'][0] / norm3d - assert image[1] == weight3d * sdf_2['B'][0] / norm3d - - image = interpolate_3d_grid(sdf_3, 'A', x_pixels=1, y_pixels=1, z_pixels=1, xlim=(-1, 1), ylim=(-1, 1), - zlim=(-1, 1), dens_weight=False, normalize=normalize) - assert image[0] == weight3d * sdf_2['A'][0] / norm3d - - image = interpolate_3d_line(sdf_3, 'A', pixels=1, xlim=(-1, 1), ylim=(-1, 1), - dens_weight=False, normalize=normalize) - assert image[0] == weight3d * sdf_2['A'][0] / norm3d + weight = sdf_2['m'][0] / (sdf_2['rho'][0] * sdf_2['h'][0] ** 2) + norm2d = weight * kernel.w(0, 2) + norm3d = weight / sdf_2['h'][0] * kernel.w(0, 3) + norm3d_column = weight * kernel.get_column_kernel()[0] + + img = interpolate_2d(sdf_2, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 
1), + dens_weight=False, + normalize=normalize) + assert img == weight2d * sdf_2['A'][0] / norm2d + + img = interpolate_2d_vec(sdf_2, 'A', 'B', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=normalize) + assert img[0] == weight2d * sdf_2['A'][0] / norm2d + assert img[1] == weight2d * sdf_2['B'][0] / norm2d + + img = interpolate_2d_line(sdf_2, 'A', + pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=normalize) + assert img[0] == weight2d * sdf_2['A'][0] / norm2d + + img = interpolate_3d_proj(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=normalize) + assert img[0] == weight3d_column * sdf_2['A'][0] / norm3d_column + img = interpolate_3d_vec(sdf_3, 'A', 'B', 'C', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=normalize) + assert img[0] == weight3d_column * sdf_2['A'][0] / norm3d_column + assert img[1] == weight3d_column * sdf_2['B'][0] / norm3d_column + + img = interpolate_3d_cross(sdf_3, 'A', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=normalize) + assert img[0] == weight3d * sdf_2['A'][0] / norm3d + img = interpolate_3d_cross_vec(sdf_3, 'A', 'B', 'C', + x_pixels=1, y_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=normalize) + assert img[0] == weight3d * sdf_2['A'][0] / norm3d + assert img[1] == weight3d * sdf_2['B'][0] / norm3d + + img = interpolate_3d_grid(sdf_3, 'A', + x_pixels=1, y_pixels=1, z_pixels=1, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + dens_weight=False, + normalize=normalize) + assert img[0] == weight3d * sdf_2['A'][0] / norm3d + + img = interpolate_3d_line(sdf_3, 'A', + pixels=1, + xlim=(-1, 1), ylim=(-1, 1), + dens_weight=False, + normalize=normalize) + assert img[0] == weight3d * sdf_2['A'][0] / norm3d @mark.parametrize("backend", backends) def test_exact_interpolation_culling(backend): - sdf_2 = 
SarracenDataFrame({'x': [0], 'y': [0], 'A': [2], 'h': [0.4], 'rho': [0.1], 'm': [1]}, params=dict()) + data_2 = {'x': [0], 'y': [0], 'A': [2], + 'h': [0.4], 'rho': [0.1], 'm': [1]} + sdf_2 = SarracenDataFrame(data_2, params=dict()) sdf_2.backend = backend - sdf_3 = SarracenDataFrame({'x': [0], 'y': [0], 'z': [0], 'A': [2], 'h': [0.4], 'rho': [0.1], 'm': [1]}, - params=dict()) + + data_3 = {'x': [0], 'y': [0], 'z': [0], 'A': [2], + 'h': [0.4], 'rho': [0.1], 'm': [1]} + sdf_3 = SarracenDataFrame(data_3, params=dict()) sdf_3.backend = backend - image_2 = sdf_2.sph_interpolate('A', xlim=(-1, 1), ylim=(-1, 1), x_pixels=5, exact=True) - image_3 = interpolate_3d_proj(sdf_3, 'A', xlim=(-1, 1), ylim=(-1, 1), x_pixels=5, exact=True) + img_2 = sdf_2.sph_interpolate('A', + x_pixels=5, + xlim=(-1, 1), ylim=(-1, 1), + exact=True) + img_3 = interpolate_3d_proj(sdf_3, 'A', + x_pixels=5, + xlim=(-1, 1), ylim=(-1, 1), + exact=True) - assert image_2[2, 4] != 0 - assert image_3[2, 4] != 0 + assert img_2[2, 4] != 0 + assert img_3[2, 4] != 0 @mark.parametrize("backend", backends) @@ -1073,25 +1529,29 @@ def test_minimum_smoothing_length_2d(backend): xlim, ylim = (-1, 1), (-1, 1) hmin = 0.5 * (xlim[1] - xlim[0]) / pixels - sdf_a = SarracenDataFrame(data={'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], - 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], - 'h': [hmin, hmin, 0.3, 0.25, hmin, hmin, 0.2, hmin], - 'm': [0.56] * 8}, - params={'hfact': 1.2}) + data_a = {'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], + 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], + 'h': [hmin, hmin, 0.3, 0.25, hmin, hmin, 0.2, hmin], + 'm': [0.56] * 8} + sdf_a = SarracenDataFrame(data_a, params={'hfact': 1.2}) - sdf_b = SarracenDataFrame(data={'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], - 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], - 'h': [0.01, 0.01, 0.3, 0.25, 0.01, 0.01, 0.2, 0.01], - 'm': [0.56] * 8}, - params={'hfact': 1.2}) + data_b = {'rx': [0.3, -0.1, 0.1, 
0.1, 0.05, -0.05, -0.25, -0.2], + 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], + 'h': [0.01, 0.01, 0.3, 0.25, 0.01, 0.01, 0.2, 0.01], + 'm': [0.56] * 8} + sdf_b = SarracenDataFrame(data_b, params={'hfact': 1.2}) sdf_a.backend = backend sdf_b.backend = backend for interpolate in [interpolate_2d]: - grid = interpolate(data=sdf_a, target='rho', xlim=xlim, ylim=ylim, x_pixels=pixels, y_pixels=pixels, + grid = interpolate(sdf_a, 'rho', + x_pixels=pixels, y_pixels=pixels, + xlim=xlim, ylim=ylim, normalize=False, hmin=False) - grid_hmin = interpolate(data=sdf_b, target='rho', xlim=xlim, ylim=ylim, x_pixels=pixels, y_pixels=pixels, + grid_hmin = interpolate(sdf_b, 'rho', + x_pixels=pixels, y_pixels=pixels, + xlim=xlim, ylim=ylim, normalize=False, hmin=True) assert (grid == grid_hmin).all() @@ -1105,27 +1565,33 @@ def test_minimum_smoothing_length_3d(backend): xlim, ylim = (-1, 1), (-1, 1) hmin = 0.5 * (xlim[1] - xlim[0]) / pixels - sdf_a = SarracenDataFrame(data={'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], - 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], - 'rz': [0.1, 0.32, 0.03, -0.3, -0.2, 0.1, -0.06, 0.22], - 'h': [hmin, hmin, 0.3, 0.25, hmin, hmin, 0.2, hmin], - 'm': [0.56] * 8}, - params={'hfact': 1.2}) + data_a = {'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], + 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], + 'rz': [0.1, 0.32, 0.03, -0.3, -0.2, 0.1, -0.06, 0.22], + 'h': [hmin, hmin, 0.3, 0.25, hmin, hmin, 0.2, hmin], + 'm': [0.56] * 8} + sdf_a = SarracenDataFrame(data_a, params={'hfact': 1.2}) - sdf_b = SarracenDataFrame(data={'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], - 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], - 'rz': [0.1, 0.32, 0.03, -0.3, -0.2, 0.1, -0.06, 0.22], - 'h': [0.01, 0.01, 0.3, 0.25, 0.01, 0.01, 0.2, 0.01], - 'm': [0.56] * 8}, - params={'hfact': 1.2}) + data_b = {'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], + 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], + 'rz': [0.1, 
0.32, 0.03, -0.3, -0.2, 0.1, -0.06, 0.22], + 'h': [0.01, 0.01, 0.3, 0.25, 0.01, 0.01, 0.2, 0.01], + 'm': [0.56] * 8} + sdf_b = SarracenDataFrame(data_b, params={'hfact': 1.2}) sdf_a.backend = backend sdf_b.backend = backend - for interpolate in [interpolate_3d_cross, interpolate_3d_proj, interpolate_3d_grid]: - grid = interpolate(data=sdf_a, target='rho', xlim=xlim, ylim=ylim, x_pixels=pixels, y_pixels=pixels, + for interpolate in [interpolate_3d_cross, + interpolate_3d_proj, + interpolate_3d_grid]: + grid = interpolate(sdf_a, 'rho', + x_pixels=pixels, y_pixels=pixels, + xlim=xlim, ylim=ylim, normalize=False, hmin=False) - grid_hmin = interpolate(data=sdf_b, target='rho', xlim=xlim, ylim=ylim, x_pixels=pixels, y_pixels=pixels, + grid_hmin = interpolate(sdf_b, 'rho', + x_pixels=pixels, y_pixels=pixels, + xlim=xlim, ylim=ylim, normalize=False, hmin=True) assert (grid == grid_hmin).all() @@ -1138,53 +1604,63 @@ def test_minimum_smoothing_length_1d_lines(backend): pixels = 5 xlim, ylim, zlim = (-1, 1), (-0.5, 0.5), (-0.5, 0.5) - hmin = 0.5 * np.sqrt((xlim[1] - xlim[0]) ** 2 + (ylim[1] - ylim[0]) ** 2) / pixels + hmin = 0.5 * np.sqrt((xlim[1] - xlim[0])**2 + + (ylim[1] - ylim[0])**2) / pixels - sdf_a = SarracenDataFrame(data={'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], - 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], - 'h': [hmin, hmin, 0.3, 0.25, hmin, hmin, hmin, hmin], - 'm': [0.56] * 8}, - params={'hfact': 1.2}) + data_a = {'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], + 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], + 'h': [hmin, hmin, 0.3, 0.25, hmin, hmin, hmin, hmin], + 'm': [0.56] * 8} + sdf_a = SarracenDataFrame(data_a, params={'hfact': 1.2}) - sdf_b = SarracenDataFrame(data={'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], - 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], - 'h': [0.01, 0.01, 0.3, 0.25, 0.01, 0.01, 0.2, 0.01], - 'm': [0.56] * 8}, - params={'hfact': 1.2}) + data_b = {'rx': [0.3, -0.1, 0.1, 0.1, 
0.05, -0.05, -0.25, -0.2], + 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], + 'h': [0.01, 0.01, 0.3, 0.25, 0.01, 0.01, 0.2, 0.01], + 'm': [0.56] * 8} + sdf_b = SarracenDataFrame(data_b, params={'hfact': 1.2}) sdf_a.backend = backend sdf_b.backend = backend - grid = interpolate_2d_line(data=sdf_a, target='rho', xlim=xlim, ylim=ylim, pixels=pixels, + grid = interpolate_2d_line(sdf_a, 'rho', + pixels=pixels, + xlim=xlim, ylim=ylim, normalize=False, hmin=False) - grid_hmin = interpolate_2d_line(data=sdf_b, target='rho', xlim=xlim, ylim=ylim, pixels=pixels, + grid_hmin = interpolate_2d_line(sdf_b, 'rho', + pixels=pixels, + xlim=xlim, ylim=ylim, normalize=False, hmin=True) assert (grid == grid_hmin).all() - hmin = 0.5 * np.sqrt((xlim[1] - xlim[0]) ** 2 + (ylim[1] - ylim[0]) ** 2 + (zlim[1] - zlim[0]) ** 2) / pixels + hmin = 0.5 * np.sqrt((xlim[1] - xlim[0])**2 + + (ylim[1] - ylim[0])**2 + + (zlim[1] - zlim[0])**2) / pixels - sdf_a = SarracenDataFrame(data={'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], - 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], - 'rz': [0.1, 0.32, 0.03, -0.3, -0.2, 0.1, -0.06, 0.22], - 'h': [hmin, hmin, 0.3, 0.25, hmin, hmin, hmin, hmin], - 'm': [0.56] * 8}, - params={'hfact': 1.2}) + data_a = {'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], + 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], + 'rz': [0.1, 0.32, 0.03, -0.3, -0.2, 0.1, -0.06, 0.22], + 'h': [hmin, hmin, 0.3, 0.25, hmin, hmin, hmin, hmin], + 'm': [0.56] * 8} + sdf_a = SarracenDataFrame(data_a, params={'hfact': 1.2}) - sdf_b = SarracenDataFrame(data={'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], - 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], - 'rz': [0.1, 0.32, 0.03, -0.3, -0.2, 0.1, -0.06, 0.22], - 'h': [0.01, 0.01, 0.3, 0.25, 0.01, 0.01, 0.2, 0.01], - 'm': [0.56] * 8}, - params={'hfact': 1.2}) + data_b = {'rx': [0.3, -0.1, 0.1, 0.1, 0.05, -0.05, -0.25, -0.2], + 'ry': [0.0, 0.1, -0.1, 0.0, -0.05, 0.07, -0.3, -0.2], + 'rz': [0.1, 0.32, 
0.03, -0.3, -0.2, 0.1, -0.06, 0.22], + 'h': [0.01, 0.01, 0.3, 0.25, 0.01, 0.01, 0.2, 0.01], + 'm': [0.56] * 8} + sdf_b = SarracenDataFrame(data_b, params={'hfact': 1.2}) sdf_a.backend = backend sdf_b.backend = backend - grid = interpolate_3d_line(data=sdf_a, target='rho', xlim=xlim, ylim=ylim, zlim=zlim, pixels=pixels, + grid = interpolate_3d_line(sdf_a, 'rho', + pixels=pixels, + xlim=xlim, ylim=ylim, zlim=zlim, normalize=False, hmin=False) - grid_hmin = interpolate_3d_line(data=sdf_b, target='rho', xlim=xlim, ylim=ylim, zlim=zlim, pixels=pixels, + grid_hmin = interpolate_3d_line(sdf_b, 'rho', + pixels=pixels, + xlim=xlim, ylim=ylim, zlim=zlim, normalize=False, hmin=True) assert (grid == grid_hmin).all() - diff --git a/sarracen/tests/interpolate/test_rotation.py b/sarracen/tests/interpolate/test_rotation.py index a9112ee..ccd2fca 100644 --- a/sarracen/tests/interpolate/test_rotation.py +++ b/sarracen/tests/interpolate/test_rotation.py @@ -1,4 +1,3 @@ -import pandas as pd import numpy as np from numba import cuda from numpy.testing import assert_allclose @@ -30,17 +29,19 @@ def rotate(target, rot_z, rot_y, rot_x): float tuple of shape (3): The rotated vector. 
""" - pos_x1 = target[0] * np.cos(rot_z / (180 / np.pi)) - target[1] * np.sin(rot_z / (180 / np.pi)) - pos_y1 = target[0] * np.sin(rot_z / (180 / np.pi)) + target[1] * np.cos(rot_z / (180 / np.pi)) + r = 180 / np.pi + + pos_x1 = target[0] * np.cos(rot_z / r) - target[1] * np.sin(rot_z / r) + pos_y1 = target[0] * np.sin(rot_z / r) + target[1] * np.cos(rot_z / r) pos_z1 = target[2] - pos_x2 = pos_x1 * np.cos(rot_y / (180 / np.pi)) + pos_z1 * np.sin(rot_y / (180 / np.pi)) + pos_x2 = pos_x1 * np.cos(rot_y / r) + pos_z1 * np.sin(rot_y / r) pos_y2 = pos_y1 - pos_z2 = pos_x1 * -np.sin(rot_y / (180 / np.pi)) + pos_z1 * np.cos(rot_y / (180 / np.pi)) + pos_z2 = pos_x1 * -np.sin(rot_y / r) + pos_z1 * np.cos(rot_y / r) pos_x3 = pos_x2 - pos_y3 = pos_y2 * np.cos(rot_x / (180 / np.pi)) - pos_z2 * np.sin(rot_x / (180 / np.pi)) - pos_z3 = pos_y2 * np.sin(rot_x / (180 / np.pi)) + pos_z2 * np.cos(rot_x / (180 / np.pi)) + pos_y3 = pos_y2 * np.cos(rot_x / r) - pos_z2 * np.sin(rot_x / r) + pos_z3 = pos_y2 * np.sin(rot_x / r) + pos_z2 * np.cos(rot_x / r) return pos_x3, pos_y3, pos_z3 @@ -48,11 +49,12 @@ def rotate(target, rot_z, rot_y, rot_x): @mark.parametrize("backend", backends) def test_nonstandard_rotation(backend): """ - Interpolation of a rotated dataframe with nonstandard angles should function properly. + Interpolation of a rotated dataframe with nonstandard angles. 
""" - df = pd.DataFrame({'x': [1], 'y': [1], 'z': [1], 'A': [4], 'B': [5], 'C': [6], 'h': [0.9], 'rho': [0.4], - 'm': [0.03]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [1], 'y': [1], 'z': [1], + 'A': [4], 'B': [5], 'C': [6], + 'h': [0.9], 'rho': [0.4], 'm': [0.03]} + sdf = SarracenDataFrame(data, params=dict()) kernel = CubicSplineKernel() sdf.kernel = kernel sdf.backend = backend @@ -61,17 +63,37 @@ def test_nonstandard_rotation(backend): rot_z, rot_y, rot_x = 129, 34, 50 - image_col = interpolate_3d_proj(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], dens_weight=False, normalize=False, hmin=False) - image_cross = interpolate_3d_cross(sdf, 'A', z_slice=0, rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], - x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - image_colvec = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], dens_weight=False, normalize=False, hmin=False) - image_crossvec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x_pixels=50, y_pixels=50, xlim=(-1, 1), - ylim=(-1, 1), rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], normalize=False, hmin=False) + img_col = interpolate_3d_proj(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=[rot_z, rot_y, rot_x], + rot_origin=[0, 0, 0], + dens_weight=False, + normalize=False, hmin=False) + img_xsec = interpolate_3d_cross(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=[rot_z, rot_y, rot_x], + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + img_colvec = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=[rot_z, rot_y, rot_x], + rot_origin=[0, 0, 0], + dens_weight=False, + normalize=False, hmin=False) + img_xsecvec = interpolate_3d_cross_vec(sdf, 
'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=[rot_z, rot_y, rot_x], + rot_origin=[0, 0, 0], + normalize=False, hmin=False) w_col = sdf['m'] / (sdf['rho'] * sdf['h'] ** 2) - w_cross = sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) + w_xsec = sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) pos_x, pos_y, pos_z = rotate((1, 1, 1), rot_z, rot_y, rot_x) target_x, target_y, target_z = rotate((4, 5, 6), rot_z, rot_y, rot_x) @@ -80,92 +102,144 @@ def test_nonstandard_rotation(backend): for y in range(50): for x in range(50): - assert image_col[y][x] == approx(w_col[0] * sdf['A'][0] * column_kernel( - np.sqrt((pos_x - real[x]) ** 2 + (pos_y - real[y]) ** 2) / sdf['h'][0], 3)) - assert image_colvec[0][y][x] == approx(w_col[0] * target_x * column_kernel( - np.sqrt((pos_x - real[x]) ** 2 + (pos_y - real[y]) ** 2) / sdf['h'][0], 3)) - assert image_colvec[1][y][x] == approx(w_col[0] * target_y * column_kernel( - np.sqrt((pos_x - real[x]) ** 2 + (pos_y - real[y]) ** 2) / sdf['h'][0], 3)) - assert image_cross[y][x] == approx(w_cross[0] * sdf['A'][0] * kernel.w( - np.sqrt((pos_x - real[x]) ** 2 + (pos_y - real[y]) ** 2 + pos_z ** 2) / sdf['h'][0], 3)) - assert image_crossvec[0][y][x] == approx(w_cross[0] * target_x * kernel.w( - np.sqrt((pos_x - real[x]) ** 2 + (pos_y - real[y]) ** 2 + pos_z ** 2) / sdf['h'][0], 3)) - assert image_crossvec[1][y][x] == approx(w_cross[0] * target_y * kernel.w( - np.sqrt((pos_x - real[x]) ** 2 + (pos_y - real[y]) ** 2 + pos_z ** 2) / sdf['h'][0], 3)) - - image_grid = interpolate_3d_grid(sdf, 'A', x_pixels=50, y_pixels=50, z_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - zlim=(-1, 1), rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], normalize=False, hmin=False) + r = np.sqrt((pos_x - real[x]) ** 2 + + (pos_y - real[y]) ** 2) + w = column_kernel(r / sdf['h'][0], 3) + assert img_col[y][x] == approx(w_col[0] * sdf['A'][0] * w) + assert img_colvec[0][y][x] == approx(w_col[0] * target_x * w) + assert img_colvec[1][y][x] == 
approx(w_col[0] * target_y * w) + + r = np.sqrt((pos_x - real[x])**2 + + (pos_y - real[y])**2 + + pos_z**2) + w = kernel.w(r / sdf['h'][0], 3) + assert img_xsec[y][x] == approx(w_xsec[0] * sdf['A'][0] * w) + assert img_xsecvec[0][y][x] == approx(w_xsec[0] * target_x * w) + assert img_xsecvec[1][y][x] == approx(w_xsec[0] * target_y * w) + + img_grid = interpolate_3d_grid(sdf, 'A', + x_pixels=50, y_pixels=50, z_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + rotation=[rot_z, rot_y, rot_x], + rot_origin=[0, 0, 0], + normalize=False, hmin=False) for z in range(50): for y in range(50): for x in range(50): - assert image_grid[z][y][x] == \ - approx(w_cross[0] * sdf['A'][0] * kernel.w(np.sqrt((pos_x - real[x]) ** 2 - + (pos_y - real[y]) ** 2 - + (pos_z - real[z]) ** 2) / sdf['h'][0], 3)) + r = np.sqrt((pos_x - real[x])**2 + + (pos_y - real[y])**2 + + (pos_z - real[z])**2) + w = kernel.w(r / sdf['h'][0], 3) + grid_value = w_xsec[0] * sdf['A'][0] * w + + assert img_grid[z][y][x] == approx(grid_value) @mark.parametrize("backend", backends) def test_scipy_rotation_equivalency(backend): """ - For interpolation functions, a [z, y, x] rotation defined with degrees should be equivalent - to the scipy version using from_euler(). + For interpolation functions, a [z, y, x] rotation defined with degrees + should be equivalent to the scipy version using from_euler(). 
""" - df = pd.DataFrame({'x': [1], 'y': [1], 'z': [1], 'A': [4], 'B': [3], 'C': [2], 'h': [0.9], 'rho': [0.4], - 'm': [0.03]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [1], 'y': [1], 'z': [1], + 'A': [4], 'B': [3], 'C': [2], + 'h': [0.9], 'rho': [0.4], 'm': [0.03]} + sdf = SarracenDataFrame(data, params=dict()) kernel = CubicSplineKernel() sdf.kernel = kernel sdf.backend = backend - rot_z, rot_y, rot_x = 67, -34, 91 - - image1 = interpolate_3d_proj(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], normalize=False, hmin=False) - image2 = interpolate_3d_proj(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=Rotation.from_euler('zyx', [rot_z, rot_y, rot_x], degrees=True), - rot_origin=[0, 0, 0], normalize=False, hmin=False) - assert_allclose(image1, image2) - - image1 = interpolate_3d_cross(sdf, 'A', z_slice=0, rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], x_pixels=50, - y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - image2 = interpolate_3d_cross(sdf, 'A', z_slice=0, - rotation=Rotation.from_euler('zyx', [rot_z, rot_y, rot_x], degrees=True), - rot_origin=[0, 0, 0], x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert_allclose(image1, image2) - - image1 = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], normalize=False, hmin=False) - image2 = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=Rotation.from_euler('zyx', [rot_z, rot_y, rot_x], degrees=True), - rot_origin=[0, 0, 0], normalize=False, hmin=False) - assert_allclose(image1[0], image2[0]) - assert_allclose(image1[1], image2[1]) - - image1 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[rot_z, rot_y, rot_x], 
rot_origin=[0, 0, 0], normalize=False, hmin=False) - image2 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=Rotation.from_euler('zyx', [rot_z, rot_y, rot_x], degrees=True), - rot_origin=[0, 0, 0], normalize=False, hmin=False) - assert_allclose(image1[0], image2[0]) - assert_allclose(image1[1], image2[1]) - - image1 = interpolate_3d_grid(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), - rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], normalize=False, hmin=False) - image2 = interpolate_3d_grid(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), - rotation=Rotation.from_euler('zyx', [rot_z, rot_y, rot_x], degrees=True), - rot_origin=[0, 0, 0], normalize=False, hmin=False) - assert_allclose(image1, image2) + rotation = [67, -34, 91] + scipy_rotation = Rotation.from_euler('zyx', rotation, degrees=True) + + img1 = interpolate_3d_proj(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + img2 = interpolate_3d_proj(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=scipy_rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + assert_allclose(img1, img2) + + img1 = interpolate_3d_cross(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + img2 = interpolate_3d_cross(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=scipy_rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + assert_allclose(img1, img2) + + img1 = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + img2 = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + 
xlim=(-1, 1), ylim=(-1, 1), + rotation=scipy_rotation, + rot_origin=[0, 0, 0], + normalize=False, + hmin=False) + assert_allclose(img1[0], img2[0]) + assert_allclose(img1[1], img2[1]) + + img1 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + img2 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=scipy_rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + assert_allclose(img1[0], img2[0]) + assert_allclose(img1[1], img2[1]) + + img1 = interpolate_3d_grid(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + rotation=rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + img2 = interpolate_3d_grid(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + rotation=scipy_rotation, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + assert_allclose(img1, img2) @mark.parametrize("backend", backends) def test_quaternion_rotation(backend): """ - An alternate rotation (in this case, a quaternion) defined using scipy should function properly. + Test quaternion rotation defined using scipy. 
""" - df = pd.DataFrame({'x': [1], 'y': [1], 'z': [1], 'A': [4], 'B': [3], 'C': [2], 'h': [1.9], 'rho': [0.4], - 'm': [0.03]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [1], 'y': [1], 'z': [1], + 'A': [4], 'B': [3], 'C': [2], + 'h': [1.9], 'rho': [0.4], 'm': [0.03]} + sdf = SarracenDataFrame(data, params=dict()) kernel = CubicSplineKernel() sdf.kernel = kernel sdf.backend = backend @@ -173,17 +247,37 @@ def test_quaternion_rotation(backend): column_kernel = kernel.get_column_kernel_func(1000) quat = Rotation.from_quat([5, 3, 8, 1]) - image_col = interpolate_3d_proj(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), rotation=quat, - rot_origin=[0, 0, 0], dens_weight=False, normalize=False, hmin=False) - image_colvec = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=quat, rot_origin=[0, 0, 0], dens_weight=False, normalize=False, hmin=False) - image_cross = interpolate_3d_cross(sdf, 'A', z_slice=0, rotation=quat, rot_origin=[0, 0, 0], x_pixels=50, y_pixels=50, - xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - image_crossvec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x_pixels=50, y_pixels=50, xlim=(-1, 1), - ylim=(-1, 1), rotation=quat, rot_origin=[0, 0, 0], normalize=False, hmin=False) + img_col = interpolate_3d_proj(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=quat, + rot_origin=[0, 0, 0], + dens_weight=False, + normalize=False, hmin=False) + img_colvec = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=quat, + rot_origin=[0, 0, 0], + dens_weight=False, + normalize=False, hmin=False) + img_xsec = interpolate_3d_cross(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=quat, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + img_xsecvec = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), 
ylim=(-1, 1), + z_slice=0, + rotation=quat, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) w_col = sdf['m'] / (sdf['rho'] * sdf['h'] ** 2) - w_cross = sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) + w_xsec = sdf['m'] / (sdf['rho'] * sdf['h'] ** 3) pos = quat.apply([1, 1, 1]) val = quat.apply([sdf['A'][0], sdf['B'][0], sdf['C'][0]]) @@ -191,42 +285,51 @@ def test_quaternion_rotation(backend): for y in range(50): for x in range(50): - assert image_col[y][x] == approx(w_col[0] * sdf['A'][0] * column_kernel( - np.sqrt((pos[0] - real[x]) ** 2 + (pos[1] - real[y]) ** 2) / sdf['h'][0], 3)) + r = np.sqrt((pos[0] - real[x])**2 + (pos[1] - real[y])**2) + w = column_kernel(r / sdf['h'][0], 3) + + assert img_col[y][x] == approx(w_col[0] * sdf['A'][0] * w) + assert img_colvec[0][y][x] == approx(w_col[0] * val[0] * w) + assert img_colvec[1][y][x] == approx(w_col[0] * val[1] * w) - assert image_colvec[0][y][x] == approx(w_col[0] * val[0] * column_kernel( - np.sqrt((pos[0] - real[x]) ** 2 + (pos[1] - real[y]) ** 2) / sdf['h'][0], 3)) - assert image_colvec[1][y][x] == approx(w_col[0] * val[1] * column_kernel( - np.sqrt((pos[0] - real[x]) ** 2 + (pos[1] - real[y]) ** 2) / sdf['h'][0], 3)) + r = np.sqrt((pos[0] - real[x])**2 + + (pos[1] - real[y])**2 + + pos[2]**2) + w = kernel.w(r / sdf['h'][0], 3) - assert image_cross[y][x] == approx(w_cross[0] * sdf['A'][0] * kernel.w( - np.sqrt((pos[0] - real[x]) ** 2 + (pos[1] - real[y]) ** 2 + pos[2] ** 2) / sdf['h'][0], 3)) + assert img_xsec[y][x] == approx(w_xsec[0] * sdf['A'][0] * w) - assert image_crossvec[0][y][x] == approx(w_cross[0] * val[0] * kernel.w( - np.sqrt((pos[0] - real[x]) ** 2 + (pos[1] - real[y]) ** 2 + pos[2] ** 2) / sdf['h'][0], 3)) - assert image_crossvec[1][y][x] == approx(w_cross[0] * val[1] * kernel.w( - np.sqrt((pos[0] - real[x]) ** 2 + (pos[1] - real[y]) ** 2 + pos[2] ** 2) / sdf['h'][0], 3)) + assert img_xsecvec[0][y][x] == approx(w_xsec[0] * val[0] * w) + assert img_xsecvec[1][y][x] == approx(w_xsec[0] * val[1] * 
w) - image_grid = interpolate_3d_grid(sdf, 'A', x_pixels=50, y_pixels=50, z_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - zlim=(-1, 1), rotation=quat, rot_origin=[0, 0, 0], normalize=False, hmin=False) + img_grid = interpolate_3d_grid(sdf, 'A', + x_pixels=50, y_pixels=50, z_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + rotation=quat, + rot_origin=[0, 0, 0], + normalize=False, hmin=False) for z in range(50): for y in range(50): for x in range(50): - assert image_grid[z][y][x] == approx(w_cross[0] * sdf['A'][0] * kernel.w( - np.sqrt((pos[0] - real[x]) ** 2 + (pos[1] - real[y]) ** 2 + (pos[2] - real[z]) ** 2) - / sdf['h'][0], 3)) + r = np.sqrt((pos[0] - real[x])**2 + + (pos[1] - real[y])**2 + + (pos[2] - real[z])**2) + w = kernel.w(r / sdf['h'][0], 3) + grid_value = w_xsec[0] * sdf['A'][0] * w + + assert img_grid[z][y][x] == approx(grid_value) @mark.parametrize("backend", backends) def test_rotation_stability(backend): """ - A rotation performed at the same location as a pixel (for 3d column & cross-section interpolation) shouldn't change - the resulting interpolation value at the pixel. + Rotation should not change values at the rotation origin. 
""" - df = pd.DataFrame({'x': [1, 3], 'y': [1, -1], 'z': [1, -0.5], 'A': [4, 3], 'B': [3, 2], 'C': [1, 1.5], - 'h': [0.9, 1.4], 'rho': [0.4, 0.6], 'm': [0.03, 0.06]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [1, 3], 'y': [1, -1], 'z': [1, -0.5], + 'A': [4, 3], 'B': [3, 2], 'C': [1, 1.5], + 'h': [0.9, 1.4], 'rho': [0.4, 0.6], 'm': [0.03, 0.06]} + sdf = SarracenDataFrame(data, params=dict()) kernel = CubicSplineKernel() sdf.kernel = kernel sdf.backend = backend @@ -235,67 +338,115 @@ def test_rotation_stability(backend): pixel_x, pixel_y = 12, 30 for func in [interpolate_3d_proj, interpolate_3d_cross]: - image = func(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - image_rot = func(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), rotation=[237, 0, 0], - rot_origin=[real[pixel_x], real[pixel_y], 0], normalize=False, hmin=False) + img = func(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + img_rot = func(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=[237, 0, 0], + rot_origin=[real[pixel_x], real[pixel_y], 0], + normalize=False, hmin=False) - assert image[pixel_y][pixel_x] == approx(image_rot[pixel_y][pixel_x]) + assert img[pixel_y][pixel_x] == approx(img_rot[pixel_y][pixel_x]) @mark.parametrize("backend", backends) def test_axes_rotation_separation(backend): """ - Rotations should be independent of the defined x & y interpolation axes. Similar to test_image_transpose(), but a - rotation is applied to all interpolations. + Rotations should be independent of the defined x & y interpolation axes. + Similar to test_image_transpose(), but a rotation is applied to all + interpolations. 
""" - df = pd.DataFrame({'x': [-1, 1], 'y': [1, -1], 'z': [1, -1], 'A': [2, 1.5], 'B': [2, 2], 'C': [4, 3], - 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [-1, 1], 'y': [1, -1], 'z': [1, -1], + 'A': [2, 1.5], 'B': [2, 2], 'C': [4, 3], + 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]} + sdf = SarracenDataFrame(data, params=dict()) sdf.backend = backend - image1 = interpolate_3d_proj(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[234, 90, 48], normalize=False, hmin=False) - image2 = interpolate_3d_proj(sdf, 'A', x='y', y='x', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[234, 90, 48], normalize=False, hmin=False) - assert_allclose(image1, image2.T) - - image1 = interpolate_3d_vec(sdf, 'A', 'B', 'C', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[234, 90, 48], normalize=False, hmin=False) - image2 = interpolate_3d_vec(sdf, 'A', 'B', 'C', x='y', y='x', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[234, 90, 48], normalize=False, hmin=False) - assert_allclose(image1[0], image2[0].T) - assert_allclose(image1[1], image2[1].T) - - image1 = interpolate_3d_cross(sdf, 'A', z_slice=0, rotation=[234, 90, 48], x_pixels=50, y_pixels=50, xlim=(-1, 1), - ylim=(-1, 1), normalize=False, hmin=False) - image2 = interpolate_3d_cross(sdf, 'A', x='y', y='x', z_slice=0, rotation=[234, 90, 48], x_pixels=50, y_pixels=50, - xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - assert_allclose(image1, image2.T) - - image1 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[234, 90, 48], normalize=False, hmin=False) - image2 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', 0, x='y', y='x', x_pixels=50, y_pixels=50, xlim=(-1, 1), - ylim=(-1, 1), rotation=[234, 90, 48], normalize=False, hmin=False) - assert_allclose(image1[0], image2[0].T) - 
assert_allclose(image1[1], image2[1].T) - - image1 = interpolate_3d_grid(sdf, 'A', x_pixels=50, y_pixels=50, z_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - zlim=(-1, 1), rotation=[234, 90, 48], normalize=False, hmin=False) - image2 = interpolate_3d_grid(sdf, 'A', x='y', y='x', x_pixels=50, y_pixels=50, z_pixels=50, xlim=(-1, 1), - ylim=(-1, 1), zlim=(-1, 1), rotation=[234, 90, 48], normalize=False, hmin=False) - assert_allclose(image1, image2.transpose(0, 2, 1)) + rotation = [234, 90, 48] + + img1 = interpolate_3d_proj(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=rotation, + normalize=False, hmin=False) + img2 = interpolate_3d_proj(sdf, 'A', x='y', y='x', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=rotation, + normalize=False, hmin=False) + assert_allclose(img1, img2.T) + + img1 = interpolate_3d_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=rotation, + normalize=False, hmin=False) + img2 = interpolate_3d_vec(sdf, 'A', 'B', 'C', x='y', y='x', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=rotation, + normalize=False, hmin=False) + assert_allclose(img1[0], img2[0].T) + assert_allclose(img1[1], img2[1].T) + + img1 = interpolate_3d_cross(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=rotation, + normalize=False, hmin=False) + img2 = interpolate_3d_cross(sdf, 'A', x='y', y='x', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=rotation, + normalize=False, hmin=False) + assert_allclose(img1, img2.T) + + img1 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=rotation, + normalize=False, hmin=False) + img2 = interpolate_3d_cross_vec(sdf, 'A', 'B', 'C', + x='y', y='x', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + z_slice=0, + rotation=rotation, + normalize=False, hmin=False) + 
assert_allclose(img1[0], img2[0].T) + assert_allclose(img1[1], img2[1].T) + + img1 = interpolate_3d_grid(sdf, 'A', + x_pixels=50, y_pixels=50, z_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + rotation=rotation, + normalize=False, hmin=False) + img2 = interpolate_3d_grid(sdf, 'A', x='y', y='x', + x_pixels=50, y_pixels=50, z_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), zlim=(-1, 1), + rotation=rotation, + normalize=False, hmin=False) + assert_allclose(img1, img2.transpose(0, 2, 1)) @mark.parametrize("backend", backends) def test_axes_rotation_equivalency(backend): """ - A rotated interpolation (at multiples of 90 degrees) should be equivalent to a transformed interpolation with - different x & y axes. For example, an interpolation rotated by 180 degrees around the z axis should be equivalent - to the transpose of an unaltered interpolation. + A rotated interpolation (at multiples of 90 degrees) should be equivalent + to a transformed interpolation with different x & y axes. For example, + an interpolation rotated by 180 degrees around the z axis should be + equivalent to the transpose of an unaltered interpolation. 
""" - df = pd.DataFrame({'x': [-1, 1], 'y': [1, -1], 'z': [1, -1], 'A': [2, 1.5], 'B': [2, 2], 'C': [4, 3], - 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]}) - sdf = SarracenDataFrame(df, params=dict()) + data = {'x': [-1, 1], 'y': [1, -1], 'z': [1, -1], + 'A': [2, 1.5], 'B': [2, 2], 'C': [4, 3], + 'h': [1.1, 1.3], 'rho': [0.55, 0.45], 'm': [0.04, 0.05]} + sdf = SarracenDataFrame(data, params=dict()) sdf.backend = backend x, y, z = 'x', 'y', 'z' @@ -306,12 +457,22 @@ def test_axes_rotation_equivalency(backend): rot_x, rot_y, rot_z = i_x * 90, i_y * 90, i_z * 90 for func in [interpolate_3d_proj, interpolate_3d_cross]: - image1 = func(sdf, 'A', x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), - rotation=[rot_z, rot_y, rot_x], rot_origin=[0, 0, 0], normalize=False, hmin=False) - image2 = func(sdf, 'A', x=x, y=y, x_pixels=50, y_pixels=50, xlim=(-1, 1), ylim=(-1, 1), normalize=False, hmin=False) - image2 = image2 if not flip_x else np.flip(image2, 1) - image2 = image2 if not flip_y else np.flip(image2, 0) - assert_allclose(image1, image2) + + img1 = func(sdf, 'A', + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + rotation=[rot_z, rot_y, rot_x], + rot_origin=[0, 0, 0], + normalize=False, hmin=False) + img2 = func(sdf, 'A', + x=x, y=y, + x_pixels=50, y_pixels=50, + xlim=(-1, 1), ylim=(-1, 1), + normalize=False, hmin=False) + + img2 = img2 if not flip_x else np.flip(img2, 1) + img2 = img2 if not flip_y else np.flip(img2, 0) + assert_allclose(img1, img2) y, z = z, y flip_y, flip_z = not flip_z, flip_y @@ -347,14 +508,16 @@ def test_com_rotation(): y = y - com[1] z = z - com[2] sdf_zero = SarracenDataFrame({'x': x, 'y': y, 'z': z, 'h': h, 'val': val}, - params = {'mass': mass, 'hfact': 1.2}) + params={'mass': mass, 'hfact': 1.2}) for func in [interpolate_3d_proj, interpolate_3d_cross]: - image1 = func(sdf_com, 'val', - x_pixels=50, y_pixels=50, - rotation=[35, 60, 75], rot_origin='com') - image2 = func(sdf_zero, 'val', - x_pixels=50, y_pixels=50, - 
rotation=[35, 60, 75], rot_origin=[0, 0, 0]) - - assert_allclose(image1, image2) + img1 = func(sdf_com, 'val', + x_pixels=50, y_pixels=50, + rotation=[35, 60, 75], + rot_origin='com') + img2 = func(sdf_zero, 'val', + x_pixels=50, y_pixels=50, + rotation=[35, 60, 75], + rot_origin=[0, 0, 0]) + + assert_allclose(img1, img2) diff --git a/sarracen/tests/readers/test_read_csv.py b/sarracen/tests/readers/test_read_csv.py index 304f9ca..1c2d774 100644 --- a/sarracen/tests/readers/test_read_csv.py +++ b/sarracen/tests/readers/test_read_csv.py @@ -1,37 +1,35 @@ import pandas as pd -import numpy as np from pandas import testing as tm from sarracen.readers import read_csv def test_get_units(): test_units = pd.Series(['va_r?s w/ sp3ci@l ch%rs [units /$:. ]', - 'a [a]', - 'a', - '[]']) + 'a [a]', + 'a', + '[]']) test_units = read_csv._get_units(test_units) answer_units = ['units /$:. ', - 'a', - None, - None] + 'a', + None, + None] answer_units = pd.Series(answer_units, name=0) tm.assert_series_equal(test_units, answer_units) - def test_get_labels(): test_labels = pd.Series(['va_r?s w/ sp3ci@l ch%rs [units /$:. 
]', - 'a [a]', - 'a', - '[]']) + 'a [a]', + 'a', + '[]']) test_labels = read_csv._get_labels(test_labels) answer_labels = ['va_r?s w/ sp3ci@l ch%rs', - 'a', - 'a', - None] + 'a', + 'a', + None] answer_labels = pd.Series(answer_labels, name=0) tm.assert_series_equal(test_labels, answer_labels) diff --git a/sarracen/tests/readers/test_read_phantom.py b/sarracen/tests/readers/test_read_phantom.py index a49e5e6..4a0cf26 100644 --- a/sarracen/tests/readers/test_read_phantom.py +++ b/sarracen/tests/readers/test_read_phantom.py @@ -1,6 +1,5 @@ import pandas as pd import numpy as np -import io from pandas import testing as tm import sarracen import pytest @@ -33,10 +32,10 @@ def _create_file_identifier(): read_tag = np.array([13], dtype='int32') file_identifier = "Test of read_phantom".ljust(100) - bytes_file = bytearray(read_tag.tobytes()) - bytes_file += bytearray(map(ord, file_identifier)) - bytes_file += bytearray(read_tag.tobytes()) - return bytes_file + file = bytearray(read_tag.tobytes()) + file += bytearray(map(ord, file_identifier)) + file += bytearray(read_tag.tobytes()) + return file def _create_global_header(massoftype=1e-6, massoftype_7=None, @@ -44,43 +43,43 @@ def _create_global_header(massoftype=1e-6, massoftype_7=None, """ Construct global variables. Only massoftype in this example. 
""" read_tag = np.array([13], dtype='int32') - bytes_file = bytearray() + file = bytearray() for i in range(8): # loop over 8 dtypes - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) nvars = (i == 5) + (massoftype_7 is not None) if i == 5: # default real nvars = np.array([nvars], dtype='int32') else: nvars = np.array([0], dtype='int32') - bytes_file += bytearray(nvars.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(nvars.tobytes()) + file += bytearray(read_tag.tobytes()) if i == 5: # default real - bytes_file += bytearray(read_tag.tobytes()) - bytes_file += bytearray(map(ord, "massoftype".ljust(16))) + file += bytearray(read_tag.tobytes()) + file += bytearray(map(ord, "massoftype".ljust(16))) if massoftype_7 is not None: - bytes_file += bytearray(map(ord, "massoftype_7".ljust(16))) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(map(ord, "massoftype_7".ljust(16))) + file += bytearray(read_tag.tobytes()) if i == 5: - bytes_file += bytearray(read_tag.tobytes()) - bytes_file += bytearray(np.array([massoftype], dtype=def_real)) + file += bytearray(read_tag.tobytes()) + file += bytearray(np.array([massoftype], dtype=def_real)) if massoftype_7 is not None: - bytes_file += bytearray(np.array([massoftype_7], dtype=def_real)) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(np.array([massoftype_7], dtype=def_real)) + file += bytearray(read_tag.tobytes()) - return bytes_file + return file def _create_particle_array(tag, data, dtype=np.float64): read_tag = np.array([13], dtype='int32') - bytes_file = bytearray(read_tag.tobytes()) - bytes_file += bytearray(map(ord,tag.ljust(16))) - bytes_file += bytearray(read_tag.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) - bytes_file += bytearray(np.array(data, dtype=dtype).tobytes()) - bytes_file += bytearray(read_tag.tobytes()) - return bytes_file + file = bytearray(read_tag.tobytes()) + file += bytearray(map(ord, 
tag.ljust(16))) + file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) + file += bytearray(np.array(data, dtype=dtype).tobytes()) + file += bytearray(read_tag.tobytes()) + return file @pytest.mark.parametrize("def_int, def_real", @@ -89,31 +88,31 @@ def _create_particle_array(tag, data, dtype=np.float64): def test_determine_default_precision2(def_int, def_real): """ Test if default int / real precision can be determined. """ - bytes_file = _create_capture_pattern(def_int, def_real) - bytes_file += _create_file_identifier() - bytes_file += _create_global_header(def_int=def_int, def_real=def_real) + file = _create_capture_pattern(def_int, def_real) + file += _create_file_identifier() + file += _create_global_header(def_int=def_int, def_real=def_real) # create 1 block for gas read_tag = np.array([13], dtype='int32') - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) nblocks = np.array([1], dtype='int32') - bytes_file += bytearray(nblocks.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(nblocks.tobytes()) + file += bytearray(read_tag.tobytes()) # 2 particles storing 1 default int and real arrays - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) n = np.array([2], dtype='int64') nums = np.array([1, 0, 0, 0, 0, 1, 0, 0], dtype='int32') - bytes_file += bytearray(n.tobytes()) - bytes_file += bytearray(nums.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(n.tobytes()) + file += bytearray(nums.tobytes()) + file += bytearray(read_tag.tobytes()) # write particle arrays - bytes_file += _create_particle_array("def_int", [1, 2], dtype=def_int) - bytes_file += _create_particle_array("def_real", [1.0, 2.0], dtype=def_real) + file += _create_particle_array("def_int", [1, 2], dtype=def_int) + file += _create_particle_array("def_real", [1.0, 2.0], dtype=def_real) with tempfile.NamedTemporaryFile() as fp: - fp.write(bytes_file) + 
fp.write(file) fp.seek(0) sdf = sarracen.read_phantom(fp.name) @@ -123,33 +122,34 @@ def test_determine_default_precision2(def_int, def_real): def test_gas_particles_only(): - bytes_file = _create_capture_pattern(np.int32, np.float64) - bytes_file += _create_file_identifier() - bytes_file += _create_global_header() + file = _create_capture_pattern(np.int32, np.float64) + file += _create_file_identifier() + file += _create_global_header() # create 1 block for gas read_tag = np.array([13], dtype='int32') - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) nblocks = np.array([1], dtype='int32') - bytes_file += bytearray(nblocks.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(nblocks.tobytes()) + file += bytearray(read_tag.tobytes()) # 8 particles storing 4 real arrays (x, y, z, h) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) n = np.array([8], dtype='int64') nums = np.array([0, 0, 0, 0, 0, 4, 0, 0], dtype='int32') - bytes_file += bytearray(n.tobytes()) - bytes_file += bytearray(nums.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(n.tobytes()) + file += bytearray(nums.tobytes()) + file += bytearray(read_tag.tobytes()) # write 4 particle arrays - bytes_file += _create_particle_array("x", [0, 0, 0, 0, 1, 1, 1, 1]) - bytes_file += _create_particle_array("y", [0, 0, 1, 1, 0, 0, 1, 1]) - bytes_file += _create_particle_array("z", [0, 1, 0, 1, 0, 1, 0, 1]) - bytes_file += _create_particle_array("h", [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]) + file += _create_particle_array("x", [0, 0, 0, 0, 1, 1, 1, 1]) + file += _create_particle_array("y", [0, 0, 1, 1, 0, 0, 1, 1]) + file += _create_particle_array("z", [0, 1, 0, 1, 0, 1, 0, 1]) + file += _create_particle_array("h", [1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1]) with tempfile.NamedTemporaryFile() as fp: - fp.write(bytes_file) + fp.write(file) fp.seek(0) sdf = sarracen.read_phantom(fp.name, 
separate_types='all') @@ -157,57 +157,68 @@ def test_gas_particles_only(): assert sdf.params['mass'] == 1e-6 assert 'mass' not in sdf.columns tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) sdf = sarracen.read_phantom(fp.name, separate_types='sinks') assert sdf.params['massoftype'] == 1e-6 assert sdf.params['mass'] == 1e-6 assert 'mass' not in sdf.columns tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) sdf = sarracen.read_phantom(fp.name, separate_types=None) assert sdf.params['massoftype'] == 1e-6 assert sdf.params['mass'] == 1e-6 assert 'mass' not in sdf.columns tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) def test_gas_dust_particles(): - bytes_file = _create_capture_pattern(np.int32, np.float64) - bytes_file += _create_file_identifier() - bytes_file += _create_global_header(massoftype_7=1e-4) + file = _create_capture_pattern(np.int32, np.float64) + file += _create_file_identifier() + file += _create_global_header(massoftype_7=1e-4) # create 1 block for gas read_tag = np.array([13], dtype='int32') - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) nblocks = np.array([1], dtype='int32') - bytes_file += bytearray(nblocks.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(nblocks.tobytes()) + file += bytearray(read_tag.tobytes()) # 8 particles storing 4 real arrays (x, y, z, h) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) n = np.array([16], dtype='int64') nums = np.array([0, 1, 0, 0, 0, 4, 0, 0], dtype='int32') - bytes_file += 
bytearray(n.tobytes()) - bytes_file += bytearray(nums.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(n.tobytes()) + file += bytearray(nums.tobytes()) + file += bytearray(read_tag.tobytes()) # write 5 gas/dust particle arrays - bytes_file += _create_particle_array("itype", [1, 1, 1, 1, 1, 1, 1, 1, - 7, 7, 7, 7, 7, 7, 7, 7], np.int8) - bytes_file += _create_particle_array("x", [0, 0, 0, 0, 1, 1, 1, 1, - 0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5]) - bytes_file += _create_particle_array("y", [0, 0, 1, 1, 0, 0, 1, 1, - 0.5, 0.5, 1.5, 1.5, 0.5, 0.5, 1.5, 1.5]) - bytes_file += _create_particle_array("z", [0, 1, 0, 1, 0, 1, 0, 1, - 0.5, 1.5, 0.5, 1.5, 0.5, 1.5, 0.5, 1.5]) - bytes_file += _create_particle_array("h", [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, - 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]) + file += _create_particle_array("itype", [1, 1, 1, 1, 1, 1, 1, 1, + 7, 7, 7, 7, 7, 7, 7, 7], np.int8) + file += _create_particle_array("x", [0, 0, 0, 0, + 1, 1, 1, 1, + 0.5, 0.5, 0.5, 0.5, + 1.5, 1.5, 1.5, 1.5]) + file += _create_particle_array("y", [0, 0, 1, 1, + 0, 0, 1, 1, + 0.5, 0.5, 1.5, 1.5, + 0.5, 0.5, 1.5, 1.5]) + file += _create_particle_array("z", [0, 1, 0, 1, + 0, 1, 0, 1, + 0.5, 1.5, 0.5, 1.5, + 0.5, 1.5, 0.5, 1.5]) + file += _create_particle_array("h", [1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1]) with tempfile.NamedTemporaryFile() as fp: - fp.write(bytes_file) + fp.write(file) fp.seek(0) sdf_g, sdf_d = sarracen.read_phantom(fp.name, separate_types='all') @@ -221,10 +232,13 @@ def test_gas_dust_particles(): assert 'mass' not in sdf_d.columns tm.assert_series_equal(sdf_g['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf_d['x'], - pd.Series([0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5]), - check_index=False, check_names=False, check_dtype=False) + pd.Series([0.5, 0.5, 
0.5, 0.5, + 1.5, 1.5, 1.5, 1.5]), + check_index=False, check_names=False, + check_dtype=False) sdf = sarracen.read_phantom(fp.name, separate_types='sinks') assert sdf.params['massoftype'] == 1e-6 @@ -235,10 +249,13 @@ def test_gas_dust_particles(): assert sdf[sdf.itype == 7]['mass'].unique() == [1e-4] tm.assert_series_equal(sdf[sdf.itype == 1]['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf[sdf.itype == 7]['x'], - pd.Series([0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5]), - check_index=False, check_names=False, check_dtype=False) + pd.Series([0.5, 0.5, 0.5, 0.5, + 1.5, 1.5, 1.5, 1.5]), + check_index=False, check_names=False, + check_dtype=False) sdf = sarracen.read_phantom(fp.name, separate_types=None) assert sdf.params['massoftype'] == 1e-6 @@ -249,57 +266,61 @@ def test_gas_dust_particles(): assert sdf[sdf.itype == 7]['mass'].unique() == [1e-4] tm.assert_series_equal(sdf[sdf.itype == 1]['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf[sdf.itype == 7]['x'], - pd.Series([0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5]), - check_index=False, check_names=False, check_dtype=False) + pd.Series([0.5, 0.5, 0.5, 0.5, 1.5, + 1.5, 1.5, 1.5]), + check_index=False, check_names=False, + check_dtype=False) def test_gas_sink_particles(): - bytes_file = _create_capture_pattern(np.int32, np.float64) - bytes_file += _create_file_identifier() - bytes_file += _create_global_header() + file = _create_capture_pattern(np.int32, np.float64) + file += _create_file_identifier() + file += _create_global_header() # create 1 block for gas read_tag = np.array([13], dtype='int32') - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) nblocks = np.array([2], dtype='int32') - bytes_file += 
bytearray(nblocks.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(nblocks.tobytes()) + file += bytearray(read_tag.tobytes()) # 8 particles storing 4 real arrays (x, y, z, h) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) n = np.array([8], dtype='int64') - nums = np.array([0, 0, 0, 0, 0, 4, 0, 0] , dtype='int32') - bytes_file += bytearray(n.tobytes()) - bytes_file += bytearray(nums.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + nums = np.array([0, 0, 0, 0, 0, 4, 0, 0], dtype='int32') + file += bytearray(n.tobytes()) + file += bytearray(nums.tobytes()) + file += bytearray(read_tag.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) n = np.array([1], dtype='int64') - nums = np.array([0, 0, 0, 0, 0, 7, 0, 0] , dtype='int32') - bytes_file += bytearray(n.tobytes()) - bytes_file += bytearray(nums.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + nums = np.array([0, 0, 0, 0, 0, 7, 0, 0], dtype='int32') + file += bytearray(n.tobytes()) + file += bytearray(nums.tobytes()) + file += bytearray(read_tag.tobytes()) # write 4 gas particle arrays - bytes_file += _create_particle_array("x", [0, 0, 0, 0, 1, 1, 1, 1]) - bytes_file += _create_particle_array("y", [0, 0, 1, 1, 0, 0, 1, 1]) - bytes_file += _create_particle_array("z", [0, 1, 0, 1, 0, 1, 0, 1]) - bytes_file += _create_particle_array("h", [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]) + file += _create_particle_array("x", [0, 0, 0, 0, 1, 1, 1, 1]) + file += _create_particle_array("y", [0, 0, 1, 1, 0, 0, 1, 1]) + file += _create_particle_array("z", [0, 1, 0, 1, 0, 1, 0, 1]) + file += _create_particle_array("h", [1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1]) # write 7 sink particle arrays - bytes_file += _create_particle_array("x", [0.000305]) - bytes_file += _create_particle_array("y", [-0.035809]) - bytes_file += _create_particle_array("z", [-0.000035]) - bytes_file += _create_particle_array("h", 
[1.0]) - bytes_file += _create_particle_array("spinx", [-3.911744e-8]) - bytes_file += _create_particle_array("spiny", [-1.326062e-8]) - bytes_file += _create_particle_array("spinz", [0.00058]) + file += _create_particle_array("x", [0.000305]) + file += _create_particle_array("y", [-0.035809]) + file += _create_particle_array("z", [-0.000035]) + file += _create_particle_array("h", [1.0]) + file += _create_particle_array("spinx", [-3.911744e-8]) + file += _create_particle_array("spiny", [-1.326062e-8]) + file += _create_particle_array("spinz", [0.00058]) with tempfile.NamedTemporaryFile() as fp: - fp.write(bytes_file) + fp.write(file) fp.seek(0) sdf, sdf_sinks = sarracen.read_phantom(fp.name, separate_types='all') @@ -310,11 +331,14 @@ def test_gas_sink_particles(): assert 'mass' not in sdf.columns assert 'mass' not in sdf_sinks.columns tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf_sinks['x'], pd.Series([0.000305]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf_sinks['spinx'], pd.Series([-3.911744e-8]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) sdf, sdf_sinks = sarracen.read_phantom(fp.name, separate_types='sinks') assert sdf.params['massoftype'] == 1e-6 @@ -324,76 +348,94 @@ def test_gas_sink_particles(): assert 'mass' not in sdf.columns assert 'mass' not in sdf_sinks.columns tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf_sinks['x'], pd.Series([0.000305]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + 
check_dtype=False) tm.assert_series_equal(sdf_sinks['spinx'], pd.Series([-3.911744e-8]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) sdf = sarracen.read_phantom(fp.name, separate_types=None) assert sdf.params['massoftype'] == 1e-6 assert sdf.params['mass'] == 1e-6 assert 'mass' not in sdf.columns - tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1, 0.000305]), - check_index=False, check_names=False, check_dtype=False) - tm.assert_series_equal(sdf['h'], pd.Series([1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.0]), - check_index=False, check_names=False, check_dtype=False) + tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, + 1, 1, 1, 1, + 0.000305]), + check_index=False, check_names=False, + check_dtype=False) + tm.assert_series_equal(sdf['h'], pd.Series([1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1, + 1.0]), + check_index=False, check_names=False, + check_dtype=False) def test_gas_dust_sink_particles(): - bytes_file = _create_capture_pattern(np.int32, np.float64) - bytes_file += _create_file_identifier() - bytes_file += _create_global_header(massoftype_7=1e-4) + file = _create_capture_pattern(np.int32, np.float64) + file += _create_file_identifier() + file += _create_global_header(massoftype_7=1e-4) # create 1 block for gas read_tag = np.array([13], dtype='int32') - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) nblocks = np.array([2], dtype='int32') - bytes_file += bytearray(nblocks.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(nblocks.tobytes()) + file += bytearray(read_tag.tobytes()) # 8 particles storing 4 real arrays (x, y, z, h) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) n = np.array([16], dtype='int64') - nums = np.array([0, 1, 0, 0, 0, 4, 0, 0] , dtype='int32') - bytes_file += bytearray(n.tobytes()) - bytes_file += bytearray(nums.tobytes()) - bytes_file += 
bytearray(read_tag.tobytes()) + nums = np.array([0, 1, 0, 0, 0, 4, 0, 0], dtype='int32') + file += bytearray(n.tobytes()) + file += bytearray(nums.tobytes()) + file += bytearray(read_tag.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + file += bytearray(read_tag.tobytes()) n = np.array([1], dtype='int64') - nums = np.array([0, 0, 0, 0, 0, 7, 0, 0] , dtype='int32') - bytes_file += bytearray(n.tobytes()) - bytes_file += bytearray(nums.tobytes()) - bytes_file += bytearray(read_tag.tobytes()) + nums = np.array([0, 0, 0, 0, 0, 7, 0, 0], dtype='int32') + file += bytearray(n.tobytes()) + file += bytearray(nums.tobytes()) + file += bytearray(read_tag.tobytes()) # write 5 gas/dust particle arrays - bytes_file += _create_particle_array("itype", [1, 1, 1, 1, 1, 1, 1, 1, - 7, 7, 7, 7, 7, 7, 7, 7], np.int8) - bytes_file += _create_particle_array("x", [0, 0, 0, 0, 1, 1, 1, 1, - 0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5]) - bytes_file += _create_particle_array("y", [0, 0, 1, 1, 0, 0, 1, 1, - 0.5, 0.5, 1.5, 1.5, 0.5, 0.5, 1.5, 1.5]) - bytes_file += _create_particle_array("z", [0, 1, 0, 1, 0, 1, 0, 1, - 0.5, 1.5, 0.5, 1.5, 0.5, 1.5, 0.5, 1.5]) - bytes_file += _create_particle_array("h", [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, - 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]) + file += _create_particle_array("itype", [1, 1, 1, 1, 1, 1, 1, 1, + 7, 7, 7, 7, 7, 7, 7, 7], np.int8) + file += _create_particle_array("x", [0, 0, 0, 0, + 1, 1, 1, 1, + 0.5, 0.5, 0.5, 0.5, + 1.5, 1.5, 1.5, 1.5]) + file += _create_particle_array("y", [0, 0, 1, 1, + 0, 0, 1, 1, + 0.5, 0.5, 1.5, 1.5, + 0.5, 0.5, 1.5, 1.5]) + file += _create_particle_array("z", [0, 1, 0, 1, + 0, 1, 0, 1, + 0.5, 1.5, 0.5, 1.5, + 0.5, 1.5, 0.5, 1.5]) + file += _create_particle_array("h", [1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1, + 1.1, 1.1, 1.1, 1.1]) # write 7 sink particle arrays - bytes_file += _create_particle_array("x", [0.000305]) - bytes_file += _create_particle_array("y", [-0.035809]) - bytes_file += 
_create_particle_array("z", [-0.000035]) - bytes_file += _create_particle_array("h", [1.0]) - bytes_file += _create_particle_array("spinx", [-3.911744e-8]) - bytes_file += _create_particle_array("spiny", [-1.326062e-8]) - bytes_file += _create_particle_array("spinz", [0.00058]) + file += _create_particle_array("x", [0.000305]) + file += _create_particle_array("y", [-0.035809]) + file += _create_particle_array("z", [-0.000035]) + file += _create_particle_array("h", [1.0]) + file += _create_particle_array("spinx", [-3.911744e-8]) + file += _create_particle_array("spiny", [-1.326062e-8]) + file += _create_particle_array("spinz", [0.00058]) with tempfile.NamedTemporaryFile() as fp: - fp.write(bytes_file) + fp.write(file) fp.seek(0) - sdf_g, sdf_d, sdf_sinks = sarracen.read_phantom(fp.name, separate_types='all') + sdf_g, sdf_d, sdf_sinks = sarracen.read_phantom(fp.name, + separate_types='all') assert sdf_g.params['massoftype'] == 1e-6 assert sdf_g.params['massoftype_7'] == 1e-4 assert sdf_g.params['mass'] == 1e-6 @@ -408,16 +450,22 @@ def test_gas_dust_sink_particles(): assert 'mass' not in sdf_sinks.columns tm.assert_series_equal(sdf_g['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf_d['x'], - pd.Series([0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5]), - check_index=False, check_names=False, check_dtype=False) + pd.Series([0.5, 0.5, 0.5, 0.5, + 1.5, 1.5, 1.5, 1.5]), + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf_sinks['x'], pd.Series([0.000305]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf_sinks['spinx'], pd.Series([-3.911744e-8]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) - sdf, sdf_sinks = 
sarracen.read_phantom(fp.name, separate_types='sinks') + sdf, sdf_sinks = sarracen.read_phantom(fp.name, + separate_types='sinks') assert sdf.params['massoftype'] == 1e-6 assert sdf.params['massoftype_7'] == 1e-4 assert 'mass' not in sdf.params @@ -426,18 +474,23 @@ def test_gas_dust_sink_particles(): assert sdf[sdf.itype == 7]['mass'].unique() == [1e-4] tm.assert_series_equal(sdf[sdf.itype == 1]['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf[sdf.itype == 7]['x'], - pd.Series([0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5]), - check_index=False, check_names=False, check_dtype=False) + pd.Series([0.5, 0.5, 0.5, 0.5, + 1.5, 1.5, 1.5, 1.5]), + check_index=False, check_names=False, + check_dtype=False) assert sdf_sinks.params['massoftype'] == 1e-6 assert sdf_sinks.params['massoftype_7'] == 1e-4 assert 'mass' not in sdf_sinks.params assert 'mass' not in sdf_sinks.columns tm.assert_series_equal(sdf_sinks['x'], pd.Series([0.000305]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) tm.assert_series_equal(sdf_sinks['spinx'], pd.Series([-3.911744e-8]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) sdf = sarracen.read_phantom(fp.name, separate_types=None) assert sdf.params['massoftype'] == 1e-6 @@ -446,11 +499,18 @@ def test_gas_dust_sink_particles(): assert 'mass' in sdf.columns assert sdf[sdf.itype == 1]['mass'].unique() == [1e-6] assert sdf[sdf.itype == 7]['mass'].unique() == [1e-4] - tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, 1, 1, 1, 1, - 0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, + tm.assert_series_equal(sdf['x'], pd.Series([0, 0, 0, 0, + 1, 1, 1, 1, + 0.5, 0.5, 0.5, 0.5, + 1.5, 1.5, 1.5, 1.5, 0.000305]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, 
check_names=False, + check_dtype=False) tm.assert_series_equal(sdf['h'], pd.Series([1.1] * 16 + [1.0]), - check_index=False, check_names=False, check_dtype=False) - tm.assert_series_equal(sdf['mass'], pd.Series([1e-6] * 8 + [1e-4] * 8 + [np.nan]), - check_index=False, check_names=False, check_dtype=False) + check_index=False, check_names=False, + check_dtype=False) + tm.assert_series_equal(sdf['mass'], pd.Series([1e-6] * 8 + + [1e-4] * 8 + + [np.nan]), + check_index=False, check_names=False, + check_dtype=False) diff --git a/sarracen/tests/test_kernels.py b/sarracen/tests/test_kernels.py index ccd322a..2ce0650 100644 --- a/sarracen/tests/test_kernels.py +++ b/sarracen/tests/test_kernels.py @@ -3,7 +3,9 @@ from scipy.integrate import quad, dblquad, tplquad import numpy as np -from sarracen.kernels import CubicSplineKernel, QuarticSplineKernel, QuinticSplineKernel +from sarracen.kernels import CubicSplineKernel +from sarracen.kernels import QuarticSplineKernel +from sarracen.kernels import QuinticSplineKernel def single_kernel(x, kernel): @@ -90,82 +92,95 @@ def test_quinticspline(): @mark.parametrize("kernel", - [CubicSplineKernel(), QuarticSplineKernel(), QuinticSplineKernel()]) + [CubicSplineKernel(), + QuarticSplineKernel(), + QuinticSplineKernel()]) def test_normalization(kernel): - # Since the three integrals below are only performed in positive space, the - # resulting normalized values will not be equal to 1, rather 1/(2^dim). This - # value represents the proportion of space in 1,2, and 3 dimensions that - # has all positive coordinates. + # Since the three integrals below are only performed in positive space, + # the resulting normalized values will not be equal to 1, rather 1/(2^dim). + # This value represents the proportion of space in 1,2, and 3 dimensions + # that has all positive coordinates. 
- norm = quad(single_kernel, -kernel.get_radius(), kernel.get_radius(), kernel)[0] + norm = quad(single_kernel, -kernel.get_radius(), + kernel.get_radius(), kernel)[0] assert approx(norm) == 1 - norm = dblquad(double_kernel, -kernel.get_radius(), kernel.get_radius(), -kernel.get_radius(), + norm = dblquad(double_kernel, -kernel.get_radius(), + kernel.get_radius(), -kernel.get_radius(), kernel.get_radius(), [kernel])[0] assert approx(norm) == 1 - norm = tplquad(triple_kernel, -kernel.get_radius(), kernel.get_radius(), -kernel.get_radius(), - kernel.get_radius(), -kernel.get_radius(), kernel.get_radius(), [kernel])[0] + norm = tplquad(triple_kernel, -kernel.get_radius(), + kernel.get_radius(), -kernel.get_radius(), + kernel.get_radius(), -kernel.get_radius(), + kernel.get_radius(), [kernel])[0] assert approx(norm) == 1 def test_cubic_column(): kernel = CubicSplineKernel() column_kernel = kernel.get_column_kernel(10000) - + pts = np.linspace(0, kernel.get_radius(), 10000) # at q = 0, this integral is solvable analytically - assert np.interp(0, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(3 / (2 * np.pi)) + assert np.interp(0, pts, column_kernel) == approx(3 / (2 * np.pi)) # numerically calculated values - assert np.interp(0.5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.33875339978) - assert np.interp(1, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.111036060968) - assert np.interp(1.5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.0114423169642) - assert np.interp(2, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == 0 - assert np.interp(5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == 0 + assert np.interp(0.5, pts, column_kernel) == approx(0.33875339978) + assert np.interp(1, pts, column_kernel) == approx(0.111036060968) + assert np.interp(1.5, pts, column_kernel) == approx(0.0114423169642) + assert np.interp(2, pts, column_kernel) == 0 + 
assert np.interp(5, pts, column_kernel) == 0 def test_quartic_column(): kernel = QuarticSplineKernel() column_kernel = kernel.get_column_kernel(10000) + pts = np.linspace(0, kernel.get_radius(), 10000) # at q = 0, this integral is solvable analytically - assert np.interp(0, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(6 / (5 * np.pi)) + assert np.interp(0, pts, column_kernel) == approx(6 / (5 * np.pi)) # numerically calculated values - assert np.interp(0.5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.288815941868) - assert np.interp(1, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.120120735858) - assert np.interp(1.5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.0233911861393) - assert np.interp(2, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.00116251851966) - assert np.interp(2.5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == 0 - assert np.interp(5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == 0 + assert np.interp(0.5, pts, column_kernel) == approx(0.288815941868) + assert np.interp(1, pts, column_kernel) == approx(0.120120735858) + assert np.interp(1.5, pts, column_kernel) == approx(0.0233911861393) + assert np.interp(2, pts, column_kernel) == approx(0.00116251851966) + assert np.interp(2.5, pts, column_kernel) == 0 + assert np.interp(5, pts, column_kernel) == 0 def test_quintic_column(): kernel = QuinticSplineKernel() column_kernel = kernel.get_column_kernel(10000) + pts = np.linspace(0, kernel.get_radius(), 10000) # at q = 0, this integral is solvable analytically - assert np.interp(0, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(1 / np.pi) + assert np.interp(0, pts, column_kernel) == approx(1 / np.pi) # numerically calculated values - assert np.interp(0.5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.251567608959) - assert np.interp(1, np.linspace(0, 
kernel.get_radius(), 10000), column_kernel) == approx(0.121333261458) - assert np.interp(1.5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.0328632154395) - assert np.interp(2, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.00403036583315) - assert np.interp(2.5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == approx(0.0000979416858548, - rel=1e-4) - assert np.interp(3, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == 0 - assert np.interp(5, np.linspace(0, kernel.get_radius(), 10000), column_kernel) == 0 + assert np.interp(0.5, pts, column_kernel) == approx(0.251567608959) + assert np.interp(1, pts, column_kernel) == approx(0.121333261458) + assert np.interp(1.5, pts, column_kernel) == approx(0.0328632154395) + assert np.interp(2, pts, column_kernel) == approx(0.00403036583315) + assert np.interp(2.5, pts, column_kernel) == approx(0.0000979416858548, + rel=1e-4) + assert np.interp(3, pts, column_kernel) == 0 + assert np.interp(5, pts, column_kernel) == 0 @mark.parametrize("kernel", - [CubicSplineKernel(), QuarticSplineKernel(), QuinticSplineKernel()]) + [CubicSplineKernel(), + QuarticSplineKernel(), + QuinticSplineKernel()]) def test_normalized_column(kernel): - norm = dblquad(double_column, -kernel.get_radius(), kernel.get_radius(), -kernel.get_radius(), - kernel.get_radius(), [kernel.get_column_kernel_func(10000)])[0] + norm = dblquad(double_column, -kernel.get_radius(), kernel.get_radius(), + -kernel.get_radius(), kernel.get_radius(), + [kernel.get_column_kernel_func(10000)])[0] assert approx(norm) == 1 @mark.parametrize("kernel", - [CubicSplineKernel(), QuarticSplineKernel(), QuinticSplineKernel()]) + [CubicSplineKernel(), + QuarticSplineKernel(), + QuinticSplineKernel()]) def test_oob(kernel): for dimensions in range(1, 3): assert kernel.w(-1, dimensions) == 0 diff --git a/sarracen/tests/test_render.py b/sarracen/tests/test_render.py index f2d42b9..54e4a19 100644 --- 
a/sarracen/tests/test_render.py +++ b/sarracen/tests/test_render.py @@ -1,25 +1,28 @@ """pytest unit tests for render.py functions.""" -import pandas as pd from matplotlib import pyplot as plt from numba import cuda from numpy.testing import assert_array_equal from pytest import mark -from sarracen import SarracenDataFrame, interpolate_2d, interpolate_2d_line, interpolate_3d_proj, interpolate_3d_cross +from sarracen import SarracenDataFrame +from sarracen import interpolate_2d, interpolate_2d_line +from sarracen import interpolate_3d_proj, interpolate_3d_cross from sarracen.render import render, streamlines, arrowplot, lineplot backends = ['cpu'] if cuda.is_available(): backends.append('gpu') + @mark.parametrize("backend", backends) def test_interpolation_passthrough(backend): """ - Verify that each rendering function uses the proper underlying interpolation function. + Verify that rendering functions use proper underlying interpolation. """ - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'P': [1, 1], 'Ax': [3, 2], 'Ay': [2, 1], 'h': [1, 1], 'rho': [1, 1], - 'm': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'x': [3, 6], 'y': [5, 1], 'P': [1, 1], + 'Ax': [3, 2], 'Ay': [2, 1], 'h': [1, 1], + 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data) sdf.backend = backend fig, ax = plt.subplots() @@ -29,21 +32,27 @@ def test_interpolation_passthrough(backend): fig, ax = plt.subplots() lineplot(sdf, 'P', xlim=(3, 6), ylim=(1, 5), ax=ax) - assert_array_equal(ax.lines[0].get_ydata(), interpolate_2d_line(sdf, 'P', xlim=(3, 6), ylim=(1, 5))) + assert_array_equal(ax.lines[0].get_ydata(), + interpolate_2d_line(sdf, 'P', xlim=(3, 6), ylim=(1, 5))) plt.close(fig) - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data) sdf.backend = backend fig, ax = 
plt.subplots() render(sdf, 'P', ax=ax) - assert_array_equal(ax.images[0].get_array(), interpolate_3d_proj(sdf, 'P')) + img = ax.images[0].get_array() + interpolation = interpolate_3d_proj(sdf, 'P') + assert_array_equal(img, interpolation) plt.close(fig) fig, ax = plt.subplots() render(sdf, 'P', xsec=1.5, ax=ax) - assert_array_equal(ax.images[0].get_array(), interpolate_3d_cross(sdf, 'P')) + img = ax.images[0].get_array() + interpolation = interpolate_3d_cross(sdf, 'P') + assert_array_equal(img, interpolation) plt.close(fig) @@ -52,8 +61,9 @@ def test_cmap(backend): """ Verify that each rendering function uses the provided color map. """ - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'x': [3, 6], 'y': [5, 1], 'P': [1, 1], 'h': [1, 1], + 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data) sdf.backend = backend fig, ax = plt.subplots() @@ -61,8 +71,9 @@ def test_cmap(backend): assert ax.images[0].cmap.name == 'magma' plt.close(fig) - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data) sdf.backend = backend fig, ax = plt.subplots() @@ -81,14 +92,18 @@ def test_cbar_exclusion(backend): """ Verify that each rendering function respects the cbar argument. 
""" - df_2 = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf_2 = SarracenDataFrame(df_2) + data_2 = {'x': [3, 6], 'y': [5, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf_2 = SarracenDataFrame(data_2) sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf_3 = SarracenDataFrame(df_3) + data_3 = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], + 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf_3 = SarracenDataFrame(data_3) sdf_3.backend = backend - for args in [{'data': sdf_2, 'xsec': None}, {'data': sdf_3, 'xsec': None}, {'data': sdf_3, 'xsec': 1.5}]: + for args in [{'data': sdf_2, 'xsec': None}, + {'data': sdf_3, 'xsec': None}, + {'data': sdf_3, 'xsec': 1.5}]: fig, ax = plt.subplots() render(args['data'], 'P', xsec=args['xsec'], cbar=True, ax=ax) assert ax.images[-1].colorbar is not None @@ -103,18 +118,23 @@ def test_cbar_exclusion(backend): @mark.parametrize("backend", backends) def test_cbar_keywords(backend): """ - Verify that each rendering function respects the passed keywords for the colorbar. + Verify that rendering functions respect the passed colorbar keywords. 
""" - df_2 = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf_2 = SarracenDataFrame(df_2) + data_2 = {'x': [3, 6], 'y': [5, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf_2 = SarracenDataFrame(data_2) sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf_3 = SarracenDataFrame(df_3) + data_3 = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf_3 = SarracenDataFrame(data_3) sdf_3.backend = backend - for args in [{'data': sdf_2, 'xsec': None}, {'data': sdf_3, 'xsec': None}, {'data': sdf_3, 'xsec': 1.5}]: + for args in [{'data': sdf_2, 'xsec': None}, + {'data': sdf_3, 'xsec': None}, + {'data': sdf_3, 'xsec': 1.5}]: fig, ax = plt.subplots() - render(args['data'], 'P', xsec=args['xsec'], cbar_kws={'orientation': 'horizontal'}, ax=ax) + render(args['data'], 'P', xsec=args['xsec'], + cbar_kws={'orientation': 'horizontal'}, ax=ax) assert ax.images[-1].colorbar.orientation == 'horizontal' plt.close(fig) @@ -124,15 +144,19 @@ def test_kwargs(backend): """ Verify that each rendering function respects passed keyword arguments. 
""" - df_2 = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], 'Ax': [1, 1], - 'Ay': [1, 1]}) - sdf_2 = SarracenDataFrame(df_2) + data_2 = {'x': [3, 6], 'y': [5, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], + 'Ax': [1, 1], 'Ay': [1, 1]} + sdf_2 = SarracenDataFrame(data_2) sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf_3 = SarracenDataFrame(df_3) + data_3 = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf_3 = SarracenDataFrame(data_3) sdf_3.backend = backend - for args in [{'data': sdf_2, 'xsec': None}, {'data': sdf_3, 'xsec': None}, {'data': sdf_3, 'xsec': 1.5}]: + for args in [{'data': sdf_2, 'xsec': None}, + {'data': sdf_3, 'xsec': None}, + {'data': sdf_3, 'xsec': 1.5}]: fig, ax = plt.subplots() render(args['data'], 'P', xsec=args['xsec'], ax=ax, origin='upper') assert ax.images[0].origin == 'upper' @@ -159,9 +183,10 @@ def test_rotated_ticks(backend): """ A rotated plot should have no x & y ticks. """ - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], - 'Ax': [1, 1], 'Ay': [1, 1], 'Az': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], + 'Ax': [1, 1], 'Ay': [1, 1], 'Az': [1, 1]} + sdf = SarracenDataFrame(data) sdf.backend = backend for xsec in [None, 1.5]: @@ -172,7 +197,6 @@ def test_rotated_ticks(backend): assert ax.get_yticks().size == 0 plt.close(fig) - for func in [arrowplot, streamlines]: fig, ax = plt.subplots() func(sdf, ('Ax', 'Ay', 'Az'), rotation=[34, 23, 50], ax=ax) @@ -182,30 +206,36 @@ def test_rotated_ticks(backend): plt.close(fig) - @mark.parametrize("backend", backends) def test_plot_labels(backend): """ Verify that plot labels for each rendering function are correct. 
""" - df_2 = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], 'Ax': [1, 1], - 'Ay': [1, 1]}) - sdf_2 = SarracenDataFrame(df_2) + data_2 = {'x': [3, 6], 'y': [5, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], + 'Ax': [1, 1], 'Ay': [1, 1]} + sdf_2 = SarracenDataFrame(data_2) sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], - 'Ax': [1, 1], 'Ay': [1, 1], 'Az': [1, 1]}) - sdf_3 = SarracenDataFrame(df_3) + data_3 = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], + 'Ax': [1, 1], 'Ay': [1, 1], 'Az': [1, 1]} + sdf_3 = SarracenDataFrame(data_3) sdf_3.backend = backend - for args in [{'data': sdf_2, 'xsec': None}, {'data': sdf_3, 'xsec': None}, {'data': sdf_3, 'xsec': 0}]: + for args in [{'data': sdf_2, 'xsec': None}, + {'data': sdf_3, 'xsec': None}, + {'data': sdf_3, 'xsec': 0}]: + + column = args['data'] is sdf_3 and args['xsec'] is None + fig, ax = plt.subplots() render(args['data'], 'P', xsec=args['xsec'], ax=ax) assert ax.get_xlabel() == 'x' assert ax.get_ylabel() == 'y' assert ax.figure.axes[1].get_ylabel() == \ - ('column ' if args['data'] is sdf_3 and args['xsec'] is None else '') + 'P' + ('column ' if column else '') + 'P' plt.close(fig) fig, ax = plt.subplots() @@ -213,8 +243,8 @@ def test_plot_labels(backend): assert ax.get_xlabel() == 'y' assert ax.get_ylabel() == 'x' - assert ax.figure.axes[1].get_ylabel() == ('column ' if args['data'] is sdf_3 and args['xsec'] is None else '')\ - + 'rho' + assert ax.figure.axes[1].get_ylabel() == \ + ('column ' if column else '') + 'rho' plt.close(fig) for func in [streamlines, arrowplot]: @@ -252,16 +282,20 @@ def test_plot_bounds(backend): """ Verify that plot bounds are set correctly for each rendering function. 
""" - df_2 = pd.DataFrame({'x': [6, 3], 'y': [5, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf_2 = SarracenDataFrame(df_2) + data_2 = {'x': [6, 3], 'y': [5, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf_2 = SarracenDataFrame(data_2) sdf_2.backend = backend - df_3 = pd.DataFrame({'x': [6, 3], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], - 'Ax': [1, 1], 'Ay': [1, 1], 'Az': [1, 1]}) - sdf_3 = SarracenDataFrame(df_3) + data_3 = {'x': [6, 3], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1], + 'Ax': [1, 1], 'Ay': [1, 1], 'Az': [1, 1]} + sdf_3 = SarracenDataFrame(data_3) sdf_3.backend = backend - for args in [{'data': sdf_2, 'xsec': None}, {'data': sdf_3, 'xsec': None}, {'data': sdf_3, 'xsec': 1.5}]: + for args in [{'data': sdf_2, 'xsec': None}, + {'data': sdf_3, 'xsec': None}, + {'data': sdf_3, 'xsec': 1.5}]: fig, ax = plt.subplots() render(args['data'], 'P', xsec=args['xsec'], ax=ax) @@ -269,12 +303,15 @@ def test_plot_bounds(backend): assert ax.get_ylim() == (1, 5) if args['data'] is sdf_2: - assert ax.figure.axes[1].get_ylim() == (0, interpolate_2d(sdf_2, 'P').max()) + interpolate = interpolate_2d(sdf_2, 'P') + assert ax.figure.axes[1].get_ylim() == (0, interpolate.max()) else: if args['xsec']: - assert ax.figure.axes[1].get_ylim() == (0, interpolate_3d_cross(sdf_3, 'P').max()) + interpolate = interpolate_3d_cross(sdf_3, 'P') + assert ax.figure.axes[1].get_ylim() == (0, interpolate.max()) else: - assert ax.figure.axes[1].get_ylim() == (0, interpolate_3d_proj(sdf_3, 'P').max()) + interpolate = interpolate_3d_proj(sdf_3, 'P') + assert ax.figure.axes[1].get_ylim() == (0, interpolate.max()) plt.close(fig) fig, ax = plt.subplots() diff --git a/sarracen/tests/test_sarracen_dataframe.py b/sarracen/tests/test_sarracen_dataframe.py index 9e45960..38b2336 100644 --- a/sarracen/tests/test_sarracen_dataframe.py +++ b/sarracen/tests/test_sarracen_dataframe.py @@ -1,5 +1,4 @@ 
"""pytest unit tests for sarracen_dataframe.py functionality.""" -import pandas as pd import numpy as np from matplotlib import pyplot as plt @@ -9,8 +8,9 @@ def test_special_columns(): # The 'x', 'y', 'rho', 'm', and 'h' keywords should be detected. # A 'z' column should not be detected. - df = pd.DataFrame({'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'x': [5, 6], 'y': [5, 4], 'm': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], + 'x': [5, 6], 'y': [5, 4], 'm': [1, 1]} + sdf = SarracenDataFrame(data) assert sdf.xcol == 'x' assert sdf.ycol == 'y' @@ -21,8 +21,9 @@ def test_special_columns(): # The 'rx', 'ry', 'rz', 'density', and 'mass' keywords should be detected. # An 'h' column should not be detected. - df = pd.DataFrame({'ry': [-1, 1], 'density': [1, 1], 'rx': [3, 4], 'P': [1, 1], 'rz': [4, 3], 'mass': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'ry': [-1, 1], 'density': [1, 1], 'rx': [3, 4], + 'P': [1, 1], 'rz': [4, 3], 'mass': [1, 1]} + sdf = SarracenDataFrame(data) assert sdf.xcol == 'rx' assert sdf.ycol == 'ry' @@ -33,9 +34,10 @@ def test_special_columns(): # No keywords, so fall back to the first two columns for x and y. # Even though 'k' exists, this will be assumed to be 2D data. - # The 'h' column will be detected, but no density or mass column will be detected. - df = pd.DataFrame({'i': [3.4, 2.1], 'j': [4.9, 1.6], 'k': [2.3, 2.0], 'h': [1, 1], 'P': [1, 1]}) - sdf = SarracenDataFrame(df) + # The 'h' column will be detected, but not density or mass columns. + data = {'i': [3.4, 2.1], 'j': [4.9, 1.6], 'k': [2.3, 2.0], + 'h': [1, 1], 'P': [1, 1]} + sdf = SarracenDataFrame(data) assert sdf.xcol == 'i' assert sdf.ycol == 'j' @@ -47,28 +49,30 @@ def test_special_columns(): def test_dimensions(): # This should be detected as 3-dimensional data. 
- df = pd.DataFrame({'P': [1, 1], 'z': [4, 3], 'h': [1, 1], 'rho': [1, 1], 'x': [5, 6], 'y': [5, 4], 'm': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'P': [1, 1], 'z': [4, 3], 'h': [1, 1], 'rho': [1, 1], + 'x': [5, 6], 'y': [5, 4], 'm': [1, 1]} + sdf = SarracenDataFrame(data) assert sdf.get_dim() == 3 # This should be detected as 2-dimensional data. - df = pd.DataFrame({'P': [1, 1], 'h': [1, 1], 'y': [5, 4], 'rho': [1, 1], 'm': [1, 1], 'x': [5, 6]}) - sdf = SarracenDataFrame(df) + data = {'P': [1, 1], 'h': [1, 1], 'y': [5, 4], 'rho': [1, 1], + 'm': [1, 1], 'x': [5, 6]} + sdf = SarracenDataFrame(data) assert sdf.get_dim() == 2 # This should assumed to be 2-dimensional data. - df = pd.DataFrame({'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data) assert sdf.get_dim() == 2 def test_column_changing(): - df = pd.DataFrame({'P': [1], 'z': [2], 'h': [3], 'rho': [4], 'x': [5], 'y': [6], 'm': [7], 'd': [8], 'smooth': [9], - 'ma': [10]}) - sdf = SarracenDataFrame(df) + data = {'P': [1], 'z': [2], 'h': [3], 'rho': [4], 'x': [5], + 'y': [6], 'm': [7], 'd': [8], 'smooth': [9], 'ma': [10]} + sdf = SarracenDataFrame(data) assert sdf.xcol == 'x' assert sdf.ycol == 'y' @@ -110,8 +114,9 @@ def test_render_passthrough(): # Basic tests that both sdf.render() and render(sdf) return the same plots # 2D dataset - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'P': [1, 1], 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]}) - sdf = SarracenDataFrame(df) + data = {'x': [3, 6], 'y': [5, 1], 'P': [1, 1], + 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data) fig1, ax1 = plt.subplots() fig2, ax2 = plt.subplots() @@ -128,9 +133,10 @@ def test_render_passthrough(): assert repr(ax1) == repr(ax2) # 3D dataset - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], 'h': [1, 1], 'Ax': [5, 3], 'Ay': [2, 3], - 'Az': [1, -1], 'rho': [1, 1], 'm': 
[1, 1]}) - sdf = SarracenDataFrame(df) + data = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'P': [1, 1], + 'h': [1, 1], 'Ax': [5, 3], 'Ay': [2, 3], + 'Az': [1, -1], 'rho': [1, 1], 'm': [1, 1]} + sdf = SarracenDataFrame(data) fig1, ax1 = plt.subplots() fig2, ax2 = plt.subplots() @@ -151,24 +157,30 @@ def test_calc_density(): # Tests that the density calculation is working as intended. # 2D Data - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'h': [0.00683, 4.2166]}) + data = {'x': [3, 6], 'y': [5, 1], 'h': [0.00683, 4.2166]} params = {'mass': 89.3452, 'hfact': 1.2} - sdf = SarracenDataFrame(df, params) + sdf = SarracenDataFrame(data, params) sdf.calc_density() - assert sdf['rho'][0] == sdf.params['mass'] * (sdf.params['hfact'] / sdf['h'][0])**2 - assert sdf['rho'][1] == sdf.params['mass'] * (sdf.params['hfact'] / sdf['h'][1])**2 + rho_0 = sdf.params['mass'] * (sdf.params['hfact'] / sdf['h'][0])**2 + rho_1 = sdf.params['mass'] * (sdf.params['hfact'] / sdf['h'][1])**2 + + assert sdf['rho'][0] == rho_0 + assert sdf['rho'][1] == rho_1 # 3D Data - df = pd.DataFrame({'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'h': [0.0234, 7.3452]}) + data = {'x': [3, 6], 'y': [5, 1], 'z': [2, 1], 'h': [0.0234, 7.3452]} params = {'mass': 63.2353, 'hfact': 1.2} - sdf = SarracenDataFrame(df, params) + sdf = SarracenDataFrame(data, params) sdf.calc_density() - assert sdf['rho'][0] == sdf.params['mass'] * (sdf.params['hfact'] / sdf['h'][0])**3 - assert sdf['rho'][1] == sdf.params['mass'] * (sdf.params['hfact'] / sdf['h'][1])**3 + rho_0 = sdf.params['mass'] * (sdf.params['hfact'] / sdf['h'][0])**3 + rho_1 = sdf.params['mass'] * (sdf.params['hfact'] / sdf['h'][1])**3 + + assert sdf['rho'][0] == rho_0 + assert sdf['rho'][1] == rho_1 def test_centre_of_mass(): @@ -188,4 +200,3 @@ def test_centre_of_mass(): params={'mass': 3.2e-4}) assert sdf.centre_of_mass() == [0.0, 0.0, 0.0] - From e9f935b3838e36e610d3b19e42dbf96397191553 Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Fri, 19 Jul 2024 11:21:45 
-0230 Subject: [PATCH 04/12] lint sarracen dataframe --- sarracen/sarracen_dataframe.py | 399 +++++++++++++++++++++------------ 1 file changed, 259 insertions(+), 140 deletions(-) diff --git a/sarracen/sarracen_dataframe.py b/sarracen/sarracen_dataframe.py index 1c36f04..3721045 100644 --- a/sarracen/sarracen_dataframe.py +++ b/sarracen/sarracen_dataframe.py @@ -25,39 +25,46 @@ class SarracenDataFrame(DataFrame): """ A SarracenDataFrame is a pandas DataFrame with support for SPH data. - A SarracenDataFrame is a subclass of the pandas DataFrame class designed to hold SPH particle - data. Global simulation values are stored in ``params``, which is a standard Python dictionary. + A SarracenDataFrame is a subclass of the pandas DataFrame class designed to + hold SPH particle data. Global simulation values are stored in ``params``, + which is a standard Python dictionary. - Interpolation and rendering functionality requires (at a minimum) particle positions, smoothing - lengths and masses. SarracenDataFrames will attempt to identify columns which hold these data. - For uniform, constant mass particles, the particle mass can be specified in the ``params`` - dictionary. + Interpolation and rendering functionality requires (at a minimum) particle + positions, smoothing lengths and masses. SarracenDataFrames will attempt to + identify columns which hold these data. For uniform, constant mass + particles, the particle mass can be specified in the ``params`` dictionary. 
""" - _internal_names = pd.DataFrame._internal_names + ['_xcol', '_ycol', '_zcol', - '_mcol', '_rhocol', '_hcol', - '_vxcol', '_vycol', '_vzcol'] + _internal_names = pd.DataFrame._internal_names + ['_xcol', '_ycol', + '_zcol', '_mcol', + '_rhocol', '_hcol', + '_vxcol', '_vycol', + '_vzcol'] _internal_names_set = set(_internal_names) _metadata = ['_params', '_units', '_kernel'] def __init__(self, data=None, params=None, *args, **kwargs): """ - Construct a SarracenDataFrame from a NumPy array, dictionary, DataFrame or Iterable object. + Construct a SarracenDataFrame from a NumPy array, dictionary, DataFrame + or Iterable object. Parameters ---------- data : ndarray, Iterable, DataFrame, or dict. - Raw particle data which is passed to the pandas DataFrame constructor. Data can be specified - in a dictionary, NumPy array or another DataFrame. + Raw particle data which is passed to the pandas DataFrame + constructor. Data can be specified in a dictionary, NumPy array or + another DataFrame. params : dict, optional - Global parameters from the simulation (time, hfact, etc). If constant, uniform mass particles - are used, then the key ``mass`` stores the particle mass (rather than specifying per particle). + Global parameters from the simulation (time, hfact, etc). If + constant, uniform mass particles are used, then the key ``mass`` + stores the particle mass (rather than specifying per particle). *args : tuple, optional Additional arguments to pass to the pandas DataFrame constructor. **kwargs : dict, optional - Additional keyword arguments to pass to the pandas DataFrame constructor. + Additional keyword arguments to pass to the pandas DataFrame + constructor. See Also -------- @@ -86,7 +93,8 @@ def __init__(self, data=None, params=None, *args, **kwargs): 1 2.0 2.0 3.5 2 3.0 2.0 4.0 - Constant mass particles can specify mass in the ``params`` dictionary, rather than per particle. 
+ Constant mass particles can specify mass in the ``params`` dictionary, + rather than per particle. >>> particles = {'x': [1.0, 2.0, 3.0], 'y': [2.0, 2.0, 2.0], 'h': [3.0, 3.5, 4.0]} >>> params = {'mass': 0.2, 'hfact': 1.2} @@ -124,16 +132,19 @@ def _identify_special_columns(self): """ Identify special columns commonly used in analysis functions. - Identify which columns in this dataset correspond to important data columns commonly used in - analysis functions. The columns which contain x, y, and z positional values are detected and - set to the `xcol`, `ycol`, and `zcol` values. As well, the columns containing smoothing length, - mass, and density information are identified and set to the `hcol`, `mcol`, and `rhocol`. + Identify which columns in this dataset correspond to important data + columns commonly used in analysis functions. The columns which contain + x, y, and z positional values are detected and set to the `xcol`, + `ycol`, and `zcol` values. As well, the columns containing smoothing + length, mass, and density information are identified and set to the + `hcol`, `mcol`, and `rhocol`. - If the x or y columns cannot be found, they are set to be the first two columns by default. - If the z, smoothing length, mass, or density columns cannot be sound, the corresponding column - label is set to `None`. + If the x or y columns cannot be found, they are set to be the first two + columns by default. If the z, smoothing length, mass, or density + columns cannot be sound, the corresponding column label is set to + `None`. """ - # First look for 'x', then 'rx', and then fallback to the first column. + # First look for 'x', then 'rx', and then default to the first column. if 'x' in self.columns: self.xcol = 'x' elif 'rx' in self.columns: @@ -141,7 +152,7 @@ def _identify_special_columns(self): elif len(self.columns) > 0: self.xcol = self.columns[0] - # First look for 'y', then 'ry', and then fallback to the second column. 
+ # First look for 'y', then 'ry', and then default to the second column. if 'y' in self.columns: self.ycol = 'y' elif 'ry' in self.columns: @@ -149,7 +160,7 @@ def _identify_special_columns(self): elif len(self.columns) > 1: self.ycol = self.columns[1] - # First look for 'z', then 'rz', and then assume that data is 2 dimensional. + # First look for 'z', then 'rz', and then assume data is 2-dimensional. if 'z' in self.columns: self.zcol = 'z' elif 'rz' in self.columns: @@ -179,10 +190,9 @@ def _identify_special_columns(self): if 'vz' in self.columns: self.vzcol = 'vz' - def create_mass_column(self): """ - Create a new column 'm', copied from the 'massoftype' dataset parameter. + Create a new column 'm', copied from the 'massoftype' parameter. Intended for use with Phantom data dumps. @@ -192,7 +202,8 @@ def create_mass_column(self): If the 'massoftype' column does not exist in `params`. """ if 'mass' not in self.params: - raise KeyError("'mass' value does not exist in this SarracenDataFrame.") + raise KeyError("'mass' value does not exist in this " + "SarracenDataFrame.") self['m'] = self.params['mass'] self.mcol = 'm' @@ -205,21 +216,30 @@ def calc_density(self): .. math:: - \\rho = m \\left( \\frac{h_{\\rm fact}}{h} \\right)^{n_{\\rm dim}} + \\rho = m \\left( \\frac{h_{\\rm fact}}{h} + \\right)^{n_{\\rm dim}} - where :math:`m` is the particle mass, :math:`h` is the smoothing length, and :math:`h_{\\rm fact}` defines the ratio of smoothing length to particle spacing. Smoothing lengths are taken from the smoothing length column, particle masses from the mass column if present, or params if not, and hfact from params. + where :math:`m` is the particle mass, :math:`h` is the smoothing + length, and :math:`h_{\\rm fact}` defines the ratio of smoothing length + to particle spacing. Smoothing lengths are taken from the smoothing + length column, particle masses from the mass column if present, or + params if not, and hfact from params. 
Raises ------ KeyError - If the `hcol` column does not exist, there is no `mcol` column or `mass` in params, or if `hfact` does not exist in `params`. + If the `hcol` column does not exist, there is no `mcol` column or + `mass` in params, or if `hfact` does not exist in `params`. """ if not {self.hcol}.issubset(self.columns): - raise KeyError('Missing smoothing length data in this SarracenDataFrame') + raise KeyError('Missing smoothing length data in this ' + 'SarracenDataFrame') if 'hfact' not in self.params: - raise KeyError('hfact missing from params in this SarracenDataFrame.') - if not {self.mcol}.issubset(self.columns) and 'mass' not in self.params: - raise KeyError('Missing particle mass data in this SarracenDataFrame.') + raise KeyError('hfact missing from params in this ' + 'SarracenDataFrame.') + if self.mcol not in self.columns and 'mass' not in self.params: + raise KeyError('Missing particle mass data in this ' + 'SarracenDataFrame.') # prioritize using mass per particle, if present if {self.mcol}.issubset(self.columns): @@ -227,7 +247,8 @@ def calc_density(self): else: mass = self.params['mass'] - self['rho'] = (self.params['hfact'] / self[self.hcol]) ** (self.get_dim()) * mass + hfact = self.params['hfact'] + self['rho'] = mass * (hfact / self[self.hcol])**self.get_dim() self.rhocol = 'rho' def centre_of_mass(self): @@ -251,120 +272,216 @@ def centre_of_mass(self): return [com_x * mass, com_y * mass, com_z * mass] @_copy_doc(render) - def render(self, target: str, x: str = None, y: str = None, z: str = None, xsec: float = None, - kernel: BaseKernel = None, x_pixels: int = None, y_pixels: int = None, xlim: Tuple[float, float] = None, - ylim: Tuple[float, float] = None, cmap: Union[str, Colormap] = 'gist_heat', cbar: bool = True, - cbar_kws: dict = {}, cbar_ax: Axes = None, ax: Axes = None, exact: bool = None, backend: str = None, - integral_samples: int = 1000, rotation: Union[np.ndarray, list, Rotation] = None, - rot_origin: Union[np.ndarray, list, 
str] = None, log_scale: bool = None, dens_weight: bool = None, - normalize: bool = False, hmin: bool = False, **kwargs) -> Axes: - return render(self, target, x, y, z, xsec, kernel, x_pixels, y_pixels, xlim, ylim, cmap, cbar, cbar_kws, - cbar_ax, ax, exact, backend, integral_samples, rotation, rot_origin, log_scale, dens_weight, - normalize, hmin, **kwargs) + def render(self, + target: str, + x: str = None, + y: str = None, + z: str = None, + xsec: float = None, + kernel: BaseKernel = None, + x_pixels: int = None, + y_pixels: int = None, + xlim: Tuple[float, float] = None, + ylim: Tuple[float, float] = None, + cmap: Union[str, Colormap] = 'gist_heat', + cbar: bool = True, + cbar_kws: dict = {}, + cbar_ax: Axes = None, + ax: Axes = None, + exact: bool = None, + backend: str = None, + integral_samples: int = 1000, + rotation: Union[np.ndarray, list, Rotation] = None, + rot_origin: Union[np.ndarray, list, str] = None, + log_scale: bool = None, + dens_weight: bool = None, + normalize: bool = False, + hmin: bool = False, + **kwargs) -> Axes: + return render(self, target, x, y, z, xsec, kernel, x_pixels, y_pixels, + xlim, ylim, cmap, cbar, cbar_kws, cbar_ax, ax, exact, + backend, integral_samples, rotation, rot_origin, + log_scale, dens_weight, normalize, hmin, **kwargs) @_copy_doc(lineplot) - def lineplot(self, target: str, x: str = None, y: str = None, z: str = None, - kernel: BaseKernel = None, pixels: int = 512, xlim: Tuple[float, float] = None, - ylim: Tuple[float, float] = None, zlim: Tuple[float, float] = None, ax: Axes = None, - backend: str = None, log_scale: bool = False, dens_weight: bool = None, normalize: bool = False, - hmin: bool = False, **kwargs): - return lineplot(self, target, x, y, z, kernel, pixels, xlim, ylim, zlim, ax, backend, log_scale, dens_weight, - normalize, hmin, **kwargs) + def lineplot(self, + target: str, + x: str = None, + y: str = None, + z: str = None, + kernel: BaseKernel = None, + pixels: int = 512, + xlim: Tuple[float, float] = 
None, + ylim: Tuple[float, float] = None, + zlim: Tuple[float, float] = None, + ax: Axes = None, + backend: str = None, + log_scale: bool = False, + dens_weight: bool = None, + normalize: bool = False, + hmin: bool = False, + **kwargs): + return lineplot(self, target, x, y, z, kernel, pixels, xlim, ylim, + zlim, ax, backend, log_scale, dens_weight, normalize, + hmin, **kwargs) @_copy_doc(streamlines) - def streamlines(self, target: Union[Tuple[str, str], Tuple[str, str, str]], x: str = None, y: str = None, - z: str = None, xsec: int = None, kernel: BaseKernel = None, integral_samples: int = 1000, - rotation: Union[np.ndarray, list, Rotation] = None, rot_origin: Union[np.ndarray, list, str] = None, - x_pixels: int = None, y_pixels: int = None, xlim: Tuple[float, float] = None, - ylim: Tuple[float, float] = None, ax: Axes = None, exact: bool = None, backend: str = None, - dens_weight: bool = False, normalize: bool = False, hmin: bool = False, **kwargs) -> Axes: - return streamlines(self, target, x, y, z, xsec, kernel, integral_samples, rotation, rot_origin, x_pixels, - y_pixels, xlim, ylim, ax, exact, backend, dens_weight, normalize, hmin, **kwargs) + def streamlines(self, + target: Union[Tuple[str, str], Tuple[str, str, str]], + x: str = None, + y: str = None, + z: str = None, + xsec: int = None, + kernel: BaseKernel = None, + integral_samples: int = 1000, + rotation: Union[np.ndarray, list, Rotation] = None, + rot_origin: Union[np.ndarray, list, str] = None, + x_pixels: int = None, + y_pixels: int = None, + xlim: Tuple[float, float] = None, + ylim: Tuple[float, float] = None, + ax: Axes = None, + exact: bool = None, + backend: str = None, + dens_weight: bool = False, + normalize: bool = False, + hmin: bool = False, + **kwargs) -> Axes: + return streamlines(self, target, x, y, z, xsec, kernel, + integral_samples, rotation, rot_origin, x_pixels, + y_pixels, xlim, ylim, ax, exact, backend, + dens_weight, normalize, hmin, **kwargs) @_copy_doc(arrowplot) - def 
arrowplot(self, target: Union[Tuple[str, str], Tuple[str, str, str]], x: str = None, y: str = None, - z: str = None, xsec: int = None, kernel: BaseKernel = None, integral_samples: int = 1000, - rotation: Union[np.ndarray, list, Rotation] = None, rot_origin: Union[np.ndarray, list, str] = None, - x_arrows: int = None, y_arrows: int = None, xlim: Tuple[float, float] = None, - ylim: Tuple[float, float] = None, ax: Axes = None, qkey: bool = True, qkey_kws: dict = None, - exact: bool = None, backend: str = None, dens_weight: bool = None, normalize: bool = False, - hmin: bool = False, **kwargs) -> Axes: - return arrowplot(self, target, x, y, z, xsec, kernel, integral_samples, rotation, rot_origin, x_arrows, - y_arrows, xlim, ylim, ax, qkey, qkey_kws, exact, backend, dens_weight, normalize, hmin, - **kwargs) - - def sph_interpolate(self, target: str, x: str = None, y: str = None, z: str = None, kernel: BaseKernel = None, - rotation: Union[np.ndarray, list, Rotation] = None, rot_origin: Union[np.ndarray, list, str] = None, - x_pixels: int = None, y_pixels: int = None, z_pixels: int = None, xlim: Tuple[float, float] = None, - ylim: Tuple[float, float] = None, zlim: Tuple[float, float] = None, - exact: bool = None, backend: str = 'cpu', dens_weight: bool = False, - normalize: bool = False, hmin: bool = False) -> np.ndarray: - """ - Interpolate this data to a 2D or 3D grid, depending on the dimensionality of the data. 
+ def arrowplot(self, + target: Union[Tuple[str, str], Tuple[str, str, str]], + x: str = None, + y: str = None, + z: str = None, + xsec: int = None, + kernel: BaseKernel = None, + integral_samples: int = 1000, + rotation: Union[np.ndarray, list, Rotation] = None, + rot_origin: Union[np.ndarray, list, str] = None, + x_arrows: int = None, + y_arrows: int = None, + xlim: Tuple[float, float] = None, + ylim: Tuple[float, float] = None, + ax: Axes = None, + qkey: bool = True, + qkey_kws: dict = None, + exact: bool = None, + backend: str = None, + dens_weight: bool = None, + normalize: bool = False, + hmin: bool = False, + **kwargs) -> Axes: + return arrowplot(self, target, x, y, z, xsec, kernel, integral_samples, + rotation, rot_origin, x_arrows, y_arrows, xlim, ylim, + ax, qkey, qkey_kws, exact, backend, dens_weight, + normalize, hmin, **kwargs) + + def sph_interpolate(self, + target: str, + x: str = None, + y: str = None, + z: str = None, + kernel: BaseKernel = None, + rotation: Union[np.ndarray, list, Rotation] = None, + rot_origin: Union[np.ndarray, list, str] = None, + x_pixels: int = None, + y_pixels: int = None, + z_pixels: int = None, + xlim: Tuple[float, float] = None, + ylim: Tuple[float, float] = None, + zlim: Tuple[float, float] = None, + exact: bool = None, + backend: str = 'cpu', + dens_weight: bool = False, + normalize: bool = False, + hmin: bool = False) -> np.ndarray: + """ + Interpolate this data to a 2D or 3D grid, depending on the + dimensionality of the data. Parameters ---------- target: str The column label of the target data. x, y, z: str - The column labels of the directional data to interpolate over. Defaults to the x, y, and z columns - detected in `data`. + The column labels of the directional data to interpolate over. + Defaults to the x, y, and z columns detected in `data`. kernel: BaseKernel - The kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + The kernel to use for smoothing the target data. 
Defaults to the + kernel specified in `data`. rotation: array_like or SciPy Rotation, optional - The rotation to apply to the data before interpolation. If defined as an array, the - order of rotations is [z, y, x] in degrees. Only applies to 3D datasets. + The rotation to apply to the data before interpolation. If defined + as an array, the order of rotations is [z, y, x] in degrees. Only + applies to 3D datasets. rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around + the centre of mass. If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. x_pixels, y_pixels, z_pixels: int, optional - Number of pixels in the output image in the x, y & z directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x, y & z directions. + Default values are chosen to keep a consistent aspect ratio. xlim, ylim, zlim: tuple of float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x`, `y` and `z`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x`, `y` + and `z`. exact: bool - Whether to use exact interpolation of the data. Only applies to 2D datasets. + Whether to use exact interpolation of the data. Only applies to + 2D datasets. 
backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. + The computation backend to use when interpolating this data. + Defaults to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A + manually specified backend in `data` will override the default. dens_weight: bool - If True, the target will be multiplied by density. Defaults to False. + If True, the target will be multiplied by density. Defaults to + False. normalize: bool - If True, will normalize the interpolation. Defaults to False (this may change in future versions). + If True, will normalize the interpolation. Defaults to False (this + may change in future versions). hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one + grid cell / pixel. Defaults to False (this may change in a future + verison). Returns ------- ndarray (n-Dimensional) - The interpolated output image, in a multi-dimensional numpy array. The number of dimensions match the - dimensions of the data. Dimensions are structured in reverse order, where (x, y, z) -> [z, y, x]. + The interpolated output image, in a multi-dimensional numpy array. + The number of dimensions match the dimensions of the data. + Dimensions are structured in reverse order, where (x, y, z) -> + [z, y, x]. Raises ------- ValueError - If `x_pixels`, `y_pixels` or `z_pixels` are less than or equal to zero, or - if the specified `x`, `y` and `z` minimum and maximum values result in an invalid region, or - if `data` is not 2 or 3 dimensional. 
+ If `x_pixels`, `y_pixels` or `z_pixels` are less than or equal to + zero, or if the specified `x`, `y` and `z` minimum and maximum + values result in an invalid region, or if `data` is not 2 or + 3-dimensional. KeyError - If `target`, `x`, `y`, `z`, mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, `z`, mass, density, or smoothing length + columns do not exist in `data`. """ if self.get_dim() == 2: if xlim is None: xlim = (None, None) if ylim is None: ylim = (None, None) - return interpolate_2d(self, target, x, y, kernel, x_pixels, y_pixels, xlim, ylim, exact, backend, + return interpolate_2d(self, target, x, y, kernel, x_pixels, + y_pixels, xlim, ylim, exact, backend, dens_weight, normalize, hmin) elif self.get_dim() == 3: - return interpolate_3d_grid(self, target, x, y, z, kernel, rotation, rot_origin, x_pixels, y_pixels, - z_pixels, xlim, ylim, zlim, backend, dens_weight, normalize, hmin) + return interpolate_3d_grid(self, target, x, y, z, kernel, rotation, + rot_origin, x_pixels, y_pixels, + z_pixels, xlim, ylim, zlim, backend, + dens_weight, normalize, hmin) @property def params(self): @@ -401,8 +518,8 @@ def xcol(self): """ str : Label of the column which contains x-positional data. - If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. """ return self._xcol @@ -416,8 +533,8 @@ def ycol(self): """ str : Label of the column which contains y-positional data. - If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. """ return self._ycol @@ -431,8 +548,8 @@ def zcol(self): """ str : Label of the column which contains z-positional data. 
- If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. """ return self._zcol @@ -446,8 +563,8 @@ def hcol(self): """ str : Label of the column which contains smoothing length data. - If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. """ return self._hcol @@ -461,8 +578,8 @@ def mcol(self): """ str : Label of the column which contains particle mass data. - If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. """ return self._mcol @@ -476,8 +593,8 @@ def rhocol(self): """ str : Label of the column which contains particle density data. - If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. """ return self._rhocol @@ -486,14 +603,14 @@ def rhocol(self, new_col: str): if new_col in self or new_col is None: self._rhocol = new_col - @property def vxcol(self): """ - str : Label of the column which contains the x-component of the velocity. + str : Label of the column which contains the x-component of the + velocity. - If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. 
""" return self._vxcol @@ -505,10 +622,11 @@ def vxcol(self, new_col: str): @property def vycol(self): """ - str : Label of the column which contains the y-component of the velocity. + str : Label of the column which contains the y-component of the + velocity. - If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. """ return self._vycol @@ -517,14 +635,14 @@ def vycol(self, new_col: str): if new_col in self or new_col is None: self._vycol = new_col - @property def vzcol(self): """ - str : Label of the column which contains the z-component of the velocity. + str : Label of the column which contains the z-component of the + velocity. - If this is set to a column which does not exist in the dataset, the column - label will remain set to the old value. + If this is set to a column which does not exist in the dataset, the + column label will remain set to the old value. """ return self._vzcol @@ -533,14 +651,14 @@ def vzcol(self, new_col: str): if new_col in self or new_col is None: self._vzcol = new_col - @property def kernel(self): """ - BaseKernel : The default kernel to use for interpolation operations with this dataset. + BaseKernel : The default kernel to use for interpolation operations + with this dataset. - If this is set to an object which is not a BaseKernel, the kernel will remain set as - the old value. + If this is set to an object which is not a BaseKernel, the kernel will + remain set as the old value. """ return self._kernel @@ -552,7 +670,8 @@ def kernel(self, new_kernel: BaseKernel): @property def backend(self): """ - ['cpu', 'gpu'] : The default backend to use for interpolation operations with this dataset. + ['cpu', 'gpu'] : The default backend to use for interpolation + operations with this dataset. 
'cpu' - Best for small datasets, or cases where a GPU is not available. 'gpu' - Best for large datasets, with a CUDA-enabled GPU. From 86ca76f125d0210958605b0c6b1a42721ccca16b Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Fri, 19 Jul 2024 13:37:20 -0230 Subject: [PATCH 05/12] lint render --- .flake8 | 4 +- sarracen/render.py | 455 +++++++++++++++++++++++++++------------------ 2 files changed, 275 insertions(+), 184 deletions(-) diff --git a/.flake8 b/.flake8 index 70134c1..be997a4 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,5 @@ [flake8] per-file-ignores = - sarracen/__init__.py:F401 \ No newline at end of file + sarracen/__init__.py:F401 + sarracen/render.py:F821 + sarracen/kernels/__init__.py:F401 \ No newline at end of file diff --git a/sarracen/render.py b/sarracen/render.py index 7fc9e56..c7d68c6 100644 --- a/sarracen/render.py +++ b/sarracen/render.py @@ -1,6 +1,7 @@ """ -Provides several rendering functions which produce matplotlib plots of SPH data. -These functions act as interfaces to interpolation functions within interpolate.py. +Provides several rendering functions which produce matplotlib plots of SPH +data. These functions act as interfaces to interpolation functions within +interpolate.py. These functions can be accessed directly, for example: render_2d(data, target) @@ -17,8 +18,9 @@ from matplotlib.axes import Axes from matplotlib.colors import Colormap, LogNorm -from .interpolate import interpolate_2d_line, interpolate_2d, interpolate_3d_proj, interpolate_3d_cross, \ - interpolate_3d_vec, interpolate_3d_cross_vec, interpolate_2d_vec, interpolate_3d_line +from .interpolate import interpolate_2d_line, interpolate_2d, \ + interpolate_3d_proj, interpolate_3d_cross, interpolate_3d_vec, \ + interpolate_3d_cross_vec, interpolate_2d_vec, interpolate_3d_line from .kernels import BaseKernel @@ -36,7 +38,8 @@ def _default_axes(data, x, y): Returns ------- x, y: str - The directional column labels to use for rendering. 
Defaults to the x-column detected in `data` + The directional column labels to use for rendering. Defaults to the + x-column detected in `data` """ if x is None: x = data.xcol @@ -46,9 +49,12 @@ def _default_axes(data, x, y): return x, y -def _default_bounds(data, x, y, xlim, ylim) -> Tuple[Tuple[float, float], Tuple[float, float]]: +def _default_bounds(data, x, y, + xlim, + ylim) -> Tuple[Tuple[float, float], Tuple[float, float]]: """ - Utility function to determine the 2-dimensional boundaries to use in 2D rendering. + Utility function to determine the 2-dimensional boundaries to use in 2D + rendering. Parameters ---------- @@ -57,15 +63,17 @@ def _default_bounds(data, x, y, xlim, ylim) -> Tuple[Tuple[float, float], Tuple[ x, y: str The directional column labels that will be used for rendering. xlim, ylim: float - The minimum and maximum values passed to the render function, in particle data space. + The minimum and maximum values passed to the render function, in + particle data space. Returns ------- xlim, ylim: tuple of float - The minimum and maximum values to use for rendering, in particle data space. Defaults - to the maximum and minimum values of `x` and `y`, snapped to the nearest integer. + The minimum and maximum values to use for rendering, in particle data + space. Defaults to the maximum and minimum values of `x` and `y`, + snapped to the nearest integer. """ - # boundaries of the plot default to the maximum & minimum values of the data. + # boundaries of the plot default to the max & min values of the data. 
x_min = xlim[0] if xlim is not None and xlim[0] is not None else None y_min = ylim[0] if ylim is not None and ylim[0] is not None else None x_max = xlim[1] if xlim is not None and xlim[1] is not None else None @@ -81,25 +89,34 @@ def _default_bounds(data, x, y, xlim, ylim) -> Tuple[Tuple[float, float], Tuple[ def _set_pixels(x_pixels, y_pixels, xlim, ylim, default): """ - Utility function to determine the number of pixels to interpolate over in 2D interpolation. + Utility function to determine the number of pixels to interpolate over in + 2D interpolation. Parameters ---------- x_pixels, y_pixels: int - The number of pixels in the x & y directions passed to the interpolation function. + The number of pixels in the x & y directions passed to the + interpolation function. xlim, ylim: tuple of float - The minimum and maximum values to use in interpolation, in particle data space. + The minimum and maximum values to use in interpolation, in particle + data space. Returns ------- x_pixels, y_pixels - The number of pixels in the x & y directions to use in 2D interpolation. + The number of pixels in the x & y directions to use in 2D + interpolation. """ - # set # of pixels to maintain an aspect ratio that is the same as the underlying bounds of the data. + # set # of pixels to maintain an aspect ratio that is the same as the + # underlying bounds of the data. + + dx = xlim[1] - xlim[0] + dy = ylim[1] - ylim[0] + if x_pixels is None and y_pixels is None: x_pixels = default if x_pixels is None: - x_pixels = int(np.rint(y_pixels * ((xlim[1] - xlim[0]) / (ylim[1] - ylim[0])))) + x_pixels = int(np.rint(y_pixels * (dx / dy))) if y_pixels is None: - y_pixels = int(np.rint(x_pixels * ((ylim[1] - ylim[0]) / (xlim[1] - xlim[0])))) + y_pixels = int(np.rint(x_pixels * (dy / dx))) return x_pixels, y_pixels @@ -141,11 +158,14 @@ def render(data: 'SarracenDataFrame', target: str Column label of the target variable. x, y, z: str, optional - Column labels of the x, y & z directional axes. 
Defaults to the columns detected in `data`. + Column labels of the x, y & z directional axes. Defaults to the columns + detected in `data`. xsec: float, optional. - For a 3D dataset, the z value to take a cross-section at. If none, column interpolation is performed. + For a 3D dataset, the z value to take a cross-section at. If none, + column interpolation is performed. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. x_pixels, y_pixels: int, optional Number of pixels present in the final image. xlim, ylim: tuple of float, optional @@ -159,37 +179,46 @@ def render(data: 'SarracenDataFrame', cbar_kws: dict, optional Keyword arguments to pass to matplotlib.figure.Figure.colorbar(). cbar_ax: Axes - Axes to draw the colorbar in, if not provided then space will be taken from the main Axes. + Axes to draw the colorbar in, if not provided then space will be taken + from the main Axes. ax: Axes The main axes in which to draw the rendered image. exact: bool - Whether to use exact interpolation of the data. For cross-sections this is ignored. Defaults to False. + Whether to use exact interpolation of the data. For cross-sections this + is ignored. Defaults to False. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. integral_samples: int, optional - If using column interpolation, the number of sample points to take when approximating the 2D column kernel. 
+        If using column interpolation, the number of sample points to take when
+        approximating the 2D column kernel.
     rotation: array_like or SciPy Rotation, optional
-        The rotation to apply to the data before interpolation. If defined as an array, the
-        order of rotations is [z, y, x] in degrees. Only applies to 3D datasets.
+        The rotation to apply to the data before interpolation. If defined as
+        an array, the order of rotations is [z, y, x] in degrees. Only applies
+        to 3D datasets.
     rot_origin: array_like or ['com', 'midpoint'], optional
-        Point of rotation of the data. Only applies to 3D datasets. If array_like,
-        then the [x, y, z] coordinates specify the point around which the data is
-        rotated. If 'com', then data is rotated around the centre of mass. If
-        'midpoint', then data is rotated around the midpoint, that is, min + max
-        / 2. Defaults to the midpoint.
+        Point of rotation of the data. Only applies to 3D datasets. If
+        array_like, then the [x, y, z] coordinates specify the point around
+        which the data is rotated. If 'com', then data is rotated around the
+        centre of mass. If 'midpoint', then data is rotated around the
+        midpoint, that is, min + max / 2. Defaults to the midpoint.
     log_scale: bool
         Whether to use a logarithmic scale for color coding.
     dens_weight: bool
-        If True, will plot the target mutliplied by the density. Defaults to True for column-integrated views,
-        when the target is not density, and False for everything else.
+        If True, will plot the target multiplied by the density. Defaults to
+        True for column-integrated views, when the target is not density, and
+        False for everything else.
     normalize: bool
-        If True, will normalize the interpolation. Defaults to False (this may change in future versions).
+        If True, will normalize the interpolation. Defaults to False (this may
+        change in future versions).
     hmin: bool
-        If True, a minimum smoothing length of 0.5 * pixel size will be imposed. 
This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). cototation: list, optional - Moves particles to the co-rotating frame of two location. corotation contains two lists which correspond to the two x, y, z coordinates + Moves particles to the co-rotating frame of two location. corotation + contains two lists which correspond to the two x, y, z coordinates kwargs: other keyword arguments Keyword arguments to pass to ax.imshow. @@ -201,48 +230,52 @@ def render(data: 'SarracenDataFrame', Raises ------ ValueError - If `x_pixels` or `y_pixels` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximums result in an invalid region. + If `x_pixels` or `y_pixels` are less than or equal to zero, or if the + specified `x` and `y` minimum and maximums result in an invalid region. KeyError - If `target`, `x`, `y`, mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, mass, density, or smoothing length columns do + not exist in `data`. Notes ----- - The standard render will interpolate the target quantity, :math:`A`, from the particles to a pixel - grid using the following equation: + The standard render will interpolate the target quantity, :math:`A`, from + the particles to a pixel grid using the following equation: .. math:: A_{pixel} = \\sum_b \\frac{m_b}{\\rho_b} A_b W_{ab}(h_b) - where :math:`m` is the mass, :math:`\\rho` is the density, and :math:`W` is the smoothing kernel with - smoothing length, :math:`h`. + where :math:`m` is the mass, :math:`\\rho` is the density, and :math:`W` is + the smoothing kernel with smoothing length, :math:`h`. 
- Normalized interpolation divides the above summation by an interpolation of a constant scalar field - equal to 1: + Normalized interpolation divides the above summation by an interpolation of + a constant scalar field equal to 1: .. math:: - A_{pixel} = \\frac{\\sum_b \\frac{m_b}{\\rho_b} A_b W_{ab}(h_b)}{\\sum_b \\frac{m_b}{\\rho_b} W_{ab}(h_b)} + A_{pixel} = \\frac{\\sum_b \\frac{m_b}{\\rho_b} A_b W_{ab}(h_b)} + {\\sum_b \\frac{m_b}{\\rho_b} W_{ab}(h_b)} - In theory, the denominator will be equal to 1 and dividing by 1 has no impact. In practice, the - particle arrangement and the smoothing kernel affects the quality of interpolation. Normalizing by - this approximation of 1 helps to account for this. + In theory, the denominator will be equal to 1 and dividing by 1 has no + impact. In practice, the particle arrangement and the smoothing kernel + affects the quality of interpolation. Normalizing by this approximation of + 1 helps to account for this. - For when to use normalized interpolation, the advice given by Splash is recommended: in general use - it for smoother renderings, but avoid when there are free surfaces, as it can cause them to be - over-exaggerated. + For when to use normalized interpolation, the advice given by Splash is + recommended: in general use it for smoother renderings, but avoid when + there are free surfaces, as it can cause them to be over-exaggerated. - Density-weighted interpolation will interpolate the quantity :math:`\\rho A`, that is, the target - :math:`A` multiplied by the density, :math:`\\rho`. If normalize=True, then density-weighted - interpolation will be normalized by the density. + Density-weighted interpolation will interpolate the quantity + :math:`\\rho A`, that is, the target :math:`A` multiplied by the density, + :math:`\\rho`. If normalize=True, then density-weighted interpolation will + be normalized by the density. 
- Column-integrated views of 3D data (i.e., xsec=None) will calculate the following: + Column-integrated views of 3D data (i.e., xsec=None) will calculate the + following: .. math:: - A_{pixel} = \\sum_b \\frac{m_b}{\\rho_b} A_b \int W_{ab}(h_b) dz , + A_{pixel} = \\sum_b \\frac{m_b}{\\rho_b} A_b \\int W_{ab}(h_b) dz , which uses the integral of the kernel along the chosen line of sight. """ @@ -259,14 +292,19 @@ def render(data: 'SarracenDataFrame', interpolation_type = '3d' if interpolation_type == '2d': - img = interpolate_2d(data, target, x, y, kernel, x_pixels, y_pixels, xlim, ylim, exact, backend, dens_weight, + img = interpolate_2d(data, target, x, y, kernel, x_pixels, y_pixels, + xlim, ylim, exact, backend, dens_weight, normalize, hmin) elif interpolation_type == '3d_cross': - img = interpolate_3d_cross(data, target, x, y, z, xsec, kernel, corotation, rotation, - rot_origin, x_pixels, y_pixels, xlim, ylim, backend, dens_weight, normalize, hmin) + img = interpolate_3d_cross(data, target, x, y, z, xsec, kernel, + corotation, rotation, rot_origin, x_pixels, + y_pixels, xlim, ylim, backend, dens_weight, + normalize, hmin) elif interpolation_type == '3d': - img = interpolate_3d_proj(data, target, x, y, kernel, integral_samples, corotation, rotation, rot_origin, x_pixels, - y_pixels, xlim, ylim, exact, backend, dens_weight, normalize, hmin) + img = interpolate_3d_proj(data, target, x, y, kernel, integral_samples, + corotation, rotation, rot_origin, x_pixels, + y_pixels, xlim, ylim, exact, backend, + dens_weight, normalize, hmin) else: raise ValueError('`data` is not a valid number of dimensions.') @@ -279,7 +317,8 @@ def render(data: 'SarracenDataFrame', kwargs.setdefault("origin", 'lower') kwargs.setdefault("extent", [xlim[0], xlim[1], ylim[0], ylim[1]]) if log_scale: - kwargs.setdefault("norm", LogNorm(clip=True, vmin=kwargs.get('vmin'), vmax=kwargs.get('vmax'))) + kwargs.setdefault("norm", LogNorm(clip=True, vmin=kwargs.get('vmin'), + vmax=kwargs.get('vmax'))) 
kwargs.pop("vmin", None) kwargs.pop("vmax", None) @@ -297,7 +336,7 @@ def render(data: 'SarracenDataFrame', if cbar: colorbar = ax.figure.colorbar(graphic, cbar_ax, ax, **cbar_kws) - if 'label' not in cbar_kws : + if 'label' not in cbar_kws: label = target if data.get_dim() == 3 and xsec is None: label = f"column {label}" @@ -334,9 +373,11 @@ def lineplot(data: 'SarracenDataFrame', target: str Column label of the target variable. x, y, z: str, optional - Column labels of the x, y & z directional axes. Defaults to the columns detected in `data`. + Column labels of the x, y & z directional axes. Defaults to the columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. pixels: int, optional Number of samples taken across the x axis in the final plot. xlim, ylim, zlim: tuple of float, optional @@ -344,17 +385,21 @@ def lineplot(data: 'SarracenDataFrame', ax: Axes The main axes in which to draw the final plot. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. log_scale: bool Whether to use a logarithmic scale for color coding. dens_weight: bool - If True, will plot the target mutliplied by the density. Defaults to False. + If True, will plot the target mutliplied by the density. Defaults to + False. normalize: bool - If True, will normalize the interpolation. Defaults to False (this may change in future versions). + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). 
hmin: bool
-        If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle
-        contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison).
+        If True, a minimum smoothing length of 0.5 * pixel size will be
+        imposed. This ensures each particle contributes to at least one grid
+        cell / pixel. Defaults to False (this may change in a future version).
     kwargs: other keyword arguments
         Keyword arguments to pass to sns.lineplot.
 
@@ -366,19 +411,20 @@ def lineplot(data: 'SarracenDataFrame',
 
     Raises
     -------
     ValueError
-        If `x_pixels` or `y_pixels` are less than or equal to zero, or
-        if the specified `x` and `y` minimum and maximums result in an invalid region.
+        If `x_pixels` or `y_pixels` are less than or equal to zero, or if the
+        specified `x` and `y` minimum and maximums result in an invalid region.
     KeyError
-        If `target`, `x`, `y`, mass, density, or smoothing length columns do not
-        exist in `data`.
+        If `target`, `x`, `y`, mass, density, or smoothing length columns do
+        not exist in `data`.
 
""" if data.get_dim() == 2: - img = interpolate_2d_line(data, target, x, y, kernel, pixels, xlim, ylim, backend, dens_weight, normalize, - hmin) + img = interpolate_2d_line(data, target, x, y, kernel, pixels, xlim, + ylim, backend, dens_weight, normalize, hmin) else: - img = interpolate_3d_line(data, target, x, y, z, kernel, pixels, xlim, ylim, zlim, backend, dens_weight, - normalize, hmin) + img = interpolate_3d_line(data, target, x, y, z, kernel, pixels, xlim, + ylim, zlim, backend, dens_weight, normalize, + hmin) if isinstance(xlim, float) or isinstance(xlim, int): xlim = xlim, xlim @@ -389,29 +435,36 @@ def lineplot(data: 'SarracenDataFrame', xlim, ylim = _default_bounds(data, x, y, xlim, ylim) if data.get_dim() == 2: - upper_lim = np.sqrt((xlim[1] - xlim[0]) ** 2 + (ylim[1] - ylim[0]) ** 2) + upper_lim = np.sqrt((xlim[1] - xlim[0])**2 + (ylim[1] - ylim[0])**2) else: if z is None: z = data.zcol if z not in data.columns: - raise KeyError(f"z-directional column '{z}' does not exist in the provided dataset.") + raise KeyError(f"z-directional column '{z}' does not exist in the " + f"provided dataset.") if isinstance(zlim, float) or isinstance(zlim, int): zlim = zlim, zlim - z1 = data.loc[:, z].min() if zlim is None or zlim[0] is None else zlim[0] - z2 = data.loc[:, z].min() if zlim is None or zlim[1] is None else zlim[1] + zmin = data.loc[:, z].min() + z1 = zmin if zlim is None or zlim[0] is None else zlim[0] + z2 = zmin if zlim is None or zlim[1] is None else zlim[1] zlim = z2, z1 - upper_lim = np.sqrt((xlim[1] - xlim[0]) ** 2 + (ylim[1] - ylim[0]) ** 2 + (zlim[1] - zlim[0]) ** 2) + upper_lim = np.sqrt((xlim[1] - xlim[0])**2 + + (ylim[1] - ylim[0])**2 + + (zlim[1] - zlim[0])**2) - ax = sns.lineplot(x=np.linspace(0, upper_lim, img.size), y=img, ax=ax, **kwargs) + ax = sns.lineplot(x=np.linspace(0, upper_lim, img.size), + y=img, + ax=ax, **kwargs) if log_scale: ax.set(yscale='log') ax.margins(x=0, y=0) - ax.set_xlabel('cross-section ' + (f'({x}, {y})' if 
data.get_dim() == 2 else f'({x}, {y}, {z})')) + label = f'({x}, {y})' if data.get_dim() == 2 else f'({x}, {y}, {z})' + ax.set_xlabel('cross-section ' + label) label = target if log_scale: @@ -445,55 +498,65 @@ def streamlines(data: 'SarracenDataFrame', """ Create an SPH interpolated streamline plot of a target vector. - Render the data within a SarracenDataFrame to a 2D matplotlib object, by rendering the values - of a target vector. The contributions of all particles near the rendered area are summed and - stored to a 2D grid for the x & y axes of the target vector. This data is then used to create - a streamline plot using ax.streamlines(). + Render the data within a SarracenDataFrame to a 2D matplotlib object, by + rendering the values of a target vector. The contributions of all particles + near the rendered area are summed and stored to a 2D grid for the x & y + axes of the target vector. This data is then used to create a streamline + plot using ax.streamlines(). Parameters ---------- data: SarracenDataFrame Particle data, in a SarracenDataFrame. target: str tuple of shape (2) or (3). - Column label of the target vector. Shape must match the # of dimensions in `data`. + Column label of the target vector. Shape must match the # of dimensions + in `data`. xsec: float, optional - The z to take a cross-section at. If none, column interpolation is performed. + The z to take a cross-section at. If none, column interpolation is + performed. x, y, z: str, optional - Column label of the x, y & z directional axes. Defaults to the columns detected in `data`. + Column label of the x, y & z directional axes. Defaults to the columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. 
integral_samples: int, optional - If using column interpolation, the number of sample points to take when approximating the 2D column kernel. + If using column interpolation, the number of sample points to take when + approximating the 2D column kernel. rotation: array_like or SciPy Rotation, optional - The rotation to apply to the data before interpolation. If defined as an array, the - order of rotations is [z, y, x] in degrees. + The rotation to apply to the data before interpolation. If defined as + an array, the order of rotations is [z, y, x] in degrees. rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around the + centre of mass. If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. x_pixels, y_pixels: int, optional - Number of interpolation samples to pass to ax.streamlines(). Default values are chosen to keep - a consistent aspect ratio. + Number of interpolation samples to pass to ax.streamlines(). Default + values are chosen to keep a consistent aspect ratio. xlim, ylim: float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x` and `y`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x` and `y`. ax: Axes The main axes in which to draw the rendered image. 
exact: bool
-        Whether to use exact interpolation of the data. For cross-sections this is ignored. Defaults to False.
+        Whether to use exact interpolation of the data. For cross-sections
+        this is ignored. Defaults to False.
     backend: ['cpu', 'gpu']
-        The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise
-        'cpu' is used. A manually specified backend in `data` will override the default.
+        The computation backend to use when interpolating this data. Defaults
+        to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually
+        specified backend in `data` will override the default.
     dens_weight: bool
-        If True, will plot the target mutliplied by the density. Defaults to True for column-integrated views
-        and False for everything else.
+        If True, will plot the target multiplied by the density. Defaults to
+        True for column-integrated views and False for everything else.
     normalize: bool
-        If True, will normalize the interpolation. Defaults to False (this may change in future versions).
+        If True, will normalize the interpolation. Defaults to False (this may
+        change in future versions).
     hmin: bool
-        If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle
-        contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison).
+        If True, a minimum smoothing length of 0.5 * pixel size will be
+        imposed. This ensures each particle contributes to at least one grid
+        cell / pixel. Defaults to False (this may change in a future version).
kwargs: other keyword arguments Keyword arguments to pass to ax.streamlines() @@ -505,15 +568,16 @@ def streamlines(data: 'SarracenDataFrame', Raises ------ ValueError - If `x_pixels` or `y_pixels` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximums result in an invalid region, or - if the number of dimensions in the target vector does not match the data, or - if `data` is not 2 or 3 dimensional. + If `x_pixels` or `y_pixels` are less than or equal to zero, or if the + specified `x` and `y` minimum and maximums result in an invalid region, + or if the number of dimensions in the target vector does not match the + data, or if `data` is not 2 or 3 dimensional. KeyError - If `target`, `x`, `y`, `z` (for 3-dimensional data), mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, `z` (for 3-dimensional data), mass, density, or + smoothing length columns do not exist in `data`. """ - # Choose between the various interpolation functions available, based on initial data passed to this function. + # Choose between the various interpolation functions available, based on + # initial data passed to this function. 
interpolation_type = None if data.get_dim() == 2: @@ -529,16 +593,20 @@ def streamlines(data: 'SarracenDataFrame', interpolation_type = '3d' if interpolation_type == '2d': - img = interpolate_2d_vec(data, target[0], target[1], x, y, kernel, x_pixels, y_pixels, xlim, ylim, exact, + img = interpolate_2d_vec(data, target[0], target[1], x, y, kernel, + x_pixels, y_pixels, xlim, ylim, exact, backend, dens_weight, normalize, hmin) elif interpolation_type == '3d_cross': - img = interpolate_3d_cross_vec(data, target[0], target[1], target[2], xsec, x, y, z, kernel, rotation, - rot_origin, x_pixels, y_pixels, xlim, ylim, backend, dens_weight, normalize, + img = interpolate_3d_cross_vec(data, target[0], target[1], target[2], + xsec, x, y, z, kernel, rotation, + rot_origin, x_pixels, y_pixels, xlim, + ylim, backend, dens_weight, normalize, hmin) elif interpolation_type == '3d': - img = interpolate_3d_vec(data, target[0], target[1], target[2], x, y, kernel, integral_samples, rotation, - rot_origin, x_pixels, y_pixels, xlim, ylim, exact, backend, dens_weight, normalize, - hmin) + img = interpolate_3d_vec(data, target[0], target[1], target[2], x, y, + kernel, integral_samples, rotation, + rot_origin, x_pixels, y_pixels, xlim, ylim, + exact, backend, dens_weight, normalize, hmin) else: raise ValueError('`data` is not a valid number of dimensions.') @@ -549,14 +617,15 @@ def streamlines(data: 'SarracenDataFrame', xlim, ylim = _default_bounds(data, x, y, xlim, ylim) kwargs.setdefault("color", 'black') - ax.streamplot(np.linspace(xlim[0], xlim[1], np.size(img[0], 1)), np.linspace(ylim[0], ylim[1], np.size(img[0], 0)), + ax.streamplot(np.linspace(xlim[0], xlim[1], np.size(img[0], 1)), + np.linspace(ylim[0], ylim[1], np.size(img[0], 0)), img[0], img[1], **kwargs) ax.set_xlim(xlim) ax.set_ylim(ylim) - # remove the x & y ticks if the data is rotated, since these no longer have physical - # relevance to the displayed data. 
+ # remove the x & y ticks if the data is rotated, since these no longer have + # physical relevance to the displayed data. if rotation is not None: ax.set_xticks([]) ax.set_yticks([]) @@ -593,40 +662,46 @@ def arrowplot(data: 'SarracenDataFrame', """ Create an SPH interpolated vector field plot of a target vector. - Render the data within a SarracenDataFrame to a 2D matplotlib object, by rendering the values - of a target vector. The contributions of all particles near the rendered area are summed and - stored to a 2D grid for the x & y axes of the target vector. This data is then used to create - an arrow plot using ax.quiver(). + Render the data within a SarracenDataFrame to a 2D matplotlib object, by + rendering the values of a target vector. The contributions of all particles + near the rendered area are summed and stored to a 2D grid for the x & y + axes of the target vector. This data is then used to create an arrow plot + using ax.quiver(). Parameters ---------- data : SarracenDataFrame Particle data, in a SarracenDataFrame. target: str tuple of shape (2) or (3). - Column label of the target vector. Shape must match the # of dimensions in `data`. + Column label of the target vector. Shape must match the # of dimensions + in `data`. xsec: float - The z to take a cross-section at. If none, column interpolation is performed. + The z to take a cross-section at. If none, column interpolation is + performed. x, y, z: str, optional - Column label of the x, y & z directional axes. Defaults to the columns detected in `data`. + Column label of the x, y & z directional axes. Defaults to the columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. integral_samples: int, optional - If using column interpolation, the number of sample points to take when approximating the 2D column kernel. 
+ If using column interpolation, the number of sample points to take when + approximating the 2D column kernel. rotation: array_like or SciPy Rotation, optional - The rotation to apply to the data before interpolation. If defined as an array, the order of rotations - is [z, y, x] in degrees. + The rotation to apply to the data before interpolation. If defined as + an array, the order of rotations is [z, y, x] in degrees. rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around the + centre of mass. If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. x_arrows, y_arrows: int, optional - Number of arrows in the output image in the x & y directions. Default values are chosen to keep - a consistent aspect ratio. + Number of arrows in the output image in the x & y directions. Default + values are chosen to keep a consistent aspect ratio. xlim, ylim: tuple of float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x` and `y`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x` and `y`. ax: Axes The main axes in which to draw the rendered image. qkey: bool @@ -634,18 +709,22 @@ def arrowplot(data: 'SarracenDataFrame', qkey_kws: dict Keywords to pass through to ax.quiver. 
exact: bool
-        Whether to use exact interpolation of the data. For cross-sections this is ignored. Defaults to False.
+        Whether to use exact interpolation of the data. For cross-sections this
+        is ignored. Defaults to False.
     backend: ['cpu', 'gpu']
-        The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise
-        'cpu' is used. A manually specified backend in `data` will override the default.
+        The computation backend to use when interpolating this data. Defaults
+        to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually
+        specified backend in `data` will override the default.
     dens_weight: bool
-        If True, will plot the target mutliplied by the density. Defaults to True for column-integrated views
-        and False for everything else.
+        If True, will plot the target multiplied by the density. Defaults to
+        True for column-integrated views and False for everything else.
     normalize: bool
-        If True, will normalize the interpolation. Defaults to False (this may change in future versions).
+        If True, will normalize the interpolation. Defaults to False (this may
+        change in future versions).
     hmin: bool
-        If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle
-        contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison).
+        If True, a minimum smoothing length of 0.5 * pixel size will be
+        imposed. This ensures each particle contributes to at least one grid
+        cell / pixel. Defaults to False (this may change in a future version).
kwargs: other keyword arguments Keyword arguments to pass to ax.quiver() @@ -657,13 +736,13 @@ def arrowplot(data: 'SarracenDataFrame', Raises ------ ValueError - If `x_arrows` or `y_arrows` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximums result in an invalid region, or - if the number of dimensions in the target vector does not match the data, or - if `data` is not 2 or 3 dimensional. + If `x_arrows` or `y_arrows` are less than or equal to zero, or if the + specified `x` and `y` minimum and maximums result in an invalid region, + or if the number of dimensions in the target vector does not match the + data, or if `data` is not 2 or 3-dimensional. KeyError - If `target`, `x`, `y`, `z` (for 3-dimensional data), mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, `z` (for 3-dimensional data), mass, density, or + smoothing length columns do not exist in `data`. """ x, y = _default_axes(data, x, y) xlim, ylim = _default_bounds(data, x, y, xlim, ylim) @@ -684,28 +763,34 @@ def arrowplot(data: 'SarracenDataFrame', interpolation_type = '3d' if interpolation_type == '2d': - img = interpolate_2d_vec(data, target[0], target[1], x, y, kernel, x_arrows, y_arrows, xlim, ylim, exact, + img = interpolate_2d_vec(data, target[0], target[1], x, y, kernel, + x_arrows, y_arrows, xlim, ylim, exact, backend, dens_weight, normalize, hmin) elif interpolation_type == '3d_cross': - img = interpolate_3d_cross_vec(data, target[0], target[1], target[2], xsec, x, y, z, kernel, rotation, - rot_origin, x_arrows, y_arrows, xlim, ylim, backend, dens_weight, normalize, + img = interpolate_3d_cross_vec(data, target[0], target[1], target[2], + xsec, x, y, z, kernel, rotation, + rot_origin, x_arrows, y_arrows, xlim, + ylim, backend, dens_weight, normalize, hmin) elif interpolation_type == '3d': - img = interpolate_3d_vec(data, target[0], target[1], target[2], x, y, kernel, integral_samples, rotation, - rot_origin, 
x_arrows, y_arrows, xlim, ylim, exact, backend, dens_weight, normalize, + img = interpolate_3d_vec(data, target[0], target[1], target[2], x, y, + kernel, integral_samples, rotation, + rot_origin, x_arrows, y_arrows, xlim, ylim, + exact, backend, dens_weight, normalize, hmin) else: raise ValueError('`data` is not a valid number of dimensions.') - if ax is None: ax = plt.gca() kwargs.setdefault("angles", 'uv') kwargs.setdefault("pivot", 'mid') - ax.quiver(np.linspace(xlim[0], xlim[1], np.size(img[0], 1)), np.linspace(ylim[0], ylim[1], np.size(img[0], 0)), + ax.quiver(np.linspace(xlim[0], xlim[1], np.size(img[0], 1)), + np.linspace(ylim[0], ylim[1], np.size(img[0], 0)), img[0], img[1], **kwargs) - Q = ax.quiver(np.linspace(xlim[0], xlim[1], np.size(img[0], 1)), np.linspace(ylim[0], ylim[1], np.size(img[0], 0)), + Q = ax.quiver(np.linspace(xlim[0], xlim[1], np.size(img[0], 1)), + np.linspace(ylim[0], ylim[1], np.size(img[0], 0)), img[0], img[1], **kwargs) if qkey: @@ -716,8 +801,12 @@ def arrowplot(data: 'SarracenDataFrame', qkey_kws.setdefault('Y', 1.02) # find a reasonable default value for the quiver key length. - key_length = float(np.format_float_positional(np.mean(np.sqrt(img[0] ** 2 + img[1] ** 2)), precision=1, - unique=False, fractional=False, trim='k')) + key_length = np.mean(np.sqrt(img[0] ** 2 + img[1] ** 2)) + key_length = float(np.format_float_positional(key_length, + precision=1, + unique=False, + fractional=False, + trim='k')) qkey_kws.setdefault('U', key_length) qkey_kws.setdefault('label', f"= {qkey_kws['U']}") @@ -726,8 +815,8 @@ def arrowplot(data: 'SarracenDataFrame', ax.quiverkey(Q, **qkey_kws) - # remove the x & y ticks if the data is rotated, since these no longer have physical - # relevance to the displayed data. + # remove the x & y ticks if the data is rotated, since these no longer have + # physical relevance to the displayed data. 
if rotation is not None: ax.set_xticks([]) ax.set_yticks([]) @@ -738,8 +827,8 @@ def arrowplot(data: 'SarracenDataFrame', ax.set_xlim(xlim) ax.set_ylim(ylim) - # remove the x & y ticks if the data is rotated, since these no longer have physical - # relevance to the displayed data. + # remove the x & y ticks if the data is rotated, since these no longer have + # physical relevance to the displayed data. if rotation is not None: ax.set_xticks([]) ax.set_yticks([]) From 35a70fd92ba6fd32fad260c1f0b2e98d59eba317 Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Fri, 19 Jul 2024 13:47:29 -0230 Subject: [PATCH 06/12] lint disc --- .flake8 | 3 ++- sarracen/disc/surface_density.py | 11 ++++++----- sarracen/disc/utils.py | 13 +++++++++---- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/.flake8 b/.flake8 index be997a4..f9bb980 100644 --- a/.flake8 +++ b/.flake8 @@ -2,4 +2,5 @@ per-file-ignores = sarracen/__init__.py:F401 sarracen/render.py:F821 - sarracen/kernels/__init__.py:F401 \ No newline at end of file + sarracen/kernels/__init__.py:F401 + sarracen/disc/__init__.py:F401 \ No newline at end of file diff --git a/sarracen/disc/surface_density.py b/sarracen/disc/surface_density.py index b0c1699..fa01132 100644 --- a/sarracen/disc/surface_density.py +++ b/sarracen/disc/surface_density.py @@ -1,6 +1,5 @@ import numpy as np import pandas as pd -import sys from ..sarracen_dataframe import SarracenDataFrame from .utils import _get_mass, _get_origin from .utils import _bin_particles_by_radius, _get_bin_midpoints @@ -123,7 +122,8 @@ def surface_density(data: 'SarracenDataFrame', Notes ----- The surface density averaging procedure for SPH is described in section - 3.2.6 of Lodato & Price, MNRAS (2010), `doi:10.1111/j.1365-2966.2010.16526.x + 3.2.6 of Lodato & Price, MNRAS (2010), + `doi:10.1111/j.1365-2966.2010.16526.x `_. 
""" @@ -287,8 +287,8 @@ def _calc_scale_height(data: 'SarracenDataFrame', Lx, Ly, Lz = _calc_angular_momentum(data, rbins, origin, unit_vector=True) zdash = rbins.map(Lx).to_numpy() * data[data.xcol] \ - + rbins.map(Ly).to_numpy() * data[data.ycol] \ - + rbins.map(Lz).to_numpy() * data[data.zcol] + + rbins.map(Ly).to_numpy() * data[data.ycol] \ + + rbins.map(Lz).to_numpy() * data[data.zcol] return zdash.groupby(rbins).std() @@ -372,7 +372,8 @@ def honH(data: 'SarracenDataFrame', origin: list = None, retbins: bool = False): """ - Calculates /H, the averaged smoothing length divided by the scale height. + Calculates /H, the averaged smoothing length divided by the scale + height. The profile is computed by segmenting the particles into radial bins (rings). The average smoothing length in each bin is divided by the scale diff --git a/sarracen/disc/utils.py b/sarracen/disc/utils.py index 31b24c4..a960365 100644 --- a/sarracen/disc/utils.py +++ b/sarracen/disc/utils.py @@ -5,9 +5,10 @@ def _get_mass(data: 'SarracenDataFrame'): - if data.mcol == None: + if data.mcol is None: if 'mass' not in data.params: - raise KeyError("'mass' column does not exist in this SarracenDataFrame.") + raise KeyError("'mass' column does not exist in this " + "SarracenDataFrame.") return data.params['mass'] return data[data.mcol] @@ -66,7 +67,8 @@ def _bin_particles_by_radius(data: 'SarracenDataFrame', r = np.sqrt((data[data.xcol] - origin[0]) ** 2 + (data[data.ycol] - origin[1]) ** 2) else: - raise ValueError("geometry should be either 'cylindrical' or 'spherical'") + raise ValueError("geometry should be either 'cylindrical' or " + "'spherical'") # should we add epsilon here? if r_in is None: @@ -84,9 +86,12 @@ def _bin_particles_by_radius(data: 'SarracenDataFrame', def _get_bin_midpoints(bin_edges: np.ndarray, - log: bool = False) -> np.ndarray: + log: bool = False) -> np.ndarray: """ Calculate the midpoint of bins given their edges. 
+ + Parameters + ---------- bin_edges: ndarray Locations of the bin edges. log : bool, optional From f626b4555b8c77e62a7772591d7afcc42dd0ed25 Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Fri, 19 Jul 2024 15:31:21 -0230 Subject: [PATCH 07/12] lint interpolate --- .flake8 | 1 + sarracen/__init__.py | 3 +- sarracen/interpolate/__init__.py | 7 +- sarracen/interpolate/base_backend.py | 169 ++++- sarracen/interpolate/cpu_backend.py | 372 +++++++--- sarracen/interpolate/gpu_backend.py | 378 +++++++--- sarracen/interpolate/interpolate.py | 994 ++++++++++++++++----------- sarracen/render.py | 26 +- 8 files changed, 1285 insertions(+), 665 deletions(-) diff --git a/.flake8 b/.flake8 index f9bb980..0b79836 100644 --- a/.flake8 +++ b/.flake8 @@ -3,4 +3,5 @@ per-file-ignores = sarracen/__init__.py:F401 sarracen/render.py:F821 sarracen/kernels/__init__.py:F401 + sarracen/interpolate/__init__.py:F401 sarracen/disc/__init__.py:F401 \ No newline at end of file diff --git a/sarracen/__init__.py b/sarracen/__init__.py index b711f50..a6cb6a4 100644 --- a/sarracen/__init__.py +++ b/sarracen/__init__.py @@ -8,7 +8,8 @@ from .sarracen_dataframe import SarracenDataFrame -from .interpolate import interpolate_2d, interpolate_2d_line, interpolate_3d_proj, interpolate_3d_cross +from .interpolate import interpolate_2d, interpolate_2d_line, \ + interpolate_3d_proj, interpolate_3d_cross from .render import render, streamlines, arrowplot import sarracen.disc diff --git a/sarracen/interpolate/__init__.py b/sarracen/interpolate/__init__.py index 383a631..dae22c8 100644 --- a/sarracen/interpolate/__init__.py +++ b/sarracen/interpolate/__init__.py @@ -1,6 +1,7 @@ from ..interpolate.base_backend import BaseBackend from ..interpolate.cpu_backend import CPUBackend from ..interpolate.gpu_backend import GPUBackend -from ..interpolate.interpolate import interpolate_2d_line, interpolate_2d, interpolate_3d_proj,\ - interpolate_3d_cross, interpolate_3d_vec, interpolate_3d_cross_vec, 
interpolate_3d_grid, interpolate_2d_vec, \ - interpolate_3d_line +from ..interpolate.interpolate import interpolate_2d_line, interpolate_2d, \ + interpolate_3d_line, interpolate_3d_proj, interpolate_3d_cross, \ + interpolate_3d_vec, interpolate_3d_cross_vec, interpolate_3d_grid, \ + interpolate_2d_vec diff --git a/sarracen/interpolate/base_backend.py b/sarracen/interpolate/base_backend.py index d880314..fc87b29 100644 --- a/sarracen/interpolate/base_backend.py +++ b/sarracen/interpolate/base_backend.py @@ -8,74 +8,171 @@ class BaseBackend: """Backend implementation of SPH interpolation functions.""" @staticmethod - def interpolate_2d_render(x: ndarray, y: ndarray, weight: ndarray, h: ndarray, weight_function: CPUDispatcher, - kernel_radius: float, x_pixels: int, y_pixels: int, x_min: float, x_max: float, - y_min: float, y_max: float, exact: bool) -> ndarray: - """ Interpolate 2D particle data to a 2D grid of pixels.""" + def interpolate_2d_render(x: ndarray, + y: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + exact: bool) -> ndarray: + """ Interpolate 2D data to a 2D grid of pixels.""" return zeros((y_pixels, x_pixels)) @staticmethod - def interpolate_2d_render_vec(x: ndarray, y: ndarray, weight_x: ndarray, weight_y: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float, + def interpolate_2d_render_vec(x: ndarray, + y: ndarray, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, exact: bool) -> Tuple[ndarray, ndarray]: - """ Interpolate 2D particle vector data to a pair of 2D grids of pixels. 
""" + """ Interpolate 2D vector data to a pair of 2D pixel grids. """ return zeros((y_pixels, x_pixels)), zeros((y_pixels, x_pixels)) @staticmethod - def interpolate_2d_line(x: ndarray, y: ndarray, weight: ndarray, h: ndarray, weight_function: CPUDispatcher, - kernel_radius: float, pixels: int, x1: float, x2: float, y1: float, y2: float) -> ndarray: - """ Interpolate 2D particle data to a 1D cross-sectional line. """ + def interpolate_2d_line(x: ndarray, + y: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + pixels: int, + x1: float, + x2: float, + y1: float, + y2: float) -> ndarray: + """ Interpolate 2D data to a 1D cross-sectional line. """ return zeros(pixels) @staticmethod - def interpolate_3d_line(x: ndarray, y: ndarray, z: ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, pixels: int, x1: float, x2: float, - y1: float, y2: float, z1: float, z2: float) -> ndarray: - """ Interpolate 3D particle data to a 1D cross-sectional line. """ + def interpolate_3d_line(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + pixels: int, + x1: float, + x2: float, + y1: float, + y2: float, + z1: float, + z2: float) -> ndarray: + """ Interpolate 3D data to a 1D cross-sectional line. 
""" return zeros(pixels) @staticmethod - def interpolate_3d_projection(x: ndarray, y: ndarray, z: ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float, exact: bool) -> ndarray: - """ Interpolate 3D particle data to a 2D grid of pixels, using column projection.""" + def interpolate_3d_projection(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + exact: bool) -> ndarray: + """ Interpolate 3D data to a 2D pixel grid using column projection.""" return zeros((y_pixels, x_pixels)) @staticmethod - def interpolate_3d_projection_vec(x: ndarray, y: ndarray, weight_x: ndarray, weight_y: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, - y_pixels: int, x_min: float, x_max: float, y_min: float, y_max: float, + def interpolate_3d_projection_vec(x: ndarray, + y: ndarray, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, exact: bool) -> Tuple[ndarray, ndarray]: - """ Interpolate 3D particle vector data to a pair of 2D grids of pixels, using column projection.""" + """ Interpolate 3D vector data to a pair of 2D pixel grids using + column projection.""" return zeros((y_pixels, x_pixels)), zeros((y_pixels, x_pixels)) @staticmethod - def interpolate_3d_cross(x: ndarray, y: ndarray, z: ndarray, z_slice: float, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float) -> ndarray: + def interpolate_3d_cross(x: ndarray, + y: ndarray, + z: ndarray, + z_slice: float, + 
weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float) -> ndarray: """ - Interpolate 3D particle data to a pair of 2D grids of pixels, using a 3D cross-section at a specific z value. + Interpolate 3D data to a pair of 2D pixel grids using a 3D + cross-section at a specific z value. """ return zeros((y_pixels, x_pixels)) @staticmethod - def interpolate_3d_cross_vec(x: ndarray, y: ndarray, z: ndarray, z_slice: float, weight_x: ndarray, - weight_y: ndarray, h: ndarray, weight_function: CPUDispatcher, kernel_radius: float, - x_pixels: int, y_pixels: int, x_min: float, x_max: float, y_min: float, + def interpolate_3d_cross_vec(x: ndarray, + y: ndarray, + z: ndarray, + z_slice: float, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, y_max: float) -> Tuple[ndarray, ndarray]: """ - Interpolate 3D particle vector data to a pair of 2D grids of pixels, using a 3D cross-section at a - specific z value. + Interpolate 3D vector data to a pair of 2D pixel grids using a 3D + cross-section at a specific z value. 
""" return zeros((y_pixels, x_pixels)), zeros((y_pixels, x_pixels)) @staticmethod - def interpolate_3d_grid(x: ndarray, y: ndarray, z: ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - z_pixels: int, x_min: float, x_max: float, y_min: float, y_max: float, z_min: float, + def interpolate_3d_grid(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + z_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + z_min: float, z_max: float) -> ndarray: """ - Interpolate 3D particle data to a 3D grid of pixels. + Interpolate 3D data to a 3D grid of pixels. """ return zeros((z_pixels, y_pixels, x_pixels)) diff --git a/sarracen/interpolate/cpu_backend.py b/sarracen/interpolate/cpu_backend.py index 808438c..5587b49 100644 --- a/sarracen/interpolate/cpu_backend.py +++ b/sarracen/interpolate/cpu_backend.py @@ -12,99 +12,222 @@ class CPUBackend(BaseBackend): @staticmethod - def interpolate_2d_render(x: ndarray, y: ndarray, weight: ndarray, h: ndarray, weight_function: CPUDispatcher, - kernel_radius: float, x_pixels: int, y_pixels: int, x_min: float, x_max: float, - y_min: float, y_max: float, exact: bool) -> ndarray: + def interpolate_2d_render(x: ndarray, + y: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + exact: bool) -> ndarray: if exact: - return CPUBackend._exact_2d_render(x, y, weight, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max) - return CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight, h, weight_function, kernel_radius, - x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2) + return CPUBackend._exact_2d_render(x, y, weight, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max) + return 
CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight, h, + weight_function, kernel_radius, x_pixels, + y_pixels, x_min, x_max, y_min, y_max, 2) @staticmethod - def interpolate_2d_render_vec(x: ndarray, y: ndarray, weight_x: ndarray, weight_y: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float, + def interpolate_2d_render_vec(x: ndarray, + y: ndarray, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, exact: bool) -> Tuple[ndarray, ndarray]: if exact: - return (CPUBackend._exact_2d_render(x, y, weight_x, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max), - CPUBackend._exact_2d_render(x, y, weight_y, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max)) - return (CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_x, h, weight_function, kernel_radius, x_pixels, + return (CPUBackend._exact_2d_render(x, y, weight_x, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max), + CPUBackend._exact_2d_render(x, y, weight_y, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max)) + return (CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_x, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2), - CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_y, h, weight_function, kernel_radius, x_pixels, + CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_y, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2)) @staticmethod - def interpolate_2d_cross(x: ndarray, y: ndarray, weight: ndarray, h: ndarray, weight_function: CPUDispatcher, - kernel_radius: float, pixels: int, x1: float, x2: float, y1: float, y2: float) -> ndarray: - return CPUBackend._fast_2d_cross_cpu(x, y, weight, h, weight_function, kernel_radius, pixels, x1, x2, y1, y2) + 
def interpolate_2d_cross(x: ndarray, + y: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + pixels: int, + x1: float, + x2: float, + y1: float, + y2: float) -> ndarray: + return CPUBackend._fast_2d_cross_cpu(x, y, weight, h, weight_function, + kernel_radius, pixels, x1, x2, + y1, y2) @staticmethod - def interpolate_3d_line(x: ndarray, y: ndarray, z: ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, pixels: int, x1: float, x2: float, - y1: float, y2: float, z1: float, z2: float) -> ndarray: - return CPUBackend._fast_3d_line(x, y, z, weight, h, weight_function, kernel_radius, pixels, x1, x2, y1, y2, z1, - z2) + def interpolate_3d_line(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + pixels: int, + x1: float, + x2: float, + y1: float, + y2: float, + z1: float, + z2: float) -> ndarray: + return CPUBackend._fast_3d_line(x, y, z, weight, h, weight_function, + kernel_radius, pixels, + x1, x2, y1, y2, z1, z2) @staticmethod - def interpolate_3d_projection(x: ndarray, y: ndarray, z: ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float, exact: bool) -> ndarray: + def interpolate_3d_projection(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + exact: bool) -> ndarray: if exact: - return CPUBackend._exact_3d_project(x, y, weight, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max) - return CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight, h, weight_function, kernel_radius, x_pixels, + return CPUBackend._exact_3d_project(x, y, weight, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max) + 
return CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2) @staticmethod - def interpolate_3d_projection_vec(x: ndarray, y: ndarray, weight_x: ndarray, weight_y: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, - y_pixels: int, x_min: float, x_max: float, y_min: float, y_max: float, + def interpolate_3d_projection_vec(x: ndarray, + y: ndarray, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, exact: bool) -> Tuple[ndarray, ndarray]: if exact: - return (CPUBackend._exact_3d_project(x, y, weight_x, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max), - CPUBackend._exact_3d_project(x, y, weight_y, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max)) - return (CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_x, h, weight_function, kernel_radius, x_pixels, + return (CPUBackend._exact_3d_project(x, y, weight_x, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max), + CPUBackend._exact_3d_project(x, y, weight_y, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max)) + return (CPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_x, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2), - CPUBackend._fast_2d(x, y, np.zeros(y.size), 0, weight_y, h, weight_function, kernel_radius, x_pixels, + CPUBackend._fast_2d(x, y, np.zeros(y.size), 0, weight_y, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2)) @staticmethod - def interpolate_3d_cross(x: ndarray, y: ndarray, z: ndarray, z_slice: float, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float) -> ndarray: - return CPUBackend._fast_2d(x, y, z, 
z_slice, weight, h, weight_function, kernel_radius, x_pixels, y_pixels, - x_min, x_max, y_min, y_max, 3) + def interpolate_3d_cross(x: ndarray, + y: ndarray, + z: ndarray, + z_slice: float, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float) -> ndarray: + return CPUBackend._fast_2d(x, y, z, z_slice, weight, h, + weight_function, kernel_radius, x_pixels, + y_pixels, x_min, x_max, y_min, y_max, 3) @staticmethod - def interpolate_3d_cross_vec(x: ndarray, y: ndarray, z: ndarray, z_slice: float, weight_x: ndarray, - weight_y: ndarray, h: ndarray, weight_function: CPUDispatcher, kernel_radius: float, - x_pixels: int, y_pixels: int, x_min: float, x_max: float, y_min: float, + def interpolate_3d_cross_vec(x: ndarray, + y: ndarray, + z: ndarray, + z_slice: float, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, y_max: float) -> Tuple[ndarray, ndarray]: - return (CPUBackend._fast_2d(x, y, z, z_slice, weight_x, h, weight_function, kernel_radius, x_pixels, y_pixels, - x_min, x_max, y_min, y_max, 3), - CPUBackend._fast_2d(x, y, z, z_slice, weight_y, h, weight_function, kernel_radius, x_pixels, y_pixels, - x_min, x_max, y_min, y_max, 3)) + return (CPUBackend._fast_2d(x, y, z, z_slice, weight_x, h, + weight_function, kernel_radius, x_pixels, + y_pixels, x_min, x_max, y_min, y_max, 3), + CPUBackend._fast_2d(x, y, z, z_slice, weight_y, h, + weight_function, kernel_radius, x_pixels, + y_pixels, x_min, x_max, y_min, y_max, 3)) @staticmethod - def interpolate_3d_grid(x: ndarray, y: ndarray, z: ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - z_pixels: int, x_min: float, x_max: float, y_min: float, y_max: float, z_min: float, + def 
interpolate_3d_grid(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + z_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + z_min: float, z_max: float) -> ndarray: image = np.zeros((z_pixels, y_pixels, x_pixels)) pixwidthz = (z_max - z_min) / z_pixels for z_i in np.arange(z_pixels): z_val = z_min + (z_i + 0.5) * pixwidthz - image[z_i] = CPUBackend._fast_2d(x, y, z, z_val, weight, h, weight_function, kernel_radius, x_pixels, - y_pixels, x_min, x_max, y_min, y_max, 3) + image[z_i] = CPUBackend._fast_2d(x, y, z, z_val, weight, h, + weight_function, kernel_radius, + x_pixels, y_pixels, x_min, x_max, + y_min, y_max, 3) return image + # Underlying CPU numba-compiled code for interpolation to a 2D grid. Used + # in interpolation of 2D data, and column integration / cross-sections of + # 3D data. - # Underlying CPU numba-compiled code for interpolation to a 2D grid. Used in interpolation of 2D data, - # and column integration / cross-sections of 3D data. 
@staticmethod @njit(parallel=True, fastmath=True) - def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, weight_function, kernel_radius, x_pixels, y_pixels, + def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, n_dims): output = np.zeros((y_pixels, x_pixels)) pixwidthx = (x_max - x_min) / x_pixels @@ -118,7 +241,8 @@ def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, weight_function, k output_local = np.zeros((get_num_threads(), y_pixels, x_pixels)) - # thread safety: each thread has its own grid, which are combined after interpolation + # thread safety: each thread has its own grid, which are combined + # after interpolation for thread in prange(get_num_threads()): block_size = x_data.size / get_num_threads() range_start = int(thread * block_size) @@ -129,14 +253,19 @@ def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, weight_function, k if np.abs(dz[i]) >= kernel_radius * h_data[i]: continue - # determine maximum and minimum pixels that this particle contributes to - ipixmin = int(np.rint((x_data[i] - kernel_radius * h_data[i] - x_min) / pixwidthx)) - jpixmin = int(np.rint((y_data[i] - kernel_radius * h_data[i] - y_min) / pixwidthy)) - ipixmax = int(np.rint((x_data[i] + kernel_radius * h_data[i] - x_min) / pixwidthx)) - jpixmax = int(np.rint((y_data[i] + kernel_radius * h_data[i] - y_min) / pixwidthy)) + rad = kernel_radius * h_data[i] + + # determine pixels that this particle contributes to + ipixmin = int(np.rint((x_data[i] - rad - x_min) / pixwidthx)) + jpixmin = int(np.rint((y_data[i] - rad - y_min) / pixwidthy)) + ipixmax = int(np.rint((x_data[i] + rad - x_min) / pixwidthx)) + jpixmax = int(np.rint((y_data[i] + rad - y_min) / pixwidthy)) - if ipixmax < 0 or ipixmin > x_pixels or jpixmax < 0 or jpixmin > y_pixels: + if ipixmax < 0 or ipixmin > x_pixels: + continue + if jpixmax < 0 or jpixmin > y_pixels: continue + if ipixmin < 0: ipixmin 
= 0 if ipixmax > x_pixels: @@ -147,8 +276,8 @@ def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, weight_function, k jpixmax = y_pixels # precalculate differences in the x-direction (optimization) - dx2i = ((x_min + (np.arange(ipixmin, ipixmax) + 0.5) * pixwidthx - x_data[i]) ** 2) \ - * (1 / (h_data[i] ** 2)) + ((dz[i] ** 2) * (1 / h_data[i] ** 2)) + dx2i = (x_min + (np.arange(ipixmin, ipixmax) + 0.5) * pixwidthx + - x_data[i])**2 * (1 / (h_data[i]**2)) + ((dz[i]**2) * (1 / h_data[i]**2)) # determine differences in the y-direction ypix = y_min + (np.arange(jpixmin, jpixmax) + 0.5) * pixwidthy @@ -170,10 +299,12 @@ def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, weight_function, k return output - # Underlying CPU numba-compiled code for exact interpolation of 2D data to a 2D grid. + # Underlying CPU numba-compiled code for exact interpolation of 2D data to + # a 2D grid. @staticmethod @njit(parallel=True) - def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, x_max, y_min, y_max): + def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, + x_min, x_max, y_min, y_max): output_local = np.zeros((get_num_threads(), y_pixels, x_pixels)) pixwidthx = (x_max - x_min) / x_pixels pixwidthy = (y_max - y_min) / y_pixels @@ -188,14 +319,19 @@ def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, # iterate through the indexes of non-filtered particles for i in range(range_start, range_end): - # determine maximum and minimum pixels that this particle contributes to - ipixmin = int(np.floor((x_data[i] - 2 * h_data[i] - x_min) / pixwidthx)) - jpixmin = int(np.floor((y_data[i] - 2 * h_data[i] - y_min) / pixwidthy)) - ipixmax = int(np.ceil((x_data[i] + 2 * h_data[i] - x_min) / pixwidthx)) - jpixmax = int(np.ceil((y_data[i] + 2 * h_data[i] - y_min) / pixwidthy)) + rad = 2 * h_data[i] + + # determine pixels that this particle contributes to + ipixmin = int(np.floor((x_data[i] - rad - x_min) / 
pixwidthx)) + jpixmin = int(np.floor((y_data[i] - rad - y_min) / pixwidthy)) + ipixmax = int(np.ceil((x_data[i] + rad - x_min) / pixwidthx)) + jpixmax = int(np.ceil((y_data[i] + rad - y_min) / pixwidthy)) - if ipixmax < 0 or ipixmin >= x_pixels or jpixmax < 0 or jpixmin >= y_pixels: + if ipixmax < 0 or ipixmin >= x_pixels: continue + if jpixmax < 0 or jpixmin >= y_pixels: + continue + if ipixmin < 0: ipixmin = 0 if ipixmax > x_pixels: @@ -207,8 +343,9 @@ def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, denom = 1 / np.abs(pixwidthx * pixwidthy) * h_data[i] ** 2 - # To calculate the exact surface integral of this pixel, calculate the comprising line integrals - # at each boundary of the square. + # To calculate the exact surface integral of this pixel, + # calculate the comprising line integrals at each boundary of + # the square. if jpixmax >= jpixmin: ypix = y_min + (jpixmin + 0.5) * pixwidthy dy = ypix - y_data[i] @@ -260,8 +397,9 @@ def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, output_local[thread, jpix, ipix] += term[i] * wab - # The negative value of the bottom boundary is equal to the value of the top boundary of the - # pixel below this pixel. + # The negative value of the bottom boundary is equal to + # the value of the top boundary of the pixel below this + # pixel. if jpix < jpixmax - 1: output_local[thread, jpix + 1, ipix] -= term[i] * wab @@ -274,8 +412,9 @@ def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, output_local[thread, jpix, ipix] += term[i] * wab - # The negative value of the right boundary is equal to the value of the left boundary of the - # pixel to the right of this pixel. + # The negative value of the right boundary is equal to + # the value of the left boundary of the pixel to the + # right of this pixel.
if ipix < ipixmax - 1: output_local[thread, jpix, ipix + 1] -= term[i] * wab @@ -289,7 +428,8 @@ def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, # Underlying CPU numba-compiled code for 2D->1D cross-sections. @staticmethod @njit(parallel=True, fastmath=True) - def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, kernel_radius, pixels, x1, x2, y1, y2): + def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, + kernel_radius, pixels, x1, x2, y1, y2): # determine the slope of the cross-section line gradient = 0 if not x2 - x1 == 0: @@ -303,30 +443,33 @@ def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, kernel_r term = w_data / h_data ** 2 - # the intersections between the line and a particle's 'smoothing circle' are - # found by solving a quadratic equation with the below values of a, b, and c. - # if the determinant is negative, the particle does not contribute to the - # cross-section, and can be removed. + # the intersections between the line and a particle's 'smoothing + # circle' are found by solving a quadratic equation with the below + # values of a, b, and c. if the determinant is negative, the particle + # does not contribute to the cross-section, and can be removed. 
aa = 1 + gradient ** 2 bb = 2 * gradient * (yint - y_data) - 2 * x_data - cc = x_data ** 2 + y_data ** 2 - 2 * yint * y_data + yint ** 2 - (kernel_radius * h_data) ** 2 + cc = x_data**2 + y_data**2 - 2 * yint * y_data + yint**2 - (kernel_radius * h_data)**2 det = bb ** 2 - 4 * aa * cc - # create a filter for particles that do not contribute to the cross-section + # create a filter for particles that do not contribute to the + # cross-section filter_det = det >= 0 det = np.sqrt(det) cc = None output = np.zeros(pixels) - # the starting and ending x coordinates of the lines intersections with a particle's smoothing circle + # the starting and ending x coordinates of the lines intersections with + # a particle's smoothing circle xstart = ((-bb[filter_det] - det[filter_det]) / (2 * aa)).clip(a_min=x1, a_max=x2) xend = ((-bb[filter_det] + det[filter_det]) / (2 * aa)).clip(a_min=x1, a_max=x2) bb, det = None, None - # the start and end distances which lie within a particle's smoothing circle. - rstart = np.sqrt((xstart - x1) ** 2 + ((gradient * xstart + yint) - y1) ** 2) - rend = np.sqrt((xend - x1) ** 2 + (((gradient * xend + yint) - y1) ** 2)) + # the start and end distances which lie within a particle's smoothing + # circle. + rstart = np.sqrt((xstart - x1)**2 + ((gradient * xstart + yint) - y1)**2) + rend = np.sqrt((xend - x1)**2 + (((gradient * xend + yint) - y1)**2)) xstart, xend = None, None # the maximum and minimum pixels that each particle contributes to. 
@@ -336,7 +479,8 @@ def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, kernel_r output_local = np.zeros((get_num_threads(), pixels)) - # thread safety: each thread has its own grid, which are combined after interpolation + # thread safety: each thread has its own grid, which are combined after + # interpolation for thread in prange(get_num_threads()): block_size = len(x_data[filter_det]) / get_num_threads() @@ -345,7 +489,8 @@ def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, kernel_r # iterate through the indices of all non-filtered particles for i in range(range_start, range_end): - # determine contributions to all affected pixels for this particle + # determine contributions to all affected pixels for this + # particle xpix = x1 + (np.arange(int(ipixmin[i]), int(ipixmax[i])) + 0.5) * xpixwidth ypix = gradient * xpix + yint dy = ypix - y_data[filter_det][i] @@ -365,8 +510,8 @@ def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, kernel_r @staticmethod @njit(parallel=True, fastmath=True) - def _fast_3d_line(x_data, y_data, z_data, w_data, h_data, weight_function, kernel_radius, pixels, x1, x2, y1, y2, - z1, z2): + def _fast_3d_line(x_data, y_data, z_data, w_data, h_data, weight_function, + kernel_radius, pixels, x1, x2, y1, y2, z1, z2): output_local = np.zeros((get_num_threads(), pixels)) dx = x2 - x1 @@ -384,12 +529,16 @@ def _fast_3d_line(x_data, y_data, z_data, w_data, h_data, weight_function, kerne for i in range(range_start, range_end): - delta = (ux * (x1 - x_data[i]) + uy * (y1 - y_data[i]) + uz * (z1 - z_data[i])) ** 2 - ((x1 - x_data[i]) ** 2 + (y1 - y_data[i]) ** 2 + (z1 - z_data[i]) ** 2) + (kernel_radius * h_data[i]) ** 2 + dx = x1 - x_data[i] + dy = y1 - y_data[i] + dz = z1 - z_data[i] + delta = (ux * dx + uy * dy + uz * dz)**2 \ + - (dx**2 + dy**2 + dz**2) + (kernel_radius * h_data[i])**2 if delta < 0: continue - d1 = -(ux * (x1 - x_data[i]) + uy * (y1 - y_data[i]) + uz * (z1 - z_data[i])) 
- np.sqrt(delta) - d2 = -(ux * (x1 - x_data[i]) + uy * (y1 - y_data[i]) + uz * (z1 - z_data[i])) + np.sqrt(delta) + d1 = -(ux * dx + uy * dy + uz * dz) - np.sqrt(delta) + d2 = -(ux * dx + uy * dy + uz * dz) + np.sqrt(delta) pixmin = min(max(0, round((d1 / length) * pixels)), pixels) pixmax = min(max(0, round((d2 / length) * pixels)), pixels) @@ -417,7 +566,8 @@ def _fast_3d_line(x_data, y_data, z_data, w_data, h_data, weight_function, kerne @staticmethod @njit(parallel=True) - def _exact_3d_project(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, x_max, y_min, y_max): + def _exact_3d_project(x_data, y_data, w_data, h_data, x_pixels, y_pixels, + x_min, x_max, y_min, y_max): output_local = np.zeros((get_num_threads(), y_pixels, x_pixels)) pixwidthx = (x_max - x_min) / x_pixels pixwidthy = (y_max - y_min) / y_pixels @@ -434,18 +584,24 @@ def _exact_3d_project(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, # iterate through the indexes of non-filtered particles for i in range(range_start, range_end): - # determine maximum and minimum pixels that this particle contributes to - ipixmin = int(np.floor((x_data[i] - 2 * h_data[i] - x_min) / pixwidthx)) - jpixmin = int(np.floor((y_data[i] - 2 * h_data[i] - y_min) / pixwidthy)) - ipixmax = int(np.ceil((x_data[i] + 2 * h_data[i] - x_min) / pixwidthx)) - jpixmax = int(np.ceil((y_data[i] + 2 * h_data[i] - y_min) / pixwidthy)) + rad = 2 * h_data[i] + + # determine pixels that this particle contributes to + ipixmin = int(np.floor((x_data[i] - rad - x_min) / pixwidthx)) + jpixmin = int(np.floor((y_data[i] - rad - y_min) / pixwidthy)) + ipixmax = int(np.ceil((x_data[i] + rad - x_min) / pixwidthx)) + jpixmax = int(np.ceil((y_data[i] + rad - y_min) / pixwidthy)) # The width of the z contribution of this particle. - # = 2 * kernel_radius * h[i], where kernel_radius is 2 for the cubic spline kernel. + # = 2 * kernel_radius * h[i], where kernel_radius is 2 for the + # cubic spline kernel. 
pixwidthz = 4 * h_data[i] - if ipixmax < 0 or ipixmin >= x_pixels or jpixmax < 0 or jpixmin >= y_pixels: + if ipixmax < 0 or ipixmin >= x_pixels: continue + if jpixmax < 0 or jpixmin >= y_pixels: + continue + if ipixmin < 0: ipixmin = 0 if ipixmax > x_pixels: @@ -465,12 +621,16 @@ def _exact_3d_project(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, q2 = (dx ** 2 + dy ** 2) / h_data[i] ** 2 if q2 < 4 + 3 * pixwidthx * pixwidthy / h_data[i] ** 2: - # Calculate the volume integral of this pixel by summing the comprising - # surface integrals of each surface of the cube. + # Calculate the volume integral of this pixel by + # summing the comprising surface integrals of each + # surface of the cube. # x-y surfaces - pixint = 2 * surface_int(0.5 * pixwidthz, x_data[i], y_data[i], xpix, ypix, pixwidthx, - pixwidthy, h_data[i]) + pixint = 2 * surface_int(0.5 * pixwidthz, + x_data[i], y_data[i], + xpix, ypix, + pixwidthx, pixwidthy, + h_data[i]) # x-z surfaces pixint += surface_int(ypix - y_data[i] + 0.5 * pixwidthy, x_data[i], 0, xpix, 0, pixwidthx, diff --git a/sarracen/interpolate/gpu_backend.py b/sarracen/interpolate/gpu_backend.py index b458f9d..b084eec 100644 --- a/sarracen/interpolate/gpu_backend.py +++ b/sarracen/interpolate/gpu_backend.py @@ -13,103 +13,228 @@ class GPUBackend(BaseBackend): @staticmethod - def interpolate_2d_render(x: ndarray, y: ndarray, weight: ndarray, h: ndarray, weight_function: CPUDispatcher, - kernel_radius: float, x_pixels: int, y_pixels: int, x_min: float, x_max: float, - y_min: float, y_max: float, exact: bool) -> ndarray: + def interpolate_2d_render(x: ndarray, + y: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + exact: bool) -> ndarray: if exact: - return GPUBackend._exact_2d_render(x, y, weight, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max) - return 
GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight, h, weight_function, kernel_radius, x_pixels, + return GPUBackend._exact_2d_render(x, y, weight, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max) + return GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2) @staticmethod - def interpolate_2d_render_vec(x: ndarray, y: ndarray, weight_x: ndarray, weight_y: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float, + def interpolate_2d_render_vec(x: ndarray, + y: ndarray, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, exact: bool) -> Tuple[ndarray, ndarray]: if exact: - return (GPUBackend._exact_2d_render(x, y, weight_x, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max), - GPUBackend._exact_2d_render(x, y, weight_y, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max)) - return (GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_x, h, weight_function, kernel_radius, x_pixels, + return (GPUBackend._exact_2d_render(x, y, weight_x, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max), + GPUBackend._exact_2d_render(x, y, weight_y, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max)) + return (GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_x, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2), - GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_y, h, weight_function, kernel_radius, x_pixels, + GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_y, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2)) @staticmethod - def interpolate_2d_cross(x: ndarray, y: ndarray, weight: ndarray, h: ndarray, weight_function: 
CPUDispatcher, - kernel_radius: float, pixels: int, x1: float, x2: float, y1: float, y2: float) -> ndarray: - return GPUBackend._fast_2d_cross(x, y, weight, h, weight_function, kernel_radius, pixels, x1, x2, y1, y2) + def interpolate_2d_cross(x: ndarray, + y: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + pixels: int, + x1: float, + x2: float, + y1: float, + y2: float) -> ndarray: + return GPUBackend._fast_2d_cross(x, y, weight, h, weight_function, + kernel_radius, pixels, x1, x2, y1, y2) @staticmethod - def interpolate_3d_line(x: ndarray, y: ndarray, z: ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, pixels: int, x1: float, x2: float, - y1: float, y2: float, z1: float, z2: float) -> ndarray: - return GPUBackend._fast_3d_line(x, y, z, weight, h, weight_function, kernel_radius, pixels, x1, x2, y1, y2, z1, - z2) + def interpolate_3d_line(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + pixels: int, + x1: float, + x2: float, + y1: float, + y2: float, + z1: float, + z2: float) -> ndarray: + return GPUBackend._fast_3d_line(x, y, z, weight, h, weight_function, + kernel_radius, pixels, + x1, x2, y1, y2, z1, z2) @staticmethod - def interpolate_3d_projection(x: ndarray, y: ndarray, z: ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float, exact: bool) -> ndarray: + def interpolate_3d_projection(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + exact: bool) -> ndarray: if exact: - return GPUBackend._exact_3d_project(x, y, weight, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max) - return 
GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight, h, weight_function, kernel_radius, x_pixels, + return GPUBackend._exact_3d_project(x, y, weight, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max) + return GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2) @staticmethod - def interpolate_3d_projection_vec(x: ndarray, y: ndarray, weight_x: ndarray, weight_y: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, - y_pixels: int, x_min: float, x_max: float, y_min: float, y_max: float, + def interpolate_3d_projection_vec(x: ndarray, + y: ndarray, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, exact: bool) -> Tuple[ndarray, ndarray]: if exact: - return (GPUBackend._exact_3d_project(x, y, weight_x, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max), - GPUBackend._exact_3d_project(x, y, weight_y, h, x_pixels, y_pixels, x_min, x_max, y_min, y_max)) - return (GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_x, h, weight_function, kernel_radius, x_pixels, + return (GPUBackend._exact_3d_project(x, y, weight_x, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max), + GPUBackend._exact_3d_project(x, y, weight_y, h, x_pixels, + y_pixels, x_min, x_max, + y_min, y_max)) + return (GPUBackend._fast_2d(x, y, np.zeros(x.size), 0, weight_x, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2), - GPUBackend._fast_2d(x, y, np.zeros(y.size), 0, weight_y, h, weight_function, kernel_radius, x_pixels, + GPUBackend._fast_2d(x, y, np.zeros(y.size), 0, weight_y, h, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, 2)) @staticmethod - def interpolate_3d_cross(x: ndarray, y: ndarray, z: ndarray, z_slice: float, weight: 
ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - x_min: float, x_max: float, y_min: float, y_max: float) -> ndarray: - return GPUBackend._fast_2d(x, y, z, z_slice, weight, h, weight_function, kernel_radius, x_pixels, y_pixels, - x_min, x_max, y_min, y_max, 3) + def interpolate_3d_cross(x: ndarray, + y: ndarray, + z: ndarray, + z_slice: float, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float) -> ndarray: + return GPUBackend._fast_2d(x, y, z, z_slice, weight, h, + weight_function, kernel_radius, x_pixels, + y_pixels, x_min, x_max, y_min, y_max, 3) @staticmethod - def interpolate_3d_cross_vec(x: ndarray, y: ndarray, z: ndarray, z_slice: float, weight_x: ndarray, - weight_y: ndarray, h: ndarray, weight_function: CPUDispatcher, kernel_radius: float, - x_pixels: int, y_pixels: int, x_min: float, x_max: float, y_min: float, + def interpolate_3d_cross_vec(x: ndarray, + y: ndarray, + z: ndarray, + z_slice: float, + weight_x: ndarray, + weight_y: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + x_min: float, + x_max: float, + y_min: float, y_max: float) -> Tuple[ndarray, ndarray]: - return (GPUBackend._fast_2d(x, y, z, z_slice, weight_x, h, weight_function, kernel_radius, x_pixels, y_pixels, - x_min, x_max, y_min, y_max, 3), - GPUBackend._fast_2d(x, y, z, z_slice, weight_y, h, weight_function, kernel_radius, x_pixels, y_pixels, - x_min, x_max, y_min, y_max, 3)) + return (GPUBackend._fast_2d(x, y, z, z_slice, weight_x, h, + weight_function, kernel_radius, x_pixels, + y_pixels, x_min, x_max, y_min, y_max, 3), + GPUBackend._fast_2d(x, y, z, z_slice, weight_y, h, + weight_function, kernel_radius, x_pixels, + y_pixels, x_min, x_max, y_min, y_max, 3)) @staticmethod - def interpolate_3d_grid(x: ndarray, y: ndarray, z: 
ndarray, weight: ndarray, h: ndarray, - weight_function: CPUDispatcher, kernel_radius: float, x_pixels: int, y_pixels: int, - z_pixels: int, x_min: float, x_max: float, y_min: float, y_max: float, z_min: float, + def interpolate_3d_grid(x: ndarray, + y: ndarray, + z: ndarray, + weight: ndarray, + h: ndarray, + weight_function: CPUDispatcher, + kernel_radius: float, + x_pixels: int, + y_pixels: int, + z_pixels: int, + x_min: float, + x_max: float, + y_min: float, + y_max: float, + z_min: float, z_max: float) -> ndarray: image = np.zeros((z_pixels, y_pixels, x_pixels)) pixwidthz = (z_max - z_min) / z_pixels - # todo: this should be separated from _fast_2d to reduce the unnecessary transfer of data to the graphics card. + # todo: this should be separated from _fast_2d to reduce the + # unnecessary transfer of data to the graphics card. for z_i in np.arange(z_pixels): z_val = z_min + (z_i + 0.5) * pixwidthz - image[z_i] = GPUBackend._fast_2d(x, y, z, z_val, weight, h, weight_function, kernel_radius, x_pixels, - y_pixels, x_min, x_max, y_min, y_max, 3) + image[z_i] = GPUBackend._fast_2d(x, y, z, z_val, weight, h, + weight_function, kernel_radius, + x_pixels, y_pixels, x_min, x_max, + y_min, y_max, 3) return image - # For the GPU, the numba code is compiled using a factory function approach. This is required - # since a CUDA numba kernel cannot easily take weight_function as an argument. + # For the GPU, the numba code is compiled using a factory function + # approach. This is required since a CUDA numba kernel cannot easily take + # weight_function as an argument. @staticmethod - def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, weight_function, kernel_radius, x_pixels, y_pixels, + def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, + weight_function, kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, n_dims): - # Underlying GPU numba-compiled code for interpolation to a 2D grid. 
Used in interpolation of 2D data, - # and column integration / cross-sections of 3D data. + # Underlying GPU numba-compiled code for interpolation to a 2D grid. + # Used in interpolation of 2D data, and column integration / + # cross-sections of 3D data. @cuda.jit(fastmath=True) - def _2d_func(z_slice, x_data, y_data, z_data, w_data, h_data, kernel_radius, x_pixels, y_pixels, x_min, x_max, + def _2d_func(z_slice, x_data, y_data, z_data, w_data, h_data, + kernel_radius, x_pixels, y_pixels, x_min, x_max, y_min, y_max, n_dims, image): pixwidthx = (x_max - x_min) / x_pixels pixwidthy = (y_max - y_min) / y_pixels @@ -121,19 +246,23 @@ def _2d_func(z_slice, x_data, y_data, z_data, w_data, h_data, kernel_radius, x_p else: dz = 0 - term = w_data[i] / h_data[i] ** n_dims - if abs(dz) >= kernel_radius * h_data[i]: return - # determine maximum and minimum pixels that this particle contributes to - ipixmin = round((x_data[i] - kernel_radius * h_data[i] - x_min) / pixwidthx) - jpixmin = round((y_data[i] - kernel_radius * h_data[i] - y_min) / pixwidthy) - ipixmax = round((x_data[i] + kernel_radius * h_data[i] - x_min) / pixwidthx) - jpixmax = round((y_data[i] + kernel_radius * h_data[i] - y_min) / pixwidthy) + term = w_data[i] / h_data[i]**n_dims + rad = kernel_radius * h_data[i] + + # determine pixels that this particle contributes to + ipixmin = round((x_data[i] - rad - x_min) / pixwidthx) + jpixmin = round((y_data[i] - rad - y_min) / pixwidthy) + ipixmax = round((x_data[i] + rad - x_min) / pixwidthx) + jpixmax = round((y_data[i] + rad - y_min) / pixwidthy) - if ipixmax < 0 or ipixmin > x_pixels or jpixmax < 0 or jpixmin > y_pixels: + if ipixmax < 0 or ipixmin > x_pixels: return + if jpixmax < 0 or jpixmin > y_pixels: + return + if ipixmin < 0: ipixmin = 0 if ipixmax > x_pixels: @@ -158,12 +287,14 @@ def _2d_func(z_slice, x_data, y_data, z_data, w_data, h_data, kernel_radius, x_p dz2 = ((dz ** 2) * (1 / h_data[i] ** 2)) - # calculate contributions at pixels i, j due to 
particle at x, y
+                        # calculate contributions at pixels i, j due to
+                        # particle at x, y
                         q = math.sqrt(dx2 + dy2 + dz2)
 
                         # add contribution to image
                         if q < kernel_radius:
-                            # atomic add protects the summation against race conditions.
+                            # atomic add protects the summation against race
+                            # conditions.
                             wab = weight_function(q, n_dims)
                             cuda.atomic.add(image, (jpix + jpixmin, ipix + ipixmin),
                                             term * wab)
@@ -186,28 +317,35 @@ def _2d_func(z_slice, x_data, y_data, z_data, w_data, h_data, kernel_radius, x_p
 
         return d_image.copy_to_host()
 
-    # Underlying CPU numba-compiled code for exact interpolation of 2D data to a 2D grid.
+    # Underlying CPU numba-compiled code for exact interpolation of 2D data
+    # to a 2D grid.
     @staticmethod
-    def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, x_max, y_min, y_max):
+    def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels,
+                         x_min, x_max, y_min, y_max):
         pixwidthx = (x_max - x_min) / x_pixels
         pixwidthy = (y_max - y_min) / y_pixels
 
-        # Underlying GPU numba-compiled code for interpolation to a 2D grid. Used in interpolation of 2D data,
+        # Underlying GPU numba-compiled code for interpolation to a 2D grid.
+        # Used in interpolation of 2D data,
         # and column integration / cross-sections of 3D data.
@cuda.jit def _2d_func(x_data, y_data, w_data, h_data, image): i = cuda.grid(1) if i < len(x_data): term = w_data[i] / h_data[i] ** 2 + rad = 2 * h_data[i] - # determine maximum and minimum pixels that this particle contributes to - ipixmin = math.floor((x_data[i] - 2 * h_data[i] - x_min) / pixwidthx) - jpixmin = math.floor((y_data[i] - 2 * h_data[i] - y_min) / pixwidthy) - ipixmax = math.ceil((x_data[i] + 2 * h_data[i] - x_min) / pixwidthx) - jpixmax = math.ceil((y_data[i] + 2 * h_data[i] - y_min) / pixwidthy) + # determine pixels that this particle contributes to + ipixmin = math.floor((x_data[i] - rad - x_min) / pixwidthx) + jpixmin = math.floor((y_data[i] - rad - y_min) / pixwidthy) + ipixmax = math.ceil((x_data[i] + rad - x_min) / pixwidthx) + jpixmax = math.ceil((y_data[i] + rad - y_min) / pixwidthy) - if ipixmax < 0 or ipixmin >= x_pixels or jpixmax < 0 or jpixmin >= y_pixels: + if ipixmax < 0 or ipixmin >= x_pixels: return + if jpixmax < 0 or jpixmin >= y_pixels: + return + if ipixmin < 0: ipixmin = 0 if ipixmax > x_pixels: @@ -219,7 +357,8 @@ def _2d_func(x_data, y_data, w_data, h_data, image): denom = 1 / abs(pixwidthx * pixwidthy) * h_data[i] ** 2 - # To calculate the exact surface integral of this pixel, calculate the comprising line integrals + # To calculate the exact surface integral of this pixel, + # calculate the comprising line integrals # at each boundary of the square. if jpixmax >= jpixmin: ypix = y_min + (jpixmin + 0.5) * pixwidthy @@ -270,8 +409,9 @@ def _2d_func(x_data, y_data, w_data, h_data, image): pixint = line_int(r0, d1, d2, h_data[i]) wab = pixint * denom - # The negative value of the bottom boundary is equal to the value of the top boundary of the - # pixel below this pixel. + # The negative value of the bottom boundary is equal + # to the value of the top boundary of the pixel below + # this pixel. 
cuda.atomic.add(image, (jpix, ipix), term * wab) if jpix < jpixmax - 1: cuda.atomic.sub(image, (jpix + 1, ipix), term * wab) @@ -285,8 +425,9 @@ def _2d_func(x_data, y_data, w_data, h_data, image): cuda.atomic.add(image, (jpix, ipix), term * wab) - # The negative value of the right boundary is equal to the value of the left boundary of the - # pixel to the right of this pixel. + # The negative value of the right boundary is equal to + # the value of the left boundary of the pixel to the + # right of this pixel. if ipix < ipixmax - 1: cuda.atomic.sub(image, (jpix, ipix + 1), term * wab) @@ -307,10 +448,12 @@ def _2d_func(x_data, y_data, w_data, h_data, image): return d_image.copy_to_host() - # For the GPU, the numba code is compiled using a factory function approach. This is required - # since a CUDA numba kernel cannot easily take weight_function as an argument. + # For the GPU, the numba code is compiled using a factory function + # approach. This is required since a CUDA numba kernel cannot easily take + # weight_function as an argument. @staticmethod - def _fast_2d_cross(x_data, y_data, w_data, h_data, weight_function, kernel_radius, pixels, x1, x2, y1, y2): + def _fast_2d_cross(x_data, y_data, w_data, h_data, weight_function, + kernel_radius, pixels, x1, x2, y1, y2): # determine the slope of the cross-section line gradient = 0 if not x2 - x1 == 0: @@ -325,41 +468,47 @@ def _fast_2d_cross(x_data, y_data, w_data, h_data, weight_function, kernel_radiu # Underlying GPU numba-compiled code for 2D->1D cross-sections @cuda.jit(fastmath=True) - def _2d_func(x_data, y_data, w_data, h_data, kernel_radius, pixels, x1, x2, y1, y2, image): + def _2d_func(x_data, y_data, w_data, h_data, kernel_radius, pixels, + x1, x2, y1, y2, image): i = cuda.grid(1) if i < x_data.size: term = w_data[i] / h_data[i] ** 2 - # the intersections between the line and a particle's 'smoothing circle' are - # found by solving a quadratic equation with the below values of a, b, and c. 
- # if the determinant is negative, the particle does not contribute to the + # the intersections between the line and a particle's + # 'smoothing circle' are found by solving a quadratic equation + # with the below values of a, b, and c. if the determinant is + # negative, the particle does not contribute to the # cross-section, and can be removed. bb = 2 * gradient * (yint - y_data[i]) - 2 * x_data[i] cc = x_data[i] ** 2 + y_data[i] ** 2 - 2 * yint * y_data[i] + yint ** 2 - ( kernel_radius * h_data[i]) ** 2 det = bb ** 2 - 4 * aa * cc - # create a filter for particles that do not contribute to the cross-section. + # create a filter for particles that do not contribute to the + # cross-section. if det < 0: return det = math.sqrt(det) - # the starting and ending x coordinates of the lines intersections with a particle's smoothing circle. + # the starting and ending x coordinates of the lines + # intersections with a particle's smoothing circle. xstart = min(max(x1, (-bb - det) / (2 * aa)), x2) xend = min(max(x1, (-bb + det) / (2 * aa)), x2) - # the start and end distances which lie within a particle's smoothing circle. + # the start and end distances which lie within a particle's + # smoothing circle. rstart = math.sqrt((xstart - x1) ** 2 + ((gradient * xstart + yint) - y1) ** 2) rend = math.sqrt((xend - x1) ** 2 + (((gradient * xend + yint) - y1) ** 2)) - # the maximum and minimum pixels that each particle contributes to. + # the max and min pixels that each particle contributes to. 
ipixmin = min(max(0, round(rstart / pixwidth)), pixels) ipixmax = min(max(0, round(rend / pixwidth)), pixels) # iterate through all affected pixels for ipix in range(ipixmin, ipixmax): - # determine contributions to all affected pixels for this particle + # determine contributions to all affected pixels for this + # particle xpix = x1 + (ipix + 0.5) * xpixwidth ypix = gradient * xpix + yint dy = ypix - y_data[i] @@ -390,8 +539,8 @@ def _2d_func(x_data, y_data, w_data, h_data, kernel_radius, pixels, x1, x2, y1, return d_image.copy_to_host() @staticmethod - def _fast_3d_line(x_data, y_data, z_data, w_data, h_data, weight_function, kernel_radius, pixels, x1, x2, y1, y2, - z1, z2): + def _fast_3d_line(x_data, y_data, z_data, w_data, h_data, weight_function, + kernel_radius, pixels, x1, x2, y1, y2, z1, z2): dx = x2 - x1 dy = y2 - y1 dz = z2 - z1 @@ -404,16 +553,18 @@ def _2d_func(x_data, y_data, z_data, w_data, h_data, kernel_radius, pixels, x1, i = cuda.grid(1) if i < x_data.size: - delta = (ux * (x1 - x_data[i]) + uy * (y1 - y_data[i]) + uz * (z1 - z_data[i])) ** 2 \ - - ((x1 - x_data[i]) ** 2 + (y1 - y_data[i]) ** 2 + (z1 - z_data[i]) ** 2)\ - + (kernel_radius * h_data[i]) ** 2 + dx = x1 - x_data[i] + dy = y1 - y_data[i] + dz = z1 - z_data[i] + delta = (ux * dx + uy * dy + uz * dz)**2 \ + - (dx**2 + dy**2 + dz**2) + (kernel_radius * h_data[i])**2 if delta < 0: return term = w_data[i] / h_data[i] ** 3 - d1 = -(ux * (x1 - x_data[i]) + uy * (y1 - y_data[i]) + uz * (z1 - z_data[i])) - math.sqrt(delta) - d2 = -(ux * (x1 - x_data[i]) + uy * (y1 - y_data[i]) + uz * (z1 - z_data[i])) + math.sqrt(delta) + d1 = -(ux * dx + uy * dy + uz * dz) - math.sqrt(delta) + d2 = -(ux * dx + uy * dy + uz * dz) + math.sqrt(delta) pixmin = min(max(0, round((d1 / length) * pixels)), pixels) pixmax = min(max(0, round((d2 / length) * pixels)), pixels) @@ -427,7 +578,7 @@ def _2d_func(x_data, y_data, z_data, w_data, h_data, kernel_radius, pixels, x1, ydiff = ypix - y_data[i] zdiff = zpix - 
z_data[i] - q2 = (xdiff ** 2 + ydiff ** 2 + zdiff ** 2) * (1 / (h_data[i] ** 2)) + q2 = (xdiff**2 + ydiff**2 + zdiff**2) * (1 / h_data[i]**2) wab = weight_function(math.sqrt(q2), 3) cuda.atomic.add(image, ipix, wab * term) @@ -453,7 +604,8 @@ def _2d_func(x_data, y_data, z_data, w_data, h_data, kernel_radius, pixels, x1, return d_image.copy_to_host() @staticmethod - def _exact_3d_project(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, x_max, y_min, y_max): + def _exact_3d_project(x_data, y_data, w_data, h_data, x_pixels, y_pixels, + x_min, x_max, y_min, y_max): pixwidthx = (x_max - x_min) / x_pixels pixwidthy = (y_max - y_min) / y_pixels @@ -465,19 +617,24 @@ def _3d_func(x_data, y_data, w_data, h_data, image): if i < len(x_data): dfac = h_data[i] ** 3 / (pixwidthx * pixwidthy * norm3d) term = norm3d * w_data[i] / h_data[i] ** 3 + rad = 2 * h_data[i] - # determine maximum and minimum pixels that this particle contributes to - ipixmin = math.floor((x_data[i] - 2 * h_data[i] - x_min) / pixwidthx) - jpixmin = math.floor((y_data[i] - 2 * h_data[i] - y_min) / pixwidthy) - ipixmax = math.ceil((x_data[i] + 2 * h_data[i] - x_min) / pixwidthx) - jpixmax = math.ceil((y_data[i] + 2 * h_data[i] - y_min) / pixwidthy) + # determine pixels that this particle contributes to + ipixmin = math.floor((x_data[i] - rad - x_min) / pixwidthx) + jpixmin = math.floor((y_data[i] - rad - y_min) / pixwidthy) + ipixmax = math.ceil((x_data[i] + rad - x_min) / pixwidthx) + jpixmax = math.ceil((y_data[i] + rad - y_min) / pixwidthy) # The width of the z contribution of this particle. - # = 2 * kernel_radius * h[i], where kernel_radius is 2 for the cubic spline kernel. + # = 2 * kernel_radius * h[i], + # where kernel_radius is 2 for the cubic spline kernel. 
pixwidthz = 4 * h_data[i] - if ipixmax < 0 or ipixmin >= x_pixels or jpixmax < 0 or jpixmin >= y_pixels: + if ipixmax < 0 or ipixmin >= x_pixels: return + if jpixmax < 0 or jpixmin >= y_pixels: + return + if ipixmin < 0: ipixmin = 0 if ipixmax > x_pixels: @@ -498,8 +655,9 @@ def _3d_func(x_data, y_data, w_data, h_data, image): q2 = (dx ** 2 + dy ** 2) / h_data[i] ** 2 if q2 < 4 + 3 * pixwidthx * pixwidthy / h_data[i] ** 2: - # Calculate the volume integral of this pixel by summing the comprising - # surface integrals of each surface of the cube. + # Calculate the volume integral of this pixel by + # summing the comprising surface integrals of each + # surface of the cube. # x-y surfaces pixint = 2 * surface_int(0.5 * pixwidthz, x_data[i], y_data[i], xpix, ypix, pixwidthx, diff --git a/sarracen/interpolate/interpolate.py b/sarracen/interpolate/interpolate.py index 95cff74..f16a1a4 100644 --- a/sarracen/interpolate/interpolate.py +++ b/sarracen/interpolate/interpolate.py @@ -1,5 +1,6 @@ """ -Contains several interpolation functions which produce interpolated 2D or 1D arrays of SPH data. +Contains several interpolation functions which produce interpolated 2D or 1D +arrays of SPH data. """ import numpy as np import pandas as pd @@ -14,14 +15,16 @@ def _default_xy(data, x, y): """ - Utility function to determine the x & y columns to use during 2D interpolation. + Utility function to determine the x & y columns to use during 2D + interpolation. Parameters ---------- data: SarracenDataFrame The particle dataset to interpolate over. x, y: str - The x and y directional column labels passed to the interpolation function. + The x and y directional column labels passed to the interpolation + function. Returns ------- @@ -38,36 +41,47 @@ def _default_xy(data, x, y): def _default_xyz(data, x, y, z): """ - Utility function to determine the x, y and z columns to use during 3-D interpolation. + Utility function to determine the x, y and z columns to use during 3-D + interpolation. 
Parameters ---------- data: SarracenDataFrame The particle dataset to interpolate over. x, y, z: str - The x, y and z directional column labels passed to the interpolation function. + The x, y and z directional column labels passed to the interpolation + function. Returns ------- x, y, z: str The directional column labels to use in interpolation. """ + xcol = data.xcol + ycol = data.ycol + zcol = data.zcol + if x is None: - x = data.xcol if not y == data.xcol and not z == data.xcol else \ - data.ycol if not y == data.ycol and not z == data.ycol else data.zcol + x = xcol if not y == xcol and not z == xcol else \ + ycol if not y == ycol and not z == ycol else zcol if y is None: - y = data.ycol if not x == data.ycol and not z == data.ycol else \ - data.xcol if not x == data.xcol and not z == data.xcol else data.zcol + y = ycol if not x == ycol and not z == ycol else \ + xcol if not x == xcol and not z == xcol else zcol if z is None: - z = data.zcol if not x == data.zcol and not y == data.zcol else \ - data.ycol if not x == data.ycol and not y == data.ycol else data.xcol + z = zcol if not x == zcol and not y == zcol else \ + ycol if not x == ycol and not y == ycol else xcol return x, y, z -def _default_bounds(data, x, y, xlim, ylim) -> Tuple[Tuple[float, float], Tuple[float, float]]: +def _default_bounds(data, + x, + y, + xlim, + ylim) -> Tuple[Tuple[float, float], Tuple[float, float]]: """ - Utility function to determine the 2-dimensional boundaries to use in 2D interpolation. + Utility function to determine the 2-dimensional boundaries to use in 2D + interpolation. Parameters ---------- @@ -76,15 +90,17 @@ def _default_bounds(data, x, y, xlim, ylim) -> Tuple[Tuple[float, float], Tuple[ x, y: str The directional column labels that will be used in interpolation. xlim, ylim: tuple of float - The minimum and maximum values passed to the interpolation function, in particle data space. 
+ The minimum and maximum values passed to the interpolation function, in + particle data space. Returns ------- xlim, ylim: tuple of float - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the maximum and minimum values of `x` and `y`, snapped to the nearest integer. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the maximum and minimum values of `x` and `y`, + snapped to the nearest integer. """ - # boundaries of the plot default to the maximum & minimum values of the data. + # boundaries of the plot default to the max & min values of the data. x_min = xlim[0] if xlim is not None and xlim[0] is not None else None y_min = ylim[0] if ylim is not None and ylim[0] is not None else None x_max = xlim[1] if xlim is not None and xlim[1] is not None else None @@ -98,29 +114,41 @@ def _default_bounds(data, x, y, xlim, ylim) -> Tuple[Tuple[float, float], Tuple[ return (x_min, x_max), (y_min, y_max) -def _set_pixels(x_pixels: int, y_pixels: int, xlim: Tuple[float, float], ylim: Tuple[float, float]) -> Tuple[int, int]: +def _set_pixels(x_pixels: int, + y_pixels: int, + xlim: Tuple[float, float], + ylim: Tuple[float, float]) -> Tuple[int, int]: """ - Utility function to determine the number of pixels to interpolate over in 2D interpolation. + Utility function to determine the number of pixels to interpolate over in + 2D interpolation. Parameters ---------- x_pixels, y_pixels: int - The number of pixels in the x & y directions passed to the interpolation function. + The number of pixels in the x & y directions passed to the + interpolation function. xlim, ylim: tuple of float - The minimum and maximum values to use in interpolation, in particle data space. + The minimum and maximum values to use in interpolation, in particle + data space. Returns ------- x_pixels, y_pixels: int - The number of pixels in the x & y directions to use in 2D interpolation. 
+ The number of pixels in the x & y directions to use in 2D + interpolation. """ - # set # of pixels to maintain an aspect ratio that is the same as the underlying bounds of the data. + # set # of pixels to maintain an aspect ratio that is the same as the + # underlying bounds of the data. + + dx = xlim[1] - xlim[0] + dy = ylim[1] - ylim[0] + if x_pixels is None and y_pixels is None: x_pixels = 512 if x_pixels is None: - x_pixels = int(np.rint(y_pixels * ((xlim[1] - xlim[0]) / (ylim[1] - ylim[0])))) + x_pixels = int(np.rint(y_pixels * (dx / dy))) if y_pixels is None: - y_pixels = int(np.rint(x_pixels * ((ylim[1] - ylim[0]) / (xlim[1] - xlim[0])))) + y_pixels = int(np.rint(x_pixels * (dy / dx))) return x_pixels, y_pixels @@ -141,33 +169,43 @@ def _verify_columns(data, x, y): Raises ------- KeyError - If `target`, `x`, `y`, mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, mass, density, or smoothing length columns do + not exist in `data`. """ if x not in data.columns: - raise KeyError(f"x-directional column '{x}' does not exist in the provided dataset.") + raise KeyError(f"x-directional column '{x}' does not exist in the " + f"provided dataset.") if y not in data.columns: - raise KeyError(f"y-directional column '{y}' does not exist in the provided dataset.") + raise KeyError(f"y-directional column '{y}' does not exist in the " + f"provided dataset.") if data.hcol is None: - raise KeyError("Smoothing length column does not exist in the provided dataset.") + raise KeyError("Smoothing length column does not exist in the " + "provided dataset.") -def _check_boundaries(x_pixels: int, y_pixels: int, xlim: Tuple[float, float], ylim: Tuple[float, float]): +def _check_boundaries(x_pixels: int, + y_pixels: int, + xlim: Tuple[float, float], + ylim: Tuple[float, float]): """ - Verify that the pixel count and boundaries of a 2D plot describe a valid region. 
+ Verify that the pixel count and boundaries of a 2D plot describe a valid + region. Parameters ---------- x_pixels, y_pixels: int - The number of pixels in the x & y directions passed to the interpolation function. + The number of pixels in the x & y directions passed to the + interpolation function. xlim, ylim: tuple of float - The minimum and maximum values to use in interpolation, in particle data space. + The minimum and maximum values to use in interpolation, in particle + data space. Raises ------ ValueError - If `x_pixels` or `y_pixels` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximum values result in an invalid region. + If `x_pixels` or `y_pixels` are less than or equal to zero, or if the + specified `x` and `y` minimum and maximum values result in an invalid + region. """ if xlim[1] - xlim[0] <= 0: raise ValueError("`xlim` max must be greater than min!") @@ -181,7 +219,8 @@ def _check_boundaries(x_pixels: int, y_pixels: int, xlim: Tuple[float, float], y def _check_dimension(data, dim): """ - Verify that a given dataset describes data with a required number of dimensions. + Verify that a given dataset describes data with a required number of + dimensions. Parameters ---------- @@ -213,11 +252,11 @@ def _rotate_data(data, x, y, z, rotation, rot_origin): The rotation to apply to the vector data. If defined as an array, the order of rotations is [z, y, x] in degrees rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. 
If 'com', then data is rotated around the + centre of mass. If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. Returns ------- @@ -234,19 +273,23 @@ def _rotate_data(data, x, y, z, rotation, rot_origin): vectors = data[[x, y, z]].to_numpy() # warn whenever rotation is applied - msg = ("The default rotation point is currently the midpoint of the x/y/z bounds, " - "but will change to [x, y, z] = [0, 0, 0] in Sarracen version 1.3.0.") + msg = ("The default rotation point is currently the midpoint of the " + "x/y/z bounds, but will change to [x, y, z] = [0, 0, 0] in " + "Sarracen version 1.3.0.") warnings.warn(msg, DeprecationWarning, stacklevel=6) if rot_origin is None: - #rot_origin = [0, 0, 0] + # rot_origin = [0, 0, 0] rot_origin = (vectors.min(0) + vectors.max(0)) / 2 elif rot_origin == 'com': rot_origin = data.centre_of_mass() elif rot_origin == 'midpoint': rot_origin = (vectors.min(0) + vectors.max(0)) / 2 - elif not isinstance(rot_origin, (list, pd.Series, np.ndarray)) and len(rot_origin) != 3: - raise ValueError("rot_origin should be an [x, y, z] point or 'com' or 'midpoint'") + elif not isinstance(rot_origin, (list, pd.Series, np.ndarray)): + raise ValueError("rot_origin should be an [x, y, z] point or " + "'com' or 'midpoint'") + elif len(rot_origin) != 3: + raise ValueError("rot_origin should specify [x, y, z] point.") vectors = vectors - rot_origin vectors = rotation.apply(vectors) @@ -263,9 +306,10 @@ def _rotate_xyz(data, x, y, z, rotation, rot_origin): """ Rotate positional data in a particle dataset. - Differs from _rotate_data() in that the returned data values are shuffled to ensure that - the rotation is always applied to the global x, y, and z columns of the dataset, no matter - the order of x, y, and z provided to this function. 
+ Differs from _rotate_data() in that the returned data values are shuffled + to ensure that the rotation is always applied to the global x, y, and z + columns of the dataset, no matter the order of x, y, and z provided to + this function. Parameters ---------- @@ -277,30 +321,33 @@ def _rotate_xyz(data, x, y, z, rotation, rot_origin): The rotation to apply to the data. If defined as an array, the order of rotations is [z, y, x] in degrees rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around the + centre of mass. If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. Returns ------- x_data, y_data, z_data: ndarray The rotated x, y, and z directional data. 
""" - rotated_x, rotated_y, rotated_z = _rotate_data(data, data.xcol, data.ycol, data.zcol, rotation, rot_origin) + rotated_x, rotated_y, rotated_z = _rotate_data(data, data.xcol, data.ycol, + data.zcol, rotation, + rot_origin) x_data = rotated_x if x == data.xcol else \ rotated_y if x == data.ycol else \ - rotated_z if x == data.zcol else data[x] + rotated_z if x == data.zcol else data[x] y_data = rotated_x if y == data.xcol else \ rotated_y if y == data.ycol else \ - rotated_z if y == data.zcol else data[y] + rotated_z if y == data.zcol else data[y] z_data = rotated_x if z == data.xcol else \ rotated_y if z == data.ycol else \ - rotated_z if z == data.zcol else data[z] + rotated_z if z == data.zcol else data[z] return x_data, y_data, z_data + def _corotate(corotation, rotation): """ Calculates the rotation matrix for a corotating frame. @@ -308,7 +355,8 @@ def _corotate(corotation, rotation): Parameters ---------- corotation: array_like - The x, y, z coordinates of two locations which determines the corotating frame. Each coordinate is also array_like + The x, y, z coordinates of two locations which determines the + corotating frame. Each coordinate is also array_like. rotation: array_like, optional An additional rotation to apply to the corotating frame. 
@@ -324,51 +372,62 @@ def _corotate(corotation, rotation): corotation[1][1] -= corotation[0][1] corotation[1][2] -= corotation[0][2] - rot_origin=corotation[0] + rot_origin = corotation[0] angle = -np.arctan2(corotation[1][1], corotation[1][0]) if rotation is None: - rotation = np.array([angle * 180 /np.pi, 0, 0]) + rotation = np.array([angle * 180 / np.pi, 0, 0]) else: if isinstance(rotation, Rotation): rotation = rotation.as_rotvec(degrees=True) - rotation = np.array([angle * 180/np.pi + rotation[0], rotation[1], rotation[2]]) + rotation = np.array([angle * 180/np.pi + rotation[0], + rotation[1], + rotation[2]]) rotation = Rotation.from_euler('zyx', rotation, degrees=True) return rotation, rot_origin + def _get_mass(data: 'SarracenDataFrame'): - if data.mcol == None: + if data.mcol is None: if 'mass' not in data.params: - raise KeyError("'mass' column does not exist in this SarracenDataFrame.") + raise KeyError("'mass' column does not exist in this " + "SarracenDataFrame.") return data.params['mass'] return data[data.mcol].to_numpy() def _get_density(data: 'SarracenDataFrame'): - if data.rhocol == None: - if not {data.hcol}.issubset(data.columns) or 'hfact' not in data.params: - raise KeyError('Density cannot be derived from the columns in this SarracenDataFrame.') + if data.rhocol is None: + if data.hcol not in data.columns or 'hfact' not in data.params: + raise KeyError('Density cannot be derived from the columns in ' + 'this SarracenDataFrame.') - return ((data.params['hfact'] / data[data.hcol]) ** (data.get_dim()) * _get_mass(data)).to_numpy() + hfact = data.params['hfact'] + mass = _get_mass(data) + return ((hfact / data[data.hcol])**(data.get_dim()) * mass).to_numpy() return data[data.rhocol].to_numpy() -def _get_weight(data: 'SarracenDataFrame', target: Union[str, np.ndarray], dens_weight: bool): +def _get_weight(data: 'SarracenDataFrame', + target: Union[str, np.ndarray], + dens_weight: bool): if type(target) is str: if target == 'rho': target_data = 
_get_density(data) else: if target not in data.columns: - raise KeyError(f"Target column '{target}' does not exist in provided dataset.") + raise KeyError(f"Target column '{target}' does not exist in " + f"provided dataset.") target_data = data[target].to_numpy() elif type(target) is np.ndarray: target_data = target else: - raise KeyError(f"Target must be of type str or ndarray. Found: '{type(target)}'") + raise KeyError(f"Target must be of type str or ndarray. " + f"Found: '{type(target)}'") mass_data = _get_mass(data) if dens_weight: @@ -377,10 +436,15 @@ def _get_weight(data: 'SarracenDataFrame', target: Union[str, np.ndarray], dens_ rho_data = _get_density(data) return target_data * mass_data / rho_data -def _get_smoothing_lengths(data: 'SarracenDataFrame', hmin: float, x_pixels: int, y_pixels: int, - xlim: Tuple[float, float], ylim: Tuple[float, float]): - """ Return the smoothing length data, imposing a minimum length if hmin is True. """ - + +def _get_smoothing_lengths(data: 'SarracenDataFrame', + hmin: float, + x_pixels: int, + y_pixels: int, + xlim: Tuple[float, float], + ylim: Tuple[float, float]): + """ Return smoothing lengths, imposing a min length if hmin is True. """ + if hmin: pix_size = (xlim[1] - xlim[0]) / x_pixels pix_size = np.maximum(pix_size, (ylim[1] - ylim[0]) / y_pixels) @@ -406,11 +470,12 @@ def interpolate_2d(data: 'SarracenDataFrame', normalize: bool = True, hmin: bool = False) -> np.ndarray: """ - Interpolate particle data across two directional axes to a 2D grid of pixels. + Interpolate particle data across two directional axes to a 2D grid of + pixels. - Interpolate the data within a SarracenDataFrame to a 2D grid, by interpolating the values - of a target variable. The contributions of all particles near the interpolation area are - summed and stored to a 2D grid. + Interpolate the data within a SarracenDataFrame to a 2D grid, by + interpolating the values of a target variable. 
The contributions of all + particles near the interpolation area are summed and stored to a 2D grid. Parameters ---------- @@ -419,41 +484,48 @@ def interpolate_2d(data: 'SarracenDataFrame', target: str Column label of the target smoothing data. x, y: str - Column labels of the directional axes. Defaults to the x & y columns detected in `data`. + Column labels of the directional axes. Defaults to the x & y columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. x_pixels, y_pixels: int, optional - Number of pixels in the output image in the x & y directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x & y directions. Default + values are chosen to keep a consistent aspect ratio. xlim, ylim: tuple of float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x` and `y`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x` and `y`. exact: bool Whether to use exact interpolation of the data. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional If True, the target will be multiplied by density. Defaults to False. - hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. 
This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). + hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- ndarray (2-Dimensional) - The interpolated output image, in a 2-dimensional numpy array. Dimensions are - structured in reverse order, where (x, y) -> [y, x]. + The interpolated output image, in a 2-dimensional numpy array. + Dimensions are structured in reverse order, where (x, y) -> [y, x]. Raises ------- ValueError - If `x_pixels` or `y_pixels` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximum values result in an invalid region, or - if `data` is not 2-dimensional. + If `x_pixels` or `y_pixels` are less than or equal to zero, or if the + specified `x` and `y` minimum and maximum values result in an invalid + region, or if `data` is not 2-dimensional. KeyError - If `target`, `x`, `y`, mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, mass, density, or smoothing length columns do + not exist in `data`. """ _check_dimension(data, 2) x, y = _default_xy(data, x, y) @@ -469,31 +541,46 @@ def interpolate_2d(data: 'SarracenDataFrame', h_data = _get_smoothing_lengths(data, hmin, x_pixels, y_pixels, xlim, ylim) - grid = get_backend(backend). 
\ - interpolate_2d_render(data[x].to_numpy(), data[y].to_numpy(), w_data, h_data, kernel.w, - kernel.get_radius(), x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], ylim[1], exact) + grid = get_backend(backend)\ + .interpolate_2d_render(data[x].to_numpy(), data[y].to_numpy(), + w_data, h_data, kernel.w, kernel.get_radius(), + x_pixels, y_pixels, xlim[0], xlim[1], + ylim[0], ylim[1], exact) if normalize: w_norm = _get_weight(data, np.array([1] * len(w_data)), dens_weight) - norm_grid = get_backend(backend). \ - interpolate_2d_render(data[x].to_numpy(), data[y].to_numpy(), w_norm, h_data, - kernel.w, kernel.get_radius(), x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], - ylim[1], exact) + norm_grid = get_backend(backend)\ + .interpolate_2d_render(data[x].to_numpy(), data[y].to_numpy(), + w_norm, h_data, kernel.w, + kernel.get_radius(), x_pixels, y_pixels, + xlim[0], xlim[1], ylim[0], ylim[1], exact) grid = np.nan_to_num(grid / norm_grid) return grid -def interpolate_2d_vec(data: 'SarracenDataFrame', target_x: str, target_y: str, x: str = None, y: str = None, - kernel: BaseKernel = None, x_pixels: int = None, y_pixels: int = None, - xlim: Tuple[float, float] = None, ylim: Tuple[float, float] = None, exact: bool = False, - backend: str = None, dens_weight: bool = False, normalize: bool = True, hmin: bool = False): +def interpolate_2d_vec(data: 'SarracenDataFrame', + target_x: str, + target_y: str, + x: str = None, + y: str = None, + kernel: BaseKernel = None, + x_pixels: int = None, + y_pixels: int = None, + xlim: Tuple[float, float] = None, + ylim: Tuple[float, float] = None, + exact: bool = False, + backend: str = None, + dens_weight: bool = False, + normalize: bool = True, + hmin: bool = False): """ - Interpolate vector particle data across two directional axes to a 2D grid of particles. + Interpolate vector particle data across two directional axes to a 2D grid + of particles. 
- Interpolate the data within a SarracenDataFrame to a 2D grid, by interpolating the values - of a target vector. The contributions of all vectors near the interpolation area are - summed and stored to a 2D grid. + Interpolate the data within a SarracenDataFrame to a 2D grid, by + interpolating the values of a target vector. The contributions of all + vectors near the interpolation area are summed and stored to a 2D grid. Parameters ---------- @@ -502,41 +589,48 @@ def interpolate_2d_vec(data: 'SarracenDataFrame', target_x: str, target_y: str, target_x, target_y: str Column labels of the target vector. x, y: str - Column labels of the directional axes. Defaults to the x & y columns detected in `data`. + Column labels of the directional axes. Defaults to the x & y columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. x_pixels, y_pixels: int, optional - Number of pixels in the output image in the x & y directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x & y directions. Default + values are chosen to keep a consistent aspect ratio. xlim, ylim: tuple of float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x` and `y`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x` and `y`. exact: bool Whether to use exact interpolation of the data. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + The computation backend to use when interpolating this data. 
Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional If True, the target will be multiplied by density. Defaults to False. - hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). + hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- output_x, output_y: ndarray (2-Dimensional) - The interpolated output images, in a 2-dimensional numpy arrays. Dimensions are - structured in reverse order, where (x, y) -> [y, x]. + The interpolated output images, in a 2-dimensional numpy arrays. + Dimensions are structured in reverse order, where (x, y) -> [y, x]. Raises ------- ValueError - If `x_pixels` or `y_pixels` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximum values result in an invalid region, or - if `data` is not 2-dimensional. + If `x_pixels` or `y_pixels` are less than or equal to zero, or if the + specified `x` and `y` minimum and maximum values result in an invalid + region, or if `data` is not 2-dimensional. KeyError - If `target_x`, `target_y`, `x`, `y`, mass, density, or smoothing length columns do not - exist in `data`. + If `target_x`, `target_y`, `x`, `y`, mass, density, or smoothing + length columns do not exist in `data`. 
""" _check_dimension(data, 2) x, y = _default_xy(data, x, y) @@ -554,24 +648,27 @@ def interpolate_2d_vec(data: 'SarracenDataFrame', target_x: str, target_y: str, h_data = _get_smoothing_lengths(data, hmin, x_pixels, y_pixels, xlim, ylim) - gridx, gridy = get_backend(backend).\ - interpolate_2d_render_vec(data[x].to_numpy(), data[y].to_numpy(), wx_data, wy_data, h_data, - kernel.w, kernel.get_radius(), x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], - ylim[1], exact) + gridx, gridy = get_backend(backend)\ + .interpolate_2d_render_vec(data[x].to_numpy(), data[y].to_numpy(), + wx_data, wy_data, h_data, kernel.w, + kernel.get_radius(), x_pixels, y_pixels, + xlim[0], xlim[1], ylim[0], ylim[1], exact) if normalize: wx_norm = _get_weight(data, np.array([1] * len(wx_data)), dens_weight) wy_norm = _get_weight(data, np.array([1] * len(wy_data)), dens_weight) - norm_gridx, norm_gridy = get_backend(backend).\ - interpolate_2d_render_vec(data[x].to_numpy(), data[y].to_numpy(), wx_norm, wy_norm, - h_data, kernel.w, kernel.get_radius(), - x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], ylim[1], - exact) + norm_gridx, norm_gridy = get_backend(backend)\ + .interpolate_2d_render_vec(data[x].to_numpy(), data[y].to_numpy(), + wx_norm, wy_norm, h_data, kernel.w, + kernel.get_radius(), x_pixels, y_pixels, + xlim[0], xlim[1], ylim[0], ylim[1], + exact) gridx = np.nan_to_num(gridx / norm_gridx) gridy = np.nan_to_num(gridy / norm_gridy) return (gridx, gridy) + def interpolate_2d_line(data: 'SarracenDataFrame', target: str, x: str = None, @@ -585,11 +682,13 @@ def interpolate_2d_line(data: 'SarracenDataFrame', normalize: bool = True, hmin: bool = False) -> np.ndarray: """ - Interpolate particle data across two directional axes to a 1D cross-section line. + Interpolate particle data across two directional axes to a 1D cross-section + line. - Interpolate the data within a SarracenDataFrame to a 1D line, by interpolating the values - of a target variable. 
The contributions of all particles near the specified line are - summed and stored to a 1-dimensional array. + Interpolate the data within a SarracenDataFrame to a 1D line, by + interpolating the values of a target variable. The contributions of all + particles near the specified line are summed and stored to a 1-dimensional + array. Parameters ---------- @@ -598,22 +697,29 @@ def interpolate_2d_line(data: 'SarracenDataFrame', target: str Column label of the target smoothing data. x, y: str - Column labels of the directional axes. Defaults to the x & y columns detected in `data`. + Column labels of the directional axes. Defaults to the x & y columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. pixels: int, optional Number of points in the resulting line plot in the x-direction. xlim, ylim: tuple of float, optional - Starting and ending coordinates of the cross-section line (in particle data space). Defaults to - the minimum and maximum values of `x` and `y`. + Starting and ending coordinates of the cross-section line (in particle + data space). Defaults to the minimum and maximum values of `x` and `y`. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional If True, the target will be multiplied by density. Defaults to False. - hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. 
This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). + hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- @@ -623,12 +729,12 @@ def interpolate_2d_line(data: 'SarracenDataFrame', Raises ------- ValueError - If `x_pixels` or `y_pixels` are less than or equal to zero, or - if the specified `xlim` and `ylim` values are all the same (indicating a zero-length cross-section), or - if `data` is not 2-dimensional. + If `x_pixels` or `y_pixels` are less than or equal to zero, or if the + specified `xlim` and `ylim` values are all the same (indicating a + zero-length cross-section), or if `data` is not 2-dimensional. KeyError - If `target`, `x`, `y`, mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, mass, density, or smoothing length columns do + not exist in `data`. 
""" _check_dimension(data, 2) x, y = _default_xy(data, x, y) @@ -653,20 +759,24 @@ def interpolate_2d_line(data: 'SarracenDataFrame', raise ValueError('pixcount must be greater than zero!') if hmin: - pix_size = np.sqrt((xlim[1] - xlim[0])**2 + (ylim[1] - ylim[0])**2) / pixels + pix_size = np.sqrt((xlim[1] - xlim[0])**2 + + (ylim[1] - ylim[0])**2) / pixels h_data = np.maximum(data[data.hcol].to_numpy(), 0.5 * pix_size) else: h_data = data[data.hcol].to_numpy() - grid = get_backend(backend).\ - interpolate_2d_cross(data[x].to_numpy(), data[y].to_numpy(), w_data, h_data, kernel.w, - kernel.get_radius(), pixels, xlim[0], xlim[1], ylim[0], ylim[1]) + grid = get_backend(backend) \ + .interpolate_2d_cross(data[x].to_numpy(), data[y].to_numpy(), + w_data, h_data, kernel.w, kernel.get_radius(), + pixels, xlim[0], xlim[1], ylim[0], ylim[1]) if normalize: w_norm = _get_weight(data, np.array([1] * len(w_data)), dens_weight) - norm_grid = get_backend(backend). \ - interpolate_2d_cross(data[x].to_numpy(), data[y].to_numpy(), w_norm, h_data, - kernel.w, kernel.get_radius(), pixels, xlim[0], xlim[1], ylim[0], ylim[1]) + norm_grid = get_backend(backend) \ + .interpolate_2d_cross(data[x].to_numpy(), data[y].to_numpy(), + w_norm, h_data, kernel.w, + kernel.get_radius(), pixels, xlim[0], + xlim[1], ylim[0], ylim[1]) grid = np.nan_to_num(grid / norm_grid) return grid @@ -687,11 +797,12 @@ def interpolate_3d_line(data: 'SarracenDataFrame', normalize: bool = True, hmin: bool = False): """ - Interpolate vector particle data across three directional axes to a 1D line. + Interpolate vector particle data across three directional axes to a 1D + line. - Interpolate the data within a SarracenDataFrame to a 1D line, by interpolating the values - of a target variable. The contributions of all particles near the interpolation line are - summed and stored to a 1D array. + Interpolate the data within a SarracenDataFrame to a 1D line, by + interpolating the values of a target variable. 
The contributions of all + particles near the interpolation line are summed and stored to a 1D array. Parameters ---------- @@ -700,23 +811,31 @@ def interpolate_3d_line(data: 'SarracenDataFrame', target: str Column label of the target variable. x, y, z: str - Column labels of the directional axes. Defaults to the x, y & z columns detected in `data`. + Column labels of the directional axes. Defaults to the x, y & z columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. pixels: int, optional - Number of pixels in the output image in the x & y directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x & y directions. Default + values are chosen to keep a consistent aspect ratio. xlim, ylim, zlim: tuple of float, optional - Starting and ending coordinates of the cross-section line (in particle data space). Defaults to - the minimum and maximum values of `x`, `y`, and `z`. + Starting and ending coordinates of the cross-section line (in particle + data space). Defaults to the minimum and maximum values of `x`, `y`, + and `z`. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional If True, the target will be multiplied by density. Defaults to False. - hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle - contributes to at least one grid cell / pixel. 
Defaults to False (this may change in a future verison). + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). + hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- @@ -726,11 +845,12 @@ def interpolate_3d_line(data: 'SarracenDataFrame', Raises ------- ValueError - If `pixels` are less than or equal to zero, or - if the specified `x`, `y`, and `z` minimum and maximum values result in a zero area cross-section, or - if `data` is not 3-dimensional. + If `pixels` are less than or equal to zero, or if the specified `x`, + `y`, and `z` minimum and maximum values result in a zero area + cross-section, or if `data` is not 3-dimensional. KeyError - If `target`, `x`, `y`, mass, density, or smoothing length data does not exist in `data`. + If `target`, `x`, `y`, mass, density, or smoothing length data does + not exist in `data`. 
""" _check_dimension(data, 3) x, y, z = _default_xyz(data, x, y, z) @@ -760,22 +880,26 @@ def interpolate_3d_line(data: 'SarracenDataFrame', raise ValueError('pixcount must be greater than zero!') if hmin: - pix_size = np.sqrt((xlim[1] - xlim[0])**2 + (ylim[1] - ylim[0])**2 + (zlim[1] - zlim[0])**2) / pixels + pix_size = np.sqrt((xlim[1] - xlim[0])**2 + + (ylim[1] - ylim[0])**2 + + (zlim[1] - zlim[0])**2) / pixels h_data = np.maximum(data[data.hcol].to_numpy(), 0.5 * pix_size) else: h_data = data[data.hcol].to_numpy() grid = get_backend(backend) \ - .interpolate_3d_line(data[x].to_numpy(), data[y].to_numpy(), data[z].to_numpy(), w_data, h_data, - kernel.w, kernel.get_radius(), pixels, xlim[0], xlim[1], ylim[0], ylim[1], zlim[0], - zlim[1]) + .interpolate_3d_line(data[x].to_numpy(), data[y].to_numpy(), + data[z].to_numpy(), w_data, h_data, kernel.w, + kernel.get_radius(), pixels, xlim[0], xlim[1], + ylim[0], ylim[1], zlim[0], zlim[1]) if normalize: w_norm = _get_weight(data, np.array([1] * len(w_data)), dens_weight) norm_grid = get_backend(backend) \ - .interpolate_3d_line(data[x].to_numpy(), data[y].to_numpy(), data[z].to_numpy(), w_norm, - h_data, kernel.w, kernel.get_radius(), pixels, xlim[0], xlim[1], - ylim[0], ylim[1], zlim[0], zlim[1]) + .interpolate_3d_line(data[x].to_numpy(), data[y].to_numpy(), + data[z].to_numpy(), w_norm, h_data, kernel.w, + kernel.get_radius(), pixels, xlim[0], xlim[1], + ylim[0], ylim[1], zlim[0], zlim[1]) grid = np.nan_to_num(grid / norm_grid) return grid @@ -802,9 +926,9 @@ def interpolate_3d_proj(data: 'SarracenDataFrame', """ Interpolate 3D particle data to a 2D grid of pixels. - Interpolates three-dimensional particle data in a SarracenDataFrame. The data - is interpolated to a 2D grid of pixels, by summing contributions in columns which - span the z-axis. + Interpolates three-dimensional particle data in a SarracenDataFrame. 
The + data is interpolated to a 2D grid of pixels, by summing contributions in + columns which span the z-axis. Parameters ---------- @@ -813,60 +937,70 @@ def interpolate_3d_proj(data: 'SarracenDataFrame', target: str Column label of the target smoothing data. x, y: str - Column labels of the directional axes. Defaults to the x & y columns detected in `data`. + Column labels of the directional axes. Defaults to the x & y columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. integral_samples: int, optional - Number of sample points to take when approximating the 2D column kernel. + Number of sample points to take when approximating the 2D column + kernel. corotation: array_like - The x, y, z coordinates of two locations which determines the corotating frame. + The x, y, z coordinates of two locations which determines the + corotating frame. rotation: array_like or SciPy Rotation, optional - The rotation to apply to the data before interpolation. If defined as an array, the - order of rotations is [z, y, x] in degrees. + The rotation to apply to the data before interpolation. If defined as + an array, the order of rotations is [z, y, x] in degrees. rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around the + centre of mass. 
If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. x_pixels, y_pixels: int, optional - Number of pixels in the output image in the x & y directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x & y directions. Default + values are chosen to keep a consistent aspect ratio. xlim, ylim: tuple of float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x` and `y`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x` and `y`. exact: bool Whether to use exact interpolation of the data. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool - If True, the target will be multiplied by density. Defaults to True for column-integrated views, - when the target is not density, and False for everything else. - hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional + If True, the target will be multiplied by density. Defaults to True for + column-integrated views, when the target is not density, and False for + everything else. + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). 
+ hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- ndarray (2-Dimensional) - The interpolated output image, in a 2-dimensional numpy array. Dimensions are - structured in reverse order, where (x, y) -> [y, x]. + The interpolated output image, in a 2-dimensional numpy array. + Dimensions are structured in reverse order, where (x, y) -> [y, x]. Raises ------- ValueError - If `x_pixels` or `y_pixels` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximums result in an invalid region, or - if the provided data is not 3-dimensional. + If `x_pixels` or `y_pixels` are less than or equal to zero, or if the + specified `x` and `y` minimum and maximums result in an invalid region, + or if the provided data is not 3-dimensional. KeyError - If `target`, `x`, `y`, mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, mass, density, or smoothing length columns do + not exist in `data`. Notes ----- - Since the direction of integration is assumed to be straight across the z-axis, the z-axis column - is not required for this type of interpolation. + Since the direction of integration is assumed to be straight across the + z-axis, the z-axis column is not required for this type of interpolation. 
""" _check_dimension(data, 3) x, y, z = _default_xyz(data, x, y, None) @@ -881,7 +1015,9 @@ def interpolate_3d_proj(data: 'SarracenDataFrame', x_pixels, y_pixels = _set_pixels(x_pixels, y_pixels, xlim, ylim) _check_boundaries(x_pixels, y_pixels, xlim, ylim) - if corotation is not None: rotation, rot_origin = _corotate(corotation, rotation) + if corotation is not None: + rotation, rot_origin = _corotate(corotation, rotation) + x_data, y_data, z_data = _rotate_xyz(data, x, y, z, rotation, rot_origin) kernel = kernel if kernel is not None else data.kernel backend = backend if backend is not None else data.backend @@ -891,19 +1027,23 @@ def interpolate_3d_proj(data: 'SarracenDataFrame', h_data = _get_smoothing_lengths(data, hmin, x_pixels, y_pixels, xlim, ylim) grid = get_backend(backend) \ - .interpolate_3d_projection(x_data, y_data, z_data, w_data, h_data, weight_function, - kernel.get_radius(), x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], ylim[1], exact) + .interpolate_3d_projection(x_data, y_data, z_data, w_data, h_data, + weight_function, kernel.get_radius(), + x_pixels, y_pixels, + xlim[0], xlim[1], ylim[0], ylim[1], exact) if normalize: w_norm = _get_weight(data, np.array([1] * len(w_data)), dens_weight) norm_grid = get_backend(backend) \ - .interpolate_3d_projection(x_data, y_data, z_data, w_norm, h_data, weight_function, - kernel.get_radius(), x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], - ylim[1], exact) + .interpolate_3d_projection(x_data, y_data, z_data, w_norm, h_data, + weight_function, kernel.get_radius(), + x_pixels, y_pixels, xlim[0], xlim[1], + ylim[0], ylim[1], exact) grid = np.nan_to_num(grid / norm_grid) return grid + def interpolate_3d_vec(data: 'SarracenDataFrame', target_x: str, target_y: str, @@ -926,9 +1066,9 @@ def interpolate_3d_vec(data: 'SarracenDataFrame', """ Interpolate 3D vector particle data to a 2D grid of pixels. - Interpolates three-dimensional vector particle data in a SarracenDataFrame. 
The data - is interpolated to a 2D grid of pixels, by summing contributions in columns which - span the z-axis. + Interpolates three-dimensional vector particle data in a SarracenDataFrame. + The data is interpolated to a 2D grid of pixels, by summing contributions + in columns which span the z-axis. Parameters ---------- @@ -937,56 +1077,65 @@ def interpolate_3d_vec(data: 'SarracenDataFrame', target_x, target_y, target_z: str Column labels of the target vector. x, y: str - Column labels of the directional axes. Defaults to the x & y columns detected in `data`. + Column labels of the directional axes. Defaults to the x & y columns + detected in `data`. kernel: BaseKernel, optional - Kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + Kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. integral_samples: int, optional - Number of sample points to take when approximating the 2D column kernel. + Number of sample points to take when approximating the 2D column + kernel. rotation: array_like or SciPy Rotation, optional - The rotation to apply to the data before interpolation. If defined as an array, the - order of rotations is [z, y, x] in degrees. + The rotation to apply to the data before interpolation. If defined as + an array, the order of rotations is [z, y, x] in degrees. rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around the + centre of mass. 
If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. x_pixels, y_pixels: int, optional - Number of pixels in the output image in the x & y directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x & y directions. Default + values are chosen to keep a consistent aspect ratio. xlim, ylim: tuple of float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x` and `y`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x` and `y`. exact: bool Whether to use exact interpolation of the data. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional If True, the target will be multiplied by density. Defaults to False. - hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). + hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- output_x, output_y: ndarray (2-Dimensional) - The interpolated output images. 
Dimensions are structured in reverse order, where (x, y) -> [y, x]. + The interpolated output images. Dimensions are structured in reverse + order, where (x, y) -> [y, x]. Raises ------- ValueError - If `x_pixels` or `y_pixels` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximums result in an invalid region, or - if the provided data is not 3-dimensional. + If `x_pixels` or `y_pixels` are less than or equal to zero, or if the + specified `x` and `y` minimum and maximums result in an invalid region, + or if the provided data is not 3-dimensional. KeyError - If `target_x`, `target_y`, `x`, `y`, mass, density, or smoothing length columns do not - exist in `data`. + If `target_x`, `target_y`, `x`, `y`, mass, density, or smoothing + length columns do not exist in `data`. Notes ----- - Since the direction of integration is assumed to be straight across the z-axis, the z-axis column - is not required for this type of interpolation. + Since the direction of integration is assumed to be straight across the + z-axis, the z-axis column is not required for this type of interpolation. 
""" _check_dimension(data, 3) @@ -999,8 +1148,11 @@ def interpolate_3d_vec(data: 'SarracenDataFrame', x_data, y_data, _ = _rotate_xyz(data, x, y, z, rotation, rot_origin) if target_z not in data.columns: - raise KeyError(f"z-directional target column '{target_z}' does not exist in the provided dataset.") - target_x_data, target_y_data, _ = _rotate_data(data, target_x, target_y, target_z, rotation, rot_origin) + raise KeyError(f"z-directional target column '{target_z}' does not " + f"exist in the provided dataset.") + target_x_data, target_y_data, _ = _rotate_data(data, target_x, target_y, + target_z, rotation, + rot_origin) wx_data = _get_weight(data, target_x_data, dens_weight) wy_data = _get_weight(data, target_y_data, dens_weight) @@ -1011,23 +1163,26 @@ def interpolate_3d_vec(data: 'SarracenDataFrame', weight_function = kernel.get_column_kernel_func(integral_samples) gridx, gridy = get_backend(backend) \ - .interpolate_3d_projection_vec(x_data, y_data, wx_data, wy_data, h_data, - weight_function, kernel.get_radius(), x_pixels, y_pixels, xlim[0], - xlim[1], ylim[0], ylim[1], exact) + .interpolate_3d_projection_vec(x_data, y_data, wx_data, wy_data, + h_data, weight_function, + kernel.get_radius(), x_pixels, y_pixels, + xlim[0], xlim[1], ylim[0], ylim[1], + exact) if normalize: wx_norm = _get_weight(data, np.array([1] * len(wx_data)), dens_weight) wy_norm = _get_weight(data, np.array([1] * len(wy_data)), dens_weight) norm_gridx, norm_gridy = get_backend(backend) \ - .interpolate_3d_projection_vec(x_data, y_data, wx_norm, wy_norm, h_data, - weight_function, kernel.get_radius(), x_pixels, y_pixels, xlim[0], - xlim[1], ylim[0], ylim[1], exact) + .interpolate_3d_projection_vec(x_data, y_data, wx_norm, wy_norm, + h_data, weight_function, + kernel.get_radius(), x_pixels, + y_pixels, xlim[0], xlim[1], + ylim[0], ylim[1], exact) gridx = np.nan_to_num(gridx / norm_gridx) gridy = np.nan_to_num(gridy / norm_gridy) return (gridx, gridy) - def interpolate_3d_cross(data: 
'SarracenDataFrame', target: str, x: str = None, @@ -1049,9 +1204,10 @@ def interpolate_3d_cross(data: 'SarracenDataFrame', """ Interpolate 3D particle data to a 2D grid, using a 3D cross-section. - Interpolates particle data in a SarracenDataFrame across three directional axes to a 2D - grid of pixels. A cross-section is taken of the 3D data at a specific value of z, and - the contributions of particles near the plane are interpolated to a 2D grid. + Interpolates particle data in a SarracenDataFrame across three directional + axes to a 2D grid of pixels. A cross-section is taken of the 3D data at a + specific value of z, and the contributions of particles near the plane are + interpolated to a 2D grid. Parameters ---------- @@ -1060,57 +1216,67 @@ def interpolate_3d_cross(data: 'SarracenDataFrame', target: str The column label of the target smoothing data. z_slice: float - The z-axis value to take the cross-section at. Defaults to the midpoint of the z-directional data. + The z-axis value to take the cross-section at. Defaults to the midpoint + of the z-directional data. x, y, z: str - The column labels of the directional data to interpolate over. Defaults to the x, y, and z columns + The column labels of the directional data to interpolate over. Defaults + to the x, y, and z columns detected in `data`. kernel: BaseKernel - The kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + The kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. corotation: array_like - The x, y, z coordinates of two locations which determines the corotating frame. + The x, y, z coordinates of two locations which determines the + corotating frame. rotation: array_like or SciPy Rotation, optional - The rotation to apply to the data before interpolation. If defined as an array, the - order of rotations is [z, y, x] in degrees. + The rotation to apply to the data before interpolation. 
If defined as + an array, the order of rotations is [z, y, x] in degrees. rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around the + centre of mass. If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. x_pixels, y_pixels: int, optional - Number of pixels in the output image in the x & y directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x & y directions. Default + values are chosen to keep a consistent aspect ratio. xlim, ylim: tuple of float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x` and `y`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x` and `y`. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional If True, the target will be multiplied by density. Defaults to False. 
- hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). + hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- ndarray (2-Dimensional) - The interpolated output image, in a 2-dimensional numpy array. Dimensions are - structured in reverse order, where (x, y) -> [y, x]. + The interpolated output image, in a 2-dimensional numpy array. + Dimensions are structured in reverse order, where (x, y) -> [y, x]. Raises ------- ValueError - If `pixwidthx`, `pixwidthy`, `pixcountx`, or `pixcounty` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximums result in an invalid region, or - if the provided data is not 3-dimensional. + If `pixwidthx`, `pixwidthy`, `pixcountx`, or `pixcounty` are less than + or equal to zero, or if the specified `x` and `y` minimum and maximums + result in an invalid region, or if the provided data is not + 3-dimensional. KeyError - If `target`, `x`, `y`, `z`, mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, `z`, mass, density, or smoothing length columns + do not exist in `data`. """ _check_dimension(data, 3) - # x & y columns default to the variables determined by the SarracenDataFrame. + # x and y columns default to the variables from the SarracenDataFrame. 
x, y, z = _default_xyz(data, x, y, z) _verify_columns(data, x, y) @@ -1120,7 +1286,7 @@ def interpolate_3d_cross(data: 'SarracenDataFrame', w_data = _get_weight(data, target, dens_weight) - # boundaries of the plot default to the maximum & minimum values of the data. + # boundaries of the plot default to the max & min values of the data. xlim, ylim = _default_bounds(data, x, y, xlim, ylim) x_pixels, y_pixels = _set_pixels(x_pixels, y_pixels, xlim, ylim) _check_boundaries(x_pixels, y_pixels, xlim, ylim) @@ -1128,19 +1294,24 @@ def interpolate_3d_cross(data: 'SarracenDataFrame', kernel = kernel if kernel is not None else data.kernel backend = backend if backend is not None else data.backend - if corotation is not None: rotation, rot_origin = _corotate(corotation, rotation) + if corotation is not None: + rotation, rot_origin = _corotate(corotation, rotation) + x_data, y_data, z_data = _rotate_xyz(data, x, y, z, rotation, rot_origin) h_data = _get_smoothing_lengths(data, hmin, x_pixels, y_pixels, xlim, ylim) grid = get_backend(backend) \ - .interpolate_3d_cross(x_data, y_data, z_data, z_slice, w_data, h_data, kernel.w, - kernel.get_radius(), x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], ylim[1]) + .interpolate_3d_cross(x_data, y_data, z_data, z_slice, w_data, h_data, + kernel.w, kernel.get_radius(), x_pixels, + y_pixels, xlim[0], xlim[1], ylim[0], ylim[1]) if normalize: w_norm = _get_weight(data, np.array([1] * len(w_data)), dens_weight) norm_grid = get_backend(backend) \ - .interpolate_3d_cross(x_data, y_data, z_data, z_slice, w_norm, h_data, kernel.w, - kernel.get_radius(), x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], ylim[1]) + .interpolate_3d_cross(x_data, y_data, z_data, z_slice, w_norm, + h_data, kernel.w, kernel.get_radius(), + x_pixels, y_pixels, + xlim[0], xlim[1], ylim[0], ylim[1]) grid = np.nan_to_num(grid / norm_grid) return grid @@ -1155,7 +1326,7 @@ def interpolate_3d_cross_vec(data: 'SarracenDataFrame', y: str = None, z: str = None, kernel: BaseKernel 
= None, - rotation: Union[np.ndarray, list, Rotation] = None, + rotation: Union[np.ndarray, list, Rotation] = None, # noqa: E501 rot_origin: Union[np.ndarray, list, str] = None, x_pixels: int = None, y_pixels: int = None, @@ -1168,9 +1339,10 @@ def interpolate_3d_cross_vec(data: 'SarracenDataFrame', """ Interpolate 3D vector particle data to a 2D grid, using a 3D cross-section. - Interpolates vector particle data in a SarracenDataFrame across three directional axes to a 2D - grid of pixels. A cross-section is taken of the 3D data at a specific value of z, and - the contributions of vectors near the plane are interpolated to a 2D grid. + Interpolates vector particle data in a SarracenDataFrame across three + directional axes to a 2D grid of pixels. A cross-section is taken of the + 3D data at a specific value of z, and the contributions of vectors near + the plane are interpolated to a 2D grid. Parameters ---------- @@ -1179,50 +1351,59 @@ def interpolate_3d_cross_vec(data: 'SarracenDataFrame', target_x, target_y, target_z: str The column labels of the target vector. z_slice: float - The z-axis value to take the cross-section at. Defaults to the midpoint of the z-directional data. + The z-axis value to take the cross-section at. Defaults to the midpoint + of the z-directional data. x, y, z: str - The column labels of the directional data to interpolate over. Defaults to the x, y, and z columns - detected in `data`. + The column labels of the directional data to interpolate over. Defaults + to the x, y, and z columns detected in `data`. kernel: BaseKernel - The kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + The kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. rotation: array_like or SciPy Rotation, optional - The rotation to apply to the data before interpolation. If defined as an array, the - order of rotations is [z, y, x] in degrees. 
+ The rotation to apply to the data before interpolation. If defined as + an array, the order of rotations is [z, y, x] in degrees. rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around the + centre of mass. If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. x_pixels, y_pixels: int, optional - Number of pixels in the output image in the x & y directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x & y directions. Default + values are chosen to keep a consistent aspect ratio. xlim, ylim: float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x` and `y`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x` and `y`. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional If True, the target will be multiplied by density. Defaults to False. 
- hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). + hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- output_x, output_y: ndarray (2-Dimensional) - The interpolated output images. Dimensions are structured in reverse order, where (x, y) -> [y, x]. + The interpolated output images. Dimensions are structured in reverse + order, where (x, y) -> [y, x]. Raises ------- ValueError - If `pixwidthx`, `pixwidthy`, `pixcountx`, or `pixcounty` are less than or equal to zero, or - if the specified `x` and `y` minimum and maximums result in an invalid region, or - if the provided data is not 3-dimensional. + If `pixwidthx`, `pixwidthy`, `pixcountx`, or `pixcounty` are less than + or equal to zero, or if the specified `x` and `y` minimum and maximums + result in an invalid region, or if the provided data is not + 3-dimensional. KeyError - If `target_x`, `target_y`, `target_z`, `x`, `y`, `z`, mass, density, or smoothing length columns do not - exist in `data`. + If `target_x`, `target_y`, `target_z`, `x`, `y`, `z`, mass, density, + or smoothing length columns do not exist in `data`. """ _check_dimension(data, 3) @@ -1233,13 +1414,15 @@ def interpolate_3d_cross_vec(data: 'SarracenDataFrame', if z_slice is None: z_slice = data.loc[:, z].mean() - # boundaries of the plot default to the maximum & minimum values of the data. + # boundaries of the plot default to the max & min values of the data. 
xlim, ylim = _default_bounds(data, x, y, xlim, ylim) x_pixels, y_pixels = _set_pixels(x_pixels, y_pixels, xlim, ylim) _check_boundaries(x_pixels, y_pixels, xlim, ylim) x_data, y_data, z_data = _rotate_xyz(data, x, y, z, rotation, rot_origin) - target_x_data, target_y_data, _ = _rotate_data(data, target_x, target_y, target_z, rotation, rot_origin) + target_x_data, target_y_data, _ = _rotate_data(data, target_x, target_y, + target_z, rotation, + rot_origin) wx_data = _get_weight(data, target_x_data, dens_weight) wy_data = _get_weight(data, target_y_data, dens_weight) @@ -1249,17 +1432,19 @@ def interpolate_3d_cross_vec(data: 'SarracenDataFrame', backend = backend if backend is not None else data.backend gridx, gridy = get_backend(backend) \ - .interpolate_3d_cross_vec(x_data, y_data, z_data, z_slice, wx_data, wy_data, h_data, - kernel.w, kernel.get_radius(), x_pixels, y_pixels, xlim[0], xlim[1], - ylim[0], ylim[1]) + .interpolate_3d_cross_vec(x_data, y_data, z_data, z_slice, wx_data, + wy_data, h_data, kernel.w, + kernel.get_radius(), x_pixels, y_pixels, + xlim[0], xlim[1], ylim[0], ylim[1]) if normalize: wx_norm = _get_weight(data, np.array([1] * len(wx_data)), dens_weight) wy_norm = _get_weight(data, np.array([1] * len(wy_data)), dens_weight) norm_gridx, norm_gridy = get_backend(backend) \ - .interpolate_3d_cross_vec(x_data, y_data, z_data, z_slice, wx_norm, wy_norm, - h_data, kernel.w, kernel.get_radius(), - x_pixels, y_pixels, xlim[0], xlim[1], ylim[0], ylim[1]) + .interpolate_3d_cross_vec(x_data, y_data, z_data, z_slice, wx_norm, + wy_norm, h_data, kernel.w, + kernel.get_radius(), x_pixels, y_pixels, + xlim[0], xlim[1], ylim[0], ylim[1]) gridx = np.nan_to_num(gridx / norm_gridx) gridy = np.nan_to_num(gridy / norm_gridy) @@ -1287,9 +1472,9 @@ def interpolate_3d_grid(data: 'SarracenDataFrame', """ Interpolate 3D particle data to a 3D grid of pixels - Interpolates particle data in a SarracenDataFrame across three directional axes to a 3D - grid of pixels. 
The contributions of all particles near each 3D cell are summed and - stored in the 3D grid. + Interpolates particle data in a SarracenDataFrame across three directional + axes to a 3D grid of pixels. The contributions of all particles near each + 3D cell are summed and stored in the 3D grid. Parameters ---------- @@ -1298,49 +1483,57 @@ def interpolate_3d_grid(data: 'SarracenDataFrame', target: str The column label of the target data. x, y, z: str - The column labels of the directional data to interpolate over. Defaults to the x, y, and z columns - detected in `data`. + The column labels of the directional data to interpolate over. Defaults + to the x, y, and z columns detected in `data`. kernel: BaseKernel - The kernel to use for smoothing the target data. Defaults to the kernel specified in `data`. + The kernel to use for smoothing the target data. Defaults to the kernel + specified in `data`. rotation: array_like or SciPy Rotation, optional - The rotation to apply to the data before interpolation. If defined as an array, the - order of rotations is [z, y, x] in degrees. + The rotation to apply to the data before interpolation. If defined as + an array, the order of rotations is [z, y, x] in degrees. rot_origin: array_like or ['com', 'midpoint'], optional - Point of rotation of the data. Only applies to 3D datasets. If array_like, - then the [x, y, z] coordinates specify the point around which the data is - rotated. If 'com', then data is rotated around the centre of mass. If - 'midpoint', then data is rotated around the midpoint, that is, min + max - / 2. Defaults to the midpoint. + Point of rotation of the data. Only applies to 3D datasets. If + array_like, then the [x, y, z] coordinates specify the point around + which the data is rotated. If 'com', then data is rotated around the + centre of mass. If 'midpoint', then data is rotated around the + midpoint, that is, min + max / 2. Defaults to the midpoint. 
x_pixels, y_pixels, z_pixels: int, optional - Number of pixels in the output image in the x, y & z directions. Default values are chosen to keep - a consistent aspect ratio. + Number of pixels in the output image in the x, y & z directions. + Default values are chosen to keep a consistent aspect ratio. xlim, ylim, zlim: tuple of float, optional - The minimum and maximum values to use in interpolation, in particle data space. Defaults - to the minimum and maximum values of `x`, `y` and `z`. + The minimum and maximum values to use in interpolation, in particle + data space. Defaults to the minimum and maximum values of `x`, `y` + and `z`. backend: ['cpu', 'gpu'] - The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise - 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + The computation backend to use when interpolating this data. Defaults + to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually + specified backend in `data` will override the default. + dens_weight: bool, optional If True, the target will be multiplied by density. Defaults to False. - hmin: bool - If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle - contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). + normalize: bool, optional + If True, will normalize the interpolation. Defaults to False (this may + change in future versions). + hmin: bool, optional + If True, a minimum smoothing length of 0.5 * pixel size will be + imposed. This ensures each particle contributes to at least one grid + cell / pixel. Defaults to False (this may change in a future verison). Returns ------- ndarray (3-Dimensional) - The interpolated output image, in a 3-dimensional numpy array. Dimensions are structured in reverse order, - where (x, y, z) -> [z, y, x]. 
+ The interpolated output image, in a 3-dimensional numpy array. + Dimensions are structured in reverse order, where (x, y, z) -> + [z, y, x]. Raises ------- ValueError - If `x_pixels`, `y_pixels` or `z_pixels` are less than or equal to zero, or - if the specified `x`, `y` and `z` minimum and maximum values result in an invalid region, or - if `data` is not 3-dimensional. + If `x_pixels`, `y_pixels` or `z_pixels` are less than or equal to zero, + or if the specified `x`, `y` and `z` minimum and maximum values result + in an invalid region, or if `data` is not 3-dimensional. KeyError - If `target`, `x`, `y`, `z`, mass, density, or smoothing length columns do not - exist in `data`. + If `target`, `x`, `y`, `z`, mass, density, or smoothing length columns + do not exist in `data`. """ _check_dimension(data, 3) x, y, z = _default_xyz(data, x, y, z) @@ -1356,7 +1549,11 @@ def interpolate_3d_grid(data: 'SarracenDataFrame', zlim = zlim if zlim else (data.loc[:, z].min(), data.loc[:, z].max()) x_pixels, y_pixels = _set_pixels(x_pixels, y_pixels, xlim, ylim) - z_pixels = int(np.rint(x_pixels * ((zlim[1] - zlim[0]) / (xlim[1] - xlim[0])))) if z_pixels is None else z_pixels + if z_pixels is None: + dz = zlim[1] - zlim[0] + dx = xlim[1] - xlim[0] + z_pixels = int(np.rint(x_pixels * (dz / dx))) + _check_boundaries(x_pixels, y_pixels, xlim, ylim) if zlim[1] - zlim[0] <= 0: raise ValueError("`z_max` must be greater than `z_min`!") @@ -1366,19 +1563,24 @@ def interpolate_3d_grid(data: 'SarracenDataFrame', kernel = kernel if kernel is not None else data.kernel backend = backend if backend is not None else data.backend - x_data, y_data, z_data = _rotate_xyz(data, x, y, data.zcol, rotation, rot_origin) - h_data = _get_smoothing_lengths(data, hmin, x_pixels, y_pixels, xlim, ylim) + x_data, y_data, z_data = _rotate_xyz(data, x, y, data.zcol, + rotation, rot_origin) + h_data = _get_smoothing_lengths(data, hmin, x_pixels, y_pixels, + xlim, ylim) - grid = get_backend(backend) \ - 
.interpolate_3d_grid(x_data, y_data, z_data, w_data, h_data, kernel.w, kernel.get_radius(), - x_pixels, y_pixels, z_pixels, xlim[0], xlim[1], ylim[0], ylim[1], zlim[0], zlim[1]) + grid = get_backend(backend)\ + .interpolate_3d_grid(x_data, y_data, z_data, w_data, h_data, kernel.w, + kernel.get_radius(), x_pixels, y_pixels, z_pixels, + xlim[0], xlim[1], ylim[0], ylim[1], + zlim[0], zlim[1]) if normalize: w_norm = _get_weight(data, np.array([1] * len(w_data)), dens_weight) - norm_grid = get_backend(backend) \ - .interpolate_3d_grid(x_data, y_data, z_data, w_norm, h_data, kernel.w, - kernel.get_radius(), x_pixels, y_pixels, z_pixels, xlim[0], xlim[1], - ylim[0], ylim[1], zlim[0], zlim[1]) + norm_grid = get_backend(backend)\ + .interpolate_3d_grid(x_data, y_data, z_data, w_norm, h_data, + kernel.w, kernel.get_radius(), x_pixels, + y_pixels, z_pixels, xlim[0], xlim[1], ylim[0], + ylim[1], zlim[0], zlim[1]) grid = np.nan_to_num(grid / norm_grid) return grid @@ -1391,8 +1593,8 @@ def get_backend(code: str) -> BaseBackend: Parameters ---------- code: str - The code associated with the particular backend. At the moment, 'cpu' for the CPU backend, and 'gpu' for - the GPU backend are supported. + The code associated with the particular backend. At the moment, 'cpu' + for the CPU backend, and 'gpu' for the GPU backend are supported. Returns ------- @@ -1402,4 +1604,4 @@ def get_backend(code: str) -> BaseBackend: return CPUBackend if code == 'gpu': return GPUBackend - raise ValueError("Invalid code!") + raise ValueError("Invalid backend") diff --git a/sarracen/render.py b/sarracen/render.py index c7d68c6..1bc7ba5 100644 --- a/sarracen/render.py +++ b/sarracen/render.py @@ -205,14 +205,14 @@ def render(data: 'SarracenDataFrame', midpoint, that is, min + max / 2. Defaults to the midpoint. log_scale: bool Whether to use a logarithmic scale for color coding. - dens_weight: bool + dens_weight: bool, optional If True, will plot the target mutliplied by the density. 
Defaults to True for column-integrated views, when the target is not density, and False for everything else. - normalize: bool + normalize: bool, optional If True, will normalize the interpolation. Defaults to False (this may change in future versions). - hmin: bool + hmin: bool, optional If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). @@ -390,13 +390,13 @@ def lineplot(data: 'SarracenDataFrame', specified backend in `data` will override the default. log_scale: bool Whether to use a logarithmic scale for color coding. - dens_weight: bool + dens_weight: bool, optional If True, will plot the target mutliplied by the density. Defaults to False. - normalize: bool + normalize: bool, optional If True, will normalize the interpolation. Defaults to False (this may change in future versions). - hmin: bool + hmin: bool, optional If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). @@ -540,20 +540,20 @@ def streamlines(data: 'SarracenDataFrame', data space. Defaults to the minimum and maximum values of `x` and `y`. ax: Axes The main axes in which to draw the rendered image. - exact: bool + exact: bool, optional Whether to use exact interpolation of the data. For cross-sections this is ignored. Defaults to False. backend: ['cpu', 'gpu'] The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + dens_weight: bool, optional If True, will plot the target mutliplied by the density. Defaults to True for column-integrated views and False for everything else. 
- normalize: bool + normalize: bool, optional If True, will normalize the interpolation. Defaults to False (this may change in future versions). - hmin: bool + hmin: bool, optional If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). @@ -715,13 +715,13 @@ def arrowplot(data: 'SarracenDataFrame', The computation backend to use when interpolating this data. Defaults to 'gpu' if CUDA is enabled, otherwise 'cpu' is used. A manually specified backend in `data` will override the default. - dens_weight: bool + dens_weight: bool, optional If True, will plot the target mutliplied by the density. Defaults to True for column-integrated views and False for everything else. - normalize: bool + normalize: bool, optional If True, will normalize the interpolation. Defaults to False (this may change in future versions). - hmin: bool + hmin: bool, optional If True, a minimum smoothing length of 0.5 * pixel size will be imposed. This ensures each particle contributes to at least one grid cell / pixel. Defaults to False (this may change in a future verison). 
From fae05a5f67becbbe5e0420d9cdf39c866a84217c Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Sat, 20 Jul 2024 23:27:20 -0230 Subject: [PATCH 08/12] ignore F821 errors arising from circular import avoidance --- .flake8 | 1 - sarracen/interpolate/interpolate.py | 26 +++++++++++++------------- sarracen/render.py | 8 ++++---- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/.flake8 b/.flake8 index 0b79836..df7263f 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,6 @@ [flake8] per-file-ignores = sarracen/__init__.py:F401 - sarracen/render.py:F821 sarracen/kernels/__init__.py:F401 sarracen/interpolate/__init__.py:F401 sarracen/disc/__init__.py:F401 \ No newline at end of file diff --git a/sarracen/interpolate/interpolate.py b/sarracen/interpolate/interpolate.py index f16a1a4..928d638 100644 --- a/sarracen/interpolate/interpolate.py +++ b/sarracen/interpolate/interpolate.py @@ -387,7 +387,7 @@ def _corotate(corotation, rotation): return rotation, rot_origin -def _get_mass(data: 'SarracenDataFrame'): +def _get_mass(data: 'SarracenDataFrame'): # noqa: F821 if data.mcol is None: if 'mass' not in data.params: raise KeyError("'mass' column does not exist in this " @@ -397,7 +397,7 @@ def _get_mass(data: 'SarracenDataFrame'): return data[data.mcol].to_numpy() -def _get_density(data: 'SarracenDataFrame'): +def _get_density(data: 'SarracenDataFrame'): # noqa: F821 if data.rhocol is None: if data.hcol not in data.columns or 'hfact' not in data.params: raise KeyError('Density cannot be derived from the columns in ' @@ -410,7 +410,7 @@ def _get_density(data: 'SarracenDataFrame'): return data[data.rhocol].to_numpy() -def _get_weight(data: 'SarracenDataFrame', +def _get_weight(data: 'SarracenDataFrame', # noqa: F821 target: Union[str, np.ndarray], dens_weight: bool): @@ -437,7 +437,7 @@ def _get_weight(data: 'SarracenDataFrame', return target_data * mass_data / rho_data -def _get_smoothing_lengths(data: 'SarracenDataFrame', +def _get_smoothing_lengths(data: 
'SarracenDataFrame', # noqa: F821 hmin: float, x_pixels: int, y_pixels: int, @@ -455,7 +455,7 @@ def _get_smoothing_lengths(data: 'SarracenDataFrame', return h_data -def interpolate_2d(data: 'SarracenDataFrame', +def interpolate_2d(data: 'SarracenDataFrame', # noqa: F821 target: str, x: str = None, y: str = None, @@ -559,7 +559,7 @@ def interpolate_2d(data: 'SarracenDataFrame', return grid -def interpolate_2d_vec(data: 'SarracenDataFrame', +def interpolate_2d_vec(data: 'SarracenDataFrame', # noqa: F821 target_x: str, target_y: str, x: str = None, @@ -669,7 +669,7 @@ def interpolate_2d_vec(data: 'SarracenDataFrame', return (gridx, gridy) -def interpolate_2d_line(data: 'SarracenDataFrame', +def interpolate_2d_line(data: 'SarracenDataFrame', # noqa: F821 target: str, x: str = None, y: str = None, @@ -782,7 +782,7 @@ def interpolate_2d_line(data: 'SarracenDataFrame', return grid -def interpolate_3d_line(data: 'SarracenDataFrame', +def interpolate_3d_line(data: 'SarracenDataFrame', # noqa: F821 target: str, x: str = None, y: str = None, @@ -905,7 +905,7 @@ def interpolate_3d_line(data: 'SarracenDataFrame', return grid -def interpolate_3d_proj(data: 'SarracenDataFrame', +def interpolate_3d_proj(data: 'SarracenDataFrame', # noqa: F821 target: str, x: str = None, y: str = None, @@ -1044,7 +1044,7 @@ def interpolate_3d_proj(data: 'SarracenDataFrame', return grid -def interpolate_3d_vec(data: 'SarracenDataFrame', +def interpolate_3d_vec(data: 'SarracenDataFrame', # noqa: F821 target_x: str, target_y: str, target_z: str, @@ -1183,7 +1183,7 @@ def interpolate_3d_vec(data: 'SarracenDataFrame', return (gridx, gridy) -def interpolate_3d_cross(data: 'SarracenDataFrame', +def interpolate_3d_cross(data: 'SarracenDataFrame', # noqa: F821 target: str, x: str = None, y: str = None, @@ -1317,7 +1317,7 @@ def interpolate_3d_cross(data: 'SarracenDataFrame', return grid -def interpolate_3d_cross_vec(data: 'SarracenDataFrame', +def interpolate_3d_cross_vec(data: 'SarracenDataFrame', # noqa: 
F821 target_x: str, target_y: str, target_z: str, @@ -1451,7 +1451,7 @@ def interpolate_3d_cross_vec(data: 'SarracenDataFrame', return (gridx, gridy) -def interpolate_3d_grid(data: 'SarracenDataFrame', +def interpolate_3d_grid(data: 'SarracenDataFrame', # noqa: F821 target: str, x: str = None, y: str = None, diff --git a/sarracen/render.py b/sarracen/render.py index 1bc7ba5..84a1032 100644 --- a/sarracen/render.py +++ b/sarracen/render.py @@ -121,7 +121,7 @@ def _set_pixels(x_pixels, y_pixels, xlim, ylim, default): return x_pixels, y_pixels -def render(data: 'SarracenDataFrame', +def render(data: 'SarracenDataFrame', # noqa: F821 target: str, x: str = None, y: str = None, @@ -347,7 +347,7 @@ def render(data: 'SarracenDataFrame', return ax -def lineplot(data: 'SarracenDataFrame', +def lineplot(data: 'SarracenDataFrame', # noqa: F821 target: str, x: str = None, y: str = None, @@ -474,7 +474,7 @@ def lineplot(data: 'SarracenDataFrame', return ax -def streamlines(data: 'SarracenDataFrame', +def streamlines(data: 'SarracenDataFrame', # noqa: F821 target: Union[Tuple[str, str], Tuple[str, str, str]], x: str = None, y: str = None, @@ -636,7 +636,7 @@ def streamlines(data: 'SarracenDataFrame', return ax -def arrowplot(data: 'SarracenDataFrame', +def arrowplot(data: 'SarracenDataFrame', # noqa: F821 target: Union[Tuple[str, str], Tuple[str, str, str]], x: str = None, y: str = None, From d25957a77c6396e5750b8f7147c7f185bde4205d Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Sat, 20 Jul 2024 23:44:06 -0230 Subject: [PATCH 09/12] lint cpu_backend --- docs/conf.py | 5 +- sarracen/interpolate/cpu_backend.py | 90 ++++++++++++++++++----------- 2 files changed, 57 insertions(+), 38 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 98416f2..b3a227c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,7 +10,6 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# -import os import sys from pathlib import Path @@ -53,8 +52,8 @@ # a list of builtin themes. # html_theme = "sphinx_rtd_theme" -#html_theme = "mpl_sphinx_theme" -#html_theme = "pydata_sphinx_theme" +# html_theme = "mpl_sphinx_theme" +# html_theme = "pydata_sphinx_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff --git a/sarracen/interpolate/cpu_backend.py b/sarracen/interpolate/cpu_backend.py index 5587b49..71a95dd 100644 --- a/sarracen/interpolate/cpu_backend.py +++ b/sarracen/interpolate/cpu_backend.py @@ -241,8 +241,8 @@ def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, output_local = np.zeros((get_num_threads(), y_pixels, x_pixels)) - # thread safety: each thread has its own grid, which are combined - # after interpolation + # thread safety: + # each thread has its own grid, which are combined after interpolation for thread in prange(get_num_threads()): block_size = x_data.size / get_num_threads() range_start = int(thread * block_size) @@ -276,15 +276,15 @@ def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, jpixmax = y_pixels # precalculate differences in the x-direction (optimization) - dx2i = (x_min + (np.arange(ipixmin, ipixmax) + 0.5) * pixwidthx - - x_data[i])**2 * (1 / (h_data[i]**2)) + ((dz[i]**2) * (1 / h_data[i]**2)) + dx2i = ((x_min + (np.arange(ipixmin, ipixmax) + 0.5) + * pixwidthx - x_data[i])**2 + dz[i]**2) / h_data[i]**2 # determine differences in the y-direction ypix = y_min + (np.arange(jpixmin, jpixmax) + 0.5) * pixwidthy dy = ypix - y_data[i] dy2 = dy * dy * (1 / (h_data[i] ** 2)) - # calculate contributions at pixels i, j due to particle at x, y + # calculate contributions at pixels i, j from particle at x, y q2 = dx2i + dy2.reshape(len(dy2), 1) for jpix in range(jpixmax - jpixmin): @@ -292,7 +292,9 @@ def _fast_2d(x_data, y_data, z_data, z_slice, w_data, h_data, if np.sqrt(q2[jpix][ipix]) > 
kernel_radius: continue wab = weight_function(np.sqrt(q2[jpix][ipix]), n_dims) - output_local[thread][jpix + jpixmin, ipix + ipixmin] += term[i] * wab + jp = jpix + jpixmin + ip = ipix + ipixmin + output_local[thread][jp, ip] += term[i] * wab for i in range(get_num_threads()): output += output_local[i] @@ -401,7 +403,7 @@ def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, # the value of the top boundary of the pixel below this # pixel. if jpix < jpixmax - 1: - output_local[thread, jpix + 1, ipix] -= term[i] * wab + output_local[thread, jpix+1, ipix] -= term[i] * wab # Right Boundaries r0 = 0.5 * pixwidthx + dx @@ -416,7 +418,7 @@ def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, # the value of the left boundary of the pixel to the # right of this pixel. if ipix < ipixmax - 1: - output_local[thread, jpix, ipix + 1] -= term[i] * wab + output_local[thread, jpix, ipix+1] -= term[i] * wab output = np.zeros((y_pixels, x_pixels)) @@ -449,12 +451,12 @@ def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, # does not contribute to the cross-section, and can be removed. 
aa = 1 + gradient ** 2 bb = 2 * gradient * (yint - y_data) - 2 * x_data - cc = x_data**2 + y_data**2 - 2 * yint * y_data + yint**2 - (kernel_radius * h_data)**2 + cc = x_data**2 + y_data**2 - 2 * yint * y_data \ + + yint**2 - (kernel_radius * h_data)**2 det = bb ** 2 - 4 * aa * cc - # create a filter for particles that do not contribute to the - # cross-section - filter_det = det >= 0 + # create a filter for particles that do not contribute + filter = det >= 0 det = np.sqrt(det) cc = None @@ -462,13 +464,15 @@ def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, # the starting and ending x coordinates of the lines intersections with # a particle's smoothing circle - xstart = ((-bb[filter_det] - det[filter_det]) / (2 * aa)).clip(a_min=x1, a_max=x2) - xend = ((-bb[filter_det] + det[filter_det]) / (2 * aa)).clip(a_min=x1, a_max=x2) + xstart = ((-bb[filter] - det[filter]) + / (2 * aa)).clip(a_min=x1, a_max=x2) + xend = ((-bb[filter] + det[filter]) + / (2 * aa)).clip(a_min=x1, a_max=x2) bb, det = None, None - # the start and end distances which lie within a particle's smoothing - # circle. 
- rstart = np.sqrt((xstart - x1)**2 + ((gradient * xstart + yint) - y1)**2) + # start and end distances that are within a particle's smoothing circle + rstart = np.sqrt((xstart - x1)**2 + + ((gradient * xstart + yint) - y1)**2) rend = np.sqrt((xend - x1)**2 + (((gradient * xend + yint) - y1)**2)) xstart, xend = None, None @@ -479,29 +483,30 @@ def _fast_2d_cross_cpu(x_data, y_data, w_data, h_data, weight_function, output_local = np.zeros((get_num_threads(), pixels)) - # thread safety: each thread has its own grid, which are combined after - # interpolation + # thread safety: + # each thread has its own grid, which are combined after interpolation for thread in prange(get_num_threads()): - block_size = len(x_data[filter_det]) / get_num_threads() + block_size = len(x_data[filter]) / get_num_threads() range_start = thread * block_size range_end = (thread + 1) * block_size # iterate through the indices of all non-filtered particles for i in range(range_start, range_end): - # determine contributions to all affected pixels for this - # particle - xpix = x1 + (np.arange(int(ipixmin[i]), int(ipixmax[i])) + 0.5) * xpixwidth + # determine contributions to all pixels for this particle + xpix = x1 + (np.arange(int(ipixmin[i]), int(ipixmax[i])) + + 0.5) * xpixwidth ypix = gradient * xpix + yint - dy = ypix - y_data[filter_det][i] - dx = xpix - x_data[filter_det][i] + dy = ypix - y_data[filter][i] + dx = xpix - x_data[filter][i] - q2 = (dx * dx + dy * dy) * (1 / (h_data[filter_det][i] * h_data[filter_det][i])) + q2 = (dx**2 + dy**2) / h_data[filter][i]**2 wab = weight_function(np.sqrt(q2), 2) # add contributions to output total for ipix in range(int(ipixmax[i]) - int(ipixmin[i])): - output_local[thread][ipix + int(ipixmin[i])] += term[filter_det][i] * wab[ipix] + ip = ipix + int(ipixmin[i]) + output_local[thread][ip] += term[filter][i] * wab[ipix] for i in range(get_num_threads()): output += output_local[i] @@ -543,15 +548,18 @@ def _fast_3d_line(x_data, y_data, z_data, w_data, 
h_data, weight_function, pixmin = min(max(0, round((d1 / length) * pixels)), pixels) pixmax = min(max(0, round((d2 / length) * pixels)), pixels) - xpix = x1 + (np.arange(pixmin, pixmax) + 0.5) * (x2 - x1) / pixels - ypix = y1 + (np.arange(pixmin, pixmax) + 0.5) * (y2 - y1) / pixels - zpix = z1 + (np.arange(pixmin, pixmax) + 0.5) * (z2 - z1) / pixels + xpix = x1 + (np.arange(pixmin, pixmax) + + 0.5) * (x2 - x1) / pixels + ypix = y1 + (np.arange(pixmin, pixmax) + + 0.5) * (y2 - y1) / pixels + zpix = z1 + (np.arange(pixmin, pixmax) + + 0.5) * (z2 - z1) / pixels xdiff = xpix - x_data[i] ydiff = ypix - y_data[i] zdiff = zpix - z_data[i] - q2 = (xdiff ** 2 + ydiff ** 2 + zdiff ** 2) * (1 / (h_data[i] ** 2)) + q2 = (xdiff**2 + ydiff**2 + zdiff**2) / h_data[i]**2 wab = weight_function(np.sqrt(q2), 3) for ipix in range(pixmax - pixmin): @@ -633,15 +641,27 @@ def _exact_3d_project(x_data, y_data, w_data, h_data, x_pixels, y_pixels, h_data[i]) # x-z surfaces - pixint += surface_int(ypix - y_data[i] + 0.5 * pixwidthy, x_data[i], 0, xpix, 0, pixwidthx, + pixint += surface_int(ypix - y_data[i] + + 0.5 * pixwidthy, + x_data[i], 0, + xpix, 0, pixwidthx, pixwidthz, h_data[i]) - pixint += surface_int(y_data[i] - ypix + 0.5 * pixwidthy, x_data[i], 0, xpix, 0, pixwidthx, + pixint += surface_int(y_data[i] - ypix + + 0.5 * pixwidthy, + x_data[i], 0, + xpix, 0, pixwidthx, pixwidthz, h_data[i]) # y-z surfaces - pixint += surface_int(xpix - x_data[i] + 0.5 * pixwidthx, 0, y_data[i], 0, ypix, pixwidthz, + pixint += surface_int(xpix - x_data[i] + + 0.5 * pixwidthx, + 0, y_data[i], 0, + ypix, pixwidthz, pixwidthy, h_data[i]) - pixint += surface_int(x_data[i] - xpix + 0.5 * pixwidthx, 0, y_data[i], 0, ypix, pixwidthz, + pixint += surface_int(x_data[i] - xpix + + 0.5 * pixwidthx, + 0, y_data[i], 0, + ypix, pixwidthz, pixwidthy, h_data[i]) wab = pixint * dfac[i] From e8675e9a6cc4db311e1bcd19004e648d1ae25508 Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Sun, 21 Jul 2024 11:36:07 -0230 
Subject: [PATCH 10/12] lint gpu_backend --- sarracen/interpolate/gpu_backend.py | 110 +++++++++++++++++----------- 1 file changed, 66 insertions(+), 44 deletions(-) diff --git a/sarracen/interpolate/gpu_backend.py b/sarracen/interpolate/gpu_backend.py index b084eec..63e4ea1 100644 --- a/sarracen/interpolate/gpu_backend.py +++ b/sarracen/interpolate/gpu_backend.py @@ -278,14 +278,13 @@ def _2d_func(z_slice, x_data, y_data, z_data, w_data, h_data, # determine difference in the x-direction xpix = x_min + ((ipix + ipixmin) + 0.5) * pixwidthx dx = xpix - x_data[i] - dx2 = dx * dx * (1 / (h_data[i] ** 2)) + dx2 = dx**2 / h_data[i]**2 # determine difference in the y-direction ypix = y_min + ((jpix + jpixmin) + 0.5) * pixwidthy dy = ypix - y_data[i] - dy2 = dy * dy * (1 / (h_data[i] ** 2)) - - dz2 = ((dz ** 2) * (1 / h_data[i] ** 2)) + dy2 = dy**2 / h_data[i]**2 + dz2 = dz**2 / h_data[i]**2 # calculate contributions at pixels i, j due to # particle at x, y @@ -293,13 +292,14 @@ def _2d_func(z_slice, x_data, y_data, z_data, w_data, h_data, # add contribution to image if q < kernel_radius: - # atomic add protects the summation against race - # conditions. + # atomic protects against race conditions. wab = weight_function(q, n_dims) - cuda.atomic.add(image, (jpix + jpixmin, ipix + ipixmin), term * wab) + jp = jpix + jpixmin + ip = ipix + ipixmin + cuda.atomic.add(image, (jp, ip), term * wab) threadsperblock = 32 - blockspergrid = (x_data.size + (threadsperblock - 1)) // threadsperblock + blockspergrid = (x_data.size + (threadsperblock-1)) // threadsperblock # transfer relevant data to the GPU d_x = cuda.to_device(x_data) @@ -312,13 +312,15 @@ def _2d_func(z_slice, x_data, y_data, z_data, w_data, h_data, d_image = cuda.to_device(np.zeros((y_pixels, x_pixels))) # execute the newly compiled CUDA kernel. 
- _2d_func[blockspergrid, threadsperblock](z_slice, d_x, d_y, d_z, d_w, d_h, kernel_radius, x_pixels, y_pixels, - x_min, x_max, y_min, y_max, n_dims, d_image) + _2d_func[blockspergrid, threadsperblock](z_slice, d_x, d_y, d_z, d_w, + d_h, kernel_radius, x_pixels, + y_pixels, x_min, x_max, y_min, + y_max, n_dims, d_image) return d_image.copy_to_host() - # Underlying CPU numba-compiled code for exact interpolation of 2D data t - # o a 2D grid. + # Underlying CPU numba-compiled code for exact interpolation of 2D data + # to a 2D grid. @staticmethod def _exact_2d_render(x_data, y_data, w_data, h_data, x_pixels, y_pixels, x_min, x_max, y_min, y_max): @@ -414,7 +416,7 @@ def _2d_func(x_data, y_data, w_data, h_data, image): # this pixel. cuda.atomic.add(image, (jpix, ipix), term * wab) if jpix < jpixmax - 1: - cuda.atomic.sub(image, (jpix + 1, ipix), term * wab) + cuda.atomic.sub(image, (jpix+1, ipix), term * wab) # Right Boundaries r0 = 0.5 * pixwidthx + dx @@ -429,10 +431,10 @@ def _2d_func(x_data, y_data, w_data, h_data, image): # the value of the left boundary of the pixel to the # right of this pixel. if ipix < ipixmax - 1: - cuda.atomic.sub(image, (jpix, ipix + 1), term * wab) + cuda.atomic.sub(image, (jpix, ipix+1), term * wab) threadsperblock = 32 - blockspergrid = (x_data.size + (threadsperblock - 1)) // threadsperblock + blockspergrid = (x_data.size + (threadsperblock-1)) // threadsperblock # transfer relevant data to the GPU d_x = cuda.to_device(x_data) @@ -480,12 +482,11 @@ def _2d_func(x_data, y_data, w_data, h_data, kernel_radius, pixels, # negative, the particle does not contribute to the # cross-section, and can be removed. 
bb = 2 * gradient * (yint - y_data[i]) - 2 * x_data[i] - cc = x_data[i] ** 2 + y_data[i] ** 2 - 2 * yint * y_data[i] + yint ** 2 - ( - kernel_radius * h_data[i]) ** 2 + cc = x_data[i]**2 + y_data[i]**2 - 2 * yint * y_data[i] \ + + yint**2 - (kernel_radius * h_data[i])**2 det = bb ** 2 - 4 * aa * cc - # create a filter for particles that do not contribute to the - # cross-section. + # create a filter for particles that do not contribute if det < 0: return @@ -496,10 +497,12 @@ def _2d_func(x_data, y_data, w_data, h_data, kernel_radius, pixels, xstart = min(max(x1, (-bb - det) / (2 * aa)), x2) xend = min(max(x1, (-bb + det) / (2 * aa)), x2) - # the start and end distances which lie within a particle's + # start and end distances that are within a particle's # smoothing circle. - rstart = math.sqrt((xstart - x1) ** 2 + ((gradient * xstart + yint) - y1) ** 2) - rend = math.sqrt((xend - x1) ** 2 + (((gradient * xend + yint) - y1) ** 2)) + rstart = math.sqrt((xstart - x1)**2 + ((gradient * xstart + + yint) - y1)**2) + rend = math.sqrt((xend - x1)**2 + (((gradient * xend + + yint) - y1)**2)) # the max and min pixels that each particle contributes to. ipixmin = min(max(0, round(rstart / pixwidth)), pixels) @@ -507,21 +510,20 @@ def _2d_func(x_data, y_data, w_data, h_data, kernel_radius, pixels, # iterate through all affected pixels for ipix in range(ipixmin, ipixmax): - # determine contributions to all affected pixels for this - # particle + # determine contributions to all pixels for this particle xpix = x1 + (ipix + 0.5) * xpixwidth ypix = gradient * xpix + yint dy = ypix - y_data[i] dx = xpix - x_data[i] - q2 = (dx * dx + dy * dy) * (1 / (h_data[i] * h_data[i])) + q2 = (dx**2 + dy**2) / h_data[i]**2 wab = weight_function(math.sqrt(q2), 2) # add contributions to output total. 
cuda.atomic.add(image, ipix, wab * term) threadsperblock = 32 - blockspergrid = (x_data.size + (threadsperblock - 1)) // threadsperblock + blockspergrid = (x_data.size + (threadsperblock-1)) // threadsperblock # transfer relevant data to the GPU d_x = cuda.to_device(x_data) @@ -534,7 +536,9 @@ def _2d_func(x_data, y_data, w_data, h_data, kernel_radius, pixels, d_image = cuda.to_device(np.zeros(pixels)) # execute the newly compiled GPU kernel - _2d_func[blockspergrid, threadsperblock](d_x, d_y, d_w, d_h, kernel_radius, pixels, x1, x2, y1, y2, d_image) + _2d_func[blockspergrid, threadsperblock](d_x, d_y, d_w, d_h, + kernel_radius, pixels, x1, x2, + y1, y2, d_image) return d_image.copy_to_host() @@ -549,7 +553,8 @@ def _fast_3d_line(x_data, y_data, z_data, w_data, h_data, weight_function, ux, uy, uz = dx / length, dy / length, dz / length @cuda.jit(fastmath=True) - def _2d_func(x_data, y_data, z_data, w_data, h_data, kernel_radius, pixels, x1, x2, y1, y2, z1, z2, image): + def _2d_func(x_data, y_data, z_data, w_data, h_data, kernel_radius, + pixels, x1, x2, y1, y2, z1, z2, image): i = cuda.grid(1) if i < x_data.size: @@ -557,7 +562,8 @@ def _2d_func(x_data, y_data, z_data, w_data, h_data, kernel_radius, pixels, x1, dy = y1 - y_data[i] dz = z1 - z_data[i] delta = (ux * dx + uy * dy + uz * dz)**2 \ - - (dx**2 + dy**2 + dz**2) + (kernel_radius * h_data[i])**2 + - (dx**2 + dy**2 + dz**2) \ + + (kernel_radius * h_data[i])**2 if delta < 0: return @@ -584,7 +590,7 @@ def _2d_func(x_data, y_data, z_data, w_data, h_data, kernel_radius, pixels, x1, cuda.atomic.add(image, ipix, wab * term) threadsperblock = 32 - blockspergrid = (x_data.size + (threadsperblock - 1)) // threadsperblock + blockspergrid = (x_data.size + (threadsperblock-1)) // threadsperblock # transfer relevant data to the GPU d_x = cuda.to_device(x_data) @@ -598,8 +604,9 @@ def _2d_func(x_data, y_data, z_data, w_data, h_data, kernel_radius, pixels, x1, d_image = cuda.to_device(np.zeros(pixels)) # execute the newly 
compiled GPU kernel - _2d_func[blockspergrid, threadsperblock](d_x, d_y, d_z, d_w, d_h, kernel_radius, pixels, x1, x2, y1, y2, z1, z2, - d_image) + _2d_func[blockspergrid, threadsperblock](d_x, d_y, d_z, d_w, d_h, + kernel_radius, pixels, x1, x2, + y1, y2, z1, z2, d_image) return d_image.copy_to_host() @@ -654,33 +661,48 @@ def _3d_func(x_data, y_data, w_data, h_data, image): q2 = (dx ** 2 + dy ** 2) / h_data[i] ** 2 - if q2 < 4 + 3 * pixwidthx * pixwidthy / h_data[i] ** 2: + if q2 < 4 + 3 * pixwidthx * pixwidthy / h_data[i]**2: # Calculate the volume integral of this pixel by # summing the comprising surface integrals of each # surface of the cube. # x-y surfaces - pixint = 2 * surface_int(0.5 * pixwidthz, x_data[i], y_data[i], xpix, ypix, pixwidthx, - pixwidthy, h_data[i]) + pixint = 2 * surface_int(0.5 * pixwidthz, + x_data[i], y_data[i], + xpix, ypix, + pixwidthx, pixwidthy, + h_data[i]) # x-z surfaces - pixint += surface_int(ypix - y_data[i] + 0.5 * pixwidthy, x_data[i], 0, xpix, 0, pixwidthx, - pixwidthz, h_data[i]) - pixint += surface_int(y_data[i] - ypix + 0.5 * pixwidthy, x_data[i], 0, xpix, 0, pixwidthx, - pixwidthz, h_data[i]) + pixint += surface_int(ypix - y_data[i] + + 0.5 * pixwidthy, + x_data[i], 0, xpix, + 0, pixwidthx, pixwidthz, + h_data[i]) + pixint += surface_int(y_data[i] - ypix + + 0.5 * pixwidthy, + x_data[i], 0, xpix, + 0, pixwidthx, pixwidthz, + h_data[i]) # y-z surfaces - pixint += surface_int(xpix - x_data[i] + 0.5 * pixwidthx, 0, y_data[i], 0, ypix, pixwidthz, - pixwidthy, h_data[i]) - pixint += surface_int(x_data[i] - xpix + 0.5 * pixwidthx, 0, y_data[i], 0, ypix, pixwidthz, - pixwidthy, h_data[i]) + pixint += surface_int(xpix - x_data[i] + + 0.5 * pixwidthx, 0, + y_data[i], 0, ypix, + pixwidthz, pixwidthy, + h_data[i]) + pixint += surface_int(x_data[i] - xpix + + 0.5 * pixwidthx, 0, + y_data[i], 0, ypix, + pixwidthz, pixwidthy, + h_data[i]) wab = pixint * dfac cuda.atomic.add(image, (jpix, ipix), term * wab) threadsperblock = 32 - 
blockspergrid = (x_data.size + (threadsperblock - 1)) // threadsperblock + blockspergrid = (x_data.size + (threadsperblock-1)) // threadsperblock # transfer relevant data to the GPU d_x = cuda.to_device(x_data) From 93a3e764d6a3d630f9f4a23ab9fe8673e178f379 Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Sun, 21 Jul 2024 12:01:22 -0230 Subject: [PATCH 11/12] linting --- sarracen/readers/read_phantom.py | 6 ++++-- sarracen/render.py | 18 +++++++++--------- sarracen/sarracen_dataframe.py | 3 +-- sarracen/tests/test_render.py | 10 ++++++---- 4 files changed, 20 insertions(+), 17 deletions(-) diff --git a/sarracen/readers/read_phantom.py b/sarracen/readers/read_phantom.py index 482c915..03f779d 100644 --- a/sarracen/readers/read_phantom.py +++ b/sarracen/readers/read_phantom.py @@ -222,8 +222,10 @@ def _create_aprmass_column(df, header_vars): return df - -def read_phantom(filename: str, separate_types: str = 'sinks', ignore_inactive: bool = True): + +def read_phantom(filename: str, + separate_types: str = 'sinks', + ignore_inactive: bool = True): """ Read data from a Phantom dump file. diff --git a/sarracen/render.py b/sarracen/render.py index ccf48bf..a446ad4 100644 --- a/sarracen/render.py +++ b/sarracen/render.py @@ -207,10 +207,10 @@ def render(data: 'SarracenDataFrame', # noqa: F821 log_scale: bool Whether to use a logarithmic scale for color coding. symlog_scale: bool - Whether to use a symmetrical logarithmic scale for color coding (i.e., - allows positive and negative values). Optionally add "linthresh" and - "linscale" to kwargs to set the linear region and the scaling of linear - values, respectively (defaults to 1e-9 and 1, respectevely). Only works + Whether to use a symmetrical logarithmic scale for color coding (i.e., + allows positive and negative values). Optionally add "linthresh" and + "linscale" to kwargs to set the linear region and the scaling of linear + values, respectively (defaults to 1e-9 and 1, respectevely). 
Only works if log_scale == True. cototation: list, optional Moves particles to the co-rotating frame of two location. corotation @@ -314,14 +314,14 @@ def render(data: 'SarracenDataFrame', # noqa: F821 kwargs.setdefault("extent", [xlim[0], xlim[1], ylim[0], ylim[1]]) if log_scale: if symlog_scale: - kwargs.setdefault("norm", - SymLogNorm(kwargs.pop("linthresh", 1e-9), + kwargs.setdefault("norm", + SymLogNorm(kwargs.pop("linthresh", 1e-9), linscale=kwargs.pop("linscale", 1.), - vmin=kwargs.get('vmin'), + vmin=kwargs.get('vmin'), vmax=kwargs.get('vmax'))) else: - kwargs.setdefault("norm", LogNorm(clip=True, - vmin=kwargs.get('vmin'), + kwargs.setdefault("norm", LogNorm(clip=True, + vmin=kwargs.get('vmin'), vmax=kwargs.get('vmax'))) kwargs.pop("vmin", None) kwargs.pop("vmax", None) diff --git a/sarracen/sarracen_dataframe.py b/sarracen/sarracen_dataframe.py index 71ab7db..be0a2ca 100644 --- a/sarracen/sarracen_dataframe.py +++ b/sarracen/sarracen_dataframe.py @@ -302,10 +302,9 @@ def render(self, return render(self, target, x, y, z, xsec, kernel, x_pixels, y_pixels, xlim, ylim, cmap, cbar, cbar_kws, cbar_ax, ax, exact, backend, integral_samples, rotation, rot_origin, - log_scale, symlog_scale, dens_weight, normalize, hmin, + log_scale, symlog_scale, dens_weight, normalize, hmin, **kwargs) - @_copy_doc(lineplot) def lineplot(self, target: str, diff --git a/sarracen/tests/test_render.py b/sarracen/tests/test_render.py index c935361..dfdc053 100644 --- a/sarracen/tests/test_render.py +++ b/sarracen/tests/test_render.py @@ -153,8 +153,9 @@ def test_kwargs(backend): 'h': [1, 1], 'rho': [1, 1], 'm': [1, 1]} sdf_3 = SarracenDataFrame(data_3) sdf_3.backend = backend - df_4 = pd.DataFrame({'x': [-3, 6], 'y': [5, -1], 'z': [2, 1], 'P': [-1, 1], 'h': [1, 1], 'rho': [-1, -1], 'm': [1, 1]}) - sdf_4 = SarracenDataFrame(df_4) + data_4 = {'x': [-3, 6], 'y': [5, -1], 'z': [2, 1], 'P': [-1, 1], + 'h': [1, 1], 'rho': [-1, -1], 'm': [1, 1]} + sdf_4 = SarracenDataFrame(data_4) 
sdf_4.backend = backend for args in [{'data': sdf_2, 'xsec': None}, @@ -164,10 +165,11 @@ def test_kwargs(backend): render(args['data'], 'P', xsec=args['xsec'], ax=ax, origin='upper') assert ax.images[0].origin == 'upper' plt.close(fig) - + for arg in [True, False]: fig, ax = plt.subplots() - render(sdf_4, 'P', ax=ax, log_scale=arg, symlog_scale=True, origin='upper', vmin=-1., vmax=1.) + render(sdf_4, 'P', ax=ax, log_scale=arg, symlog_scale=True, + origin='upper', vmin=-1., vmax=1.) assert ax.images[0].origin == 'upper' plt.close(fig) From 626d6712595bfdcfa21a8644e76cd4634c80e320 Mon Sep 17 00:00:00 2001 From: Terrence Tricco Date: Sun, 21 Jul 2024 12:08:09 -0230 Subject: [PATCH 12/12] put flake8 configuration in the right place --- .flake8 => .github/linters/.flake8 | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .flake8 => .github/linters/.flake8 (100%) diff --git a/.flake8 b/.github/linters/.flake8 similarity index 100% rename from .flake8 rename to .github/linters/.flake8