From dcc694f6ed318f06c9a2746b479a9c34e51785ef Mon Sep 17 00:00:00 2001 From: Rose Pearson Date: Tue, 12 Nov 2024 09:50:44 +1300 Subject: [PATCH] Include large internal lakes (#268) * Fixes to make river estimation more robust to weird or incomplete geometries * Added lakes and added test coverage * renamed to make functions more general * fixup: Format Python code with Black * Update version * Update tests.yml after sunsetting of mumbaforge Migrate from mamba-forge * Updated tests for new Westport tiles * fixed spelling of patches. --------- Co-authored-by: github-actions --- .github/workflows/tests.yml | 7 +- pyproject.toml | 2 +- src/geofabrics/bathymetry_estimation.py | 2 +- src/geofabrics/dem.py | 100 ++++++------- src/geofabrics/geometry.py | 43 +++++- src/geofabrics/processor.py | 138 ++++++++++++++---- src/geofabrics/version.py | 2 +- .../instruction.json | 2 +- .../test_case.py | 2 +- .../test_case.py | 2 +- .../test_case.py | 2 +- .../test_case.py | 2 +- .../data/benchmark.nc | 4 +- .../data/lake_contours.gpkg | Bin 0 -> 98304 bytes .../data/lake_outline.gpkg | Bin 0 -> 98304 bytes .../instruction.json | 1 + 16 files changed, 211 insertions(+), 98 deletions(-) create mode 100755 tests/test_many_stages_waikanae/data/lake_contours.gpkg create mode 100755 tests/test_many_stages_waikanae/data/lake_outline.gpkg diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b6afc5d8..7fd9c57c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -39,14 +39,15 @@ jobs: - name: Setup miniconda uses: conda-incubator/setup-miniconda@v2 with: - auto-update-conda: true - miniforge-variant: Mambaforge + auto-update-conda: true # false + miniforge-version: latest channels: conda-forge # defaults automatically added python-version: ${{ matrix.python-version }} activate-environment: geofabrics_CI environment-file: environment_CI.yml - use-mamba: true auto-activate-base: false + # use-only-tar-bz2: true + - name: Conda list shell: pwsh diff --git a/pyproject.toml b/pyproject.toml index fc080aed..c7440234 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta" [project] name = "geofabrics" -version = "1.1.23" +version = "1.1.24" description = "A package for creating geofabrics for flood modelling." 
readme = "README.md" authors = [{ name = "Rose pearson", email = "rose.pearson@niwa.co.nz" }] diff --git a/src/geofabrics/bathymetry_estimation.py b/src/geofabrics/bathymetry_estimation.py index 8d8f4d3c..c4920b53 100644 --- a/src/geofabrics/bathymetry_estimation.py +++ b/src/geofabrics/bathymetry_estimation.py @@ -1925,6 +1925,7 @@ def _create_flat_water_polygon(self, cross_sections: geopandas.GeoDataFrame): start_xy = geopandas.GeoDataFrame( geometry=[shapely.geometry.LineString(start_xy)], crs=cross_sections.crs ) + start_xy = Channel(start_xy, resolution=self.cross_section_spacing) start_xy_spline = start_xy.get_parametric_spline_fit_points() @@ -2296,7 +2297,6 @@ def estimate_width_and_slope( maximum_threshold=max_threshold, min_channel_width=min_channel_width, ) - # generate a flat water polygon river_polygon = self._create_flat_water_polygon( cross_sections=cross_sections, diff --git a/src/geofabrics/dem.py b/src/geofabrics/dem.py index 8b90e147..b3504ba1 100644 --- a/src/geofabrics/dem.py +++ b/src/geofabrics/dem.py @@ -300,6 +300,7 @@ class DemBase(abc.ABC): "patch": 6, "stopbanks": 7, "masked feature": 8, + "lakes": 9, "interpolated": 0, "no data": -1, } @@ -1106,9 +1107,9 @@ def clip_within_polygon(self, polygon_paths: list, label: str): f"No clipping. Polygons {polygon_paths} do not overlap DEM." ) - def interpolate_elevations_within_polygon( + def add_points_within_polygon_chunked( self, - elevations: geometry.EstimatedElevationPoints, + elevations: geometry.ElevationPoints, method: str, cache_path: pathlib.Path, label: str, @@ -1171,7 +1172,7 @@ def interpolate_elevations_within_polygon( point_cloud = numpy.concatenate([edge_points, point_cloud]) # Save river points in a temporary laz file - lidar_file = cache_path / "waterways_points.laz" + lidar_file = cache_path / f"{label}_points.laz" pdal_pipeline_instructions = [ { "type": "writers.las", @@ -1196,7 +1197,7 @@ def interpolate_elevations_within_polygon( self.logger.info(f"Preparing {[len(chunked_dim_x), len(chunked_dim_y)]} chunks") # cycle through index chunks - and collect in a delayed array - self.logger.info("Running over ocean chunked") + self.logger.info(f"Running over {label} chunked") delayed_chunked_matrix = [] for i, dim_y in enumerate(chunked_dim_y): delayed_chunked_x = [] @@ -1246,11 +1247,12 @@ def interpolate_elevations_within_polygon( ) self._write_netcdf_conventions_in_place(self._dem, self.catchment_geometry.crs) - def interpolate_rivers( + def add_points_within_polygon_nearest_chunked( self, - elevations: geometry.EstimatedElevationPoints, + elevations: geometry.ElevationPoints, method: str, cache_path: pathlib.Path, + label: str, k_nearest_neighbours: int = 100, ) -> xarray.Dataset: """Performs interpolation from estimated bathymetry points within a polygon @@ -1273,23 +1275,21 @@ def interpolate_rivers( # Define the region to rasterise region_to_rasterise = elevations.polygons - # Extract and saveriver elevations - river_points = elevations.points_array - river_points_file = cache_path / "river_points.laz" + # Tempoarily save the points to add + points = elevations.points_array + points_file = cache_path / f"{label}_points.laz" pdal_pipeline_instructions = [ { "type": "writers.las", "a_srs": f"EPSG:" f"{crs['horizontal']}+" f"{crs['vertical']}", - "filename": str(river_points_file), + "filename": str(points_file), "compression": "laszip", } ] - pdal_pipeline = pdal.Pipeline( - json.dumps(pdal_pipeline_instructions), [river_points] - ) + pdal_pipeline = 
pdal.Pipeline(json.dumps(pdal_pipeline_instructions), [points]) pdal_pipeline.execute() - # Extract and save river/fan adjacent elevations from DEM + # Tempoarily save the adjacent points from the DEM edge_dem = self._dem.rio.clip( region_to_rasterise.dissolve().buffer(self.catchment_geometry.resolution), drop=True, @@ -1306,19 +1306,19 @@ def interpolate_rivers( drop=True, ) - # Define the river and mouth edge points + # Save provided points grid_x, grid_y = numpy.meshgrid(edge_dem.x, edge_dem.y) flat_x = grid_x.flatten() flat_y = grid_y.flatten() flat_z = edge_dem.z.values.flatten() mask_z = ~numpy.isnan(flat_z) - # Interpolate the estimated river bank heights along only the river + # Interpolate the estimated bank heights around the polygon if they exist if elevations.bank_heights_exist(): - # Get the estimated river bank heights and define a mask where nan - river_bank_points = elevations.bank_height_points() - river_bank_nan_mask = numpy.logical_not(numpy.isnan(river_bank_points["Z"])) - # Interpolate from the estimated river bank heights + # Get the estimated bank heights and define a mask where nan + bank_points = elevations.bank_height_points() + bank_nan_mask = numpy.logical_not(numpy.isnan(bank_points["Z"])) + # Interpolate from the estimated bank heights xy_out = numpy.concatenate( [[flat_x[mask_z]], [flat_y[mask_z]]], axis=0 ).transpose() @@ -1328,19 +1328,17 @@ def interpolate_rivers( "method": "linear", "strict": False, } - estimated_river_edge_z = elevation_from_points( - point_cloud=river_bank_points[river_bank_nan_mask], + estimated_edge_z = elevation_from_points( + point_cloud=bank_points[bank_nan_mask], xy_out=xy_out, options=options, ) # Use the estimated bank heights where lower than the DEM edge values - mask_z_river_edge = mask_z.copy() - mask_z_river_edge[:] = False - mask_z_river_edge[mask_z] = flat_z[mask_z] > estimated_river_edge_z - flat_z[mask_z_river_edge] = estimated_river_edge_z[ - flat_z[mask_z] > estimated_river_edge_z - ] + mask_z_edge = mask_z.copy() + mask_z_edge[:] = False + mask_z_edge[mask_z] = flat_z[mask_z] > estimated_edge_z + flat_z[mask_z_edge] = estimated_edge_z[flat_z[mask_z] > estimated_edge_z] # Use the flat_x/y/z to define edge points and heights edge_points = numpy.empty( @@ -1355,12 +1353,12 @@ def interpolate_rivers( edge_points["Y"] = flat_y[mask_z] edge_points["Z"] = flat_z[mask_z] - river_edge_file = cache_path / "river_edge_points.laz" + edge_file = cache_path / f"{label}_edge_points.laz" pdal_pipeline_instructions = [ { "type": "writers.las", "a_srs": f"EPSG:" f"{crs['horizontal']}+" f"{crs['vertical']}", - "filename": str(river_edge_file), + "filename": str(edge_file), "compression": "laszip", } ] @@ -1370,18 +1368,18 @@ def interpolate_rivers( pdal_pipeline.execute() if ( - len(river_points) < k_nearest_neighbours - or len(edge_points) < k_nearest_neighbours + len(points) < raster_options["k_nearest_neighbours"] + or len(edge_points) < raster_options["k_nearest_neighbours"] ): logging.info( - f"Fewer river or edge points than the default expected {k_nearest_neighbours}. " - f"Updating k_nearest_neighbours to {min(len(river_points), len(edge_points))}." + f"Fewer points or edge points than the default expected {raster_options['k_nearest_neighbours']}. " + f"Updating k_nearest_neighbours to {min(len(points), len(edge_points))}." 
) - k_nearest_neighbours = min(len(river_points), len(edge_points)) - if k_nearest_neighbours < 3: + raster_options["k_nearest_neighbours"] = min(len(points), len(edge_points)) + if raster_options["k_nearest_neighbours"] < 3: logging.warning( - f"Not enough river or edge points to meaningfully include {k_nearest_neighbours}. " - f"Exiting without including the river and edge points." + f"Not enough points or edge points to meaningfully include {raster_options['k_nearest_neighbours']}. " + f"Exiting without including the points and edge points." ) return @@ -1396,22 +1394,24 @@ def interpolate_rivers( self.logger.info(f"Preparing {[len(chunked_dim_x), len(chunked_dim_y)]} chunks") # cycle through index chunks - and collect in a delayed array - self.logger.info("Running over ocean chunked") + self.logger.info( + "Running over points chunked - nearest of points & edge points" + ) delayed_chunked_matrix = [] for i, dim_y in enumerate(chunked_dim_y): delayed_chunked_x = [] for j, dim_x in enumerate(chunked_dim_x): self.logger.debug(f"\tLiDAR chunk {[i, j]}") # Load in points - river_points = delayed_load_tiles_in_chunk( - lidar_files=[river_points_file], + points = delayed_load_tiles_in_chunk( + lidar_files=[points_file], source_crs=raster_options["crs"], chunk_region_to_tile=None, crs=raster_options["crs"], ) - river_edge_points = delayed_load_tiles_in_chunk( - lidar_files=[river_edge_file], + edge_points = delayed_load_tiles_in_chunk( + lidar_files=[edge_file], source_crs=raster_options["crs"], chunk_region_to_tile=None, crs=raster_options["crs"], @@ -1423,8 +1423,8 @@ def interpolate_rivers( delayed_elevation_over_chunk_from_nearest( dim_x=dim_x, dim_y=dim_y, - points=river_points, - edge_points=river_edge_points, + points=points, + edge_points=edge_points, options=raster_options, ), shape=(len(dim_y), len(dim_x)), @@ -1437,17 +1437,17 @@ def interpolate_rivers( elevations = dask.array.block(delayed_chunked_matrix) # Update DEM layers - copy everyhere within the region to rasterise - rivers_mask = clip_mask( + polygon_mask = clip_mask( self._dem.z, region_to_rasterise.geometry, self.chunk_size, ) - rivers_mask.load() - self._dem["z"] = self._dem.z.where(~rivers_mask, elevations) - mask = ~(rivers_mask & self._dem.z.notnull()) + polygon_mask.load() + self._dem["z"] = self._dem.z.where(~polygon_mask, elevations) + mask = ~(polygon_mask & self._dem.z.notnull()) self._dem["data_source"] = self._dem.data_source.where( mask, - self.SOURCE_CLASSIFICATION["rivers and fans"], + self.SOURCE_CLASSIFICATION[label], ) self._dem["lidar_source"] = self._dem.lidar_source.where( mask, self.SOURCE_CLASSIFICATION["no data"] diff --git a/src/geofabrics/geometry.py b/src/geofabrics/geometry.py index c44aabcb..005f870f 100644 --- a/src/geofabrics/geometry.py +++ b/src/geofabrics/geometry.py @@ -553,7 +553,7 @@ def sample(self) -> numpy.ndarray: return points -class EstimatedElevationPoints: +class ElevationPoints: """A class for accessing estimated or measured river, mouth and waterway elevations as points. Paired elevation and polygon files are expected. The elevations are used to interpolate elevations within the polygons. @@ -630,7 +630,7 @@ def _set_up( polygon_list.append(polygon_i) # Set CRS, clip to size and reset index if len(points_list) == 0: - self.logger.warning("No waterways elevations. Ignoring.") + self.logger.warning("No elevations. 
Ignoring.") self._points = [] self._polygon = [] return @@ -758,6 +758,43 @@ def z(self): return self._z +class ElevationContours(ElevationPoints): + """Resample at spatial resolution at points""" + + def __init__( + self, + points_files: list, + polygon_files: list, + catchment_geometry: CatchmentGeometry, + z_labels: list = None, + ): + super(ElevationContours, self).__init__( + points_files=points_files, + polygon_files=polygon_files, + catchment_geometry=catchment_geometry, + filter_osm_ids=[], + z_labels=z_labels, + ) + self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}") + + # convert contoursto samples points at resolution + self.sample_contours(self.catchment_geometry.resolution) + + def sample_contours(self, resolution: float) -> numpy.ndarray: + """Sample the contours at the specified resolution.""" + + # convert contours to multipoints + self._points.loc[:, "geometry"] = self._points.geometry.apply( + lambda row: shapely.geometry.MultiPoint( + [ + row.interpolate(i * resolution) + for i in range(int(numpy.ceil(row.length / resolution))) + ] + ) + ) + self._points = self._points.explode(index_parts=True, ignore_index=True) + + class TileInfo: """A class for working with tiling information.""" @@ -854,7 +891,7 @@ def _get_mouth_alignment(self): # Get the river alignment and clip to the river polygon aligned_channel = geopandas.read_file(self.aligned_channel_file) - river_polygon = geopandas.read_file(self.river_polygon_file) + river_polygon = geopandas.read_file(self.river_polygon_file).make_valid() aligned_channel = aligned_channel.clip(river_polygon) # Explode incase the aligned channel is clipped into a MultiPolyLine (x, y) = aligned_channel.explode().iloc[0].geometry.xy diff --git a/src/geofabrics/processor.py b/src/geofabrics/processor.py index 2023bae5..84bb73c3 100644 --- a/src/geofabrics/processor.py +++ b/src/geofabrics/processor.py @@ -290,6 +290,7 @@ def get_instruction_general(self, key: str, subkey: str = None): "interpolation": { "rivers": "rbf", "waterways": "cubic", + "lakes": "linear", "stopbanks": "nearest", "ocean": "rbf", "lidar": "idw", @@ -297,6 +298,7 @@ def get_instruction_general(self, key: str, subkey: str = None): }, "z_labels": { "waterways": "z", + "lakes": "z", "stopbanks": "z", "rivers": "z", "ocean": None, @@ -1233,7 +1235,7 @@ def add_hydrological_features( self.logger.info(f"Incorporating waterways: {elevations}") # Load in bathymetry - estimated_elevations = geometry.EstimatedElevationPoints( + estimated_elevations = geometry.ElevationPoints( points_files=elevations, polygon_files=polygons, filter_osm_ids=self.get_instruction_general( @@ -1248,7 +1250,7 @@ def add_hydrological_features( # Call interpolate river on the DEM - the class checks to see if any pixels # actually fall inside the polygon if len(estimated_elevations.polygons) > 0: # Skip if no waterways - hydrologic_dem.interpolate_elevations_within_polygon( + hydrologic_dem.add_points_within_polygon_chunked( elevations=estimated_elevations, method=self.get_instruction_general( key="interpolation", subkey="waterways" @@ -1265,6 +1267,65 @@ def add_hydrological_features( if cached_file is not None: self.clean_cached_file(cached_file) cached_file = temp_file + # Check for lakes + if "lakes" in self.instructions["data_paths"]: + # Loop through each lake in turn adding individually + subfolder = self.get_instruction_path(key="subfolder") + z_labels = self.get_instruction_general(key="z_labels", subkey="lakes") + lakes = self.instructions["data_paths"]["lakes"] + if 
isinstance(z_labels, str): + z_labels = [z_labels for i in range(len(lakes))] + elif not isinstance(z_labels, list) or len(z_labels) != len(lakes): + raise ValueError( + "There is a mismatch in length between the provided z_labels " + f"and the lakes: {z_labels} {lakes}" + ) + for index, lake_dict in enumerate(lakes): + elevation = pathlib.Path(lake_dict["elevations"]) + polygon = pathlib.Path(lake_dict["extents"]) + if not elevation.is_absolute(): + elevation = subfolder / elevation + if not polygon.is_absolute(): + polygon = subfolder / polygon + + self.logger.info(f"Incorporating lake: {elevation}") + # Load in elevations + elevations = geometry.ElevationContours( + points_files=[elevation], + polygon_files=[polygon], + catchment_geometry=self.catchment_geometry, + z_labels=z_labels[index], + ) + + if ( + len(elevations.points_array) == 0 + or elevations.polygons.area.sum() + < self.catchment_geometry.resolution**2 + ): + self.logger.warning( + "No points or an area less than one grid cell in " + f"lake {elevation}. Ignoring." + ) + continue + + # Add lake to DEM + hydrologic_dem.add_points_within_polygon_nearest_chunked( + elevations=elevations, + method=self.get_instruction_general( + key="interpolation", subkey="lakes" + ), + cache_path=temp_folder, + label="lakes", + ) + temp_file = temp_folder / f"dem_added_{index + 1}_lake.nc" + self.logger.info( + f"Save temp DEM with lake {index + 1} added to netCDF: {temp_file}" + ) + hydrologic_dem.save_and_load_dem(temp_file) + # Remove previous cached file and replace with new one + if cached_file is not None: + self.clean_cached_file(cached_file) + cached_file = temp_file # Load in river bathymetry and incorporate where discernable at the resolution if "rivers" in self.instructions["data_paths"]: # Loop through each river in turn adding individually @@ -1289,7 +1350,7 @@ def add_hydrological_features( self.logger.info(f"Incorporating river: {elevation}") # Load in bathymetry - estimated_elevations = geometry.EstimatedElevationPoints( + estimated_elevations = geometry.ElevationPoints( points_files=[elevation], polygon_files=[polygon], catchment_geometry=self.catchment_geometry, @@ -1309,12 +1370,13 @@ def add_hydrological_features( # Call interpolate river on the DEM - the class checks to see if any pixels # actually fall inside the polygon - hydrologic_dem.interpolate_rivers( + hydrologic_dem.add_points_within_polygon_nearest_chunked( elevations=estimated_elevations, method=self.get_instruction_general( key="interpolation", subkey="rivers" ), cache_path=temp_folder, + label="rivers and fans", ) temp_file = temp_folder / f"dem_added_{index + 1}_rivers.nc" self.logger.info( @@ -1346,7 +1408,7 @@ def add_hydrological_features( self.logger.info(f"Incorporating stopbanks: {elevations}") # Load in bathymetry - estimated_elevations = geometry.EstimatedElevationPoints( + estimated_elevations = geometry.ElevationPoints( points_files=elevations, polygon_files=polygons, catchment_geometry=self.catchment_geometry, @@ -1358,7 +1420,7 @@ def add_hydrological_features( # Call interpolate river on the DEM - the class checks to see if any pixels # actually fall inside the polygon if len(estimated_elevations.polygons) > 0: # Skip if no stopbanks - hydrologic_dem.interpolate_elevations_within_polygon( + hydrologic_dem.add_points_within_polygon_chunked( elevations=estimated_elevations, method=self.get_instruction_general( key="interpolation", subkey="stopbanks" @@ -1570,7 +1632,7 @@ def run(self): elevation_range=None, ) patch_paths = 
self.get_vector_or_raster_paths( - key="patchs", data_type="raster" + key="patches", data_type="raster" ) if self.get_patch_instruction("patch_on_top"): patch_paths = patch_paths[::-1] # Reverse so first ends up on top @@ -2641,8 +2703,11 @@ def align_channel_from_osm( # Note projection function is limited between [0, osm_channel.length] end_split_length = float(osm_channel.project(network_end)) start_split_length = float(osm_channel.project(network_start)) - # Ensure the OSM line is defined upstream - if start_split_length > end_split_length: + # Ensure the OSM line is defined mouth to upstream + if ( + start_split_length > end_split_length + or start_split_length >= float(osm_channel.length) / 2 + ): # Reverse direction of the geometry osm_channel.loc[0, "geometry"] = shapely.geometry.LineString( list(osm_channel.iloc[0].geometry.coords)[::-1] @@ -2658,20 +2723,17 @@ def align_channel_from_osm( osm_channel.loc[0].geometry, split_point.loc[0], tolerance=0.1 ) osm_channel = geopandas.GeoDataFrame( - { - "geometry": [ - list(shapely.ops.split(osm_channel, split_point.loc[0]).geoms)[ - 1 - ] - ] - }, + geometry=[ + list(shapely.ops.split(osm_channel, split_point.loc[0]).geoms)[1] + ], crs=crs, ) - else: + elif start_split_length == 0 and not self.get_bathymetry_instruction( + "keep_downstream_osm" + ): self.logger.warning( - "The OSM reference line starts upstream of the" - "network line. The bottom of the network will be" - "ignored over a stright line distance of " + "The OSM reference line starts upstream of the network line. The bottom " + "of the network will be ignored over a stright line distance of " f"{osm_channel.distance(network_start)}" ) # Clip end if needed - recacluate clip position incase front clipped. @@ -2682,22 +2744,34 @@ def align_channel_from_osm( osm_channel.loc[0].geometry, split_point.loc[0], tolerance=0.1 ) osm_channel = geopandas.GeoDataFrame( - { - "geometry": [ - list(shapely.ops.split(osm_channel, split_point.loc[0]).geoms)[ - 0 - ] - ] - }, + geometry=[ + list(shapely.ops.split(osm_channel, split_point.loc[0]).geoms)[0] + ], crs=crs, ) else: self.logger.warning( - "The OSM reference line ends downstream of the" - "network line. The top of the network will be" - "ignored over a stright line distance of " + "The OSM reference line ends upstream of the network line. The top of " + "the network will be ignored over a stright line distance of " f"{osm_channel.distance(network_end)}" ) + # In case of both network points at far end ensure only short end is returned + if start_split_length == 0 and end_split_length == 0: + split_point = osm_channel.interpolate(channel.length) + osm_channel = shapely.ops.snap( + osm_channel.loc[0].geometry, split_point.loc[0], tolerance=0.1 + ) + osm_channel = geopandas.GeoDataFrame( + geometry=[ + list(shapely.ops.split(osm_channel, split_point.loc[0]).geoms)[0] + ], + crs=crs, + ) + self.logger.warning( + "The OSM reference line ends upstream of both ends of the network line. It " + "will be clipped to the total length of the network line " + f"{channel.length}. Please review if unexpected." 
+ ) if self.debug: osm_channel.to_file( @@ -3789,7 +3863,7 @@ def maximum_elevation_in_polygon( if dem.x[-1] - dem.x[0] > 0 else slice(bbox[2], bbox[0]) ) - # breakpoint() + small_z = dem.z.sel(x=x_slice, y=y_slice) # clip to polygon and return minimum elevation @@ -3847,7 +3921,7 @@ def sample(geometry): for index, rows in points.groupby(level=0): dem_file = self.get_result_file_path(key="raw_dem", index=index) dem = self.load_dem(filename=dem_file) - # breakpoint() + zs = rows["polygons"].apply( lambda geometry: self.maximum_elevation_in_polygon( geometry=geometry, dem=dem diff --git a/src/geofabrics/version.py b/src/geofabrics/version.py index fda6eb8c..9a1caf91 100644 --- a/src/geofabrics/version.py +++ b/src/geofabrics/version.py @@ -3,4 +3,4 @@ Contains the package version information """ -__version__ = "1.1.23" +__version__ = "1.1.24" diff --git a/tests/test_add_patches_ngaruroro/instruction.json b/tests/test_add_patches_ngaruroro/instruction.json index 37019306..b940053c 100644 --- a/tests/test_add_patches_ngaruroro/instruction.json +++ b/tests/test_add_patches_ngaruroro/instruction.json @@ -17,7 +17,7 @@ "local_cache": "tests/test_add_patches_ngaruroro/data", "subfolder": "results", "extents": "catchment_boundary.geojson", - "patchs": ["../patch_1.nc", "../patch_2.tif"], + "patches": ["../patch_1.nc", "../patch_2.tif"], "raw_dem": "../initial_dem.nc", "result_dem": "test_dem.nc", "benchmark_dem": "benchmark_dem.nc" diff --git a/tests/test_dem_generation_westport_1/test_case.py b/tests/test_dem_generation_westport_1/test_case.py index 110cb866..a8e9c36f 100644 --- a/tests/test_dem_generation_westport_1/test_case.py +++ b/tests/test_dem_generation_westport_1/test_case.py @@ -55,7 +55,7 @@ class Test(base_test.Test): "CL2_BR20_2020_1000_4212.laz": 8340310, "CL2_BR20_2020_1000_4213.laz": 6094309, "CL2_BR20_2020_1000_4214.laz": 8492543, - DATASET + "_TileIndex.zip": 1125874, + DATASET + "_TileIndex.zip": 1848391, } @classmethod diff --git a/tests/test_dem_generation_westport_2/test_case.py b/tests/test_dem_generation_westport_2/test_case.py index e2dbf82c..c404ca80 100644 --- a/tests/test_dem_generation_westport_2/test_case.py +++ b/tests/test_dem_generation_westport_2/test_case.py @@ -41,7 +41,7 @@ class Test(base_test.Test): FILE_SIZES = { "CL2_BR20_2020_1000_4012.laz": 2636961, "CL2_BR20_2020_1000_4013.laz": 3653378, - DATASET + "_TileIndex.zip": 1125874, + DATASET + "_TileIndex.zip": 1848391, } @classmethod diff --git a/tests/test_dem_generation_westport_3/test_case.py b/tests/test_dem_generation_westport_3/test_case.py index b43389d5..ff40928f 100644 --- a/tests/test_dem_generation_westport_3/test_case.py +++ b/tests/test_dem_generation_westport_3/test_case.py @@ -45,7 +45,7 @@ class Test(base_test.Test): FILE_SIZES = { "CL2_BR20_2020_1000_4012.laz": 2636961, "CL2_BR20_2020_1000_4112.laz": 9036407, - DATASET + "_TileIndex.zip": 1125874, + DATASET + "_TileIndex.zip": 1848391, } @classmethod diff --git a/tests/test_dem_generation_westport_4/test_case.py b/tests/test_dem_generation_westport_4/test_case.py index 2f0f1810..53f20c71 100644 --- a/tests/test_dem_generation_westport_4/test_case.py +++ b/tests/test_dem_generation_westport_4/test_case.py @@ -47,7 +47,7 @@ class Test(base_test.Test): "CL2_BR21_2020_1000_4704.laz": 20851153, "CL2_BR21_2020_1000_4705.laz": 19749374, "CL2_BR21_2020_1000_4804.laz": 18379794, - DATASET + "_TileIndex.zip": 1125874, + DATASET + "_TileIndex.zip": 1848391, } @classmethod diff --git a/tests/test_many_stages_waikanae/data/benchmark.nc 
b/tests/test_many_stages_waikanae/data/benchmark.nc
index 5e08ed96..b91a5d04 100644
--- a/tests/test_many_stages_waikanae/data/benchmark.nc
+++ b/tests/test_many_stages_waikanae/data/benchmark.nc
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:23a43a4b0ea54fc75fb3eb326b67011ae9b7318cc635b229fcc54cac5fc74cc2
-size 110896
+oid sha256:65f580ba43b43b9712dfbe2747c3ab04ef8eb7d9c1a3251bd4ad4d38b93424d7
+size 111104
diff --git a/tests/test_many_stages_waikanae/data/lake_contours.gpkg b/tests/test_many_stages_waikanae/data/lake_contours.gpkg
new file mode 100755
index 0000000000000000000000000000000000000000..75d697b085c654f0f62216e95fe14c5ddd8f18dd
GIT binary patch
literal 98304
[binary GeoPackage payload omitted]
diff --git a/tests/test_many_stages_waikanae/data/lake_outline.gpkg b/tests/test_many_stages_waikanae/data/lake_outline.gpkg
new file mode 100755
index 0000000000000000000000000000000000000000..604a5c6274a1080bbb309e25633e7dce0a61a1e5
GIT binary patch
literal 98304
[binary GeoPackage payload omitted]
zzJVyK;pt5^I80sjm+CLIwB{}iQ}s2WL*%EcE{C;GTv`K{$HX(|+GMefjhW~6C>y** zmUu1~%fsIB;utK!+~V;7xp3su@X#B72Wvvh`LN7xV<4SD^Z&^Wn)9cR>5se)^6 z<24qW-*2Alyr`{Ie=$pG{<_-jr0R=W3A>hjEXdByVHYv};E4=JJHYH~jnN!X&#Yf;toZFCa#oY8& z7TeaX=6Q$GJ43cXTvJBZkV$k09ui>>MVNRtJ(Ura6OfuvG^CWK>}}{qhe_~BK@?xCBU9SHFn}NPp literal 0 HcmV?d00001 diff --git a/tests/test_many_stages_waikanae/instruction.json b/tests/test_many_stages_waikanae/instruction.json index 0fd48855..b5d7cc75 100644 --- a/tests/test_many_stages_waikanae/instruction.json +++ b/tests/test_many_stages_waikanae/instruction.json @@ -135,6 +135,7 @@ "rivers": [{"extents": "rivers/river_polygon.geojson", "elevations": "rivers/river_bathymetry.geojson"}], "waterways": [{"extents": "waterways/closed_waterways_polygon.geojson", "elevations": "waterways/closed_waterways_elevation.geojson"}, {"extents": "waterways/open_waterways_polygon.geojson", "elevations": "waterways/open_waterways_elevation.geojson"}], + "lakes": [{"extents": "../lake_outline.gpkg", "elevations": "../lake_contours.gpkg"}], "result_dem": "test_dem.nc" }, "datasets": {