
Rename from ecl2df to res2df
anders-kiaer committed Jan 3, 2024
1 parent 7578250 commit 639c32a
Showing 16 changed files with 59 additions and 59 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -61,7 +61,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- [#1183](https://github.com/equinor/webviz-subsurface/pull/1183) - Reduced loading time by lazy loading only requested plugins when application starts.
- [#1182](https://github.com/equinor/webviz-subsurface/pull/1182) - `ParameterResponseCorrelation` can now allow default None column_keys when using arrow file as input
- [#1122](https://github.com/equinor/webviz-subsurface/pull/1122) - `opm` and `ecl2df` are now optional, making `webviz-subsurface` possible to install and import on non-unix based systems. **NOTE:** a lot of the functionality in `webviz-subsurface` is built on `opm` and `ecl2df`, and issues are therefore expected on e.g. Windows and macOS. Use with care.
- [#1122](https://github.com/equinor/webviz-subsurface/pull/1122) - `opm` and `res2df` are now optional, making `webviz-subsurface` possible to install and import on non-unix based systems. **NOTE:** a lot of the functionality in `webviz-subsurface` is built on `opm` and `res2df`, and issues are therefore expected on e.g. Windows and macOS. Use with care.
- [#1146](https://github.com/equinor/webviz-subsurface/pull/1146) - Converted the `BhpQc` plugin to WLF (Webviz Layout Framework).
- [#1184](https://github.com/equinor/webviz-subsurface/pull/1184) - `WellAnalysis`: changes to the settings layout in the `WellOverview` view

2 changes: 1 addition & 1 deletion setup.py
@@ -92,7 +92,7 @@
"dash_bootstrap_components>=0.10.3",
"dash-daq>=0.5.0",
"defusedxml>=0.6.0",
"ecl2df>=0.15.0; sys_platform=='linux'",
"res2df>=1.0; sys_platform=='linux'",
"flask-caching",
"fmu-ensemble>=1.2.3",
"fmu-tools>=1.8",
20 changes: 10 additions & 10 deletions webviz_subsurface/_datainput/pvt_data.py
@@ -11,15 +11,15 @@
import numpy as np
import pandas as pd

# opm and ecl2df are only available for Linux,
# opm and res2df are only available for Linux,
# hence, ignore any import exception here to make
# it still possible to use the PvtPlugin on
# machines with other OSes.
#
# NOTE: Functions in this file cannot be used
# on non-Linux OSes.
try:
import ecl2df
import res2df
from opm.io.ecl import EclFile
except ImportError:
pass
@@ -202,10 +202,10 @@ def load_pvt_dataframe(
) -> pd.DataFrame:
# pylint: disable=too-many-statements

def check_if_ecl2df_is_installed() -> None:
# If ecl2df is not loaded, this machine is probably not
def check_if_res2df_is_installed() -> None:
# If res2df is not loaded, this machine is probably not
# running Linux and the modules are not available.
if "ecl2df" not in sys.modules:
if "res2df" not in sys.modules:
raise ModuleNotFoundError(
"Your operating system does not support opening and reading"
" Eclipse files. An empty data frame will be returned and your"
@@ -214,14 +214,14 @@ def check_if_ecl2df_is_installed() -> None:
" to display PVT data anyways."
)

def ecl2df_pvt_data_frame(kwargs: Any) -> pd.DataFrame:
check_if_ecl2df_is_installed()
return ecl2df.pvt.df(kwargs["realization"].get_eclfiles())
def res2df_pvt_data_frame(kwargs: Any) -> pd.DataFrame:
check_if_res2df_is_installed()
return res2df.pvt.df(kwargs["realization"].get_eclfiles())

def init_to_pvt_data_frame(kwargs: Any) -> pd.DataFrame:
# pylint: disable-msg=too-many-locals
# pylint: disable=too-many-branches
check_if_ecl2df_is_installed()
check_if_res2df_is_installed()
ecl_init_file = EclFile(
kwargs["realization"].get_eclfiles().get_initfile().get_filename()
)
@@ -400,7 +400,7 @@ def init_to_pvt_data_frame(kwargs: Any) -> pd.DataFrame:

return filter_pvt_data_frame(
load_ensemble_set(ensemble_paths, ensemble_set_name).apply(
init_to_pvt_data_frame if use_init_file else ecl2df_pvt_data_frame
init_to_pvt_data_frame if use_init_file else res2df_pvt_data_frame
),
drop_ensemble_duplicates,
)
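
For context, a minimal usage sketch of the loader touched in this hunk. The keyword arguments are inferred from the variables visible above (the full signature is not shown in the diff), and the ensemble path is a hypothetical example:

```python
from webviz_subsurface._datainput.pvt_data import load_pvt_dataframe

# Hypothetical FMU ensemble layout; adjust to your own case.
ensemble_paths = {"iter-0": "/scratch/my_field/my_case/realization-*/iter-0"}

pvt_df = load_pvt_dataframe(
    ensemble_paths,
    ensemble_set_name="EnsembleSet",
    use_init_file=False,  # False -> res2df_pvt_data_frame, True -> init_to_pvt_data_frame
    drop_ensemble_duplicates=False,
)
print(pvt_df.head())
```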
8 changes: 4 additions & 4 deletions webviz_subsurface/_datainput/relative_permeability.py
@@ -7,7 +7,7 @@
from .fmu_input import load_ensemble_set

try:
import ecl2df
import res2df
except ImportError:
pass

@@ -22,10 +22,10 @@ def load_satfunc(
ensemble_paths: dict,
ensemble_set_name: str = "EnsembleSet",
) -> pd.DataFrame:
def ecl2df_satfunc(kwargs: Any) -> pd.DataFrame:
return ecl2df.satfunc.df(kwargs["realization"].get_eclfiles())
def res2df_satfunc(kwargs: Any) -> pd.DataFrame:
return res2df.satfunc.df(kwargs["realization"].get_eclfiles())

return load_ensemble_set(ensemble_paths, ensemble_set_name).apply(ecl2df_satfunc)
return load_ensemble_set(ensemble_paths, ensemble_set_name).apply(res2df_satfunc)


@webvizstore
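Similarly, a hedged usage sketch of `load_satfunc` as renamed above; the shape of the `ensemble_paths` dict (ensemble name mapped to a realization path glob) and the path itself are assumptions:

```python
from webviz_subsurface._datainput.relative_permeability import load_satfunc

satfunc_df = load_satfunc(
    ensemble_paths={"iter-0": "/scratch/my_field/my_case/realization-*/iter-0"},
    ensemble_set_name="EnsembleSet",
)
print(satfunc_df.head())
```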
6 changes: 3 additions & 3 deletions webviz_subsurface/_datainput/well_completions.py
@@ -7,15 +7,15 @@

import pandas as pd

# opm and ecl2df are only available for Linux,
# opm and res2df are only available for Linux,
# hence, ignore any import exception here to make
# it still possible to use the PvtPlugin on
# machines with other OSes.
#
# NOTE: Functions in this file cannot be used
# on non-Linux OSes.
try:
from ecl2df import EclFiles, common
from res2df import EclFiles, common
except ImportError:
pass

@@ -44,7 +44,7 @@ def read_zone_layer_mapping(
) -> pd.DataFrame:
"""Searches for all zone->layer mapping files for an ensemble. \
The files should be on lyr format and can be parsed using functionality \
from ecl2df.
from res2df.
The results are returned as a dataframe with the following columns:
* REAL
@@ -47,14 +47,14 @@ def create_vector_metadata_from_field_meta(
"""Create VectorMetadata from keywords stored in the field's metadata"""

# Note that when we query the values in the field.metadata we always get byte strings
# back from pyarrow. Further, ecl2df writes all the values as strings, so we must
# back from pyarrow. Further, res2df writes all the values as strings, so we must
# convert these to the correct types before creating the VectorMetadata instance.
# See also ecl2df code:
# https://github.com/equinor/ecl2df/blob/0e30fb8046bf17fd338bb468584985c5d816e2f6/ecl2df/summary.py#L441
# See also res2df code:
# https://github.com/equinor/res2df/blob/0e30fb8046bf17fd338bb468584985c5d816e2f6/res2df/summary.py#L441

# Currently, based on the ecl2df code, we assume that all keys except for 'get_num'
# Currently, based on the res2df code, we assume that all keys except for 'get_num'
# and 'wgname' must be present in order to return a valid metadata object
# https://github.com/equinor/ecl2df/blob/0e30fb8046bf17fd338bb468584985c5d816e2f6/ecl2df/summary.py#L541-L552
# https://github.com/equinor/res2df/blob/0e30fb8046bf17fd338bb468584985c5d816e2f6/res2df/summary.py#L541-L552

meta_dict = field.metadata
if not meta_dict:
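As a stand-alone illustration of the byte-string behaviour described in the comment above: pyarrow returns field metadata as bytes, and since res2df writes the values as strings they must be decoded and cast before use. The metadata keys below are examples only; the diff itself names just `get_num` and `wgname`:

```python
import pyarrow as pa

field = pa.field(
    "FOPT",
    pa.float32(),
    metadata={"unit": "SM3", "is_total": "True", "get_num": "0", "wgname": "None"},
)

raw_meta = field.metadata  # -> {b"unit": b"SM3", b"is_total": b"True", ...}
unit = raw_meta[b"unit"].decode("utf-8")
is_total = raw_meta[b"is_total"].decode("utf-8") == "True"
print(unit, is_total)
```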
@@ -8,7 +8,7 @@

import datacompy # pylint: disable=import-error, useless-suppression
import dateutil.parser # type: ignore
import ecl2df
import res2df
import numpy as np
import pandas as pd
from fmu.ensemble import ScratchEnsemble
@@ -92,14 +92,14 @@ def _load_smry_dataframe_using_fmu(
return df


def _load_smry_dataframe_using_ecl2df(
def _load_smry_dataframe_using_res2df(
ens_path: str, frequency: Optional[Frequency]
) -> pd.DataFrame:
time_index: str = "raw"
if frequency:
time_index = frequency.value

print(f"## Loading data into DataFrame using ECL2DF time_index={time_index}...")
print(f"## Loading data into DataFrame using res2df time_index={time_index}...")

realidxregexp = re.compile(r"realization-(\d+)")
globpattern = os.path.join(ens_path, "eclipse/model/*.UNSMRY")
@@ -122,8 +122,8 @@ def _load_smry_dataframe_using_ecl2df(

print(f"R={real}: {smry_file}")

eclfiles = ecl2df.EclFiles(smry_file.replace(".UNSMRY", ""))
real_df = ecl2df.summary.df(eclfiles, time_index=time_index)
eclfiles = res2df.EclFiles(smry_file.replace(".UNSMRY", ""))
real_df = res2df.summary.df(eclfiles, time_index=time_index)
real_df.insert(0, "REAL", real)
real_df.index.name = "DATE"
per_real_df_arr.append(real_df)
@@ -277,7 +277,7 @@ def main() -> None:
print("## Loading data into reference DataFrame...")
# Note that for version 2.13.0 and earlier of ecl, loading via FMU will not give the
# correct results. This was remedied in https://github.com/equinor/ecl/pull/837
# reference_df = _load_smry_dataframe_using_ecl2df(ensemble_path, frequency)
# reference_df = _load_smry_dataframe_using_res2df(ensemble_path, frequency)
reference_df = _load_smry_dataframe_using_fmu(ensemble_path, frequency)

print("## Comparing get_vectors()...")
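For a single realization, the res2df calls used in the script above boil down to the following sketch (the eclbase path is hypothetical, and "monthly" is assumed to be one of the accepted `time_index` values):

```python
import res2df

eclfiles = res2df.EclFiles(
    "/scratch/my_field/my_case/realization-0/iter-0/eclipse/model/MY_CASE"
)
smry_df = res2df.summary.df(eclfiles, time_index="monthly")
print(smry_df.head())
```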
4 changes: 2 additions & 2 deletions webviz_subsurface/plugins/_group_tree/_plugin.py
@@ -46,9 +46,9 @@ class GroupTree(WebvizPluginABC):
`gruptree_file` is a path to a file stored per realization (e.g. in \
`share/results/tables/gruptree.csv"`).
The `gruptree_file` file can be dumped to disk per realization by the `ECL2CSV` forward
model with subcommand `gruptree`. The forward model uses `ecl2df` to export a table
model with subcommand `gruptree`. The forward model uses `res2df` to export a table
representation of the Eclipse network:
[Link to ecl2csv gruptree documentation.](https://equinor.github.io/ecl2df/usage/gruptree.html).
[Link to ecl2csv gruptree documentation.](https://equinor.github.io/res2df/usage/gruptree.html).
**time_index**
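A hedged sketch of producing `gruptree_file` outside ERT with the `ecl2csv` command-line tool linked above; the paths are hypothetical and the exact argument style should be checked against that documentation:

```python
import subprocess

subprocess.run(
    [
        "ecl2csv",
        "gruptree",
        "eclipse/model/MY_CASE.DATA",
        "-o",
        "share/results/tables/gruptree.csv",
    ],
    check=True,
)
```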
8 changes: 4 additions & 4 deletions webviz_subsurface/plugins/_pvt_plot/_plugin.py
@@ -36,9 +36,9 @@ class PvtPlot(WebvizPluginABC):
The minimum requirement is to define `ensembles`.
If no `pvt_relative_file_path` is given, the PVT data will be extracted automatically
from the simulation decks of individual realizations using `fmu_ensemble` and `ecl2df`.
from the simulation decks of individual realizations using `fmu_ensemble` and `res2df`.
If the `read_from_init_file` flag is set to True, the extraction procedure in
`ecl2df` will be replaced by an individual extracting procedure that reads the
`res2df` will be replaced by an individual extracting procedure that reads the
normalized Eclipse INIT file.
Note that the latter two extraction methods can be very slow for larger data and are therefore
not recommended unless you have a very simple model/data deck.
@@ -56,7 +56,7 @@ class PvtPlot(WebvizPluginABC):
* One column named `VISCOSITY` as the second covariate.
The file can e.g. be dumped to disc per realization by a forward model in ERT using
`ecl2df`.
`res2df`.
"""

class Ids(StrEnum):
@@ -110,7 +110,7 @@ def __init__(
raise ValueError(
(
"There has to be a KEYWORD or TYPE column with corresponding Eclipse keyword."
"When not providing a csv file, make sure ecl2df is installed."
"When not providing a csv file, make sure res2df is installed."
)
)

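A minimal sketch of dumping the PVT csv per realization with res2df, matching the extraction path this plugin falls back to (the eclbase and output paths are hypothetical examples):

```python
import res2df

eclfiles = res2df.EclFiles("eclipse/model/MY_CASE")
pvt_df = res2df.pvt.df(eclfiles)
pvt_df.to_csv("share/results/tables/pvt.csv", index=False)
```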
8 changes: 4 additions & 4 deletions webviz_subsurface/plugins/_relative_permeability.py
@@ -46,7 +46,7 @@ class RelativePermeability(WebvizPluginABC):
The minimum requirement is to define `ensembles`.
If no `relpermfile` is defined, the relative permeability data will be extracted automatically
from the simulation decks of individual realizations using `fmu-ensemble` and `ecl2df` behind the
from the simulation decks of individual realizations using `fmu-ensemble` and `res2df` behind the
scenes. Note that this method can be very slow for larger data decks, and is therefore not
recommended unless you have a very simple model/data deck.
@@ -59,10 +59,10 @@ class RelativePermeability(WebvizPluginABC):
* One column **per** capillary pressure curve (e.g. `PCOW`).
The `relpermfile` file can e.g. be dumped to disk per realization by a forward model in ERT that
wraps the command `ecl2csv satfunc input_file -o output_file` (requires that you have `ecl2df`
wraps the command `ecl2csv satfunc input_file -o output_file` (requires that you have `res2df`
installed). A typical example could be:
`ecl2csv satfunc eclipse/include/props/relperm.inc -o share/results/tables/relperm.csv`.
[Link to ecl2csv satfunc documentation.](https://equinor.github.io/ecl2df/scripts.html#satfunc)
[Link to ecl2csv satfunc documentation.](https://equinor.github.io/res2df/scripts.html#satfunc)
`scalfile` is a path to __a single file of SCAL recommendations__ (for all
Expand Down Expand Up @@ -257,7 +257,7 @@ def __init__(
):
raise ValueError(
"Unrecognized saturation table keyword in data. This should not occur unless "
"there has been changes to ecl2df. Update of this plugin might be required."
"there has been changes to res2df. Update of this plugin might be required."
)
else:
self.family = 2
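Analogously to the PVT example above, a hedged Python equivalent of the `ecl2csv satfunc` forward model: it reads the full deck via res2df rather than a single include file, and the paths are hypothetical:

```python
import res2df

eclfiles = res2df.EclFiles("eclipse/model/MY_CASE")
relperm_df = res2df.satfunc.df(eclfiles)
relperm_df.to_csv("share/results/tables/relperm.csv", index=False)
```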
2 changes: 1 addition & 1 deletion webviz_subsurface/plugins/_rft_plotter/_plugin.py
@@ -44,7 +44,7 @@ class RftPlotter(WebvizPluginABC):
01_drogon_ahm/realization-0/iter-0/share/results/tables/rft_ert.csv).
* **`rft.csv`**: A csv file containing simulated RFT data extracted from ECLIPSE RFT output files \
using [ecl2df](https://equinor.github.io/ecl2df/ecl2df.html#module-ecl2df.rft) \
using [res2df](https://equinor.github.io/res2df/res2df.html#module-res2df.rft) \
[(example file)](https://github.com/equinor/webviz-subsurface-testdata/blob/master/\
01_drogon_ahm/realization-0/iter-0/share/results/tables/rft.csv). \
Simulated RFT data can be visualized along MD if a "CONMD" column is present in \
@@ -7,8 +7,8 @@

import numpy as np
import pyarrow as pa
from ecl2df.vfp import pyarrow2basic_data
from ecl2df.vfp._vfpdefs import (
from res2df.vfp import pyarrow2basic_data
from res2df.vfp._vfpdefs import (
ALQ,
GFR,
THPTYPE,
@@ -233,7 +233,7 @@ def _read_vfp_arrow(filename: str) -> io.BytesIO:
"""Function to read the vfp arrow files and return them as
a io.BytesIO object in order to be stored as portable.
Uses the pyarrow2basic_data function from ecl2df in order
Uses the pyarrow2basic_data function from res2df in order
to convert the pyarrow table into a dictionary. But then
the columns have to be converted to strings, or lists in order
to be encoded.
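A hedged sketch of the conversion path described in the docstring above. The .arrow path is a hypothetical example, it is assumed the file can be read with `pyarrow.feather`, and the real code handles more value types than this simplification:

```python
import pyarrow.feather as feather
from res2df.vfp import pyarrow2basic_data

table = feather.read_table("share/results/tables/vfp/VFPPROD_1.arrow")
basic_data = pyarrow2basic_data(table)

# numpy arrays and enum values must become lists/strings before encoding:
portable = {
    key: value.tolist() if hasattr(value, "tolist") else str(value)
    for key, value in basic_data.items()
}
```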
4 changes: 2 additions & 2 deletions webviz_subsurface/plugins/_well_analysis/_plugin.py
@@ -43,9 +43,9 @@ class WellAnalysis(WebvizPluginABC):
`share/results/tables/gruptree.csv"`).
The `gruptree_file` file can be dumped to disk per realization by the `ECL2CSV` forward
model with subcommand `gruptree`. The forward model uses `ecl2df` to export a table
model with subcommand `gruptree`. The forward model uses `res2df` to export a table
representation of the Eclipse network:
[Link to ecl2csv gruptree documentation.](https://equinor.github.io/ecl2df/usage/gruptree.html).
[Link to ecl2csv gruptree documentation.](https://equinor.github.io/res2df/usage/gruptree.html).
**time_index**
@@ -164,7 +164,7 @@ def webviz_store(self) -> Tuple[Callable, List[Dict]]:
def merge_compdat_and_connstatus(
df_compdat: pd.DataFrame, df_connstatus: pd.DataFrame
) -> pd.DataFrame:
"""This function merges the compdat data (exported with ecl2df) with the well connection
"""This function merges the compdat data (exported with res2df) with the well connection
status data (extracted from the CPI summary data). The connection status data will
be used for wells where it exists. The KH will be merged from the compdat. For wells
that are not in the connection status data, the compdat data will be used as it is.
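A simplified pandas illustration of the merge rule described in the docstring above; the column names (`WELL`, `DATE`, `KH`, `OP/SH`) are assumptions based on typical res2df compdat output, not the plugin's actual schema:

```python
import pandas as pd

df_compdat = pd.DataFrame(
    {
        "WELL": ["OP_1", "OP_2"],
        "DATE": ["2020-01-01", "2020-01-01"],
        "KH": [150.0, 80.0],
        "OP/SH": ["OPEN", "OPEN"],
    }
)
df_connstatus = pd.DataFrame(
    {"WELL": ["OP_1", "OP_1"], "DATE": ["2020-01-01", "2020-06-01"], "OP/SH": ["OPEN", "SHUT"]}
)

wells_with_status = df_connstatus["WELL"].unique()
# Connection status data wins where it exists; KH is brought in from compdat.
merged = df_connstatus.merge(
    df_compdat[["WELL", "KH"]].drop_duplicates(), on="WELL", how="left"
)
# Wells without connection status data keep their compdat rows as they are.
fallback = df_compdat[~df_compdat["WELL"].isin(wells_with_status)]
result = pd.concat([merged, fallback], ignore_index=True)
print(result)
```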
10 changes: 5 additions & 5 deletions webviz_subsurface/plugins/_well_completions/_plugin.py
@@ -11,7 +11,7 @@

@deprecated_plugin(
"This plugin has been replaced by the `WellCompletion` plugin (without "
"the `s`) which is based on the `wellcompletionsdata` export from `ecl2df`. "
"the `s`) which is based on the `wellcompletionsdata` export from `res2df`. "
"The new plugin is faster and has more functionality. "
)
class WellCompletions(WebvizPluginABC):
@@ -37,19 +37,19 @@ class WellCompletions(WebvizPluginABC):
`compdat_file` is a path to a file stored per realization (e.g. in \
`share/results/tables/compdat.csv`). This file can be exported to disk per realization by using
the `ECL2CSV` forward model in ERT with subcommand `compdat`. [Link to ecl2csv compdat documentation.](https://equinor.github.io/ecl2df/usage/compdat.html)
the `ECL2CSV` forward model in ERT with subcommand `compdat`. [Link to ecl2csv compdat documentation.](https://equinor.github.io/res2df/usage/compdat.html)
The connection status history of each cell is not necessarily complete in the `ecl2df` export,
The connection status history of each cell is not necessarily complete in the `res2df` export,
because status changes resulting from ACTIONs can't be extracted from the Eclipse input
files. If the `ecl2df` export is good, it is recommended to use that. This will often be the
files. If the `res2df` export is good, it is recommended to use that. This will often be the
case for history runs. But if not, an alternative way of extracting the data is described in
the next section.
**Well Connection status input**
The `well_connection_status_file` is a path to a file stored per realization (e.g. in \
`share/results/tables/wellconnstatus.csv`. This file can be exported to disk per realization
by using the `ECL2CSV` forward model in ERT with subcommand `wellconnstatus`. [Link to ecl2csv wellconnstatus documentation.](https://equinor.github.io/ecl2df/usage/wellconnstatus.html)
by using the `ECL2CSV` forward model in ERT with subcommand `wellconnstatus`. [Link to ecl2csv wellconnstatus documentation.](https://equinor.github.io/res2df/usage/wellconnstatus.html)
This approach uses the CPI summary data to create a well connection status history: for
each well connection cell there is one line for each time the connection is opened or closed.
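An illustrative sketch of the CPI-based approach described above: a connection is treated as open while its CPI summary value is positive, and one row is kept per status change (dates and values are made up):

```python
import pandas as pd

cpi = pd.Series(
    [0.0, 1.2, 1.3, 0.0, 0.9],
    index=pd.to_datetime(
        ["2020-01-01", "2020-02-01", "2020-03-01", "2020-04-01", "2020-05-01"]
    ),
)
is_open = cpi > 0
changes = is_open[is_open != is_open.shift()]  # first sample plus every transition
status = changes.map({True: "OPEN", False: "SHUT"})
print(status)
```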
14 changes: 7 additions & 7 deletions webviz_subsurface/smry2arrow_batch.py
@@ -10,7 +10,7 @@
from pathlib import Path
from typing import List

import ecl2df
import res2df

logger = logging.getLogger(__name__)

@@ -47,21 +47,21 @@ def _get_parser() -> argparse.ArgumentParser:

def _convert_single_smry_file(smry_filename: str, arrow_filename: str) -> None:
"""Read summary data for single realization from disk and write it out to .arrow
file using ecl2df.
file using res2df.
"""

eclbase = (
smry_filename.replace(".DATA", "").replace(".UNSMRY", "").replace(".SMSPEC", "")
)

eclfiles = ecl2df.EclFiles(eclbase)
sum_df = ecl2df.summary.df(eclfiles)
eclfiles = res2df.EclFiles(eclbase)
sum_df = res2df.summary.df(eclfiles)

# Slight hack here, using ecl2df protected function to gain access to conversion routine
# Slight hack here, using res2df protected function to gain access to conversion routine
# pylint: disable=protected-access
sum_table = ecl2df.summary._df2pyarrow(sum_df)
sum_table = res2df.summary._df2pyarrow(sum_df)

ecl2df.summary.write_dframe_stdout_file(sum_table, arrow_filename)
res2df.summary.write_dframe_stdout_file(sum_table, arrow_filename)


def _batch_convert_smry2arrow(
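A hedged usage sketch of the (private) helper renamed above; both paths are hypothetical, and reading the result back assumes the .arrow file is in a format `pyarrow.feather` understands:

```python
import pyarrow.feather as feather
from webviz_subsurface.smry2arrow_batch import _convert_single_smry_file

_convert_single_smry_file(
    "realization-0/iter-0/eclipse/model/MY_CASE.UNSMRY",
    "realization-0/iter-0/share/results/unsmry/summary.arrow",
)
table = feather.read_table("realization-0/iter-0/share/results/unsmry/summary.arrow")
print(table.schema)
```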
