diff --git a/lib/adf_diag.py b/lib/adf_diag.py index 50558824b..2dff57f85 100644 --- a/lib/adf_diag.py +++ b/lib/adf_diag.py @@ -7,9 +7,9 @@ plotting methods themselves. """ -#++++++++++++++++++++++++++++++ -#Import standard python modules -#++++++++++++++++++++++++++++++ +# ++++++++++++++++++++++++++++++ +# Import standard python modules +# ++++++++++++++++++++++++++++++ import sys import os @@ -24,7 +24,7 @@ from pathlib import Path from typing import Optional -#Check if "PyYAML" is present in python path: +# Check if "PyYAML" is present in python path: # pylint: disable=unused-import try: import yaml @@ -33,7 +33,7 @@ print("Please install module, e.g. 'pip install pyyaml'.") sys.exit(1) -#Check if "xarray" is present in python path: +# Check if "xarray" is present in python path: try: import xarray as xr except ImportError: @@ -41,7 +41,7 @@ print("Please install module, e.g. 'pip install xarray'.") sys.exit(1) -#Check if "numpy" is present in python path: +# Check if "numpy" is present in python path: try: import numpy as np except ImportError: @@ -49,7 +49,7 @@ print("Please install module, e.g. 'pip install numpy'.") sys.exit(1) -#Check if "matplolib" is present in python path: +# Check if "matplotlib" is present in python path: try: import matplotlib as mpl except ImportError: @@ -57,7 +57,7 @@ print("Please install module, e.g. 'pip install matplotlib'.") sys.exit(1) -#Check if "cartopy" is present in python path: +# Check if "cartopy" is present in python path: try: import cartopy.crs as ccrs except ImportError: @@ -67,72 +67,73 @@ # pylint: enable=unused-import -#+++++++++++++++++++++++++++++ -#Add ADF diagnostics 'scripts' -#directories to Python path -#+++++++++++++++++++++++++++++ +# +++++++++++++++++++++++++++++ +# Add ADF diagnostics 'scripts' +# directories to Python path +# +++++++++++++++++++++++++++++ -#Determine local directory path: +# Determine local directory path: _LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) -#Add "scripts" directory to path: -_DIAG_SCRIPTS_PATH = os.path.join(_LOCAL_PATH,os.pardir,"scripts") +# Add "scripts" directory to path: +_DIAG_SCRIPTS_PATH = os.path.join(_LOCAL_PATH, os.pardir, "scripts") -#Check that "scripts" directory actually exists: +# Check that "scripts" directory actually exists: if not os.path.isdir(_DIAG_SCRIPTS_PATH): - #If not, then raise error: + # If not, then raise error: ermsg = f"'{_DIAG_SCRIPTS_PATH}' directory not found. Has 'AdfDiag.py' been moved?" raise FileNotFoundError(ermsg) -#Walk through all sub-directories in "scripts" directory: +# Walk through all sub-directories in "scripts" directory: for root, dirs, files in os.walk(_DIAG_SCRIPTS_PATH): - #Add all sub-directories to python path: + # Add all sub-directories to python path: for dirname in dirs: - sys.path.append(os.path.join(root,dirname)) + sys.path.append(os.path.join(root, dirname)) -#+++++++++++++++++++++++++++++ +# +++++++++++++++++++++++++++++ -#Finally, import needed ADF module: +# Finally, import needed ADF module: from adf_web import AdfWeb - ################# -#Helper functions +# Helper functions ################# -def construct_index_info(page_dict, fnam, opf): +def construct_index_info(page_dict, fnam, opf): """ Helper function for generating web pages. d : dictionary for the index page information fnam : the image filename, img.stem --> then decompose the img file's parts.
opf: outputfile for the image """ - vname, plot_desc = fnam[0:fnam.index("_")], fnam[fnam.index("_")+1:] - if 'ANN' in plot_desc: - temporal = 'ANN' - elif 'DJF' in plot_desc: - temporal = 'DJF' - elif 'JJA' in plot_desc: - temporal = 'JJA' - elif 'MAM' in plot_desc: - temporal = 'MAM' - elif 'SON' in plot_desc: - temporal = 'SON' + vname, plot_desc = fnam[0 : fnam.index("_")], fnam[fnam.index("_") + 1 :] + if "ANN" in plot_desc: + temporal = "ANN" + elif "DJF" in plot_desc: + temporal = "DJF" + elif "JJA" in plot_desc: + temporal = "JJA" + elif "MAM" in plot_desc: + temporal = "MAM" + elif "SON" in plot_desc: + temporal = "SON" else: - temporal = 'NoInfo' - plot_type = plot_desc.replace(temporal+"_", "") + temporal = "NoInfo" + plot_type = plot_desc.replace(temporal + "_", "") if vname not in page_dict: page_dict[vname] = {} if plot_type not in page_dict[vname]: page_dict[vname][plot_type] = {} page_dict[vname][plot_type][temporal] = opf + ###################################### -#Main ADF diagnostics class (AdfDiag) +# Main ADF diagnostics class (AdfDiag) ###################################### + class AdfDiag(AdfWeb): """ @@ -154,44 +155,43 @@ class AdfDiag(AdfWeb): """ def __init__(self, config_file, debug=False): - """ Initalize ADF diagnostics object. """ - #Initialize Config/Base attributes: + # Initialize Config/Base attributes: super().__init__(config_file, debug=debug) - #Add CVDP info to object: - self.__cvdp_info = self.read_config_var('diag_cvdp_info') + # Add CVDP info to object: + self.__cvdp_info = self.read_config_var("diag_cvdp_info") - #Expand CVDP climo info variable strings: + # Expand CVDP climo info variable strings: if self.__cvdp_info is not None: self.expand_references(self.__cvdp_info) - #End if + # End if - #Add averaging script names: - self.__time_averaging_scripts = self.read_config_var('time_averaging_scripts') + # Add averaging script names: + self.__time_averaging_scripts = self.read_config_var("time_averaging_scripts") - #Add regridding script names: - self.__regridding_scripts = self.read_config_var('regridding_scripts') + # Add regridding script names: + self.__regridding_scripts = self.read_config_var("regridding_scripts") - #Add analysis script names: - self.__analysis_scripts = self.read_config_var('analysis_scripts') + # Add analysis script names: + self.__analysis_scripts = self.read_config_var("analysis_scripts") - #Add plotting script names: - self.__plotting_scripts = self.read_config_var('plotting_scripts') + # Add plotting script names: + self.__plotting_scripts = self.read_config_var("plotting_scripts") # Create property needed to return "plotting_scripts" variable to user: @property def plotting_scripts(self): """Return a copy of the '__plotting_scripts' string list to user if requested.""" - #Note that a copy is needed in order to avoid having a script mistakenly - #modify this variable: + # Note that a copy is needed in order to avoid having a script mistakenly + # modify this variable: return copy.copy(self.__plotting_scripts) ######### - #Variable extraction functions + # Variable extraction functions ######### def get_cvdp_info(self, var_str, required=False): @@ -202,18 +202,21 @@ def get_cvdp_info(self, var_str, required=False): instead. 
""" - return self.read_config_var(var_str, - conf_dict=self.__cvdp_info, - required=required) + return self.read_config_var( + var_str, conf_dict=self.__cvdp_info, required=required + ) ######### - #Script-running functions + # Script-running functions ######### - def __diag_scripts_caller(self, scripts_dir: str, func_names: list, - default_kwargs: Optional[dict] = None, - log_section: Optional[str] = None): - + def __diag_scripts_caller( + self, + scripts_dir: str, + func_names: list, + default_kwargs: Optional[dict] = None, + log_section: Optional[str] = None, + ): """ Parse a list of scripts as provided by the config file, and call them as functions while passing in the correct inputs. @@ -226,11 +229,10 @@ def __diag_scripts_caller(self, scripts_dir: str, func_names: list, Note: Is it better to just make a child log instead? """ - #Loop over all averaging script names: + # Loop over all averaging script names: for func_name in func_names: - - #Check if func_name is a dictonary, - #this implies that the function has user-defined inputs: + # Check if func_name is a dictonary, + # this implies that the function has user-defined inputs: if isinstance(func_name, dict): emsg = "Function dictionary must be of the form: " emsg += "{function_name : {kwargs:{...}, module:'xxxx'}}" @@ -242,24 +244,29 @@ def __diag_scripts_caller(self, scripts_dir: str, func_names: list, elif isinstance(func_name, str): has_opt = False else: - raise TypeError("Provided script must either be a string or a dictionary.") + raise TypeError( + "Provided script must either be a string or a dictionary." + ) - func_script = func_name + '.py' # default behavior: Add file suffix to script name + func_script = ( + func_name + ".py" + ) # default behavior: Add file suffix to script name if has_opt: - if 'module' in opt: - func_script = opt['module'] + if "module" in opt: + func_script = opt["module"] - #Create full path to function script: - func_script_path = \ - os.path.join(os.path.join(_DIAG_SCRIPTS_PATH, scripts_dir), func_script) + # Create full path to function script: + func_script_path = os.path.join( + os.path.join(_DIAG_SCRIPTS_PATH, scripts_dir), func_script + ) - #Check that file exists in specified directory: + # Check that file exists in specified directory: if not os.path.exists(func_script_path): emsg = f"Script file '{func_script_path}' is missing. Diagnostics are ending here." 
self.end_diag_fail(emsg) if func_script_path not in sys.path: - #Add script path to debug log if requested: + # Add script path to debug log if requested: if log_section: dmsg = f"{log_section}: Inserting to sys.path: {func_script_path}" self.debug_log(dmsg) @@ -267,7 +274,7 @@ def __diag_scripts_caller(self, scripts_dir: str, func_names: list, dmsg = f"diag_scripts_caller: Inserting to sys.path: {func_script_path}" self.debug_log(dmsg) - #Add script to python path: + # Add script to python path: sys.path.insert(0, func_script_path) # NOTE: when we move to making this into a proper package, @@ -276,10 +283,10 @@ def __diag_scripts_caller(self, scripts_dir: str, func_names: list, # Arguments; check if user has specified custom arguments func_kwargs = default_kwargs if has_opt: - if 'kwargs' in opt: - func_kwargs = opt['kwargs'] + if "kwargs" in opt: + func_kwargs = opt["kwargs"] - #Add function calls debug log if requested: + # Add function calls debug log if requested: if log_section: dmsg = f"{log_section}: \n \t func_name = {func_name}\n " dmsg += f"\t func_kwargs = {func_kwargs}" @@ -289,17 +296,16 @@ def __diag_scripts_caller(self, scripts_dir: str, func_names: list, dmsg += f"\t func_kwargs = {func_kwargs}" self.debug_log(dmsg) - - #Call function - self.__function_caller(func_name, - func_kwargs=func_kwargs, - module_name=func_name) + # Call function + self.__function_caller( + func_name, func_kwargs=func_kwargs, module_name=func_name + ) ######### - def __function_caller(self, func_name: str, - func_kwargs: Optional[dict] = None, module_name=None): - + def __function_caller( + self, func_name: str, func_kwargs: Optional[dict] = None, module_name=None + ): """ Call a function with given arguments. @@ -312,97 +318,99 @@ def __function_caller(self, func_name: str, """ if module_name is None: - module_name = func_name #+'.py' + module_name = func_name # +'.py' # note: when we use importlib, specify the module name without the ".py" extension. module = importlib.import_module(module_name) if hasattr(module, func_name) and callable(getattr(module, func_name)): func = getattr(module, func_name) else: - emsg = f"Function '{func_name}' cannot be found in module '{module_name}.py'." + emsg = ( + f"Function '{func_name}' cannot be found in module '{module_name}.py'." + ) self.end_diag_fail(emsg) - #If kwargs are present, then run function with kwargs and return result: + # If kwargs are present, then run function with kwargs and return result: if func_kwargs: return func(self, **func_kwargs) - #Otherwise just run function as-is, and return result: + # Otherwise just run function as-is, and return result: return func(self) ######### def create_time_series(self, baseline=False): - """ Generate time series versions of the CAM history file data. """ global call_ncrcat + def call_ncrcat(cmd): - '''this is an internal function to `create_time_series` + """this is an internal function to `create_time_series` It just wraps the subprocess.call() function, so it can be used with the multiprocessing Pool that is constructed below. It is declared as global to avoid AttributeError. 
- ''' + """ return subprocess.run(cmd, shell=False) - #End def - #Notify user that script has started: + # End def + + # Notify user that script has started: print("\n Generating CAM time series files...") - #Check if baseline time-series files are being created: + # Check if baseline time-series files are being created: if baseline: - #Use baseline settings, while converting them all - #to lists: - case_names = [self.get_baseline_info("cam_case_name", required=True)] - cam_ts_done = [self.get_baseline_info("cam_ts_done")] + # Use baseline settings, while converting them all + # to lists: + case_names = [self.get_baseline_info("cam_case_name", required=True)] + cam_ts_done = [self.get_baseline_info("cam_ts_done")] cam_hist_locs = [self.get_baseline_info("cam_hist_loc", required=True)] - ts_dir = [self.get_baseline_info("cam_ts_loc", required=True)] - overwrite_ts = [self.get_baseline_info("cam_overwrite_ts")] - start_years = [self.climo_yrs["syear_baseline"]] - end_years = [self.climo_yrs["eyear_baseline"]] + ts_dir = [self.get_baseline_info("cam_ts_loc", required=True)] + overwrite_ts = [self.get_baseline_info("cam_overwrite_ts")] + start_years = [self.climo_yrs["syear_baseline"]] + end_years = [self.climo_yrs["eyear_baseline"]] else: - #Use test case settings, which are already lists: - case_names = self.get_cam_info("cam_case_name", required=True) - cam_ts_done = self.get_cam_info("cam_ts_done") + # Use test case settings, which are already lists: + case_names = self.get_cam_info("cam_case_name", required=True) + cam_ts_done = self.get_cam_info("cam_ts_done") cam_hist_locs = self.get_cam_info("cam_hist_loc", required=True) - ts_dir = self.get_cam_info("cam_ts_loc", required=True) - overwrite_ts = self.get_cam_info("cam_overwrite_ts") - start_years = self.climo_yrs["syears"] - end_years = self.climo_yrs["eyears"] - #End if - - #Read hist_str (component.hist_num) from the yaml file, or set to default - hist_str = self.get_basic_info('hist_str') - #If hist_str is not present, then default to 'cam.h0': + ts_dir = self.get_cam_info("cam_ts_loc", required=True) + overwrite_ts = self.get_cam_info("cam_overwrite_ts") + start_years = self.climo_yrs["syears"] + end_years = self.climo_yrs["eyears"] + # End if + + # Read hist_str (component.hist_num) from the yaml file, or set to default + hist_str = self.get_basic_info("hist_str") + # If hist_str is not present, then default to 'cam.h0': if not hist_str: - hist_str = 'cam.h0' - #End if + hist_str = "cam.h0" + # End if # get info about variable defaults res = self.variable_defaults - #Loop over cases: + # Loop over cases: for case_idx, case_name in enumerate(case_names): - - #Check if particular case should be processed: + # Check if particular case should be processed: if cam_ts_done[case_idx]: emsg = " Configuration file indicates time series files have been pre-computed" emsg += f" for case '{case_name}'. Will rely on those files directly." 
print(emsg) continue - #End if + # End if print(f"\t Processing time series for case '{case_name}' :") - #Extract start and end year values: + # Extract start and end year values: start_year = start_years[case_idx] - end_year = end_years[case_idx] + end_year = end_years[case_idx] - #Create path object for the CAM history file(s) location: + # Create path object for the CAM history file(s) location: starting_location = Path(cam_hist_locs[case_idx]) - #Check that path actually exists: + # Check that path actually exists: if not starting_location.is_dir(): if baseline: emsg = f"Provided baseline 'cam_hist_loc' directory '{starting_location}' " @@ -410,117 +418,127 @@ def call_ncrcat(cmd): else: - emsg = "Provided 'cam_hist_loc' directory '{starting_location}' not found." + emsg = f"Provided 'cam_hist_loc' directory '{starting_location}' not found." emsg += " Script is ending here." - #End if + # End if self.end_diag_fail(emsg) - #End if + # End if - #Check if history files actually exist. If not then kill script: - if not list(starting_location.glob('*'+hist_str+'.*.nc')): - emsg = f"No history *{hist_str}.*.nc files found in '{starting_location}'." + # Check if history files actually exist. If not then kill script: + if not list(starting_location.glob("*" + hist_str + ".*.nc")): + emsg = ( + f"No history *{hist_str}.*.nc files found in '{starting_location}'." + ) emsg += " Script is ending here." self.end_diag_fail(emsg) - #End if + # End if - #Create empty list: + # Create empty list: files_list = [] - #Loop over start and end years: - for year in range(start_year, end_year+1): - #Add files to main file list: - for fname in starting_location.glob(f'*{hist_str}.*{str(year).zfill(4)}*.nc'): + # Loop over start and end years: + for year in range(start_year, end_year + 1): + # Add files to main file list: + for fname in starting_location.glob( + f"*{hist_str}.*{str(year).zfill(4)}*.nc" + ): files_list.append(fname) - #End for - #End for - + # End for + # End for - #Create ordered list of CAM history files: + # Create ordered list of CAM history files: hist_files = sorted(files_list) - #Open an xarray dataset from the first model history file: - hist_file_ds = xr.open_dataset(hist_files[0], decode_cf=False, decode_times=False) + # Open an xarray dataset from the first model history file: + hist_file_ds = xr.open_dataset( + hist_files[0], decode_cf=False, decode_times=False + ) - #Get a list of data variables in the 1st hist file: + # Get a list of data variables in the 1st hist file: hist_file_var_list = list(hist_file_ds.data_vars) - #Note: could use `open_mfdataset`, but that can become very slow; + # Note: could use `open_mfdataset`, but that can become very slow; # This approach effectively assumes that all files contain the same variables.
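The comment above names a real assumption: only the first history file is inspected for its variable list. A minimal sketch, not part of this PR, of how that assumption could be spot-checked up front:

```python
# Sketch only: warn when a later history file lacks variables that the
# first file provides (the ADF itself does not perform this check).
import xarray as xr

def check_hist_var_consistency(hist_files):
    """Compare each file's data variables against the first file's set."""
    with xr.open_dataset(hist_files[0], decode_times=False) as ds0:
        ref_vars = set(ds0.data_vars)
    for fname in hist_files[1:]:
        with xr.open_dataset(fname, decode_times=False) as ds:
            missing = ref_vars - set(ds.data_vars)
        if missing:
            print(f"WARNING: {fname} is missing: {sorted(missing)}")
```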
- #Check what kind of vertical coordinate (if any) is being used for this model run: - #------------------------ - if 'lev' in hist_file_ds: - #Extract vertical level attributes: - lev_attrs = hist_file_ds['lev'].attrs - #First check if there is a "vert_coord" attribute: - if 'vert_coord' in lev_attrs: - vert_coord_type = lev_attrs['vert_coord'] + # Check what kind of vertical coordinate (if any) is being used for this model run: + # ------------------------ + if "lev" in hist_file_ds: + # Extract vertical level attributes: + lev_attrs = hist_file_ds["lev"].attrs + + # First check if there is a "vert_coord" attribute: + if "vert_coord" in lev_attrs: + vert_coord_type = lev_attrs["vert_coord"] else: - #Next check that the "long_name" attribute exists: - if 'long_name' in lev_attrs: - #Extract long name: - lev_long_name = lev_attrs['long_name'] - - #Check for "keywords" in the long name: - if 'hybrid level' in lev_long_name: - #Set model to hybrid vertical levels: + # Next check that the "long_name" attribute exists: + if "long_name" in lev_attrs: + # Extract long name: + lev_long_name = lev_attrs["long_name"] + + # Check for "keywords" in the long name: + if "hybrid level" in lev_long_name: + # Set model to hybrid vertical levels: vert_coord_type = "hybrid" - elif 'zeta level' in lev_long_name: - #Set model to height (z) vertical levels: + elif "zeta level" in lev_long_name: + # Set model to height (z) vertical levels: vert_coord_type = "height" else: - #Print a warning, and assume that no vertical - #level information is needed. - wmsg = "WARNING! Unable to determine the vertical coordinate" - wmsg +=f" type from the 'lev' long name, which is:\n'{lev_long_name}'." + # Print a warning, and assume that no vertical + # level information is needed. + wmsg = ( + "WARNING! Unable to determine the vertical coordinate" + ) + wmsg += f" type from the 'lev' long name, which is:\n'{lev_long_name}'." wmsg += "\nNo additional vertical coordinate information will be" wmsg += " transferred beyond the 'lev' dimension itself." print(wmsg) vert_coord_type = None - #End if + # End if else: - #Print a warning, and assume hybrid levels (for now): + # Print a warning, and assume hybrid levels (for now): wmsg = "WARNING! No long name found for the 'lev' dimension," - wmsg += " so no additional vertical coordinate information will be" + wmsg += ( + " so no additional vertical coordinate information will be" + ) wmsg += " transferred beyond the 'lev' dimension itself." print(wmsg) vert_coord_type = None - #End if (long name) - #End if (vert_coord) + # End if (long name) + # End if (vert_coord) else: - #No level dimension found, so assume there is no vertical coordinate: + # No level dimension found, so assume there is no vertical coordinate: vert_coord_type = None - #End if (lev existence) - #------------------------ + # End if (lev existence) + # ------------------------ - #Check if time series directory exists, and if not, then create it: - #Use pathlib to create parent directories, if necessary. + # Check if time series directory exists, and if not, then create it: + # Use pathlib to create parent directories, if necessary. 
Path(ts_dir[case_idx]).mkdir(parents=True, exist_ok=True) - #INPUT NAME TEMPLATE: $CASE.$scomp.[$type.][$string.]$date[$ending] + # INPUT NAME TEMPLATE: $CASE.$scomp.[$type.][$string.]$date[$ending] first_file_split = str(hist_files[0]).split(".") if first_file_split[-1] == "nc": - time_string_start = first_file_split[-2].replace("-","") + time_string_start = first_file_split[-2].replace("-", "") else: - time_string_start = first_file_split[-1].replace("-","") + time_string_start = first_file_split[-1].replace("-", "") last_file_split = str(hist_files[-1]).split(".") if last_file_split[-1] == "nc": - time_string_finish = last_file_split[-2].replace("-","") + time_string_finish = last_file_split[-2].replace("-", "") else: - time_string_finish = last_file_split[-1].replace("-","") + time_string_finish = last_file_split[-1].replace("-", "") time_string = "-".join([time_string_start, time_string_finish]) - #Loop over CAM history variables: + # Loop over CAM history variables: list_of_commands = [] vars_to_derive = [] - #create copy of var list that can be modified for derivable variables + # create copy of var list that can be modified for derivable variables diag_var_list = self.diag_var_list for var in diag_var_list: if var not in hist_file_var_list: vres = res.get(var, {}) if "derivable_from" in vres: - constit_list = vres['derivable_from'] + constit_list = vres["derivable_from"] for constit in constit_list: if constit not in diag_var_list: diag_var_list.append(constit) @@ -532,95 +550,102 @@ def call_ncrcat(cmd): print(msg) continue - #Check if variable has a "lev" dimension according to first file: - has_lev = bool('lev' in hist_file_ds[var].dims) + # Check if variable has a "lev" dimension according to first file: + has_lev = bool("lev" in hist_file_ds[var].dims) - #Create full path name, file name template: - #$cam_case_name.$hist_str.$variable.YYYYMM-YYYYMM.nc + # Create full path name, file name template: + # $cam_case_name.$hist_str.$variable.YYYYMM-YYYYMM.nc - ts_outfil_str = ts_dir[case_idx] + os.sep + \ - ".".join([case_name, hist_str, var, time_string, "nc" ]) + ts_outfil_str = ( + ts_dir[case_idx] + + os.sep + + ".".join([case_name, hist_str, var, time_string, "nc"]) + ) - #Check if files already exist in time series directory: + # Check if files already exist in time series directory: ts_file_list = glob.glob(ts_outfil_str) - #If files exist, then check if over-writing is allowed: + # If files exist, then check if over-writing is allowed: if ts_file_list: if not overwrite_ts[case_idx]: - #If not, then simply skip this variable: + # If not, then simply skip this variable: continue - #Notify user of new time series file: + # Notify user of new time series file: print(f"\t - time series for {var}") - #Variable list starts with just the variable + # Variable list starts with just the variable ncrcat_var_list = f"{var}" - #Determine "ncrcat" command to generate time series file: - if 'date' in hist_file_ds[var].dims: + # Determine "ncrcat" command to generate time series file: + if "date" in hist_file_ds[var].dims: ncrcat_var_list = ncrcat_var_list + ",date" - if 'datesec' in hist_file_ds[var].dims: + if "datesec" in hist_file_ds[var].dims: ncrcat_var_list = ncrcat_var_list + ",datesec" if has_lev and vert_coord_type: - #For now, only add these variables if using CAM: - if 'cam' in hist_str: - #PS might be in a different history file. If so, continue without error. 
+ # For now, only add these variables if using CAM: + if "cam" in hist_str: + # PS might be in a different history file. If so, continue without error. ncrcat_var_list = ncrcat_var_list + ",hyam,hybm,hyai,hybi" - if 'PS' in hist_file_var_list: + if "PS" in hist_file_var_list: ncrcat_var_list = ncrcat_var_list + ",PS" print("Adding PS to file") else: wmsg = "WARNING: PS not found in history file." wmsg += " It might be needed at some point." print(wmsg) - #End if + # End if if vert_coord_type == "height": - #Adding PMID here works, but significantly increases - #the storage (disk usage) requirements of the ADF. - #This can be alleviated in the future by figuring out - #a way to determine all of the regridding targets at - #the start of the ADF run, and then regridding a single - #PMID file to each one of those targets separately. -JN - if 'PMID' in hist_file_var_list: + # Adding PMID here works, but significantly increases + # the storage (disk usage) requirements of the ADF. + # This can be alleviated in the future by figuring out + # a way to determine all of the regridding targets at + # the start of the ADF run, and then regridding a single + # PMID file to each one of those targets separately. -JN + if "PMID" in hist_file_var_list: ncrcat_var_list = ncrcat_var_list + ",PMID" print("Adding PMID to file") else: wmsg = "WARNING: PMID not found in history file." wmsg += " It might be needed at some point." print(wmsg) - #End if PMID - #End if height - #End if cam - #End if has_lev - - cmd = ["ncrcat", "-O", "-4", "-h","--no_cll_mth", "-v",ncrcat_var_list] + \ - hist_files + ["-o", ts_outfil_str] - - #Add to command list for use in multi-processing pool: + # End if PMID + # End if height + # End if cam + # End if has_lev + + cmd = ( + ["ncrcat", "-O", "-4", "-h", "--no_cll_mth", "-v", ncrcat_var_list] + + hist_files + + ["-o", ts_outfil_str] + ) + + # Add to command list for use in multi-processing pool: list_of_commands.append(cmd) - #End variable loop + # End variable loop - #Now run the "ncrcat" subprocesses in parallel: + # Now run the "ncrcat" subprocesses in parallel: with mp.Pool(processes=self.num_procs) as mpool: _ = mpool.map(call_ncrcat, list_of_commands) if vars_to_derive: - self.derive_variables(vars_to_derive=vars_to_derive,ts_dir=ts_dir[case_idx]) - #End with + self.derive_variables( + vars_to_derive=vars_to_derive, ts_dir=ts_dir[case_idx] + ) + # End with - #End cases loop + # End cases loop - #Notify user that script has ended: + # Notify user that script has ended: print(" ...CAM time series file generation has finished successfully.") ######### def create_climo(self): - """ Temporally average CAM time series data in order to generate CAM climatologies. @@ -633,41 +658,42 @@ def create_climo(self): non-weighted averaging). 
""" - #Extract climatology calculation config options: - calc_climo = self.get_cam_info('calc_cam_climo') + # Extract climatology calculation config options: + calc_climo = self.get_cam_info("calc_cam_climo") - #Check if climo calculation config option is a list: + # Check if climo calculation config option is a list: if isinstance(calc_climo, list): - #If so, then check if any of the entries are "True": + # If so, then check if any of the entries are "True": calc_climo = any(calc_climo) - #End if + # End if - #Next check if a baseline simulation is being used - #and no other model cases need climatologies calculated: + # Next check if a baseline simulation is being used + # and no other model cases need climatologies calculated: if not self.compare_obs and not calc_climo: - calc_bl_climo = self.get_baseline_info('calc_cam_climo') + calc_bl_climo = self.get_baseline_info("calc_cam_climo") - #Check if baseline climo calculation config option is a list, - #although it really never should be: + # Check if baseline climo calculation config option is a list, + # although it really never should be: if isinstance(calc_bl_climo, list): - #If so, then check if any of the entries are "True": + # If so, then check if any of the entries are "True": calc_bl_climo = any(calc_bl_climo) - #End if + # End if else: - #Just set to False: + # Just set to False: calc_bl_climo = False - #End if + # End if - #Check if a user wants any climatologies to be calculated: + # Check if a user wants any climatologies to be calculated: if calc_climo or calc_bl_climo: - - #If so, then extract names of time-averaging scripts: - avg_func_names = self.__time_averaging_scripts # this is a list of script names - # _OR_ - # a **list** of dictionaries with - # script names as keys that hold - # args(list), kwargs(dict), and - # module(str) + # If so, then extract names of time-averaging scripts: + avg_func_names = ( + self.__time_averaging_scripts + ) # this is a list of script names + # _OR_ + # a **list** of dictionaries with + # script names as keys that hold + # args(list), kwargs(dict), and + # module(str) if not avg_func_names: emsg = "No time_averaging_scripts provided for calculating" @@ -676,18 +702,20 @@ def create_climo(self): emsg += " or skip the calculation of climatologies." self.end_diag_fail(emsg) - #Run the listed scripts: - self.__diag_scripts_caller("averaging", avg_func_names, - log_section = "create_climo") + # Run the listed scripts: + self.__diag_scripts_caller( + "averaging", avg_func_names, log_section="create_climo" + ) else: - #If not, then notify user that climo file generation is skipped. - print("\n No climatology files were requested by user, so averaging will be skipped.") + # If not, then notify user that climo file generation is skipped. + print( + "\n No climatology files were requested by user, so averaging will be skipped." + ) ######### def regrid_climo(self): - """ Re-grid CAM climatology files to observations or baseline climatologies, in order to allow @@ -701,27 +729,29 @@ def regrid_climo(self): nearest-neighbor regridding). 
""" - #Extract names of re-gridding scripts: - regrid_func_names = self.__regridding_scripts # this is a list of script names - # _OR_ - # a **list** of dictionaries with - # script names as keys that hold - # kwargs(dict) and module(str) + # Extract names of re-gridding scripts: + regrid_func_names = self.__regridding_scripts # this is a list of script names + # _OR_ + # a **list** of dictionaries with + # script names as keys that hold + # kwargs(dict) and module(str) - if not regrid_func_names or all(func_names is None for func_names in regrid_func_names): + if not regrid_func_names or all( + func_names is None for func_names in regrid_func_names + ): print("\n No regridding options provided, continue.") return # NOTE: if no regridding options provided, we should skip it, but # do we need to still copy (symlink?) files into the regrid directory? - #Run the listed scripts: - self.__diag_scripts_caller("regridding", regrid_func_names, - log_section = "regrid_climo") + # Run the listed scripts: + self.__diag_scripts_caller( + "regridding", regrid_func_names, log_section="regrid_climo" + ) ######### def perform_analyses(self): - """ Performs statistical and other analyses as specified by the user. This currently only includes the AMWG table generation. @@ -730,44 +760,46 @@ def perform_analyses(self): inputs in a time series format. """ - #Extract names of plotting scripts: + # Extract names of plotting scripts: anly_func_names = self.__analysis_scripts # this is a list of script names - # _OR_ - # a **list** of dictionaries with - # script names as keys that hold - # args(list), kwargs(dict), and module(str) + # _OR_ + # a **list** of dictionaries with + # script names as keys that hold + # args(list), kwargs(dict), and module(str) - #If no scripts are listed, then exit routine: + # If no scripts are listed, then exit routine: if not anly_func_names: - print("\n Nothing listed under 'analysis_scripts', exiting 'perform_analyses' method.") + print( + "\n Nothing listed under 'analysis_scripts', exiting 'perform_analyses' method." + ) return - #End if + # End if - #Set "data_name" variable, which depends on "compare_obs": + # Set "data_name" variable, which depends on "compare_obs": if self.compare_obs: data_name = "obs" else: - #Set data_name to basline case: - data_name = self.get_baseline_info('cam_case_name', required=True) + # Set data_name to basline case: + data_name = self.get_baseline_info("cam_case_name", required=True) - #Attempt to grab baseline start_years (not currently required): + # Attempt to grab baseline start_years (not currently required): syear_baseline = self.climo_yrs["syear_baseline"] eyear_baseline = self.climo_yrs["eyear_baseline"] - #If years exist, then add them to the data_name string: + # If years exist, then add them to the data_name string: if syear_baseline and eyear_baseline: data_name += f"_{syear_baseline}_{eyear_baseline}" - #End if - #End if + # End if + # End if - #Run the listed scripts: - self.__diag_scripts_caller("analysis", anly_func_names, - log_section = "perform_analyses") + # Run the listed scripts: + self.__diag_scripts_caller( + "analysis", anly_func_names, log_section="perform_analyses" + ) ######### def create_plots(self): - """ Generate ADF diagnostic plots. The actual plotting is done using the @@ -778,185 +810,273 @@ def create_plots(self): main ADF diagnostics routines. 
""" - #Extract names of plotting scripts: + # Extract names of plotting scripts: plot_func_names = self.__plotting_scripts # this is a list of script names - # _OR_ - # a **list** of dictionaries with - # script names as keys that hold - # args(list), kwargs(dict), and module(str) - + # _OR_ + # a **list** of dictionaries with + # script names as keys that hold + # args(list), kwargs(dict), and module(str) - #If no scripts are listed, then exit routine: + # If no scripts are listed, then exit routine: if not plot_func_names: - print("\n Nothing listed under 'plotting_scripts', so no plots will be made.") + print( + "\n Nothing listed under 'plotting_scripts', so no plots will be made." + ) return - #End if + # End if - #Set "data_name" variable, which depends on "compare_obs": + # Set "data_name" variable, which depends on "compare_obs": if self.compare_obs: data_name = "obs" else: - #Set data_name to basline case: - data_name = self.get_baseline_info('cam_case_name', required=True) + # Set data_name to basline case: + data_name = self.get_baseline_info("cam_case_name", required=True) - #Attempt to grab baseline start_years (not currently required): + # Attempt to grab baseline start_years (not currently required): syear_baseline = self.climo_yrs["syear_baseline"] eyear_baseline = self.climo_yrs["eyear_baseline"] - #If years exist, then add them to the data_name string: + # If years exist, then add them to the data_name string: if syear_baseline and eyear_baseline: data_name += f"_{syear_baseline}_{eyear_baseline}" - #End if - #End if + # End if + # End if - #Run the listed scripts: - self.__diag_scripts_caller("plotting", plot_func_names, - log_section = "create_plots") + # Run the listed scripts: + self.__diag_scripts_caller( + "plotting", plot_func_names, log_section="create_plots" + ) ######### def setup_run_cvdp(self): - """ Create CVDP directory tree, generate namelist file and edit driver.ncl needed to run CVDP. Submit CVDP diagnostics. """ - #import needed standard modules: + # import needed standard modules: import shutil - #Case names: - case_names = self.get_cam_info('cam_case_name', required=True) + # Case names: + case_names = self.get_cam_info("cam_case_name", required=True) - #Start years (not currently required): - syears = self.climo_yrs['syears'] + # Start years (not currently required): + syears = self.climo_yrs["syears"] - #End year (not currently rquired): - eyears = self.climo_yrs['eyears'] + # End year (not currently rquired): + eyears = self.climo_yrs["eyears"] - #Timeseries locations: - cam_ts_loc = self.get_cam_info('cam_ts_loc') + # Timeseries locations: + cam_ts_loc = self.get_cam_info("cam_ts_loc") - #set CVDP directory, recursively copy cvdp codebase to the CVDP directory + # set CVDP directory, recursively copy cvdp codebase to the CVDP directory if len(case_names) > 1: - cvdp_dir = self.get_cvdp_info('cvdp_loc', required=True)+case_names[0]+'_multi_case' + cvdp_dir = ( + self.get_cvdp_info("cvdp_loc", required=True) + + case_names[0] + + "_multi_case" + ) else: - cvdp_dir = self.get_cvdp_info('cvdp_loc', required=True)+case_names[0] - #end if + cvdp_dir = self.get_cvdp_info("cvdp_loc", required=True) + case_names[0] + # end if if not os.path.isdir(cvdp_dir): - shutil.copytree(self.get_cvdp_info('cvdp_codebase_loc', required=True),cvdp_dir) - #End if - - #check to see if there is a CAM baseline case. If there is, read in relevant information. 
- if not self.get_basic_info('compare_obs'): - case_name_baseline = self.get_baseline_info('cam_case_name') - syears_baseline = self.climo_yrs['syear_baseline'] - eyears_baseline = self.climo_yrs['eyear_baseline'] - baseline_ts_loc = self.get_baseline_info('cam_ts_loc') - #End if - - #Loop over cases to create individual text array to be written to namelist file. + shutil.copytree( + self.get_cvdp_info("cvdp_codebase_loc", required=True), cvdp_dir + ) + # End if + + # check to see if there is a CAM baseline case. If there is, read in relevant information. + if not self.get_basic_info("compare_obs"): + case_name_baseline = self.get_baseline_info("cam_case_name") + syears_baseline = self.climo_yrs["syear_baseline"] + eyears_baseline = self.climo_yrs["eyear_baseline"] + baseline_ts_loc = self.get_baseline_info("cam_ts_loc") + # End if + + # Loop over cases to create individual text array to be written to namelist file. row_list = [] for case_idx, case_name in enumerate(case_names): - row = [case_name,' | ',str(cam_ts_loc[case_idx]),os.sep,' | ', - str(syears[case_idx]),' | ',str(eyears[case_idx])] + row = [ + case_name, + " | ", + str(cam_ts_loc[case_idx]), + os.sep, + " | ", + str(syears[case_idx]), + " | ", + str(eyears[case_idx]), + ] row_list.append("".join(row)) - #End for + # End for - #Create new namelist file. If CAM baseline case present add it to list, - #namelist file must end in a blank line. - with open(os.path.join(cvdp_dir, "namelist"), 'w', encoding='utf-8') as fnml: + # Create new namelist file. If a CAM baseline case is present, add it to the list; + # the namelist file must end in a blank line. + with open(os.path.join(cvdp_dir, "namelist"), "w", encoding="utf-8") as fnml: for rowtext in row_list: fnml.write(rowtext) - #End for - fnml.write('\n\n') + # End for + fnml.write("\n\n") if "baseline_ts_loc" in locals(): - rowb = [case_name_baseline,' | ',str(baseline_ts_loc),os.sep,' | ', - str(syears_baseline),' | ',str(eyears_baseline)] + rowb = [ + case_name_baseline, + " | ", + str(baseline_ts_loc), + os.sep, + " | ", + str(syears_baseline), + " | ", + str(eyears_baseline), + ] rowtextb = "".join(rowb) fnml.write(rowtextb) - fnml.write('\n\n') - #End if - #End with - - #modify driver.ncl to set the proper output directory, webpage title, and location - #of CVDP NCL scripts, set modular = True (to run multiple CVDP scripts at once), - #and modify the modular_list to exclude all scripts focused solely on non-atmospheric - #variables, and set tar_output to True if cvdp_tar: true - with open(os.path.join(cvdp_dir, "driver.ncl"), 'r', encoding='utf-8') as f_in, \ - open(os.path.join(cvdp_dir, f"driver.{case_names[0]}.ncl"), 'w', \ - encoding='utf-8') as f_out: + fnml.write("\n\n") + # End if + # End with + + # modify driver.ncl to set the proper output directory, webpage title, and location + # of CVDP NCL scripts, set modular = True (to run multiple CVDP scripts at once), + # and modify the modular_list to exclude all scripts focused solely on non-atmospheric + # variables, and set tar_output to True if cvdp_tar: true + with open( + os.path.join(cvdp_dir, "driver.ncl"), "r", encoding="utf-8" + ) as f_in, open( + os.path.join(cvdp_dir, f"driver.{case_names[0]}.ncl"), "w", encoding="utf-8" + ) as f_out: for line in f_in: - if ' outdir ' in line: - line = ' outdir = "'+cvdp_dir+'/output/"' - if ' webpage_title ' in line: + if " outdir " in line: + line = ' outdir = "' + cvdp_dir + '/output/"' + if " webpage_title " in line: line = ' webpage_title = "ADF/CVDP Comparison"' - if 'directory path of
CVDP NCL scripts' in line: - line = ' zp = "'+cvdp_dir+'/ncl_scripts/"' - if ' modular = ' in line: + if "directory path of CVDP NCL scripts" in line: + line = ' zp = "' + cvdp_dir + '/ncl_scripts/"' + if " modular = " in line: line = ' modular = "True"' - if ' modular_list = ' in line: + if " modular_list = " in line: line = ' modular_list = "' - line += 'psl.nam_nao,psl.pna_npo,tas.trends_timeseries,snd.trends,' - line += 'psl.trends,amo,pdo,sst.indices,pr.trends_timeseries,' - line += 'psl.sam_psa,sst.mean_stddev,' - line += 'psl.mean_stddev,pr.mean_stddev,sst.trends_timeseries,' + line += "psl.nam_nao,psl.pna_npo,tas.trends_timeseries,snd.trends," + line += "psl.trends,amo,pdo,sst.indices,pr.trends_timeseries," + line += "psl.sam_psa,sst.mean_stddev," + line += "psl.mean_stddev,pr.mean_stddev,sst.trends_timeseries," line += 'tas.mean_stddev,ipo"' - if self.get_cvdp_info('cvdp_tar'): - if ' tar_output ' in line: + if self.get_cvdp_info("cvdp_tar"): + if " tar_output " in line: line = ' tar_output = "True"' - #End if - #End if + # End if + # End if f_out.write(line) - #End for - #End with - - #Submit the CVDP driver script in background mode, send output to cvdp.out file - with open(os.path.join(cvdp_dir,'cvdp.out'), 'w', encoding='utf-8') as subout: - _ = subprocess.Popen([f'cd {cvdp_dir}; ncl -Q '+ \ - os.path.join(cvdp_dir,f'driver.{case_names[0]}.ncl')], - shell=True, stdout=subout, close_fds=True) - #End with - - print(' ') - print('CVDP is running in background. ADF continuing.') - print(f'CVDP terminal output is located in {cvdp_dir}/cvdp.out') - if self.get_cvdp_info('cvdp_tar'): - print('CVDP graphical and netCDF file output can be found here:' + \ - f' {cvdp_dir}/output/cvdp.tar') - print('Open index.html (within cvdp.tar file) in web browser to view CVDP results.') + # End for + # End with + + # Submit the CVDP driver script in background mode, send output to cvdp.out file + with open(os.path.join(cvdp_dir, "cvdp.out"), "w", encoding="utf-8") as subout: + _ = subprocess.Popen( + [ + f"cd {cvdp_dir}; ncl -Q " + + os.path.join(cvdp_dir, f"driver.{case_names[0]}.ncl") + ], + shell=True, + stdout=subout, + close_fds=True, + ) + # End with + + print(" ") + print("CVDP is running in background. ADF continuing.") + print(f"CVDP terminal output is located in {cvdp_dir}/cvdp.out") + if self.get_cvdp_info("cvdp_tar"): + print( + "CVDP graphical and netCDF file output can be found here:" + + f" {cvdp_dir}/output/cvdp.tar" + ) + print( + "Open index.html (within cvdp.tar file) in web browser to view CVDP results." + ) else: - print(f'CVDP graphical and netCDF file output can be found here: {cvdp_dir}/output/') - print(f'Open {cvdp_dir}/output/index.html file in web browser to view CVDP results.') - #End if - print('For CVDP information visit: https://www.cesm.ucar.edu/working_groups/CVC/cvdp/') - print(' ') + print( + f"CVDP graphical and netCDF file output can be found here: {cvdp_dir}/output/" + ) + print( + f"Open {cvdp_dir}/output/index.html file in web browser to view CVDP results." + ) + # End if + print( + "For CVDP information visit: https://www.cesm.ucar.edu/working_groups/CVC/cvdp/" + ) + print(" ") ######### - def derive_variables(self,vars_to_derive=None,ts_dir=None): - + def derive_variables(self, vars_to_derive=None, ts_dir=None, overwrite=None): """ - Derive variables acccording to steps given here. Since derivations will depend on the + Derive variables according to steps given here.
Since derivations will depend on the variable, each variable to derive will need its own set of steps below. + + Caution: this method assumes that there will be one time series file per variable + + If the file for the derived variable exists, the kwarg `overwrite` determines + whether to overwrite the file (true) or exit with a warning message. """ for var in vars_to_derive: if var == "PRECT": - # PRECT can be found by simply adding PRECL and PRECC - # grab file names for the PRECL and PRECC files from the case ts directory - if glob.glob(os.path.join(ts_dir,"*PRECC*")) and glob.glob(os.path.join(ts_dir,"*PRECL*")): - constit_files=sorted(glob.glob(os.path.join(ts_dir,"*PREC*"))) - else: - ermsg = "PRECC and PRECL were not both present; PRECT cannot be calculated." - ermsg += " Please remove PRECT from diag_var_list or find the relevant CAM files." - raise FileNotFoundError(ermsg) - # create new file name for PRECT - prect_file = constit_files[0].replace('PRECC','PRECT') - # append PRECC to the file containing PRECL - os.system(f"ncks -A -v PRECC {constit_files[0]} {constit_files[1]}") - # create new file with the sum of PRECC and PRECL - os.system(f"ncap2 -s 'PRECT=(PRECC+PRECL)' {constit_files[1]} {prect_file}") - -############### + # PRECT can be found by simply adding PRECL and PRECC + # grab file names for the PRECL and PRECC files from the case ts directory + if glob.glob(os.path.join(ts_dir, "*PRECC*")) and glob.glob( + os.path.join(ts_dir, "*PRECL*") + ): + constit_files = sorted(glob.glob(os.path.join(ts_dir, "*PREC*"))) + else: + ermsg = "PRECC and PRECL were not both present; PRECT cannot be calculated." + ermsg += " Please remove PRECT from diag_var_list or find the relevant CAM files." + raise FileNotFoundError(ermsg) + # create new file name for PRECT + prect_file = constit_files[0].replace("PRECC", "PRECT") + if Path(prect_file).is_file(): + if overwrite: + Path(prect_file).unlink() + else: + print( + f"[{__name__}] Warning: PRECT file was found and overwrite is False. Will use existing file." + ) + continue + # append PRECC to the file containing PRECL + os.system(f"ncks -A -v PRECC {constit_files[0]} {constit_files[1]}") + # create new file with the sum of PRECC and PRECL + os.system( + f"ncap2 -s 'PRECT=(PRECC+PRECL)' {constit_files[1]} {prect_file}" + ) + if var == "RESTOM": + # RESTOM = FSNT-FLNT + # Have to be more precise than with PRECT because FSNTOA, FSNTC, etc. are valid variables + if glob.glob(os.path.join(ts_dir, "*.FSNT.*")) and glob.glob( + os.path.join(ts_dir, "*.FLNT.*") + ): + input_files = [ + sorted(glob.glob(os.path.join(ts_dir, f"*.{v}.*"))) + for v in ["FLNT", "FSNT"] + ] + constit_files = [] + for elem in input_files: + constit_files += elem + else: + ermsg = "FSNT and FLNT were not both present; RESTOM cannot be calculated." + ermsg += " Please remove RESTOM from diag_var_list or find the relevant CAM files." + raise FileNotFoundError(ermsg) + # create new file name for RESTOM + derived_file = constit_files[0].replace("FLNT", "RESTOM") + if Path(derived_file).is_file(): + if overwrite: + Path(derived_file).unlink() + else: + print( + f"[{__name__}] Warning: RESTOM file was found and overwrite is False. Will use existing file."
+ ) + continue + # append FSNT to the file containing FLNT + os.system(f"ncks -A -v FLNT {constit_files[0]} {constit_files[1]}") + # create new file with the difference of FSNT and FLNT + os.system( + f"ncap2 -s 'RESTOM=(FSNT-FLNT)' {constit_files[1]} {derived_file}" + ) diff --git a/lib/adf_variable_defaults.yaml b/lib/adf_variable_defaults.yaml index dc9a22075..8a0df8f1b 100644 --- a/lib/adf_variable_defaults.yaml +++ b/lib/adf_variable_defaults.yaml @@ -713,6 +713,19 @@ QRS: #+++++++++++++++++ # Category: TOA energy flux #+++++++++++++++++ +RESTOM: + colormap: "RdBu_r" + contour_levels_range: [-100, 100, 5] + diff_colormap: "seismic" + diff_contour_range: [-10, 10, 0.5] + scale_factor: 1 + add_offset: 0 + new_unit: "W m$^{-2}$" + mpl: + colorbar: + label: "W m$^{-2}$" + category: "TOA energy flux" + derivable_from: ['FLNT','FSNT'] SWCF: colormap: "Blues" diff --git a/scripts/analysis/amwg_table.py b/scripts/analysis/amwg_table.py index e7765d3cb..286573770 100644 --- a/scripts/analysis/amwg_table.py +++ b/scripts/analysis/amwg_table.py @@ -150,9 +150,6 @@ def amwg_table(adf): output_locs.append(output_locs[0]) #----------------------------------------- - #Create (empty) dictionary to use for the - #residual top of model (RESTOM) radiation calculation: - restom_dict = {} #Loop over CAM cases: for case_idx, case_name in enumerate(case_names): @@ -182,9 +179,6 @@ def amwg_table(adf): Path.unlink(output_csv_file) #End if - #Save case name as a new key in the RESTOM dictonary: - restom_dict[case_name] = {} - #Create/reset new variable that potentially stores the re-gridded #ocean fraction xarray data-array: ocn_frc_da = None @@ -270,14 +264,6 @@ def amwg_table(adf): # Note: we should be able to handle (lat, lon) or (ncol,) cases, at least data = pf.spatial_average(data) # changes data "in place" - #Add necessary data for RESTOM calcs below - if var == "FLNT": - restom_dict[case_name][var] = data - #Copy units for RESTOM as well: - restom_units = unit_str - if var == "FSNT": - restom_dict[case_name][var] = data - # In order to get correct statistics, average to annual or seasonal data = pf.annual_mean(data, whole_years=True, time_name='time') @@ -305,35 +291,13 @@ def amwg_table(adf): #End of var_list loop #-------------------- - if "FSNT" and "FLNT" in var_list: - #RESTOM Calcs - var = "RESTOM" #RESTOM = FSNT-FLNT - print(f"\t - Variable '{var}' being added to table") - data = restom_dict[case_name]["FSNT"] - restom_dict[case_name]["FLNT"] - # In order to get correct statistics, average to annual or seasonal - data = pf.annual_mean(data, whole_years=True, time_name='time') - # These get written to our output file: - stats_list = _get_row_vals(data) - row_values = [var, restom_units] + stats_list - # col (column) values declared above - - # Format entries: - dfentries = {c:[row_values[i]] for i,c in enumerate(cols)} - - # Add entries to Pandas structure: - df = pd.DataFrame(dfentries) - - # Check if the output CSV file exists, - # if so, then append to it: - if output_csv_file.is_file(): - df.to_csv(output_csv_file, mode='a', header=False, index=False) - else: - df.to_csv(output_csv_file, header=cols, index=False) - #End if - - else: - #Print message to debug log: - adf.debug_log("RESTOM not calculated because FSNT and/or FLNT variables not in dataset") + # Move RESTOM to top of table + #---------------------------- + if 'RESTOM' in var_list: + table_df = pd.read_csv(output_csv_file) + table_df = pd.concat([table_df[table_df['variable'] == 'RESTOM'], table_df]).reset_index(drop=True) +
table_df = table_df.drop_duplicates() + table_df.to_csv(output_csv_file, header=cols, index=False) #End if # last step is to add table dataframe to website (if enabled): @@ -343,7 +307,6 @@ def amwg_table(adf): #End of model case loop #---------------------- - #Notify user that script has ended: print(" ...AMWG variable table has been generated successfully.")
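For reference, a toy call to `construct_index_info` (defined near the top of `lib/adf_diag.py` above) illustrates the nested mapping it builds; the image stem used here is hypothetical:

```python
# Assumes construct_index_info from lib/adf_diag.py above is in scope;
# "TS_ANN_LatLon_Mean" is a made-up image stem.
page_dict = {}
construct_index_info(page_dict, "TS_ANN_LatLon_Mean", "plots/TS_ANN_LatLon_Mean.png")
# page_dict is now:
# {"TS": {"LatLon_Mean": {"ANN": "plots/TS_ANN_LatLon_Mean.png"}}}
```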
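`__diag_scripts_caller` accepts each configured script either as a bare string or as a one-key dictionary of the `{function_name : {kwargs:{...}, module:'xxxx'}}` form it validates above. A sketch of both shapes, with hypothetical script names:

```python
# Hypothetical entries; only the two shapes are taken from the code above.
func_names = [
    "create_climo_files",  # string form: runs create_climo_files() from create_climo_files.py
    {
        "my_averaging_func": {  # dict form with user-defined inputs
            "kwargs": {"season": "DJF"},  # forwarded as keyword arguments
            "module": "my_custom_module.py",  # overrides the default '<func_name>.py'
        }
    },
]
```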
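The RESTOM derivation above shells out to `ncks`/`ncap2`; an equivalent xarray sketch may clarify what that pipeline computes. File names follow the `$case.$hist_str.$variable.$time.nc` template used by `create_time_series`, with made-up values:

```python
# Sketch only; not how derive_variables is implemented.
import xarray as xr

fsnt = xr.open_dataset("mycase.cam.h0.FSNT.000101-001012.nc")["FSNT"]
flnt = xr.open_dataset("mycase.cam.h0.FLNT.000101-001012.nc")["FLNT"]

restom = (fsnt - flnt).rename("RESTOM")  # RESTOM = FSNT - FLNT
restom.attrs["units"] = "W/m2"  # matches the 'W m$^{-2}$' label in the YAML defaults
restom.to_dataset().to_netcdf("mycase.cam.h0.RESTOM.000101-001012.nc")
```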
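Finally, the `amwg_table.py` change reorders the existing CSV instead of recomputing RESTOM: it prepends the RESTOM row, then lets `drop_duplicates()` remove the copy left in its original position. A toy frame with made-up values shows the effect:

```python
import pandas as pd

table_df = pd.DataFrame(
    {"variable": ["FLNT", "FSNT", "RESTOM"], "mean": [233.1, 240.2, 7.1]}
)
table_df = pd.concat(
    [table_df[table_df["variable"] == "RESTOM"], table_df]
).reset_index(drop=True)
table_df = table_df.drop_duplicates()
print(table_df)  # the RESTOM row now appears first
```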