Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

A whole bunch of pylint warning/error/refactor fixes #384

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 15 additions & 6 deletions .pylintrc
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,14 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=logging-format-interpolation,too-few-public-methods,import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,fixme
disable=missing-module-docstring,logging-fstring-interpolation,import-star-module-level,too-few-public-methods,backtick,fixme

# JUSTIFICATION:
# missing-module-docstring -> many of our modules are self-explanatory, they hold a single class or set of functions which are well-documented already, the few modules which are more like 'scripts' are indeed documented in multiple ways as well.
# logging-fstring-interpolation -> this is a relic of the past, f-strings everywhere are more consistent and more performant.
# too-few-public-methods -> there's a few data classes and the likes that exist currently which aren't promoted to real data classes (needs to be fixed eventually)
# backtick -> this is used in places where it genuinely looks cleaner to split code over multiple lines.
# fixme -> these are used as reminders w/ links to GH issues for future work that needs to happen.


[REPORTS]
Expand Down Expand Up @@ -226,7 +233,7 @@ expected-line-ending-format=
[BASIC]

# Good variable names which should always be accepted, separated by a comma
good-names=ts,i,j,k,ex,Run,_,x,w,f,n,v,t,r,d,c,cd,g,r,q,V,m,A,b,log,y,id,ds,gt,md
good-names=ts,i,j,k,ex,Run,_,x,w,f,n,v,t,r,d,c,cd,g,r,q,V,m,A,b,log,y,id,ds,gt,md,ax,im

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
Expand All @@ -243,10 +250,10 @@ include-naming-hint=no
property-classes=abc.abstractproperty

# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
function-rgx=[a-z_][a-z0-9_]{2,40}$

# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$
function-name-hint=[a-z_][a-z0-9_]{2,40}$

# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
Expand Down Expand Up @@ -320,14 +327,16 @@ max-nested-blocks=5
[DESIGN]

# Maximum number of arguments for function / method
max-args=5
# - PyRate allows up to 8 (some functions have a few settings that don't make sense to make data-classes for)
max-args=8

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15
# - PyRate has some numerically complicated functions that don't make sense to split up
max-locals=20

# Maximum number of return / yield for function / method body
max-returns=6
Expand Down
215 changes: 145 additions & 70 deletions pyrate/configuration.py

Large diffs are not rendered by default.

51 changes: 35 additions & 16 deletions pyrate/constants.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
import os
import re
from pathlib import Path
import numpy as np
from pyrate.core.mpiops import comm

PYRATEPATH = Path(__file__).parent.parent


__version__ = "0.6.0"
CLI_DESCRIPTION = """
PyRate workflow:
PyRate workflow:

Step 1: conv2tif
Step 2: prepifg
Expand All @@ -21,8 +21,6 @@
more details.
"""

from pyrate.core.mpiops import comm

NO_OF_PARALLEL_PROCESSES = comm.Get_size()

CONV2TIF = 'conv2tif'
Expand Down Expand Up @@ -107,13 +105,20 @@
NO_DATA_VALUE = 'noDataValue'
#: FLOAT; No data averaging threshold for prepifg
NO_DATA_AVERAGING_THRESHOLD = 'noDataAveragingThreshold'
# BOOL (1/2/3); Re-project data from Line of sight, 1 = vertical, 2 = horizontal, 3 = no conversion
# INT (1/2/3); Re-project data from Line of sight
# 1 = vertical
# 2 = horizontal
# 3 = no conversion
# REPROJECTION = 'prjflag' # NOT CURRENTLY USED
#: BOOL (0/1): Convert no data values to Nan
NAN_CONVERSION = 'nan_conversion'

# Prepifg parameters
#: BOOL (1/2/3/4); Method for cropping interferograms, 1 = minimum overlapping area (intersection), 2 = maximum area (union), 3 = customised area, 4 = all ifgs already same size
#: INT (1/2/3/4); Method for cropping interferograms
# 1 = minimum overlapping area (intersection)
# 2 = maximum area (union)
# 3 = customised area
# 4 = all ifgs already same size
IFG_CROP_OPT = 'ifgcropopt'
#: INT; Multi look factor for interferogram preparation in x dimension
IFG_LKSX = 'ifglksx'
Expand Down Expand Up @@ -141,9 +146,12 @@
REFNY = "refny"
#: INT; Dimension of reference pixel search window (in number of pixels)
REF_CHIP_SIZE = 'refchipsize'
#: FLOAT; Minimum fraction of observations required in search window for pixel to be a viable reference pixel
#: FLOAT; Minimum fraction of observations required in search window for pixel to be
# a viable reference pixel
REF_MIN_FRAC = 'refminfrac'
#: BOOL (1/2); Reference phase estimation method (1: median of the whole interferogram, 2: median within the window surrounding the reference pixel)
#: INT (1/2); Reference phase estimation method
# 1: median of the whole interferogram
# 2: median within the window surrounding the reference pixel)
REF_EST_METHOD = 'refest'

MAXVAR = 'maxvar'
Expand All @@ -166,7 +174,8 @@
#: STR; Name of the file list containing the pool of available baseline files
BASE_FILE_LIST = 'basefilelist'

#: STR; Name of the file containing the GAMMA lookup table between lat/lon and radar coordinates (row/col)
#: STR; Name of the file containing the GAMMA lookup table between lat/lon and
# radar coordinates (row/col)
LT_FILE = 'ltfile'

# atmospheric error correction parameters NOT CURRENTLY USED
Expand All @@ -190,9 +199,12 @@
# orbital error correction/parameters
#: BOOL (1/0); Perform orbital error correction (1: yes, 0: no)
ORBITAL_FIT = 'orbfit'
#: BOOL (1/2); Method for orbital error correction (1: independent, 2: network)
#: INT (1/2); Method for orbital error correction (1: independent, 2: network)
ORBITAL_FIT_METHOD = 'orbfitmethod'
#: BOOL (1/2/3) Polynomial order of orbital error model (1: planar in x and y - 2 parameter model, 2: quadratic in x and y - 5 parameter model, 3: quadratic in x and cubic in y - part-cubic 6 parameter model)
#: INT (1/2/3) Polynomial order of orbital error model
# 1: planar in x and y - 2 parameter model
# 2: quadratic in x and y - 5 parameter model
# 3: quadratic in x and cubic in y - part-cubic 6 parameter model
ORBITAL_FIT_DEGREE = 'orbfitdegrees'
#: INT; Multi look factor for orbital error calculation in x dimension
ORBITAL_FIT_LOOKS_X = 'orbfitlksx'
Expand All @@ -205,7 +217,9 @@
ORBFIT_INTERCEPT = 'orbfitintercept'

# Stacking parameters
#: FLOAT; Threshold ratio between 'model minus observation' residuals and a-priori observation standard deviations for stacking estimate acceptance (otherwise remove furthest outlier and re-iterate)
#: FLOAT; Threshold ratio between 'model minus observation' residuals and a-priori observation
# standard deviations for stacking estimate acceptance
# (otherwise remove furthest outlier and re-iterate)
LR_NSIG = 'nsig'
#: INT; Number of required observations per pixel for stacking to occur
LR_PTHRESH = 'pthr'
Expand All @@ -230,7 +244,8 @@
SLPF_CUTOFF = 'slpfcutoff'
#: INT (1/0); Do spatial interpolation at NaN locations (1 for interpolation, 0 for zero fill)
SLPF_NANFILL = 'slpnanfill'
#: #: STR; Method for spatial interpolation (one of: linear, nearest, cubic), only used when slpnanfill=1
#: #: STR; Method for spatial interpolation (one of: linear, nearest, cubic),
# only used when slpnanfill=1
SLPF_NANFILL_METHOD = 'slpnanfill_method'

# DEM error correction parameters
Expand All @@ -251,7 +266,10 @@
# tsinterp is automatically assigned in the code; not needed in conf file
# TIME_SERIES_INTERP = 'tsinterp'

#: BOOL (0/1/2); Use parallelisation/Multi-threading (0: in serial, 1: in parallel by rows, 2: in parallel by pixel)
#: INT (0/1/2); Use parallelisation/Multi-threading
# 0: in serial
# 1: in parallel by rows
# 2: in parallel by pixel
PARALLEL = 'parallel'
#: INT; Number of processes for multi-threading
PROCESSES = 'processes'
Expand All @@ -271,7 +289,9 @@
PART_CUBIC: 'PART CUBIC'}

# geometry outputs
GEOMETRY_OUTPUT_TYPES = ['rdc_azimuth', 'rdc_range', 'look_angle', 'incidence_angle', 'azimuth_angle', 'range_dist']
GEOMETRY_OUTPUT_TYPES = [
'rdc_azimuth', 'rdc_range', 'look_angle', 'incidence_angle', 'azimuth_angle', 'range_dist'
]

# sign convention for phase data
SIGNAL_POLARITY = 'signal_polarity'
Expand Down Expand Up @@ -302,4 +322,3 @@
GEOMETRY_DIR = 'geometry_dir'
TIMESERIES_DIR = 'timeseries_dir'
VELOCITY_DIR = 'velocity_dir'

37 changes: 19 additions & 18 deletions pyrate/conv2tif.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,15 +14,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python script converts ROI_PAC or GAMMA format input interferograms
This Python script converts ROI_PAC or GAMMA format input interferograms
into geotiff format files
"""
# -*- coding: utf-8 -*-
import os
from typing import Tuple, List
from pathlib import Path

from joblib import Parallel, delayed
import numpy as np
from pathlib import Path

import pyrate.constants as C
from pyrate.core.prepifg_helper import PreprocessError
Expand Down Expand Up @@ -52,7 +53,7 @@ def main(params):

if params[C.PROCESSOR] == 2: # if geotif
log.warning("'conv2tif' step not required for geotiff!")
return
return None

mpi_vs_multiprocess_logging("conv2tif", params)

Expand Down Expand Up @@ -80,7 +81,7 @@ def do_geotiff(unw_paths: List[MultiplePaths], params: dict) -> List[str]:
parallel = params[C.PARALLEL]

if parallel:
log.info("Running geotiff conversion in parallel with {} processes".format(params[C.PROCESSES]))
        log.info(f"Running geotiff conversion in parallel with {params[C.PROCESSES]} processes")
dest_base_ifgs = Parallel(n_jobs=params[C.PROCESSES], verbose=shared.joblib_log_level(
C.LOG_LEVEL))(
delayed(_geotiff_multiprocessing)(p, params) for p in unw_paths)
Expand All @@ -99,19 +100,19 @@ def _geotiff_multiprocessing(unw_path: MultiplePaths, params: dict) -> Tuple[str
processor = params[C.PROCESSOR] # roipac or gamma

# Create full-res geotiff if not already on disk
if not os.path.exists(dest):
if processor == GAMMA:
header = gamma.gamma_header(unw_path.unwrapped_path, params)
elif processor == ROIPAC:
log.info("Warning: ROI_PAC support will be deprecated in a future PyRate release")
header = roipac.roipac_header(unw_path.unwrapped_path, params)
else:
raise PreprocessError('Processor must be ROI_PAC (0) or GAMMA (1)')
header[ifc.INPUT_TYPE] = unw_path.input_type
shared.write_fullres_geotiff(header, unw_path.unwrapped_path, dest, nodata=params[
C.NO_DATA_VALUE])
Path(dest).chmod(0o444) # readonly output
return dest, True
else:
if os.path.exists(dest):
log.warning(f"Full-res geotiff already exists in {dest}! Returning existing geotiff!")
return dest, False

if processor == GAMMA:
header = gamma.gamma_header(unw_path.unwrapped_path, params)
elif processor == ROIPAC:
log.info("Warning: ROI_PAC support will be deprecated in a future PyRate release")
header = roipac.roipac_header(unw_path.unwrapped_path, params)
else:
raise PreprocessError('Processor must be ROI_PAC (0) or GAMMA (1)')
header[ifc.INPUT_TYPE] = unw_path.input_type
shared.write_fullres_geotiff(header, unw_path.unwrapped_path, dest, nodata=params[
C.NO_DATA_VALUE])
Path(dest).chmod(0o444) # readonly output
return dest, True
33 changes: 17 additions & 16 deletions pyrate/core/algorithm.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,8 @@ def least_squares_covariance(A, b, v):
V = diag(1.0 / v.squeeze())
q, r = qr(A) # Orthogonal-triangular Decomposition
efg = dot(q.T, dot(V, q)) # TODO: round it??
# pylint: disable=invalid-sequence-index
# JUSTIFICATION: Pylint is just wrong here, even if we cast to int(n) it complains...
g = efg[n:, n:] # modified to 0 indexing
cd = dot(q.T, b) # q.T * b
f = efg[:n, n:] # TODO: check +1/indexing
Expand Down Expand Up @@ -147,15 +149,14 @@ def ifg_date_lookup(ifgs, date_pair):
# and not in order
if date_pair[0] > date_pair[1]:
date_pair = date_pair[1], date_pair[0]
except:
raise ValueError("Bad date_pair arg to ifg_date_lookup()")
except Exception as error:
raise ValueError("Bad date_pair arg to ifg_date_lookup()") from error

for i in ifgs:
if date_pair == (i.first, i.second):
return i

raise ValueError("Cannot find Ifg with first/second"
"image dates of %s" % str(date_pair))
raise ValueError(f"Cannot find Ifg with first/second image dates of {date_pair}")


def ifg_date_index_lookup(ifgs, date_pair):
Expand All @@ -178,14 +179,14 @@ def ifg_date_index_lookup(ifgs, date_pair):
try:
if date_pair[0] > date_pair[1]:
date_pair = date_pair[1], date_pair[0]
except:
raise ValueError("Bad date_pair arg to ifg_date_lookup()")
except Exception as error:
raise ValueError("Bad date_pair arg to ifg_date_lookup()") from error

for i, _ in enumerate(ifgs):
if date_pair == (ifgs[i].first, ifgs[i].second):
return i

raise ValueError("Cannot find Ifg with first/second image dates of %s" % str(date_pair))
raise ValueError(f"Cannot find Ifg with first/second image dates of {date_pair}")


def get_epochs(ifgs: Union[Iterable, Dict]) -> Tuple[EpochList, int]:
Expand Down Expand Up @@ -234,10 +235,10 @@ def first_second_ids(dates):
"""

dset = sorted(set(dates))
return dict([(date_, i) for i, date_ in enumerate(dset)])
return { date_:i for i, date_ in enumerate(dset) }


def factorise_integer(n, memo={}, left=2):
def factorise_integer(n, memo=None, left=2):
"""
Returns two factors a and b of a supplied number n such that a * b = n.
The two factors are evaluated to be as close to each other in size as possible
Expand All @@ -252,26 +253,26 @@ def factorise_integer(n, memo={}, left=2):
:rtype: int
"""
n = int(n)
if (n, left) in memo:
if memo is not None and (n, left) in memo:
return memo[(n, left)]
if left == 1:
return n, [n]
i = 2
best = n
bestTuple = [n]
best_tuple = [n]
while i * i <= n:
if n % i == 0:
rem = factorise_integer(n / i, memo, left - 1)
if rem[0] + i < best:
best = rem[0] + i
bestTuple = [i] + rem[1]
best_tuple = [i] + rem[1]
i += 1

# handle edge case when only one processor is available
if bestTuple == [4]:
if best_tuple == [4]:
return 2, 2

if len(bestTuple) == 1:
bestTuple.append(1)
if len(best_tuple) == 1:
best_tuple.append(1)

return int(bestTuple[0]), int(bestTuple[1])
return int(best_tuple[0]), int(best_tuple[1])
6 changes: 3 additions & 3 deletions pyrate/core/aps.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ def _make_aps_corrections(ts_aps: np.ndarray, ifgs: List[Ifg], params: dict) ->
:param params: Dictionary of PyRate configuration parameters.
"""
log.debug('Reconstructing interferometric observations from time series')
# get first and second image indices
# get first and second image indices
_ , n = mpiops.run_once(get_epochs, ifgs)
index_first, index_second = n[:len(ifgs)], n[len(ifgs):]

Expand Down Expand Up @@ -305,7 +305,7 @@ def gaussian_spatial_filter(image: np.ndarray, cutoff: float, x_size: float,

# Estimate sigma value for Gaussian kernel function in spectral domain
# by converting cutoff distance to wavenumber and applying a scaling
# factor based on fixed kernel window size.
# factor based on fixed kernel window size.
sigma = np.std(dist) * (1 / cutoff)
# Calculate kernel weights
wgt = _kernel(dist, sigma)
Expand Down Expand Up @@ -333,7 +333,7 @@ def temporal_high_pass_filter(tsincr: np.ndarray, epochlist: EpochList,
log.info('Applying temporal high-pass filter')
threshold = params[C.TLPF_PTHR]
cutoff_day = params[C.TLPF_CUTOFF]
if cutoff_day < 1 or type(cutoff_day) != int:
if cutoff_day < 1 or not isinstance(cutoff_day, int):
raise ValueError(f'tlpf_cutoff must be an integer greater than or '
f'equal to 1 day. Value provided = {cutoff_day}')

Expand Down
Loading