Remove unused mypy ignores (#794)
* Remove unused mypy ignores

* Rerun with numpy-1.23.5
jklaise authored May 22, 2023
1 parent 0361fbd commit 21ca540
Showing 35 changed files with 62 additions and 60 deletions.
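The commit message doesn't spell out how the stale suppressions were found; the usual mechanism is mypy's unused-ignore check, which turns a no-longer-needed `# type: ignore` into an error of its own. A minimal sketch of that check (the file name and invocation below are illustrative assumptions, not this repository's actual setup):

    # demo_stale_ignore.py -- hypothetical example, not part of this commit.
    # The assignment type-checks cleanly, so the suppression itself gets
    # flagged once unused-ignore warnings are enabled.
    x: int = 1 + 1  # type: ignore

    # $ mypy --warn-unused-ignores demo_stale_ignore.py
    # demo_stale_ignore.py:4: error: Unused "type: ignore" comment

The same check can be left on permanently via `warn_unused_ignores = True` in the project's mypy configuration.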
2 changes: 1 addition & 1 deletion alibi_detect/ad/model_distillation.py
@@ -176,7 +176,7 @@ def score(self, X: np.ndarray, batch_size: int = int(1e10), return_predictions:

         # scale predictions
         if self.temperature != 1.:
-            y = y ** (1 / self.temperature)  # type: ignore
+            y = y ** (1 / self.temperature)
             y = (y / tf.reshape(tf.reduce_sum(y, axis=-1), (-1, 1))).numpy()

         if self.loss_type == 'kld':
4 changes: 2 additions & 2 deletions alibi_detect/base.py
@@ -193,8 +193,8 @@ def _nested_detector(self):
         """
         The low-level nested detector.
         """
-        detector = self._detector if hasattr(self, '_detector') else self  # type: ignore[attr-defined]
-        detector = detector._detector if hasattr(detector, '_detector') else detector  # type: ignore[attr-defined]
+        detector = self._detector if hasattr(self, '_detector') else self
+        detector = detector._detector if hasattr(detector, '_detector') else detector
         return detector

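The two `[attr-defined]` ignores dropped above guarded attribute access behind `hasattr` checks. Newer mypy releases (0.980 onwards, to the best of my knowledge) narrow types inside a positive `hasattr()` branch, which would make suppressions like these redundant. A minimal sketch of the behaviour, using an illustrative class rather than code from this repository:

    # hasattr() narrowing sketch -- hypothetical class, assumes mypy >= 0.980.
    class Detector:
        def resolve(self) -> "Detector":
            # Inside the guard, mypy now accepts the attribute access that
            # previously needed `# type: ignore[attr-defined]`.
            if hasattr(self, "_detector"):
                return self._detector
            return self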
14 changes: 7 additions & 7 deletions alibi_detect/cd/base.py
@@ -151,9 +151,9 @@ def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[Union[np.ndarray, list
                 x_ref = self.preprocess_fn(self.x_ref)
             else:
                 x_ref = self.x_ref
-            return x_ref, x  # type: ignore[return-value]
+            return x_ref, x
         else:
-            return self.x_ref, x  # type: ignore[return-value]
+            return self.x_ref, x

     def get_splits(
         self,
@@ -412,9 +412,9 @@ def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[Union[np.ndarray, list
                 x_ref = self.preprocess_fn(self.x_ref)
             else:
                 x_ref = self.x_ref
-            return x_ref, x  # type: ignore[return-value]
+            return x_ref, x
         else:
-            return self.x_ref, x  # type: ignore[return-value]
+            return self.x_ref, x

     def get_splits(self, x_ref: Union[np.ndarray, list], x: Union[np.ndarray, list]) \
             -> Tuple[Tuple[Union[np.ndarray, list], Union[np.ndarray, list]],
@@ -441,7 +441,7 @@ def get_splits(self, x_ref: Union[np.ndarray, list], x: Union[np.ndarray, list])

         if isinstance(x_ref, np.ndarray):
             x_ref_tr, x_ref_te = x_ref[idx_ref_tr], x_ref[idx_ref_te]
-            x_cur_tr, x_cur_te = x[idx_cur_tr], x[idx_cur_te]  # type: ignore[call-overload]
+            x_cur_tr, x_cur_te = x[idx_cur_tr], x[idx_cur_te]
         elif isinstance(x, list):
             x_ref_tr, x_ref_te = [x_ref[_] for _ in idx_ref_tr], [x_ref[_] for _ in idx_ref_te]
             x_cur_tr, x_cur_te = [x[_] for _ in idx_cur_tr], [x[_] for _ in idx_cur_te]
@@ -814,7 +814,7 @@ def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_
                 pass
             self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref)  # type: ignore[arg-type]
             # used for reservoir sampling
-            self.n += len(x)  # type: ignore
+            self.n += len(x)

         # populate drift dict
         cd = concept_drift_dict()
@@ -1205,7 +1205,7 @@ def predict(self,  # type: ignore[override]
             if isinstance(self.update_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
                 x = self.preprocess_fn(x)
             self.x_ref = update_reference(self.x_ref, x, self.n, self.update_ref)  # type: ignore[arg-type]
-            self.c_ref = update_reference(self.c_ref, c, self.n, self.update_ref)  # type: ignore[arg-type]
+            self.c_ref = update_reference(self.c_ref, c, self.n, self.update_ref)
             # used for reservoir sampling
             self.n += len(x)

8 changes: 4 additions & 4 deletions alibi_detect/cd/base_online.py
@@ -152,8 +152,8 @@ def _initialise_state(self) -> None:
         an example).
         """
         self.t = 0  # corresponds to a test set of ref data
-        self.test_stats = np.array([])  # type: ignore[var-annotated]
-        self.drift_preds = np.array([])  # type: ignore[var-annotated]
+        self.test_stats = np.array([])
+        self.drift_preds = np.array([])

     def reset(self) -> None:
         """
@@ -404,9 +404,9 @@ def _initialise_state(self) -> None:
         an example).
         """
         self.t = 0
-        self.xs = np.array([])  # type: ignore[var-annotated]
+        self.xs = np.array([])
         self.test_stats = np.empty([0, len(self.window_sizes), self.n_features])
-        self.drift_preds = np.array([])  # type: ignore[var-annotated]
+        self.drift_preds = np.array([])

     @abstractmethod
     def _check_drift(self, test_stats: np.ndarray, thresholds: np.ndarray) -> int:
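The `[var-annotated]` ignores removed in this file sat on empty-array initialisers, and the second commit-message bullet suggests they went stale after rerunning mypy against numpy-1.23.5, whose bundled stubs let a concrete type be inferred for `np.array([])`. A sketch of the effect (the version attribution is an inference from the commit message, not verified against the numpy stub changelog):

    import numpy as np

    # Under older stubs mypy raised "Need type annotation" for variables
    # like these, hence the `# type: ignore[var-annotated]` suppressions.
    test_stats = np.array([])   # now inferred as an ndarray, no annotation needed
    drift_preds = np.array([])  # likewise; a leftover ignore here would itself be flagged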
2 changes: 1 addition & 1 deletion alibi_detect/cd/chisquare.py
@@ -94,7 +94,7 @@ def __init__(
                               'Dict[int, int], Dict[int, List[int]]')
         else:  # infer number of possible categories for each feature from reference data
             x_flat = self.x_ref.reshape(self.x_ref.shape[0], -1)
-            categories_per_feature = {f: list(np.unique(x_flat[:, f]))  # type: ignore
+            categories_per_feature = {f: list(np.unique(x_flat[:, f]))
                                       for f in range(self.n_features)}
         self.x_ref_categories = categories_per_feature

6 changes: 3 additions & 3 deletions alibi_detect/cd/classifier.py
@@ -167,20 +167,20 @@ def __init__(
             [kwargs.pop(k, None) for k in pop_kwargs]
             if dataset is None:
                 kwargs.update({'dataset': TFDataset})
-            self._detector = ClassifierDriftTF(*args, **kwargs)  # type: ignore
+            self._detector = ClassifierDriftTF(*args, **kwargs)
         elif backend == Framework.PYTORCH:
             pop_kwargs = ['use_calibration', 'calibration_kwargs', 'use_oob']
             [kwargs.pop(k, None) for k in pop_kwargs]
             if dataset is None:
                 kwargs.update({'dataset': TorchDataset})
             if dataloader is None:
                 kwargs.update({'dataloader': DataLoader})
-            self._detector = ClassifierDriftTorch(*args, **kwargs)  # type: ignore
+            self._detector = ClassifierDriftTorch(*args, **kwargs)
         else:
             pop_kwargs = ['reg_loss_fn', 'optimizer', 'learning_rate', 'batch_size', 'preprocess_batch_fn',
                           'epochs', 'train_kwargs', 'device', 'dataset', 'dataloader', 'verbose']
             [kwargs.pop(k, None) for k in pop_kwargs]
-            self._detector = ClassifierDriftSklearn(*args, **kwargs)  # type: ignore
+            self._detector = ClassifierDriftSklearn(*args, **kwargs)
         self.meta = self._detector.meta

     def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
4 changes: 2 additions & 2 deletions alibi_detect/cd/context_aware.py
@@ -115,9 +115,9 @@ def __init__(

         if backend == Framework.TENSORFLOW:
             kwargs.pop('device', None)
-            self._detector = ContextMMDDriftTF(*args, **kwargs)  # type: ignore
+            self._detector = ContextMMDDriftTF(*args, **kwargs)
         else:
-            self._detector = ContextMMDDriftTorch(*args, **kwargs)  # type: ignore
+            self._detector = ContextMMDDriftTorch(*args, **kwargs)
         self.meta = self._detector.meta

     def predict(self, x: Union[np.ndarray, list], c: np.ndarray,
2 changes: 1 addition & 1 deletion alibi_detect/cd/keops/mmd.py
@@ -169,7 +169,7 @@ def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
         x_ref = torch.from_numpy(x_ref).float()  # type: ignore[assignment]
         x = torch.from_numpy(x).float()  # type: ignore[assignment]
         # compute kernel matrix, MMD^2 and apply permutation test
-        m, n = x_ref.shape[0], x.shape[0]  # type: ignore[union-attr]
+        m, n = x_ref.shape[0], x.shape[0]
         perms = [torch.randperm(m + n) for _ in range(self.n_permutations)]
         # TODO - Rethink typings (related to https://github.com/SeldonIO/alibi-detect/issues/540)
         x_all = torch.cat([x_ref, x], 0)  # type: ignore[list-item]
2 changes: 1 addition & 1 deletion alibi_detect/cd/learned_kernel.py
@@ -166,7 +166,7 @@ def __init__(
         else:
             detector = LearnedKernelDriftKeops

-        self._detector = detector(*args, **kwargs)  # type: ignore
+        self._detector = detector(*args, **kwargs)
         self.meta = self._detector.meta

     def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
4 changes: 2 additions & 2 deletions alibi_detect/cd/lsdd.py
@@ -94,9 +94,9 @@ def __init__(

         if backend == Framework.TENSORFLOW:
             kwargs.pop('device', None)
-            self._detector = LSDDDriftTF(*args, **kwargs)  # type: ignore
+            self._detector = LSDDDriftTF(*args, **kwargs)
         else:
-            self._detector = LSDDDriftTorch(*args, **kwargs)  # type: ignore
+            self._detector = LSDDDriftTorch(*args, **kwargs)
         self.meta = self._detector.meta

     def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
2 changes: 1 addition & 1 deletion alibi_detect/cd/lsdd_online.py
@@ -96,7 +96,7 @@ def __init__(

         if backend == Framework.TENSORFLOW:
             kwargs.pop('device', None)
-            self._detector = LSDDDriftOnlineTF(*args, **kwargs)  # type: ignore
+            self._detector = LSDDDriftOnlineTF(*args, **kwargs)
         else:
             self._detector = LSDDDriftOnlineTorch(*args, **kwargs)  # type: ignore
         self.meta = self._detector.meta
2 changes: 1 addition & 1 deletion alibi_detect/cd/mmd.py
@@ -116,7 +116,7 @@ def __init__(
             from alibi_detect.utils.keops.kernels import GaussianRBF  # type: ignore
             kwargs.update({'kernel': GaussianRBF})

-        self._detector = detector(*args, **kwargs)  # type: ignore
+        self._detector = detector(*args, **kwargs)
         self.meta = self._detector.meta

     def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
2 changes: 1 addition & 1 deletion alibi_detect/cd/mmd_online.py
@@ -96,7 +96,7 @@ def __init__(

         if backend == Framework.TENSORFLOW:
             kwargs.pop('device', None)
-            self._detector = MMDDriftOnlineTF(*args, **kwargs)  # type: ignore
+            self._detector = MMDDriftOnlineTF(*args, **kwargs)
         else:
             self._detector = MMDDriftOnlineTorch(*args, **kwargs)  # type: ignore
         self.meta = self._detector.meta
2 changes: 1 addition & 1 deletion alibi_detect/cd/pytorch/context_aware.py
@@ -135,7 +135,7 @@ def score(self,  # type: ignore[override]
         """
         x_ref, x = self.preprocess(x)
         x_ref = torch.from_numpy(x_ref).to(self.device)  # type: ignore[assignment]
-        c_ref = torch.from_numpy(self.c_ref).to(self.device)  # type: ignore[assignment]
+        c_ref = torch.from_numpy(self.c_ref).to(self.device)

         # Hold out a portion of contexts for conditioning on
         n, n_held = len(c), int(len(c)*self.prop_c_held)
2 changes: 1 addition & 1 deletion alibi_detect/cd/pytorch/learned_kernel.py
@@ -200,7 +200,7 @@ def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
             self.kernel = deepcopy(self.original_kernel) if self.retrain_from_scratch else self.kernel
             self.kernel = self.kernel.to(self.device)
             train_args = [self.j_hat, (dl_ref_tr, dl_cur_tr), self.device]
-            LearnedKernelDriftTorch.trainer(*train_args, **self.train_kwargs)  # type: ignore
+            LearnedKernelDriftTorch.trainer(*train_args, **self.train_kwargs)

         if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray):
             x_all = np.concatenate([x_ref_te, x_cur_te], axis=0)
4 changes: 2 additions & 2 deletions alibi_detect/cd/pytorch/lsdd.py
@@ -115,8 +115,8 @@ def _initialize_kernel(self, x_ref: torch.Tensor):
     def _configure_normalization(self, x_ref: torch.Tensor, eps: float = 1e-12):
         x_ref_means = x_ref.mean(0)
         x_ref_stds = x_ref.std(0)
-        self._normalize = lambda x: (torch.as_tensor(x) - x_ref_means) / (x_ref_stds + eps)  # type: ignore[assignment]
-        self._unnormalize = lambda x: (torch.as_tensor(x) * (x_ref_stds + eps)  # type: ignore[assignment]
+        self._normalize = lambda x: (torch.as_tensor(x) - x_ref_means) / (x_ref_stds + eps)
+        self._unnormalize = lambda x: (torch.as_tensor(x) * (x_ref_stds + eps)
                                        + x_ref_means).cpu().numpy()

     def _configure_kernel_centers(self, x_ref: torch.Tensor):
2 changes: 1 addition & 1 deletion alibi_detect/cd/pytorch/lsdd_online.py
@@ -103,7 +103,7 @@ def __init__(
         else:
             sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma,  # type: ignore[assignment]
                                                                           np.ndarray) else None
-        self.kernel = GaussianRBF(sigma)  # type: ignore[arg-type]
+        self.kernel = GaussianRBF(sigma)

         if self.n_kernel_centers is None:
             self.n_kernel_centers = 2 * window_size
2 changes: 1 addition & 1 deletion alibi_detect/cd/pytorch/mmd.py
@@ -128,7 +128,7 @@ def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
         x = torch.from_numpy(x).to(self.device)  # type: ignore[assignment]
         # compute kernel matrix, MMD^2 and apply permutation test using the kernel matrix
         # TODO: (See https://github.com/SeldonIO/alibi-detect/issues/540)
-        n = x.shape[0]  # type: ignore
+        n = x.shape[0]
         kernel_mat = self.kernel_matrix(x_ref, x)  # type: ignore[arg-type]
         kernel_mat = kernel_mat - torch.diag(kernel_mat.diag())  # zero diagonal
         mmd2 = mmd2_from_kernel_matrix(kernel_mat, n, permute=False, zero_diag=False)
4 changes: 2 additions & 2 deletions alibi_detect/cd/pytorch/spot_the_diff.py
@@ -223,8 +223,8 @@ def predict(
             and the trained model.
         """
         preds = self._detector.predict(x, return_p_val, return_distance, return_probs, return_model=True)
-        preds['data']['diffs'] = preds['data']['model'].diffs.detach().cpu().numpy()  # type: ignore
-        preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.detach().cpu().numpy()  # type: ignore
+        preds['data']['diffs'] = preds['data']['model'].diffs.detach().cpu().numpy()
+        preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.detach().cpu().numpy()
         if not return_model:
             del preds['data']['model']
         return preds
2 changes: 1 addition & 1 deletion alibi_detect/cd/spot_the_diff.py
@@ -143,7 +143,7 @@ def __init__(
             [kwargs.pop(k, None) for k in pop_kwargs]
             if dataset is None:
                 kwargs.update({'dataset': TFDataset})
-            self._detector = SpotTheDiffDriftTF(*args, **kwargs)  # type: ignore
+            self._detector = SpotTheDiffDriftTF(*args, **kwargs)
         else:
             if dataset is None:
                 kwargs.update({'dataset': TorchDataset})
2 changes: 1 addition & 1 deletion alibi_detect/cd/tensorflow/classifier.py
@@ -185,7 +185,7 @@ def score(self, x: np.ndarray) -> Tuple[float, float, np.ndarray, np.ndarray,  #
                 self.train_kwargs['optimizer'] = optimizer.__class__.from_config(optimizer.get_config())
             train_args = [self.model, self.loss_fn, None]
             self.train_kwargs.update({'dataset': ds_tr})
-            trainer(*train_args, **self.train_kwargs)  # type: ignore
+            trainer(*train_args, **self.train_kwargs)
             preds = self.predict_fn(x_te, self.model)
             preds_oof_list.append(preds)
             idx_oof_list.append(idx_te)
2 changes: 1 addition & 1 deletion alibi_detect/cd/tensorflow/learned_kernel.py
@@ -181,7 +181,7 @@ def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:

             self.kernel = clone_model(self.original_kernel) if self.retrain_from_scratch else self.kernel
             train_args = [self.j_hat, (ds_ref_tr, ds_cur_tr)]
-            LearnedKernelDriftTF.trainer(*train_args, **self.train_kwargs)  # type: ignore
+            LearnedKernelDriftTF.trainer(*train_args, **self.train_kwargs)

         if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray):
             x_all = np.concatenate([x_ref_te, x_cur_te], axis=0)
4 changes: 2 additions & 2 deletions alibi_detect/cd/tensorflow/lsdd.py
@@ -103,8 +103,8 @@ def _initialize_kernel(self, x_ref: tf.Tensor):
     def _configure_normalization(self, x_ref: tf.Tensor, eps: float = 1e-12):
         x_ref_means = tf.reduce_mean(x_ref, axis=0)
         x_ref_stds = tf.math.reduce_std(x_ref, axis=0)
-        self._normalize = lambda x: (x - x_ref_means) / (x_ref_stds + eps)  # type: ignore[assignment]
-        self._unnormalize = lambda x: (x * (x_ref_stds + eps) + x_ref_means).numpy()  # type: ignore[assignment]
+        self._normalize = lambda x: (x - x_ref_means) / (x_ref_stds + eps)
+        self._unnormalize = lambda x: (x * (x_ref_stds + eps) + x_ref_means).numpy()

     def _configure_kernel_centers(self, x_ref: tf.Tensor):
         "Set aside reference samples to act as kernel centers"
4 changes: 2 additions & 2 deletions alibi_detect/cd/tensorflow/spot_the_diff.py
@@ -219,8 +219,8 @@ def predict(
             and the trained model.
         """
         preds = self._detector.predict(x, return_p_val, return_distance, return_probs, return_model=True)
-        preds['data']['diffs'] = preds['data']['model'].diffs.numpy()  # type: ignore
-        preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.numpy()  # type: ignore
+        preds['data']['diffs'] = preds['data']['model'].diffs.numpy()
+        preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.numpy()
         if not return_model:
             del preds['data']['model']
         return preds
2 changes: 1 addition & 1 deletion alibi_detect/cd/utils.py
@@ -92,7 +92,7 @@ def new_model_fn(x: Union[np.ndarray, list]) -> np.ndarray:
         # shuffle
         n_x = len(x)
         perm = np.random.permutation(n_x)
-        x = x[perm] if is_np else [x[i] for i in perm]  # type: ignore[call-overload]
+        x = x[perm] if is_np else [x[i] for i in perm]
         # add extras if necessary
         final_batch_size = n_x % batch_size
         if final_batch_size != 0:
4 changes: 2 additions & 2 deletions alibi_detect/od/llr.py
@@ -189,12 +189,12 @@ def fit(self,
         # train semantic model
         args = [self.dist_s, loss_fn, X]
         kwargs.update({'y_train': y, 'optimizer': optimizer_s})
-        trainer(*args, **kwargs)  # type: ignore[arg-type]
+        trainer(*args, **kwargs)

         # train background model
         args = [self.dist_b, loss_fn, X_back]
         kwargs.update({'y_train': y_back, 'optimizer': optimizer_b})
-        trainer(*args, **kwargs)  # type: ignore[arg-type]
+        trainer(*args, **kwargs)

     def infer_threshold(self,
                         X: np.ndarray,
2 changes: 1 addition & 1 deletion alibi_detect/od/pytorch/base.py
@@ -232,7 +232,7 @@ def predict(self, x: torch.Tensor) -> TorchOutlierDetectorOutput:
         ValueError
             Raised if the detector is not fit on reference data.
         """
-        self.check_fitted()  # type: ignore
+        self.check_fitted()
         raw_scores = self.score(x)
         scores = self._ensembler(raw_scores)

4 changes: 2 additions & 2 deletions alibi_detect/saving/_tensorflow/loading.py
@@ -275,7 +275,7 @@ def load_detector_legacy(filepath: Union[str, os.PathLike], suffix: str, **kwarg
         model = load_model(model_dir, custom_objects=custom_objects)
         detector = init_ad_md(state_dict, md, model)
     elif detector_name == 'OutlierProphet':
-        detector = init_od_prophet(state_dict)  # type: ignore[assignment]
+        detector = init_od_prophet(state_dict)
     elif detector_name == 'SpectralResidual':
         detector = init_od_sr(state_dict)  # type: ignore[assignment]
     elif detector_name == 'OutlierSeq2Seq':
@@ -291,7 +291,7 @@ def load_detector_legacy(filepath: Union[str, os.PathLike], suffix: str, **kwarg
             logger.warning('No model found in {}, setting `model` to `None`.'.format(model_dir))
             model = None
         if detector_name == 'KSDrift':
-            load_fn = init_cd_ksdrift  # type: ignore[assignment]
+            load_fn = init_cd_ksdrift
         elif detector_name == 'MMDDriftTF':
             load_fn = init_cd_mmddrift  # type: ignore[assignment]
         elif detector_name == 'ChiSquareDrift':
6 changes: 3 additions & 3 deletions alibi_detect/saving/saving.py
@@ -138,7 +138,7 @@ def _save_detector_config(detector: ConfigurableDetector,

     # Get the detector config (with artefacts still within it)
     if hasattr(detector, 'get_config'):
-        cfg = detector.get_config()  # type: ignore[union-attr]  # TODO - remove once all detectors have get_config
+        cfg = detector.get_config()  # TODO - remove once all detectors have get_config
         cfg = validate_config(cfg, resolved=True)
     else:
         raise NotImplementedError(f'{detector_name} does not yet support config.toml based saving.')
@@ -492,7 +492,7 @@ def _save_kernel_config(kernel: Callable,
     # if a DeepKernel
     if hasattr(kernel, 'proj'):
         if hasattr(kernel, 'get_config'):
-            cfg_kernel = kernel.get_config()  # type: ignore[attr-defined]
+            cfg_kernel = kernel.get_config()
         else:
             raise AttributeError("The detector's `kernel` must have a .get_config() method for it to be saved.")
     # Serialize the kernels (if needed)
@@ -511,7 +511,7 @@ def _save_kernel_config(kernel: Callable,
     else:  # if an object
         kernel_class = kernel.__class__
         if hasattr(kernel, 'get_config'):
-            cfg_kernel = kernel.get_config()  # type: ignore[attr-defined]
+            cfg_kernel = kernel.get_config()
             cfg_kernel['init_sigma_fn'], _ = _serialize_object(cfg_kernel['init_sigma_fn'], base_path,
                                                                local_path.joinpath('init_sigma_fn'))
         else:
[Diffs for the remaining changed files were not loaded in this view.]