diff --git a/pcntoolkit/normative.py b/pcntoolkit/normative.py index e2ee9d81..275f8e99 100755 --- a/pcntoolkit/normative.py +++ b/pcntoolkit/normative.py @@ -447,7 +447,7 @@ def estimate(covfile, respfile, **kwargs): kwargs['trbefile'] = 'be_kfold_tr_tempfile.pkl' kwargs['tsbefile'] = 'be_kfold_ts_tempfile.pkl' - # estimate the models for all response variables + # estimate the models for all response variables for i in range(0, len(nz)): print("Estimating model ", i+1, "of", len(nz)) nm = norm_init(Xz_tr, Yz_tr[:, i], alg=alg, **kwargs) @@ -500,14 +500,7 @@ def estimate(covfile, respfile, **kwargs): else: Ytest = Y[ts, nz[i]] - if alg=='hbr': - if outscaler in ['standardize', 'minmax', 'robminmax']: - Ytestz = Y_scaler.transform(Ytest.reshape(-1,1), index=i) - else: - Ytestz = Ytest.reshape(-1,1) - Z[ts, nz[i]] = nm.get_mcmc_zscores(Xz_ts, Ytestz, **kwargs) - else: - Z[ts, nz[i]] = (Ytest - Yhat[ts, nz[i]]) / \ + Z[ts, nz[i]] = (Ytest - Yhat[ts, nz[i]]) / \ np.sqrt(S2[ts, nz[i]]) except Exception as e: @@ -757,7 +750,6 @@ def predict(covfile, respfile, maskfile=None, **kwargs): Xz = X # estimate the models for all subjects - #TODO Z-scores adaptation for SHASH HBR for i, m in enumerate(models): print("Prediction by model ", i+1, "of", feature_num) nm = norm_init(Xz) @@ -814,7 +806,7 @@ def predict(covfile, respfile, maskfile=None, **kwargs): warp_param = nm.blr.hyp[1:nm.blr.warp.get_n_params()+1] Yw[:,i] = nm.blr.warp.f(Y[:,i], warp_param) - Y = Yw + Y = Yw else: warp = False @@ -1071,7 +1063,6 @@ def transfer(covfile, respfile, testcov=None, testresp=None, maskfile=None, else: warp = False - #TODO Z-scores adaptation for SHASH HBR Z = (Yte - Yhat) / np.sqrt(S2) print("Evaluating the model ...") diff --git a/pcntoolkit/normative_model/norm_hbr.py b/pcntoolkit/normative_model/norm_hbr.py index e4718071..f8ff9da6 100644 --- a/pcntoolkit/normative_model/norm_hbr.py +++ b/pcntoolkit/normative_model/norm_hbr.py @@ -488,7 +488,7 @@ def get_mcmc_quantiles(self, X, 
batch_effects=None, z_scores=None): return quantiles.mean(axis=-1) - def get_mcmc_zscores(self, X, y, **kwargs): + def get_mcmc_zscores(self, X, y, batch_effects=None): """ Computes zscores of data given an estimated model @@ -496,17 +496,13 @@ def get_mcmc_zscores(self, X, y, **kwargs): Args: X ([N*p]ndarray): covariates y ([N*1]ndarray): response variables + batch_effects (ndarray): the batch effects corresponding to X """ - + # Set batch effects to zero if none are provided print(self.configs['likelihood']) - - tsbefile = kwargs.get("tsbefile", None) - if tsbefile is not None: - batch_effects_test = fileio.load(tsbefile) - else: # Set batch effects to zero if none are provided - print("Could not find batch-effects file! Initializing all as zeros ...") - batch_effects_test = np.zeros([X.shape[0], 1]) - + if batch_effects is None: + batch_effects = np.zeros([X.shape[0], 1]) + # Determine the variables to predict if self.configs["likelihood"] == "Normal": var_names = ["mu_samples", "sigma_samples","sigma_plus_samples"] @@ -529,7 +525,7 @@ def get_mcmc_zscores(self, X, y, **kwargs): # Do a forward to get the posterior predictive in the idata self.hbr.predict( X=X, - batch_effects=batch_effects_test, + batch_effects=batch_effects, batch_effects_maps=self.batch_effects_maps, pred="single", var_names=var_names+["y_like"], @@ -540,7 +536,7 @@ def get_mcmc_zscores(self, X, y, **kwargs): self.hbr.idata, "posterior_predictive", var_names=var_names ) - # Remove superfluous var_names + # Remove superfluous var_names var_names.remove('sigma_samples') if 'delta_samples' in var_names: var_names.remove('delta_samples') @@ -557,7 +553,7 @@ def get_mcmc_zscores(self, X, y, **kwargs): *array_of_vars, kwargs={"y": y, "likelihood": self.configs['likelihood']}, ) - return z_scores.mean(axis=-1).values + return z_scores.mean(axis=-1) diff --git a/pcntoolkit/util/utils.py b/pcntoolkit/util/utils.py index e96d9b2c..3d22bb57 100644 --- a/pcntoolkit/util/utils.py +++ 
b/pcntoolkit/util/utils.py @@ -1141,19 +1141,15 @@ def fit(self, X): self.max[i] = np.median(np.sort(X[:,i])[-int(np.round(X.shape[0] * self.tail)):]) - def transform(self, X, index=None): + def transform(self, X): if self.scaler_type == 'standardize': - if index is None: - X = (X - self.m) / self.s - else: - X = (X - self.m[index]) / self.s[index] + + X = (X - self.m) / self.s elif self.scaler_type in ['minmax', 'robminmax']: - if index is None: - X = (X - self.min) / (self.max - self.min) - else: - X = (X - self.min[index]) / (self.max[index] - self.min[index]) + + X = (X - self.min) / (self.max - self.min) if self.adjust_outliers: diff --git a/setup.py b/setup.py index 59bff139..811f7cb1 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ from setuptools import setup, find_packages setup(name='pcntoolkit', - version='0.29', + version='0.28', description='Predictive Clinical Neuroscience toolkit', url='http://github.com/amarquand/PCNtoolkit', author='Andre Marquand',