From ec9c7cc13980863c80d473624456b8d15cfbc079 Mon Sep 17 00:00:00 2001
From: "S.M.Kia"
Date: Wed, 8 Nov 2023 16:48:38 +0100
Subject: [PATCH] A quick patch to fix z-scores in SHASH HBR.

---
 pcntoolkit/normative.py                | 15 ++++++++++++---
 pcntoolkit/normative_model/norm_hbr.py | 22 +++++++++++++---------
 2 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/pcntoolkit/normative.py b/pcntoolkit/normative.py
index 275f8e99..e2ee9d81 100755
--- a/pcntoolkit/normative.py
+++ b/pcntoolkit/normative.py
@@ -447,7 +447,7 @@ def estimate(covfile, respfile, **kwargs):
         kwargs['trbefile'] = 'be_kfold_tr_tempfile.pkl'
         kwargs['tsbefile'] = 'be_kfold_ts_tempfile.pkl'
 
-    # estimate the models for all subjects
+    # estimate the models for all response variables
     for i in range(0, len(nz)):
         print("Estimating model ", i+1, "of", len(nz))
         nm = norm_init(Xz_tr, Yz_tr[:, i], alg=alg, **kwargs)
@@ -500,7 +500,14 @@ def estimate(covfile, respfile, **kwargs):
                 else:
                     Ytest = Y[ts, nz[i]]
 
-                Z[ts, nz[i]] = (Ytest - Yhat[ts, nz[i]]) / \
+                if alg=='hbr':
+                    if outscaler in ['standardize', 'minmax', 'robminmax']:
+                        Ytestz = Y_scaler.transform(Ytest.reshape(-1,1), index=i)
+                    else:
+                        Ytestz = Ytest.reshape(-1,1)
+                    Z[ts, nz[i]] = nm.get_mcmc_zscores(Xz_ts, Ytestz, **kwargs)
+                else:
+                    Z[ts, nz[i]] = (Ytest - Yhat[ts, nz[i]]) / \
                                 np.sqrt(S2[ts, nz[i]])
 
             except Exception as e:
@@ -750,6 +757,7 @@ def predict(covfile, respfile, maskfile=None, **kwargs):
         Xz = X
 
     # estimate the models for all subjects
+    #TODO Z-scores adaptation for SHASH HBR
    for i, m in enumerate(models):
         print("Prediction by model ", i+1, "of", feature_num)
         nm = norm_init(Xz)
@@ -806,7 +814,7 @@ def predict(covfile, respfile, maskfile=None, **kwargs):
                 warp_param = nm.blr.hyp[1:nm.blr.warp.get_n_params()+1]
                 Yw[:,i] = nm.blr.warp.f(Y[:,i], warp_param)
 
-            Y = Yw;
+            Y = Yw
         else:
             warp = False
 
@@ -1063,6 +1071,7 @@ def transfer(covfile, respfile, testcov=None, testresp=None, maskfile=None,
     else:
         warp = False
 
+    #TODO Z-scores adaptation for SHASH HBR
     Z = (Yte - Yhat) / np.sqrt(S2)
 
     print("Evaluating the model ...")
diff --git a/pcntoolkit/normative_model/norm_hbr.py b/pcntoolkit/normative_model/norm_hbr.py
index f8ff9da6..e4718071 100644
--- a/pcntoolkit/normative_model/norm_hbr.py
+++ b/pcntoolkit/normative_model/norm_hbr.py
@@ -488,7 +488,7 @@ def get_mcmc_quantiles(self, X, batch_effects=None, z_scores=None):
 
         return quantiles.mean(axis=-1)
 
-    def get_mcmc_zscores(self, X, y, batch_effects=None):
+    def get_mcmc_zscores(self, X, y, **kwargs):
         """
         Computes zscores of data given an estimated model
 
@@ -496,13 +496,17 @@ def get_mcmc_zscores(self, X, y, batch_effects=None):
         Args:
             X ([N*p]ndarray): covariates
             y ([N*1]ndarray): response variables
-            batch_effects (ndarray): the batch effects corresponding to X
         """
-        # Set batch effects to zero if none are provided
+
         print(self.configs['likelihood'])
-        if batch_effects is None:
-            batch_effects = batch_effects_test = np.zeros([X.shape[0], 1])
-
+
+        tsbefile = kwargs.get("tsbefile", None)
+        if tsbefile is not None:
+            batch_effects_test = fileio.load(tsbefile)
+        else: # Set batch effects to zero if none are provided
+            print("Could not find batch-effects file! Initializing all as zeros ...")
+            batch_effects_test = np.zeros([X.shape[0], 1])
+
         # Determine the variables to predict
         if self.configs["likelihood"] == "Normal":
             var_names = ["mu_samples", "sigma_samples","sigma_plus_samples"]
@@ -525,7 +529,7 @@ def get_mcmc_zscores(self, X, y, batch_effects=None):
         # Do a forward to get the posterior predictive in the idata
         self.hbr.predict(
             X=X,
-            batch_effects=batch_effects,
+            batch_effects=batch_effects_test,
             batch_effects_maps=self.batch_effects_maps,
             pred="single",
             var_names=var_names+["y_like"],
@@ -536,7 +540,7 @@ def get_mcmc_zscores(self, X, y, batch_effects=None):
             self.hbr.idata, "posterior_predictive", var_names=var_names
         )
 
-        # Remove superfluous var_nammes
+        # Remove superfluous var_names
         var_names.remove('sigma_samples')
         if 'delta_samples' in var_names:
             var_names.remove('delta_samples')
@@ -553,7 +557,7 @@ def get_mcmc_zscores(self, X, y, batch_effects=None):
             *array_of_vars,
             kwargs={"y": y, "likelihood": self.configs['likelihood']},
         )
-        return z_scores.mean(axis=-1)
+        return z_scores.mean(axis=-1).values
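
Usage note (not part of the patch): after this change, get_mcmc_zscores no longer takes a batch_effects array; instead it reads the test batch effects from a 'tsbefile' keyword argument (a pickled array, as estimate() already passes via kwargs) and falls back to all-zero batch effects when the file is not given. Below is a minimal sketch of calling the patched method directly on a previously estimated HBR model; the file names, the pickled-model path, and the assumption that the test responses are already scaled like the training data are illustrative assumptions, not part of the patch.

    import pickle
    import numpy as np

    # Assumption: a NormHBR model previously saved by estimate(); path is hypothetical.
    with open('Models/NM_0_0_estimate.pkl', 'rb') as f:
        nm = pickle.load(f)

    # Hypothetical test data; y must be scaled the same way as the training responses
    # (estimate() applies Y_scaler.transform before calling get_mcmc_zscores).
    X_test = np.loadtxt('cov_test.txt')
    y_test = np.loadtxt('resp_test.txt').reshape(-1, 1)

    # With the patch, batch effects come from 'tsbefile'; omitting it initializes
    # all batch effects to zeros, as in the else-branch above.
    z = nm.get_mcmc_zscores(X_test, y_test, tsbefile='be_test.pkl')
    print(z.shape)  # one z-score per test subject, averaged over MCMC samples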