
Commit

reverted changes to SHASH
amarquand committed Nov 14, 2023
1 parent ec9c7cc commit df800f4
Showing 4 changed files with 18 additions and 35 deletions.
15 changes: 3 additions & 12 deletions pcntoolkit/normative.py
@@ -447,7 +447,7 @@ def estimate(covfile, respfile, **kwargs):
kwargs['trbefile'] = 'be_kfold_tr_tempfile.pkl'
kwargs['tsbefile'] = 'be_kfold_ts_tempfile.pkl'

- # estimate the models for all response variables
+ # estimate the models for all subjects
for i in range(0, len(nz)):
print("Estimating model ", i+1, "of", len(nz))
nm = norm_init(Xz_tr, Yz_tr[:, i], alg=alg, **kwargs)
@@ -500,14 +500,7 @@ def estimate(covfile, respfile, **kwargs):
else:
Ytest = Y[ts, nz[i]]

- if alg=='hbr':
-     if outscaler in ['standardize', 'minmax', 'robminmax']:
-         Ytestz = Y_scaler.transform(Ytest.reshape(-1,1), index=i)
-     else:
-         Ytestz = Ytest.reshape(-1,1)
-     Z[ts, nz[i]] = nm.get_mcmc_zscores(Xz_ts, Ytestz, **kwargs)
- else:
-     Z[ts, nz[i]] = (Ytest - Yhat[ts, nz[i]]) / \
+ Z[ts, nz[i]] = (Ytest - Yhat[ts, nz[i]]) / \
np.sqrt(S2[ts, nz[i]])

except Exception as e:
@@ -757,7 +750,6 @@ def predict(covfile, respfile, maskfile=None, **kwargs):
Xz = X

# estimate the models for all subjects
- #TODO Z-scores adaptation for SHASH HBR
for i, m in enumerate(models):
print("Prediction by model ", i+1, "of", feature_num)
nm = norm_init(Xz)
@@ -814,7 +806,7 @@ def predict(covfile, respfile, maskfile=None, **kwargs):

warp_param = nm.blr.hyp[1:nm.blr.warp.get_n_params()+1]
Yw[:,i] = nm.blr.warp.f(Y[:,i], warp_param)
- Y = Yw
+ Y = Yw;
else:
warp = False

@@ -1071,7 +1063,6 @@ def transfer(covfile, respfile, testcov=None, testresp=None, maskfile=None,
else:
warp = False

- #TODO Z-scores adaptation for SHASH HBR
Z = (Yte - Yhat) / np.sqrt(S2)

print("Evaluating the model ...")
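For reference, the z-score computation restored in normative.py above is the standard Gaussian deviation score, Z = (Y - Yhat) / sqrt(S2). A minimal NumPy sketch of that step (the helper name and standalone arrays are illustrative, not part of the toolkit):

    import numpy as np

    def gaussian_zscores(y_true, y_hat, s2):
        # deviation from the predicted mean, scaled by the predictive standard deviation
        return (y_true - y_hat) / np.sqrt(s2)

    # e.g. for one response variable i over the test indices ts:
    # Z[ts, nz[i]] = gaussian_zscores(Ytest, Yhat[ts, nz[i]], S2[ts, nz[i]])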
22 changes: 9 additions & 13 deletions pcntoolkit/normative_model/norm_hbr.py
@@ -488,25 +488,21 @@ def get_mcmc_quantiles(self, X, batch_effects=None, z_scores=None):
return quantiles.mean(axis=-1)


- def get_mcmc_zscores(self, X, y, **kwargs):
+ def get_mcmc_zscores(self, X, y, batch_effects=None):

"""
Computes zscores of data given an estimated model
Args:
X ([N*p]ndarray): covariates
y ([N*1]ndarray): response variables
+ batch_effects (ndarray): the batch effects corresponding to X
"""

# Set batch effects to zero if none are provided
print(self.configs['likelihood'])

- tsbefile = kwargs.get("tsbefile", None)
- if tsbefile is not None:
-     batch_effects_test = fileio.load(tsbefile)
- else: # Set batch effects to zero if none are provided
-     print("Could not find batch-effects file! Initializing all as zeros ...")
-     batch_effects_test = np.zeros([X.shape[0], 1])

+ if batch_effects is None:
+     batch_effects = batch_effects_test = np.zeros([X.shape[0], 1])

# Determine the variables to predict
if self.configs["likelihood"] == "Normal":
var_names = ["mu_samples", "sigma_samples","sigma_plus_samples"]
@@ -529,7 +525,7 @@ def get_mcmc_zscores(self, X, y, **kwargs):
# Do a forward to get the posterior predictive in the idata
self.hbr.predict(
X=X,
- batch_effects=batch_effects_test,
+ batch_effects=batch_effects,
batch_effects_maps=self.batch_effects_maps,
pred="single",
var_names=var_names+["y_like"],
@@ -540,7 +536,7 @@ def get_mcmc_zscores(self, X, y, **kwargs):
self.hbr.idata, "posterior_predictive", var_names=var_names
)

- # Remove superfluous var_names
+ # Remove superfluous var_nammes
var_names.remove('sigma_samples')
if 'delta_samples' in var_names:
var_names.remove('delta_samples')
@@ -557,7 +553,7 @@ def get_mcmc_zscores(self, X, y, **kwargs):
*array_of_vars,
kwargs={"y": y, "likelihood": self.configs['likelihood']},
)
- return z_scores.mean(axis=-1).values
+ return z_scores.mean(axis=-1)



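With the signature restored above, get_mcmc_zscores takes the batch effects directly as an argument instead of reading a tsbefile path from kwargs, and falls back to zeros when none are given. A hypothetical usage sketch (nm, X_test, y_test and be_test are illustrative names; nm is assumed to be an already-estimated HBR normative model):

    # z-scores with explicit batch effects
    z = nm.get_mcmc_zscores(X_test, y_test, batch_effects=be_test)

    # omitting batch_effects initializes them to zeros, per the restored default
    z0 = nm.get_mcmc_zscores(X_test, y_test)

The returned values are the posterior z-scores averaged over MCMC samples (z_scores.mean(axis=-1) above).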
14 changes: 5 additions & 9 deletions pcntoolkit/util/utils.py
@@ -1141,19 +1141,15 @@ def fit(self, X):
self.max[i] = np.median(np.sort(X[:,i])[-int(np.round(X.shape[0] * self.tail)):])


- def transform(self, X, index=None):
+ def transform(self, X):

if self.scaler_type == 'standardize':
- if index is None:
-     X = (X - self.m) / self.s
- else:
-     X = (X - self.m[index]) / self.s[index]

+ X = (X - self.m) / self.s

elif self.scaler_type in ['minmax', 'robminmax']:
- if index is None:
-     X = (X - self.min) / (self.max - self.min)
- else:
-     X = (X - self.min[index]) / (self.max[index] - self.min[index])

+ X = (X - self.min) / (self.max - self.min)

if self.adjust_outliers:

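The scaler's transform above reverts to applying one set of fitted parameters to the whole input rather than selecting a single column via an index argument. A minimal sketch of the restored behaviour, assuming m, s, x_min and x_max stand in for the self.m, self.s, self.min and self.max arrays computed in fit():

    import numpy as np

    def transform_standardize(X, m, s):
        # column-wise standardization with the means/stds learned in fit()
        return (X - m) / s

    def transform_minmax(X, x_min, x_max):
        # column-wise scaling to [0, 1]; 'robminmax' uses the robust min/max from fit()
        return (X - x_min) / (x_max - x_min)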
2 changes: 1 addition & 1 deletion setup.py
@@ -1,7 +1,7 @@
from setuptools import setup, find_packages

setup(name='pcntoolkit',
- version='0.29',
+ version='0.28',
description='Predictive Clinical Neuroscience toolkit',
url='http://github.com/amarquand/PCNtoolkit',
author='Andre Marquand',
