
workflow updated; duplicate scglue entry removed
janursa committed Nov 28, 2024
1 parent b76d6c0 commit ec8efd8
Showing 5 changed files with 9 additions and 36 deletions.
12 changes: 2 additions & 10 deletions runs.ipynb
@@ -170,17 +170,9 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 2,
+    "execution_count": 1,
     "metadata": {},
-    "outputs": [
-     {
-      "name": "stdout",
-      "output_type": "stream",
-      "text": [
-       "Submitted batch job 7851343\n"
-      ]
-     }
-    ],
+    "outputs": [],
     "source": [
      "from src.helper import calculate_scores\n",
      "if False: # consensus: run this after updating grns\n",
5 changes: 4 additions & 1 deletion src/metrics/regression_2/main.py
@@ -325,7 +325,10 @@ def main(par: Dict[str, Any]) -> pd.DataFrame:
         # Evaluate GRN
         verbose_print(par['verbose'], f'Compute metrics for layer: {layer}', 3)
         verbose_print(par['verbose'], f'Static approach (theta=0):', 3)
-        score_static_min = static_approach(net_matrix, n_features_theta_min, X, groups, gene_names, tf_names, par['reg_type'], n_jobs=par['num_workers'])
+        if (n_features_theta_min!=0).any()==False:
+            score_static_min = np.nan
+        else:
+            score_static_min = static_approach(net_matrix, n_features_theta_min, X, groups, gene_names, tf_names, par['reg_type'], n_jobs=par['num_workers'])
         verbose_print(par['verbose'], f'Static approach (theta=0.5):', 3)
         score_static_median = static_approach(net_matrix, n_features_theta_median, X, groups, gene_names, tf_names, par['reg_type'], n_jobs=par['num_workers'])
         print(f'Static approach (theta=1):', flush=True)
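The new branch guards the theta=0 static evaluation against the degenerate case where no gene retains any regulators: instead of fitting on an empty feature set, the score is reported as NaN. A minimal sketch of the same guard, with the scorer stubbed out because only the control flow matters here; guarded_static_score and score_fn are illustrative names, not functions from this repository:

import numpy as np

def guarded_static_score(n_features, score_fn):
    # n_features holds, per target gene, how many regulators survive the theta=0 cut.
    # If every entry is zero there is nothing to regress on, so report NaN
    # instead of calling the scorer.
    if not (n_features != 0).any():
        return np.nan
    return score_fn(n_features)

The not (n_features != 0).any() form is equivalent to the == False comparison used in the diff.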
6 changes: 3 additions & 3 deletions src/metrics/regression_2/script.py
@@ -6,12 +6,12 @@
 
 ## VIASH START
 par = {
-    'evaluation_data': 'resources/grn-benchmark/evaluation_data.h5ad',
+    'evaluation_data': 'resources/evaluation_datasets/op_perturbation.h5ad',
     'layer': 'X_norm',
-    "prediction": "resources/grn_models/celloracle.csv",
+    "prediction": "output/models/collectri.csv",
     'tf_all': 'resources/prior/tf_all.csv',
     "max_n_links": 50000,
-    'consensus': 'resources/prior/consensus-num-regulators.json',
+    'consensus': 'output/models/op_consensus-num-regulators.json',
     'score': 'output/score_regression2.h5ad',
     'reg_type': 'ridge',
     'static_only': True,
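A minimal sketch of how the inputs named in this par block might be loaded, assuming the anndata and pandas packages are available; the loading code is illustrative and is not taken from the component itself:

import json
import anndata as ad
import pandas as pd

par = {
    'evaluation_data': 'resources/evaluation_datasets/op_perturbation.h5ad',
    'prediction': 'output/models/collectri.csv',
    'consensus': 'output/models/op_consensus-num-regulators.json',
}

adata = ad.read_h5ad(par['evaluation_data'])  # perturbation data used for evaluation
net = pd.read_csv(par['prediction'])          # predicted GRN edge list
with open(par['consensus']) as f:
    consensus = json.load(f)                  # consensus regulator counts per gene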
21 changes: 0 additions & 21 deletions src/utils/util.py
@@ -114,24 +114,3 @@ def read_gmt(file_path:str) -> dict[str, list[str]]:
             'genes': genes
         }
     return gene_sets
-def quantile_transformation(values, one_sided=False, log1p_scale=True):
-    from sklearn.preprocessing import QuantileTransformer
-    if log1p_scale:
-        log_data = np.log1p(values)  # log(x + 1) to avoid log(0)
-    if one_sided:
-        output_distribution = 'uniform'
-    else:
-        output_distribution = 'normal'
-    quantile_transformer = QuantileTransformer(output_distribution=output_distribution)
-    transformed_data = quantile_transformer.fit_transform(log_data.reshape(-1, 1)).reshape(len(log_data))
-    return transformed_data
-def zscore_transformation(values, one_sided=False, log1p_scale=True):
-    if log1p_scale:
-        log_data = np.log1p(values)  # log(x + 1) to avoid log(0)
-    if one_sided:
-        mean = 0
-    else:
-        mean = np.mean(values)
-    std = np.std(values)
-    transformed_data = (log_data-mean)/std
-    return transformed_data
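For reference, a self-contained sketch of the kind of z-score transform the removed zscore_transformation helper computed, written so that it also covers the log1p_scale=False path; this is an illustration, not code from the repository:

import numpy as np

def zscore_sketch(values, one_sided=False, log1p_scale=True):
    # Optionally compress the dynamic range with log(x + 1) before standardizing.
    data = np.log1p(values) if log1p_scale else np.asarray(values, dtype=float)
    # one_sided keeps zero as the reference point instead of centring on the mean.
    mean = 0.0 if one_sided else data.mean()
    return (data - mean) / data.std()

Here the mean and standard deviation are taken on the same array that is standardized, whereas the removed helper computed them on the raw values while scaling the log-transformed ones.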
1 change: 0 additions & 1 deletion src/workflows/run_benchmark/main.nf
@@ -17,7 +17,6 @@ workflow run_wf {
     grnboost2,
     ppcor,
     scenic,
-    scglue,
     scgpt,
 
     pearson_corr,
