## Get the hostname with the following command:
## $ hostname -f
##
## Configure an environment per hostname:
## [hostname1]
## ...
##
## Use the same environment for multiple hostnames:
## [hostname2, hostname3, ...]
## ...
##
## Using a group (see the example below):
## [hostname1, hostname2, ... : group]
## [group]
## ...
##
## Using an asterisk (*) as a wildcard in hostnames (IMPORTANT: only one * is allowed per hostname)
##
## [host*name1]
##
## [*hostname2, hostname3*]
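##
## Example of the group syntax (hypothetical hostnames and group name, for illustration only):
## [node1.example.edu, node2.example.edu : mylab]
## [mylab]
## conda_env = aquas_chipseq
## nth = 4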
# Stanford Kundaje group clusters (not under SGE)
[vayu, mitra, durga]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
conda_bin_dir = /software/miniconda3/bin
species_file = $script_dir/species/kundaje.conf
unlimited_mem_wt = true # unlimited max. memory and walltime on Kundaje clusters
nice = 10
nth = 4
# Stanford Kundaje group clusters (controlled with SGE)
[nandi, kali, amold, wotan, kadru, surya, indra]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
conda_bin_dir = /software/miniconda3/bin
species_file = $script_dir/species/kundaje.conf
unlimited_mem_wt = true # unlimited max. memory and walltime on Kundaje clusters
system = sge # force use of SGE (Sun Grid Engine)
nice = 20
nth = 4
# Stanford SCG: login node, computing nodes, file transfer servers
[scg*.stanford.edu, scg*.local, carmack.stanford.edu, crick.stanford.edu]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
species_file = $script_dir/species/scg.conf
nth = 8 # number of threads for each pipeline run
wt_spp = 72h # walltime for spp
system = sge # force use of SGE (Sun Grid Engine) on SCG3/4 even if a user does not explicitly specify it on the command line with 'bds -s sge chipseq.bds ...'
cluster_task_delay = 10 # delay (seconds) between cluster task submissions
# Stanford Sherlock clusters
[sherlock*.stanford.edu, sh-*.local, sh-*.int, sh-ln*.stanford.edu]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
species_file = $script_dir/species/sherlock.conf
nth = 8 # number of threads for each pipeline run
wt_spp = 47h # walltime for spp
system = slurm # force use of SLURM
cluster_task_delay = 30 # delay (seconds) between cluster task submissions
# default (if no section with hostname is found)
[default]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
species_file = $script_dir/species/aquas_chipseq_species.conf #(DEF_SPECIES_FILE: DO NOT REMOVE THIS COMMENT!)
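## Template for adding your own host (hypothetical hostname; uncomment and edit the values to match your system):
## [mycluster*.example.edu]
## conda_env = aquas_chipseq
## conda_env_py3 = aquas_chipseq_py3
## species_file = $script_dir/species/aquas_chipseq_species.conf
## nth = 4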