submit.yml.erb
<%
  groups = OodSupport::User.new.groups.map(&:name).drop(1)
  # your image location (default_kernel, used below) will differ
  ppn = num_cores.blank? ? 1 : num_cores.to_i
  # the form collects hours; Slurm's --time treats a bare integer as minutes
  walltime = (bc_num_hours.to_i * 60)
%>
---
batch_connect:
  template: basic
  websockify_cmd: '/usr/bin/websockify'
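  # extra values to record in this session's connection information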
  conn_params:
    - ssh_tunnel
    - connection_string
    - staged_root
  script_wrapper: |
    module purge
    module load singularity
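    # Open OnDemand substitutes the generated batch_connect script for the %s
    # placeholder below; quoting the heredoc delimiter keeps $PATH from being
    # expanded here so that it expands inside the container instead.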
    cat << "CTRSCRIPT" > container.sh
    export PATH="$PATH:/opt/TurboVNC/bin"
    %s
    CTRSCRIPT
    export container_image=<%= default_kernel %>
    # Benchmark info
    echo "TIMING - Starting jupyter at: $(date)"
    # Launch the Jupyter Notebook Server
    JUPYTER_TMPDIR=${TMPDIR:-/tmp}/$(id -un)-jupyter/
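    # Environment variables exported with the SINGULARITYENV_ prefix are passed
    # into the container with the prefix stripped.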
    ## for conda
    export SINGULARITYENV_CONDA_PKGS_DIRS=${HOME}/.conda/pkgs
    ## for pip
    export SINGULARITYENV_XDG_CACHE_HOME=${JUPYTER_TMPDIR}/xdg_cache_home
    ## job-specific tmp in case there are conflicts between different users running on the same node
    WORKDIR=${HOME}/jupyter/${SLURM_JOB_ID}
    # Make sure Python packages respect the number of cores the user actually asked for
    export SINGULARITYENV_MKL_NUM_THREADS=${SLURM_NPROCS}
    export SINGULARITYENV_NUMEXPR_NUM_THREADS=${SLURM_NPROCS}
    export SINGULARITYENV_OMP_NUM_THREADS=${SLURM_NPROCS}
    mkdir -m 700 -p ${WORKDIR}/tmp
    ## for the mne package specifically
    export SINGULARITYENV_NUMBA_CACHE_DIR=$HOME/.cache
    ## Need to add the --nv flag if we are running on a GPU
    <%- if gres_value != "" %>
    export SING_GPU="--nv"
    <%- else %>
    export SING_GPU=""
    <%- end %>
    # All our software is in /software, so we have to bind that
    export SING_BINDS="--bind /software:/software"
    ## bind some extra paths so we can talk to Slurm from within the container
    export SING_BINDS="$SING_BINDS -B /etc/passwd -B /etc/slurm -B `which sbatch ` -B `which srun ` -B `which sacct ` -B `which scontrol ` -B `which salloc ` -B /usr/lib64/slurm/ -B /usr/lib64/libmunge.so.2 -B /usr/lib64/libmunge.so.2.0.0 "
    # only bind /kellogg if the individual is part of the kellogg group
    export SING_BINDS="$SING_BINDS <%= groups.include?('kellogg') ? "--bind /kellogg/:/kellogg/" : "" %>"
    # only bind /scratch/<netid> if the individual has a scratch space
    export SING_BINDS="$SING_BINDS <%= File.directory?("/scratch/#{User.new.name}") ? "--bind /scratch/#{User.new.name}:/scratch/#{User.new.name}" : "" %>"
    # Only bind project directories that the user has access to based on their unix groups
    <%- groups.each do |group| %>
    export SING_BINDS="$SING_BINDS <%= File.directory?("/projects/#{group}") ? "--bind /projects/#{group}:/projects/#{group}" : "" %>"
    <%- end %>
    ## job-specific tmp dir
    export SING_BINDS=" $SING_BINDS -B ${WORKDIR}/tmp:/tmp "
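    # Run the wrapped batch_connect script (container.sh) inside the container image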
    singularity exec $SING_GPU $SING_BINDS "${container_image}" /bin/bash container.sh
  header: |
    #!/bin/bash
    . ~/.bashrc
script:
  <%- if user_email != "" %>
  email_on_started: true
  <%- end %>
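  # everything under native is passed straight through to sbatch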
  native:
    # Which partition the user is submitting to
    - "--partition"
    - "<%= slurm_partition %>"
    # Which account the user is submitting this job under
    - "--account"
    - "<%= slurm_account %>"
    # How much time (requested in hours on the form, passed to Slurm in minutes)
    - "--time"
    - "<%= walltime %>"
    # How many nodes (defaults to 1)
    <%- if number_of_nodes != "" %>
    - "--nodes"
    - "<%= number_of_nodes %>"
    <%- else %>
    - "--nodes"
    - "1"
    <%- end %>
    # How many CPUs
    - "--ntasks-per-node"
    - "<%= ppn %>"
    # How much memory (in GB)
    - "--mem"
    - "<%= memory_per_node %>G"
    # Job name
    - "--job-name"
    - "<%= job_name %>"
    # If the user supplied an e-mail address, they will get an e-mail when the job begins
    <%- if user_email != "" %>
    - "--mail-user"
    - "<%= user_email %>"
    <%- end %>
    # If the user requested a GPU, we need to add this argument to the job submit command
    <%- if gres_value != "" %>
    - "--gres"
    - "<%= gres_value %>"
    <%- end %>
    # If the user requested some kind of constraint, then apply that
    <%- if constraint != "" %>
    - "--constraint"
    - "<%= constraint %>"
    <%- end %>