// Extract the name of the cluster to tune the parameters below
def clustername = "farm22"
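// `lsid` prints a line of the form "My cluster name is <name>"; the awk call below keeps
// only that line and prints its fifth field, i.e. the cluster name.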
try {
    clustername = ['/bin/bash', '-c', 'lsid | awk \'$0 ~ /^My cluster name is/ {print $5}\''].execute().text.trim()
} catch (java.io.IOException e) {
    System.err.println("WARNING: Could not run lsid to determine the current cluster, defaulting to farm22")
}
// Profile details
params {
    config_profile_description = "The Wellcome Sanger Institute HPC cluster (${clustername}) profile"
    config_profile_contact = 'Priyanka Surana (@priyanka-surana)'
    config_profile_url = 'https://www.sanger.ac.uk'
}
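// These values are reported by nf-core pipelines in their run summary, so users can see
// which institutional profile was applied.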
// Queue and LSF submission options
process {
    executor = 'lsf'
    // Currently a single set of rules for all clusters, but we could use $clustername to apply
    // different rules to different clusters.
    queue = {
        if ( task.time >= 15.day ) {
            if ( task.memory > 680.GB ) {
                error "There is no queue for jobs that need >680 GB and >15 days"
            } else {
                "basement"
            }
        } else if ( task.memory > 720.GB ) {
            "teramem"
        } else if ( task.memory > 350.GB ) {
            "hugemem"
        } else if ( task.time > 7.day ) {
            "basement"
        } else if ( task.time > 2.day ) {
            "week"
        } else if ( task.time > 12.hour ) {
            "long"
        } else if ( task.time > 1.min ) {
            "normal"
        } else {
            "small"
        }
    }
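    // GPU processes (label 'gpu'): the LSF resource string below asks for a host with at
    // least one GPU and enough memory, reserves one physical GPU in exclusive-process mode,
    // and keeps the job on a single host (span[ptile=1]).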
    withLabel: gpu {
        clusterOptions = { "-M "+task.memory.toMega()+" -R 'select[ngpus>0 && mem>="+task.memory.toMega()+"] rusage[ngpus_physical=1.00,mem="+task.memory.toMega()+"] span[ptile=1]' -gpu 'mode=exclusive_process'" }
        queue = { task.time > 48.h ? 'gpu-basement' : task.time > 12.h ? 'gpu-huge' : 'gpu-normal' }
        containerOptions = {
            workflow.containerEngine == "singularity" ? '--containall --cleanenv --nv' :
                ( workflow.containerEngine == "docker" ? '--gpus all' : null )
        }
    }
}
// Executor details
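// perJobMemLimit tells Nextflow that LSF memory limits are interpreted per job rather than
// per slot on these clusters; submitRateLimit and killBatchSize throttle submission and kill
// requests so that large runs do not overwhelm the scheduler.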
executor {
    name = 'lsf'
    perJobMemLimit = true
    poolSize = 4
    submitRateLimit = '5 sec'
    killBatchSize = 50
}
// Max resources
if (clustername.startsWith("tol")) {
    // tol cluster
    params.max_memory = 1.4.TB
    params.max_cpus = 64
    params.max_time = 89280.min // 62 days
    // Unlike the farm settings below, we don't mount any filesystem by default.
    // Pipelines that need to see certain filesystems have to set singularity.runOptions themselves.
} else {
    // defaults for the main farm
    params.max_memory = 2.9.TB
    params.max_cpus = 256
    params.max_time = 43200.min // 30 days
    // Mount all filesystems by default
    singularity.runOptions = '--bind /lustre --bind /nfs --bind /data --bind /software'
}
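
// Typical usage with an nf-core pipeline (the pipeline name is only a placeholder):
//   nextflow run nf-core/<pipeline> -profile sanger
// often combined with a container profile (e.g. `-profile sanger,singularity`), since this
// profile sets singularity.runOptions but does not enable a container engine itself.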