nextflow.config
/*
 * -------------------------------------------------
 *  peterk87/nf-biohansel-sra-benchmark Nextflow config file
 * -------------------------------------------------
 * Default config options for all environments.
 * Cluster-specific config options should be saved
 * in the conf folder and imported under a profile
 * name here.
 */
// Workflow manifest
manifest {
  name = 'peterk87/nf-biohansel-sra-benchmark'
  author = 'Peter Kruczkiewicz'
  homePage = 'https://github.com/peterk87/nf-biohansel-sra-benchmark'
  description = 'Nextflow Workflow for benchmarking biohansel against NCBI SRA genomes'
  mainScript = 'main.nf'
  nextflowVersion = '>=0.32.0'
  version = '1.0dev'
}
// Global default params, used in configs
params {
  // Container slug. Stable releases should specify release tag!
  // Developmental code should specify :latest
  container = 'peterk87/nf-biohansel-sra-benchmark:latest'
  // Workflow options
  outdir = "results"
  help = false
  schemesdir = 'schemes'
  n_genomes = 96
  iterations = 10
  thread_combos = '1,2,4,8,16,32'
  random_seed = 42
  tracedir = "${params.outdir}/pipeline_info"
}
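// Any of the params above can be overridden on the command line with Nextflow's
// double-dash syntax. Illustrative invocation (the values shown are examples,
// not defaults of this workflow):
//   nextflow run peterk87/nf-biohansel-sra-benchmark --n_genomes 48 --iterations 5 --outdir results-48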
// Load base.config by default for all pipelines
includeConfig 'conf/base.config'
// Workflow execution config profiles
profiles {
  conda { process.conda = "$baseDir/environment.yml" }
  singularity {
    singularity.enabled = true
    singularity.autoMounts = true
    process.container = { "shub://${params.container}" }
  }
  slurm {
    process {
      executor = 'slurm'
      // params.slurm_queue is not defined in this file; it is expected to be
      // supplied at runtime (e.g. --slurm_queue) or by an included config
      queue = params.slurm_queue
    }
  }
  test { includeConfig 'conf/test.config' }
}
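// A profile is selected at runtime with Nextflow's -profile option; multiple
// profiles can be combined with commas. Illustrative invocation (the queue
// name is a placeholder, not a value from this repository):
//   nextflow run peterk87/nf-biohansel-sra-benchmark -profile singularity,slurm --slurm_queue <queue-name>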
// Capture exit codes from upstream processes when piping
process.shell = ['/bin/bash', '-euo', 'pipefail']
// Trace information
timeline {
  enabled = true
  file = "${params.tracedir}/timeline.html"
}
report {
  enabled = true
  file = "${params.tracedir}/report.html"
}
trace {
  enabled = true
  file = "${params.tracedir}/trace.txt"
}
dag {
  enabled = true
  file = "${params.tracedir}/dag.svg"
}
// Function to ensure that resource requirements don't go beyond
// a maximum limit.
// params.max_memory, params.max_time and params.max_cpus are not defined in
// this file; they are expected to come from an included config
// (e.g. conf/base.config) or the command line.
def check_max(obj, type) {
  if (type == 'memory') {
    try {
      if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
        return params.max_memory as nextflow.util.MemoryUnit
      else
        return obj
    } catch (all) {
      println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
      return obj
    }
  } else if (type == 'time') {
    try {
      if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
        return params.max_time as nextflow.util.Duration
      else
        return obj
    } catch (all) {
      println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
      return obj
    }
  } else if (type == 'cpus') {
    try {
      return Math.min( obj, params.max_cpus as int )
    } catch (all) {
      println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
      return obj
    }
  }
}
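// Illustrative use of check_max() from a process resource block, sketching the
// nf-core-style pattern this helper is written for. The label and the base
// resource values below are assumptions for illustration, not taken from this
// repository's conf/base.config:
//
//   process {
//     withLabel: high_resources {
//       cpus   = { check_max( 4 * task.attempt, 'cpus' ) }
//       memory = { check_max( 8.GB * task.attempt, 'memory' ) }
//       time   = { check_max( 4.h * task.attempt, 'time' ) }
//     }
//   }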