params {
    config_profile_description = 'The Sage Bionetworks profile'
    config_profile_contact     = 'Bruno Grande (@BrunoGrandePhD)'
    config_profile_url         = 'https://github.com/Sage-Bionetworks-Workflows'
}

process {

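    // check_max() is not defined in this file: it is the resource-capping
    // helper that nf-core pipelines define in their top-level nextflow.config,
    // and this profile assumes it is available at runtime. slow() is defined
    // at the bottom of this file.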
    cpus   = { check_max( 1 * slow(task.attempt), 'cpus' ) }
    memory = { check_max( 6.GB * task.attempt, 'memory' ) }
    time   = { check_max( 24.h * task.attempt, 'time' ) }

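    // The exit codes below typically indicate a task that was killed for
    // exceeding its resource allocation (e.g. 137 = SIGKILL, often from the
    // out-of-memory killer; 143 = SIGTERM), so such tasks are retried with
    // scaled-up resources instead of failing the whole run.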
    errorStrategy = { task.exitStatus in [143,137,104,134,139,247] ? 'retry' : 'finish' }
    maxRetries    = 5
    maxErrors     = '-1'

    // Process-specific resource requirements
    withLabel:process_low {
        cpus   = { check_max( 4 * slow(task.attempt), 'cpus' ) }
        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
        time   = { check_max( 24.h * task.attempt, 'time' ) }
    }
    withLabel:process_medium {
        cpus   = { check_max( 12 * slow(task.attempt), 'cpus' ) }
        memory = { check_max( 36.GB * task.attempt, 'memory' ) }
        time   = { check_max( 48.h * task.attempt, 'time' ) }
    }
    withLabel:process_high {
        cpus   = { check_max( 24 * slow(task.attempt), 'cpus' ) }
        memory = { check_max( 72.GB * task.attempt, 'memory' ) }
        time   = { check_max( 96.h * task.attempt, 'time' ) }
    }
    withLabel:process_long {
        time   = { check_max( 192.h * task.attempt, 'time' ) }
    }
    withLabel:process_high_memory {
        memory = { check_max( 200.GB * task.attempt, 'memory' ) }
    }

}

aws {
    region = "us-east-1"
}

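// These limits are the ceilings that check_max() applies when the per-attempt
// resource requests above are scaled up on retry.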
params {
    igenomes_base = 's3://sage-igenomes/igenomes'
    max_memory    = 500.GB
    max_cpus      = 64
    max_time      = 168.h // One week
}

// Function to slow the increase of the resource multiplier as attempts
// are made. The rationale is that CPU requests don't need to be scaled
// up as quickly as memory requests between retries.
def slow(attempt, factor = 2) {
    return Math.ceil( attempt / factor ) as int
}
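
// With the default factor of 2, slow() maps attempts 1, 2, 3, 4 to multipliers
// 1, 1, 2, 2: CPU requests step up every other retry, while memory and time
// (which use task.attempt directly) grow on every retry.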