//
// nf-configs/conf/sage.config
// Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-09-21)
//
// Config profile metadata
params {
// Human-readable description shown in the pipeline summary output
config_profile_description = 'The Sage Bionetworks profile'
// Maintainer to contact about this institutional profile
config_profile_contact = 'Bruno Grande (@BrunoGrandePhD)'
// Organisation homepage for this profile
config_profile_url = 'https://github.com/Sage-Bionetworks-Workflows'
}
// Leverage us-east-1 mirror of select human and mouse genomes
// (avoids cross-region S3 transfer when compute also runs in us-east-1)
params {
igenomes_base = 's3://sage-igenomes/igenomes'
}
// Enable retries globally for certain exit codes
process {
// Retry only on exit codes that typically indicate transient failures
// (e.g. 137/143 are 128+SIGKILL/128+SIGTERM, often OOM kills or spot
// reclamation); any other failure lets pending tasks finish, then stops.
errorStrategy = { task.exitStatus in [143,137,104,134,139,247] ? 'retry' : 'finish' }
maxRetries = 5
// '-1' disables the cap on the total number of errored tasks
maxErrors = '-1'
}
// Increase time limit to allow file transfers to finish
// The default is 12 hours, which results in timeouts
// (applies to Nextflow's internal FileTransfer thread pool)
threadPool.FileTransfer.maxAwait = '24 hour'
// Configure Nextflow to be more reliable on AWS
aws {
// Target region for AWS API calls (matches the sage-igenomes bucket)
region = "us-east-1"
client {
// 209715200 bytes = 200 MiB multipart-upload chunk size, reducing
// the number of parts (and failure points) for large S3 uploads
uploadChunkSize = 209715200
}
}
executor {
name = 'awsbatch'
// Ensure unlimited queue size on AWS Batch
// (effectively unlimited: up to 100,000 tasks queued at once)
queueSize = 100000
// Slow down the rate at which AWS Batch jobs accumulate in
// the queue (an attempt to prevent orphaned EBS volumes);
// i.e. at most 5 job submissions per second
submitRateLimit = '5 / 1 sec'
}
// Disabling resource allocation tweaks for now
//
// params {
// max_memory = 500.GB
// max_cpus = 64
// max_time = 168.h // One week
// }
//
// process {
//
// cpus = { check_max( 1 * slow(task.attempt), 'cpus' ) }
// memory = { check_max( 6.GB * task.attempt, 'memory' ) }
// time = { check_max( 24.h * task.attempt, 'time' ) }
//
// // Process-specific resource requirements
// withLabel:process_low {
// cpus = { check_max( 4 * slow(task.attempt), 'cpus' ) }
// memory = { check_max( 12.GB * task.attempt, 'memory' ) }
// time = { check_max( 24.h * task.attempt, 'time' ) }
// }
// withLabel:process_medium {
// cpus = { check_max( 12 * slow(task.attempt), 'cpus' ) }
// memory = { check_max( 36.GB * task.attempt, 'memory' ) }
// time = { check_max( 48.h * task.attempt, 'time' ) }
// }
// withLabel:process_high {
// cpus = { check_max( 24 * slow(task.attempt), 'cpus' ) }
// memory = { check_max( 72.GB * task.attempt, 'memory' ) }
// time = { check_max( 96.h * task.attempt, 'time' ) }
// }
// withLabel:process_long {
// time = { check_max( 192.h * task.attempt, 'time' ) }
// }
// withLabel:process_high_memory {
// memory = { check_max( 128.GB * task.attempt, 'memory' ) }
// }
//
// // Preventing Sarek labels from using the actual maximums
// withLabel:memory_max {
// memory = { check_max( 128.GB * task.attempt, 'memory' ) }
// }
// withLabel:cpus_max {
// cpus = { check_max( 24 * slow(task.attempt), 'cpus' ) }
// }
//
// }
// Dampen how quickly the resource multiplier grows as retry
// attempts are made. Rationale: some resources (e.g. CPUs)
// don't need to be increased as fast as memory.
def slow(attempt, factor = 2) {
    def dampened = Math.ceil(attempt / factor)
    return dampened as int
}
// Function to ensure that resource requirements don't go
// beyond a maximum limit (copied here for Sarek v2)
//
// obj  : requested resource amount (MemoryUnit, Duration, or cpu count)
// type : one of 'memory', 'time', or 'cpus'
// Returns the request capped at the matching params.max_* value; if the
// configured maximum cannot be parsed, logs an error and returns the
// original request unchanged. Unknown types fall through (null).
def check_max(obj, type) {
    if (type == 'memory') {
        try {
            def max_memory = params.max_memory as nextflow.util.MemoryUnit
            // The Comparable contract only guarantees the SIGN of
            // compareTo(), so test '> 0' rather than '== 1'
            if (obj.compareTo(max_memory) > 0)
                return max_memory
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            def max_time = params.max_time as nextflow.util.Duration
            if (obj.compareTo(max_time) > 0)
                return max_time
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            // CPU counts are plain integers: take the smaller value
            return Math.min( obj, params.max_cpus as int )
        } catch (all) {
            println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
}