commit 70e3eafeb1
@@ -0,0 +1,14 @@
// Profile config names for nf-core/configs
params {
    config_profile_description = 'Centre for Genomic Regulation (CRG) cluster profile provided by nf-core/configs'
    config_profile_contact = 'Athanasios Baltzis (@athbaltzis)'
    config_profile_url = 'http://www.linux.crg.es/index.php/Main_Page'
}

process {
    executor = 'crg'
}

singularity {
    enabled = true
}
@@ -0,0 +1,104 @@
params {
    config_profile_description = 'HKI clusters profile provided by nf-core/configs.'
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_url = 'https://leibniz-hki.de'
}

profiles {
    apate {
        params {
            config_profile_description = 'apate HKI cluster profile provided by nf-core/configs'
            config_profile_contact = 'James Fellows Yates (@jfy133)'
            config_profile_url = 'https://leibniz-hki.de'
            max_memory = 128.GB
            max_cpus = 32
            max_time = 1440.h
        }
        process {
            executor = 'local'
            maxRetries = 2
        }

        executor {
            queueSize = 8
        }

        singularity {
            enabled = true
            autoMounts = true
            cacheDir = '/Net/Groups/ccdata/apps/singularity'
        }

        conda {
            cacheDir = '/Net/Groups/ccdata/apps/conda_envs'
        }

        cleanup = true
    }

    aither {
        params {
            config_profile_description = 'aither HKI cluster profile provided by nf-core/configs'
            config_profile_contact = 'James Fellows Yates (@jfy133)'
            config_profile_url = 'https://leibniz-hki.de'
            max_memory = 128.GB
            max_cpus = 32
            max_time = 1440.h
        }
        process {
            executor = 'local'
            maxRetries = 2
        }

        executor {
            queueSize = 8
        }

        singularity {
            enabled = true
            autoMounts = true
            cacheDir = '/Net/Groups/ccdata/apps/singularity'
        }

        conda {
            cacheDir = '/Net/Groups/ccdata/apps/conda_envs'
        }

        cleanup = true
    }

    arges {
        params {
            config_profile_description = 'arges HKI cluster profile provided by nf-core/configs'
            config_profile_contact = 'James Fellows Yates (@jfy133)'
            config_profile_url = 'https://leibniz-hki.de'
            max_memory = 64.GB
            max_cpus = 12
            max_time = 1440.h
        }
        process {
            executor = 'local'
            maxRetries = 2
        }

        executor {
            queueSize = 8
        }

        singularity {
            enabled = true
            autoMounts = true
            cacheDir = '/Net/Groups/ccdata/apps/singularity'
        }

        conda {
            cacheDir = '/Net/Groups/ccdata/apps/conda_envs'
        }

        cleanup = true
    }

    debug {
        cleanup = false
    }
}
@@ -0,0 +1,25 @@
params {
    config_profile_contact = 'Adrija Kalvisa <adrija.kalvisa@sund.ku.dk>'
    config_profile_description = 'dangpufl01 configuration'
    config_profile_url = ''

    // General cpus/memory/time requirements
    max_cpus = 30
    max_memory = 200.GB
    max_time = 72.h
}

process {
    executor = 'slurm'
}

executor {
    queueSize = 5
}

singularity {
    enabled = true
    autoMounts = true
    runOptions = '--bind /projects:/projects'
}
@@ -0,0 +1,21 @@
params {
    config_profile_description = 'University of Hawaii at Manoa'
    config_profile_url = 'http://www.hawaii.edu/its/ci/'
    config_profile_contact = 'Cedric Arisdakessian'

    max_memory = 400.GB
    max_cpus = 96
    max_time = 72.h
}

process {
    executor = 'slurm'
    queue = 'shared,exclusive,kill-shared,kill-exclusive'
    module = 'tools/Singularity'
}

singularity {
    enabled = true
    cacheDir = "$HOME/.singularity_images_cache"
    autoMounts = true
}
@@ -0,0 +1,73 @@
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Custom Pipeline Resource Config for nf-core/atacseq

// process-specific resource requirements - reduced specification from those in atacseq/conf/base.config

process {

    // error and retry handling
    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
    maxRetries = 2

    // process labels
    withLabel:process_low {
        cpus = { check_max( 2 * task.attempt, 'cpus' ) }
        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
        time = { check_max( 4.h * task.attempt, 'time' ) }
    }

    withLabel:process_medium {
        cpus = { check_max( 4 * task.attempt, 'cpus' ) }
        memory = { check_max( 32.GB * task.attempt, 'memory' ) }
        time = { check_max( 6.h * task.attempt, 'time' ) }
    }

    withLabel:process_high {
        cpus = { check_max( 8 * task.attempt, 'cpus' ) }
        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
        time = { check_max( 8.h * task.attempt, 'time' ) }
    }

    withLabel:process_long {
        time = { check_max( 12.h * task.attempt, 'time' ) }
    }

}

// function 'check_max()' to ensure that resource requirements don't go beyond maximum limit

def check_max(obj, type) {
    if (type == 'memory') {
        try {
            if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
                return params.max_memory as nextflow.util.MemoryUnit
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
                return params.max_time as nextflow.util.Duration
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min(obj, params.max_cpus as int)
        } catch (all) {
            println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
}
@@ -0,0 +1,73 @@
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Custom Pipeline Resource Config for nf-core/chipseq

// process-specific resource requirements - reduced specification from those in chipseq/conf/base.config

process {

    // error and retry handling
    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
    maxRetries = 2

    // process labels
    withLabel:process_low {
        cpus = { check_max( 2 * task.attempt, 'cpus' ) }
        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
        time = { check_max( 4.h * task.attempt, 'time' ) }
    }

    withLabel:process_medium {
        cpus = { check_max( 4 * task.attempt, 'cpus' ) }
        memory = { check_max( 32.GB * task.attempt, 'memory' ) }
        time = { check_max( 6.h * task.attempt, 'time' ) }
    }

    withLabel:process_high {
        cpus = { check_max( 8 * task.attempt, 'cpus' ) }
        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
        time = { check_max( 8.h * task.attempt, 'time' ) }
    }

    withLabel:process_long {
        time = { check_max( 12.h * task.attempt, 'time' ) }
    }

}

// function 'check_max()' to ensure that resource requirements don't go beyond maximum limit

def check_max(obj, type) {
    if (type == 'memory') {
        try {
            if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
                return params.max_memory as nextflow.util.MemoryUnit
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
                return params.max_time as nextflow.util.Duration
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min(obj, params.max_cpus as int)
        } catch (all) {
            println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
}
@@ -0,0 +1,29 @@
// Profile config names for nf-core/configs

params {
    // Specific nf-core/configs params
    config_profile_contact = 'Edmund Miller (@emiller88)'
    config_profile_description = 'nf-core/demultiplex AWS Tower profile provided by nf-core/configs'
}

aws {
    batch {
        maxParallelTransfers = 24
        maxTransferAttempts = 3
    }
    client {
        maxConnections = 24
        uploadMaxThreads = 24
        maxErrorRetry = 3
        socketTimeout = 3600000
        uploadRetrySleep = 1000
        uploadChunkSize = 32.MB
    }
}

process {
    withName: BASES2FASTQ {
        cpus = 16
        memory = 48.GB
    }
}
@@ -0,0 +1,27 @@
profiles {
    crg {
        params {
            config_profile_contact = 'Athanasios Baltzis (@athbaltzis)'
            config_profile_description = 'nf-core/proteinfold CRG profile provided by nf-core/configs'
        }
        executor.name = 'crg'
        process {
            queue = 'short-sl7,long-sl7'
            withName: 'RUN_AF2|RUN_AF2_PRED|COLABFOLD_BATCH' {
                cpus = 1
                memory = "30 GB"
                queue = params.use_gpu ? 'gpu' : 'long-sl7'
                clusterOptions = { ( task.queue == 'gpu' ? '-l gpu=1' : '' ) }
            }
            withName: 'ARIA2' {
                time = '12h'
            }
            withName: 'MMSEQS_COLABFOLDSEARCH' {
                queue = 'mem_512'
                memory = "100 GB"
                cpus = 8
                time = '12h'
            }
        }
    }
}
@@ -0,0 +1,77 @@
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Custom Pipeline Resource Config for nf-core/rnaseq

// process-specific resource requirements - reduced specification from those in rnaseq/conf/base.config

process {

    // error and retry handling
    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
    maxRetries = 2

    // process labels
    withLabel:process_low {
        cpus = { check_max( 2 * task.attempt, 'cpus' ) }
        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
        time = { check_max( 4.h * task.attempt, 'time' ) }
    }

    withLabel:process_medium {
        cpus = { check_max( 4 * task.attempt, 'cpus' ) }
        memory = { check_max( 32.GB * task.attempt, 'memory' ) }
        time = { check_max( 6.h * task.attempt, 'time' ) }
    }

    withLabel:process_high {
        cpus = { check_max( 8 * task.attempt, 'cpus' ) }
        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
        time = { check_max( 8.h * task.attempt, 'time' ) }
    }

    withLabel:process_long {
        time = { check_max( 12.h * task.attempt, 'time' ) }
    }

    withLabel:process_high_memory {
        memory = { check_max( 160.GB * task.attempt, 'memory' ) }
    }

}

// function 'check_max()' to ensure that resource requirements don't go beyond maximum limit

def check_max(obj, type) {
    if (type == 'memory') {
        try {
            if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
                return params.max_memory as nextflow.util.MemoryUnit
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
                return params.max_time as nextflow.util.Duration
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min(obj, params.max_cpus as int)
        } catch (all) {
            println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
}
@@ -0,0 +1,111 @@
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Custom Pipeline Resource Config for nf-core/sarek

// process-specific resource requirements - reduced specification from those in sarek/conf/base.config

process {

    // error and retry handling
    errorStrategy = { task.exitStatus in [143,137,104,134,139,140,247] ? 'retry' : 'finish' }
    maxRetries = 2

    // process labels
    withLabel:process_low {
        cpus = { check_max( 2 * task.attempt, 'cpus' ) }
        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
        time = { check_max( 4.h * task.attempt, 'time' ) }
    }

    withLabel:process_medium {
        cpus = { check_max( 6 * task.attempt, 'cpus' ) }
        memory = { check_max( 72.GB * task.attempt, 'memory' ) }
        time = { check_max( 6.h * task.attempt, 'time' ) }
    }

    withLabel:process_high {
        cpus = { check_max( 12 * task.attempt, 'cpus' ) }
        memory = { check_max( 192.GB * task.attempt, 'memory' ) }
        time = { check_max( 8.h * task.attempt, 'time' ) }
    }

    withLabel:process_long {
        time = { check_max( 12.h * task.attempt, 'time' ) }
    }

    withLabel:process_high_memory {
        memory = { check_max( 240.GB * task.attempt, 'memory' ) }
    }

    // process name
    withName:'BWAMEM1_MEM|BWAMEM2_MEM' {
        cpus = { check_max( 12 * task.attempt, 'cpus' ) }
        memory = { check_max( 192.GB * task.attempt, 'memory' ) }
        time = { check_max( 8.h * task.attempt, 'time' ) }
    }

    withName:'FASTP' {
        cpus = { check_max( 12 * task.attempt, 'cpus' ) }
    }

    withName:'FASTQC|FASTP|MOSDEPTH|SAMTOOLS_CONVERT' {
        memory = { check_max( 4.GB * task.attempt, 'memory' ) }
    }

    withName:'GATK4_APPLYBQSR|GATK4_APPLYBQSR_SPARK|GATK4_BASERECALIBRATOR|SAMTOOLS_STATS' {
        cpus = { check_max( 4 * task.attempt, 'cpus' ) }
    }

    withName:'GATK4_APPLYBQSR|GATK4_APPLYBQSR_SPARK|GATK4_BASERECALIBRATOR|GATK4_GATHERBQSRREPORTS' {
        memory = { check_max( 72.GB * task.attempt, 'memory' ) }
    }

    withName:'GATK4_MARKDUPLICATES' {
        cpus = { check_max( 12 * task.attempt, 'cpus' ) }
        memory = { check_max( 240.GB * task.attempt, 'memory' ) }
        time = { check_max( 12.h * task.attempt, 'time' ) }
    }

    withName:'FREEBAYES|SAMTOOLS_STATS|SAMTOOLS_INDEX|UNZIP' {
        cpus = { check_max( 1 * task.attempt, 'cpus' ) }
    }

}

// function 'check_max()' to ensure that resource requirements don't go beyond maximum limit

def check_max(obj, type) {
    if (type == 'memory') {
        try {
            if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
                return params.max_memory as nextflow.util.MemoryUnit
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
                return params.max_time as nextflow.util.Duration
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min(obj, params.max_cpus as int)
        } catch (all) {
            println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
}
@@ -0,0 +1,59 @@
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Base Institutional Configuration

// nf-core specific parameters displayed in header summary of each run

params {

    config_profile_description = 'Sheffield Bioinformatics Core - ShARC'
    config_profile_contact = 'Lewis Quayle (l.quayle@sheffield.ac.uk)'
    config_profile_url = 'https://docs.hpc.shef.ac.uk/en/latest/sharc/index.html'

}

// hpc resource limits

params {

    max_cpus = 16
    max_memory = 256.GB
    max_time = 96.h

}

// hpc configuration specific to ShARC

process {

    // scheduler
    executor = 'sge'
    penv = 'smp'
    queue = { task.time <= 6.h ? 'shortint.q' : 'all.q' }
    clusterOptions = { "-l rmem=${ (task.memory.toGiga() / task.cpus) }G" }

}

// optional executor settings

executor {

    queueSize = 10
    submitRateLimit = '1 sec'

}

// container engine

singularity {

    enabled = true
    autoMounts = true

}
@@ -0,0 +1,14 @@
params {
    config_profile_description = 'Telethon Institute of Genetics and Medicine (TIGEM) provided by nf-core/configs.'
    config_profile_contact = 'Giuseppe Martone (@giusmar)'
    config_profile_url = 'https://github.com/giusmar'
}

process.executor = 'slurm'
google.zone = 'europe-west1'

singularity {
    enabled = true
    autoMounts = true
    cacheDir = 'work/singularity'
}
@@ -0,0 +1,34 @@
params {

    config_profile_description = 'University College London Myriad cluster'
    config_profile_contact = 'Chris Wyatt (ucbtcdr@ucl.ac.uk)'
    config_profile_url = 'https://www.rc.ucl.ac.uk/docs/Clusters/Myriad/'

}

process {
    executor = 'sge'
    penv = 'smp'
}

params {
    // Defaults only, expecting to be overwritten
    max_memory = 128.GB
    max_cpus = 36
    max_time = 72.h
    // igenomes_base = 's3://ngi-igenomes/igenomes/'
}

// optional executor settings

executor {

    queueSize = 10
    submitRateLimit = '1 sec'

}

singularity {
    enabled = true
    autoMounts = true
}
@@ -0,0 +1,45 @@
# nf-core/configs: GIS Aquila Configuration

All nf-core pipelines have been successfully configured for use on the Aquila cluster of the Genome Institute of Singapore (GIS).

To use, run the pipeline with `-profile gis`. This will download and launch the [`gis.config`](../conf/gis.config) which has been pre-configured with a setup suitable for the GIS Aquila cluster. Using this profile, a docker image containing all of the required software will be downloaded and converted to a Singularity image before execution of the pipeline.

## How to use on GIS core

Before running the pipeline you will need to load Nextflow using the environment module system on GIS Aquila. You can do this by issuing the commands below:

```bash
# Login to a compute node
srun --pty bash

## Load Nextflow and Singularity environment modules
module purge
source /mnt/projects/rpd/rc/init.2017-04
module load miniconda3

# Run a nextflow pipeline with dependencies bundled in a conda environment
set +u
source activate nfcore-rnaseq-1.0dev
set -u

# Run a downloaded/git-cloned nextflow workflow
nextflow run \
    nf-core/workflow \
    -resume \
    -profile gis \
    --email my-email@example.org \
    -c my-specific.config
...

# Or use the nf-core client
nextflow run nf-core/rnaseq ...

```

## Databanks

A local copy of several genomes is available in the `/mnt/projects/rpd/genomes.testing/S3_igenomes/` directory.
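
If a pipeline supports iGenomes references, this local copy can in principle be used instead of downloading references from S3 by overriding the iGenomes base path. A minimal sketch, in which the pipeline name and genome key are purely illustrative:

```bash
# Illustrative sketch: point an nf-core pipeline at the local iGenomes copy
nextflow run nf-core/rnaseq \
    -profile gis \
    --genome GRCh37 \
    --igenomes_base /mnt/projects/rpd/genomes.testing/S3_igenomes/ \
    --outdir results
```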

> NB: You will need an account to use the HPC cluster on GIS in order to run the pipeline. If in doubt, contact IT or [Andreas Wilm](https://github.com/andreas-wilm).
@@ -0,0 +1,24 @@
# nf-core/configs: HKI Configuration

All nf-core pipelines have been successfully configured for use on clusters at the [Leibniz Institute for Natural Product Research and Infection Biology - Hans Knöll Institute](https://www.leibniz-hki.de/en).

To use, run the pipeline with `-profile hki,<cluster>`. This will download and launch the [`hki.config`](../conf/hki.config) which contains specific profiles for each cluster. The number of parallel jobs that run is currently limited to 8.

The currently available profiles are:

- apate (uses singularity, cleanup set to true by default)
- arges (uses singularity, cleanup set to true by default)
- aither (uses singularity, cleanup set to true by default)
- debug (sets cleanup to false for debugging purposes, use e.g. `-profile hki,<cluster>,debug`)
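
For example, a minimal sketch of a run on the apate cluster (the pipeline name and `test` profile are illustrative):

```bash
# Illustrative sketch: run the test profile of an nf-core pipeline on the apate cluster
nextflow run nf-core/rnaseq -profile test,hki,apate --outdir results
```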

Note that Nextflow is not necessarily installed by default on the HKI HPC cluster(s). You will need to install it into a directory you have write access to, following the instructions in the Nextflow documentation:

- Install Nextflow: [here](https://www.nextflow.io/docs/latest/getstarted.html#)

All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully, because it can get quite large and all of the main output files will be saved in the `results/` directory anyway.

> NB: You will need an account to use the HKI HPC clusters in order to run the pipeline. If in doubt, contact the ICT Service Desk.
> NB: Nextflow will need to submit the jobs via SLURM to the HKI HPC clusters, and as such the commands above will have to be executed on the login node. If in doubt, contact ICT.
@@ -0,0 +1,35 @@
# nf-core/configs: ku_sund_dangpu configuration

All nf-core pipelines have been successfully configured for use on the DANGPU at the Novo Nordisk Foundation Center for Stem Cell Medicine (reNEW) and the Novo Nordisk Foundation Center for Protein Research (CPR) at the University of Copenhagen.

To use, run the pipeline with `-profile ku_sund_dangpu`. This will download and launch the [`ku_sund_dangpu.config`](../conf/ku_sund_dangpu.config) which has been pre-configured with a setup suitable for the DANGPU.

## Modules

Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on DANGPU. You can do this by issuing the commands below:

```bash
## Load Nextflow and Singularity environment modules
module purge
module load java/11.0.15 nextflow/22.04.4 singularity/3.8.0
# alternative modules for an older Nextflow version (v21) that works with Java 8:
# module load jdk/1.8.0_291 nextflow/21.04.1.5556 singularity/3.8.0
export NXF_OPTS='-Xms1g -Xmx4g'
export NXF_HOME=/projects/dan1/people/${USER}/cache/nxf-home
export NXF_TEMP=/scratch/tmp
export NXF_SINGULARITY_CACHEDIR=/projects/dan1/people/${USER}/cache/singularity-images
```

Create the user-specific Nextflow directories if they don't exist yet:

```bash
mkdir $NXF_SINGULARITY_CACHEDIR
mkdir $NXF_HOME
```

Finally, download and test the pipeline of choice using `-profile ku_sund_dangpu`. Note that normally you would run resource-intensive commands with SLURM, but in the case of nf-core pipelines you do not have to do this: SLURM is pre-configured as the resource manager within the `ku_sund_dangpu` profile. Just make sure that the pipeline is run within a tmux session.

```bash
nextflow run nf-core/rnaseq -profile test,ku_sund_dangpu
```
@@ -0,0 +1,45 @@
# nf-core/configs Mana (at University of Hawaii at Manoa) Configuration

To use, run the pipeline with `-profile mana`. It will use the following parameters for Mana (UHM HPCC):

- Load Singularity and use it as the default container technology
- Set up a container cache directory in your home directory (`~/.singularity_images_cache`)
- Select appropriate queues (currently: `shared,exclusive,kill-shared,kill-exclusive`)
- Set the maximum available resources (as of 2022-09-02):
  - CPUs: 96
  - Memory: 400.GB
  - Time: 72.h
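
Once Nextflow is available (see the pre-requisites below), a run with this profile is a single command. A minimal sketch, with an illustrative pipeline name:

```bash
# Illustrative sketch: run the test profile of an nf-core pipeline on Mana
nextflow run nf-core/rnaseq -profile test,mana --outdir results
```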

## Pre-requisites

In order to run an nf-core pipeline on Mana, you will need to set up Nextflow in your environment.
At the moment, Nextflow is not available as a module (but might be in the future).

### Install Nextflow in a conda environment

Before we start, we will need to work on an interactive node (currently, Mana doesn't let you execute any program on the login node):

```bash
# Request an interactive sandbox node for 30 min
srun --pty -t 30 -p sandbox /bin/bash
```

To set up Nextflow on your account, follow these steps.

```bash
# Load the latest anaconda3 module
module load lang/Anaconda3/2022.05

# Initialize environment
. $(conda info --base)/etc/profile.d/conda.sh

# Install nextflow (here in the base environment, but you can create a new one if you'd like)
conda install -c bioconda nextflow
```

If you want these settings to be persistent, you can add the first two commands to your `.bash_profile` file like this:

```bash
echo "module load lang/Anaconda3/2022.05" >> ~/.bash_profile
# prefix with '. ' so the conda setup script is sourced at login
echo ". $(conda info --base)/etc/profile.d/conda.sh" >> ~/.bash_profile
```
@@ -0,0 +1,11 @@
# nf-core/configs: ATAC-Seq Specific Configuration - Sheffield Bioinformatics Core Facility ShARC

Specific configuration for the [nf-co.re/atacseq](https://nf-co.re/atacseq) pipeline

## Usage

To use, run nextflow with the pipeline using `-profile sbc_sharc` (note the single hyphen).

This will download and launch the atacseq-specific [`sbc_sharc.config`](../../../conf/pipeline/atacseq/sbc_sharc.config) which has been pre-configured with a setup suitable for the [University of Sheffield ShARC cluster](https://docs.hpc.shef.ac.uk/en/latest/index.html) and will automatically load the appropriate pipeline-specific configuration file.

Example: `nextflow run nf-core/atacseq -profile sbc_sharc`
@@ -0,0 +1,11 @@
# nf-core/configs: ChIP-Seq Specific Configuration - Sheffield Bioinformatics Core Facility ShARC

Specific configuration for the [nf-co.re/chipseq](https://nf-co.re/chipseq) pipeline

## Usage

To use, run nextflow with the pipeline using `-profile sbc_sharc` (note the single hyphen).

This will download and launch the chipseq-specific [`sbc_sharc.config`](../../../conf/pipeline/chipseq/sbc_sharc.config) which has been pre-configured with a setup suitable for the [University of Sheffield ShARC cluster](https://docs.hpc.shef.ac.uk/en/latest/index.html) and will automatically load the appropriate pipeline-specific configuration file.

Example: `nextflow run nf-core/chipseq -profile sbc_sharc`
@@ -0,0 +1,19 @@
# nf-core/configs: AWS Tower Demultiplex specific configuration

Extra specific configuration for the demultiplex pipeline

## Usage

To use, run the pipeline with `-profile aws_tower`.

This will download and launch the demultiplex-specific [`aws_tower.config`](../../../conf/pipeline/demultiplex/aws_tower.config) which has been pre-configured with a setup suitable for AWS Batch through Tower.

Example: `nextflow run nf-core/demultiplex -profile aws_tower`

## demultiplex specific configurations for AWS Tower

Specific configurations for AWS Tower have been made for demultiplex.

### General profiles

- The general AWS Tower profile runs with default nf-core/demultiplex parameters, but with modifications to account for file transfer speed and optimised bases2fastq resources.
@@ -0,0 +1,23 @@
# nf-core/configs: CRG proteinfold specific configuration

Extra specific configuration for the proteinfold pipeline

## Usage

To use, run the pipeline with `-profile crg`.

This will download and launch the proteinfold-specific [`crg.config`](../../../conf/pipeline/proteinfold/crg.config) which has been pre-configured with a setup suitable for the CRG SGE cluster.

Example: `nextflow run nf-core/proteinfold -profile crg`

## proteinfold specific configurations for CRG

Specific configurations for CRG have been made for proteinfold.

### General profiles

<!-- TODO -->

### Contextual profiles

<!-- TODO -->
@@ -0,0 +1,11 @@
# nf-core/configs: RNA-Seq Specific Configuration - Sheffield Bioinformatics Core Facility ShARC

Specific configuration for the [nf-co.re/rnaseq](https://nf-co.re/rnaseq) pipeline

## Usage

To use, run nextflow with the pipeline using `-profile sbc_sharc` (note the single hyphen).

This will download and launch the rnaseq-specific [`sbc_sharc.config`](../../../conf/pipeline/rnaseq/sbc_sharc.config) which has been pre-configured with a setup suitable for the [University of Sheffield ShARC cluster](https://docs.hpc.shef.ac.uk/en/latest/index.html) and will automatically load the appropriate pipeline-specific configuration file.

Example: `nextflow run nf-core/rnaseq -profile sbc_sharc`
@@ -0,0 +1,11 @@
# nf-core/configs: Sarek Specific Configuration - Sheffield Bioinformatics Core Facility ShARC

Specific configuration for the [nf-co.re/sarek](https://nf-co.re/sarek) pipeline

## Usage

To use, run nextflow with the pipeline using `-profile sbc_sharc` (note the single hyphen).

This will download and launch the sarek-specific [`sbc_sharc.config`](../../../conf/pipeline/sarek/sbc_sharc.config) which has been pre-configured with a setup suitable for the [University of Sheffield ShARC cluster](https://docs.hpc.shef.ac.uk/en/latest/index.html) and will automatically load the appropriate pipeline-specific configuration file.

Example: `nextflow run nf-core/sarek -profile sbc_sharc`
@@ -0,0 +1,40 @@
# nf-core/configs: Sheffield Bioinformatics Core Facility ShARC Configuration

## Using the SBC_ShARC Institutional Configuration Profile

To use [`sbc_sharc.config`](../conf/sbc_sharc.config), run nextflow with an nf-core pipeline using `-profile sbc_sharc` (note the single hyphen).

This will download and launch [`sbc_sharc.config`](../conf/sbc_sharc.config) which has been pre-configured with a setup suitable for the ShARC cluster and will automatically load the appropriate pipeline-specific configuration file.
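
For example, a minimal sketch of a run on ShARC (the pipeline and input options are illustrative):

```bash
# Illustrative sketch: run nf-core/rnaseq on ShARC with the institutional profile
nextflow run nf-core/rnaseq -profile sbc_sharc --input samplesheet.csv --outdir results
```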

The following nf-core pipelines have been successfully configured for use on the [University of Sheffield ShARC cluster](https://docs.hpc.shef.ac.uk/en/latest/index.html):

- [nf-co.re/atacseq](https://nf-co.re/atacseq)
- [nf-co.re/chipseq](https://nf-co.re/chipseq)
- [nf-co.re/rnaseq](https://nf-co.re/rnaseq)
- [nf-co.re/sarek](https://nf-co.re/sarek)

When using [`sbc_sharc.config`](../conf/sbc_sharc.config) with the pipelines listed above, the appropriate configuration file from the list below will be loaded automatically:

- [atacseq sbc_sharc.config](../conf/pipeline/atacseq/sbc_sharc.config)
- [chipseq sbc_sharc.config](../conf/pipeline/chipseq/sbc_sharc.config)
- [rnaseq sbc_sharc.config](../conf/pipeline/rnaseq/sbc_sharc.config)
- [sarek sbc_sharc.config](../conf/pipeline/sarek/sbc_sharc.config)

The [`sbc_sharc.config`](../conf/sbc_sharc.config) configuration file might work with other nf-core pipelines as it stands, but we cannot guarantee they will run without issue. We will continue to create, test and optimise configurations for new pipelines in the future.

## A Note on Singularity Containers

The [`sbc_sharc.config`](../conf/sbc_sharc.config) configuration file supports running nf-core pipelines with Singularity containers; Singularity images will be downloaded automatically before execution of the pipeline.

When you run nextflow for the first time, Singularity will create a hidden directory `.singularity` in your `$HOME` directory `/home/$USER`, which has very limited (10 GB) space available. It is therefore a good idea to create a directory somewhere else (e.g., `/data/$USER`) with more room and link the locations. To do this, run the following series of commands:

```shell
# change directory to $HOME
cd $HOME

# make the directory that will be linked to
mkdir /data/$USER/.singularity

# link the new directory with the existing one
ln -s /data/$USER/.singularity .singularity
```
@@ -0,0 +1,7 @@
# nf-core/configs: TIGEM configuration

To use, run the pipeline with `-profile tigem`. This will download and launch the `tigem.config` which has been pre-configured with a setup suitable for the TIGEM personal biocluster.

---

This configuration profile can be used on TIGEM clusters with the pre-installed SLURM job scheduling system. An additional parameter is `google.zone`, which allows downloading data from GCP from a specific zone. It should not interfere with any local or other AWS configuration.
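
As a sketch, the Google zone could be overridden for a particular run by passing a small custom config via `-c`; the pipeline name and zone value below are illustrative:

```bash
# Illustrative sketch: run with the TIGEM profile and override the Google zone
echo "google.zone = 'europe-west4'" > my_zone.config
nextflow run nf-core/rnaseq -profile test,tigem -c my_zone.config --outdir results
```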
@@ -0,0 +1,51 @@
# nf-core/configs: Myriad Configuration

All nf-core pipelines have been successfully configured for use on the Myriad cluster at [University College London](https://www.rc.ucl.ac.uk/docs/Clusters/Myriad/).

To use, run the pipeline with `-profile ucl_myriad`. This will download and launch the [`ucl_myriad.config`](../conf/ucl_myriad.config) which has been pre-configured with a setup suitable for the Myriad cluster. Using this profile, a docker image containing all of the required software will be downloaded and converted to a Singularity image before execution of the pipeline.

## Using Nextflow on Myriad

Before running the pipeline you will need to install and configure Nextflow and Singularity.

### Singularity

This can be done with the following commands:

```bash
## Load Singularity environment modules - these commands can be placed in your ~/.bashrc also
module add java/openjdk-11/11.0.1
module add singularity-env/1.0.0
```

Then set the correct configuration of the cache directories, where <YOUR_ID> is replaced with your username, which you can find by entering `whoami` into the terminal once you are logged into Myriad. Once you have added your credentials, save these lines into your `.bashrc` file in your home directory (e.g. /home/<YOUR_ID>/.bashrc):

```bash
# Set all the Singularity cache dirs to Scratch
export SINGULARITY_CACHEDIR=/home/<YOUR_ID>/Scratch/.singularity/
export SINGULARITY_TMPDIR=/home/<YOUR_ID>/Scratch/.singularity/tmp
export SINGULARITY_LOCALCACHEDIR=/home/<YOUR_ID>/Scratch/.singularity/localcache
export SINGULARITY_PULLFOLDER=/home/<YOUR_ID>/Scratch/.singularity/pull

# Bind your Scratch directory so it is accessible from inside the container
export SINGULARITY_BINDPATH=/scratch/scratch/<YOUR_ID>
```

### Nextflow

Download the latest release of Nextflow. Warning: the self-update line should update to the latest version, but sometimes it does not, so please check which release is the latest (https://github.com/nextflow-io/nextflow/releases); you can then set the version manually with `NXF_VER=XX.XX.X`.

```bash
## Download Nextflow-all
curl -s https://get.nextflow.io | bash
NXF_VER=22.10.0
nextflow -self-update
chmod a+x nextflow
mv nextflow ~/bin/nextflow
```

Then make sure that your `~/bin` directory is on your PATH, by placing the following line in your `.bashrc`:

```bash
export PATH=$PATH:/home/<YOUR_ID>/bin
```
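
With Singularity and Nextflow set up as above, an nf-core pipeline can then be launched with the Myriad profile. A minimal sketch, with an illustrative pipeline name:

```bash
# Illustrative sketch: run the test profile of an nf-core pipeline on Myriad
nextflow run nf-core/rnaseq -profile test,ucl_myriad --outdir results
```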
@@ -0,0 +1,13 @@
/*
 * -------------------------------------------------
 * nfcore/atacseq custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/atacseq folder and imported
 * under a profile name here.
 */

profiles {
    sbc_sharc { includeConfig "${params.custom_config_base}/conf/pipeline/atacseq/sbc_sharc.config" }
}
@@ -0,0 +1,13 @@
/*
 * -------------------------------------------------
 * nfcore/chipseq custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/chipseq folder and imported
 * under a profile name here.
 */

profiles {
    sbc_sharc { includeConfig "${params.custom_config_base}/conf/pipeline/chipseq/sbc_sharc.config" }
}
@@ -0,0 +1,13 @@
/*
 * -------------------------------------------------
 * nfcore/demultiplex custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/demultiplex folder and imported
 * under a profile name here.
 */

profiles {
    aws_tower { includeConfig "${params.custom_config_base}/conf/pipeline/demultiplex/aws_tower.config" }
}
@@ -0,0 +1,13 @@
/*
 * -------------------------------------------------
 * nfcore/proteinfold custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/proteinfold folder and imported
 * under a profile name here.
 */

profiles {
    crg { includeConfig "${params.custom_config_base}/conf/pipeline/proteinfold/crg.config" }
}