diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 38ddb4f..3abcba6 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -43,6 +43,7 @@ jobs:
           - "cbe"
           - "ccga_dx"
           - "ccga_med"
+          - "cedars"
           - "cfc"
           - "cfc_dev"
           - "cheaha"
diff --git a/README.md b/README.md
index 55fa3b9..398acca 100644
--- a/README.md
+++ b/README.md
@@ -99,6 +99,7 @@ Currently documentation is available for the following systems:
 - [CBE](docs/cbe.md)
 - [CCGA_DX](docs/ccga_dx.md)
 - [CCGA_MED](docs/ccga_med.md)
+- [Cedars-Sinai](docs/cedars.md)
 - [CFC](docs/cfc.md)
 - [CHEAHA](docs/cheaha.md)
 - [Computerome](docs/computerome.md)
diff --git a/conf/cedars.config b/conf/cedars.config
new file mode 100644
index 0000000..d9b9027
--- /dev/null
+++ b/conf/cedars.config
@@ -0,0 +1,26 @@
+// Profile config names for nf-core/configs
+params {
+    config_profile_description = 'Cedars-Sinai Medical Center HPC Profile'
+    config_profile_contact = 'Alex Rajewski (@rajewski)'
+    config_profile_url = 'https://www.cedars-sinai.edu/research/cores/informatics-computing/resources.html'
+    max_memory = 90.GB
+    max_cpus = 10
+    max_time = 240.h
+}
+
+// Specify the queueing system
+executor {
+    name = "sge"
+}
+
+process {
+    penv = 'smp'
+    beforeScript =
+    """
+    module load 'singularity/3.6.0'
+    """
+}
+
+singularity {
+    enabled = true
+}
diff --git a/conf/pipeline/eager/eva.config b/conf/pipeline/eager/eva.config
index 63aaa86..b563828 100644
--- a/conf/pipeline/eager/eva.config
+++ b/conf/pipeline/eager/eva.config
@@ -69,6 +69,11 @@ process {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
+
+    withName: fastqc_after_clipping {
+        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
+    }
 
     withName: adapter_removal {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
@@ -261,6 +266,11 @@ profiles {
            clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
            errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
        }
+
+       withName: fastqc_after_clipping {
+           clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+           errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
+       }
 
        withName: adapter_removal {
            clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
@@ -441,6 +451,11 @@ profiles {
            clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
            errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
        }
+
+       withName: fastqc_after_clipping {
+           clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+           errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
+       }
 
       withName: adapter_removal {
           clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
diff --git a/conf/pipeline/sarek/munin.config b/conf/pipeline/sarek/munin.config
index 6a73b8c..77f76f0 100644
--- a/conf/pipeline/sarek/munin.config
+++ b/conf/pipeline/sarek/munin.config
@@ -23,7 +23,7 @@ params {
 // Specific nf-core/sarek process configuration
 process {
     withLabel:sentieon {
-        module = {params.sentieon ? 'sentieon/202112.00' : null}
+        module = {params.sentieon ? 'sentieon/202112.02' : null}
         container = {params.sentieon ? null : container}
     }
 }
diff --git a/docs/cedars.md b/docs/cedars.md
new file mode 100644
index 0000000..2abaf5d
--- /dev/null
+++ b/docs/cedars.md
@@ -0,0 +1,7 @@
+# Cedars-Sinai Medical Center HPC
+
+- You will need HPC access from EIS, which can be requested in the Service Center.
+- You will need to load the `nextflow` module on the HPC before running any pipelines (`module load nextflow`); this should automatically load Java as well.
+- Run pipelines with `-profile cedars`.
+- By default this config does not specify a submission queue, so jobs will go to `all.q`; the memory and CPU limits have been set accordingly.
+- We highly recommend caching Singularity images so they are re-used across runs rather than pulled each time; set the cache location with the `$NXF_SINGULARITY_CACHEDIR` environment variable in your `.bash_profile` or `.bashrc`.
diff --git a/nfcore_custom.config b/nfcore_custom.config
index 9b088b1..939fd3a 100644
--- a/nfcore_custom.config
+++ b/nfcore_custom.config
@@ -24,6 +24,7 @@ profiles {
     cbe      { includeConfig "${params.custom_config_base}/conf/cbe.config" }
     ccga_dx  { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" }
     ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" }
+    cedars   { includeConfig "${params.custom_config_base}/conf/cedars.config" }
     cfc      { includeConfig "${params.custom_config_base}/conf/cfc.config" }
     cfc_dev  { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" }
     cheaha   { includeConfig "${params.custom_config_base}/conf/cheaha.config" }
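For new users, the steps in `docs/cedars.md` combine into a short first-run session. The sketch below is illustrative only: the cache path is a placeholder you can change, and `nf-core/rnaseq` with its `--input`/`--outdir` arguments stands in for whichever pipeline you actually run.

```bash
# Persist the Singularity image cache across runs (path is only an example).
mkdir -p "$HOME/.singularity-cache"
echo 'export NXF_SINGULARITY_CACHEDIR=$HOME/.singularity-cache' >> ~/.bash_profile
source ~/.bash_profile

# Load Nextflow; Java should be loaded automatically alongside it.
module load nextflow

# Launch a pipeline against the shared profile; conf/cedars.config caps
# per-process requests at 10 CPUs, 90 GB memory, and 240 h to suit `all.q`.
nextflow run nf-core/rnaseq -profile cedars --input samplesheet.csv --outdir results
```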