diff --git a/conf/mpcdf.config b/conf/mpcdf.config index 481ebc4..c6c3c9c 100644 --- a/conf/mpcdf.config +++ b/conf/mpcdf.config @@ -1,24 +1,28 @@ params { config_profile_description = 'MPCDF HPC profiles (unoffically) provided by nf-core/configs.' config_profile_contact = 'James Fellows Yates (@jfy133)' - config_profile_url = 'https://www.mpcdf.mpg.de/services/computing' + config_profile_url = 'https://www.mpcdf.mpg.de/services/supercomputing' } profiles { cobra { - // Does not have singularity! Conda module must be used, but it is - // recommended to set NXF_CONDA_CACHEDIR var in ~/.bash{_profile,rc} - // To create common cache dir process { - beforeScript = 'module load anaconda/3/2020.02' + beforeScript = 'module load singularity' executor = 'slurm' } executor { queueSize = 8 } + + // Set $NXF_SINGULARITY_CACHEDIR in your ~/.bash_profile + // to stop downloading the same image for every run + singularity { + enabled = true + autoMounts = true + } params { config_profile_description = 'MPCDF cobra profile (unofficially) provided by nf-core/configs.' @@ -28,18 +32,22 @@ profiles { } } raven { - // Does not have singularity! Conda module must be used, but it is - // recommended to set NXF_CONDA_CACHEDIR var in ~/.bash{_profile,rc} - // to create common cache dir process { - beforeScript = 'module load anaconda/3/2020.02' + beforeScript = 'module load singularity' executor = 'slurm' } executor { queueSize = 8 } + + // Set $NXF_SINGULARITY_CACHEDIR in your ~/.bash_profile + // to stop downloading the same image for every run + singularity { + enabled = true + autoMounts = true + } params { config_profile_description = 'MPCDF raven profile (unofficially) provided by nf-core/configs.' 
@@ -47,5 +55,8 @@ profiles { max_cpus = 192 max_time = 24.h } + } + debug { + cleanup = false } } diff --git a/docs/mpcdf.md b/docs/mpcdf.md index af16d76..73ed52f 100644 --- a/docs/mpcdf.md +++ b/docs/mpcdf.md @@ -4,18 +4,16 @@ All nf-core pipelines have been successfully configured for use on the HPCs at [ > :warning: these profiles are not officially supported by the MPCDF. -To run Nextflow, the `jdk` module must be loaded. To use the nf-core profile(s), run the pipeline with `-profile ,mpcdf`. +To run Nextflow, the `jdk` module must be loaded. To use the nf-core profile(s), run the pipeline with `-profile mpcdf,<cluster>`. -Currently the following clusters are supported: cobra, raven +Currently profiles for the following clusters are supported: `cobra`, `raven` + +All profiles use `singularity` as the corresponding container engine. To prevent repeatedly downloading the same singularity image for every pipeline run, for all profiles we recommend specifying a cache location in your `~/.bash_profile` with the `$NXF_SINGULARITY_CACHEDIR` bash variable. >NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. Check the [MPCDF documentation](https://www.mpcdf.mpg.de/services/computing). ## cobra -Cobra does not currently support singularity, therefore the anaconda/module is loaded for each process. - -Due to this, we also recommend setting the `$NXF_CONDA_CACHEDIR` to a location of your choice to store all environments (so to prevent nextflow building the environment on every run). - To use: `-profile cobra,mpcdf` Sets the following parameters: @@ -31,10 +29,6 @@ Sets the following parameters: ## raven -Raven does not currently support singularity, therefore `module load anaconda/3/2020.02` is loaded for each process.
- -Due to this, we also recommend setting the `$NXF_CONDA_CACHEDIR` to a location of your choice to store all environments (so to prevent nextflow building the environment on every run). - To use: `-profile raven,mpcdf` Sets the following parameters: