Merge branch 'master' into master

Commit: ed4436c1a6
18 changed files with 88 additions and 72 deletions
README.md

@@ -22,7 +22,7 @@ The Nextflow [`-c`](https://www.nextflow.io/docs/latest/config.html) parameter c
 The config files hosted in this repository define a set of parameters which are specific to compute environments at different Institutions but generic enough to be used with all nf-core pipelines.
 
-All nf-core pipelines inherit the functionality provided by Nextflow, and as such custom config files can contain parameters/definitions that are available to both. For example, if you have the ability to use [Singularity](https://singularity.lbl.gov/) on your HPC you can add and customise the Nextflow [`singularity`](https://www.nextflow.io/docs/latest/config.html#scope-singularity) scope in your config file. Similarly, you can define a Nextflow [`executor`](https://www.nextflow.io/docs/latest/executor.html) depending on the job submission process available on your cluster. In contrast, the `params` section in your custom config file will typically define parameters that are specific to nf-core pipelines.
+All nf-core pipelines inherit the functionality provided by Nextflow, and as such custom config files can contain parameters/definitions that are available to both. For example, if you have the ability to use [Singularity](https://singularity.lbl.gov/) on your HPC you can add and customize the Nextflow [`singularity`](https://www.nextflow.io/docs/latest/config.html#scope-singularity) scope in your config file. Similarly, you can define a Nextflow [`executor`](https://www.nextflow.io/docs/latest/executor.html) depending on the job submission process available on your cluster. In contrast, the `params` section in your custom config file will typically define parameters that are specific to nf-core pipelines.
 
 You should be able to get a good idea as to how other people are customising the execution of their nf-core pipelines by looking at some of the config files in [`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf).
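[Editor's note: as a rough illustration of the kind of file this paragraph describes, a minimal institutional config might look like the sketch below. The scheduler, queue, module name, and resource caps are hypothetical placeholders, not values from this repository.]

```groovy
// Minimal sketch of a custom institutional config (all values are placeholders).
singularity {
  enabled = true                            // use the cluster's Singularity installation
}

process {
  executor = 'slurm'                        // match the cluster's job scheduler
  queue = 'standard'                        // hypothetical default queue
  beforeScript = 'module load singularity'  // assumed environment-module setup
}

params {
  // nf-core-specific caps so no single task exceeds the cluster's limits
  max_memory = 128.GB
  max_cpus = 16
  max_time = 48.h
}
```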
@@ -80,8 +80,9 @@ You will have to create a [Markdown document](https://www.markdownguide.org/gett
 See [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs) for examples.
 
-Currently documentation is available for the following clusters:
+Currently documentation is available for the following systems:
 
+* [AWSBATCH](docs/awsbatch.md)
 * [BIGPURPLE](docs/bigpurple.md)
 * [BINAC](docs/binac.md)
 * [CBE](docs/cbe.md)
@@ -91,11 +92,11 @@ Currently documentation is available for the following clusters:
 * [CRICK](docs/crick.md)
 * [CZBIOHUB_AWS](docs/czbiohub.md)
 * [CZBIOHUB_AWS_HIGHPRIORITY](docs/czbiohub.md)
+* [DENBI_QBIC](docs/denbi_qbic.md)
 * [GENOUEST](docs/genouest.md)
 * [GIS](docs/gis.md)
 * [HEBBE](docs/hebbe.md)
 * [KRAKEN](docs/kraken.md)
-* [MENDEL](docs/mendel.md)
 * [MUNIN](docs/munin.md)
 * [PASTEUR](docs/pasteur.md)
 * [PHOENIX](docs/phoenix.md)
conf/awsbatch.config (new file, 14 lines)

@@ -0,0 +1,14 @@
+//Nextflow config file for running on AWS batch
+
+params {
+  config_profile_name = 'AWSBATCH'
+  config_profile_description = 'AWSBATCH Cloud Profile'
+  config_profile_contact = 'Alexander Peltzer (@apeltzer)'
+  config_profile_url = 'https://aws.amazon.com/batch/'
+}
+
+aws.region = params.awsregion
+process.executor = 'awsbatch'
+process.queue = params.awsqueue
+executor.awscli = '/home/ec2-user/miniconda/bin/aws'
+params.tracedir = './'
conf/binac.config

@@ -10,14 +10,15 @@ singularity {
 }
 
 process {
-  beforeScript = 'module load devel/singularity/3.0.3'
+  beforeScript = 'module load devel/singularity/3.4.2'
   executor = 'pbs'
   queue = 'short'
+  process.queue = { task.memory > 128.GB ? 'smp': task.time <= 20.m ? 'tiny' : task.time <= 48.h ? 'short' : task.time <= 168.h ? 'short' : 'long'}
 }
 
 params {
   igenomes_base = '/nfsmounts/igenomes'
-  max_memory = 128.GB
+  max_memory = 1000.GB
   max_cpus = 28
   max_time = 48.h
 }
conf/cbe.config

@@ -8,25 +8,16 @@ params {
 process {
   executor = 'slurm'
   module = 'singularity/3.2.1'
-  queue = 'c'
+  queue = { task.memory <= 170.GB ? 'c' : 'm' }
+  clusterOptions = { task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' }
 }
 
 singularity.enabled = true
 
 params {
-  target_qos = 'medium'
-  params.max_cpus = 36
-  params.max_memory = 170.GB
-  igenomesIgnore = true
-}
-
-if (params.target_qos == 'short') {
-  params.max_time = 8.h
-  process.clusterOptions = '--qos short'
-} else if (params.target_qos == 'medium') {
-  params.max_time = 2.d
-  process.clusterOptions = '--qos medium'
-} else {
   params.max_time = 14.d
-  process.clusterOptions = '--qos long'
+  params.max_cpus = 36
+  params.max_memory = 4.TB
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
 }
conf/cfc.config

@@ -11,7 +11,7 @@ singularity {
 }
 
 process {
-  beforeScript = 'module load qbic/singularity_slurm/3.0.3'
+  beforeScript = 'module load devel/singularity/3.4.2'
   executor = 'slurm'
 }
 
conf/czbiohub_aws.config

@@ -55,7 +55,8 @@ params {
   awsregion = "us-west-2"
   awsqueue = "nextflow"
 
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
 
   fc_extra_attributes = 'gene_name'
   fc_group_features = 'gene_id'
conf/denbi_qbic.config (new file, 26 lines)

@@ -0,0 +1,26 @@
+//Profile config names for nf-core/configs
+params {
+  config_profile_description = 'de.NBI cluster profile provided by nf-core/configs.'
+  config_profile_contact = 'Alexander Peltzer (@apeltzer)'
+  config_profile_url = 'https://cloud.denbi.de/'
+}
+
+singularity {
+  enabled = true
+}
+
+process {
+  executor = 'pbs'
+  queue = 'batch'
+}
+
+params {
+  max_memory = 512.GB
+  max_cpus = 28
+  max_time = 960.h
+}
+
+weblog {
+  enabled = true
+  url = 'https://services.qbic.uni-tuebingen.de/flowstore/workflows'
+}
@@ -16,7 +16,8 @@ process {
 }
 
 params {
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
   max_memory = 750.GB
   max_cpus = 80
   max_time = 336.h
@@ -18,7 +18,8 @@ process {
 }
 
 params {
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
   saveReference = true
   max_memory = 64.GB
   max_cpus = 20
conf/mendel.config (deleted, 23 lines)

@@ -1,23 +0,0 @@
-//Profile config names for nf-core/configs
-params {
-  config_profile_description = 'GMI MENDEL cluster profile provided by nf-core/configs'
-  config_profile_contact = 'Patrick Hüther (@phue)'
-  config_profile_url = 'http://www.gmi.oeaw.ac.at/'
-}
-
-manifest {
-  nextflowVersion = '>=19.01.0'
-}
-
-process {
-  beforeScript = 'module load Miniconda3/4.6.7'
-  executor = 'pbspro'
-  clusterOptions = { "-P $params.project" }
-}
-
-params {
-  max_cpus = 32
-  max_memory = 128.GB
-  max_time = 192.h
-  igenomesIgnore = true
-}
@@ -16,7 +16,8 @@ process {
 }
 
 params {
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
   max_memory = 256.GB
   max_cpus = 28
   max_time = 24.h
conf/shh.config

@@ -14,7 +14,7 @@ singularity {
 
 process {
   executor = 'slurm'
-  queue = 'short'
+  queue = { task.memory > 756.GB ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
 }
 
 executor {
@@ -22,9 +22,9 @@ executor {
 }
 
 params {
-  max_memory = 256.GB
+  max_memory = 2.TB
   max_cpus = 32
-  max_time = 2.h
+  max_time = 720.h
   //Illumina iGenomes reference file path
   igenomes_base = "/projects1/public_data/igenomes/"
 }
docs/awsbatch.md (new file, 4 lines)

@@ -0,0 +1,4 @@
+# nf-core/configs: awsbatch Configuration
+
+To be used with `awsbatch`.
+Custom queue and region can be entered with `params.awsqueue` and `params.awsregion` respectively.
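[Editor's note: the profile reads these two values rather than defining them, so a run must supply both. A hypothetical launch-side snippet; the queue name is a placeholder and the region simply mirrors the czbiohub example elsewhere in this commit.]

```groovy
// Hypothetical launch parameters for the awsbatch profile; neither value
// ships with the profile itself.
params {
  awsqueue = 'my-batch-queue'   // AWS Batch job queue to submit to
  awsregion = 'us-west-2'       // region that hosts the queue
}
```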
docs/denbi_qbic.md (new file, 8 lines)

@@ -0,0 +1,8 @@
+# nf-core/configs: de.NBI QBIC Configuration
+
+All nf-core pipelines have been successfully configured for use on the de.NBI Cloud cluster. This is a virtual cluster that has been set up using the [virtual cluster setup scripts](https://github.com/MaximilianHanussek/virtual_cluster_local_ips).
+
+To use, run the pipeline with `-profile denbi_qbic`. This will download and launch the [`denbi_qbic.config`](../conf/denbi_qbic.config) which has been pre-configured with a setup suitable for the automatically created cluster. Using this profile, a Docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
+
+>NB: You will need an account to use the de.NBI Cluster in order to run the pipeline. If in doubt contact IT.
+>NB: Nextflow will need to submit the jobs via the job scheduler to the cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
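[Editor's note: the Docker-to-Singularity conversion described above is standard Nextflow behaviour once the Singularity engine is enabled, as in `denbi_qbic.config`. A sketch of the mechanism; the image reference is a hypothetical example, not taken from this commit.]

```groovy
// With the Singularity engine on, a plain Docker image reference is pulled
// and converted to a Singularity image before tasks run.
singularity.enabled = true
process.container = 'nfcore/rnaseq:1.3'   // hypothetical example image
```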
docs/mendel.md (deleted, 18 lines)

@@ -1,18 +0,0 @@
-# nf-core/configs: MENDEL Configuration
-
-All nf-core pipelines have been successfully configured for use on the MENDEL CLUSTER at the Gregor Mendel Institute (GMI).
-
-To use, run the pipeline with `-profile conda,mendel`. This will download and launch the [`mendel.config`](../conf/mendel.config) which has been pre-configured with a setup suitable for the MENDEL cluster. A Conda environment will be created automatically and software dependencies will be resolved via [bioconda](https://bioconda.github.io/).
-
-Before running the pipeline you will need to load Conda using the environment module system on MENDEL. You can do this by issuing the commands below:
-
-```bash
-## Load Nextflow and Conda environment modules
-module purge
-module load Nextflow
-module load Miniconda/4.6.7
-```
-
->NB: You will need an account to use the HPC cluster in order to run the pipeline. If in doubt contact the HPC team.
-
->NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact the HPC team.
docs/shh.md

@@ -10,7 +10,8 @@ To use, run the pipeline with `-profile shh`. This will download and launch the
 however this will likely change to a read-only directory in the future that will be managed by IT.
 
-Note that **the configuration file is currently optimised for `nf-core/eager`**. It will submit to the short queue but with a walltime of 2 hours.
+This configuration will automatically choose the correct SLURM queue (`short`, `medium`, `long`, `supercruncher`) depending on the time and memory required by each process.
+Please note that there is no `supercruncher` queue on CDAG.
 
 >NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact IT.
 
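[Editor's note: the routing described here is a Nextflow dynamic directive, evaluated once per task. Restating the rule from the shh config change earlier in this commit, reformatted only for readability:]

```groovy
// Per-task queue selection, as introduced in the shh config above.
process {
  queue = { task.memory > 756.GB ? 'supercruncher' :
            task.time <= 2.h ? 'short' :
            task.time <= 48.h ? 'medium' : 'long' }
}
// e.g. a task requesting 10.GB and 24.h falls through to 'medium'.
```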
docs/uppmax.md

@@ -26,6 +26,12 @@ If running on Bianca, you will have no internet connection and these configs wil
 Please use the nf-core helper tool on a different system to download the required pipeline files, and transfer them to bianca.
 This helper tool bundles the config files in this repo together with the pipeline files, so the profile will still be available.
 
+Note that Bianca only allocates 7 GB memory per core so the max memory needs to be limited:
+
+```bash
+--max_memory "112GB"
+```
+
 ## Getting more memory
 If your nf-core pipeline run is running out of memory, you can run on a fat node with more memory using the following nextflow flags:
 
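[Editor's note: the 112 GB figure is consistent with a 16-core node at 7 GB per core (16 x 7 GB = 112 GB); the 16-core node size is an assumption, not stated in the commit. The command-line flag above is equivalent to setting the cap in a config file:]

```groovy
// Config-file form of the --max_memory flag shown above; 112 GB assumes
// a full 16-core Bianca node at 7 GB per core.
params.max_memory = 112.GB
```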
nfcore_custom.config

@@ -12,6 +12,7 @@ params.custom_config_version = 'master'
 params.custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
 
 profiles {
+  awsbatch { includeConfig "${params.custom_config_base}/conf/awsbatch.config" }
   bigpurple { includeConfig "${params.custom_config_base}/conf/bigpurple.config" }
   binac { includeConfig "${params.custom_config_base}/conf/binac.config" }
   cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" }
@@ -22,11 +23,11 @@ profiles {
   czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" }
   czbiohub_aws_highpriority { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config"; includeConfig "${params.custom_config_base}/conf/czbiohub_aws_highpriority.config" }
   genotoul { includeConfig "${params.custom_config_base}/conf/genotoul.config" }
+  denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" }
   genouest { includeConfig "${params.custom_config_base}/conf/genouest.config" }
   gis { includeConfig "${params.custom_config_base}/conf/gis.config" }
   hebbe { includeConfig "${params.custom_config_base}/conf/hebbe.config" }
   kraken { includeConfig "${params.custom_config_base}/conf/kraken.config" }
-  mendel { includeConfig "${params.custom_config_base}/conf/mendel.config" }
   munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
   pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
   phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
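[Editor's note: for context, nf-core pipelines pull this file in from their own `nextflow.config`, roughly as sketched below; the warning text is illustrative.]

```groovy
// Load institutional profiles, tolerating offline runs where the remote
// config cannot be fetched.
try {
  includeConfig "${params.custom_config_base}/nfcore_custom.config"
} catch (Exception e) {
  System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config")
}
```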