1
0
Fork 0
mirror of https://github.com/MillironX/nf-configs.git synced 2024-11-24 09:09:56 +00:00

Merge branch 'master' into hasta

This commit is contained in:
Mei Wu 2021-09-27 16:12:23 +02:00 committed by GitHub
commit a8a2027f70
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
11 changed files with 192 additions and 52 deletions

View file

@ -15,52 +15,54 @@ jobs:
name: Run ${{ matrix.profile }} profile name: Run ${{ matrix.profile }} profile
needs: test_all_profiles needs: test_all_profiles
strategy: strategy:
matrix: matrix:
profile: profile:
- "abims" - 'abims'
- "aws_tower" - 'aws_tower'
- "awsbatch" - 'awsbatch'
- "bi" - 'bi'
- "bigpurple" - 'bigpurple'
- "binac" - 'binac'
- "biohpc_gen" - 'biohpc_gen'
- "cambridge" - 'cambridge'
- "cbe" - 'cbe'
- "ccga_dx" - 'ccga_dx'
- "ccga_med" - 'ccga_med'
- "cfc" - 'cfc'
- "cfc_dev" - 'cfc_dev'
- "crick" - 'computerome'
- "denbi_qbic" - 'crick'
- "ebc" - 'denbi_qbic'
- "eddie" - 'ebc'
- "eva" - 'eddie'
- "genotoul" - 'eva'
- "genouest" - 'genotoul'
- "gis" - 'genouest'
- "google" - 'gis'
- "hasta" - 'google'
- "hebbe" - 'hasta'
- "icr_davros" - 'hebbe'
- "ifb_core" - 'icr_davros'
- "imperial" - 'ifb_core'
- "imperial_mb" - 'imperial'
- "jax" - 'imperial_mb'
- "lugh" - 'jax'
- "mpcdf" - 'lugh'
- "munin" - 'mpcdf'
- "nu_genomics" - 'munin'
- "oist" - 'nu_genomics'
- "pasteur" - 'oist'
- "phoenix" - 'pasteur'
- "prince" - 'phoenix'
- "sanger" - 'prince'
- "seg_globe" - 'rosalind'
- "uct_hpc" - 'sanger'
- "uppmax" - 'seg_globe'
- "utd_ganymede" - 'uct_hpc'
- "utd_sysbio" - 'uppmax'
- "uzh" - 'utd_ganymede'
- 'utd_sysbio'
- 'uzh'
steps: steps:
- uses: actions/checkout@v1 - uses: actions/checkout@v1
- name: Install Nextflow - name: Install Nextflow

View file

@ -102,6 +102,7 @@ Currently documentation is available for the following systems:
* [CCGA_DX](docs/ccga_dx.md) * [CCGA_DX](docs/ccga_dx.md)
* [CCGA_MED](docs/ccga_med.md) * [CCGA_MED](docs/ccga_med.md)
* [CFC](docs/cfc.md) * [CFC](docs/cfc.md)
* [Computerome](docs/computerome.md)
* [CRICK](docs/crick.md) * [CRICK](docs/crick.md)
* [CZBIOHUB_AWS](docs/czbiohub.md) * [CZBIOHUB_AWS](docs/czbiohub.md)
* [DENBI_QBIC](docs/denbi_qbic.md) * [DENBI_QBIC](docs/denbi_qbic.md)
@ -122,6 +123,7 @@ Currently documentation is available for the following systems:
* [PASTEUR](docs/pasteur.md) * [PASTEUR](docs/pasteur.md)
* [PHOENIX](docs/phoenix.md) * [PHOENIX](docs/phoenix.md)
* [PRINCE](docs/prince.md) * [PRINCE](docs/prince.md)
* [ROSALIND](docs/rosalind.md)
* [SANGER](docs/sanger.md) * [SANGER](docs/sanger.md)
* [SEG_GLOBE](docs/seg_globe.md) * [SEG_GLOBE](docs/seg_globe.md)
* [UCT_HPC](docs/uct_hpc.md) * [UCT_HPC](docs/uct_hpc.md)

29
conf/computerome.config Normal file
View file

@ -0,0 +1,29 @@
//Profile config names for nf-core/configs
params {
    // Human-readable profile metadata displayed by nf-core pipelines
    config_profile_description = 'Computerome 2.0 cluster profile provided by nf-core/configs.'
    config_profile_contact = 'Marc Trunjer Kusk Nielsen (@marcmtk)'
    config_profile_url = 'https://www.computerome.dk/'
    // Must be supplied by the user with --project; used for accounting and the scratch path
    project = null
    // Singularity cache defaults to the project's scratch area
    // NOTE(review): if params.project is unset this resolves to /home/projects/null/scratch
    cache_dir = "/home/projects/$params.project/scratch"
    schema_ignore_params = "project,cache_dir,genomes,modules"
    //Thin nodes with 192GB and Fat nodes with ~1500GB. Torque should be allowed to handle this
    max_memory = 1500.GB
    max_cpus = 40
    //There is no max walltime on the cluster, but a week seems sensible if not directly specified
    max_time = 168.h
}

singularity {
    enabled = true
    autoMounts = true
    cacheDir = params.cache_dir
}

process {
    // Load the Singularity module and point Java's tmpdir at scratch before each task
    beforeScript = "module load tools singularity/3.8.0; export _JAVA_OPTIONS=-Djava.io.tmpdir=$params.cache_dir"
    executor = 'pbs'
    // Bill jobs to the project and submit under its group
    clusterOptions = "-A $params.project -W group_list=$params.project"
}

executor {
    // queueSize is an executor-scope setting (it was previously placed inside the
    // process scope, where Nextflow ignores it)
    queueSize = 2000
}

View file

@ -6,7 +6,10 @@ params {
profiles { profiles {
cobra { cobra {
cleanup = true
process { process {
beforeScript = 'module load singularity' beforeScript = 'module load singularity'
@ -31,8 +34,11 @@ profiles {
max_time = 24.h max_time = 24.h
} }
} }
raven { raven {
cleanup = true
process { process {
beforeScript = 'module load singularity' beforeScript = 'module load singularity'
executor = 'slurm' executor = 'slurm'
@ -56,6 +62,7 @@ profiles {
max_time = 24.h max_time = 24.h
} }
} }
debug { debug {
cleanup = false cleanup = false
} }

View file

@ -9,7 +9,7 @@ params {
// Specific nf-core/eager process configuration // Specific nf-core/eager process configuration
process { process {
beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1 -XX:+PrintCommandLineFlags"' beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
maxRetries = 2 maxRetries = 2
@ -90,6 +90,7 @@ process {
withName: additional_library_merge { withName: additional_library_merge {
clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" } clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
} }
withName: malt { withName: malt {
@ -157,7 +158,7 @@ profiles {
process { process {
beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1 -XX:+PrintCommandLineFlags"' beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
maxRetries = 2 maxRetries = 2
@ -238,6 +239,7 @@ profiles {
withName: additional_library_merge { withName: additional_library_merge {
clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" } clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
} }
withName: malt { withName: malt {
@ -247,7 +249,6 @@ profiles {
} }
withName:hostremoval_input_fastq { withName:hostremoval_input_fastq {
cpus = { check_max( 1, 'cpus' ) }
memory = { check_max( 32.GB * task.attempt, 'memory' ) } memory = { check_max( 32.GB * task.attempt, 'memory' ) }
time = 1440.h time = 1440.h
} }
@ -310,7 +311,7 @@ profiles {
process { process {
beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1 -XX:+PrintCommandLineFlags"' beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
maxRetries = 2 maxRetries = 2
@ -383,6 +384,7 @@ profiles {
withName: library_merge { withName: library_merge {
clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" } clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
memory = { check_max( 6.GB * task.attempt, 'memory' ) }
} }
withName: seqtype_merge { withName: seqtype_merge {
@ -391,10 +393,10 @@ profiles {
withName: additional_library_merge { withName: additional_library_merge {
clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" } clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
memory = { check_max( 6.GB * task.attempt, 'memory' ) }
} }
withName:hostremoval_input_fastq { withName:hostremoval_input_fastq {
cpus = { check_max( 1, 'cpus' ) }
memory = { check_max( 32.GB * task.attempt, 'memory' ) } memory = { check_max( 32.GB * task.attempt, 'memory' ) }
time = 1440.h time = 1440.h
} }

29
conf/rosalind.config Normal file
View file

@ -0,0 +1,29 @@
params {
    // Human-readable profile metadata displayed by nf-core pipelines
    config_profile_description = 'Kings College London Rosalind HPC'
    config_profile_contact = 'Theo Portlock'
    config_profile_url = 'https://www.rosalind.kcl.ac.uk/'
    // Resource ceilings for the default shared partition
    max_memory = 64.GB
    max_cpus = 16
    max_time = 24.h
    // Slurm partition; override with --partition <NAME>
    partition = 'shared'
    schema_ignore_params = 'partition,genomes,modules'
}

singularity {
    enabled = true
    autoMounts = true
}

// Explicitly disable Docker: containers run via Singularity on Rosalind.
// (Previously docker.enabled was set inside the singularity scope, which
// Nextflow reads as singularity.docker.enabled and silently ignores.)
docker {
    enabled = false
}

process {
    executor = 'slurm'
    maxRetries = 3
    // Route every job to the user-selected partition
    clusterOptions = { "--partition=$params.partition" }
}

executor {
    // Throttle submissions to one job per second
    submitRateLimit = '1 sec'
}

View file

@ -15,6 +15,10 @@ process{
queue = 'normal' queue = 'normal'
errorStrategy = { task.attempt <= 5 ? "retry" : "finish" } errorStrategy = { task.attempt <= 5 ? "retry" : "finish" }
process.maxRetries = 5 process.maxRetries = 5
withLabel:process_long {
queue = 'long'
max_time = 48.h
}
} }
executor{ executor{

View file

@ -1,7 +1,7 @@
# nf-core/configs: Cambridge HPC Configuration # nf-core/configs: Cambridge HPC Configuration
All nf-core pipelines have been successfully configured for use on the Cambridge HPC cluster at the [The University of Cambridge](https://www.cam.ac.uk/). All nf-core pipelines have been successfully configured for use on the Cambridge HPC cluster at the [The University of Cambridge](https://www.cam.ac.uk/).
To use, run the pipeline with `-profile cambridge`. This will download and launch the [`cambridge.config`](../conf/cambridge.config) whichhas been pre-configured To use, run the pipeline with `-profile cambridge`. This will download and launch the [`cambridge.config`](../conf/cambridge.config) which has been pre-configured
with a setup suitable for the Cambridge HPC cluster. Using this profile, either a docker image containing all of the required software will be downloaded, with a setup suitable for the Cambridge HPC cluster. Using this profile, either a docker image containing all of the required software will be downloaded,
and converted to a Singularity image or a Singularity image downloaded directly before execution of the pipeline. and converted to a Singularity image or a Singularity image downloaded directly before execution of the pipeline.

38
docs/computerome.md Normal file
View file

@ -0,0 +1,38 @@
# nf-core/configs: Computerome 2.0 Configuration
To use, run the pipeline with `-profile computerome`. This will download and launch the [`computerome.config`](../conf/computerome.config) which has been pre-configured with a setup suitable for the Computerome cluster.
## Using the Computerome config profile
Before running the pipeline you will need to load `Nextflow` using the environment module system (this can be done with e.g. `module load tools Nextflow/<VERSION>` where `VERSION` is e.g. `20.10`).
To use, run the pipeline with `-profile computerome` (one hyphen).
This will download and launch the [`computerome.config`](../conf/computerome.config) which has been pre-configured with a setup suitable for the Computerome servers.
It will enable `Nextflow` to manage the pipeline jobs via the `Torque` job scheduler.
Using this profile, `Singularity` image(s) containing required software(s) will be downloaded before execution of the pipeline.
Recent versions of `Nextflow` also support the environment variable `NXF_SINGULARITY_CACHEDIR`, which can be used to supply images. The Computerome configuration uses your project's scratch folder as the cache directory if not specified.
In addition to this config profile, you will also need to specify a Computerome project id.
You can do this with the `--project` flag (two hyphens) when launching `Nextflow`.
For example:
```bash
# Launch a nf-core pipeline with the computerome profile for the project id ab00002
$ nextflow run nf-core/<PIPELINE> -profile computerome --project ab00002 [...]
```
> NB: If you're not sure what your Computerome project ID is, try running `groups`.
Remember to use `-bg` to launch `Nextflow` in the background, so that the pipeline doesn't exit if you leave your terminal session.
Alternatively, you can also launch `Nextflow` in a `screen` or a `tmux` session.
## About Computerome 2.0
The Danish National Supercomputer for Life Sciences (a.k.a. Computerome) is installed at the DTU National Lifescience Center at Technical University of Denmark.
The computer hardware is funded with grants from Technical University of Denmark (DTU), University of Copenhagen (KU) and Danish e-infrastructure Cooperation (DeiC) - also, it is the official Danish ELIXIR Node.
Computerome 1.0 was opened in November 2014 at #121 on TOP500 Supercomputing Sites.
The current setup, Computerome 2.0, was opened in 2019. Its compute resources consist of 31760 CPU cores with 210 TeraBytes of memory, connected to 17 PetaBytes of high-performance storage,

25
docs/rosalind.md Normal file
View file

@ -0,0 +1,25 @@
# nf-core/configs: Rosalind Configuration
All nf-core pipelines have been successfully configured for use on the Rosalind Cluster at [King's College London](https://rosalind.kcl.ac.uk/).
To use, run the pipeline with `-profile rosalind`. This will download and launch the [`rosalind.config`](../conf/rosalind.config) which has been pre-configured with a setup suitable for the rosalind cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## Using Nextflow on Rosalind
Before running the pipeline you will need to configure Nextflow and Singularity. There is no Nextflow module on Rosalind at this time. This can be done with the following commands:
```bash
## Load Singularity environment modules - these commands can be placed in your ~/.bashrc also
module load apps/openjdk
module load apps/singularity
## Download Nextflow-all
wget https://github.com/nextflow-io/nextflow/releases/download/v21.04.3/nextflow-21.04.3-all
chmod a+x nextflow-21.04.3-all
mv nextflow-21.04.3-all ~/bin/nextflow
```
By default, the shared partition is used for job submission. Other partitions can be specified using the `--partition <PARTITION NAME>` argument to the run.
## Additional information
The default shared partition resource limits are defined as ten percent of the total available to the cluster at any one point in time. The limitations defined by this configuration are conservative and are projected to be increased as greater computational resources are introduced in the near future.

View file

@ -23,6 +23,7 @@ profiles {
ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" } ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" }
cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" } cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" } cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" }
computerome { includeConfig "${params.custom_config_base}/conf/computerome.config" }
crick { includeConfig "${params.custom_config_base}/conf/crick.config" } crick { includeConfig "${params.custom_config_base}/conf/crick.config" }
czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" } czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" }
denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" } denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" }
@ -48,6 +49,7 @@ profiles {
pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" } pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" } phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
prince { includeConfig "${params.custom_config_base}/conf/prince.config" } prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
rosalind { includeConfig "${params.custom_config_base}/conf/rosalind.config" }
sanger { includeConfig "${params.custom_config_base}/conf/sanger.config"} sanger { includeConfig "${params.custom_config_base}/conf/sanger.config"}
seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"} seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"}
uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" } uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" }