Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-11-13 05:03:10 +00:00)
Merge branch 'master' into hasta

Commit a8a2027f70: 11 changed files with 192 additions and 52 deletions.
.github/workflows/main.yml (vendored): 94 lines changed

The hunk shows the CI profile matrix in its previous form and in its updated form; the updated list adds the computerome and rosalind profiles.

@@ -15,52 +15,54 @@ jobs:
    name: Run ${{ matrix.profile }} profile
    needs: test_all_profiles
    strategy:
      matrix:
        profile:
          - "abims"
          - "aws_tower"
          - "awsbatch"
          - "bi"
          - "bigpurple"
          - "binac"
          - "biohpc_gen"
          - "cambridge"
          - "cbe"
          - "ccga_dx"
          - "ccga_med"
          - "cfc"
          - "cfc_dev"
          - "crick"
          - "denbi_qbic"
          - "ebc"
          - "eddie"
          - "eva"
          - "genotoul"
          - "genouest"
          - "gis"
          - "google"
          - "hasta"
          - "hebbe"
          - "icr_davros"
          - "ifb_core"
          - "imperial"
          - "imperial_mb"
          - "jax"
          - "lugh"
          - "mpcdf"
          - "munin"
          - "nu_genomics"
          - "oist"
          - "pasteur"
          - "phoenix"
          - "prince"
          - "sanger"
          - "seg_globe"
          - "uct_hpc"
          - "uppmax"
          - "utd_ganymede"
          - "utd_sysbio"
          - "uzh"
      matrix:
        profile:
          - 'abims'
          - 'aws_tower'
          - 'awsbatch'
          - 'bi'
          - 'bigpurple'
          - 'binac'
          - 'biohpc_gen'
          - 'cambridge'
          - 'cbe'
          - 'ccga_dx'
          - 'ccga_med'
          - 'cfc'
          - 'cfc_dev'
          - 'computerome'
          - 'crick'
          - 'denbi_qbic'
          - 'ebc'
          - 'eddie'
          - 'eva'
          - 'genotoul'
          - 'genouest'
          - 'gis'
          - 'google'
          - 'hasta'
          - 'hebbe'
          - 'icr_davros'
          - 'ifb_core'
          - 'imperial'
          - 'imperial_mb'
          - 'jax'
          - 'lugh'
          - 'mpcdf'
          - 'munin'
          - 'nu_genomics'
          - 'oist'
          - 'pasteur'
          - 'phoenix'
          - 'prince'
          - 'rosalind'
          - 'sanger'
          - 'seg_globe'
          - 'uct_hpc'
          - 'uppmax'
          - 'utd_ganymede'
          - 'utd_sysbio'
          - 'uzh'
    steps:
      - uses: actions/checkout@v1
      - name: Install Nextflow
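A quick local sanity check for a newly added matrix entry (this is not the exact command the CI runs; the pipeline name is a placeholder) is to resolve a pipeline's configuration with the profile applied:

```bash
# Pull any nf-core pipeline, then print its resolved configuration with the new profile.
# The profile only resolves once it is reachable via the pipeline's custom_config_base
# (by default, the master branch of nf-core/configs).
nextflow pull nf-core/<PIPELINE>
nextflow config nf-core/<PIPELINE> -profile computerome
```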
README.md (adds Computerome and Rosalind to the documentation index)

@@ -102,6 +102,7 @@ Currently documentation is available for the following systems:
* [CCGA_DX](docs/ccga_dx.md)
* [CCGA_MED](docs/ccga_med.md)
* [CFC](docs/cfc.md)
* [Computerome](docs/computerome.md)
* [CRICK](docs/crick.md)
* [CZBIOHUB_AWS](docs/czbiohub.md)
* [DENBI_QBIC](docs/denbi_qbic.md)

@@ -122,6 +123,7 @@ Currently documentation is available for the following systems:
* [PASTEUR](docs/pasteur.md)
* [PHOENIX](docs/phoenix.md)
* [PRINCE](docs/prince.md)
* [ROSALIND](docs/rosalind.md)
* [SANGER](docs/sanger.md)
* [SEG_GLOBE](docs/seg_globe.md)
* [UCT_HPC](docs/uct_hpc.md)
conf/computerome.config (new file): 29 lines

@@ -0,0 +1,29 @@
//Profile config names for nf-core/configs
params {
  config_profile_description = 'Computerome 2.0 cluster profile provided by nf-core/configs.'
  config_profile_contact = 'Marc Trunjer Kusk Nielsen (@marcmtk)'
  config_profile_url = 'https://www.computerome.dk/'
  project = null
  cache_dir = "/home/projects/$params.project/scratch"
  schema_ignore_params = "project,cache_dir,genomes,modules"

  //Thin nodes with 192GB and Fat nodes with ~1500GB. Torque should be allowed to handle this
  max_memory = 1500.GB
  max_cpus = 40

  //There is no max walltime on the cluster, but a week seems sensible if not directly specified
  max_time = 168.h
}

singularity {
  enabled = true
  autoMounts = true
  cacheDir = params.cache_dir
}

process {
  beforeScript = "module load tools singularity/3.8.0; export _JAVA_OPTIONS=-Djava.io.tmpdir=$params.cache_dir"
  executor = 'pbs'
  queueSize = 2000
  clusterOptions = "-A $params.project -W group_list=$params.project"
}
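Since `project` and `cache_dir` are declared as params (and excluded from schema validation via `schema_ignore_params`), they can be overridden on the command line like any other pipeline parameter. A minimal sketch with an illustrative project id and a hypothetical cache path:

```bash
# ab00002 and the cache path below are placeholders, not values taken from the config itself.
nextflow run nf-core/<PIPELINE> -profile computerome \
    --project ab00002 \
    --cache_dir /home/projects/ab00002/nxf_singularity_cache
```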
conf/mpcdf.config (cleanup behaviour for the cobra, raven and debug profiles)

@@ -6,7 +6,10 @@ params {

profiles {

    cobra {

        cleanup = true

        process {
            beforeScript = 'module load singularity'

@@ -31,8 +34,11 @@ profiles {
            max_time = 24.h
        }
    }

    raven {

        cleanup = true

        process {
            beforeScript = 'module load singularity'
            executor = 'slurm'

@@ -56,6 +62,7 @@ profiles {
            max_time = 24.h
        }
    }

    debug {
        cleanup = false
    }
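With `cleanup = true`, Nextflow deletes the work directory of a successful run, so the `debug` profile's `cleanup = false` is the switch for keeping intermediates. Assuming the cluster sub-profiles are meant to be combined with the institutional profile, as is usual for this kind of config, a usage sketch would be:

```bash
# Keep intermediate work directories on the raven cluster by stacking the debug profile.
nextflow run nf-core/<PIPELINE> -profile mpcdf,raven,debug
```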
conf/pipeline/eager/eva.config (nf-core/eager pipeline-specific settings)

@@ -9,7 +9,7 @@ params {
// Specific nf-core/eager process configuration
process {

    beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1 -XX:+PrintCommandLineFlags"'
    beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'

    maxRetries = 2

@@ -90,6 +90,7 @@ process {
    withName: additional_library_merge {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G,virtual_free=${(task.memory.toGiga() * 2)}G" }
        memory = { check_max( 4.GB * task.attempt, 'memory' ) }
    }

    withName: malt {

@@ -157,7 +158,7 @@ profiles {
        process {

            beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1 -XX:+PrintCommandLineFlags"'
            beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'

            maxRetries = 2

@@ -238,6 +239,7 @@ profiles {
            withName: additional_library_merge {
                clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G,virtual_free=${(task.memory.toGiga() * 3)}G" }
                memory = { check_max( 4.GB * task.attempt, 'memory' ) }
            }

            withName: malt {

@@ -247,7 +249,6 @@ profiles {
            }

            withName:hostremoval_input_fastq {
                cpus = { check_max( 1, 'cpus' ) }
                memory = { check_max( 32.GB * task.attempt, 'memory' ) }
                time = 1440.h
            }

@@ -310,7 +311,7 @@ profiles {
        process {

            beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1 -XX:+PrintCommandLineFlags"'
            beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'

            maxRetries = 2

@@ -383,6 +384,7 @@ profiles {
            withName: library_merge {
                clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
                memory = { check_max( 6.GB * task.attempt, 'memory' ) }
            }

            withName: seqtype_merge {

@@ -391,10 +393,10 @@ profiles {
            withName: additional_library_merge {
                clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G,virtual_free=${(task.memory.toGiga() * 6)}G" }
                memory = { check_max( 6.GB * task.attempt, 'memory' ) }
            }

            withName:hostremoval_input_fastq {
                cpus = { check_max( 1, 'cpus' ) }
                memory = { check_max( 32.GB * task.attempt, 'memory' ) }
                time = 1440.h
            }
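The `clusterOptions` closures above derive the SGE `h_vmem`/`virtual_free` request by multiplying the task's memory. As a rough worked example (values assumed, not taken from a real run): `library_merge` at its first attempt gets `memory = 6.GB`, and with the factor of 6 the submitted options come out to `-S /bin/bash -V -l h_vmem=36G,virtual_free=36G`. The same arithmetic as a small bash sketch:

```bash
# Hypothetical helper mirroring the closure's arithmetic; both inputs are assumed values.
mem_gb=6       # task.memory.toGiga() for library_merge at attempt 1
multiplier=6   # factor used in that clusterOptions closure
echo "-S /bin/bash -V -l h_vmem=$((mem_gb * multiplier))G,virtual_free=$((mem_gb * multiplier))G"
```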
conf/rosalind.config (new file): 29 lines

@@ -0,0 +1,29 @@
params {
  config_profile_description = 'Kings College London Rosalind HPC'
  config_profile_contact = 'Theo Portlock'
  config_profile_url = 'https://www.rosalind.kcl.ac.uk/'
}

singularity {
  enabled = true
  autoMounts = true
  docker.enabled = false
}

params {
  max_memory = 64.GB
  max_cpus = 16
  max_time = 24.h
  partition = 'shared'
  schema_ignore_params = 'partition,genomes,modules'
}

process {
  executor = 'slurm'
  maxRetries = 3
  clusterOptions = { "--partition=$params.partition" }
}

executor {
  submitRateLimit = '1 sec'
}
conf/sanger.config

@@ -15,6 +15,10 @@ process{
    queue = 'normal'
    errorStrategy = { task.attempt <= 5 ? "retry" : "finish" }
    process.maxRetries = 5
    withLabel:process_long {
        queue = 'long'
        max_time = 48.h
    }
}

executor{
docs/cambridge.md (typo fix: "whichhas" becomes "which has")

@@ -1,7 +1,7 @@
# nf-core/configs: Cambridge HPC Configuration

All nf-core pipelines have been successfully configured for use on the Cambridge HPC cluster at the [The University of Cambridge](https://www.cam.ac.uk/).
To use, run the pipeline with `-profile cambridge`. This will download and launch the [`cambridge.config`](../conf/cambridge.config) whichhas been pre-configured
To use, run the pipeline with `-profile cambridge`. This will download and launch the [`cambridge.config`](../conf/cambridge.config) which has been pre-configured
with a setup suitable for the Cambridge HPC cluster. Using this profile, either a docker image containing all of the required software will be downloaded,
and converted to a Singularity image or a Singularity image downloaded directly before execution of the pipeline.
docs/computerome.md (new file): 38 lines

@@ -0,0 +1,38 @@
# nf-core/configs: Computerome 2.0 Configuration

To use, run the pipeline with `-profile computerome`. This will download and launch the [`computerome.config`](../conf/computerome.config) which has been pre-configured with a setup suitable for the Computerome cluster.

## Using the Computerome config profile

Before running the pipeline you will need to load `Nextflow` using the environment module system (this can be done with e.g. `module load tools Nextflow/<VERSION>` where `VERSION` is e.g. `20.10`).

To use, run the pipeline with `-profile computerome` (one hyphen).
This will download and launch the [`computerome.config`](../conf/computerome.config) which has been pre-configured with a setup suitable for the Computerome servers.
It will enable `Nextflow` to manage the pipeline jobs via the `Torque` job scheduler.
Using this profile, `Singularity` image(s) containing required software(s) will be downloaded before execution of the pipeline.

Recent version of `Nextflow` also support the environment variable `NXF_SINGULARITY_CACHEDIR` which can be used to supply images. The computerome configuration uses your project's scratch folder as the cachedir if not specified.

In addition to this config profile, you will also need to specify a Computerome project id.
You can do this with the `--project` flag (two hyphens) when launching `Nextflow`.
For example:

```bash
# Launch a nf-core pipeline with the computerome profile for the project id ab00002
$ nextflow run nf-core/<PIPELINE> -profile computerome --project ab00002 [...]
```

> NB: If you're not sure what your Computerome project ID is, try running `groups`.

Remember to use `-bg` to launch `Nextflow` in the background, so that the pipeline doesn't exit if you leave your terminal session.
Alternatively, you can also launch `Nextflow` in a `screen` or a `tmux` session.

## About Computerome 2.0

The Danish National Supercomputer for Life Sciences (a.k.a. Computerome) is installed at the DTU National Lifescience Center at Technical University of Denmark.

The computer hardware is funded with grants from Technical University of Denmark (DTU), University of Copenhagen (KU) and Danish e-infrastructure Cooperation (DeiC) - also, it is the official Danish ELIXIR Node.

Computerome 1.0 was opened in November 2014 at #121 on TOP500 Supercomputing Sites.

The current setup, Computerome 2.0, was opened in 2019. It's compute resources consists of 31760 CPU cores with 210 TeraBytes of memory, connected to 17 PetaBytes of High-performance storage,
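The guide above mentions `NXF_SINGULARITY_CACHEDIR`; a minimal sketch of pointing Nextflow at a pre-populated image cache (the path and project id are illustrative):

```bash
# Reuse previously downloaded Singularity images instead of pulling them again.
export NXF_SINGULARITY_CACHEDIR=/home/projects/ab00002/scratch
nextflow run nf-core/<PIPELINE> -profile computerome --project ab00002
```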
docs/rosalind.md (new file): 25 lines

@@ -0,0 +1,25 @@
# nf-core/configs: Rosalind Configuration

All nf-core pipelines have been successfully configured for use on the Rosalind CLuster at [Kings College London](https://rosalind.kcl.ac.uk/).
To use, run the pipeline with `-profile rosalind`. This will download and launch the [`rosalind.config`](../conf/rosalind.config) which has been pre-configured with a setup suitable for the rosalind cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

## Using Nextflow on Rosalind

Before running the pipeline you will need to configure Nextflow and Singularity. There is no Nextflow module on Rosalind at this time. This can be done with the following commands:

```bash
## Load Singularity environment modules - these commands can be placed in your ~/.bashrc also
module load apps/openjdk
module load apps/singularity

## Download Nextflow-all
wget https://github.com/nextflow-io/nextflow/releases/download/v21.04.3/nextflow-21.04.3-all
chmod a+x nextflow-21.04.3-all
mv nextflow-21.04.3-all ~/bin/nextflow
```

By default, the shared partition is used for job submission. Other partitions can be specified using the `--partition <PARTITION NAME>` argument to the run.

## Additional information

The default shared partition resource limits are defined as ten percent of the total available to the cluster at any one point in time. The limitations defined by this configuration are conservative and are projected to be increased as greater computational resources are introduced in the near future.
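Following the partition note above, a minimal sketch of selecting a different Slurm partition (which partitions exist depends on the cluster):

```bash
# Submit to a named partition instead of the default 'shared' partition.
nextflow run nf-core/<PIPELINE> -profile rosalind --partition <PARTITION NAME>
```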
nfcore_custom.config (registers the computerome and rosalind profiles)

@@ -23,6 +23,7 @@ profiles {
  ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" }
  cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
  cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" }
  computerome { includeConfig "${params.custom_config_base}/conf/computerome.config" }
  crick { includeConfig "${params.custom_config_base}/conf/crick.config" }
  czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" }
  denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" }

@@ -48,6 +49,7 @@ profiles {
  pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
  phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
  prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
  rosalind { includeConfig "${params.custom_config_base}/conf/rosalind.config" }
  sanger { includeConfig "${params.custom_config_base}/conf/sanger.config"}
  seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"}
  uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" }
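Each entry above loads its config from `params.custom_config_base`, which normally points at the nf-core/configs repository. When trying out a profile that is not merged yet, that base can be redirected to a local checkout; a sketch with a placeholder path:

```bash
# Load the rosalind profile from a local clone of the configs repository.
nextflow run nf-core/<PIPELINE> -profile rosalind --custom_config_base /path/to/configs
```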