Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-12-22 10:38:16 +00:00)

Commit 7ff2977a29: Merge branch 'master' of github.com:nf-core/configs into feat/hasta_update

37 changed files with 845 additions and 64 deletions
@@ -8,5 +8,5 @@ trim_trailing_whitespace = true
 indent_size = 4
 indent_style = space
 
-[*.{md,yml,yaml}]
+[*.{md,yml,yaml,cff}]
 indent_size = 2
.github/workflows/fix-linting.yml (vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
name: Fix linting from a comment
on:
  issue_comment:
    types: [created]

jobs:
  deploy:
    # Only run if comment is on a PR with the main repo, and if it contains the magic keywords
    if: >
      contains(github.event.comment.html_url, '/pull/') &&
      contains(github.event.comment.body, '@nf-core-bot fix linting') &&
      github.repository == 'nf-core/configs'
    runs-on: ubuntu-latest
    steps:
      # Use the @nf-core-bot token to check out so we can push later
      - uses: actions/checkout@v3
        with:
          token: ${{ secrets.nf_core_bot_auth_token }}

      # Action runs on the issue comment, so we don't get the PR by default
      # Use the gh cli to check out the PR
      - name: Checkout Pull Request
        run: gh pr checkout ${{ github.event.issue.number }}
        env:
          GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }}

      - uses: actions/setup-node@v2

      - name: Install Prettier
        run: npm install -g prettier @prettier/plugin-php

      # Check that we actually need to fix something
      - name: Run 'prettier --check'
        id: prettier_status
        run: |
          if prettier --check ${GITHUB_WORKSPACE}; then
            echo "::set-output name=result::pass"
          else
            echo "::set-output name=result::fail"
          fi

      - name: Run 'prettier --write'
        if: steps.prettier_status.outputs.result == 'fail'
        run: prettier --write ${GITHUB_WORKSPACE}

      - name: Commit & push changes
        if: steps.prettier_status.outputs.result == 'fail'
        run: |
          git config user.email "core@nf-co.re"
          git config user.name "nf-core-bot"
          git config push.default upstream
          git add .
          git status
          git commit -m "[automated] Fix linting with Prettier"
          git push
.github/workflows/main.yml (vendored, 5 changed lines)
@@ -43,6 +43,7 @@ jobs:
           - "cbe"
           - "ccga_dx"
           - "ccga_med"
+          - "cedars"
           - "cfc"
           - "cfc_dev"
           - "cheaha"
@@ -65,6 +66,8 @@ jobs:
           - "jax"
           - "lugh"
           - "marvin"
+          - "medair"
+          - "mjolnir_globe"
           - "maestro"
           - "mpcdf"
           - "munin"
@@ -75,6 +78,7 @@ jobs:
           - "phoenix"
           - "prince"
           - "rosalind"
+          - "sage"
           - "sahmri"
           - "sanger"
           - "seg_globe"
@@ -85,6 +89,7 @@ jobs:
           - "utd_sysbio"
           - "uzh"
           - "vai"
+          - "vsc_ugent"
     steps:
       - uses: actions/checkout@v1
       - name: Install Nextflow
CITATION.cff (new file, 56 lines)
@@ -0,0 +1,56 @@
cff-version: 1.2.0
message: "If you use `nf-core tools` in your work, please cite the `nf-core` publication"
authors:
  - family-names: Ewels
    given-names: Philip
  - family-names: Peltzer
    given-names: Alexander
  - family-names: Fillinger
    given-names: Sven
  - family-names: Patel
    given-names: Harshil
  - family-names: Alneberg
    given-names: Johannes
  - family-names: Wilm
    given-names: Andreas
  - family-names: Ulysse Garcia
    given-names: Maxime
  - family-names: Di Tommaso
    given-names: Paolo
  - family-names: Nahnsen
    given-names: Sven
title: "The nf-core framework for community-curated bioinformatics pipelines."
version: 2.4.1
doi: 10.1038/s41587-020-0439-x
date-released: 2022-05-16
url: https://github.com/nf-core/tools
prefered-citation:
  type: article
  authors:
    - family-names: Ewels
      given-names: Philip
    - family-names: Peltzer
      given-names: Alexander
    - family-names: Fillinger
      given-names: Sven
    - family-names: Patel
      given-names: Harshil
    - family-names: Alneberg
      given-names: Johannes
    - family-names: Wilm
      given-names: Andreas
    - family-names: Ulysse Garcia
      given-names: Maxime
    - family-names: Di Tommaso
      given-names: Paolo
    - family-names: Nahnsen
      given-names: Sven
  doi: 10.1038/s41587-020-0439-x
  journal: nature biotechnology
  start: 276
  end: 278
  title: "The nf-core framework for community-curated bioinformatics pipelines."
  issue: 3
  volume: 38
  year: 2020
  url: https://dx.doi.org/10.1038/s41587-020-0439-x
README.md (15 changed lines)
@@ -10,7 +10,6 @@ A repository for hosting Nextflow configuration files containing custom paramete
 - [Configuration and parameters](#configuration-and-parameters)
 - [Offline usage](#offline-usage)
 - [Adding a new config](#adding-a-new-config)
-  - [Checking user hostnames](#checking-user-hostnames)
 - [Testing](#testing)
 - [Documentation](#documentation)
 - [Uploading to `nf-core/configs`](#uploading-to-nf-coreconfigs)
@@ -99,6 +98,7 @@ Currently documentation is available for the following systems:
 - [CBE](docs/cbe.md)
 - [CCGA_DX](docs/ccga_dx.md)
 - [CCGA_MED](docs/ccga_med.md)
+- [Cedars-Sinai](docs/cedars.md)
 - [CFC](docs/cfc.md)
 - [CHEAHA](docs/cheaha.md)
 - [Computerome](docs/computerome.md)
@@ -120,6 +120,8 @@ Currently documentation is available for the following systems:
 - [LUGH](docs/lugh.md)
 - [MAESTRO](docs/maestro.md)
 - [MARVIN](docs/marvin.md)
+- [MEDAIR](docs/medair.md)
+- [MJOLNIR_GLOBE](docs/mjolnir_globe.md)
 - [MPCDF](docs/mpcdf.md)
 - [MUNIN](docs/munin.md)
 - [NU_GENOMICS](docs/nu_genomics.md)
@@ -129,6 +131,7 @@ Currently documentation is available for the following systems:
 - [PHOENIX](docs/phoenix.md)
 - [PRINCE](docs/prince.md)
 - [ROSALIND](docs/rosalind.md)
+- [SAGE BIONETWORKS](docs/sage.md)
 - [SANGER](docs/sanger.md)
 - [SEG_GLOBE](docs/seg_globe.md)
 - [UCT_HPC](docs/uct_hpc.md)
@@ -138,6 +141,7 @@ Currently documentation is available for the following systems:
 - [UTD_SYSBIO](docs/utd_sysbio.md)
 - [UZH](docs/uzh.md)
 - [VAI](docs/vai.md)
+- [VSC_UGENT](docs/vsc_ugent.md)
 
 ### Uploading to `nf-core/configs`
 
@@ -191,13 +195,18 @@ Currently documentation is available for the following pipelines within specific
   - [UPPMAX](docs/pipeline/ampliseq/uppmax.md)
 - eager
   - [EVA](docs/pipeline/eager/eva.md)
+- mag
+  - [EVA](docs/pipeline/mag/eva.md)
 - rnafusion
   - [MUNIN](docs/pipeline/rnafusion/munin.md)
+- rnavar
+  - [MUNIN](docs/pipeline/rnavar/munin.md)
 - sarek
   - [MUNIN](docs/pipeline/sarek/munin.md)
   - [UPPMAX](docs/pipeline/sarek/uppmax.md)
-- rnavar
-  - [MUNIN](docs/pipeline/rnavar/munin.md)
+- taxprofiler
+  - [EVA](docs/pipeline/taxprofiler/eva.md)
+  - [hasta](docs/pipeline/taxprofiler/hasta.md)
 
 ### Pipeline-specific documentation
 
conf/cedars.config (new file, 26 lines)
@@ -0,0 +1,26 @@
//Profile config names for nf-core/configs
params {
    config_profile_description = 'Cedars-Sinai Medical Center HPC Profile'
    config_profile_contact = 'Alex Rajewski (@rajewski)'
    config_profile_url = 'https://www.cedars-sinai.edu/research/cores/informatics-computing/resources.html'
    max_memory = 90.GB
    max_cpus = 10
    max_time = 240.h
}

// Specify the queing system
executor {
    name = "sge"
}

process {
    penv = 'smp'
    beforeScript =
    """
    module load 'singularity/3.6.0'
    """
}

singularity {
    enabled = true
}
|
@ -17,7 +17,7 @@ process {
|
||||||
executor = 'sge'
|
executor = 'sge'
|
||||||
penv = 'smp'
|
penv = 'smp'
|
||||||
queue = 'all.q'
|
queue = 'all.q'
|
||||||
clusterOptions = { "-S /bin/bash -V -j y -o output.log -l h_vmem=${task.memory.toGiga()}G" }
|
clusterOptions = { "-S /bin/bash -V -j y -o output.sge -l h_vmem=${task.memory.toGiga()}G" }
|
||||||
}
|
}
|
||||||
|
|
||||||
executor {
|
executor {
|
||||||
|
@ -37,7 +37,7 @@ profiles {
|
||||||
|
|
||||||
process {
|
process {
|
||||||
queue = { task.memory > 700.GB ? 'bigmem.q' : 'archgen.q' }
|
queue = { task.memory > 700.GB ? 'bigmem.q' : 'archgen.q' }
|
||||||
clusterOptions = { "-S /bin/bash -V -j y -o output.log -l h_vmem=${task.memory.toGiga()}G" }
|
clusterOptions = { "-S /bin/bash -V -j y -o output.sge -l h_vmem=${task.memory.toGiga()}G" }
|
||||||
}
|
}
|
||||||
|
|
||||||
singularity {
|
singularity {
|
||||||
|
|
|
@@ -15,7 +15,8 @@ google.zone = params.google_zone
 google.lifeSciences.debug = params.google_debug
 workDir = params.google_bucket
 google.lifeSciences.preemptible = params.google_preemptible
 
 if (google.lifeSciences.preemptible) {
-    process.errorStrategy = { task.exitStatus==14 ? 'retry' : 'terminate' }
+    process.errorStrategy = { task.exitStatus in [8,10,14] ? 'retry' : 'terminate' }
     process.maxRetries = 5
 }
@@ -10,6 +10,7 @@ params {
 
 singularity {
     enabled = true
+    envWhitelist = ['_JAVA_OPTIONS']
 }
 
 params {
@@ -2,23 +2,22 @@
 params {
     config_profile_description = 'The IFB core cluster profile'
     config_profile_contact = 'https://community.france-bioinformatique.fr'
-    config_profile_url = 'https://www.france-bioinformatique.fr/'
+    config_profile_url = 'https://ifb-elixirfr.gitlab.io/cluster/doc/cluster-desc/'
 }
 
 singularity {
-    // need one image per execution
     enabled = true
     runOptions = '-B /shared'
 }
 
 process {
     executor = 'slurm'
+    queue = { task.time <= 24.h ? 'fast' : 'long' }
 }
 
 params {
     igenomes_ignore = true
-    // Max resources requested by a normal node on genotoul.
-    max_memory = 240.GB
-    max_cpus = 28
-    max_time = 96.h
+    max_memory = 252.GB
+    max_cpus = 56
+    max_time = 720.h
 }
conf/medair.config (new file, 46 lines)
@@ -0,0 +1,46 @@
//Profile config names for nf-core/configs
params {
    config_profile_description = 'Cluster profile for medair (local cluster of Clinical Genomics Gothenburg)'
    config_profile_contact = 'Clinical Genomics, Gothenburg (cgg-rd@gu.se, cgg-it@gu.se)'
    config_profile_url = 'https://www.scilifelab.se/units/clinical-genomics-goteborg/'
}

//Nextflow parameters
singularity {
    enabled = true
    cacheDir = "/apps/bio/dependencies/nf-core/singularities"
}

profiles {

    wgs {
        process {
            queue = 'wgs.q'
            executor = 'sge'
            penv = 'mpi'
            process.clusterOptions = '-l excl=1'
            params.max_cpus = 40
            params.max_time = 48.h
            params.max_memory = 128.GB
        }
    }

    production {
        process {
            queue = 'production.q'
            executor = 'sge'
            penv = 'mpi'
            process.clusterOptions = '-l excl=1'
            params.max_cpus = 40
            params.max_time = 480.h
            params.max_memory = 128.GB
        }
    }
}

//Specific parameter for pipelines that can use Sentieon (e.g. nf-core/sarek, nf-core/raredisease)
process {
    withLabel:'sentieon' {
        container = "/apps/bio/singularities/sentieon-211204-peta.simg"
    }
}
conf/mjolnir_globe.config (new file, 25 lines)
@@ -0,0 +1,25 @@
//Profile config names for nf-core/configs
params {
    config_profile_description = 'Section for Hologenomics and Section for Molecular Ecology and Evolution @ Globe Institute, University of Copenhagen - mjolnir_globe profile provided by nf-core/configs.'
    config_profile_contact = 'Aashild Vaagene (@ashildv)'
    config_profile_url = 'https://globe.ku.dk/research/'
    max_memory = 500.GB
    max_cpus = 50
    max_time = 720.h
}

singularity {
    enabled = true
    autoMounts = true
    cacheDir = '/maps/projects/mjolnir1/data/cache/nf-core/singularity'
}

process {
    executor = 'slurm'
}

cleanup = true

executor {
    queueSize = 10
}
@@ -61,7 +61,7 @@ profiles {
 
         params {
             config_profile_description = 'MPCDF raven profile (unofficially) provided by nf-core/configs.'
-            memory = 2000000.MB
+            max_memory = 2000000.MB
             max_cpus = 72
             max_time = 24.h
         }
@@ -6,11 +6,15 @@ params {
     config_profile_description = 'nf-core/eager EVA profile provided by nf-core/configs'
 }
 
+env {
+    _JAVA_OPTIONS = "-XX:ParallelGCThreads=1"
+    OPENBLAS_NUM_THREADS = 1
+    OMP_NUM_THREADS = 1
+}
+
 // Specific nf-core/eager process configuration
 process {
 
-    beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
-
     maxRetries = 2
 
     // Solution for clusterOptions comes from here: https://github.com/nextflow-io/nextflow/issues/332 + personal toMega conversion
@@ -70,6 +74,11 @@ process {
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
+    withName: fastqc_after_clipping {
+        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
+    }
+
     withName: adapter_removal {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
@@ -184,6 +193,17 @@ process {
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
+    withName:get_software_versions {
+        cache = false
+        clusterOptions = { "-S /bin/bash -V -l h=!(bionode06)" }
+        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toMega() * 8)}M" }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
+    }
+
+    withName:multiqc {
+        clusterOptions = { "-S /bin/bash -V -j y -o output.log -l h_vmem=${task.memory.toGiga() * 2}G" }
+    }
+
 }
 
 profiles {
@@ -202,8 +222,6 @@ profiles {
 
         process {
 
-            beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
-
             maxRetries = 2
 
             // Solution for clusterOptions comes from here: https://github.com/nextflow-io/nextflow/issues/332 + personal toMega conversion
@@ -256,6 +274,11 @@ profiles {
             errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
         }
 
+        withName: fastqc_after_clipping {
+            clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
+            errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
+        }
+
         withName: adapter_removal {
             clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
             errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
@@ -358,7 +381,6 @@ profiles {
             clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
             errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'ignore' }
         }
-
     }
 }
 
@@ -376,8 +398,6 @@ profiles {
 
         process {
 
-            beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
-
             maxRetries = 2
 
             // Solution for clusterOptions comes from here: https://github.com/nextflow-io/nextflow/issues/332 + personal toMega conversion
@@ -430,6 +450,11 @@ profiles {
             errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
         }
 
+        withName: fastqc_after_clipping {
+            clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
+            errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
+        }
+
         withName: adapter_removal {
            clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
            errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
@@ -533,7 +558,6 @@ profiles {
             clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
             errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'ignore' }
         }
-
     }
 }
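Note (illustration only, not part of the commit): the EVA hunks above repeatedly wrap the SGE memory request in closures of the form `h_vmem=${(task.memory.toGiga() * N)}G`, reserving a multiple of the task's own memory for the scheduler. A minimal Groovy sketch of how such a closure resolves, with `taskMemoryGiga` standing in for `task.memory.toGiga()` of a running task:

```groovy
// Hypothetical task that requested 8 GB; the SGE h_vmem limit is set to twice that.
def taskMemoryGiga = 8   // stand-in for task.memory.toGiga()
def clusterOptions = "-S /bin/bash -V -l h_vmem=${taskMemoryGiga * 2}G"
assert clusterOptions == '-S /bin/bash -V -l h_vmem=16G'
println clusterOptions
```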
conf/pipeline/mag/eva.config (new file, 14 lines)
@@ -0,0 +1,14 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_description = 'nf-core/mag EVA profile provided by nf-core/configs'
}

process {

    withName: FASTQC {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

}
@@ -20,6 +20,7 @@ process {
             enabled: false,
         ]
     }
+    withName:'QUALIMAP_BAMQC' {
+        ext.args = { "--java-mem-size=${task.memory.giga / 1.15 as long}G" }
+    }
 }
 
 
conf/pipeline/taxprofiler/eva.config (new file, 29 lines)
@@ -0,0 +1,29 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_description = 'nf-core/taxprofiler EVA profile provided by nf-core/configs'
}

process {

    withName: BBMAP_BBDUK {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: MALT_RUN {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 4)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: METAPHLAN3 {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: MEGAN_RMA2INFO {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 4)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

}
conf/pipeline/taxprofiler/hasta.config (new file, 16 lines)
@@ -0,0 +1,16 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'Sofia Stamouli (@sofstam)'
    config_profile_description = 'nf-core/taxprofiler HASTA profile provided by nf-core/configs'
}

process {

    withName:'BBMAP_BBDUK' {
        memory = { check_max( 80.GB * task.attempt, 'memory' ) }
    }

    withName: 'MALT_RUN' {
        memory = { check_max( 80.GB * task.attempt, 'memory' ) }
    }
}
@@ -13,18 +13,18 @@ params {
     // Please use 'MN908947.3' if possible because all primer sets are available / have been pre-prepared relative to that assembly
     fasta = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/NC_045512.2/GCF_009858895.2_ASM985889v3_genomic.200409.fna.gz'
     gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/NC_045512.2/GCF_009858895.2_ASM985889v3_genomic.200409.gff.gz'
-    nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-01-18T12_00_00Z.tar.gz'
+    nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-06-14T12_00_00Z.tar.gz'
     nextclade_dataset_name = 'sars-cov-2'
     nextclade_dataset_reference = 'MN908947'
-    nextclade_dataset_tag = '2022-01-18T12:00:00Z'
+    nextclade_dataset_tag = '2022-06-14T12:00:00Z'
 }
 'MN908947.3' {
     fasta = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.fna.gz'
     gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.gff.gz'
-    nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-01-18T12_00_00Z.tar.gz'
+    nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-06-14T12_00_00Z.tar.gz'
     nextclade_dataset_name = 'sars-cov-2'
     nextclade_dataset_reference = 'MN908947'
-    nextclade_dataset_tag = '2022-01-18T12:00:00Z'
+    nextclade_dataset_tag = '2022-06-14T12:00:00Z'
     primer_sets {
         artic {
             '1' {
conf/sage.config (new file, 100 lines)
@@ -0,0 +1,100 @@
params {
    config_profile_description = 'The Sage Bionetworks profile'
    config_profile_contact = 'Bruno Grande (@BrunoGrandePhD)'
    config_profile_url = 'https://github.com/Sage-Bionetworks-Workflows'
}

process {

    cpus = { check_max( 1 * slow(task.attempt), 'cpus' ) }
    memory = { check_max( 6.GB * task.attempt, 'memory' ) }
    time = { check_max( 24.h * task.attempt, 'time' ) }

    errorStrategy = { task.exitStatus in [143,137,104,134,139,247] ? 'retry' : 'finish' }
    maxRetries = 5
    maxErrors = '-1'

    // Process-specific resource requirements
    withLabel:process_low {
        cpus = { check_max( 4 * slow(task.attempt), 'cpus' ) }
        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
        time = { check_max( 24.h * task.attempt, 'time' ) }
    }
    withLabel:process_medium {
        cpus = { check_max( 12 * slow(task.attempt), 'cpus' ) }
        memory = { check_max( 36.GB * task.attempt, 'memory' ) }
        time = { check_max( 48.h * task.attempt, 'time' ) }
    }
    withLabel:process_high {
        cpus = { check_max( 24 * slow(task.attempt), 'cpus' ) }
        memory = { check_max( 72.GB * task.attempt, 'memory' ) }
        time = { check_max( 96.h * task.attempt, 'time' ) }
    }
    withLabel:process_long {
        time = { check_max( 192.h * task.attempt, 'time' ) }
    }
    withLabel:process_high_memory {
        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
    }

    // Preventing Sarek labels from using the actual maximums
    withLabel:memory_max {
        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
    }
    withLabel:cpus_max {
        cpus = { check_max( 24 * slow(task.attempt), 'cpus' ) }
    }

}

aws {
    region = "us-east-1"
}

params {
    igenomes_base = 's3://sage-igenomes/igenomes'
    max_memory = 500.GB
    max_cpus = 64
    max_time = 168.h // One week
}

// Function to slow the increase of the resource multipler
// as attempts are made. The rationale is that some CPUs
// don't need to be increased as fast as memory.
def slow(attempt, factor = 2) {
    return Math.ceil( attempt / factor) as int
}


// Function to ensure that resource requirements don't go
// beyond a maximum limit (copied here for Sarek v2)
def check_max(obj, type) {
    if (type == 'memory') {
        try {
            if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
                return params.max_memory as nextflow.util.MemoryUnit
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
                return params.max_time as nextflow.util.Duration
            else
                return obj
        } catch (all) {
            println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min( obj, params.max_cpus as int )
        } catch (all) {
            println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
}
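Note (illustration only, not part of the commit): in the Sage profile above, `slow()` damps how quickly the CPU multiplier grows across retries, while memory still scales linearly with `task.attempt`, and `check_max()` caps both at the profile maxima. A standalone Groovy sketch of the resulting schedule for the `process_high` label (24 CPUs and 72 GB base, `max_cpus = 64` in this profile):

```groovy
// Same slow() helper as in the config; check_max('cpus') is approximated with Math.min.
def slow(attempt, factor = 2) {
    return Math.ceil( attempt / factor ) as int
}
def maxCpus = 64
(1..5).each { attempt ->
    def cpus  = Math.min( 24 * slow(attempt), maxCpus )  // CPUs grow every other attempt, capped at 64
    def memGB = 72 * attempt                              // memory grows linearly with the attempt number
    println "attempt ${attempt}: ${cpus} cpus, ${memGB} GB requested"
}
```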
@@ -1,35 +1,33 @@
+// Profile details
 params {
-    config_profile_description = 'The Wellcome Sanger Institute HPC cluster profile'
-    config_profile_contact = 'Anthony Underwood (@aunderwo)'
-    config_profile_url = 'https://www.sanger.ac.uk/group/informatics-support-group/'
-}
-
-singularity {
-    enabled = true
-    cacheDir = "${baseDir}/singularity"
-    runOptions = '--bind /lustre --bind /nfs/pathnfs01 --bind /nfs/pathnfs02 --bind /nfs/pathnfs03 --bind /nfs/pathnfs04 --bind /nfs/pathnfs05 --bind /nfs/pathnfs06 --no-home'
+    config_profile_description = 'The Wellcome Sanger Institute HPC cluster (farm5) profile'
+    config_profile_contact = 'Priyanka Surana (@priyanka-surana)'
+    config_profile_url = 'https://www.sanger.ac.uk'
 }
 
+// Queue and retry strategy
 process{
     executor = 'lsf'
-    queue = 'normal'
-    errorStrategy = { task.attempt <= 5 ? "retry" : "finish" }
-    process.maxRetries = 5
-    withLabel:process_long {
-        queue = 'long'
-    }
+    queue = { task.time < 12.h ? 'normal' : task.time < 48.h ? 'long' : 'basement' }
+    errorStrategy = 'retry'
+    maxRetries = 5
 }
 
+// Executor details
 executor{
     name = 'lsf'
     perJobMemLimit = true
     poolSize = 4
     submitRateLimit = '5 sec'
     killBatchSize = 50
 }
 
+// Max resources
 params {
-    max_memory = 128.GB
-    max_cpus = 64
-    max_time = 48.h
+    max_memory = 683.GB
+    max_cpus = 256
+    max_time = 720.h
 }
+
+// For singularity
+singularity.runOptions = '--bind /lustre --bind /nfs'
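Note (illustration only, not part of the commit): the updated Sanger profile now picks an LSF queue from the task's requested walltime instead of always using `normal`. A quick Groovy check of how that ternary routes jobs, with plain hour values standing in for `task.time`:

```groovy
// Mirrors the new queue closure: <12 h -> normal, <48 h -> long, otherwise basement.
def pickQueue = { hours -> hours < 12 ? 'normal' : hours < 48 ? 'long' : 'basement' }
assert pickQueue(4)   == 'normal'
assert pickQueue(24)  == 'long'
assert pickQueue(100) == 'basement'
```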
conf/vsc_ugent.config (new file, 117 lines)
@@ -0,0 +1,117 @@
// Define the Scratch directory
def scratch_dir = System.getenv("VSC_SCRATCH_VO_USER") ?: "scratch/"

// Specify the work directory
workDir = "$scratch_dir/work"

// Perform work directory cleanup when the run has succesfully completed
// cleanup = true

// Reduce the job submit rate to about 5 per second, this way the server won't be bombarded with jobs
executor {
    submitRateLimit = '3 sec'
}

// Specify that singularity should be used and where the cache dir will be for the images
singularity {
    enabled = true
    autoMounts = true
    cacheDir = "$scratch_dir/singularity"
}

env {
    SINGULARITY_CACHEDIR="$scratch_dir/.singularity"
}

// Define profiles for each cluster
profiles {
    skitty {
        params {
            config_profile_description = 'HPC_SKITTY profile for use on the Skitty cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 177.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'skitty'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }

    swalot {
        params {
            config_profile_description = 'HPC_SWALOT profile for use on the Swalot cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 116.GB
            max_cpus = 20
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'swalot'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }

    victini {
        params {
            config_profile_description = 'HPC_VICTINI profile for use on the Victini cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 88.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'victini'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }

    kirlia {
        params {
            config_profile_description = 'HPC_KIRLIA profile for use on the Kirlia cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 738.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'kirlia'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }

    doduo {
        params {
            config_profile_description = 'HPC_DODUO profile for use on the Doduo cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 250.GB
            max_cpus = 96
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'doduo'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }
}
docs/cedars.md (new file, 7 lines)
@@ -0,0 +1,7 @@
# Cedars-Sinai Medical Center HPC

- You will need HPC access from EIS, which can be requested in the Service Center.
- You will need to load the nextflow module on the HPC before running any pipelines (`module load nextflow`). This should automatically load Java as well.
- Run this with `-profile cedars`
- By default this config file does not specify a queue for submission, and things will thus go to `all.q`. Because of that, the memory and cpu limits have been set accordingly.
- We highly recommend specifying a location of a cache directory to store singularity images (so you re-use them across runs, and not pull each time), by specifying the location with the `$NXF_SINGULARITY_CACHE_DIR` bash environment variable in your `.bash_profile` or `.bashrc`
@@ -6,7 +6,8 @@ To use, run the pipeline with `-profile ifb_core`. This will download and launch
 
 ## How to use on IFB core
 
-Before running the pipeline you will need to load Nextflow using the environment module system on IFB core. You can do this by issuing the commands below:
+Here is [the link to the cluster's documentation](https://ifb-elixirfr.gitlab.io/cluster/doc/quick-start/).
+Before running the pipeline you will need to load Nextflow and other dependencies using the environment module system on IFB core. You can do this by issuing the commands below:
 
 ```bash
 # Login to a compute node
@@ -14,7 +15,10 @@ srun --pty bash
 
 ## Load Nextflow and Singularity environment modules
 module purge
-module load nextflow/20.04.1
+module load nextflow
+module load singularity
+module load openjdk
 
 
 # Run a downloaded/git-cloned nextflow workflow from
 nextflow run \\
docs/medair.md (new file, 70 lines)
@@ -0,0 +1,70 @@
# nf-core/configs: Medair Configuration

All nf-core pipelines have been successfully configured for use on the Medair cluster at Clinical Genomics Gothenburg.

To use, run the pipeline with `-profile medair`. This will download and launch the [`medair.config`](../conf/medair.config) which has been pre-configured with a setup suitable for the Medair cluster.
It will enable Nextflow to manage the pipeline jobs via the `SGE` job scheduler.
Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

You will need an account to use the Medair cluster in order to download or run pipelines. If in doubt, contact cgg-it.

## Download nf-core pipelines

### Set-up: load Nextflow and nf-core tools

First you need to load relevant softwares: Nextflow and nf-core tools. You can do it as follow:

```bash
## Load Nextflow
module load nextflow
## Load nf-core tools
module load miniconda
source activate nf-core
```

### Storage of Singularity images

When downloading a nf-core pipeline for the first time (or a specific version of a pipeline), you can choose to store the Singularity image for future use. We chose to have a central location for these images on medair: `/apps/bio/dependencies/nf-core/singularities`.

For Nexflow to know where to store new images, run or add the following to your `.bashrc`:

```bash
export NXF_SINGULARITY_CACHEDIR="/apps/bio/dependencies/nf-core/singularities"
```

> Comment: This was also added to cronuser.

### Download a pipeline

We have started to download pipelines in the following location: `/apps/bio/repos/nf-core/`

Use the `nf-core download --singularity-cache-only` command to start a download. It will open an interactive menu. Choose `singularity` for the software container image, and `none` for the compression type.

## Run nf-core pipelines

Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands below will have to be executed on one of the login nodes. If in doubt contact cgg-it (cgg-it[at]gu.se).

### Set-up: load Nextflow and Singularity

Before running a pipeline you will need to load Nextflow and Singularity using the environment module system on Medair. You can do this by issuing the commands below:

```bash
## Load Nextflow and Singularity environment modules
module purge
module load nextflow
module load singularity
```

### Choose a profile

Depending on what you are running, you can choose between the `wgs` and `production` profiles. Jobs running with the `wgs` profile run on a queue with higher priority. Jobs running with the `production` profile can last longer (max time: 20 days, versus 2 days for the `wgs` profile).

For example, the following job would run with the `wgs` profile:

```bash
run nextflow nf-core/raredisease -profile medair,wgs
```

### Sentieon

In some pipelines (sarek, raredisease) it is possible to use Sentieon for alignment and variant calling. If ones uses the label `sentieon` for running a process, the config file contains the path to the Sentieon singularity image on Medair.
docs/mjolnir_globe.md (new file, 31 lines)
@@ -0,0 +1,31 @@
# nf-core/configs: Section for Hologenomics at GLOBE, University of Copenhagen (Mjolnir server) Configuration

> **NB:** You will need an account on Mjolnir to run the pipeline. If in doubt contact IT.

Prior to running the pipeline for the first time with the `mjolnir_globe.config` (../conf/mjolnir_globe.config), users **must** create a hidden directory called `.tmp_nfcore` in their data/project directory on Mjolnir where the temp files from nf-core pipelines will be re-directed by the `NXF_TEMP` command (see below).

The contents of the `.tmp_nfcore` directory should be periodically deleted manually to save on space.
If the `NXF_TEMP` command is not used to properly re-direct temp files the `/tmp` directory on the compute nodes will be used and quickly filled up, which blocks anyone from working on these nodes until the offending user removes their files.

The following lines **must** be added by users to their `~/.bash_profile`:

```bash
#re-direct tmp files away from /tmp directories on compute nodes or the headnode
export NXF_TEMP=/maps/projects/mjolnir1/people/$USER/.tmp_nfcore

# nextflow - limiting memory of virtual java machine
NXF_OPTS='-Xms1g -Xmx4g'
```

Once you have created the `.tmp_nfcore` directory and added the above lines of code to your `.bash_profile` you can run an nf-core pipeline.

Before running a pipeline you will need to load Java, Miniconda, Singularity and Nextflow. You can do this by including the commands below in your SLURM/sbatch script:

```bash
## Load Java and Nextflow environment modules
module purge
module load jdk/1.8.0_291 miniconda singularity/3.8.0 nextflow/21.04.1.5556
```

All of the intermediate output files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
The `mjolnir_globe` config contains a `cleanup` command that removes the `work/` directory automatically once the pipeline has completeed successfully. If the run does not complete successfully then the `work/` dir should be removed manually to save storage space.
docs/pipeline/mag/eva.md (new file, 15 lines)
@@ -0,0 +1,15 @@
# nf-core/configs: eva mag specific configuration

Extra specific configuration for mag pipeline

## Usage

To use, run the pipeline with `-profile eva`.

This will download and launch the mag specific [`eva.config`](../../../conf/pipeline/mag/eva.config) which has been pre-configured with a setup suitable for the MPI-EVA cluster.

Example: `nextflow run nf-core/mag -profile eva`

## mag specific configurations for eva

Specific configurations for eva has been made for mag, primarily adjusting SGE memory requirements of Java tools (e.g. FastQC).
docs/pipeline/taxprofiler/eva.md (new file, 19 lines)
@@ -0,0 +1,19 @@
# nf-core/configs: eva taxprofiler specific configuration

Extra specific configuration for taxprofiler pipeline

## Usage

To use, run the pipeline with `-profile eva`.

This will download and launch the taxprofiler specific [`eva.config`](../../../conf/pipeline/taxprofiler/eva.config) which has been pre-configured with a setup suitable for the MPI-EVA cluster.

Example: `nextflow run nf-core/taxprofiler -profile eva`

## taxprofiler specific configurations for eva

Specific configurations for eva has been made for taxprofiler.

### General profiles

- The general MPI-EVA profile runs with default nf-core/taxprofiler parameters, but with modifications to account for issues SGE have with Java and python tools, nameling: BBDUK, MALT, MetaPhlAn3, and MEGAN
docs/pipeline/taxprofiler/hasta.md (new file, 19 lines)
@@ -0,0 +1,19 @@
# nf-core/configs: eva taxprofiler specific configuration

Extra specific configuration for taxprofiler pipeline

## Usage

To use, run the pipeline with `-profile hasta`.

This will download and launch the taxprofiler specific [`hasta.config`](../../../conf/pipeline/taxprofiler/hasta.config) which has been pre-configured with a setup suitable for the hasta cluster.

Example: `nextflow run nf-core/taxprofiler -profile hasta`

## taxprofiler specific configurations for hasta

Specific configurations for hasta has been made for taxprofiler.

### General profiles

- The general hasta profile runs with default nf-core/taxprofiler parameters, but with modifications to account for issues with: BBDUK and MALT.
docs/sage.md (new file, 27 lines)
@@ -0,0 +1,27 @@
# nf-core/configs: Sage Bionetworks Global Configuration

To use this custom configuration, run the pipeline with `-profile sage`. This will download and load the [`sage.config`](../conf/sage.config), which contains a number of optimizations relevant to Sage employees running workflows on AWS (_e.g._ using Nextflow Tower). This profile will also load any applicable pipeline-specific configuration.

This global configuration includes the following tweaks:

- Update the default value for `igenomes_base` to `s3://sage-igenomes`
- Increase the default time limits because we run pipelines on AWS
- Enable retries by default when exit codes relate to insufficient memory
- Allow pending jobs to finish if the number of retries are exhausted
- Slow the increase in the number of allocated CPU cores on retries
- Define the `check_max()` function, which is missing in Sarek v2

## Additional information about iGenomes

The following iGenomes prefixes have been copied from `s3://ngi-igenomes/` (`eu-west-1`) to `s3://sage-igenomes` (`us-east-1`). See [this script](https://github.com/Sage-Bionetworks-Workflows/nextflow-infra/blob/main/bin/mirror-igenomes.sh) for more information. The `sage-igenomes` S3 bucket has been configured to openly available, but files cannot be downloaded out of `us-east-1` to avoid egress charges. You can check the `conf/igenomes.config` file in each nf-core pipeline to figure out the mapping between genome IDs (_i.e._ for `--genome`) and iGenomes prefixes ([example](https://github.com/nf-core/rnaseq/blob/89bf536ce4faa98b4d50a8ec0a0343780bc62e0a/conf/igenomes.config#L14-L26)).

- **Human Genome Builds**
  - `Homo_sapiens/Ensembl/GRCh37`
  - `Homo_sapiens/GATK/GRCh37`
  - `Homo_sapiens/UCSC/hg19`
  - `Homo_sapiens/GATK/GRCh38`
  - `Homo_sapiens/NCBI/GRCh38`
  - `Homo_sapiens/UCSC/hg38`
- **Mouse Genome Builds**
  - `Mus_musculus/Ensembl/GRCm38`
  - `Mus_musculus/UCSC/mm10`
@@ -2,8 +2,6 @@
 
 To use, run the pipeline with `-profile sanger`. This will download and launch the [`sanger.config`](../conf/sanger.config) which has been
 pre-configured with a setup suitable for the Wellcome Sanger Institute LSF cluster.
-Using this profile, either a docker image containing all of the required software will be downloaded, and converted to a Singularity image or
-a Singularity image downloaded directly before execution of the pipeline.
 
 ## Running the workflow on the Wellcome Sanger Institute cluster
 
@@ -14,10 +12,12 @@ The latest version of Nextflow is not installed by default on the cluster. You w
 A recommended place to move the `nextflow` executable to is `~/bin` so that it's in the `PATH`.
 
 Nextflow manages each process as a separate job that is submitted to the cluster by using the `bsub` command.
-Since the Nextflow pipeline will submit individual jobs for each process to the cluster and dependencies will be provided bu Singularity images you shoudl make sure that your account has access to the Singularity binary by adding these lines to your `.bashrc` file
+
+If asking Nextflow to use Singularity to run the individual jobs,
+you should make sure that your account has access to the Singularity binary by adding these lines to your `.bashrc` file
 
 ```bash
-[[ -f /software/pathogen/farm5 ]] && module load ISG/singularity
+[[ -f /software/modules/ISG/singularity ]] && module load ISG/singularity
 ```
 
 Nextflow shouldn't run directly on the submission node but on a compute node.
@@ -26,16 +26,16 @@ To do so make a shell script with a similar structure to the following code and
 ```bash
 #!/bin/bash
 #BSUB -o /path/to/a/log/dir/%J.o
-#BSUB -e /path/to/a/log/dir//%J.e
+#BSUB -e /path/to/a/log/dir/%J.e
 #BSUB -M 8000
-#BSUB -q long
-#BSUB -n 4
+#BSUB -q oversubscribed
+#BSUB -n 2
 
 export HTTP_PROXY='http://wwwcache.sanger.ac.uk:3128'
 export HTTPS_PROXY='http://wwwcache.sanger.ac.uk:3128'
 export NXF_ANSI_LOG=false
 export NXF_OPTS="-Xms8G -Xmx8G -Dnxf.pool.maxThreads=2000"
-export NXF_VER=21.04.0-edge
+export NXF_VER=22.04.0-5697
 
 
 nextflow run \
35 docs/vsc_ugent.md Normal file
@@ -0,0 +1,35 @@
# nf-core/configs: University of Ghent High Performance Computing Infrastructure (VSC)

> **NB:** You will need an [account](https://www.ugent.be/hpc/en/access/faq/access) to use the HPC cluster to run the pipeline.

First you should go to the cluster you want to run the pipeline on. You can check what clusters have the most free space on this [link](https://shieldon.ugent.be:8083/pbsmon-web-users/). Use the following commands to easily switch between clusters:

```shell
module purge
module swap cluster/<CLUSTER>
```

Before running the pipeline you will need to create a PBS script to submit as a job.

```bash
#!/bin/bash

module load Nextflow

nextflow run <pipeline> -profile vsc_ugent,<CLUSTER> <Add your other parameters>
```

All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
The config contains a `cleanup` command that removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully then the `work/` dir should be removed manually to save storage space. The default work directory is set to `$VSC_SCRATCH_VO_USER/work` per this configuration.

You can also add several TORQUE options to the PBS script. More about this can be found at this [link](http://hpcugent.github.io/vsc_user_docs/pdf/intro-HPC-linux-gent.pdf#appendix.B).

To submit your job to the cluster, use the following command:

```shell
qsub <script name>.pbs
```

> **NB:** The profile only works for the clusters `skitty`, `swalot`, `victini`, `kirlia` and `doduo`.

> **NB:** By default, the `work/` and `singularity/` (image cache) directories are located in `$VSC_SCRATCH_VO_USER`.
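To illustrate the TORQUE options mentioned in `docs/vsc_ugent.md` above, below is a hedged sketch of a PBS submission script with a few common directives. The directive values are illustrative only; consult Appendix B of the linked HPC guide for the options supported on your cluster:

```bash
#!/bin/bash
#PBS -N nf-core-run             # job name (illustrative)
#PBS -l walltime=72:00:00       # wall-clock limit for the Nextflow driver job
#PBS -l nodes=1:ppn=2           # the driver itself needs only a couple of cores
#PBS -m abe                     # optional: mail on abort, begin and end

module purge
module load Nextflow

nextflow run <pipeline> -profile vsc_ugent,<CLUSTER> <Add your other parameters>
```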
@@ -24,6 +24,7 @@ profiles {
cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" }
ccga_dx { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" }
ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" }
+cedars { includeConfig "${params.custom_config_base}/conf/cedars.config" }
cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" }
cheaha { includeConfig "${params.custom_config_base}/conf/cheaha.config" }
@@ -48,6 +49,8 @@ profiles {
lugh { includeConfig "${params.custom_config_base}/conf/lugh.config" }
maestro { includeConfig "${params.custom_config_base}/conf/maestro.config" }
marvin { includeConfig "${params.custom_config_base}/conf/marvin.config" }
+medair { includeConfig "${params.custom_config_base}/conf/medair.config" }
+mjolnir_globe { includeConfig "${params.custom_config_base}/conf/mjolnir_globe.config" }
mpcdf { includeConfig "${params.custom_config_base}/conf/mpcdf.config" }
munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
nihbiowulf { includeConfig "${params.custom_config_base}/conf/nihbiowulf.config" }
@@ -57,6 +60,7 @@ profiles {
phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
rosalind { includeConfig "${params.custom_config_base}/conf/rosalind.config" }
+sage { includeConfig "${params.custom_config_base}/conf/sage.config" }
sahmri { includeConfig "${params.custom_config_base}/conf/sahmri.config" }
sanger { includeConfig "${params.custom_config_base}/conf/sanger.config"}
seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"}
@@ -67,4 +71,5 @@ profiles {
utd_sysbio { includeConfig "${params.custom_config_base}/conf/utd_sysbio.config" }
uzh { includeConfig "${params.custom_config_base}/conf/uzh.config" }
vai { includeConfig "${params.custom_config_base}/conf/vai.config" }
+vsc_ugent { includeConfig "${params.custom_config_base}/conf/vsc_ugent.config" }
}
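As background for the profile entries added above, institutional config files in this repository generally follow a common shape. The following is a hypothetical sketch under that assumption, not the content of any of the newly referenced files (`cedars.config`, `medair.config`, `mjolnir_globe.config`, `sage.config`, `vsc_ugent.config`):

```nextflow
// Hypothetical shape only; values and contact details are placeholders.
params {
    config_profile_description = 'Example institutional cluster profile'
    config_profile_contact     = 'Jane Doe (@github_handle)'
    config_profile_url         = 'https://example.org/hpc'
    max_memory                 = 256.GB
    max_cpus                   = 32
    max_time                   = 72.h
}

process {
    executor = 'slurm'   // or 'lsf', 'pbspro', ... depending on the local scheduler
    queue    = 'general'
}

singularity {
    enabled    = true
    autoMounts = true
}
```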
13 pipeline/mag.config Normal file
@@ -0,0 +1,13 @@
/*
 * -------------------------------------------------
 * nfcore/mag custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/mag folder and imported
 * under a profile name here.
 */

profiles {
  eva { includeConfig "${params.custom_config_base}/conf/pipeline/mag/eva.config" }
}
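For context, pipelines typically pull in such a pipeline-level file from their own `nextflow.config` with a guarded `includeConfig`; the sketch below assumes that pattern, and the exact wording lives in the nf-core/mag repository rather than in this diff:

```nextflow
// Illustrative only; assumed loading pattern, not copied from nf-core/mag.
try {
    includeConfig "${params.custom_config_base}/pipeline/mag.config"
} catch (Exception e) {
    System.err.println("WARNING: Could not load nf-core/config/mag profiles: ${params.custom_config_base}/pipeline/mag.config")
}
```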
14 pipeline/taxprofiler.config Normal file
@@ -0,0 +1,14 @@
/*
 * -------------------------------------------------
 * nfcore/taxprofiler custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/taxprofiler folder and imported
 * under a profile name here.
 */

profiles {
  hasta { includeConfig "${params.custom_config_base}/conf/pipeline/taxprofiler/hasta.config" }
  eva { includeConfig "${params.custom_config_base}/conf/pipeline/taxprofiler/eva.config" }
}
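The cluster-specific files referenced by these pipeline configs (for example `hasta.config` or `eva.config`) usually carry per-process resource overrides. A hypothetical sketch, with the process selector and resource values invented for illustration:

```nextflow
// Placeholder process name and resources; not the content of the referenced files.
process {
    withName: 'SOME_TOOL' {
        cpus   = 8
        memory = 64.GB
        time   = 48.h
    }
}
```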