Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-11-24 09:09:56 +00:00)

Commit b38a0077c2: Merge branch 'master' into gis-branch

75 changed files with 896 additions and 418 deletions
.editorconfig (new file, 12 lines)

@@ -0,0 +1,12 @@
root = true

[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
indent_size = 4
indent_style = space

[*.{md,yml,yaml}]
indent_size = 2

.github/PULL_REQUEST_TEMPLATE.md (vendored, 17 changes)

@@ -5,16 +5,17 @@ about: A new cluster config

Please follow these steps before submitting your PR:

* [ ] If your PR is a work in progress, include `[WIP]` in its title
* [ ] Your PR targets the `master` branch
* [ ] You've included links to relevant issues, if any
- [ ] If your PR is a work in progress, include `[WIP]` in its title
- [ ] Your PR targets the `master` branch
- [ ] You've included links to relevant issues, if any

Steps for adding a new config profile:
* [ ] Add your custom config file to the `conf/` directory
* [ ] Add your documentation file to the `docs/` directory
* [ ] Add your custom profile to the `nfcore_custom.config` file in the top-level directory
* [ ] Add your custom profile to the `README.md` file in the top-level directory
* [ ] Add your profile name to the `profile:` scope in `.github/workflows/main.yml`

- [ ] Add your custom config file to the `conf/` directory
- [ ] Add your documentation file to the `docs/` directory
- [ ] Add your custom profile to the `nfcore_custom.config` file in the top-level directory
- [ ] Add your custom profile to the `README.md` file in the top-level directory
- [ ] Add your profile name to the `profile:` scope in `.github/workflows/main.yml`

<!--
If you require/still waiting for a review, please feel free to request from @nf-core/configs-team

.github/markdownlint.yml (vendored, 5 deletions)

@@ -1,5 +0,0 @@
# Markdownlint configuration file
default: true,
line-length: false
no-duplicate-header:
  siblings_only: true

.github/workflows/linting.yml (vendored, 30 changes)

@@ -1,18 +1,22 @@
name: Markdown linting
# This workflow is triggered on pushes and PRs to the repository.
on: [push, pull_request]
name: Code Linting
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  Markdown:
    runs-on: ubuntu-18.04
  prettier:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-node@v2
      - name: Install markdownlint
        run: |
          npm install -g markdownlint-cli
      - name: Run Markdownlint
        run: |
          markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml
      - name: Check out repository
        uses: actions/checkout@v2

      - name: Install NodeJS
        uses: actions/setup-node@v2

      - name: Install Prettier
        run: npm install -g prettier

      - name: Run Prettier --check
        run: prettier --check ${GITHUB_WORKSPACE}

.github/workflows/main.yml (vendored, 114 changes)

@@ -3,7 +3,6 @@ name: Configs tests
on: [pull_request, push]

jobs:

  test_all_profiles:
    runs-on: ubuntu-latest
    name: Check if all profiles are tested

@@ -30,61 +29,64 @@ jobs:
    strategy:
      matrix:
        profile:
          - 'abims'
          - 'alice'
          - 'aws_tower'
          - 'awsbatch'
          - 'azurebatch'
          - 'bi'
          - 'bigpurple'
          - 'binac'
          - 'biohpc_gen'
          - 'cambridge'
          - 'leicester'
          - 'cbe'
          - 'ccga_dx'
          - 'ccga_med'
          - 'cfc'
          - 'cfc_dev'
          - 'cheaha'
          - 'computerome'
          - 'crick'
          - 'denbi_qbic'
          - 'ebc'
          - 'eddie'
          - 'eva'
          - 'fgcz'
          - 'genotoul'
          - 'genouest'
          - 'gis'
          - 'google'
          - 'hasta'
          - 'hebbe'
          - 'icr_davros'
          - 'ifb_core'
          - 'imperial'
          - 'jax'
          - 'lugh'
          - 'marvin'
          - 'maestro'
          - 'mpcdf'
          - 'munin'
          - 'nu_genomics'
          - 'nihbiowulf'
          - 'oist'
          - 'pasteur'
          - 'phoenix'
          - 'prince'
          - 'rosalind'
          - 'sanger'
          - 'seg_globe'
          - 'uct_hpc'
          - 'unibe_ibu'
          - 'uppmax'
          - 'utd_ganymede'
          - 'utd_sysbio'
          - 'uzh'
          - 'vai'
          - "abims"
          - "alice"
          - "aws_tower"
          - "awsbatch"
          - "azurebatch"
          - "bi"
          - "bigpurple"
          - "binac"
          - "biohpc_gen"
          - "cambridge"
          - "leicester"
          - "cbe"
          - "ccga_dx"
          - "ccga_med"
          - "cedars"
          - "cfc"
          - "cfc_dev"
          - "cheaha"
          - "computerome"
          - "crick"
          - "denbi_qbic"
          - "ebc"
          - "eddie"
          - "eva"
          - "fgcz"
          - "genotoul"
          - "genouest"
          - "gis"
          - "google"
          - "hasta"
          - "hebbe"
          - "icr_davros"
          - "ifb_core"
          - "imperial"
          - "jax"
          - "lugh"
          - "marvin"
          - "maestro"
          - "mpcdf"
          - "munin"
          - "nu_genomics"
          - "nihbiowulf"
          - "oist"
          - "pasteur"
          - "phoenix"
          - "prince"
          - "rosalind"
          - "sahmri"
          - "sanger"
          - "seg_globe"
          - "uct_hpc"
          - "unibe_ibu"
          - "uppmax"
          - "utd_ganymede"
          - "utd_sysbio"
          - "uzh"
          - "vai"
          - "vsc_ugent"
    steps:
      - uses: actions/checkout@v1
      - name: Install Nextflow

.prettierignore (new file, 7 lines)

@@ -0,0 +1,7 @@
# gitignore
.nextflow*
work/
data/
results/
.DS_Store
*.code-workspace

.prettierrc.yml (new file, 1 line)

@@ -0,0 +1 @@
printWidth: 120

README.md (183 changes)

@@ -6,20 +6,20 @@ A repository for hosting Nextflow configuration files containing custom paramete

## Table of contents <!-- omit in toc -->

* [Using an existing config](#using-an-existing-config)
  * [Configuration and parameters](#configuration-and-parameters)
  * [Offline usage](#offline-usage)
* [Adding a new config](#adding-a-new-config)
  * [Checking user hostnames](#checking-user-hostnames)
  * [Testing](#testing)
  * [Documentation](#documentation)
  * [Uploading to `nf-core/configs`](#uploading-to-nf-coreconfigs)
* [Adding a new pipeline-specific config](#adding-a-new-pipeline-specific-config)
  * [Pipeline-specific institutional documentation](#pipeline-specific-institutional-documentation)
  * [Pipeline-specific documentation](#pipeline-specific-documentation)
  * [Enabling pipeline-specific configs within a pipeline](#enabling-pipeline-specific-configs-within-a-pipeline)
  * [Create the pipeline-specific `nf-core/configs` files](#create-the-pipeline-specific-nf-coreconfigs-files)
* [Help](#help)
- [Using an existing config](#using-an-existing-config)
  - [Configuration and parameters](#configuration-and-parameters)
  - [Offline usage](#offline-usage)
- [Adding a new config](#adding-a-new-config)
  - [Checking user hostnames](#checking-user-hostnames)
  - [Testing](#testing)
  - [Documentation](#documentation)
  - [Uploading to `nf-core/configs`](#uploading-to-nf-coreconfigs)
- [Adding a new pipeline-specific config](#adding-a-new-pipeline-specific-config)
  - [Pipeline-specific institutional documentation](#pipeline-specific-institutional-documentation)
  - [Pipeline-specific documentation](#pipeline-specific-documentation)
  - [Enabling pipeline-specific configs within a pipeline](#enabling-pipeline-specific-configs-within-a-pipeline)
  - [Create the pipeline-specific `nf-core/configs` files](#create-the-pipeline-specific-nf-coreconfigs-files)
- [Help](#help)

## Using an existing config

@@ -86,68 +86,70 @@ See [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs

Currently documentation is available for the following systems:

* [ABIMS](docs/abims.md)
* [ALICE](docs/alice.md)
* [AWSBATCH](docs/awsbatch.md)
* [AWS_TOWER](docs/aws_tower.md)
* [AZUREBATCH](docs/azurebatch.md)
* [BIGPURPLE](docs/bigpurple.md)
* [BI](docs/bi.md)
* [BINAC](docs/binac.md)
* [BIOHPC_GEN](docs/biohpc_gen.md)
* [CAMBRIDGE](docs/cambridge.md)
* [CBE](docs/cbe.md)
* [CCGA_DX](docs/ccga_dx.md)
* [CCGA_MED](docs/ccga_med.md)
* [CFC](docs/cfc.md)
* [CHEAHA](docs/cheaha.md)
* [Computerome](docs/computerome.md)
* [CRICK](docs/crick.md)
* [CZBIOHUB_AWS](docs/czbiohub.md)
* [DENBI_QBIC](docs/denbi_qbic.md)
* [EBC](docs/ebc.md)
* [EVA](docs/eva.md)
* [FGCZ](docs/fgcz.md)
* [GENOTOUL](docs/genotoul.md)
* [GENOUEST](docs/genouest.md)
* [GIS](docs/gis.md)
* [GOOGLE](docs/google.md)
* [HASTA](docs/hasta.md)
* [HEBBE](docs/hebbe.md)
* [ICR_DAVROS](docs/icr_davros.md)
* [IMPERIAL](docs/imperial.md)
* [JAX](docs/jax.md)
* [LUGH](docs/lugh.md)
* [MAESTRO](docs/maestro.md)
* [MARVIN](docs/marvin.md)
* [MPCDF](docs/mpcdf.md)
* [MUNIN](docs/munin.md)
* [NU_GENOMICS](docs/nu_genomics.md)
* [NIHBIOWULF](docs/nihbiowulf.md)
* [OIST](docs/oist.md)
* [PASTEUR](docs/pasteur.md)
* [PHOENIX](docs/phoenix.md)
* [PRINCE](docs/prince.md)
* [ROSALIND](docs/rosalind.md)
* [SANGER](docs/sanger.md)
* [SEG_GLOBE](docs/seg_globe.md)
* [UCT_HPC](docs/uct_hpc.md)
* [UNIBE_IBU](docs/unibe_ibu.md)
* [UPPMAX](docs/uppmax.md)
* [UTD_GANYMEDE](docs/utd_ganymede.md)
* [UTD_SYSBIO](docs/utd_sysbio.md)
* [UZH](docs/uzh.md)
* [VAI](docs/vai.md)
- [ABIMS](docs/abims.md)
- [ALICE](docs/alice.md)
- [AWSBATCH](docs/awsbatch.md)
- [AWS_TOWER](docs/aws_tower.md)
- [AZUREBATCH](docs/azurebatch.md)
- [BIGPURPLE](docs/bigpurple.md)
- [BI](docs/bi.md)
- [BINAC](docs/binac.md)
- [BIOHPC_GEN](docs/biohpc_gen.md)
- [CAMBRIDGE](docs/cambridge.md)
- [CBE](docs/cbe.md)
- [CCGA_DX](docs/ccga_dx.md)
- [CCGA_MED](docs/ccga_med.md)
- [Cedars-Sinai](docs/cedars.md)
- [CFC](docs/cfc.md)
- [CHEAHA](docs/cheaha.md)
- [Computerome](docs/computerome.md)
- [CRICK](docs/crick.md)
- [CZBIOHUB_AWS](docs/czbiohub.md)
- [DENBI_QBIC](docs/denbi_qbic.md)
- [EBC](docs/ebc.md)
- [EVA](docs/eva.md)
- [FGCZ](docs/fgcz.md)
- [GENOTOUL](docs/genotoul.md)
- [GENOUEST](docs/genouest.md)
- [GIS](docs/gis.md)
- [GOOGLE](docs/google.md)
- [HASTA](docs/hasta.md)
- [HEBBE](docs/hebbe.md)
- [ICR_DAVROS](docs/icr_davros.md)
- [IMPERIAL](docs/imperial.md)
- [JAX](docs/jax.md)
- [LUGH](docs/lugh.md)
- [MAESTRO](docs/maestro.md)
- [MARVIN](docs/marvin.md)
- [MPCDF](docs/mpcdf.md)
- [MUNIN](docs/munin.md)
- [NU_GENOMICS](docs/nu_genomics.md)
- [NIHBIOWULF](docs/nihbiowulf.md)
- [OIST](docs/oist.md)
- [PASTEUR](docs/pasteur.md)
- [PHOENIX](docs/phoenix.md)
- [PRINCE](docs/prince.md)
- [ROSALIND](docs/rosalind.md)
- [SANGER](docs/sanger.md)
- [SEG_GLOBE](docs/seg_globe.md)
- [UCT_HPC](docs/uct_hpc.md)
- [UNIBE_IBU](docs/unibe_ibu.md)
- [UPPMAX](docs/uppmax.md)
- [UTD_GANYMEDE](docs/utd_ganymede.md)
- [UTD_SYSBIO](docs/utd_sysbio.md)
- [UZH](docs/uzh.md)
- [VAI](docs/vai.md)
- [VSC_UGENT](docs/vsc_ugent.md)

### Uploading to `nf-core/configs`

[Fork](https://help.github.com/articles/fork-a-repo/) the [`nf-core/configs`](https://github.com/nf-core/configs/) repository to your own GitHub account.
Within the local clone of your fork:

* **add** the custom config file to the [`conf/`](https://github.com/nf-core/configs/tree/master/conf) directory
* **add** the documentation file to the [`docs/`](https://github.com/nf-core/configs/tree/master/docs) directory
* **edit** and add your custom profile to the [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) file in the top-level directory of the clone
* **edit** and add your custom profile to the [`README.md`](https://github.com/nf-core/configs/blob/master/README.md) file in the top-level directory of the clone
- **add** the custom config file to the [`conf/`](https://github.com/nf-core/configs/tree/master/conf) directory
- **add** the documentation file to the [`docs/`](https://github.com/nf-core/configs/tree/master/docs) directory
- **edit** and add your custom profile to the [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) file in the top-level directory of the clone
- **edit** and add your custom profile to the [`README.md`](https://github.com/nf-core/configs/blob/master/README.md) file in the top-level directory of the clone

In order to ensure that the config file is tested automatically with GitHub Actions please add your profile name to the `profile:` scope (under strategy matrix) in [`.github/workflows/main.yml`](.github/workflows/main.yml). If you forget to do this the tests will fail with the error:

@@ -186,25 +188,30 @@ Note that pipeline-specific configs are not required and should only be added if

Currently documentation is available for the following pipelines within specific profiles:

* ampliseq
  * [BINAC](docs/pipeline/ampliseq/binac.md)
  * [UPPMAX](docs/pipeline/ampliseq/uppmax.md)
* eager
  * [EVA](docs/pipeline/eager/eva.md)
* rnafusion
  * [MUNIN](docs/pipeline/rnafusion/munin.md)
* sarek
  * [MUNIN](docs/pipeline/sarek/munin.md)
  * [UPPMAX](docs/pipeline/sarek/uppmax.md)
* rnavar
  * [MUNIN](docs/pipeline/rnavar/munin.md)
- ampliseq
  - [BINAC](docs/pipeline/ampliseq/binac.md)
  - [UPPMAX](docs/pipeline/ampliseq/uppmax.md)
- eager
  - [EVA](docs/pipeline/eager/eva.md)
- mag
  - [EVA](docs/pipeline/mag/eva.md)
- rnafusion
  - [MUNIN](docs/pipeline/rnafusion/munin.md)
- rnavar
  - [MUNIN](docs/pipeline/rnavar/munin.md)
- sarek
  - [MUNIN](docs/pipeline/sarek/munin.md)
  - [UPPMAX](docs/pipeline/sarek/uppmax.md)
- taxprofiler
  - [EVA](docs/pipeline/taxprofiler/eva.md)
  - [hasta](docs/pipeline/taxprofiler/hasta.md)

### Pipeline-specific documentation

Currently documentation is available for the following pipeline:

* viralrecon
  * [genomes](docs/pipeline/viralrecon/genomes.md)
- viralrecon
  - [genomes](docs/pipeline/viralrecon/genomes.md)

### Enabling pipeline-specific configs within a pipeline

@@ -233,7 +240,7 @@ We will be notified automatically when you have created your pull request, and p
[Fork](https://help.github.com/articles/fork-a-repo/) the [`nf-core/configs`](https://github.com/nf-core/configs/) repository to your own GitHub account.
And add or edit the following files in the local clone of your fork.

* `pipeline/<PIPELINE>.config`
- `pipeline/<PIPELINE>.config`

If not already created, create the `pipeline/<PIPELINE>.config` file, and add your custom profile to the profile scope

@@ -243,18 +250,18 @@ profiles {
}
```

* `conf/pipeline/<PIPELINE>/<PROFILE>.config`
- `conf/pipeline/<PIPELINE>/<PROFILE>.config`

Add the custom configuration file to the `conf/pipeline/<PIPELINE>/` directory.
Make sure to add an extra `params` section with `params.config_profile_description`, `params.config_profile_contact` to the top of `pipeline/<PIPELINE>.config` and set to reasonable values.
Users will get information on who wrote the pipeline-specific configuration profile then when executing the nf-core pipeline and can report back if there are things missing for example.

* `docs/pipeline/<PIPELINE>/<PROFILE>.md`
- `docs/pipeline/<PIPELINE>/<PROFILE>.md`

Add the documentation file to the `docs/pipeline/<PIPELINE>/` directory.
You will also need to edit and add your custom profile to the [`README.md`](https://github.com/nf-core/configs/blob/master/README.md) file in the top-level directory of the clone.

* `README.md`
- `README.md`

Edit this file, and add the new pipeline-specific institutional profile to the list in the section Pipeline specific documentation

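For orientation, a minimal sketch of the two pipeline-specific files described in the README excerpt above, using a hypothetical pipeline `examplepipe` and profile `exampleprofile` (the names, contact and process settings below are placeholders, not part of this commit):

```groovy
// pipeline/examplepipe.config — routes an institutional profile to its pipeline-specific config
profiles {
    exampleprofile { includeConfig "${params.custom_config_base}/conf/pipeline/examplepipe/exampleprofile.config" }
}

// conf/pipeline/examplepipe/exampleprofile.config — the pipeline-specific institutional settings
params {
    // Specific nf-core/configs params, as the README asks for
    config_profile_contact = 'Jane Doe (@janedoe)'
    config_profile_description = 'nf-core/examplepipe EXAMPLEPROFILE profile provided by nf-core/configs'
}

process {
    withName: FASTQC {
        memory = 8.GB
    }
}
```
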
conf/cedars.config (new file, 26 lines)

@@ -0,0 +1,26 @@
//Profile config names for nf-core/configs
params {
    config_profile_description = 'Cedars-Sinai Medical Center HPC Profile'
    config_profile_contact = 'Alex Rajewski (@rajewski)'
    config_profile_url = 'https://www.cedars-sinai.edu/research/cores/informatics-computing/resources.html'
    max_memory = 90.GB
    max_cpus = 10
    max_time = 240.h
}

// Specify the queing system
executor {
    name = "sge"
}

process {
    penv = 'smp'
    beforeScript =
    """
    module load 'singularity/3.6.0'
    """
}

singularity {
    enabled = true
}

@@ -17,7 +17,7 @@ process {
    executor = 'sge'
    penv = 'smp'
    queue = 'all.q'
    clusterOptions = { "-S /bin/bash -V -j y -o output.log -l h_vmem=${task.memory.toGiga()}G" }
    clusterOptions = { "-S /bin/bash -V -j y -o output.sge -l h_vmem=${task.memory.toGiga()}G" }
}

executor {

@@ -36,8 +36,8 @@ profiles {
    }

    process {
        queue = 'archgen.q'
        clusterOptions = { "-S /bin/bash -V -j y -o output.log -l h_vmem=${task.memory.toGiga()}G" }
        queue = { task.memory > 700.GB ? 'bigmem.q' : 'archgen.q' }
        clusterOptions = { "-S /bin/bash -V -j y -o output.sge -l h_vmem=${task.memory.toGiga()}G" }
    }

    singularity {

@@ -15,7 +15,8 @@ google.zone = params.google_zone
google.lifeSciences.debug = params.google_debug
workDir = params.google_bucket
google.lifeSciences.preemptible = params.google_preemptible

if (google.lifeSciences.preemptible) {
    process.errorStrategy = { task.exitStatus==14 ? 'retry' : 'terminate' }
    process.errorStrategy = { task.exitStatus in [8,10,14] ? 'retry' : 'terminate' }
    process.maxRetries = 5
}

@@ -10,6 +10,7 @@ params {

singularity {
    enabled = true
    envWhitelist = ['_JAVA_OPTIONS']
}

params {

@@ -2,23 +2,22 @@
params {
    config_profile_description = 'The IFB core cluster profile'
    config_profile_contact = 'https://community.france-bioinformatique.fr'
    config_profile_url = 'https://www.france-bioinformatique.fr/'
    config_profile_url = 'https://ifb-elixirfr.gitlab.io/cluster/doc/cluster-desc/'
}

singularity {
    // need one image per execution
    enabled = true
    runOptions = '-B /shared'
}

process {
    executor = 'slurm'
    queue = { task.time <= 24.h ? 'fast' : 'long' }
}

params {
    igenomes_ignore = true
    // Max resources requested by a normal node on genotoul.
    max_memory = 240.GB
    max_cpus = 28
    max_time = 96.h
    max_memory = 252.GB
    max_cpus = 56
    max_time = 720.h
}

@@ -20,9 +20,9 @@ process {
    maxForks = 46

    // Limit cpus for Mutect2
    withName:'Mutect2|Mutect2Single' {
    withName:'Mutect2|Mutect2Single|PileupSummariesForMutect2' {
        time = {48.h * task.attempt}
        maxForks = 23
        maxForks = 12
    }
}

@@ -70,6 +70,11 @@ process {
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: fastqc_after_clipping {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: adapter_removal {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }

@@ -184,6 +189,25 @@ process {
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName:get_software_versions {
        cache = false
        clusterOptions = { "-S /bin/bash -V -l h=!(bionode06)" }
        beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1 -Xmx512m"; export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toMega())}M" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName:eigenstrat_snp_coverage {
        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
    }

    withName:kraken_merge {
        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
    }

    withName:multiqc {
        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1;'
    }
}

profiles {

@@ -256,6 +280,11 @@ profiles {
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: fastqc_after_clipping {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: adapter_removal {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }

@@ -358,7 +387,6 @@ profiles {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
        errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'ignore' }
    }

    }
}

@@ -430,6 +458,11 @@ profiles {
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: fastqc_after_clipping {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: adapter_removal {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }

@@ -533,7 +566,6 @@ profiles {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
        errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'ignore' }
    }

    }
}

conf/pipeline/mag/eva.config (new file, 14 lines)

@@ -0,0 +1,14 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_description = 'nf-core/mag EVA profile provided by nf-core/configs'
}

process {

    withName: FASTQC {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

}

@@ -7,4 +7,7 @@ process {
        cpus = { check_max( 16 * task.attempt, 'cpus' ) }
        memory = { check_max( 80.GB * task.attempt, 'memory' ) }
    }
    withName:'QUALIMAP_BAMQC' {
        ext.args = { "--java-mem-size=${task.memory.giga / 1.15 as long}G" }
    }
}

@@ -23,7 +23,7 @@ params {
// Specific nf-core/sarek process configuration
process {
    withLabel:sentieon {
        module = {params.sentieon ? 'sentieon/202112.00' : null}
        module = {params.sentieon ? 'sentieon/202112.02' : null}
        container = {params.sentieon ? null : container}
    }
}

@@ -26,3 +26,8 @@ if (hostname ==~ "r.*") {
if (hostname ==~ "i.*") {
    params.single_cpu_mem = 15.GB
}

// Miarka-specific config
if (hostname ==~ "m.*") {
    params.single_cpu_mem = 7.GB
}

conf/pipeline/taxprofiler/eva.config (new file, 29 lines)

@@ -0,0 +1,29 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_description = 'nf-core/taxprofiler EVA profile provided by nf-core/configs'
}

process {

    withName: BBMAP_BBDUK {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: MALT_RUN {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 4)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: METAPHLAN3 {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: MEGAN_RMA2INFO {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 4)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

}

conf/pipeline/taxprofiler/hasta.config (new file, 16 lines)

@@ -0,0 +1,16 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'Sofia Stamouli (@sofstam)'
    config_profile_description = 'nf-core/taxprofiler HASTA profile provided by nf-core/configs'
}

process {

    withName:'BBMAP_BBDUK' {
        memory = { check_max( 80.GB * task.attempt, 'memory' ) }
    }

    withName: 'MALT_RUN' {
        memory = { check_max( 80.GB * task.attempt, 'memory' ) }
    }
}

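The `check_max()` calls in the two profiles above rely on a helper that each nf-core pipeline defines in its own `nextflow.config`; it is not part of this repository. A simplified, illustrative sketch of what that helper does (an assumption about its behaviour, not the pipelines' exact code):

```groovy
// Simplified sketch: cap a requested resource at the corresponding params.max_* ceiling.
def check_max(obj, type) {
    if (type == 'memory') { return obj > (params.max_memory as nextflow.util.MemoryUnit) ? (params.max_memory as nextflow.util.MemoryUnit) : obj }
    if (type == 'time')   { return obj > (params.max_time as nextflow.util.Duration)     ? (params.max_time as nextflow.util.Duration)     : obj }
    if (type == 'cpus')   { return Math.min(obj as int, params.max_cpus as int) }
    return obj
}
```

This is why institutional configs can safely bump requests such as `80.GB * task.attempt`: whatever the retry multiplier produces is still clamped to the cluster's `max_memory`, `max_cpus` and `max_time`.
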
conf/sahmri.config (new file, 34 lines)

@@ -0,0 +1,34 @@
params {
    config_profile_description = 'South Australian Health and Medical Research Institute (SAHMRI) HPC cluster profile.'
    config_profile_contact = 'Nathan Watson-Haigh (nathan.watson-haigh@sahmri.com)'
    config_profile_url = "https://sahmri.org.au"
    max_memory = 375.GB
    max_cpus = 32
    max_time = 14.d
    igenomes_base = '/cancer/storage/shared/igenomes/references/'
}
process {
    executor = 'slurm'
    queue = 'sahmri_prod_hpc,sahmri_cancer_hpc'
    maxRetries = 2

    cpus = { check_max( 2 * task.attempt, 'cpus') }
    memory = { check_max( 1.GB * task.attempt, 'memory') }
    time = { check_max( 10.m * task.attempt, 'time') }
}
executor {
    queueSize = 50
    submitRateLimit = '10 sec'
}
singularity {
    enabled = true
    autoMounts = true
    beforeScript = 'export PATH=/apps/opt/singularity/latest/bin:${PATH}'
    cacheDir = '/cancer/storage/shared/simg'
}
cleanup = true
profiles {
    debug {
        cleanup = false
    }
}

@@ -23,7 +23,7 @@ singularity {

def hostname = "r1"
try {
    hostname = "sinfo --local -N -h | grep -m 1 -F -v CLUSTER: | cut -f1 -d' ' ".execute().text.trim()
    hostname = ['/bin/bash', '-c', 'sinfo --local -N -h | grep -m 1 -F -v CLUSTER: | cut -f1 -d" "'].execute().text.trim()
} catch (java.io.IOException e) {
    System.err.println("WARNING: Could not run sinfo to determine current cluster, defaulting to rackham")
}

@@ -36,6 +36,20 @@ def clusterOptionsCreator = { m ->
        return base
    }

    // cluster is miarka
    if (hostname.startsWith("m")) {
        // job will fit on a regular node
        if (m <= 357.GB) {
            return base
        }
        // job requires at least a 2TB node
        if (m <= 2000.GB) {
            return base + " --mem 2TB "
        }
        // job requires the largest node
        return base + " -C mem4TB "
    }

    if (m <= 250.GB) {
        return base + " -p node -C mem256GB "
    }

@@ -77,6 +91,14 @@ if (hostname.startsWith("i")) {
    params.config_profile_description = 'UPPMAX (Irma) cluster profile provided by nf-core/configs.'
}

// Cluster: Miarka
if (hostname.startsWith("m")) {
    params.max_memory = 357.GB
    params.max_cpus = 48
    params.max_time = 480.h
    params.config_profile_description = 'UPPMAX (Miarka) cluster profile provided by nf-core/configs.'
}

// Cluster: Rackham
if (hostname.startsWith("r")) {
    params.max_cpus = 20

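For context, the Miarka branch added above slots into the existing memory-driven `clusterOptionsCreator` closure in `conf/uppmax.config`. A trimmed sketch of how the pieces fit together — the surrounding lines are paraphrased; only the thresholds and options shown in this commit are taken from the diff:

```groovy
// Trimmed sketch (not the full file): map a task's memory request to SLURM cluster options.
def clusterOptionsCreator = { m ->
    String base = "-A ${params.project}"          // paraphrased: project/account options built earlier in the file
    if (hostname.startsWith("m")) {               // Miarka
        if (m <= 357.GB)  { return base }                  // fits on a regular node
        if (m <= 2000.GB) { return base + " --mem 2TB " }  // needs at least a 2 TB node
        return base + " -C mem4TB "                        // needs the largest node
    }
    // ... existing Rackham/Bianca/Irma memory branches follow ...
    return base
}
```
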
conf/vsc_ugent.config (new file, 115 lines)

@@ -0,0 +1,115 @@
// Specify the work directory
workDir = "$VSC_SCRATCH_VO_USER/work"

// Perform work directory cleanup when the run has succesfully completed
cleanup = true

// Reduce the job submit rate to about 10 per second, this way the server won't be bombarded with jobs
executor {
    submitRateLimit = '10 sec'
}

// Specify that singularity should be used and where the cache dir will be for the images
singularity {
    enabled = true
    autoMounts = true
    cacheDir = "$VSC_SCRATCH_VO_USER/singularity"
}

// Define profiles for each cluster
profiles {
    skitty {
        params {
            config_profile_description = 'HPC_SKITTY profile for use on the Skitty cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 177.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'skitty'
            maxRetries = 2
            beforeScript = "export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity"
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }

    swalot {
        params {
            config_profile_description = 'HPC_SWALOT profile for use on the Swalot cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 116.GB
            max_cpus = 20
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'swalot'
            maxRetries = 2
            beforeScript = "export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity"
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }

    victini {
        params {
            config_profile_description = 'HPC_VICTINI profile for use on the Victini cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 88.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'victini'
            maxRetries = 2
            beforeScript = "export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity"
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }

    kirlia {
        params {
            config_profile_description = 'HPC_KIRLIA profile for use on the Kirlia cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 738.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'kirlia'
            maxRetries = 2
            beforeScript = "export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity"
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }

    doduo {
        params {
            config_profile_description = 'HPC_DODUO profile for use on the Doduo cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 250.GB
            max_cpus = 96
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'doduo'
            maxRetries = 2
            beforeScript = "export SINGULARITY_CACHEDIR=$VSC_SCRATCH_VO_USER/.singularity"
            scratch = "$VSC_SCRATCH_VO_USER"
        }
    }
}

@@ -6,4 +6,4 @@ To use, run the pipeline with `-profile bi`. This will download and launch the [

Before running the pipeline you will need to follow the internal documentation to run Nextflow on our systems. Similar to that, you need to set an environment variable `NXF_GLOBAL_CONFIG` to the path of the internal global config which is not publicly available here.

>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

@@ -21,5 +21,5 @@ git clone this repo, copy the `bigpurple.config` from the conf folder and then y

`nextflow run nf-core/<pipeline name> -c bigpurple.config <additional flags>`

>NB: You will need an account to use the HPC cluster BigPurple in order to run the pipeline. If in doubt contact MCIT.
>NB: You will need to install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason there is no module for nextflow on the cluster, is that the development cycle of nextflow is rapid and it's easy to update yourself: `nextflow self-update`
> NB: You will need an account to use the HPC cluster BigPurple in order to run the pipeline. If in doubt contact MCIT.
> NB: You will need to install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason there is no module for nextflow on the cluster, is that the development cycle of nextflow is rapid and it's easy to update yourself: `nextflow self-update`

@@ -13,5 +13,5 @@ module load devel/java_jdk/1.8.0u112
module load devel/singularity/3.0.1
```

>NB: You will need an account to use the HPC cluster BINAC in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use the HPC cluster BINAC in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

@@ -12,6 +12,6 @@ module purge
module load nextflow charliecloud/0.22
```

>NB: Charliecloud support requires Nextflow version `21.03.0-edge` or later.
>NB: You will need an account to use the LRZ Linux cluster as well as group access to the biohpc_gen cluster in order to run nf-core pipelines.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes.
> NB: Charliecloud support requires Nextflow version `21.03.0-edge` or later.
> NB: You will need an account to use the LRZ Linux cluster as well as group access to the biohpc_gen cluster in order to run nf-core pipelines.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes.

@@ -15,4 +15,4 @@ has finished successfully because it can get quite large, and all of the main ou

> NB: You will need an account to use the Cambridge HPC cluster in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via SLURM to the Cambridge HPC cluster and as such the commands above will have to be executed on one of the login
nodes. If in doubt contact IT.
> nodes. If in doubt contact IT.

@@ -14,5 +14,5 @@ module load nextflow/21.10.6

A local copy of the [AWS-iGenomes](https://registry.opendata.aws/aws-igenomes/) resource has been made available on CBE so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline. You can do this by simply using the `--genome <GENOME_ID>` parameter.

>NB: You will need an account to use the HPC cluster on CBE in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use the HPC cluster on CBE in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

@@ -5,4 +5,5 @@ Deployment and testing of nf-core pipelines at the CCGA DX cluster is on-going.
To use, run the pipeline with `-profile ccga_dx`. This will download and launch the [`ccga_dx.config`](../conf/ccga_dx.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

Before running the pipeline you will need to have Nextflow installed.
>NB: Access to the CCGA DX cluster is restricted to IKMB/CCGA employes. Please talk to Marc Hoeppner to get access (@marchoeppner).

> NB: Access to the CCGA DX cluster is restricted to IKMB/CCGA employes. Please talk to Marc Hoeppner to get access (@marchoeppner).

@@ -13,4 +13,4 @@ module load nextflow
module load singularity
```

>NB: Access to the CCGA Med cluster is restricted to IKMB/CCGA employees. Please talk to Marc Hoeppner to get access (@marchoeppner).
> NB: Access to the CCGA Med cluster is restricted to IKMB/CCGA employees. Please talk to Marc Hoeppner to get access (@marchoeppner).

docs/cedars.md (new file, 7 lines)

@@ -0,0 +1,7 @@
# Cedars-Sinai Medical Center HPC

- You will need HPC access from EIS, which can be requested in the Service Center.
- You will need to load the nextflow module on the HPC before running any pipelines (`module load nextflow`). This should automatically load Java as well.
- Run this with `-profile cedars`
- By default this config file does not specify a queue for submission, and things will thus go to `all.q`. Because of that, the memory and cpu limits have been set accordingly.
- We highly recommend specifying a location of a cache directory to store singularity images (so you re-use them across runs, and not pull each time), by specifying the location with the `$NXF_SINGULARITY_CACHE_DIR` bash environment variable in your `.bash_profile` or `.bashrc`

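As a sketch of that last recommendation — an assumption about usage, not part of the committed docs — the same effect can also be had from a small personal config passed with `-c`, since the `cedars` profile itself only enables Singularity:

```groovy
// my_cedars_extras.config (hypothetical file name): point Singularity at a persistent image cache
singularity {
    cacheDir = '/common/username/singularity_cache'   // placeholder path, pick a directory you own
}
// usage, run on the HPC after `module load nextflow`:
//   nextflow run nf-core/<pipeline> -profile cedars -c my_cedars_extras.config
```
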
@@ -13,7 +13,7 @@ module load devel/java_jdk/1.8.0u121
module load devel/singularity/3.4.2
```

>NB: You will need an account to use the HPC cluster CFC in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use the HPC cluster CFC in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

The queues are set to be `qbic` or `compute` and will be chosen automatically for you depending on your job submission.

@@ -15,9 +15,9 @@ module load Nextflow

All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.

>NB: You will need an account to use the HPC cluster on Cheaha in order to run the pipeline. If in doubt contact UAB IT Research Computing.</br></br>
>NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes (or alternatively in an interactive partition, but be aware of time limit). If in doubt contact UAB IT Research Computing.</br></br>
>NB: Instead of using `module load Nextflow`, you may instead create a conda environment (e.g: `conda create -p $USER_DATA/nf-core_nextflow_env nf-core nextflow`) if you would like to have a more personalized environment of Nextflow (versions which may not be modules yet) and nf-core tools. This __requires__ you to instead do the following:
> NB: You will need an account to use the HPC cluster on Cheaha in order to run the pipeline. If in doubt contact UAB IT Research Computing.</br></br>
> NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes (or alternatively in an interactive partition, but be aware of time limit). If in doubt contact UAB IT Research Computing.</br></br>
> NB: Instead of using `module load Nextflow`, you may instead create a conda environment (e.g: `conda create -p $USER_DATA/nf-core_nextflow_env nf-core nextflow`) if you would like to have a more personalized environment of Nextflow (versions which may not be modules yet) and nf-core tools. This **requires** you to instead do the following:

```bash
module purge

@@ -27,4 +27,4 @@ module load Anaconda3
conda activate $USER_DATA/nf-core_nextflow_env
```

>NB: while the jobs for each process of the pipeline are sent to the appropriate nodes, the current session must remain active while the pipeline is running. We recommend to use `screen` prior to loading any modules/environments. Once the pipeline starts you can detach the screen session by typing `Ctrl-a d` so you can safely logout of HPC, while keeping the pipeline active (and you may resume the screen session with `screen -r`). Other similar tools (e.g. `tmux`) may also be used.
> NB: while the jobs for each process of the pipeline are sent to the appropriate nodes, the current session must remain active while the pipeline is running. We recommend to use `screen` prior to loading any modules/environments. Once the pipeline starts you can detach the screen session by typing `Ctrl-a d` so you can safely logout of HPC, while keeping the pipeline active (and you may resume the screen session with `screen -r`). Other similar tools (e.g. `tmux`) may also be used.

@@ -19,5 +19,5 @@ Alternatively, if you are running the pipeline regularly for genomes that arent

All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.

>NB: You will need an account to use the HPC cluster on CAMP in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use the HPC cluster on CAMP in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

@@ -22,7 +22,7 @@ Now you can run pipelines with abandon!

### 2. Make a GitHub repo for your workflows (optional :)

To make sharing your pipelines and commands easy between your teammates, it's best to share code in a GitHub repository. One way is to store the commands in a Makefile ([example](https://github.com/czbiohub/kh-workflows/blob/master/nf-kmer-similarity/Makefile)) which can contain multiple `nextflow run` commands so that you don't need to remember the S3 bucket or output directory for every single one. [Makefiles](https://kbroman.org/minimal_make/) are broadly used in the software community for running many complex commands. Makefiles can have a lot of dependencies and be confusing, so we're only going to write *simple* Makefiles.
To make sharing your pipelines and commands easy between your teammates, it's best to share code in a GitHub repository. One way is to store the commands in a Makefile ([example](https://github.com/czbiohub/kh-workflows/blob/master/nf-kmer-similarity/Makefile)) which can contain multiple `nextflow run` commands so that you don't need to remember the S3 bucket or output directory for every single one. [Makefiles](https://kbroman.org/minimal_make/) are broadly used in the software community for running many complex commands. Makefiles can have a lot of dependencies and be confusing, so we're only going to write _simple_ Makefiles.

```bash
rnaseq:

@@ -120,12 +120,12 @@ You can do this by simply using the `--genome <GENOME_ID>` parameter.

For Human and Mouse, we use [GENCODE](https://www.gencodegenes.org/) gene annotations. This doesn't change how you would specify the genome name, only that the pipelines run with the `czbiohub_aws` profile would be with GENCODE rather than iGenomes.

>NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

## High Priority Queue

If you would like to run with the *High Priority* queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.
If you would like to run with the _High Priority_ queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.

To use it, submit your run with with `-profile czbiohub_aws,highpriority`.

@@ -4,5 +4,5 @@ All nf-core pipelines have been successfully configured for use on the de.NBI Cl

To use, run the pipeline with `-profile denbi_qbic`. This will download and launch the [`denbi_qbic.config`](../conf/denbi_qbic.config) which has been pre-configured with a setup suitable for the automatically created cluster. Using this profile, a Docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

>NB: You will need an account to use de.NBI Cluster in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use de.NBI Cluster in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

docs/eva.md (13 changes)

@@ -4,7 +4,11 @@ All nf-core pipelines have been successfully configured for use on the Departmen

To use, run the pipeline with `-profile eva`. You can further with optimise submissions by specifying which cluster queue you are using e,g, `-profile eva,archgen`. This will download and launch the [`eva.config`](../conf/eva.config) which has been pre-configured with a setup suitable for the `all.q` queue. The number of parallel jobs that run is currently limited to 8.

Using this profile, a docker image containing all of the required software will be downloaded, and converted to a `singularity` image before execution of the pipeline. The image will currently be centrally stored here:
Using this profile, a docker image containing all of the required software will be downloaded, and converted to a `singularity` image before execution of the pipeline.

Institute-specific pipeline profiles exists for:

- eager

## Additional Profiles

@@ -16,12 +20,13 @@ If you specify `-profile eva,archgen` you will be able to use the nodes availabl

Note the following characteristics of this profile:

- By default, job resources are assigned a maximum number of CPUs of 32, 256 GB maximum memory and 720.h maximum wall time.
- By default, job resources are assigned a maximum number of CPUs of 32, 256 GB maximum memory and 365 day maximum wall time.
- Using this profile will currently store singularity images in a cache under `/mnt/archgen/users/singularity_scratch/cache/`. All archgen users currently have read/write access to this directory, however this will likely change to a read-only directory in the future that will be managed by the IT team.
- Intermediate files will be _automatically_ cleaned up (see `debug` below if you don't want this to happen) on successful run completion.
- Jobs submitted with >700.GB will automatically be submitted to the dynamic `bigmem.q`.

>NB: You will need an account and VPN access to use the cluster at MPI-EVA in order to run the pipeline. If in doubt contact the IT team.
>NB: Nextflow will need to submit the jobs via SGE to the clusters and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.
> NB: You will need an account and VPN access to use the cluster at MPI-EVA in order to run the pipeline. If in doubt contact the IT team.
> NB: Nextflow will need to submit the jobs via SGE to the clusters and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.

### debug

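The `bigmem.q` behaviour described in the bullet above corresponds to the dynamic queue directive this commit adds to `conf/eva.config` (see the archgen hunk earlier in this diff). The pattern, shown in isolation:

```groovy
// Jobs requesting more than 700 GB are routed to the dynamic bigmem.q; everything else stays on archgen.q
process {
    queue = { task.memory > 700.GB ? 'bigmem.q' : 'archgen.q' }
}
```
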
@@ -65,11 +65,11 @@ sbatch nfcore-rnaseq.sh

By default, available mount points are:

* /bank
* /home
* /save
* /work
* /work2
- /bank
- /home
- /save
- /work
- /work2

To have access to specific other mount point (such as nosave or project)
you can add a config profile file with option `-profile` and which contain:

@@ -84,5 +84,5 @@ A local copy of several genomes are available in `/bank` directory. See
our [databank page](http://bioinfo.genotoul.fr/index.php/resources-2/databanks/)
to search for your favorite genome.

>NB: You will need an account to use the HPC cluster on Genotoul in order
to run the pipeline. If in doubt see [http://bioinfo.genotoul.fr/](http://bioinfo.genotoul.fr/).
> NB: You will need an account to use the HPC cluster on Genotoul in order
> to run the pipeline. If in doubt see [http://bioinfo.genotoul.fr/](http://bioinfo.genotoul.fr/).

@@ -19,4 +19,4 @@ Alternatively, if you are running the pipeline regularly for genomes that arent

All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large. All of the main output files will be saved in the `results/` directory.

>NB: Nextflow will need to submit the jobs via LSF to the HPC cluster. This can be done from an interactive or normal job. If in doubt contact Scientific Computing.
> NB: Nextflow will need to submit the jobs via LSF to the HPC cluster. This can be done from an interactive or normal job. If in doubt contact Scientific Computing.

@@ -6,7 +6,8 @@ To use, run the pipeline with `-profile ifb_core`. This will download and launch

## How to use on IFB core

Before running the pipeline you will need to load Nextflow using the environment module system on IFB core. You can do this by issuing the commands below:
Here is [the link to the cluster's documentation](https://ifb-elixirfr.gitlab.io/cluster/doc/quick-start/).
Before running the pipeline you will need to load Nextflow and other dependencies using the environment module system on IFB core. You can do this by issuing the commands below:

```bash
# Login to a compute node

@@ -14,7 +15,10 @@ srun --pty bash

## Load Nextflow and Singularity environment modules
module purge
module load nextflow/20.04.1
module load nextflow
module load singularity
module load openjdk

# Run a downloaded/git-cloned nextflow workflow from
nextflow run \\

@@ -37,4 +41,4 @@ A local copy of several genomes are available in `/shared/bank` directory. See

our [databank page](https://ifb-elixirfr.gitlab.io/cluster/doc/banks/)
to search for your favorite genome.

>NB: You will need an account to use the HPC cluster on IFB core in order to run the pipeline. If in doubt contact IT or go to [account page](https://my.cluster.france-bioinformatique.fr/manager2/login).
> NB: You will need an account to use the HPC cluster on IFB core in order to run the pipeline. If in doubt contact IT or go to [account page](https://my.cluster.france-bioinformatique.fr/manager2/login).

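Putting the pieces above together, a minimal end-to-end sketch might look like this; the pipeline name, input sheet and output directory are placeholders rather than IFB-specific values:

```bash
# Sketch only: adapt pipeline, input and output to your own analysis
srun --pty bash            # get a shell on a compute node
module purge
module load nextflow
module load singularity
module load openjdk
nextflow run nf-core/rnaseq -profile ifb_core --input samplesheet.csv --outdir results
```
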
@@ -12,7 +12,7 @@ module load anaconda3/personal

conda install -c bioconda nextflow
```

>NB: You will need an account to use the HPC cluster CX1 in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
>NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial,medbio` instead.
>NB: You will need a restricted access account to use the HPC cluster MEDBIO.
> NB: You will need an account to use the HPC cluster CX1 in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial,medbio` instead.
> NB: You will need a restricted access account to use the HPC cluster MEDBIO.

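For example (the pipeline name is a placeholder):

```bash
# Default CX1 submission
nextflow run nf-core/<pipeline> -profile imperial

# MEDBIO submission (restricted access), as noted above
nextflow run nf-core/<pipeline> -profile imperial,medbio
```
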
@@ -4,5 +4,5 @@ All nf-core pipelines have been successfully configured for use on the JAX Sumne

To use, run the pipeline with `-profile jax`. This will download and launch the [`jax.config`](../conf/jax.config) which has been pre-configured with a setup suitable for JAX Sumner cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline and slurm will be used as well.

>NB: You will need an account to use the HPC cluster JAX in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow should not be executed on the login nodes. If in doubt contact IT.
> NB: You will need an account to use the HPC cluster JAX in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow should not be executed on the login nodes. If in doubt contact IT.

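Because Nextflow itself should not run on a login node, one option is to submit the head job to SLURM; this is only a sketch, and the job name and resource requests are assumptions:

```bash
# Run the Nextflow head job as a batch job instead of on a login node
sbatch --job-name=nf-head --time=24:00:00 --mem=8G \
  --wrap "nextflow run nf-core/<pipeline> -profile jax"
```
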
@@ -2,7 +2,7 @@

All nf-core pipelines have been successfully configured for use on the ALICE and SPECTRE cluster at the University of Leicester.

To use, run the pipeline with `-profile leicester`. This will download and launch the [`leicester.config`](../conf/leicester.config ) which has been pre-configured with a setup suitable for the Leicester cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
To use, run the pipeline with `-profile leicester`. This will download and launch the [`leicester.config`](../conf/leicester.config) which has been pre-configured with a setup suitable for the Leicester cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

>NB: You will need an account to use the ALICE and SPECTRE cluster in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use the ALICE and SPECTRE cluster in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

@@ -23,12 +23,12 @@ The configuration file will load prerequisite modules for users (`Java` & `Singu

## Queue Resources

| Queue | Hostnames | Max Memory | Max CPUS | Max Time |
|---------|----------------|------------|----------|----------|
| ------- | -------------- | ---------- | -------- | -------- |
| MSC | compute[01-03] | 32GB | 16 | 336.h |
| Normal | compute[10-29] | 64GB | 16 | 240.h |
| Highmem | compute[04-09] | 128GB | 32 | 2880.h |

***
---

The configuration profile design is very simple. If your process exceeds 64GB memory or 16 cpus, it is sent to the `highmem` queue. If not, it is sent to the `normal` queue. Please do not use the `MSC` queue; it is reserved for Masters students.

@@ -10,7 +10,7 @@ Currently profiles for the following clusters are supported: `cobra`, `raven`

All profiles use `singularity` as the corresponding containerEngine. To prevent repeatedly downloading the same singularity image for every pipeline run, for all profiles we recommend specifying a cache location in your `~/.bash_profile` with the `$NXF_SINGULARITY_CACHEDIR` bash variable.

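For example, add something like the following to `~/.bash_profile` (the cache path is a placeholder; pick a directory you own with enough space):

```bash
# Re-use a single Singularity image cache across all pipeline runs
export NXF_SINGULARITY_CACHEDIR=/path/to/your/singularity_cache
```
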
>NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. Check the [MPCDF documentation](https://www.mpcdf.mpg.de/services/computing).
> NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. Check the [MPCDF documentation](https://www.mpcdf.mpg.de/services/computing).

## Global Profiles

@@ -33,4 +33,4 @@ Example: `nextflow run -profile munin,docker`

A local copy of the iGenomes resource has been made available on the MUNIN cluster so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline.
You can do this by simply using the `--genome <GENOME_ID>` parameter.

>NB: You will need an account to use the MUNIN cluster in order to run the pipeline. If in doubt contact @szilva.
> NB: You will need an account to use the MUNIN cluster in order to run the pipeline. If in doubt contact @szilva.

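For example (pipeline and genome key are placeholders; any key defined in the pipeline's `igenomes.config` should work):

```bash
nextflow run nf-core/<pipeline> -profile munin --genome GRCh38
```
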
@@ -21,5 +21,5 @@ A partial local copy of the iGenomes resource is available on Biowulf. This is a

You can do this by simply using the `--genome <GENOME_ID>` parameter.

>NB: You will need an account to use the HPC cluster on Biowulf in order to run the pipeline. If in doubt contact CIT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster. The master process submitting jobs should be run either as a batch job or on an interactive node - not on the biowulf login node. If in doubt contact Biowulf staff.
> NB: You will need an account to use the HPC cluster on Biowulf in order to run the pipeline. If in doubt contact CIT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster. The master process submitting jobs should be run either as a batch job or on an interactive node - not on the biowulf login node. If in doubt contact Biowulf staff.

10 docs/oist.md

@@ -25,9 +25,9 @@ ml bioinfo-ugrp-modules

ml Other/Nextflow
```

>NB: You will need an account to use the _Deigo_ cluster in order to run the
>pipeline. If in doubt contact IT.
> NB: You will need an account to use the _Deigo_ cluster in order to run the
> pipeline. If in doubt contact IT.
>
>NB: Nextflow will submit the jobs via the SLURM scheduler to the HPC cluster
>and as such the commands above will have to be executed on one of the login
>nodes. If in doubt contact IT.
> NB: Nextflow will submit the jobs via the SLURM scheduler to the HPC cluster
> and as such the commands above will have to be executed on one of the login
> nodes. If in doubt contact IT.

@@ -14,4 +14,4 @@ Example: `nextflow run nf-core/ampliseq -profile binac`

Specific configurations for BINAC has been made for ampliseq.

* Specifies the `TZ` `ENV` variable to be `Europe/Berlin` to fix a QIIME2 issue
- Specifies the `TZ` `ENV` variable to be `Europe/Berlin` to fix a QIIME2 issue

@@ -14,4 +14,4 @@ Example: `nextflow run nf-core/ampliseq -profile uppmax`

Specific configurations for UPPMAX has been made for ampliseq.

* Makes sure that a fat node is allocated for training and applying a Bayesian classifier.
- Makes sure that a fat node is allocated for training and applying a Bayesian classifier.

15 docs/pipeline/mag/eva.md Normal file

@@ -0,0 +1,15 @@
# nf-core/configs: eva mag specific configuration

Extra specific configuration for the mag pipeline

## Usage

To use, run the pipeline with `-profile eva`.

This will download and launch the mag specific [`eva.config`](../../../conf/pipeline/mag/eva.config) which has been pre-configured with a setup suitable for the MPI-EVA cluster.

Example: `nextflow run nf-core/mag -profile eva`

## mag specific configurations for eva

Specific configurations for eva have been made for mag, primarily adjusting SGE memory requirements of Java tools (e.g. FastQC).

@@ -14,5 +14,5 @@ Example: `nextflow run nf-core/rnafusion -profile munin`

Specific configurations for `MUNIN` has been made for rnafusion.

* `cpus`, `memory` and `time` max requirements.
* Paths to specific references and indexes
- `cpus`, `memory` and `time` max requirements.
- Paths to specific references and indexes

@@ -16,34 +16,34 @@ Specific configurations for `MUNIN` has been made for rnavar.

Genome references

* Path to `fasta`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa`
* Path to `fasta_fai`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa.fai`
* Path to `gtf`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.gtf`
* Path to `gene_bed`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.bed`
- Path to `fasta`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa`
- Path to `fasta_fai`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa.fai`
- Path to `gtf`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.gtf`
- Path to `gene_bed`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.bed`

Known genome resources

* Path to `dbsnp`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz`
* Path to `dbsnp_tbi`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz.tbi`
* Path to `known_indels`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz`
* Path to `known_indels_tbi`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi`
- Path to `dbsnp`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz`
- Path to `dbsnp_tbi`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz.tbi`
- Path to `known_indels`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz`
- Path to `known_indels_tbi`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi`

STAR index

* Path to `star_index`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/STAR.2.7.9a_2x151bp/`
* Params `read_length` set to `151`
- Path to `star_index`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/STAR.2.7.9a_2x151bp/`
- Params `read_length` set to `151`

Variant annotation configurations

* Params `annotation_cache` and `cadd_cache` set to `true`
* Params `snpeff_db` set to `GRCh38.99`
* Params `vep_cache_version` set to `99`
* Params `vep_genome` set to `GRCh38`
* Path to `snpeff_cache`: `/data1/cache/snpEff/`
* Path to `vep_cache`: `/data1/cache/VEP/`
* Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
* Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
* Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
* Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
* Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
* Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`
- Params `annotation_cache` and `cadd_cache` set to `true`
- Params `snpeff_db` set to `GRCh38.99`
- Params `vep_cache_version` set to `99`
- Params `vep_genome` set to `GRCh38`
- Path to `snpeff_cache`: `/data1/cache/snpEff/`
- Path to `vep_cache`: `/data1/cache/VEP/`
- Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
- Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
- Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
- Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
- Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
- Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`

@@ -14,14 +14,14 @@ Example: `nextflow run nf-core/sarek -profile munin`

Specific configurations for `MUNIN` has been made for sarek.

* Params `annotation_cache` and `cadd_cache` set to `true`
* Params `vep_cache_version` set to `95`
* Path to `snpeff_cache`: `/data1/cache/snpEff/`
* Path to `vep_cache`: `/data1/cache/VEP/`
* Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
* Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
* Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
* Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
* Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
* Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`
* Load module `Sentieon` for Processes with `sentieon` labels
- Params `annotation_cache` and `cadd_cache` set to `true`
- Params `vep_cache_version` set to `95`
- Path to `snpeff_cache`: `/data1/cache/snpEff/`
- Path to `vep_cache`: `/data1/cache/VEP/`
- Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
- Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
- Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
- Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
- Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
- Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`
- Load module `Sentieon` for Processes with `sentieon` labels

@@ -14,5 +14,4 @@ Example: `nextflow run nf-core/sarek -profile uppmax`

Specific configurations for uppmax clusters has been made for sarek.

* Set paths to reference genomes
* Set path to singularity containers for `irma`
- Set paths to reference genomes

@@ -14,8 +14,8 @@ Example: `nextflow run nf-core/scflow -profile imperial`

Specific configurations for Imperial have been made for scflow.

* Singularity `enabled` and `autoMounts` set to `true`
* Singularity `cacheDir` path set to an RDS location
* Singularity `runOptions` path set to bind (`-B`) RDS paths with container paths.
* Params `ctd_folder` set to an RDS location.
* Parms `ensembl_mappings` set to an RDS location.
- Singularity `enabled` and `autoMounts` set to `true`
- Singularity `cacheDir` path set to an RDS location
- Singularity `runOptions` path set to bind (`-B`) RDS paths with container paths.
- Params `ctd_folder` set to an RDS location.
- Params `ensembl_mappings` set to an RDS location.

19 docs/pipeline/taxprofiler/eva.md Normal file

@@ -0,0 +1,19 @@
# nf-core/configs: eva taxprofiler specific configuration

Extra specific configuration for the taxprofiler pipeline

## Usage

To use, run the pipeline with `-profile eva`.

This will download and launch the taxprofiler specific [`eva.config`](../../../conf/pipeline/taxprofiler/eva.config) which has been pre-configured with a setup suitable for the MPI-EVA cluster.

Example: `nextflow run nf-core/taxprofiler -profile eva`

## taxprofiler specific configurations for eva

Specific configurations for eva have been made for taxprofiler.

### General profiles

- The general MPI-EVA profile runs with default nf-core/taxprofiler parameters, but with modifications to account for issues SGE has with Java and Python tools, namely: BBDUK, MALT, MetaPhlAn3, and MEGAN

19 docs/pipeline/taxprofiler/hasta.md Normal file

@@ -0,0 +1,19 @@
# nf-core/configs: hasta taxprofiler specific configuration

Extra specific configuration for the taxprofiler pipeline

## Usage

To use, run the pipeline with `-profile hasta`.

This will download and launch the taxprofiler specific [`hasta.config`](../../../conf/pipeline/taxprofiler/hasta.config) which has been pre-configured with a setup suitable for the hasta cluster.

Example: `nextflow run nf-core/taxprofiler -profile hasta`

## taxprofiler specific configurations for hasta

Specific configurations for hasta have been made for taxprofiler.

### General profiles

- The general hasta profile runs with default nf-core/taxprofiler parameters, but with modifications to account for issues with: BBDUK and MALT.

@@ -16,5 +16,5 @@ git clone this repo, copy the `prince.config` from the conf folder and then you

`nextflow run nf-core/<pipeline name> -c prince.config <additional flags>`

>NB: You will need an account to use the HPC cluster Prince in order to run the pipeline. If in doubt contact the HPC admins.
>NB: Rather than using the nextflow module, I recommend you install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason this is better than using the module for nextflow on the cluster, is that the development cycle of nextflow is rapid and it's easy to update your installation yourself: `nextflow self-update`.
> NB: You will need an account to use the HPC cluster Prince in order to run the pipeline. If in doubt contact the HPC admins.
> NB: Rather than using the nextflow module, I recommend you install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason this is better than using the module for nextflow on the cluster, is that the development cycle of nextflow is rapid and it's easy to update your installation yourself: `nextflow self-update`.

18 docs/sahmri.md Normal file

@@ -0,0 +1,18 @@
# nf-core/configs: SAHMRI HPC Configuration

All nf-core pipelines have been successfully configured for use on the HPC cluster at [SAHMRI](https://sahmri.org.au/).
To use, run the pipeline with `-profile sahmri`. This will download and launch the [`sahmri.config`](../conf/sahmri.config) which has been pre-configured
with a setup suitable for the SAHMRI HPC cluster. Using this profile, either a docker image containing all of the required software will be downloaded,
and converted to a Singularity image or a Singularity image downloaded directly before execution of the pipeline.

The latest version of Nextflow is not installed by default on the SAHMRI HPC cluster. You will need to install it into a directory you have write access to.
Follow these instructions from the Nextflow documentation.

- Install Nextflow : [here](https://www.nextflow.io/docs/latest/getstarted.html#)

All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline
has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.

> NB: You will need an account to use the SAHMRI HPC cluster in order to run the pipeline. If in doubt contact the ICT Service Desk.
> NB: Nextflow will need to submit the jobs via SLURM to the SAHMRI HPC cluster and as such the commands above will have to be executed on the login
> node. If in doubt contact ICT.

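A minimal sketch of the install step described above, assuming `~/bin` is on your `PATH` (any directory you can write to works):

```bash
# Self-contained Nextflow install into a user-writable directory
cd ~/bin
curl -s https://get.nextflow.io | bash   # creates an executable named `nextflow`
./nextflow -version                      # quick sanity check
```
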
@@ -20,5 +20,5 @@ module load Singularity/2.6.0

A local copy of the iGenomes resource has been made available on PROFILE CLUSTER so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline.
You can do this by simply using the `--genome <GENOME_ID>` parameter.

>NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

@@ -58,15 +58,15 @@ All jobs will be submitted to fat nodes using this method, so it's only for use

The UPPMAX nf-core configuration profile uses the `hostname` of the active environment to automatically apply the following resource limits:

* `rackham`
  * cpus available: 20 cpus
  * memory available: 125 GB
* `bianca`
  * cpus available: 16 cpus
  * memory available: 109 GB
* `irma`
  * cpus available: 16 cpus
  * memory available: 250 GB
- `rackham`
  - cpus available: 20 cpus
  - memory available: 125 GB
- `bianca`
  - cpus available: 16 cpus
  - memory available: 109 GB
- `miarka`
  - cpus available: 48 cpus
  - memory available: 357 GB

## Development config

@@ -83,10 +83,10 @@ To use it, submit with `-profile uppmax,devel`.

> :warning: For more information, please follow the following guides:
>
> * [UPPMAX `bianca` user guide](http://uppmax.uu.se/support/user-guides/bianca-user-guide/).
> * [nf-core guide for running offline](https://nf-co.re/usage/offline)
> * [nf-core `tools` guide for downloading pipelines for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use).
> * [UPPMAX `Singularity` guide](https://www.uppmax.uu.se/support-sv/user-guides/singularity-user-guide/).
> - [UPPMAX `bianca` user guide](http://uppmax.uu.se/support/user-guides/bianca-user-guide/).
> - [nf-core guide for running offline](https://nf-co.re/usage/offline)
> - [nf-core `tools` guide for downloading pipelines for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use).
> - [UPPMAX `Singularity` guide](https://www.uppmax.uu.se/support-sv/user-guides/singularity-user-guide/).

For security reasons, there is no internet access on `bianca` so you can't download from or upload files to the cluster directly.
Before running a nf-core pipeline on `bianca` you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the `wharf` area to your own `bianca` project.

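A rough outline of that offline preparation, run on an internet-connected machine first; the pipeline name is a placeholder and the exact flags depend on your nf-core/tools version, so treat the linked guides as authoritative:

```bash
# On a machine with internet access: fetch the pipeline and (optionally) its Singularity images
pip install nf-core
nf-core download rnaseq
# Then transfer the downloaded archive to your bianca project through the wharf area
```
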
@@ -14,5 +14,5 @@ module load singularity

All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.

>NB: You will need an account to use the HPC cluster on Ganymede in order to run the pipeline. If in doubt contact Ganymedeadmins.
>NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact GanymedeAdmins.
> NB: You will need an account to use the HPC cluster on Ganymede in order to run the pipeline. If in doubt contact Ganymedeadmins.
> NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact GanymedeAdmins.

@@ -12,5 +12,5 @@ module purge

module load singularity
```

>NB: You will need an account to use the HPC cluster on Sysbio in order to run the pipeline. If in doubt contact OIT.
>NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact OIT.
> NB: You will need an account to use the HPC cluster on Sysbio in order to run the pipeline. If in doubt contact OIT.
> NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact OIT.

@@ -4,5 +4,5 @@ All nf-core pipelines have been successfully configured for use on the UZH clust

To use, run the pipeline with `-profile uzh`. This will download and launch the [`uzh.config`](../conf/uzh.config) which has been pre-configured with a setup suitable for the UZH cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

>NB: You will need an account to use the HPC cluster UZH in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
> NB: You will need an account to use the HPC cluster UZH in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

@@ -9,6 +9,6 @@ module load singularity

NXF_OPTS="-Xmx500m" MALLOC_ARENA_MAX=4 nextflow run <pipeline>
```

>NB: You will need an account to use the HPC in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on the login node. If in doubt contact IT.
>NB: The submit node limits the amount of memory available to each user. The `NXF_OPTS` and `MALLOC_ARENA_MAX` parameters above prevent Nextflow from allocating more memory than the scheduler will allow.
> NB: You will need an account to use the HPC in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on the login node. If in doubt contact IT.
> NB: The submit node limits the amount of memory available to each user. The `NXF_OPTS` and `MALLOC_ARENA_MAX` parameters above prevent Nextflow from allocating more memory than the scheduler will allow.

35 docs/vsc_ugent.md Normal file

@@ -0,0 +1,35 @@
# nf-core/configs: University of Ghent High Performance Computing Infrastructure (VSC)

> **NB:** You will need an [account](https://www.ugent.be/hpc/en/access/faq/access) to use the HPC cluster to run the pipeline.

First you should go to the cluster you want to run the pipeline on. You can check what clusters have the most free space on this [link](https://shieldon.ugent.be:8083/pbsmon-web-users/). Use the following commands to easily switch between clusters:

```shell
module purge
module swap cluster/<CLUSTER>
```

Before running the pipeline you will need to create a PBS script to submit as a job.

```bash
#!/bin/bash

module load Nextflow

nextflow run <pipeline> -profile vsc_ugent,<CLUSTER> <Add your other parameters>
```

All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
The config contains a `cleanup` command that removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully then the `work/` dir should be removed manually to save storage space. The default work directory is set to `$VSC_SCRATCH_VO_USER/work` per this configuration.

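If you want to keep a particular run's intermediates separate, you can also point Nextflow at an explicit work directory (illustrative path):

```bash
nextflow run <pipeline> -profile vsc_ugent,<CLUSTER> -w $VSC_SCRATCH_VO_USER/work/my_run
```
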
You can also add several TORQUE options to the PBS script. More about this on this [link](http://hpcugent.github.io/vsc_user_docs/pdf/intro-HPC-linux-gent.pdf#appendix.B).

Submit your job to the cluster with the following command:

```shell
qsub <script name>.pbs
```

> **NB:** The profile only works for the clusters `skitty`, `swalot`, `victini`, `kirlia` and `doduo`.

> **NB:** The default location of the `work/` and `singularity/` (cache directory for images) directories is `$VSC_SCRATCH_VO_USER`.

@@ -24,6 +24,7 @@ profiles {
    cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" }
    ccga_dx { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" }
    ccga_med { includeConfig "${params.custom_config_base}/conf/ccga_med.config" }
    cedars { includeConfig "${params.custom_config_base}/conf/cedars.config" }
    cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
    cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" }
    cheaha { includeConfig "${params.custom_config_base}/conf/cheaha.config" }

@@ -57,6 +58,7 @@ profiles {
    phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
    prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
    rosalind { includeConfig "${params.custom_config_base}/conf/rosalind.config" }
    sahmri { includeConfig "${params.custom_config_base}/conf/sahmri.config" }
    sanger { includeConfig "${params.custom_config_base}/conf/sanger.config"}
    seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"}
    uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" }

@@ -66,4 +68,5 @@ profiles {
    utd_sysbio { includeConfig "${params.custom_config_base}/conf/utd_sysbio.config" }
    uzh { includeConfig "${params.custom_config_base}/conf/uzh.config" }
    vai { includeConfig "${params.custom_config_base}/conf/vai.config" }
    vsc_ugent { includeConfig "${params.custom_config_base}/conf/vsc_ugent.config" }
}

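As a usage note: on clusters without direct internet access, the profiles above can also be loaded from a local clone of this repository by overriding `custom_config_base` (the paths below are illustrative):

```bash
# Clone the configs repo somewhere visible to the cluster, then point pipelines at it
git clone https://github.com/nf-core/configs.git /path/to/configs
nextflow run nf-core/<pipeline> -profile <your_profile> --custom_config_base /path/to/configs
```
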
13 pipeline/mag.config Normal file

@@ -0,0 +1,13 @@
/*
 * -------------------------------------------------
 * nfcore/mag custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/mag folder and imported
 * under a profile name here.
 */

profiles {
    eva { includeConfig "${params.custom_config_base}/conf/pipeline/mag/eva.config" }
}

14 pipeline/taxprofiler.config Normal file

@@ -0,0 +1,14 @@
/*
 * -------------------------------------------------
 * nfcore/taxprofiler custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/taxprofiler folder and imported
 * under a profile name here.
 */

profiles {
    hasta { includeConfig "${params.custom_config_base}/conf/pipeline/taxprofiler/hasta.config" }
    eva { includeConfig "${params.custom_config_base}/conf/pipeline/taxprofiler/eva.config" }
}