Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-11-22 00:26:03 +00:00)

Merge pull request #354 from Emiller88/prettier
Commit 2c18f8c5da

50 changed files with 383 additions and 363 deletions

.editorconfig (new file, 12 lines)

@@ -0,0 +1,12 @@
root = true

[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
indent_size = 4
indent_style = space

[*.{md,yml,yaml}]
indent_size = 2

.github/PULL_REQUEST_TEMPLATE.md (17 changed lines)

@@ -5,16 +5,17 @@ about: A new cluster config

Please follow these steps before submitting your PR:

* [ ] If your PR is a work in progress, include `[WIP]` in its title
* [ ] Your PR targets the `master` branch
* [ ] You've included links to relevant issues, if any
- [ ] If your PR is a work in progress, include `[WIP]` in its title
- [ ] Your PR targets the `master` branch
- [ ] You've included links to relevant issues, if any

Steps for adding a new config profile:
* [ ] Add your custom config file to the `conf/` directory
* [ ] Add your documentation file to the `docs/` directory
* [ ] Add your custom profile to the `nfcore_custom.config` file in the top-level directory
* [ ] Add your custom profile to the `README.md` file in the top-level directory
* [ ] Add your profile name to the `profile:` scope in `.github/workflows/main.yml`

- [ ] Add your custom config file to the `conf/` directory
- [ ] Add your documentation file to the `docs/` directory
- [ ] Add your custom profile to the `nfcore_custom.config` file in the top-level directory
- [ ] Add your custom profile to the `README.md` file in the top-level directory
- [ ] Add your profile name to the `profile:` scope in `.github/workflows/main.yml`

<!--
If you require/still waiting for a review, please feel free to request from @nf-core/configs-team

.github/markdownlint.yml (deleted, 5 lines)

@@ -1,5 +0,0 @@
# Markdownlint configuration file
default: true,
line-length: false
no-duplicate-header:
  siblings_only: true

.github/workflows/linting.yml (30 changed lines)

@@ -1,18 +1,22 @@
name: Markdown linting
# This workflow is triggered on pushes and PRs to the repository.
on: [push, pull_request]
name: Code Linting
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  Markdown:
    runs-on: ubuntu-18.04
  prettier:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-node@v2
      - name: Install markdownlint
        run: |
          npm install -g markdownlint-cli
      - name: Run Markdownlint
        run: |
          markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml
      - name: Check out repository
        uses: actions/checkout@v2

      - name: Install NodeJS
        uses: actions/setup-node@v2

      - name: Install Prettier
        run: npm install -g prettier

      - name: Run Prettier --check
        run: prettier --check ${GITHUB_WORKSPACE}

.github/workflows/main.yml (113 changed lines)

@@ -3,7 +3,6 @@ name: Configs tests
on: [pull_request, push]

jobs:

  test_all_profiles:
    runs-on: ubuntu-latest
    name: Check if all profiles are tested

@@ -30,62 +29,62 @@ jobs:
    strategy:
      matrix:
        profile:
          - 'abims'
          - 'alice'
          - 'aws_tower'
          - 'awsbatch'
          - 'azurebatch'
          - 'bi'
          - 'bigpurple'
          - 'binac'
          - 'biohpc_gen'
          - 'cambridge'
          - 'leicester'
          - 'cbe'
          - 'ccga_dx'
          - 'ccga_med'
          - 'cfc'
          - 'cfc_dev'
          - 'cheaha'
          - 'computerome'
          - 'crick'
          - 'denbi_qbic'
          - 'ebc'
          - 'eddie'
          - 'eva'
          - 'fgcz'
          - 'genotoul'
          - 'genouest'
          - 'gis'
          - 'google'
          - 'hasta'
          - 'hebbe'
          - 'icr_davros'
          - 'ifb_core'
          - 'imperial'
          - 'jax'
          - 'lugh'
          - 'marvin'
          - 'maestro'
          - 'mpcdf'
          - 'munin'
          - 'nu_genomics'
          - 'nihbiowulf'
          - 'oist'
          - 'pasteur'
          - 'phoenix'
          - 'prince'
          - 'rosalind'
          - 'sahmri'
          - 'sanger'
          - 'seg_globe'
          - 'uct_hpc'
          - 'unibe_ibu'
          - 'uppmax'
          - 'utd_ganymede'
          - 'utd_sysbio'
          - 'uzh'
          - 'vai'
          - "abims"
          - "alice"
          - "aws_tower"
          - "awsbatch"
          - "azurebatch"
          - "bi"
          - "bigpurple"
          - "binac"
          - "biohpc_gen"
          - "cambridge"
          - "leicester"
          - "cbe"
          - "ccga_dx"
          - "ccga_med"
          - "cfc"
          - "cfc_dev"
          - "cheaha"
          - "computerome"
          - "crick"
          - "denbi_qbic"
          - "ebc"
          - "eddie"
          - "eva"
          - "fgcz"
          - "genotoul"
          - "genouest"
          - "gis"
          - "google"
          - "hasta"
          - "hebbe"
          - "icr_davros"
          - "ifb_core"
          - "imperial"
          - "jax"
          - "lugh"
          - "marvin"
          - "maestro"
          - "mpcdf"
          - "munin"
          - "nu_genomics"
          - "nihbiowulf"
          - "oist"
          - "pasteur"
          - "phoenix"
          - "prince"
          - "rosalind"
          - "sahmri"
          - "sanger"
          - "seg_globe"
          - "uct_hpc"
          - "unibe_ibu"
          - "uppmax"
          - "utd_ganymede"
          - "utd_sysbio"
          - "uzh"
          - "vai"
    steps:
      - uses: actions/checkout@v1
      - name: Install Nextflow

.prettierignore (new file, 7 lines)

@@ -0,0 +1,7 @@
# gitignore
.nextflow*
work/
data/
results/
.DS_Store
*.code-workspace

.prettierrc.yml (new file, 1 line)

@@ -0,0 +1 @@
printWidth: 120

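Taken together, `.prettierrc.yml`, `.prettierignore`, and the new `prettier` job in `.github/workflows/linting.yml` replace the old markdownlint check. Contributors can run the same check locally before pushing; a minimal sketch, assuming Node.js and npm are available (Prettier picks up `.prettierrc.yml` and `.prettierignore` from the repository root automatically):

```bash
# Install Prettier globally, as the CI job does
npm install -g prettier

# Report files that do not match the configured style (this is what CI runs)
prettier --check .

# Rewrite files in place to fix any reported formatting issues
prettier --write .
```
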
README.md (176 changed lines)

@@ -6,20 +6,20 @@ A repository for hosting Nextflow configuration files containing custom paramete

## Table of contents <!-- omit in toc -->

* [Using an existing config](#using-an-existing-config)
* [Configuration and parameters](#configuration-and-parameters)
* [Offline usage](#offline-usage)
* [Adding a new config](#adding-a-new-config)
* [Checking user hostnames](#checking-user-hostnames)
* [Testing](#testing)
* [Documentation](#documentation)
* [Uploading to `nf-core/configs`](#uploading-to-nf-coreconfigs)
* [Adding a new pipeline-specific config](#adding-a-new-pipeline-specific-config)
* [Pipeline-specific institutional documentation](#pipeline-specific-institutional-documentation)
* [Pipeline-specific documentation](#pipeline-specific-documentation)
* [Enabling pipeline-specific configs within a pipeline](#enabling-pipeline-specific-configs-within-a-pipeline)
* [Create the pipeline-specific `nf-core/configs` files](#create-the-pipeline-specific-nf-coreconfigs-files)
* [Help](#help)
- [Using an existing config](#using-an-existing-config)
- [Configuration and parameters](#configuration-and-parameters)
- [Offline usage](#offline-usage)
- [Adding a new config](#adding-a-new-config)
- [Checking user hostnames](#checking-user-hostnames)
- [Testing](#testing)
- [Documentation](#documentation)
- [Uploading to `nf-core/configs`](#uploading-to-nf-coreconfigs)
- [Adding a new pipeline-specific config](#adding-a-new-pipeline-specific-config)
- [Pipeline-specific institutional documentation](#pipeline-specific-institutional-documentation)
- [Pipeline-specific documentation](#pipeline-specific-documentation)
- [Enabling pipeline-specific configs within a pipeline](#enabling-pipeline-specific-configs-within-a-pipeline)
- [Create the pipeline-specific `nf-core/configs` files](#create-the-pipeline-specific-nf-coreconfigs-files)
- [Help](#help)

## Using an existing config

@@ -86,68 +86,68 @@ See [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs

Currently documentation is available for the following systems:

* [ABIMS](docs/abims.md)
* [ALICE](docs/alice.md)
* [AWSBATCH](docs/awsbatch.md)
* [AWS_TOWER](docs/aws_tower.md)
* [AZUREBATCH](docs/azurebatch.md)
* [BIGPURPLE](docs/bigpurple.md)
* [BI](docs/bi.md)
* [BINAC](docs/binac.md)
* [BIOHPC_GEN](docs/biohpc_gen.md)
* [CAMBRIDGE](docs/cambridge.md)
* [CBE](docs/cbe.md)
* [CCGA_DX](docs/ccga_dx.md)
* [CCGA_MED](docs/ccga_med.md)
* [CFC](docs/cfc.md)
* [CHEAHA](docs/cheaha.md)
* [Computerome](docs/computerome.md)
* [CRICK](docs/crick.md)
* [CZBIOHUB_AWS](docs/czbiohub.md)
* [DENBI_QBIC](docs/denbi_qbic.md)
* [EBC](docs/ebc.md)
* [EVA](docs/eva.md)
* [FGCZ](docs/fgcz.md)
* [GENOTOUL](docs/genotoul.md)
* [GENOUEST](docs/genouest.md)
* [GIS](docs/gis.md)
* [GOOGLE](docs/google.md)
* [HASTA](docs/hasta.md)
* [HEBBE](docs/hebbe.md)
* [ICR_DAVROS](docs/icr_davros.md)
* [IMPERIAL](docs/imperial.md)
* [JAX](docs/jax.md)
* [LUGH](docs/lugh.md)
* [MAESTRO](docs/maestro.md)
* [MARVIN](docs/marvin.md)
* [MPCDF](docs/mpcdf.md)
* [MUNIN](docs/munin.md)
* [NU_GENOMICS](docs/nu_genomics.md)
* [NIHBIOWULF](docs/nihbiowulf.md)
* [OIST](docs/oist.md)
* [PASTEUR](docs/pasteur.md)
* [PHOENIX](docs/phoenix.md)
* [PRINCE](docs/prince.md)
* [ROSALIND](docs/rosalind.md)
* [SANGER](docs/sanger.md)
* [SEG_GLOBE](docs/seg_globe.md)
* [UCT_HPC](docs/uct_hpc.md)
* [UNIBE_IBU](docs/unibe_ibu.md)
* [UPPMAX](docs/uppmax.md)
* [UTD_GANYMEDE](docs/utd_ganymede.md)
* [UTD_SYSBIO](docs/utd_sysbio.md)
* [UZH](docs/uzh.md)
* [VAI](docs/vai.md)
- [ABIMS](docs/abims.md)
- [ALICE](docs/alice.md)
- [AWSBATCH](docs/awsbatch.md)
- [AWS_TOWER](docs/aws_tower.md)
- [AZUREBATCH](docs/azurebatch.md)
- [BIGPURPLE](docs/bigpurple.md)
- [BI](docs/bi.md)
- [BINAC](docs/binac.md)
- [BIOHPC_GEN](docs/biohpc_gen.md)
- [CAMBRIDGE](docs/cambridge.md)
- [CBE](docs/cbe.md)
- [CCGA_DX](docs/ccga_dx.md)
- [CCGA_MED](docs/ccga_med.md)
- [CFC](docs/cfc.md)
- [CHEAHA](docs/cheaha.md)
- [Computerome](docs/computerome.md)
- [CRICK](docs/crick.md)
- [CZBIOHUB_AWS](docs/czbiohub.md)
- [DENBI_QBIC](docs/denbi_qbic.md)
- [EBC](docs/ebc.md)
- [EVA](docs/eva.md)
- [FGCZ](docs/fgcz.md)
- [GENOTOUL](docs/genotoul.md)
- [GENOUEST](docs/genouest.md)
- [GIS](docs/gis.md)
- [GOOGLE](docs/google.md)
- [HASTA](docs/hasta.md)
- [HEBBE](docs/hebbe.md)
- [ICR_DAVROS](docs/icr_davros.md)
- [IMPERIAL](docs/imperial.md)
- [JAX](docs/jax.md)
- [LUGH](docs/lugh.md)
- [MAESTRO](docs/maestro.md)
- [MARVIN](docs/marvin.md)
- [MPCDF](docs/mpcdf.md)
- [MUNIN](docs/munin.md)
- [NU_GENOMICS](docs/nu_genomics.md)
- [NIHBIOWULF](docs/nihbiowulf.md)
- [OIST](docs/oist.md)
- [PASTEUR](docs/pasteur.md)
- [PHOENIX](docs/phoenix.md)
- [PRINCE](docs/prince.md)
- [ROSALIND](docs/rosalind.md)
- [SANGER](docs/sanger.md)
- [SEG_GLOBE](docs/seg_globe.md)
- [UCT_HPC](docs/uct_hpc.md)
- [UNIBE_IBU](docs/unibe_ibu.md)
- [UPPMAX](docs/uppmax.md)
- [UTD_GANYMEDE](docs/utd_ganymede.md)
- [UTD_SYSBIO](docs/utd_sysbio.md)
- [UZH](docs/uzh.md)
- [VAI](docs/vai.md)

### Uploading to `nf-core/configs`

[Fork](https://help.github.com/articles/fork-a-repo/) the [`nf-core/configs`](https://github.com/nf-core/configs/) repository to your own GitHub account.
Within the local clone of your fork:

* **add** the custom config file to the [`conf/`](https://github.com/nf-core/configs/tree/master/conf) directory
* **add** the documentation file to the [`docs/`](https://github.com/nf-core/configs/tree/master/docs) directory
* **edit** and add your custom profile to the [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) file in the top-level directory of the clone
* **edit** and add your custom profile to the [`README.md`](https://github.com/nf-core/configs/blob/master/README.md) file in the top-level directory of the clone
- **add** the custom config file to the [`conf/`](https://github.com/nf-core/configs/tree/master/conf) directory
- **add** the documentation file to the [`docs/`](https://github.com/nf-core/configs/tree/master/docs) directory
- **edit** and add your custom profile to the [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) file in the top-level directory of the clone
- **edit** and add your custom profile to the [`README.md`](https://github.com/nf-core/configs/blob/master/README.md) file in the top-level directory of the clone

In order to ensure that the config file is tested automatically with GitHub Actions please add your profile name to the `profile:` scope (under strategy matrix) in [`.github/workflows/main.yml`](.github/workflows/main.yml). If you forget to do this the tests will fail with the error:

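For reference, adding a profile to that test matrix is a one-line change; a sketch with a made-up profile name, mirroring the `.github/workflows/main.yml` hunk shown earlier in this diff:

```yaml
# .github/workflows/main.yml (excerpt); "myprofile" is a hypothetical example
strategy:
  matrix:
    profile:
      - "myprofile"
```
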
@@ -186,25 +186,25 @@ Note that pipeline-specific configs are not required and should only be added if

Currently documentation is available for the following pipelines within specific profiles:

* ampliseq
* [BINAC](docs/pipeline/ampliseq/binac.md)
* [UPPMAX](docs/pipeline/ampliseq/uppmax.md)
* eager
* [EVA](docs/pipeline/eager/eva.md)
* rnafusion
* [MUNIN](docs/pipeline/rnafusion/munin.md)
* sarek
* [MUNIN](docs/pipeline/sarek/munin.md)
* [UPPMAX](docs/pipeline/sarek/uppmax.md)
* rnavar
* [MUNIN](docs/pipeline/rnavar/munin.md)
- ampliseq
- [BINAC](docs/pipeline/ampliseq/binac.md)
- [UPPMAX](docs/pipeline/ampliseq/uppmax.md)
- eager
- [EVA](docs/pipeline/eager/eva.md)
- rnafusion
- [MUNIN](docs/pipeline/rnafusion/munin.md)
- sarek
- [MUNIN](docs/pipeline/sarek/munin.md)
- [UPPMAX](docs/pipeline/sarek/uppmax.md)
- rnavar
- [MUNIN](docs/pipeline/rnavar/munin.md)

### Pipeline-specific documentation

Currently documentation is available for the following pipeline:

* viralrecon
* [genomes](docs/pipeline/viralrecon/genomes.md)
- viralrecon
- [genomes](docs/pipeline/viralrecon/genomes.md)

### Enabling pipeline-specific configs within a pipeline

@@ -233,7 +233,7 @@ We will be notified automatically when you have created your pull request, and p
[Fork](https://help.github.com/articles/fork-a-repo/) the [`nf-core/configs`](https://github.com/nf-core/configs/) repository to your own GitHub account.
And add or edit the following files in the local clone of your fork.

* `pipeline/<PIPELINE>.config`
- `pipeline/<PIPELINE>.config`

If not already created, create the `pipeline/<PIPELINE>.config` file, and add your custom profile to the profile scope

@@ -243,18 +243,18 @@ profiles {
}
```

* `conf/pipeline/<PIPELINE>/<PROFILE>.config`
- `conf/pipeline/<PIPELINE>/<PROFILE>.config`

Add the custom configuration file to the `conf/pipeline/<PIPELINE>/` directory.
Make sure to add an extra `params` section with `params.config_profile_description`, `params.config_profile_contact` to the top of `pipeline/<PIPELINE>.config` and set to reasonable values.
Users will get information on who wrote the pipeline-specific configuration profile then when executing the nf-core pipeline and can report back if there are things missing for example.

* `docs/pipeline/<PIPELINE>/<PROFILE>.md`
- `docs/pipeline/<PIPELINE>/<PROFILE>.md`

Add the documentation file to the `docs/pipeline/<PIPELINE>/` directory.
You will also need to edit and add your custom profile to the [`README.md`](https://github.com/nf-core/configs/blob/master/README.md) file in the top-level directory of the clone.

* `README.md`
- `README.md`

Edit this file, and add the new pipeline-specific institutional profile to the list in the section Pipeline specific documentation

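To illustrate the files described above, here is a minimal sketch of a pipeline-specific config. The pipeline and profile names (`examplepipe`, `exampleprofile`) and the contact details are hypothetical, and the `includeConfig`/`params.custom_config_base` pattern is an assumption based on how the existing pipeline configs in this repository are structured:

```nextflow
// pipeline/examplepipe.config (hypothetical sketch)
params {
    config_profile_description = 'nf-core/examplepipe pipeline-specific settings'
    config_profile_contact     = 'Jane Doe (@janedoe)'
}

profiles {
    exampleprofile { includeConfig "${params.custom_config_base}/conf/pipeline/examplepipe/exampleprofile.config" }
}
```

The matching `conf/pipeline/examplepipe/exampleprofile.config` would then hold the site-specific process settings and reference paths, as in the pipeline-specific docs listed above.
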
docs/cambridge.md

@@ -15,4 +15,4 @@ has finished successfully because it can get quite large, and all of the main ou

> NB: You will need an account to use the Cambridge HPC cluster in order to run the pipeline. If in doubt contact IT.
> NB: Nextflow will need to submit the jobs via SLURM to the Cambridge HPC cluster and as such the commands above will have to be executed on one of the login
nodes. If in doubt contact IT.
> nodes. If in doubt contact IT.

docs/ccga_dx.md

@@ -5,4 +5,5 @@ Deployment and testing of nf-core pipelines at the CCGA DX cluster is on-going.
To use, run the pipeline with `-profile ccga_dx`. This will download and launch the [`ccga_dx.config`](../conf/ccga_dx.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

Before running the pipeline you will need to have Nextflow installed.

> NB: Access to the CCGA DX cluster is restricted to IKMB/CCGA employes. Please talk to Marc Hoeppner to get access (@marchoeppner).

docs/cheaha.md

@@ -17,7 +17,7 @@ All of the intermediate files required to run the pipeline will be stored in the

> NB: You will need an account to use the HPC cluster on Cheaha in order to run the pipeline. If in doubt contact UAB IT Research Computing.</br></br>
> NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes (or alternatively in an interactive partition, but be aware of time limit). If in doubt contact UAB IT Research Computing.</br></br>
>NB: Instead of using `module load Nextflow`, you may instead create a conda environment (e.g: `conda create -p $USER_DATA/nf-core_nextflow_env nf-core nextflow`) if you would like to have a more personalized environment of Nextflow (versions which may not be modules yet) and nf-core tools. This __requires__ you to instead do the following:
> NB: Instead of using `module load Nextflow`, you may instead create a conda environment (e.g: `conda create -p $USER_DATA/nf-core_nextflow_env nf-core nextflow`) if you would like to have a more personalized environment of Nextflow (versions which may not be modules yet) and nf-core tools. This **requires** you to instead do the following:

```bash
module purge

docs/czbiohub.md

@@ -22,7 +22,7 @@ Now you can run pipelines with abandon!

### 2. Make a GitHub repo for your workflows (optional :)

To make sharing your pipelines and commands easy between your teammates, it's best to share code in a GitHub repository. One way is to store the commands in a Makefile ([example](https://github.com/czbiohub/kh-workflows/blob/master/nf-kmer-similarity/Makefile)) which can contain multiple `nextflow run` commands so that you don't need to remember the S3 bucket or output directory for every single one. [Makefiles](https://kbroman.org/minimal_make/) are broadly used in the software community for running many complex commands. Makefiles can have a lot of dependencies and be confusing, so we're only going to write *simple* Makefiles.
To make sharing your pipelines and commands easy between your teammates, it's best to share code in a GitHub repository. One way is to store the commands in a Makefile ([example](https://github.com/czbiohub/kh-workflows/blob/master/nf-kmer-similarity/Makefile)) which can contain multiple `nextflow run` commands so that you don't need to remember the S3 bucket or output directory for every single one. [Makefiles](https://kbroman.org/minimal_make/) are broadly used in the software community for running many complex commands. Makefiles can have a lot of dependencies and be confusing, so we're only going to write _simple_ Makefiles.

```bash
rnaseq:

@@ -125,7 +125,7 @@ For Human and Mouse, we use [GENCODE](https://www.gencodegenes.org/) gene annota

## High Priority Queue

If you would like to run with the *High Priority* queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.
If you would like to run with the _High Priority_ queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.

To use it, submit your run with with `-profile czbiohub_aws,highpriority`.

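The Makefile example above is cut off at the hunk boundary; a minimal sketch of the kind of target the paragraph describes (the bucket, sample sheet, and parameter names are hypothetical and vary between pipelines and releases):

```bash
# Hypothetical Makefile target wrapping a single nextflow run invocation
rnaseq:
	nextflow run nf-core/rnaseq \
		-profile czbiohub_aws \
		--input 's3://my-example-bucket/samplesheet.csv' \
		--outdir 's3://my-example-bucket/results/rnaseq'
```
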
docs/genotoul.md

@@ -65,11 +65,11 @@ sbatch nfcore-rnaseq.sh

By default, available mount points are:

* /bank
* /home
* /save
* /work
* /work2
- /bank
- /home
- /save
- /work
- /work2

To have access to specific other mount point (such as nosave or project)
you can add a config profile file with option `-profile` and which contain:

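The contents of that extra config file are cut off at the hunk boundary. As an illustration only, one way to expose an additional mount point is through Singularity bind options; the file name and path below are hypothetical, and the exact contents recommended by the genotoul documentation are not shown in this diff:

```nextflow
// extra_mounts.config (hypothetical), passed with: nextflow run <pipeline> -profile genotoul -c extra_mounts.config
singularity {
    runOptions = '-B /work/project'
}
```
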
@@ -85,4 +85,4 @@ our [databank page](http://bioinfo.genotoul.fr/index.php/resources-2/databanks/)
to search for your favorite genome.

> NB: You will need an account to use the HPC cluster on Genotoul in order
to run the pipeline. If in doubt see [http://bioinfo.genotoul.fr/](http://bioinfo.genotoul.fr/).
> to run the pipeline. If in doubt see [http://bioinfo.genotoul.fr/](http://bioinfo.genotoul.fr/).

docs/lugh.md

@@ -23,12 +23,12 @@ The configuration file will load prerequisite modules for users (`Java` & `Singu
## Queue Resources

| Queue | Hostnames | Max Memory | Max CPUS | Max Time |
|---------|----------------|------------|----------|----------|
| ------- | -------------- | ---------- | -------- | -------- |
| MSC | compute[01-03] | 32GB | 16 | 336.h |
| Normal | compute[10-29] | 64GB | 16 | 240.h |
| Highmem | compute[04-09] | 128GB | 32 | 2880.h |

***
---

The configuration profile design is very simple. If your process exceeds 64GB memory or 16 cpus, it is sent to the `highmem` queue. If not, it is sent to the `normal` queue. Please do not use the `MSC` queue, this is reserved for Masters students.

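That queue-selection rule maps naturally onto a dynamic `queue` directive; a sketch of how such logic is commonly written in a Nextflow config, shown as an illustration of the rule above rather than the literal contents of `conf/lugh.config`:

```nextflow
process {
    // Illustrative only: send oversized jobs to highmem, everything else to normal
    queue = { task.memory > 64.GB || task.cpus > 16 ? 'highmem' : 'normal' }
}
```
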
docs/pipeline/ampliseq/binac.md

@@ -14,4 +14,4 @@ Example: `nextflow run nf-core/ampliseq -profile binac`

Specific configurations for BINAC has been made for ampliseq.

* Specifies the `TZ` `ENV` variable to be `Europe/Berlin` to fix a QIIME2 issue
- Specifies the `TZ` `ENV` variable to be `Europe/Berlin` to fix a QIIME2 issue

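A sketch of how a setting like that is typically expressed in a Nextflow config `env` scope (illustrative; the actual `conf/pipeline/ampliseq/binac.config` contents are not shown in this diff):

```nextflow
// Sets the timezone for all processes, which addresses the QIIME2 issue mentioned above
env {
    TZ = 'Europe/Berlin'
}
```
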
docs/pipeline/ampliseq/uppmax.md

@@ -14,4 +14,4 @@ Example: `nextflow run nf-core/ampliseq -profile uppmax`

Specific configurations for UPPMAX has been made for ampliseq.

* Makes sure that a fat node is allocated for training and applying a Bayesian classifier.
- Makes sure that a fat node is allocated for training and applying a Bayesian classifier.

docs/pipeline/rnafusion/munin.md

@@ -14,5 +14,5 @@ Example: `nextflow run nf-core/rnafusion -profile munin`

Specific configurations for `MUNIN` has been made for rnafusion.

* `cpus`, `memory` and `time` max requirements.
* Paths to specific references and indexes
- `cpus`, `memory` and `time` max requirements.
- Paths to specific references and indexes

docs/pipeline/rnavar/munin.md

@@ -16,34 +16,34 @@ Specific configurations for `MUNIN` has been made for rnavar.

Genome references

* Path to `fasta`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa`
* Path to `fasta_fai`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa.fai`
* Path to `gtf`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.gtf`
* Path to `gene_bed`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.bed`
- Path to `fasta`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa`
- Path to `fasta_fai`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa.fai`
- Path to `gtf`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.gtf`
- Path to `gene_bed`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_annot.bed`

Known genome resources

* Path to `dbsnp`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz`
* Path to `dbsnp_tbi`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz.tbi`
* Path to `known_indels`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz`
* Path to `known_indels_tbi`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi`
- Path to `dbsnp`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz`
- Path to `dbsnp_tbi`: `/data1/references/annotations/GATK_bundle/dbsnp_146.hg38.vcf.gz.tbi`
- Path to `known_indels`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz`
- Path to `known_indels_tbi`: `/data1/references/annotations/GATK_bundle/Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi`

STAR index

* Path to `star_index`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/STAR.2.7.9a_2x151bp/`
* Params `read_length` set to `151`
- Path to `star_index`: `/data1/references/CTAT_GenomeLib_v37_Mar012021/GRCh38_gencode_v37_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/STAR.2.7.9a_2x151bp/`
- Params `read_length` set to `151`

Variant annotation configurations

* Params `annotation_cache` and `cadd_cache` set to `true`
* Params `snpeff_db` set to `GRCh38.99`
* Params `vep_cache_version` set to `99`
* Params `vep_genome` set to `GRCh38`
* Path to `snpeff_cache`: `/data1/cache/snpEff/`
* Path to `vep_cache`: `/data1/cache/VEP/`
* Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
* Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
* Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
* Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
* Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
* Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`
- Params `annotation_cache` and `cadd_cache` set to `true`
- Params `snpeff_db` set to `GRCh38.99`
- Params `vep_cache_version` set to `99`
- Params `vep_genome` set to `GRCh38`
- Path to `snpeff_cache`: `/data1/cache/snpEff/`
- Path to `vep_cache`: `/data1/cache/VEP/`
- Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
- Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
- Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
- Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
- Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
- Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`

docs/pipeline/sarek/munin.md

@@ -14,14 +14,14 @@ Example: `nextflow run nf-core/sarek -profile munin`

Specific configurations for `MUNIN` has been made for sarek.

* Params `annotation_cache` and `cadd_cache` set to `true`
* Params `vep_cache_version` set to `95`
* Path to `snpeff_cache`: `/data1/cache/snpEff/`
* Path to `vep_cache`: `/data1/cache/VEP/`
* Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
* Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
* Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
* Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
* Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
* Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`
* Load module `Sentieon` for Processes with `sentieon` labels
- Params `annotation_cache` and `cadd_cache` set to `true`
- Params `vep_cache_version` set to `95`
- Path to `snpeff_cache`: `/data1/cache/snpEff/`
- Path to `vep_cache`: `/data1/cache/VEP/`
- Path to `pon`: `/data1/PON/vcfs/BTB.PON.vcf.gz`
- Path to `pon_index`: `/data1/PON/vcfs/BTB.PON.vcf.gz.tbi`
- Path to `cadd_indels`: `/data1/cache/CADD/v1.4/InDels.tsv.gz`
- Path to `cadd_indels_tbi`: `/data1/cache/CADD/v1.4/InDels.tsv.gz.tbi`
- Path to `cadd_wg_snvs`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz`
- Path to `cadd_wg_snvs_tbi`: `/data1/cache/CADD/v1.4/whole_genome_SNVs.tsv.gz.tbi`
- Load module `Sentieon` for Processes with `sentieon` labels

docs/pipeline/sarek/uppmax.md

@@ -14,5 +14,5 @@ Example: `nextflow run nf-core/sarek -profile uppmax`

Specific configurations for uppmax clusters has been made for sarek.

* Set paths to reference genomes
* Set path to singularity containers for `irma`
- Set paths to reference genomes
- Set path to singularity containers for `irma`

docs/pipeline/scflow/imperial.md

@@ -14,8 +14,8 @@ Example: `nextflow run nf-core/scflow -profile imperial`

Specific configurations for Imperial have been made for scflow.

* Singularity `enabled` and `autoMounts` set to `true`
* Singularity `cacheDir` path set to an RDS location
* Singularity `runOptions` path set to bind (`-B`) RDS paths with container paths.
* Params `ctd_folder` set to an RDS location.
* Parms `ensembl_mappings` set to an RDS location.
- Singularity `enabled` and `autoMounts` set to `true`
- Singularity `cacheDir` path set to an RDS location
- Singularity `runOptions` path set to bind (`-B`) RDS paths with container paths.
- Params `ctd_folder` set to an RDS location.
- Parms `ensembl_mappings` set to an RDS location.

docs/sahmri.md

@@ -15,4 +15,4 @@ has finished successfully because it can get quite large, and all of the main ou

> NB: You will need an account to use the SAHMRI HPC cluster in order to run the pipeline. If in doubt contact the ICT Service Desk.
> NB: Nextflow will need to submit the jobs via SLURM to the SAHMRI HPC cluster and as such the commands above will have to be executed on the login
node. If in doubt contact ICT.
> node. If in doubt contact ICT.

docs/uppmax.md

@@ -58,15 +58,15 @@ All jobs will be submitted to fat nodes using this method, so it's only for use

The UPPMAX nf-core configuration profile uses the `hostname` of the active environment to automatically apply the following resource limits:

* `rackham`
* cpus available: 20 cpus
* memory available: 125 GB
* `bianca`
* cpus available: 16 cpus
* memory available: 109 GB
* `irma`
* cpus available: 16 cpus
* memory available: 250 GB
- `rackham`
- cpus available: 20 cpus
- memory available: 125 GB
- `bianca`
- cpus available: 16 cpus
- memory available: 109 GB
- `irma`
- cpus available: 16 cpus
- memory available: 250 GB

## Development config

@@ -83,10 +83,10 @@ To use it, submit with `-profile uppmax,devel`.

> :warning: For more information, please follow the following guides:
>
> * [UPPMAX `bianca` user guide](http://uppmax.uu.se/support/user-guides/bianca-user-guide/).
> * [nf-core guide for running offline](https://nf-co.re/usage/offline)
> * [nf-core `tools` guide for downloading pipelines for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use).
> * [UPPMAX `Singularity` guide](https://www.uppmax.uu.se/support-sv/user-guides/singularity-user-guide/).
> - [UPPMAX `bianca` user guide](http://uppmax.uu.se/support/user-guides/bianca-user-guide/).
> - [nf-core guide for running offline](https://nf-co.re/usage/offline)
> - [nf-core `tools` guide for downloading pipelines for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use).
> - [UPPMAX `Singularity` guide](https://www.uppmax.uu.se/support-sv/user-guides/singularity-user-guide/).

For security reasons, there is no internet access on `bianca` so you can't download from or upload files to the cluster directly.
Before running a nf-core pipeline on `bianca` you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the `wharf` area to your own `bianca` project.

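A hedged sketch of that offline preparation, run on a machine with internet access; the pipeline name and release are placeholders, and the container/compression flags plus the `wharf` transfer procedure should be taken from `nf-core download --help` and the UPPMAX `bianca` user guide linked above:

```bash
# Download a pipeline release (plus institutional configs) for offline use
nf-core download <pipeline> -r <release>

# Then transfer the resulting bundle to your bianca project via the wharf
# area and point Nextflow at the local copy when running on the cluster.
```
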