
Merge pull request #4 from nf-core/master

Syncing
This commit is contained in:
James A. Fellows Yates 2019-11-26 15:24:44 +01:00 committed by GitHub
commit c7c8c096f8
48 changed files with 945 additions and 131 deletions

5
.github/markdownlint.yml vendored Normal file

@ -0,0 +1,5 @@
# Markdownlint configuration file
default: true
line-length: false
no-duplicate-header:
  siblings_only: true

20
.github/workflows/linting.yml vendored Normal file

@ -0,0 +1,20 @@
name: Markdown linting
# This workflow is triggered on pushes and PRs to the repository.
on: [push, pull_request]
jobs:
  Markdown:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-node@v1
        with:
          node-version: '10'
      - name: Install markdownlint
        run: |
          npm install -g markdownlint-cli
      - name: Run Markdownlint
        run: |
          markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml

29
.github/workflows/main.yml vendored Normal file

@ -0,0 +1,29 @@
name: Configs tests
on: [pull_request, push]
jobs:
  test_all_profiles:
    runs-on: ubuntu-latest
    name: Check if all profiles are tested
    steps:
      - uses: actions/checkout@v1
      - name: Check whether profiles are all tested
        run: |
          python ${GITHUB_WORKSPACE}/bin/cchecker.py ${GITHUB_WORKSPACE}/nfcore_custom.config ${GITHUB_WORKSPACE}/.github/workflows/main.yml
  profile_test:
    runs-on: ubuntu-latest
    name: Run ${{ matrix.profile }} profile
    needs: test_all_profiles
    strategy:
      matrix:
        profile: ['awsbatch', 'bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga', 'cfc', 'crick', 'denbi_qbic', 'genouest', 'gis', 'hebbe', 'kraken', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hex', 'uppmax_devel', 'uppmax', 'uzh']
    steps:
      - uses: actions/checkout@v1
      - name: Install Nextflow
        run: |
          wget -qO- get.nextflow.io | bash
          sudo mv nextflow /usr/local/bin/
      - name: Check ${{ matrix.profile }} profile
        env:
          SCRATCH: '~'
        run: nextflow run ${GITHUB_WORKSPACE}/configtest.nf --custom_config_base=${GITHUB_WORKSPACE} -profile ${{ matrix.profile }}

1
.gitignore vendored

@ -3,3 +3,4 @@ work/
data/
results/
.DS_Store
*.code-workspace


@ -1,29 +0,0 @@
sudo: required
language: python
jdk: openjdk8
services:
  - docker
python:
  - '3.6'
cache: pip
matrix:
  fast_finish: true
install:
  # Install Nextflow
  - mkdir /tmp/nextflow && cd /tmp/nextflow
  - wget -qO- get.nextflow.io | bash
  - sudo ln -s /tmp/nextflow/nextflow /usr/local/bin/nextflow
  - mkdir -p ${TRAVIS_BUILD_DIR}/tests && cd ${TRAVIS_BUILD_DIR}/tests
env:
  - NXF_VER='18.10.1' # Specify a minimum NF version that should be tested and work
  - NXF_VER='' # Plus: get the latest NF version and check that it works
script:
  # Run the pipeline with the test profile and test remote config
  - |
    grep "{.*includeConfig.*[a-z]*\.config\"" ${TRAVIS_BUILD_DIR}/nfcore_custom.config | \
    tr -s ' ' | \
    cut -d " " -f 2 | \
    xargs -I {} nextflow run ${TRAVIS_BUILD_DIR}/configtest.nf -profile {}


@ -1,20 +1,22 @@
<img src="docs/images/nf-core-logo.png" width="400">
# [![nf-core/configs](docs/images/nfcore-configs_logo.png)](https://github.com/nf-core/configs)
# [nf-core/configs](https://github.com/nf-core/configs)
[![Build Status](https://travis-ci.org/nf-core/configs.svg?branch=master)](https://travis-ci.org/nf-core/configs)
[![Lint Status](https://github.com/nf-core/configs/workflows/nfcore%20configs%20tests/badge.svg)](https://github.com/nf-core/configs/workflows/nfcore%20configs%20tests/badge.svg)
A repository for hosting nextflow config files containing custom parameters required to run nf-core pipelines at different Institutions.
## Table of contents
* [Using an existing config](#using-an-existing-config)
* [![nf-core/configs](https://github.com/nf-core/configs)](#nf-coreconfigshttpsgithubcomnf-coreconfigs)
* [Table of contents](#table-of-contents)
* [Using an existing config](#using-an-existing-config)
* [Configuration and parameters](#configuration-and-parameters)
* [Offline usage](#offline-usage)
* [Adding a new config](#adding-a-new-config)
* [Adding a new config](#adding-a-new-config)
* [Checking user hostnames](#checking-user-hostnames)
* [Testing](#testing)
* [Documentation](#documentation)
* [Uploading to `nf-core/configs`](#uploading-to-nf-coreconfigs)
* [Help](#help)
* [Help](#help)
## Using an existing config
@ -24,7 +26,7 @@ The Nextflow [`-c`](https://www.nextflow.io/docs/latest/config.html) parameter c
The config files hosted in this repository define a set of parameters which are specific to compute environments at different Institutions but generic enough to be used with all nf-core pipelines.
All nf-core pipelines inherit the functionality provided by Nextflow, and as such custom config files can contain parameters/definitions that are available to both. For example, if you have the ability to use [Singularity](https://singularity.lbl.gov/) on your HPC you can add and customise the Nextflow [`singularity`](https://www.nextflow.io/docs/latest/config.html#scope-singularity) scope in your config file. Similarly, you can define a Nextflow [`executor`](https://www.nextflow.io/docs/latest/executor.html) depending on the job submission process available on your cluster. In contrast, the `params` section in your custom config file will typically define parameters that are specific to nf-core pipelines.
All nf-core pipelines inherit the functionality provided by Nextflow, and as such custom config files can contain parameters/definitions that are available to both. For example, if you have the ability to use [Singularity](https://singularity.lbl.gov/) on your HPC you can add and customize the Nextflow [`singularity`](https://www.nextflow.io/docs/latest/config.html#scope-singularity) scope in your config file. Similarly, you can define a Nextflow [`executor`](https://www.nextflow.io/docs/latest/executor.html) depending on the job submission process available on your cluster. In contrast, the `params` section in your custom config file will typically define parameters that are specific to nf-core pipelines.
You should be able to get a good idea as to how other people are customising the execution of their nf-core pipelines by looking at some of the config files in [`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf).
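As a rough illustration, a minimal institutional config combining these three elements might look like the sketch below (all values are hypothetical placeholders, not a real site profile):

```nextflow
// Minimal sketch of a custom institutional config (hypothetical values)
singularity {
  enabled    = true
  autoMounts = true
}

process {
  executor = 'slurm'    // match the scheduler available on your cluster
  queue    = 'compute'  // hypothetical queue name
}

params {
  config_profile_description = 'Example institutional profile'
  max_memory = 128.GB
  max_cpus   = 16
  max_time   = 48.h
}
```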
@ -82,31 +84,43 @@ You will have to create a [Markdown document](https://www.markdownguide.org/gett
See [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs) for examples.
Currently documentation is available for the following clusters:
Currently documentation is available for the following systems:
* [AWSBATCH](docs/awsbatch.md)
* [BIGPURPLE](docs/bigpurple.md)
* [BINAC](docs/binac.md)
* [CBE](docs/cbe.md)
* [CCGA](docs/ccga.md)
* [CCGA_DX](/docs/ccga_dx.md)
* [CFC](docs/cfc.md)
* [CRICK](docs/crick.md)
* [CZBIOHUB_AWS](docs/czbiohub.md)
* [CZBIOHUB_AWS_HIGHPRIORITY](docs/czbiohub.md)
* [DENBI_QBIC](docs/denbi_qbic.md)
* [GENOUEST](docs/genouest.md)
* [GIS](docs/gis.md)
* [HEBBE](docs/hebbe.md)
* [MENDEL](docs/mendel.md)
* [KRAKEN](docs/kraken.md)
* [MUNIN](docs/munin.md)
* [PASTEUR](docs/pasteur.md)
* [PHOENIX](docs/phoenix.md)
* [PRINCE](docs/prince.md)
* [SHH](docs/shh.md)
* [UCT_HEX](docs/uct_hex.md)
* [UPPMAX-DEVEL](docs/uppmax-devel.md)
* [UPPMAX](docs/uppmax.md)
* [UPPMAX_DEVEL](docs/uppmax.md)
* [UZH](docs/uzh.md)
### Uploading to `nf-core/configs`
[Fork](https://help.github.com/articles/fork-a-repo/) the `nf-core/configs` repository to your own GitHub account. Within the local clone of your fork add the custom config file to the [`conf/`](https://github.com/nf-core/configs/tree/master/conf) directory, and the documentation file to the [`docs/`](https://github.com/nf-core/configs/tree/master/docs) directory. You will also need to edit and add your custom profile to the [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) file in the top-level directory of the clone.
Afterwards, make sure to edit the `.github/workflows/main.yml` file and add your profile name to the alphabetically sorted `profile:` scope. This way, it will be tested automatically using GitHub Actions.
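As an illustration, a hypothetical profile called `myinstitute` would be wired into `nfcore_custom.config` with a one-line entry following the pattern already used there (the same name would then be added to the `profile:` matrix in `.github/workflows/main.yml`):

```nextflow
// Hypothetical entry inside the profiles block of nfcore_custom.config
profiles {
  myinstitute { includeConfig "${params.custom_config_base}/conf/myinstitute.config" }
}
```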
Commit and push these changes to your local clone on GitHub, and then [create a pull request](https://help.github.com/articles/creating-a-pull-request-from-a-fork/) on the `nf-core/configs` GitHub repo with the appropriate information.
We will be notified automatically when you have created your pull request, and providing that everything adheres to nf-core guidelines we will endeavour to approve your pull request as soon as possible.
## Help
If you have any questions or issues please send us a message on [Slack](https://nf-core-invite.herokuapp.com/).
If you have any questions or issues please send us a message on [Slack](https://nf-co.re/join/slack).

71
bin/cchecker.py Normal file

@ -0,0 +1,71 @@
#!/usr/bin/env python
#######################################################################
#######################################################################
## Created on November 26 to check pipeline configs for nf-core/configs
#######################################################################
#######################################################################
import os
import sys
import argparse
import re
############################################
############################################
## PARSE ARGUMENTS
############################################
############################################
Description = 'Double check custom config file and github actions file to test all cases'
Epilog = """Example usage: python cchecker.py <nfcore_custom.config> <github_actions_file>"""
argParser = argparse.ArgumentParser(description=Description, epilog=Epilog)
## REQUIRED PARAMETERS
argParser.add_argument('CUSTOM_CONFIG', help="Input nfcore_custom.config.")
argParser.add_argument('GITHUB_CONFIG', help="Input Github Actions YAML")
args = argParser.parse_args()
############################################
############################################
## MAIN FUNCTION
############################################
############################################
def check_config(Config, Github):
    regex = 'includeConfig*'
    ERROR_STR = 'ERROR: Please check config file! Did you really update the profiles?'

    ## CHECK Config First
    config_profiles = set()
    with open(Config, 'r') as cfg:
        for line in cfg:
            if re.search(regex, line):
                hit = line.split('/')[2].split('.')[0]
                config_profiles.add(hit.strip())

    ### Check Github Config now
    tests = set()
    ### Ignore these profiles
    ignore_me = ['czbiohub_aws_highpriority', 'czbiohub_aws']
    tests.update(ignore_me)
    with open(Github, 'r') as ghfile:
        for line in ghfile:
            if re.search('profile: ', line):
                line = line.replace('\'','').replace('[','').replace(']','').replace('\n','')
                profiles = line.split(':')[1].split(',')
                for p in profiles:
                    tests.add(p.strip())

    ### Check if sets are equal
    if tests == config_profiles:
        sys.exit(0)
    else:
        # Maybe report what is missing here too
        print("Tests don't seem to test these profiles properly!\n")
        print(config_profiles.symmetric_difference(tests))
        sys.exit(1)

check_config(Config=args.CUSTOM_CONFIG, Github=args.GITHUB_CONFIG)

14
conf/awsbatch.config Normal file

@ -0,0 +1,14 @@
//Nextflow config file for running on AWS batch
params {
awsqueue = false
awsregion = 'eu-west-1'
config_profile_contact = 'Alexander Peltzer (@apeltzer)'
config_profile_description = 'AWSBATCH Cloud Profile'
config_profile_name = 'AWSBATCH'
config_profile_url = 'https://aws.amazon.com/batch/'
tracedir = './'
}
process.executor = 'awsbatch'
executor.awscli = '/home/ec2-user/miniconda/bin/aws'

27
conf/bigpurple.config Normal file

@ -0,0 +1,27 @@
singularityDir = "/gpfs/scratch/${USER}/singularity_images_nextflow"
params {
config_profile_description = """
NYU School of Medicine BigPurple cluster profile provided by nf-core/configs.
module load both singularity/3.1 and squashfs-tools/4.3 before running the pipeline with this profile!!
Run from your scratch or lab directory - Nextflow makes a lot of files!!
Also consider running the pipeline on a compute node (srun --pty /bin/bash -t=01:00:00) the first time, as it will be pulling the docker image, which will be converted into a singularity image, which is heavy on the login node and will take some time. Subsequent runs can be done on the login node, as the docker image will only be pulled and converted once. By default the images will be stored in $singularityDir
""".stripIndent()
config_profile_contact = 'Tobias Schraink (@tobsecret)'
config_profile_url = 'https://github.com/nf-core/configs/blob/master/docs/bigpurple.md'
}
singularity {
enabled = true
autoMounts = true
cacheDir = singularityDir
}
process {
beforeScript = """
module load singularity/3.1
module load squashfs-tools/4.3
"""
.stripIndent()
executor = 'slurm'
}


@ -10,14 +10,20 @@ singularity {
}
process {
beforeScript = 'module load devel/singularity/3.0.1'
beforeScript = 'module load devel/singularity/3.4.2'
executor = 'pbs'
queue = 'short'
process.queue = { task.memory > 128.GB ? 'smp': task.time <= 20.m ? 'tiny' : task.time <= 48.h ? 'short' : task.time <= 168.h ? 'short' : 'long'}
}
params {
igenomes_base = '/nfsmounts/igenomes'
max_memory = 128.GB
max_memory = 1000.GB
max_cpus = 28
max_time = 48.h
}
weblog{
enabled = true
url = 'https://services.qbic.uni-tuebingen.de/flowstore/workflows'
}

23
conf/cbe.config Executable file

@ -0,0 +1,23 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'CLIP BATCH ENVIRONMENT (CBE) cluster profile provided by nf-core/configs'
config_profile_contact = 'Patrick Hüther (@phue)'
config_profile_url = 'http://www.gmi.oeaw.ac.at/'
}
process {
executor = 'slurm'
module = 'singularity/3.2.1'
queue = { task.memory <= 170.GB ? 'c' : 'm' }
clusterOptions = { task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' }
}
singularity.enabled = true
params {
params.max_time = 14.d
params.max_cpus = 36
params.max_memory = 4.TB
igenomes_ignore = true
igenomesIgnore = true //deprecated
}


@ -2,15 +2,21 @@
params {
config_profile_description = 'CCGA cluster profile provided by nf-core/configs.'
config_profile_contact = 'Marc Hoeppner (@marchoeppner)'
config_profile_url = 'https://www.ikmb.uni-kiel.de/'
config_profile_url = 'https://www.ccga.uni-kiel.de/'
}
/*
* -------------------------------------------------
* Nextflow config file with environment modules for RZCluster in Kiel
* Nextflow config file for CCGA cluster in Kiel
* -------------------------------------------------
*/
singularity {
enabled = true
runOptions = "-B /ifs -B /scratch -B /work_beegfs"
cacheDir = "/ifs/data/nfs_share/ikmb_repository/singularity_cache/"
}
executor {
queueSize=100
}
@ -29,4 +35,7 @@ params {
// illumina iGenomes reference file paths on RZCluster
igenomes_base = '/ifs/data/nfs_share/ikmb_repository/references/iGenomes/references/'
saveReference = true
max_memory = 128.GB
max_cpus = 16
max_time = 120.h
}

38
conf/ccga_dx.config Normal file

@ -0,0 +1,38 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'CCGA DX cluster profile provided by nf-core/configs.'
config_profile_contact = 'Marc Hoeppner (@marchoeppner)'
config_profile_url = 'https://www.ccga.uni-kiel.de/'
}
/*
* -------------------------------------------------
* Nextflow config file for CCGA cluster in Kiel
* -------------------------------------------------
*/
singularity {
enabled = true
runOptions = "-B /mnt"
}
executor {
queueSize=100
}
process {
// Global process config
executor = 'slurm'
queue = 'htc'
}
params {
// illumina iGenomes reference file paths on DX Cluster
igenomes_base = '/mnt/ld_ng_out/sukmb352/references/iGenomes/references/'
saveReference = true
max_memory = 250.GB
max_cpus = 20
max_time = 240.h
}


@ -7,13 +7,19 @@ params {
singularity {
enabled = true
cacheDir = '/nfsmounts/container'
}
process {
beforeScript = 'module load qbic/singularity_slurm/3.0.3'
beforeScript = 'module load devel/singularity/3.4.2'
executor = 'slurm'
}
weblog{
enabled = true
url = 'https://services.qbic.uni-tuebingen.de/flowstore/workflows'
}
params {
igenomes_base = '/nfsmounts/igenomes'
max_memory = 60.GB

130
conf/czbiohub_aws.config Normal file

@ -0,0 +1,130 @@
/*
* -------------------------------------------------
* Nextflow config file for Chan Zuckerberg Biohub
* -------------------------------------------------
* Defines reference genomes, using iGenome paths
* Imported under the default 'standard' Nextflow
* profile in nextflow.config
*/
//Profile config names for nf-core/configs
params {
config_profile_description = 'Chan Zuckerberg Biohub AWS Batch profile provided by nf-core/configs.'
config_profile_contact = 'Olga Botvinnik (@olgabot)'
config_profile_url = 'https://www.czbiohub.org/'
}
docker {
enabled = true
}
process {
executor = 'awsbatch'
queue = 'default-971039e0-830c-11e9-9e0b-02c5b84a8036'
errorStrategy = 'ignore'
}
workDir = "s3://czb-nextflow/intermediates/"
aws.region = 'us-west-2'
executor.awscli = '/home/ec2-user/miniconda/bin/aws'
params.tracedir = './'
params {
saveReference = true
// Largest SPOT instances available on AWS: https://ec2instances.info/
max_memory = 1952.GB
max_cpus = 96
max_time = 240.h
// Compatible with multiple versions of rnaseq pipeline
seq_center = "czbiohub"
seqCenter = "czbiohub"
// illumina iGenomes reference file paths on CZ Biohub reference s3 bucket
// No final slash because it's added later
igenomes_base = "s3://czbiohub-reference/igenomes"
// GENCODE (human + mouse) reference file paths on CZ Biohub reference s3 bucket
// No final slash because it's added later
gencode_base = "s3://czbiohub-reference/gencode"
transgenes_base = "s3://czbiohub-reference/transgenes"
// AWS configurations
awsregion = "us-west-2"
awsqueue = "nextflow"
igenomes_ignore = true
igenomesIgnore = true //deprecated
fc_extra_attributes = 'gene_name'
fc_group_features = 'gene_id'
fc_group_features_type = 'gene_type'
trim_pattern = '_+S\\d+'
// GENCODE GTF and fasta files
genomes {
'GRCh38' {
fasta = "${params.gencode_base}/human/v30/GRCh38.p12.genome.ERCC92.fa"
gtf = "${params.gencode_base}/human/v30/gencode.v30.annotation.ERCC92.gtf"
transcript_fasta = "${params.gencode_base}/human/v30/gencode.v30.transcripts.ERCC92.fa"
star = "${params.gencode_base}/human/v30/STARIndex/"
salmon_index = "${params.gencode_base}/human/v30/salmon_index/"
}
'GRCm38' {
fasta = "${params.gencode_base}/mouse/vM21/GRCm38.p6.genome.ERCC92.fa"
gtf = "${params.gencode_base}/mouse/vM21/gencode.vM21.annotation.ERCC92.gtf"
transcript_fasta = "${params.gencode_base}/mouse/vM21/gencode.vM21.transcripts.ERCC92.fa"
star = "${params.gencode_base}/mouse/vM21/STARIndex/"
}
}
transgenes {
'ChR2' {
fasta = "${params.transgenes_base}/ChR2/ChR2.fa"
gtf = "${params.transgenes_base}/ChR2/ChR2.gtf"
}
'Cre' {
fasta = "${params.transgenes_base}/Cre/Cre.fa"
gtf = "${params.transgenes_base}/Cre/Cre.gtf"
}
'ERCC' {
fasta = "${params.transgenes_base}/ERCC92/ERCC92.fa"
gtf = "${params.transgenes_base}/ERCC92/ERCC92.gtf"
}
'GCaMP6m' {
fasta = "${params.transgenes_base}/GCaMP6m/GCaMP6m.fa"
gtf = "${params.transgenes_base}/GCaMP6m/GCaMP6m.gtf"
}
'GFP' {
fasta = "${params.transgenes_base}/Gfp/Gfp.fa"
gtf = "${params.transgenes_base}/Gfp/Gfp.gtf"
}
'NpHR' {
fasta = "${params.transgenes_base}/NpHR/NpHR.fa"
gtf = "${params.transgenes_base}/NpHR/NpHR.gtf"
}
'RCaMP' {
fasta = "${params.transgenes_base}/RCaMP/RCaMP.fa"
gtf = "${params.transgenes_base}/RCaMP/RCaMP.gtf"
}
'RGECO' {
fasta = "${params.transgenes_base}/RGECO/RGECO.fa"
gtf = "${params.transgenes_base}/RGECO/RGECO.gtf"
}
'Tdtom' {
fasta = "${params.transgenes_base}/Tdtom/Tdtom.fa"
gtf = "${params.transgenes_base}/Tdtom/Tdtom.gtf"
}
'Car-T' {
fasta = "${params.transgenes_base}/car-t/car-t.fa"
gtf = "${params.transgenes_base}/car-t/car-t.gtf"
}
'zsGreen' {
fasta = "${params.transgenes_base}/zsGreen/zsGreen.fa"
gtf = "${params.transgenes_base}/zsGreen/zsGreen.gtf"
}
}
}


@ -0,0 +1,12 @@
/*
* -------------------------------------------------
* Nextflow config file for Chan Zuckerberg Biohub
* -------------------------------------------------
* Defines reference genomes, using iGenome paths
* Imported under the default 'standard' Nextflow
* profile in nextflow.config
*/
process {
queue = 'highpriority-971039e0-830c-11e9-9e0b-02c5b84a8036'
}

26
conf/denbi_qbic.config Normal file

@ -0,0 +1,26 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'de.NBI cluster profile provided by nf-core/configs.'
config_profile_contact = 'Alexander Peltzer (@apeltzer)'
config_profile_url = 'https://cloud.denbi.de/'
}
singularity {
enabled = true
}
process {
executor = 'pbs'
queue = 'batch'
}
params {
max_memory = 512.GB
max_cpus = 28
max_time = 960.h
}
weblog{
enabled = true
url = 'https://services.qbic.uni-tuebingen.de/flowstore/workflows'
}

24
conf/genouest.config Normal file

@ -0,0 +1,24 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'The GenOuest cluster profile'
config_profile_contact = 'Anthony Bretaudeau (@abretaud)'
config_profile_url = 'https://www.genouest.org'
}
singularity {
enabled = true
autoMounts = true
runOptions = '-B /scratch:/scratch -B /local:/local -B /db:/db'
}
process {
executor = 'slurm'
}
params {
igenomes_ignore = true
igenomesIgnore = true //deprecated
max_memory = 750.GB
max_cpus = 80
max_time = 336.h
}


@ -18,7 +18,8 @@ process {
}
params {
igenomesIgnore = true
igenomes_ignore = true
igenomesIgnore = true //deprecated
saveReference = true
max_memory = 64.GB
max_cpus = 20

24
conf/kraken.config Normal file

@ -0,0 +1,24 @@
//Profile config names for nf-core/configs
params {
config_profile_name = 'KRAKEN'
config_profile_description = 'Jenkins cluster provided by nf-core/configs.'
config_profile_contact = 'Maxime Garcia or Johannes Alneberg'
config_profile_url = 'kraken.dyn.scilifelab.se'
}
process {
executor = 'local'
}
docker {
enabled = true
mountFlags = 'z'
fixOwnership = true
}
params {
max_memory = 60.GB
max_cpus = 16
max_time = 72.h
igenomes_base = '/share/igenomes/'
}


@ -1,23 +0,0 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'GMI MENDEL cluster profile provided by nf-core/configs'
config_profile_contact = 'Patrick Hüther (@phue)'
config_profile_url = 'http://www.gmi.oeaw.ac.at/'
}
manifest {
nextflowVersion = '>=19.01.0'
}
process {
beforeScript = {'module load Singularity; module load Miniconda3'}
executor = 'pbspro'
clusterOptions = { "-P $params.project" }
}
params {
max_cpus = 32
max_memory = 128.GB
max_time = 192.h
igenomesIgnore = true
}


@ -1,20 +1,21 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'Big iron cluster profile provided by nf-core/configs.'
config_profile_description = 'MUNIN profile provided by nf-core/configs.'
config_profile_contact = 'Szilveszter Juhos (@szilva)'
config_profile_url = ''
}
process {
executor = 'local'
maxForks = 46
}
// To use singularity, use nextflow run -profile munin,singularity
singularity {
enabled = true
autoMounts = true
}
// To use docker instead of singularity, use nextflow run -profile munin,docker
// To use docker, use nextflow run -profile munin,docker
docker {
enabled = false
mountFlags = 'z'
@ -22,11 +23,11 @@ docker {
}
params {
saveReference = true
max_memory = 128.GB
max_cpus = 16
// general params
max_memory = 752.GB
max_cpus = 46
max_time = 72.h
// illumina iGenomes reference file paths on UPPMAX
igenomes_base = '/data0/btb/references/igenomes/'
// Local AWS iGenomes reference file paths on munin
igenomes_base = '/data1/references/igenomes/'
}

24
conf/pasteur.config Normal file

@ -0,0 +1,24 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'The Institut Pasteur HPC cluster profile'
config_profile_contact = 'Remi Planel (@rplanel)'
config_profile_url = 'https://research.pasteur.fr/en/service/tars-cluster'
}
singularity {
enabled = true
autoMounts = true
runOptions = '-B /local/scratch:/tmp'
}
process {
executor = 'slurm'
}
params {
igenomes_ignore = true
igenomesIgnore = true //deprecated
max_memory = 256.GB
max_cpus = 28
max_time = 24.h
}

23
conf/prince.config Normal file

@ -0,0 +1,23 @@
singularityDir = "$SCRATCH/singularity_images_nextflow"
singularityModule = "singularity/3.2.1"
squashfsModule = "squashfs/4.3"
params {
config_profile_description = """
NYU prince cluster profile provided by nf-core/configs.
Run from your scratch directory, the output files may be large!
Please consider running the pipeline on a compute node the first time, as it will be pulling the docker image, which will be converted into a singularity image, which is heavy on the login node. Subsequent runs can be done on the login node, as the docker image will only be pulled and converted once. By default the images will be stored in $singularityDir
""".stripIndent()
config_profile_contact = 'Tobias Schraink (@tobsecret)'
config_profile_url = 'https://github.com/nf-core/configs/blob/master/docs/prince.md'
}
singularity {
enabled = true
cacheDir = singularityDir
}
process {
beforeScript = "module load $singularityModule $squashfsModule"
executor = 'slurm'
}


@ -7,16 +7,24 @@ params {
singularity {
enabled = true
autoMounts = true
runOptions = '-B /run/shm:/run/shm'
cacheDir = "/projects1/singularity_scratch/cache/"
}
process {
executor = 'slurm'
queue = 'short'
queue = { task.memory > 756.GB ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
}
executor {
queueSize = 16
}
params {
max_memory = 256.GB
max_memory = 2.TB
max_cpus = 32
max_time = 2.h
max_time = 720.h
//Illumina iGenomes reference file path
igenomes_base = "/projects1/public_data/igenomes/"
}

4
docs/awsbatch.md Normal file

@ -0,0 +1,4 @@
# nf-core/configs: awsbatch Configuration
To be used with `awsbatch`.
Custom queue and region can be entered with `params.awsqueue` and `params.awsregion` respectively.
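For example, these could be overridden in a small custom config passed to Nextflow with `-c` (the queue name below is a hypothetical placeholder):

```nextflow
// Sketch: override the AWS Batch queue and region for your account
params {
  awsqueue  = 'my-batch-queue'   // hypothetical queue name
  awsregion = 'eu-west-1'
}
```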

25
docs/bigpurple.md Normal file

@ -0,0 +1,25 @@
# nf-core/configs: BigPurple Configuration
## nf-core pipelines that use this repo
All nf-core pipelines that use this config repo (which is most) can be run on BigPurple. **Before** running a pipeline for the first time, go into an interactive slurm session on a compute node (`srun --pty --time=02:00:00 -c 2`), as the docker image for the pipeline will need to be pulled and converted. Once in the interactive session:
```bash
module load singularity/3.1
module load squashfs-tools/4.3
```
Now, run the pipeline of your choice with `-profile bigpurple`. This will download and launch the bigpurple.config which has been pre-configured with a setup suitable for the BigPurple cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a singularity image before execution of the pipeline.
An example commandline:
`nextflow run nf-core/<pipeline name> -profile bigpurple <additional flags>`
## nf-core pipelines that do not use this repo
If the pipeline has not yet been configured to use this config, then you will have to do it manually.
git clone this repo, copy the `bigpurple.config` from the conf folder and then you can invoke the pipeline like this:
`nextflow run nf-core/<pipeline name> -c bigpurple.config <additional flags>`
>NB: You will need an account to use the HPC cluster BigPurple in order to run the pipeline. If in doubt contact MCIT.
>NB: You will need to install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason there is no module for nextflow on the cluster is that the development cycle of nextflow is rapid and it's easy to update yourself: `nextflow self-update`


@ -13,7 +13,5 @@ module load devel/java_jdk/1.8.0u112
module load devel/singularity/3.0.1
```
>NB: You will need an account to use the HPC cluster BINAC in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

17
docs/cbe.md Normal file

@ -0,0 +1,17 @@
# nf-core/configs: CBE Configuration
All nf-core pipelines have been successfully configured for use on the CLIP BATCH ENVIRONMENT (CBE) cluster at the Vienna BioCenter (VBC).
To use, run the pipeline with `-profile cbe`. This will download and launch the [`cbe.config`](../conf/cbe.config) which has been pre-configured with a setup suitable for the CBE cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on CBE. You can do this by issuing the commands below:
```bash
## Load Nextflow and Singularity environment modules
module purge
module load nextflow/19.04.0
module load singularity/3.2.1
```
>NB: You will need an account to use the HPC cluster on CBE in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

18
docs/ccga.md Normal file

@ -0,0 +1,18 @@
# nf-core/configs: CCGA Configuration
Deployment and testing of nf-core pipelines at the CCGA cluster is on-going.
To use, run the pipeline with `-profile ccga`. This will download and launch the [`ccga.config`](../conf/ccga.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on the cluster. You can do this by issuing the commands below:
```bash
## Load Nextflow and Singularity environment modules
module purge
module load IKMB
module load Java/1.8.0
module load Nextflow
module load singularity3.1.0
```
>NB: Access to the CCGA cluster is restricted to IKMB/CCGA employees. Please talk to Marc Hoeppner to get access (@marchoeppner).

8
docs/ccga_dx.md Normal file

@ -0,0 +1,8 @@
# nf-core/configs: CCGA DX Configuration
Deployment and testing of nf-core pipelines at the CCGA DX cluster is on-going.
To use, run the pipeline with `-profile ccga_dx`. This will download and launch the [`ccga_dx.config`](../conf/ccga_dx.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
Before running the pipeline you will need to have Nextflow installed.
>NB: Access to the CCGA DX cluster is restricted to IKMB/CCGA employees. Please talk to Marc Hoeppner to get access (@marchoeppner).


@ -10,10 +10,8 @@ Before running the pipeline you will need to load Nextflow and Singularity using
## Load Nextflow and Singularity environment modules
module purge
module load devel/java_jdk/1.8.0u121
module load qbic/singularity_slurm/3.0.1
module load qbic/singularity_slurm/3.0.3
```
>NB: You will need an account to use the HPC cluster CFC in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -20,5 +20,4 @@ Alternatively, if you are running the pipeline regularly for genomes that arent
All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
>NB: You will need an account to use the HPC cluster on CAMP in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

124
docs/czbiohub.md Normal file

@ -0,0 +1,124 @@
# nf-core/configs: CZ Biohub Configuration
All nf-core pipelines have been successfully configured for use with AWS Batch at the Chan Zuckerberg Biohub.
To use, run the pipeline with `-profile czbiohub_aws`. This will download and launch the [`czbiohub_aws.config`](../conf/czbiohub_aws.config) which has been pre-configured with a setup suitable for the AWS Batch. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
Ask Olga (olga.botvinnik@czbiohub.org) if you have any questions!
## Run the pipeline from a small AWS EC2 Instance
The pipeline will monitor and submit jobs to AWS Batch on your behalf. To ensure that the pipeline is successful, it will need to be run from a computer that has a constant internet connection. Unfortunately for us, Biohub has spotty WiFi, so even for short pipelines it is highly recommended to run them from AWS.
### 1. Start tmux
[tmux](https://hackernoon.com/a-gentle-introduction-to-tmux-8d784c404340) is a "Terminal Multiplexer" that allows for commands to continue running even when you have closed your laptop. Start a new tmux session with `tmux new` and we'll name this session `nextflow`.
```bash
tmux new -s nextflow
```
Now you can run pipelines with abandon!
### 2. Make a GitHub repo for your workflows (optional :)
To make sharing your pipelines and commands easy between your teammates, it's best to share code in a GitHub repository. One way is to store the commands in a Makefile ([example](https://github.com/czbiohub/kh-workflows/blob/master/nf-kmer-similarity/Makefile)) which can contain multiple `nextflow run` commands so that you don't need to remember the S3 bucket or output directory for every single one. [Makefiles](https://kbroman.org/minimal_make/) are broadly used in the software community for running many complex commands. Makefiles can have a lot of dependencies and be confusing, so we're only going to write *simple* Makefiles.
```bash
rnaseq:
	nextflow run -profile czbiohub_aws nf-core/rnaseq \
		--reads 's3://czb-maca/Plate_seq/24_month/180626_A00111_0166_BH5LNVDSXX/fastqs/*{R1,R2}*.fastq.gz' \
		--genome GRCm38 \
		--outdir s3://olgabot-maca/nextflow-test/

Human_Mouse_Zebrafish:
	nextflow run czbiohub/nf-kmer-similarity -latest -profile aws \
		--samples s3://kmer-hashing/hematopoeisis/smartseq2/human_mouse_zebrafish/samples.csv

Merkin2012_AWS:
	nextflow run czbiohub/nf-kmer-similarity -latest --sra "SRP016501" \
		-r olgabot/support-csv-directory-or-sra \
		-profile aws
```
In this example, one would run the `rnaseq` rule and the nextflow command beneath it with:
```bash
make rnaseq
```
If one wanted to run a different command, e.g. `human_mouse_zebrafish`, they would specify that command instead. For example:
```bash
make human_mouse_zebrafish
```
Makefiles are a very useful way of storing longer commands with short mnemonic words.
Once you [create a new repository](https://github.com/organizations/czbiohub/repositories/new) (best to initialize with a `.gitignore`, an MIT license and a `README`), clone that repository to your EC2 instance. For example, if the repository is called `kh-workflows`, this is what the command would look like:
```bash
git clone https://github.com/czbiohub/kh-workflows
```
Now both create and edit a `Makefile`:
```bash
cd kh-workflows
nano Makefile
```
Write your rule name with a colon after it, and the next line must start with a **tab**, not spaces. Once you're done, exit the program (the `^` shown in nano means "Control"), write the file, add it to git, commit it, and push it up to GitHub.
```bash
git add Makefile
git commit -m "Added makefile"
git push origin master
```
### 3. Run your workflow
Remember to specify `-profile czbiohub_aws` to grab the CZ Biohub-specific AWS configurations, and an `--outdir` with an AWS S3 bucket so you don't run out of space on your small AMI.
```bash
nextflow run -profile czbiohub_aws nf-core/rnaseq \
--reads 's3://czb-maca/Plate_seq/24_month/180626_A00111_0166_BH5LNVDSXX/fastqs/*{R1,R2}*.fastq.gz' \
--genome GRCm38 \
--outdir s3://olgabot-maca/nextflow-test/
```
### 4. If you lose connection, how do you restart the jobs
If you close your laptop, get onto the train, or lose WiFi connection, you may lose connection to AWS and may need to restart the jobs. To reattach, use the command `tmux attach` and you should see your Nextflow output! To get the named session, use:
```bash
tmux attach -t nextflow
```
To restart the jobs from where you left off, add the `-resume` flag to your `nextflow` command:
```bash
nextflow run -profile czbiohub_aws nf-core/rnaseq \
--reads 's3://czb-maca/Plate_seq/24_month/180626_A00111_0166_BH5LNVDSXX/fastqs/*{R1,R2}*.fastq.gz' \
--genome GRCm38 \
--outdir s3://olgabot-maca/nextflow-test/ \
-resume
```
It's important that this command be re-run from the same directory as there is a "hidden" `.nextflow` folder that contains all the metadata and information about previous runs.
## iGenomes specific configuration
A local copy of the iGenomes resource has been made available on `s3://czbiohub-reference/igenomes` (in `us-west-2` region) so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline.
You can do this by simply using the `--genome <GENOME_ID>` parameter.
For Human and Mouse, we use [GENCODE](https://www.gencodegenes.org/) gene annotations. This doesn't change how you would specify the genome name, only that the pipelines run with the `czbiohub_aws` profile would be with GENCODE rather than iGenomes.
>NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

8
docs/denbi_qbic.md Normal file

@ -0,0 +1,8 @@
# nf-core/configs: de.NBI QBIC Configuration
All nf-core pipelines have been successfully configured for use on the de.NBI Cloud cluster. This is a virtual cluster that has been set up using the [virtual cluster setup scripts](https://github.com/MaximilianHanussek/virtual_cluster_local_ips).
To use, run the pipeline with `-profile denbi_qbic`. This will download and launch the [`denbi_qbic.config`](../conf/denbi_qbic.config) which has been pre-configured with a setup suitable for the automatically created cluster. Using this profile, a Docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
>NB: You will need an account to use de.NBI Cluster in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.

38
docs/genouest.md Normal file

@ -0,0 +1,38 @@
# nf-core/configs: GenOuest Configuration
All nf-core pipelines have been successfully configured for use on the GenOuest cluster.
To use, run the pipeline with `-profile genouest`. This will download and launch the [`genouest.config`](../conf/genouest.config) which has been pre-configured with a setup suitable for the GenOuest cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## Running the workflow on the GenOuest cluster
Nextflow is installed on the GenOuest cluster. Some documentation is available on the [GenOuest website](https://www.genouest.org/howto/#nextflow).
You need to activate it like this:
```bash
source /local/env/envnextflow-19.07.0.sh
```
Nextflow manages each process as a separate job that is submitted to the cluster by using the sbatch command.
Nextflow shouldn't run directly on the submission node but on a compute node. Run nextflow from a compute node:
```bash
# Login to a compute node
srun --pty bash
# Load the dependencies if not done before
source /local/env/envnextflow-19.07.0.sh
# Run a downloaded/git-cloned nextflow workflow from
nextflow run \
/path/to/nf-core/workflow \
-resume \
-profile genouest \
--email my-email@example.org \
-c my-specific.config
...
# Or use the nf-core client
nextflow run nf-core/rnaseq ...
```

Binary image file added (14 KiB); contents not shown.

10
docs/kraken.md Normal file

@ -0,0 +1,10 @@
# nf-core/configs: KRAKEN Configuration
This profile can **only** be combined with `jenkins.config`. It is used for
testing pipelines with real data on an **in-house** cluster located at SciLifeLab.
To use, run the pipeline with `-profile kraken`. This will download and launch
the [`kraken.config`](../conf/kraken.config) which has been pre-configured to
test the pipeline using `docker` by default.
Example: `nextflow run -profile kraken,jenkins`


@ -1,30 +0,0 @@
# nf-core/configs: MENDEL Configuration
All nf-core pipelines have been successfully configured for use on the MENDEL CLUSTER at the Gregor Mendel Institute (GMI).
To use, run the pipeline with `-profile conda,mendel`. This will download and launch the [`mendel.config`](../conf/mendel.config) which has been pre-configured with a setup suitable for the MENDEL cluster. A Conda environment will be created automatically and software dependencies will be downloaded from ['bioconda'](https://bioconda.github.io/).
Theoretically, using `-profile singularity,mendel` would download a docker image containing all of the required software, and convert it to a Singularity image before execution of the pipeline. However, there is a regression in the Singularity deployment on MENDEL which renders containers downloaded from public repositories unusable because they lack the /lustre mountpoint.
If you want to run the pipeline containerized anyway you will have to build the image yourself (on a machine where you have root access) using the provided `Singularity` file in the pipeline repository:
```bash
cd /path/to/pipeline-repository
echo 'mkdir /lustre > Singularity'
singularity build nf-core-methylseq-custom.simg Singularity
```
After you copied the container image to the cluster filesystem, make sure to pass the path to the image to the pipeline with `-with-singularity /path/to/nf-core-methylseq-custom.simg`
Before running the pipeline you will need to load Nextflow and Conda using the environment module system on MENDEL. You can do this by issuing the commands below:
```bash
## Load Nextflow and Conda environment modules
module purge
module load Nextflow
module load Miniconda3 # not needed if using Singularity
```
>NB: You will need an account to use the HPC cluster in order to run the pipeline. If in doubt contact the HPC team.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact the HPC team.


@ -1,15 +1,31 @@
# nf-core/configs: MUNIN Configuration
All nf-core pipelines have been successfully configured for use on the MUNIN cluster aka big iron.
All nf-core pipelines have been successfully configured for use on the MUNIN cluster.
To use, run the pipeline with `-profile munin`. This will download and launch the [`munin.config`](../conf/munin.config) which has been pre-configured with a setup suitable for the MUNIN cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## Usage
To use, run the pipeline with `-profile munin`.
This will download and launch the [`munin.config`](../conf/munin.config) which has been pre-configured with a setup suitable for the MUNIN cluster.
Example: `nextflow run -profile munin`
## Docker
### Singularity
This is the default behavior of this configuration profile.
Using this profile, if no Singularity image is available, one will be downloaded from Docker Hub and converted to a Singularity image before execution of the pipeline.
It is also possible to specify the singularity profile:
Example: `nextflow run -profile munin,singularity`
### Docker
It is also possible to execute the pipeline using Docker.
Using this profile, if no Docker image is available, one will be downloaded from Docker Hub before execution of the pipeline.
Example: `nextflow run -profile munin,docker`
## Below is non-mandatory information on iGenomes-specific configuration

56
docs/pasteur.md Normal file

@ -0,0 +1,56 @@
# nf-core/configs: Institut Pasteur Configuration
All nf-core pipelines have been successfully configured for use on the tars cluster at the Institut Pasteur.
To use, run the pipeline with `-profile pasteur`. This will download and launch the [`pasteur.config`](../conf/pasteur.config) which has been pre-configured with a setup suitable for the Pasteur cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## Running the workflow on the Pasteur cluster
Nextflow is not installed by default on the Pasteur cluster.
- Install Nextflow: [here](https://www.nextflow.io/docs/latest/getstarted.html#)
Nextflow manages each process as a separate job that is submitted to the cluster by using the `sbatch` command.
Nextflow shouldn't run directly on the submission node but on a compute node.
The compute nodes don't have access to internet so you need to run it offline.
To do that:
1. Create a virtualenv to install nf-core
```bash
module purge
module load Python/3.6.0
module load java
module load singularity
cd /path/to/nf-core/workflows
virtualenv .venv -p python3
. .venv/bin/activate
```
2. Install nf-core: [here](https://nf-co.re/tools#installation)
3. Get nf-core pipeline and container: [here](https://nf-co.re/tools#downloading-pipelines-for-offline-use)
4. Get the nf-core Pasteur profile: [here](https://github.com/nf-core/rnaseq/blob/master/docs/usage.md#--custom_config_base)
5. Run nextflow on a compute node:
```bash
# create a terminal
tmux
# Get a compute node
salloc
# Load the dependencies if not done before
module purge
module load java
module load singularity
# Run nextflow workflow
nextflow run \
/path/to/pipeline-dir/from/step/3/workflow \
-resume \
-profile pasteur \
-with-singularity /path/to/pipeline-dir/from/step/3/singularity-images/singularity.img \
--email my-email@pasteur.fr \
--custom_config_base /path/to/configs/from/step/4/ \
-c my-specific.config
```

20
docs/prince.md Normal file

@ -0,0 +1,20 @@
# nf-core/configs: Prince Configuration
## nf-core pipelines that use this repo
All nf-core pipelines that use this config repo (which is most), can be run on prince. **Before** running a pipeline for the first time, go into an interactive slurm session on a compute node (`srun --pty --time=02:00:00 -c 2`), as the docker image for the pipeline will need to be pulled and converted.
Now, run the pipeline of your choice with `-profile prince`. This will download and launch the prince.config which has been pre-configured with a setup suitable for the prince cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a singularity image before execution of the pipeline. This step **takes time**!!
An example commandline:
`nextflow run nf-core/<pipeline name> -profile prince <additional flags>`
## nf-core pipelines that do not use this repo
If the pipeline has not yet been configured to use this config, then you will have to do it manually.
git clone this repo, copy the `prince.config` from the conf folder and then you can invoke the pipeline like this:
`nextflow run nf-core/<pipeline name> -c prince.config <additional flags>`
>NB: You will need an account to use the HPC cluster Prince in order to run the pipeline. If in doubt contact the HPC admins.
>NB: Rather than using the nextflow module, I recommend you install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason this is better than using the module for nextflow on the cluster is that the development cycle of nextflow is rapid and it's easy to update your installation yourself: `nextflow self-update`.


@ -10,8 +10,8 @@ To use, run the pipeline with `-profile shh`. This will download and launch the
however this will likely change to a read-only directory in the future that will be managed by IT.
Note that **the configuration file is currently optimised for `nf-core/eager`**. It will submit to the short queue but with a walltime of 2 hours.
This configuration will automatically choose the correct SLURM queue (`short`,`medium`,`long`,`supercruncher`) depending on the time and memory required by each process.
Please note that there is no `supercruncher` queue on CDAG.
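The queue selection in `conf/shh.config` (shown earlier in this diff) follows this pattern:

```nextflow
// Dynamic SLURM queue selection, as set in conf/shh.config above
process {
  queue = { task.memory > 756.GB ? 'supercruncher' : task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium' : 'long' }
}
```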
>NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.


@ -4,7 +4,7 @@ All nf-core pipelines have been successfully configured for use on the PROFILE C
To use, run the pipeline with `-profile PROFILENAME`. This will download and launch the [`profile.config`](../conf/profile.config) which has been pre-configured with a setup suitable for the PROFILE cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## Below are non-mandatory information e.g. on modules to load etc.
## Below are non-mandatory information e.g. on modules to load etc
Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on PROFILE CLUSTER. You can do this by issuing the commands below:
@ -20,7 +20,5 @@ module load Singularity/2.6.0
A local copy of the iGenomes resource has been made available on PROFILE CLUSTER so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline.
You can do this by simply using the `--genome <GENOME_ID>` parameter.
>NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -3,6 +3,7 @@
All nf-core pipelines have been successfully configured for use on the Swedish UPPMAX clusters.
## Using the UPPMAX config profile
To use, run the pipeline with `-profile uppmax` (one hyphen). This will download and launch the [`uppmax.config`](../conf/uppmax.config) which has been pre-configured with a setup suitable for the UPPMAX servers. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
In addition to this config profile, you will also need to specify an UPPMAX project id.
@ -18,15 +19,24 @@ This config enables Nextflow to manage the pipeline jobs via the Slurm job sched
Just run Nextflow on a login node and it will handle everything else.
## Using iGenomes references
A local copy of the iGenomes resource has been made available on all UPPMAX clusters so you should be able to run the pipeline against any reference available in the `igenomes.config`.
You can do this by simply using the `--genome <GENOME_ID>` parameter.
## Running offline with Bianca
If running on Bianca, you will have no internet connection and these configs will not be loaded.
Please use the nf-core helper tool on a different system to download the required pipeline files, and transfer them to bianca.
This helper tool bundles the config files in this repo together with the pipeline files, so the profile will still be available.
Note that Bianca only allocates 7 GB memory per core so the max memory needs to be limited:
```bash
--max_memory "112GB"
```
## Getting more memory
If your nf-core pipeline run is running out of memory, you can run on a fat node with more memory using the following nextflow flags:
```bash
@ -41,6 +51,7 @@ Note that each job will still start with the same request as normal, but restart
All jobs will be submitted to fat nodes using this method, so it's only for use in extreme circumstances.
## Uppmax-devel config
If doing pipeline development work on Uppmax, this profile allows for faster testing.
Applied after main UPPMAX config, it overwrites certain parts of the config and submits jobs to the `devcore` queue, which has much faster queue times.
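Conceptually, such an overlay only needs to override the scheduler target; a minimal sketch (illustrative only, not the actual file contents) could look like:

```nextflow
// Illustrative sketch of a devel overlay loaded after the main uppmax.config
process {
  clusterOptions = { "-A $params.project -p devcore" }  // assumes params.project is set, as for the main profile
}
```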


@ -4,7 +4,5 @@ All nf-core pipelines have been successfully configured for use on the UZH clust
To use, run the pipeline with `-profile uzh`. This will download and launch the [`uzh.config`](../conf/uzh.config) which has been pre-configured with a setup suitable for the UZH cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
>NB: You will need an account to use the HPC cluster UZH in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -11,20 +11,33 @@
params.custom_config_version = 'master'
params.custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
//Please use a new line per include Config section to allow easier linting/parsing. Thank you.
profiles {
awsbatch { includeConfig "${params.custom_config_base}/conf/awsbatch.config" }
bigpurple { includeConfig "${params.custom_config_base}/conf/bigpurple.config" }
binac { includeConfig "${params.custom_config_base}/conf/binac.config" }
cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" }
ccga { includeConfig "${params.custom_config_base}/conf/ccga.config" }
ccga_dx { includeConfig "${params.custom_config_base}/conf/ccga_dx.config" }
cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
crick { includeConfig "${params.custom_config_base}/conf/crick.config" }
czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" }
czbiohub_aws_highpriority { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config";
includeConfig "${params.custom_config_base}/conf/czbiohub_aws_highpriority.config" }
denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" }
genouest { includeConfig "${params.custom_config_base}/conf/genouest.config" }
gis { includeConfig "${params.custom_config_base}/conf/gis.config" }
hebbe { includeConfig "${params.custom_config_base}/conf/hebbe.config" }
mendel { includeConfig "${params.custom_config_base}/conf/mendel.config" }
kraken { includeConfig "${params.custom_config_base}/conf/kraken.config" }
munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
shh { includeConfig "${params.custom_config_base}/conf/shh.config" }
uct_hex { includeConfig "${params.custom_config_base}/conf/uct_hex.config" }
uppmax_devel { includeConfig "${params.custom_config_base}/conf/uppmax.config"; includeConfig "${params.custom_config_base}/conf/uppmax-devel.config" }
uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" }
uppmax_devel { includeConfig "${params.custom_config_base}/conf/uppmax.config";
includeConfig "${params.custom_config_base}/conf/uppmax_devel.config" }
uzh { includeConfig "${params.custom_config_base}/conf/uzh.config" }
}
@ -35,6 +48,7 @@ params {
// This is a groovy map, not a nextflow parameter set
hostnames = [
crick: ['.thecrick.org'],
genouest: ['.genouest.org'],
uppmax: ['.uppmax.uu.se']
]
}