
Merge branch 'master' into sarek

This commit is contained in:
Maxime Garcia 2019-11-27 13:27:43 +01:00 committed by GitHub
commit 2669401a72
31 changed files with 374 additions and 135 deletions

.github/markdownlint.yml (new file)

@ -0,0 +1,5 @@
# Markdownlint configuration file
default: true
line-length: false
no-duplicate-header:
siblings_only: true

.github/workflows/linting.yml (new file)

@ -0,0 +1,20 @@
name: Markdown linting
# This workflow is triggered on pushes and PRs to the repository.
on: [push, pull_request]
jobs:
Markdown:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v1
- uses: actions/setup-node@v1
with:
node-version: '10'
- name: Install markdownlint
run: |
npm install -g markdownlint-cli
- name: Run Markdownlint
run: |
markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml

.github/workflows/main.yml (new file)

@ -0,0 +1,29 @@
name: Configs tests
on: [pull_request, push]
jobs:
test_all_profiles:
runs-on: ubuntu-latest
name: Check if all profiles are tested
steps:
- uses: actions/checkout@v1
- name: Check whether profiles are all tested
run: |
python ${GITHUB_WORKSPACE}/bin/cchecker.py ${GITHUB_WORKSPACE}/nfcore_custom.config ${GITHUB_WORKSPACE}/.github/workflows/main.yml
profile_test:
runs-on: ubuntu-latest
name: Run ${{ matrix.profile }} profile
needs: test_all_profiles
strategy:
matrix:
profile: ['awsbatch', 'bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga', 'cfc', 'crick', 'denbi_qbic', 'genotoul', 'genouest', 'gis', 'hebbe', 'kraken', 'munin', 'pasteur', 'phoenix', 'prince', 'shh_sdag', 'shh_cdag', 'uct_hex', 'uppmax_devel', 'uppmax', 'uzh']
steps:
- uses: actions/checkout@v1
- name: Install Nextflow
run: |
wget -qO- get.nextflow.io | bash
sudo mv nextflow /usr/local/bin/
- name: Check ${{ matrix.profile }} profile
env:
SCRATCH: '~'
run: nextflow run ${GITHUB_WORKSPACE}/configtest.nf --custom_config_base=${GITHUB_WORKSPACE} -profile ${{ matrix.profile }}


@ -1,30 +0,0 @@
sudo: required
language: python
jdk: openjdk8
services:
- docker
python:
- '3.6'
cache: pip
matrix:
fast_finish: true
install:
# Install Nextflow
- mkdir /tmp/nextflow && cd /tmp/nextflow
- wget -qO- get.nextflow.io | bash
- sudo ln -s /tmp/nextflow/nextflow /usr/local/bin/nextflow
- mkdir -p ${TRAVIS_BUILD_DIR}/tests && cd ${TRAVIS_BUILD_DIR}/tests
env:
- NXF_VER='18.10.1' SCRATCH='~' # Specify a minimum NF version that should be tested and work. Set SCRATCH for prince.config.
- NXF_VER='' SCRATCH='~' # Plus: get the latest NF version and check that it works. Set SCRATCH for prince.config.
script:
# Run the pipeline with the test profile and test remote config
- |
grep "{.*includeConfig.*[a-z]*\.config\"" ${TRAVIS_BUILD_DIR}/nfcore_custom.config | \
tr -s ' ' | \
cut -d " " -f 2 | \
grep -v "czbiohub_aws" | \
xargs -I {} nextflow run ${TRAVIS_BUILD_DIR}/configtest.nf --custom_config_base=${TRAVIS_BUILD_DIR} -profile {}


@ -1,15 +1,17 @@
# ![nf-core/configs](docs/images/nfcore-configs_logo.png)
# [![nf-core/configs](docs/images/nfcore-configs_logo.png "nf-core/configs")](https://github.com/nf-core/configs)
[![Build Status](https://travis-ci.org/nf-core/configs.svg?branch=master)](https://travis-ci.org/nf-core/configs)
[![Lint Status](https://github.com/nf-core/configs/workflows/Configs%20tests/badge.svg)](https://github.com/nf-core/configs/workflows/Configs%20tests/badge.svg)
A repository for hosting Nextflow configuration files containing custom parameters required to run nf-core pipelines at different Institutions.
## Table of contents
* [Table of contents](#table-of-contents)
* [Using an existing config](#using-an-existing-config)
* [Configuration and parameters](#configuration-and-parameters)
* [Offline usage](#offline-usage)
* [Adding a new config](#adding-a-new-config)
* [Checking user hostnames](#checking-user-hostnames)
* [Testing](#testing)
* [Documentation](#documentation)
* [Uploading to `nf-core/configs`](#uploading-to-nf-coreconfigs)
@ -103,6 +105,7 @@ Currently documentation is available for the following systems:
* [CZBIOHUB_AWS](docs/czbiohub.md)
* [CZBIOHUB_AWS_HIGHPRIORITY](docs/czbiohub.md)
* [DENBI_QBIC](docs/denbi_qbic.md)
* [GENOTOUL](docs/genotoul.md)
* [GENOUEST](docs/genouest.md)
* [GIS](docs/gis.md)
* [HEBBE](docs/hebbe.md)
@ -124,6 +127,8 @@ Within the local clone of your fork add the custom config file to the [`conf/`](
You will also need to edit and add your custom profile to the [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) file in the top-level directory of the clone.
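As a sketch, the new entry in `nfcore_custom.config` follows the same one-line pattern as the existing profiles (the profile name `myinstitute` below is a placeholder, not a real profile):

```nextflow
// Added inside the existing profiles { } scope of nfcore_custom.config
myinstitute { includeConfig "${params.custom_config_base}/conf/myinstitute.config" }
```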
You will also need to edit and add your custom profile to the [`README.md`](https://github.com/nf-core/configs/blob/master/README.md) file in the top-level directory of the clone.
Afterwards, make sure to edit the `.github/workflows/main.yml` file and add your profile name to the alphabetically sorted `profile:` scope. This way, it will be tested automatically using GitHub Actions. If you forget to do this, the tests will fail with a complaint about the untested profile.
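For example, using the same placeholder profile name, the `profile` matrix in `.github/workflows/main.yml` gains one entry in alphabetical order (surrounding entries abbreviated here):

```yaml
strategy:
  matrix:
    # 'myinstitute' is a placeholder profile name inserted alphabetically
    profile: ['awsbatch', 'bigpurple', 'binac', 'myinstitute', 'uppmax', 'uzh']
```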
Commit and push these changes to your local clone on GitHub, and then [create a pull request](https://help.github.com/articles/creating-a-pull-request-from-a-fork/) on the `nf-core/configs` GitHub repo with the appropriate information.
We will be notified automatically when you have created your pull request, and providing that everything adheres to nf-core guidelines we will endeavour to approve your pull request as soon as possible.

bin/cchecker.py (new file)

@ -0,0 +1,71 @@
#!/usr/bin/env python
#######################################################################
#######################################################################
## Created on November 26 to check pipeline configs for nf-core/configs
#######################################################################
#######################################################################
import os
import sys
import argparse
import re
############################################
############################################
## PARSE ARGUMENTS
############################################
############################################
Description = 'Double check custom config file and github actions file to test all cases'
Epilog = """Example usage: python cchecker.py <nfcore_custom.config> <github_actions_file>"""
argParser = argparse.ArgumentParser(description=Description, epilog=Epilog)
## REQUIRED PARAMETERS
argParser.add_argument('CUSTOM_CONFIG', help="Input nfcore_custom.config.")
argParser.add_argument('GITHUB_CONFIG', help="Input Github Actions YAML")
args = argParser.parse_args()
############################################
############################################
## MAIN FUNCTION
############################################
############################################
def check_config(Config, Github):
regex = 'includeConfig*'
ERROR_STR = 'ERROR: Please check config file! Did you really update the profiles?'
## CHECK Config First
config_profiles = set()
with open(Config, 'r') as cfg:
for line in cfg:
if re.search(regex, line):
hit = line.split('/')[2].split('.')[0]
config_profiles.add(hit.strip())
###Check Github Config now
tests = set()
###Ignore these profiles
ignore_me = ['czbiohub_aws_highpriority', 'czbiohub_aws']
tests.update(ignore_me)
with open(Github, 'r') as ghfile:
for line in ghfile:
if re.search('profile: ', line):
line = line.replace('\'','').replace('[','').replace(']','').replace('\n','')
profiles = line.split(':')[1].split(',')
for p in profiles:
tests.add(p.strip())
###Check if sets are equal
if tests == config_profiles:
sys.exit(0)
else:
#Maybe report what is missing here too
print("Tests don't seem to test these profiles properly. Please check whether you added the profile to the Github Actions testing YAML.\n")
print(config_profiles.symmetric_difference(tests))
sys.exit(1)
check_config(Config=args.CUSTOM_CONFIG,Github=args.GITHUB_CONFIG)


@ -10,7 +10,5 @@ params {
tracedir = './'
}
aws.region = params.awsregion
process.executor = 'awsbatch'
process.queue = params.awsqueue
executor.awscli = '/home/ec2-user/miniconda/bin/aws'


@ -12,12 +12,14 @@ process {
clusterOptions = { task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' }
}
singularity.enabled = true
singularity {
enabled = true
cacheDir = '/scratch-cbe/shared/containers'
}
params {
params.max_time = 14.d
params.max_cpus = 36
params.max_memory = 4.TB
igenomes_ignore = true
igenomesIgnore = true //deprecated
}
igenomes_base = '/resources/references/igenomes'
}


@ -22,7 +22,7 @@ weblog{
params {
igenomes_base = '/nfsmounts/igenomes'
max_memory = 60.GB
max_memory = 500.GB
max_cpus = 20
max_time = 140.h
}

conf/genotoul.config (new file)

@ -0,0 +1,27 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'The Genotoul cluster profile'
config_profile_contact = 'support.bioinfo.genotoul@inra.fr'
config_profile_url = 'http://bioinfo.genotoul.fr/'
}
singularity {
// need one image per execution
enabled = true
runOptions = '-B /bank -B /work2 -B /work -B /save -B /home'
}
process {
executor = 'slurm'
}
params {
save_reference = true
igenomes_ignore = true
igenomesIgnore = true //deprecated
// Max resources requested by a normal node on genotoul.
max_memory = 120.GB
max_cpus = 48
max_time = 96.h
}


@ -18,10 +18,6 @@ singularity {
}
process {
beforeScript = """
module load $singularityModule
module load $squashfsModule
"""
.stripIndent()
beforeScript = "module load $singularityModule $squashfsModule"
executor = 'slurm'
}


@ -1,7 +1,7 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'MPI SHH cluster profile provided by nf-core/configs.'
config_profile_contact = 'James Fellows Yates (@jfy133)'
config_profile_description = 'MPI-SHH CDAG cluster profile provided by nf-core/configs.'
config_profile_contact = 'James Fellows Yates (@jfy133), Maxime Borry (@Maxibor)'
config_profile_url = 'https://shh.mpg.de'
}
@ -14,7 +14,7 @@ singularity {
process {
executor = 'slurm'
queue = { task.memory > 756.GB ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
queue = { task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
}
executor {
@ -22,9 +22,9 @@ executor {
}
params {
max_memory = 2.TB
max_memory = 256.GB
max_cpus = 32
max_time = 720.h
//Illumina iGenomes reference file path
igenomes_base = "/projects1/public_data/igenomes/"
}
}

conf/shh_sdag.config (new file)

@ -0,0 +1,30 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'MPI-SHH SDAG cluster profile provided by nf-core/configs.'
config_profile_contact = 'James Fellows Yates (@jfy133), Maxime Borry (@Maxibor)'
config_profile_url = 'https://shh.mpg.de'
}
singularity {
enabled = true
autoMounts = true
runOptions = '-B /run/shm:/run/shm'
cacheDir = "/projects1/singularity_scratch/cache/"
}
process {
executor = 'slurm'
queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
}
executor {
queueSize = 16
}
params {
max_memory = 2.TB
max_cpus = 128
max_time = 720.h
//Illumina iGenomes reference file path
igenomes_base = "/projects1/public_data/igenomes/"
}


@ -1,4 +1,4 @@
# nf-core/configs: awsbatch Configuration
To be used with `awsbatch`.
Custom queue and region can be entered with `params.awsqueue` and `params.awsregion` respectively.
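A minimal launch command, assuming a hypothetical queue name, region and S3 output bucket (none of these are defined by the config itself), might look like:

```bash
nextflow run nf-core/<pipeline name> -profile awsbatch \
    --awsqueue my-batch-queue \
    --awsregion eu-west-1 \
    --outdir s3://my-bucket/results
```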


@ -1,24 +1,25 @@
# nf-core/configs: BigPurple Configuration
## nf-core pipelines that use this repo
All nf-core pipelines that use this config repo (which is most) can be run on BigPurple. **Before** running a pipeline for the first time, go into an interactive slurm session on a compute node (`srun --pty --time=02:00:00 -c 2`), as the docker image for the pipeline will need to be pulled and converted. Once in the interactive session:
```bash
module load singularity/3.1
module load squashfs-tools/4.3
```
Now, run the pipeline of your choice with `-profile bigpurple`. This will download and launch the bigpurple.config which has been pre-configured with a setup suitable for the BigPurple cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a singularity image before execution of the pipeline.
An example commandline:
`nextflow run nf-core/<pipeline name> -profile bigpurple <additional flags>`
## nf-core pipelines that do not use this repo
If the pipeline has not yet been configured to use this config, then you will have to do it manually.
git clone this repo, copy the `bigpurple.config` from the conf folder and then you can invoke the pipeline like this:
`nextflow run nf-core/<pipeline name> -c bigpurple.config <additional flags>`
>NB: You will need an account to use the HPC cluster BigPurple in order to run the pipeline. If in doubt contact MCIT.
>NB: You will need to install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason there is no module for nextflow on the cluster is that the development cycle of nextflow is rapid and it's easy to update yourself: `nextflow self-update`


@ -13,7 +13,5 @@ module load devel/java_jdk/1.8.0u112
module load devel/singularity/3.0.1
```
>NB: You will need an account to use the HPC cluster BINAC in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -13,6 +13,7 @@ module load nextflow/19.04.0
module load singularity/3.2.1
```
A local copy of the [AWS-iGenomes](https://registry.opendata.aws/aws-igenomes/) resource has been made available on CBE so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline. You can do this by simply using the `--genome <GENOME_ID>` parameter.
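For example, picking a pipeline and iGenomes ID purely for illustration:

```bash
nextflow run nf-core/rnaseq -profile cbe --genome GRCh38
```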
>NB: You will need an account to use the HPC cluster on CBE in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -1,6 +1,6 @@
# nf-core/configs: CCGA Configuration
Deployment and testing of nf-core pipelines at the CCGA cluster is on-going.
To use, run the pipeline with `-profile ccga`. This will download and launch the [`ccga.config`](../conf/ccga.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
@ -9,9 +9,9 @@ Before running the pipeline you will need to load Nextflow and Singularity using
```bash
## Load Nextflow and Singularity environment modules
module purge
module load IKMB
module load Java/1.8.0
module load Nextflow
module load singularity3.1.0
```


@ -1,9 +1,8 @@
# nf-core/configs: CCGA DX Configuration
Deployment and testing of nf-core pipelines at the CCGA DX cluster is on-going.
To use, run the pipeline with `-profile ccga_dx`. This will download and launch the [`ccga_dx.config`](../conf/ccga_dx.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
Before running the pipeline you will need to have Nextflow installed.
>NB: Access to the CCGA DX cluster is restricted to IKMB/CCGA employes. Please talk to Marc Hoeppner to get access (@marchoeppner).


@ -13,7 +13,5 @@ module load devel/java_jdk/1.8.0u121
module load qbic/singularity_slurm/3.0.3
```
>NB: You will need an account to use the HPC cluster CFC in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -20,5 +20,4 @@ Alternatively, if you are running the pipeline regularly for genomes that arent
All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
>NB: You will need an account to use the HPC cluster on CAMP in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -14,7 +14,7 @@ The pipeline will monitor and submit jobs to AWS Batch on your behalf. To ensure
[tmux](https://hackernoon.com/a-gentle-introduction-to-tmux-8d784c404340) is a "Terminal Multiplexer" that allows for commands to continue running even when you have closed your laptop. Start a new tmux session with `tmux new` and we'll name this session `nextflow`.
```bash
tmux new -s nextflow
```
@ -22,89 +22,87 @@ Now you can run pipelines with abandon!
### 2. Make a GitHub repo for your workflows (optional :)
To make sharing your pipelines and commands easy between your teammates, it's best to share code in a GitHub repository. One way is to store the commands in a Makefile ([example](https://github.com/czbiohub/kh-workflows/blob/master/nf-kmer-similarity/Makefile)) which can contain multiple `nextflow run` commands so that you don't need to remember the S3 bucket or output directory for every single one. [Makefiles](https://kbroman.org/minimal_make/) are broadly used in the software community for running many complex commands. Makefiles can have a lot of dependencies and be confusing, so we're only going to write *simple* Makefiles.
```bash
rnaseq:
nextflow run -profile czbiohub_aws nf-core/rnaseq \
--reads 's3://czb-maca/Plate_seq/24_month/180626_A00111_0166_BH5LNVDSXX/fastqs/*{R1,R2}*.fastq.gz' \
--genome GRCm38 \
--outdir s3://olgabot-maca/nextflow-test/
```
human_mouse_zebrafish:
nextflow run czbiohub/nf-kmer-similarity -latest -profile aws \
--samples s3://kmer-hashing/hematopoeisis/smartseq2/human_mouse_zebrafish/samples.csv
Human_Mouse_Zebrafish:
```bash
nextflow run czbiohub/nf-kmer-similarity -latest -profile aws \
--samples s3://kmer-hashing/hematopoeisis/smartseq2/human_mouse_zebrafish/samples.csv
```
merkin2012_aws:
nextflow run czbiohub/nf-kmer-similarity -latest --sra "SRP016501" \
-r olgabot/support-csv-directory-or-sra \
-profile aws
Merkin2012_AWS:
```bash
nextflow run czbiohub/nf-kmer-similarity -latest --sra "SRP016501" \
-r olgabot/support-csv-directory-or-sra \
-profile aws
```
In this example, one would run the `rnaseq` rule and the nextflow command beneath it with:
```bash
make rnaseq
```
If one wanted to run a different command, e.g. `human_mouse_zebrafish`, they would specify that command instead. For example:
```bash
make human_mouse_zebrafish
```
Makefiles are a very useful way of storing longer commands with short mnemonic words.
Once you [create a new repository](https://github.com/organizations/czbiohub/repositories/new) (best to initialize with a `.gitignore`, an MIT license and a `README`), clone that repository to your EC2 instance. For example, if the repository is called `kh-workflows`, this is what the command would look like:
```bash
git clone https://github.com/czbiohub/kh-workflows
```
Now both create and edit a `Makefile`:
```bash
cd
nano Makefile
```
Write your rule with a colon after it, and the next line must start with a **tab**, not spaces. Once you're done, write the file and exit the program (the `^` shown in nano means "Control"), then add it to git, commit it, and push it up to GitHub.
```bash
git add Makefile
git commit -m "Added makefile"
git push origin master
```
### 3. Run your workflow!!
### 3. Run your workflow
Remember to specify `-profile czbiohub_aws` to grab the CZ Biohub-specific AWS configurations, and an `--outdir` with an AWS S3 bucket so you don't run out of space on your small AMI.
```bash
nextflow run -profile czbiohub_aws nf-core/rnaseq \
--reads 's3://czb-maca/Plate_seq/24_month/180626_A00111_0166_BH5LNVDSXX/fastqs/*{R1,R2}*.fastq.gz' \
--genome GRCm38 \
--outdir s3://olgabot-maca/nextflow-test/
```
### 4. If you lose connection, how do you restart the jobs?
### 4. If you lose connection, how do you restart the jobs
If you close your laptop, get onto the train, or lose WiFi connection, you may lose connection to AWS and may need to restart the jobs. To reattach, use the command `tmux attach` and you should see your Nextflow output! To get the named session, use:
```bash
tmux attach -t nextflow
```
To restart the jobs from where you left off, add the `-resume` flag to your `nextflow` command:
```bash
nextflow run -profile czbiohub_aws nf-core/rnaseq \
--reads 's3://czb-maca/Plate_seq/24_month/180626_A00111_0166_BH5LNVDSXX/fastqs/*{R1,R2}*.fastq.gz' \
--genome GRCm38 \
@ -122,7 +120,5 @@ You can do this by simply using the `--genome <GENOME_ID>` parameter.
For Human and Mouse, we use [GENCODE](https://www.gencodegenes.org/) gene annotations. This doesn't change how you would specify the genome name, only that the pipelines run with the `czbiohub_aws` profile would be with GENCODE rather than iGenomes.
>NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -1,6 +1,6 @@
# nf-core/configs: de.NBI QBIC Configuration
All nf-core pipelines have been successfully configured for use on the de.NBI Cloud cluster. This is a virtual cluster that has been set up using the [virtual cluster setup scripts](https://github.com/MaximilianHanussek/virtual_cluster_local_ips).
To use, run the pipeline with `-profile denbi_qbic`. This will download and launch the [`denbi_qbic.config`](../conf/denbi_qbic.config) which has been pre-configured with a setup suitable for the automatically created cluster. Using this profile, a Docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.

docs/genotoul.md (new file)

@ -0,0 +1,88 @@
# nf-core/configs: Bioinfo Genotoul Configuration
All nf-core pipelines have been successfully configured for use on the Bioinfo Genotoul cluster at INRA Toulouse.
To use, run the pipeline with `-profile genotoul`. This will download and
launch the [`genotoul.config`](../conf/genotoul.config) which has been
pre-configured with a setup suitable for the Bioinfo Genotoul cluster.
Using this profile, a docker image containing all of the required software
will be downloaded, and converted to a Singularity image before execution
of the pipeline. Images are stored for all users in the following directory: `/usr/local/bioinfo/src/NextflowWorkflows/singularity-img/`.
## Running the workflow on the Genologin cluster
Before running the pipeline you will need to load Nextflow and
Singularity using the environment module system on Genotoul. You can do
this by issuing the commands below:
Once connected to the front-end (login) node:
```bash
# Login to a compute node
srun --mem=4G --pty bash
```
Set up the default Nextflow and Singularity home directories (this only needs to be done once):
```bash
sh /usr/local/bioinfo/src/NextflowWorkflows/create_nfx_dirs.sh
```
Load the environment:
```bash
module purge
module load bioinfo/nfcore-Nextflow-v19.04.0
```
Try a test workflow (for example the methylseq workflow):
```bash
nextflow run nf-core/methylseq -profile genotoul,test
```
Create a launch script `nfcore-rnaseq.sh`:
```bash
#!/bin/bash
#SBATCH -p workq
#SBATCH -t 1:00:00 # time in hours
#SBATCH --mem=4G
#SBATCH --mail-type=BEGIN,END,FAIL
module load bioinfo/nfcore-Nextflow-v19.04.0
nextflow run nf-core/methylseq -profile genotoul,test
```
Launch on the cluster with sbatch:
```bash
sbatch nfcore-rnaseq.sh
```
## Mounted directories
By default, available mount points are:
* /bank
* /home
* /save
* /work
* /work2
To get access to other specific mount points (such as `nosave` or `project`),
you can add a config profile file with the `-profile` option containing:
```bash
singularity.runOptions = '-B /directory/to/mount'
```
## Databanks
A local copy of several genomes is available in the `/bank` directory. See
our [databank page](http://bioinfo.genotoul.fr/index.php/resources-2/databanks/)
to search for your favorite genome.
>NB: You will need an account to use the HPC cluster on Genotoul in order
to run the pipeline. If in doubt see [http://bioinfo.genotoul.fr/](http://bioinfo.genotoul.fr/).


@ -4,40 +4,40 @@ All nf-core pipelines have been successfully configured for use on the tars clus
To use, run the pipeline with `-profile pasteur`. This will download and launch the [`pasteur.config`](../conf/pasteur.config) which has been pre-configured with a setup suitable for the Pasteur cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## Running the workflow on the Pasteur cluster
Nextflow is not installed by default on the Pasteur cluster.
- Install Nextflow: [here](https://www.nextflow.io/docs/latest/getstarted.html#)
Nextflow manages each process as a separate job that is submitted to the cluster by using the sbatch command.
Nextflow manages each process as a separate job that is submitted to the cluster by using the `sbatch` command.
Nextflow shouldn't run directly on the submission node but on a compute node.
The compute nodes don't have access to the internet so you need to run it offline.
To do that:
1. Create a virtualenv to install nf-core
```bash
module purge
module load Python/3.6.0
module load java
module load singularity
cd /path/to/nf-core/workflows
virtualenv .venv -p python3
. .venv/bin/activate
```
2. Install nf-core: [here](https://nf-co.re/tools#installation)
3. Get nf-core pipeline and container: [here](https://nf-co.re/tools#downloading-pipelines-for-offline-use)
4. Get the nf-core Pasteur profile: [here](https://github.com/nf-core/rnaseq/blob/master/docs/usage.md#--custom_config_base)
5. Run nextflow on a compute node:
```bash
# create a terminal
tmux
# Get a compute node
salloc
# Load the dependencies if not done before
module purge
module load java
@ -46,11 +46,11 @@ module load singularity
# Run nextflow workflow
nextflow run \\
/path/to/pipeline-dir/from/step/3/workflow \\
-resume
-profile pasteur \\
-with-singularity /path/to/pipeline-dir/from/step/3/singularity-images/singularity.img \\
--email my-email@pasteur.fr \\
--custom_config_base /path/to/configs/from/step/4/ \\
-c my-specific.config
...
```


@ -1,5 +1,7 @@
# nf-core/configs: Prince Configuration
## nf-core pipelines that use this repo
All nf-core pipelines that use this config repo (which is most) can be run on prince. **Before** running a pipeline for the first time, go into an interactive slurm session on a compute node (`srun --pty --time=02:00:00 -c 2`), as the docker image for the pipeline will need to be pulled and converted.
Now, run the pipeline of your choice with `-profile prince`. This will download and launch the prince.config which has been pre-configured with a setup suitable for the prince cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a singularity image before execution of the pipeline. This step **takes time**!!
@ -8,13 +10,11 @@ An example commandline:
`nextflow run nf-core/<pipeline name> -profile prince <additional flags>`
## nf-core pipelines that do not use this repo
If the pipeline has not yet been configured to use this config, then you will have to do it manually.
git clone this repo, copy the `prince.config` from the conf folder and then you can invoke the pipeline like this:
`nextflow run nf-core/<pipeline name> -c prince.config <additional flags>`
>NB: You will need an account to use the HPC cluster Prince in order to run the pipeline. If in doubt contact the HPC admins.
>NB: Rather than using the nextflow module, I recommend you install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason this is better than using the module for nextflow on the cluster is that the development cycle of nextflow is rapid and it's easy to update your installation yourself: `nextflow self-update`.


@ -2,17 +2,15 @@
All nf-core pipelines have been successfully configured for use on the Department of Archaeogenetic's SDAG/CDAG clusters at the [Max Planck Institute for the Science of Human History (MPI-SHH)](http://shh.mpg.de).
To use, run the pipeline with `-profile shh`. This will download and launch the [`shh.config`](../conf/shh.config) which has been pre-configured with a setup suitable for the SDAG and CDAG clusters. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. The image will currently be centrally stored here:
To use, run the pipeline either with `-profile shh_sdag` or `-profile shh_cdag`. This will download and launch the [`shh_sdag.config`](../conf/shh_sdag.config) or [`shh_cdag.config`](../conf/shh_cdag.config), which have been pre-configured with setups suitable for the SDAG and CDAG clusters respectively. Using these profiles, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. The image will currently be centrally stored here:
```bash
/projects1/singularity_scratch/cache/
```
however this will likely change to a read-only directory in the future that will be managed by IT.
however this will likely change to a read-only directory in the future that will be managed by the IT team.
This configuration will automatically choose the correct SLURM queue (`short`,`medium`,`long`,`supercruncher`) depending on the time and memory required by each process.
Please note that there is no `supercruncher` queue on CDAG.
>NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact IT.
This configuration will automatically choose the correct SLURM queue (`short`, `medium`, `long`) depending on the time and memory required by each process. `shh_sdag` additionally allows submission of jobs to the `supercruncher` queue when a job requests more than 756 GB of memory or more than 64 CPUs.
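The queue selection is implemented as a dynamic `queue` directive; the `shh_sdag` variant (shown in `conf/shh_sdag.config` earlier in this diff) looks like this:

```nextflow
process {
    executor = 'slurm'
    // Jobs needing more than 756 GB memory or more than 64 CPUs go to 'supercruncher';
    // otherwise the queue is picked from the requested walltime.
    queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
}
```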
>NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact the IT team.
>NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.


@ -4,7 +4,7 @@ All nf-core pipelines have been successfully configured for use on the PROFILE C
To use, run the pipeline with `-profile PROFILENAME`. This will download and launch the [`profile.config`](../conf/profile.config) which has been pre-configured with a setup suitable for the PROFILE cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## Below is non-mandatory information, e.g. on modules to load etc
Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on PROFILE CLUSTER. You can do this by issuing the commands below:
@ -20,7 +20,5 @@ module load Singularity/2.6.0
A local copy of the iGenomes resource has been made available on PROFILE CLUSTER so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline.
You can do this by simply using the `--genome <GENOME_ID>` parameter.
>NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -3,6 +3,7 @@
All nf-core pipelines have been successfully configured for use on the Swedish UPPMAX clusters.
## Using the UPPMAX config profile
To use, run the pipeline with `-profile uppmax` (one hyphen). This will download and launch the [`uppmax.config`](../conf/uppmax.config) which has been pre-configured with a setup suitable for the UPPMAX servers. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
In addition to this config profile, you will also need to specify an UPPMAX project id.
@ -18,10 +19,12 @@ This config enables Nextflow to manage the pipeline jobs via the Slurm job sched
Just run Nextflow on a login node and it will handle everything else.
## Using iGenomes references
A local copy of the iGenomes resource has been made available on all UPPMAX clusters so you should be able to run the pipeline against any reference available in the `igenomes.config`.
You can do this by simply using the `--genome <GENOME_ID>` parameter.
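For example (the pipeline, genome ID and project ID below are placeholders; use your own UPPMAX/SNIC project):

```bash
nextflow run nf-core/rnaseq -profile uppmax --project snic2019-x-xxx --genome GRCh37
```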
## Running offline with Bianca
If running on Bianca, you will have no internet connection and these configs will not be loaded.
Please use the nf-core helper tool on a different system to download the required pipeline files, and transfer them to bianca.
This helper tool bundles the config files in this repo together with the pipeline files, so the profile will still be available.
@ -33,6 +36,7 @@ Note that Bianca only allocates 7 GB memory per core so the max memory needs to
```
## Getting more memory
If your nf-core pipeline run is running out of memory, you can run on a fat node with more memory using the following nextflow flags:
```bash
@ -47,6 +51,7 @@ Note that each job will still start with the same request as normal, but restart
All jobs will be submitted to fat nodes using this method, so it's only for use in extreme circumstances.
## Uppmax-devel config
If doing pipeline development work on Uppmax, this profile allows for faster testing.
Applied after main UPPMAX config, it overwrites certain parts of the config and submits jobs to the `devcore` queue, which has much faster queue times.
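As a sketch, selecting the profile works like any other (the pipeline and project ID here are placeholders):

```bash
nextflow run nf-core/methylseq -profile uppmax_devel,test --project snic2019-x-xxx
```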


@ -4,7 +4,5 @@ All nf-core pipelines have been successfully configured for use on the UZH clust
To use, run the pipeline with `-profile uzh`. This will download and launch the [`uzh.config`](../conf/uzh.config) which has been pre-configured with a setup suitable for the UZH cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
>NB: You will need an account to use the HPC cluster UZH in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.


@ -11,6 +11,7 @@
params.custom_config_version = 'master'
params.custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
//Please use a new line per include Config section to allow easier linting/parsing. Thank you.
profiles {
awsbatch { includeConfig "${params.custom_config_base}/conf/awsbatch.config" }
bigpurple { includeConfig "${params.custom_config_base}/conf/bigpurple.config" }
@ -21,7 +22,10 @@ profiles {
cfc { includeConfig "${params.custom_config_base}/conf/cfc.config" }
crick { includeConfig "${params.custom_config_base}/conf/crick.config" }
czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" }
czbiohub_aws_highpriority { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config"; includeConfig "${params.custom_config_base}/conf/czbiohub_aws_highpriority.config" }
czbiohub_aws_highpriority {
includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config";
includeConfig "${params.custom_config_base}/conf/czbiohub_aws_highpriority.config"}
genotoul { includeConfig "${params.custom_config_base}/conf/genotoul.config" }
denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" }
genouest { includeConfig "${params.custom_config_base}/conf/genouest.config" }
gis { includeConfig "${params.custom_config_base}/conf/gis.config" }
@ -31,10 +35,12 @@ profiles {
pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
shh { includeConfig "${params.custom_config_base}/conf/shh.config" }
shh_sdag { includeConfig "${params.custom_config_base}/conf/shh_sdag.config" }
shh_cdag { includeConfig "${params.custom_config_base}/conf/shh_cdag.config" }
uct_hex { includeConfig "${params.custom_config_base}/conf/uct_hex.config" }
uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" }
uppmax_devel { includeConfig "${params.custom_config_base}/conf/uppmax.config"; includeConfig "${params.custom_config_base}/conf/uppmax_devel.config" }
uppmax_devel { includeConfig "${params.custom_config_base}/conf/uppmax.config";
includeConfig "${params.custom_config_base}/conf/uppmax_devel.config" }
uzh { includeConfig "${params.custom_config_base}/conf/uzh.config" }
}
@ -45,6 +51,7 @@ params {
// This is a groovy map, not a nextflow parameter set
hostnames = [
crick: ['.thecrick.org'],
genotoul: ['.genologin1.toulouse.inra.fr', '.genologin2.toulouse.inra.fr'],
genouest: ['.genouest.org'],
uppmax: ['.uppmax.uu.se']
]