Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-11-24 17:19:54 +00:00)
Merge branch 'master' into olgabot/czb-update
commit 474e98daaf
26 changed files with 200 additions and 64 deletions

.travis.yml (10 changes)

@@ -9,10 +9,6 @@ cache: pip
 matrix:
   fast_finish: true
 
-before_install:
-  # PRs to master are only ok if coming from dev branch
-  - '[ $TRAVIS_PULL_REQUEST = "false" ] || [ $TRAVIS_BRANCH != "master" ] || ([ $TRAVIS_PULL_REQUEST_SLUG = $TRAVIS_REPO_SLUG ] && [ $TRAVIS_PULL_REQUEST_BRANCH = "dev" ])'
-
 install:
   # Install Nextflow
   - mkdir /tmp/nextflow && cd /tmp/nextflow
@@ -26,4 +22,8 @@ env:
 
 script:
   # Run the pipeline with the test profile and test remote config
-  - grep "{.*includeConfig.*[a-z]*\.config\"" ${TRAVIS_BUILD_DIR}/nfcore_custom.config | tr -s ' ' | cut -d " " -f 2 | xargs -I {} nextflow run ${TRAVIS_BUILD_DIR}/configtest.nf -profile {}
+  - |
+    grep "{.*includeConfig.*[a-z]*\.config\"" ${TRAVIS_BUILD_DIR}/nfcore_custom.config | \
+    tr -s ' ' | \
+    cut -d " " -f 2 | \
+    xargs -I {} nextflow run ${TRAVIS_BUILD_DIR}/configtest.nf -profile {}
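For context: the multi-line `script` entry above is behaviourally identical to the one-liner it replaces. It extracts every profile name from `nfcore_custom.config` and runs the test workflow once per profile. The lines it greps for have the shape sketched below (the `binac` entry is illustrative; the real list appears in the `nfcore_custom.config` diff further down this page):

```groovy
// Shape of the profile lines in nfcore_custom.config that the grep matches:
binac { includeConfig "${params.custom_config_base}/conf/binac.config" }
```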

README.md (15 changes)

@@ -1,6 +1,4 @@
-<img src="docs/images/nf-core-logo.png" width="400">
+# ![nf-core/configs](docs/images/nfcore-configs_logo.png)
 
-# [nf-core/configs](https://github.com/nf-core/configs)
-
 [![Build Status](https://travis-ci.org/nf-core/configs.svg?branch=master)](https://travis-ci.org/nf-core/configs)
 
@@ -58,7 +56,14 @@ nextflow run /path/to/pipeline/ -c /path/to/my/configs/configs-master/conf/my_co
 
 If you decide to upload your custom config file to `nf-core/configs` then this will ensure that your custom config file will be automatically downloaded, and available at run-time to all nf-core pipelines, and to everyone within your organisation. You will simply have to specify `-profile <config_name>` in the command used to run the pipeline. See [`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf) for examples.
 
-Please also make sure to add an extra `params` section with `params. config_profile_name`, `params.config_profile_description`, `params.config_profile_contact` and `params.config_profile_url` set to reasonable values. Users will get information on who wrote the configuration profile then when executing a nf-core pipeline and can report back if there are things missing for example.
+Please also make sure to add an extra `params` section with `params.config_profile_description`, `params.config_profile_contact` and `params.config_profile_url` set to reasonable values. Users will get information on who wrote the configuration profile then when executing a nf-core pipeline and can report back if there are things missing for example.
 
+## Checking user hostnames
+
+If your cluster has a set of consistent hostnames, nf-core pipelines can check that users are using your profile.
+Add one or more hostname substrings to `params.hostnames` under a key that matches the profile name.
+If the user's hostname contains this string at the start of a run or when a run fails and their profile
+does not contain the profile name, a warning message will be printed.
+
 ### Testing
 
@@ -78,6 +83,7 @@ See [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs
 Currently documentation is available for the following clusters:
 
 * [BINAC](docs/binac.md)
+* [BIGPURPLE](docs/bigpurple.md)
 * [CCGA](docs/ccga.md)
 * [CFC](docs/binac.md)
 * [CRICK](docs/crick.md)
@@ -86,6 +92,7 @@ Currently documentation is available for the following clusters:
 * [MENDEL](docs/mendel.md)
 * [MUNIN](docs/munin.md)
 * [PHOENIX](docs/phoenix.md)
+* [PRINCE](docs/prince.md)
 * [SHH](docs/shh.md)
 * [UCT_HEX](docs/uct_hex.md)
 * [UPPMAX-DEVEL](docs/uppmax-devel.md)
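As a concrete sketch of the `params` section that the README change above asks profile authors to add (all values below are hypothetical placeholders, not a real site profile):

```groovy
// Minimal custom profile skeleton with the recommended metadata params
params {
  config_profile_description = 'Example cluster profile provided by nf-core/configs.'
  config_profile_contact = 'Jane Doe (@janedoe)'       // hypothetical contact
  config_profile_url = 'https://example.org/hpc-docs'  // hypothetical URL
}
```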

conf/bigpurple.config (new file, 27 lines)

@@ -0,0 +1,27 @@
+singularityDir = "/gpfs/scratch/${USER}/singularity_images_nextflow"
+
+params {
+  config_profile_description = """
+  NYU School of Medicine BigPurple cluster profile provided by nf-core/configs.
+  module load both singularity/3.1 and squashfs-tools/4.3 before running the pipeline with this profile!!
+  Run from your scratch or lab directory - Nextflow makes a lot of files!!
+  Also consider running the pipeline on a compute node (srun --pty /bin/bash -t=01:00:00) the first time, as it will be pulling the docker image, which will be converted into a singularity image, which is heavy on the login node and will take some time. Subsequent runs can be done on the login node, as the docker image will only be pulled and converted once. By default the images will be stored in $singularityDir
+  """.stripIndent()
+  config_profile_contact = 'Tobias Schraink (@tobsecret)'
+  config_profile_url = 'https://github.com/nf-core/configs/blob/master/docs/bigpurple.md'
+}
+
+singularity {
+  enabled = true
+  autoMounts = true
+  cacheDir = singularityDir
+}
+
+process {
+  beforeScript = """
+  module load singularity/3.1
+  module load squashfs-tools/4.3
+  """
+  .stripIndent()
+  executor = 'slurm'
+}

conf/binac.config

@@ -1,23 +1,22 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'BINAC'
   config_profile_description = 'BINAC cluster profile provided by nf-core/configs.'
   config_profile_contact = 'Alexander Peltzer (@apeltzer)'
   config_profile_url = 'https://www.bwhpc-c5.de/wiki/index.php/Category:BwForCluster_BinAC'
 }
 
 singularity {
   enabled = true
 }
 
 process {
-  beforeScript = 'module load devel/singularity/3.0.1'
+  beforeScript = 'module load devel/singularity/3.0.3'
   executor = 'pbs'
   queue = 'short'
 }
 
 params {
-  igenomesIgnore = false
-  igenomes_base = '/beegfs/work/igenomes'
+  igenomes_base = '/nfsmounts/igenomes'
   max_memory = 128.GB
   max_cpus = 28
   max_time = 48.h

conf/ccga.config

@@ -1,17 +1,21 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'CCGA'
   config_profile_description = 'CCGA cluster profile provided by nf-core/configs.'
   config_profile_contact = 'Marc Hoeppner (@marchoeppner)'
-  config_profile_url = 'https://www.ikmb.uni-kiel.de/'
+  config_profile_url = 'https://www.ccga.uni-kiel.de/'
 }
 
 /*
  * -------------------------------------------------
- * Nextflow config file with environment modules for RZCluster in Kiel
+ * Nextflow config file for CCGA cluster in Kiel
  * -------------------------------------------------
 */
 
+singularity {
+  enabled = true
+  runOptions = "-B /ifs -B /scratch -B /work_beegfs"
+}
+
 executor {
   queueSize=100
 }
@@ -25,9 +29,12 @@ process {
   clusterOptions = { "--qos=ikmb_a" }
 
 }
 
 params {
   // illumina iGenomes reference file paths on RZCluster
   igenomes_base = '/ifs/data/nfs_share/ikmb_repository/references/iGenomes/references/'
   saveReference = true
+  max_memory = 128.GB
+  max_cpus = 16
+  max_time = 120.h
 }

conf/cfc.config

@@ -1,6 +1,5 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'CFC'
   config_profile_description = 'QBiC Core Facility cluster profile provided by nf-core/configs.'
   config_profile_contact = 'Alexander Peltzer (@apeltzer)'
   config_profile_url = 'http://qbic.uni-tuebingen.de/'
@@ -15,9 +14,13 @@ process {
   executor = 'slurm'
 }
 
+weblog{
+  enabled = true
+  url = 'http://services.qbic.uni-tuebingen.de:8080/workflows'
+}
+
 params {
-  igenomesIgnore = false
-  igenomes_base = '/sfs/7/igenomes'
+  igenomes_base = '/nfsmounts/igenomes'
   max_memory = 60.GB
   max_cpus = 20
   max_time = 140.h

conf/crick.config

@@ -1,8 +1,7 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'CRICK'
   config_profile_description = 'The Francis Crick Institute CAMP HPC cluster profile provided by nf-core/configs.'
-  config_profile_contact = 'Harshil Patel (@drpatelh )'
+  config_profile_contact = 'Harshil Patel (@drpatelh)'
   config_profile_url = 'https://www.crick.ac.uk/research/platforms-and-facilities/scientific-computing/technologies'
 }
 

conf/czbiohub_aws.config

@@ -76,7 +76,7 @@ params {
       fasta = "${params.gencode_base}/mouse/vM21/GRCm38.p6.genome.ERCC92.fa"
       gtf = "${params.gencode_base}/mouse/vM21/gencode.vM21.annotation.ERCC92.gtf"
       transcript_fasta = "${params.gencode_base}/mouse/vM21/gencode.vM21.transcripts.ERCC92.fa"
-      start = "${params.gencode_base}/mouse/vM21/STARIndex/"
+      star = "${params.gencode_base}/mouse/vM21/STARIndex/"
     }
   }
 

conf/gis.config

@@ -1,11 +1,9 @@
-/*
- * -------------------------------------------------
- * Nextflow config file for GIS (Aquila)
- * -------------------------------------------------
- * Defines reference genomes, using iGenome paths
- * Imported under the default 'standard' Nextflow
- * profile in nextflow.config
-*/
+//Profile config names for nf-core/configs
+params {
+  config_profile_description = 'Genome Institute of Singapore (Aquila) cluster profile provided by nf-core/configs.'
+  config_profile_contact = 'Andreas Wilm (@andreas-wilm)'
+  config_profile_url = 'https://www.a-star.edu.sg/gis/'
+}
 
 process {
   executor = 'sge'
@@ -20,4 +18,3 @@ params {
   // illumina iGenomes reference file paths on GIS Aquila
   igenomes_base = '/mnt/projects/rpd/genomes.testing/S3_igenomes/'
 }
-

conf/hebbe.config

@@ -1,8 +1,7 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'HEBBE'
   config_profile_description = 'Gothenburg Hebbe cluster profile provided by nf-core/configs.'
-  config_profile_contact = 'Phil Ewels (@ewels )'
+  config_profile_contact = 'Phil Ewels (@ewels)'
   config_profile_url = 'http://www.c3se.chalmers.se/index.php/Hebbe'
 }
 

conf/mendel.config

@@ -1,6 +1,5 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'MENDEL'
   config_profile_description = 'GMI MENDEL cluster profile provided by nf-core/configs'
   config_profile_contact = 'Patrick Hüther (@phue)'
   config_profile_url = 'http://www.gmi.oeaw.ac.at/'

conf/munin.config

@@ -1,6 +1,5 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'MUNIN'
   config_profile_description = 'Big iron cluster profile provided by nf-core/configs.'
   config_profile_contact = 'Szilveszter Juhos (@szilva)'
   config_profile_url = ''
@@ -29,5 +28,5 @@ params {
   max_cpus = 16
   max_time = 72.h
   // illumina iGenomes reference file paths on UPPMAX
-  igenomes_base = '/data0/btb/references/igenomes/'
+  igenomes_base = '/data1/references/igenomes/'
 }

conf/phoenix.config

@@ -1,6 +1,5 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'PHOENIX'
   config_profile_description = 'University of Adelaide Phoenix HPC cluster profile provided by nf-core/configs'
   config_profile_contact = 'Yassine Souilmi / Alexander Peltzer (@yassineS, @apeltzer)'
   config_profile_url = 'https://www.adelaide.edu.au/phoenix/'

conf/prince.config (new file, 27 lines)

@@ -0,0 +1,27 @@
+singularityDir = "$SCRATCH/singularity_images_nextflow"
+singularityModule = "singularity/3.2.1"
+squashfsModule = "squashfs/4.3"
+
+params {
+  config_profile_description = """
+  NYU prince cluster profile provided by nf-core/configs.
+  Run from your scratch directory, the output files may be large!
+  Please consider running the pipeline on a compute node the first time, as it will be pulling the docker image, which will be converted into a singularity image, which is heavy on the login node. Subsequent runs can be done on the login node, as the docker image will only be pulled and converted once. By default the images will be stored in $singularityDir
+  """.stripIndent()
+  config_profile_contact = 'Tobias Schraink (@tobsecret)'
+  config_profile_url = 'https://github.com/nf-core/configs/blob/master/docs/prince.md'
+}
+
+singularity {
+  enabled = true
+  cacheDir = singularityDir
+}
+
+process {
+  beforeScript = """
+  module load $singularityModule
+  module load $squashfsModule
+  """
+  .stripIndent()
+  executor = 'slurm'
+}
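A side note on the idiom shared by the two new config files above (`conf/bigpurple.config` and `conf/prince.config`): a Nextflow config file is a Groovy script, so plain assignments at the top (here `singularityDir`, `singularityModule`, `squashfsModule`) define local variables that later scopes in the same file can reference or interpolate. A minimal self-contained sketch, with a made-up path:

```groovy
// Local variable defined at the top of a config file (hypothetical path)...
cacheRoot = "/tmp/nf-cache"

// ...and reused further down in the same file
singularity {
  enabled = true
  cacheDir = "${cacheRoot}/singularity"
}
```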

conf/shh.config

@@ -1,6 +1,5 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'SHH'
   config_profile_description = 'MPI SHH cluster profile provided by nf-core/configs.'
   config_profile_contact = 'James Fellows Yates (@jfy133)'
   config_profile_url = 'https://shh.mpg.de'
@@ -8,7 +7,9 @@ params {
 
 singularity {
   enabled = true
-  cacheDir = "/projects1/users/$USER/nextflow/nf_cache/singularity/"
+  autoMounts = true
+  runOptions = '-B /run/shm:/run/shm'
+  cacheDir = "/projects1/singularity_scratch/cache/"
 }
 
 process {
@@ -16,8 +17,14 @@ process {
   queue = 'short'
 }
 
-params {
-  max_memory = 734.GB
-  max_cpus = 64
-  max_time = 2.h
+executor {
+  queueSize = 16
 }
+
+params {
+  max_memory = 256.GB
+  max_cpus = 32
+  max_time = 2.h
+  //Illumina iGenomes reference file path
+  igenomes_base = "/projects1/public_data/igenomes/"
+}

conf/uct_hex.config

@@ -1,6 +1,5 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'uct_hex'
   config_profile_description = 'University of Cape Town HEX cluster config file provided by nf-core/configs.'
   config_profile_contact = 'Katie Lennard (@kviljoen)'
   config_profile_url = 'http://hpc.uct.ac.za/index.php/hex-3/'
@@ -22,4 +21,3 @@ executor{
   executor = 'pbs'
   jobName = { "$task.tag" }
 }
-

conf/uppmax-devel.config

@@ -1,6 +1,5 @@
 // Profile config names for nf-core/configs
 params {
-  config_profile_name = 'UPPMAX-devel'
   config_profile_description = 'Testing & development profile for UPPMAX, provided by nf-core/configs.'
   config_profile_contact = 'Phil Ewels (@ewels)'
   config_profile_url = 'https://www.uppmax.uu.se/'

conf/uppmax.config

@@ -1,6 +1,5 @@
 //Profile config names for nf-core/configs
 params {
-  config_profile_name = 'UPPMAX'
   config_profile_description = 'Swedish UPPMAX cluster profile provided by nf-core/configs.'
   config_profile_contact = 'Phil Ewels (@ewels)'
   config_profile_url = 'https://www.uppmax.uu.se/'

conf/uzh.config

@@ -1,6 +1,5 @@
 //Profile config names for nf-core/configs
 params{
-  config_profile_name = 'UZH'
   config_profile_description = 'UZH science cloud profile provided by nf-core/configs'
   config_profile_contact = 'Judith Neukamm/Alexander Peltzer (@JudithNeukamm, @apeltzer)'
   config_profile_url = 'https://www.id.uzh.ch/en/scienceit/infrastructure/sciencecloud.html'

docs/bigpurple.md (new file, 24 lines)

@@ -0,0 +1,24 @@
+# nf-core/configs: BigPurple Configuration
+## nf-core pipelines that use this repo
+All nf-core pipelines that use this config repo (which is most), can be run on BigPurple. **Before** running a pipeline for the first time, go into an interactive slurm session on a compute node (`srun --pty --time=02:00:00 -c 2`), as the docker image for the pipeline will need to be pulled and converted. Once in the interactive session:
+
+```
+module load singularity/3.1
+module load squashfs-tools/4.3
+```
+
+Now, run the pipeline of your choice with `-profile bigpurple`. This will download and launch the bigpurple.config which has been pre-configured with a setup suitable for the BigPurple cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a singularity image before execution of the pipeline.
+An example commandline:
+
+`nextflow run nf-core/<pipeline name> -profile bigpurple <additional flags>`
+
+## nf-core pipelines that do not use this repo
+If the pipeline has not yet been configured to use this config, then you will have to do it manually.
+git clone this repo, copy the `bigpurple.config` from the conf folder and then you can invoke the pipeline like this:
+
+`nextflow run nf-core/<pipeline name> -c bigpurple.config <additional flags>`
+
+
+>NB: You will need an account to use the HPC cluster BigPurple in order to run the pipeline. If in doubt contact MCIT.
+
+>NB: You will need to install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason there is no module for nextflow on the cluster, is that the development cycle of nextflow is rapid and it's easy to update yourself: `nextflow self-update`

docs/ccga.md (new file, 18 lines)

@@ -0,0 +1,18 @@
+# nf-core/configs: CCGA Configuration
+
+Deployment and testing of nf-core pipelines at the CCGA cluster is on-going.
+
+To use, run the pipeline with `-profile ccga`. This will download and launch the [`ccga.config`](../conf/ccga.config) which has been pre-configured with a setup suitable for the CCGA cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
+
+Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on the cluster. You can do this by issuing the commands below:
+
+```bash
+## Load Nextflow and Singularity environment modules
+module purge
+module load IKMB
+module load Java/1.8.0
+module load Nextflow
+module load singularity3.1.0
+```
+
+>NB: Access to the CCGA cluster is restricted to IKMB/CCGA employes. Please talk to Marc Hoeppner to get access (@marchoeppner).

docs/czbiohub.md

@@ -22,6 +22,7 @@ Now you can run pipelines with abandon!
 
 ### 2. Make a GitHub repo for your workflows (optional :)
 
+
 To make sharing your pipelines and commands easy between your teammates, it's best to share code in a GitHub repository. One way is to store the commands in a Makefile ([example](https://github.com/czbiohub/kh-workflows/blob/master/nf-kmer-similarity/Makefile)) which can contain multiple `nextflow run` commands so that you don't need to remember the S3 bucket or output directory for every single one. [Makefiles](https://kbroman.org/minimal_make/) are broadly used in the software community for running many complex commands. Makefiles can have a lot of dependencies and be confusing, so we're only going to write *simple* Makefiles.
 
 ```
@@ -82,6 +83,7 @@ git push origin master
 
 ### 3. Run your workflow!!
 
+
 Remember to specify `-profile czbiohub_aws` to grab the CZ Biohub-specific AWS configurations, and an `--outdir` with an AWS S3 bucket so you don't run out of space on your small AMI
 
 ```
@@ -115,6 +117,7 @@ It's important that this command be re-run from the same directory as there is a
 ## iGenomes specific configuration
 
+
 A local copy of the iGenomes resource has been made available on `s3://czbiohub-reference/igenomes` (in `us-west-2` region) so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline.
 
 You can do this by simply using the `--genome <GENOME_ID>` parameter.
 
 For Human and Mouse, we use [GENCODE](https://www.gencodegenes.org/) gene annotations. This doesn't change how you would specify the genome name, only that the pipelines run with the `czbiohub_aws` profile would be with GENCODE rather than iGenomes.
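Connecting the doc above to the config side: the `--genome <GENOME_ID>` lookup works because the profile points `igenomes_base` at the Biohub mirror, so the per-genome paths in `igenomes.config` resolve under that bucket. A hedged sketch of the relevant fragment (the actual `czbiohub_aws.config` may differ):

```groovy
// Sketch: route iGenomes lookups to the CZ Biohub mirror (us-west-2)
params {
  igenomes_base = 's3://czbiohub-reference/igenomes'
}
```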

docs/images/nfcore-configs_logo.png (new executable file)

Binary file not shown. (Image, 14 KiB.)

docs/prince.md (new file, 20 lines)

@@ -0,0 +1,20 @@
+# nf-core/configs: Prince Configuration
+## nf-core pipelines that use this repo
+All nf-core pipelines that use this config repo (which is most), can be run on prince. **Before** running a pipeline for the first time, go into an interactive slurm session on a compute node (`srun --pty --time=02:00:00 -c 2`), as the docker image for the pipeline will need to be pulled and converted.
+
+Now, run the pipeline of your choice with `-profile prince`. This will download and launch the prince.config which has been pre-configured with a setup suitable for the prince cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a singularity image before execution of the pipeline. This step **takes time**!!
+An example commandline:
+
+`nextflow run nf-core/<pipeline name> -profile prince <additional flags>`
+
+## nf-core pipelines that do not use this repo
+If the pipeline has not yet been configured to use this config, then you will have to do it manually.
+git clone this repo, copy the `prince.config` from the conf folder and then you can invoke the pipeline like this:
+
+`nextflow run nf-core/<pipeline name> -c prince.config <additional flags>`
+
+
+>NB: You will need an account to use the HPC cluster Prince in order to run the pipeline. If in doubt contact the HPC admins.
+
+>NB: Rather than using the nextflow module, I recommend you install nextflow in your home directory - instructions are on nextflow.io (or ask the writer of this profile). The reason this is better than using the module for nextflow on the cluster, is that the development cycle of nextflow is rapid and it's easy to update your installation yourself: `nextflow self-update`
+

docs/shh.md (20 changes)

@@ -1,21 +1,17 @@
 # nf-core/configs: SHH Configuration
 
-All nf-core pipelines have been successfully configured for use on the Department of Archaeogenetic's SDAG cluster at the [Max Planck Institute for the Science of Human History (MPI-SHH)](http://shh.mpg.de).
+All nf-core pipelines have been successfully configured for use on the Department of Archaeogenetic's SDAG/CDAG clusters at the [Max Planck Institute for the Science of Human History (MPI-SHH)](http://shh.mpg.de).
 
-To use, run the pipeline with `-profile shh`. This will download and launch the [`shh.config`](../conf/shh.config) which has been pre-configured with a setup suitable for the SDAG cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
-
-Note that the configuration file is currently optimised for `nf-core/eager`. It
-will submit to the medium queue but with a walltime of 48 hours.
-
-## Preparation
-Before running the pipeline you will need to create a the following folder in your `/projects1/users/` directory. This will be used to store the singularity software images, which will take up too much space for your home directory.
-
-This should be named as follows, replacing `<your_user>` with your username:
+To use, run the pipeline with `-profile shh`. This will download and launch the [`shh.config`](../conf/shh.config) which has been pre-configured with a setup suitable for the SDAG and CDAG clusters. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. The image will currently be centrally stored here:
 
 ```bash
-"/projects1/users/<your_user>/nextflow/nf_cache/singularity/"
+/projects1/singularity_scratch/cache/
 ```
 
+however this will likely change to a read-only directory in the future that will be managed by IT.
+
+Note that **the configuration file is currently optimised for `nf-core/eager`**. It will submit to the short queue but with a walltime of 2 hours.
+
 >NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact IT.
 
->NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the lhead nodes. If in doubt contact IT.
+>NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.

nfcore_custom.config

@@ -28,9 +28,20 @@ profiles {
   phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
   shh { includeConfig "${params.custom_config_base}/conf/shh.config" }
   uct_hex { includeConfig "${params.custom_config_base}/conf/uct_hex.config" }
-  uppmax_devel { includeConfig "${params.custom_config_base}/conf/uppmax.config"
-                 includeConfig "${params.custom_config_base}/conf/uppmax-devel.config"
-               }
+  uppmax_devel { includeConfig "${params.custom_config_base}/conf/uppmax.config"; includeConfig "${params.custom_config_base}/conf/uppmax-devel.config" }
   uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" }
   uzh { includeConfig "${params.custom_config_base}/conf/uzh.config" }
+  prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
+  bigpurple { includeConfig "${params.custom_config_base}/conf/bigpurple.config" }
+}
+
+// If user hostnames contain one of these substring and they are
+// not running the associated profile, it will trigger a warning message
+// Should be defined here for all profiles (not within profile config)
+params {
+  // This is a groovy map, not a nextflow parameter set
+  hostnames = [
+    crick: ['.thecrick.org'],
+    uppmax: ['.uppmax.uu.se']
+  ]
 }
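For readers wondering how the new `params.hostnames` map is consumed: the check itself lives in each nf-core pipeline, not in this repository. A simplified Groovy sketch of the idea (names below are invented for illustration, not the pipelines' actual code):

```groovy
// Hypothetical sketch of the hostname check an nf-core pipeline performs
def checkHostname(Map hostnames, String activeProfile) {
  def hostname = java.net.InetAddress.getLocalHost().getHostName()
  hostnames.each { profile, substrings ->
    substrings.each { sub ->
      if (hostname.contains(sub) && activeProfile != profile) {
        println("WARNING: your hostname matches '${sub}' but you are not running with -profile ${profile}")
      }
    }
  }
}

// Example invocation with the map defined above; it prints a warning only
// if the local hostname matches a substring of a profile other than 'uppmax'.
checkHostname([crick: ['.thecrick.org'], uppmax: ['.uppmax.uu.se']], 'uppmax')
```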