mirror of
https://github.com/MillironX/nf-configs.git
synced 2024-12-24 02:58:17 +00:00
update with upstream
This commit is contained in:
commit
0909708b97
58 changed files with 1137 additions and 160 deletions
5
.github/workflows/main.yml
vendored
5
.github/workflows/main.yml
vendored
|
@ -16,7 +16,7 @@ jobs:
|
|||
needs: test_all_profiles
|
||||
strategy:
|
||||
matrix:
|
||||
profile: ['awsbatch', 'bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'kraken', 'munin', 'pasteur', 'phoenix', 'prince', 'shh', 'uct_hex', 'uppmax', 'utd_ganymede', 'uzh']
|
||||
profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'oist', 'pasteur', 'phoenix', 'prince', 'seg_globe', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh']
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Install Nextflow
|
||||
|
@ -24,6 +24,7 @@ jobs:
|
|||
wget -qO- get.nextflow.io | bash
|
||||
sudo mv nextflow /usr/local/bin/
|
||||
- name: Check ${{ matrix.profile }} profile
|
||||
env:
|
||||
env:
|
||||
SCRATCH: '~'
|
||||
NXF_GLOBAL_CONFIG: awsbatch.config
|
||||
run: nextflow run ${GITHUB_WORKSPACE}/configtest.nf --custom_config_base=${GITHUB_WORKSPACE} -profile ${{ matrix.profile }}
|
||||
|
|
22
README.md
22
README.md
|
@ -15,6 +15,7 @@ A repository for hosting Nextflow configuration files containing custom paramete
|
|||
* [Documentation](#documentation)
|
||||
* [Uploading to `nf-core/configs`](#uploading-to-nf-coreconfigs)
|
||||
* [Adding a new pipeline-specific config](#adding-a-new-pipeline-specific-config)
|
||||
* [Pipeline-specific institutional documentation](#pipeline-specific-institutional-documentation)
|
||||
* [Pipeline-specific documentation](#pipeline-specific-documentation)
|
||||
* [Enabling pipeline-specific configs within a pipeline](#enabling-pipeline-specific-configs-within-a-pipeline)
|
||||
* [Create the pipeline-specific `nf-core/configs` files](#create-the-pipeline-specific-nf-coreconfigs-files)
|
||||
|
@ -93,8 +94,10 @@ See [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs
|
|||
|
||||
Currently documentation is available for the following systems:
|
||||
|
||||
* [ABIMS](docs/abims.md)
|
||||
* [AWSBATCH](docs/awsbatch.md)
|
||||
* [BIGPURPLE](docs/bigpurple.md)
|
||||
* [BI](docs/bi.md)
|
||||
* [BINAC](docs/binac.md)
|
||||
* [CBE](docs/cbe.md)
|
||||
* [CCGA_DX](docs/ccga_dx.md)
|
||||
|
@ -102,7 +105,6 @@ Currently documentation is available for the following systems:
|
|||
* [CFC](docs/cfc.md)
|
||||
* [CRICK](docs/crick.md)
|
||||
* [CZBIOHUB_AWS](docs/czbiohub.md)
|
||||
* [CZBIOHUB_AWS_HIGHPRIORITY](docs/czbiohub.md)
|
||||
* [DENBI_QBIC](docs/denbi_qbic.md)
|
||||
* [EBC](docs/ebc.md)
|
||||
* [GENOTOUL](docs/genotoul.md)
|
||||
|
@ -110,13 +112,17 @@ Currently documentation is available for the following systems:
|
|||
* [GIS](docs/gis.md)
|
||||
* [GOOGLE](docs/google.md)
|
||||
* [HEBBE](docs/hebbe.md)
|
||||
* [ICR_DAVROS](docs/icr_davros.md)
|
||||
* [KRAKEN](docs/kraken.md)
|
||||
* [MPCDF](docs/mpcdf.md)
|
||||
* [MUNIN](docs/munin.md)
|
||||
* [OIST](docs/oist.md)
|
||||
* [PASTEUR](docs/pasteur.md)
|
||||
* [PHOENIX](docs/phoenix.md)
|
||||
* [PRINCE](docs/prince.md)
|
||||
* [SEG_GLOBE](docs/seg_globe.md)
|
||||
* [SHH](docs/shh.md)
|
||||
* [UCT_HEX](docs/uct_hex.md)
|
||||
* [UCT_HPC](docs/uct_hpc.md)
|
||||
* [UPPMAX](docs/uppmax.md)
|
||||
* [UTD_GANYMEDE](docs/utd_ganymede.md)
|
||||
* [UZH](docs/uzh.md)
|
||||
|
@ -159,12 +165,13 @@ Each configuration file will add new params and overwrite the params already exi
|
|||
|
||||
Note that pipeline-specific configs are not required and should only be added if needed.
|
||||
|
||||
### Pipeline-specific documentation
|
||||
### Pipeline-specific institutional documentation
|
||||
|
||||
Currently documentation is available for the following pipeline within the specific profile:
|
||||
Currently documentation is available for the following pipelines within specific profiles:
|
||||
|
||||
* ampliseq
|
||||
* [BINAC](docs/pipeline/ampliseq/binac.md)
|
||||
* [UPPMAX](docs/pipeline/ampliseq/uppmax.md)
|
||||
* eager
|
||||
* [SHH](docs/pipeline/eager/shh.md)
|
||||
* rnafusion
|
||||
|
@ -173,6 +180,13 @@ Currently documentation is available for the following pipeline within the speci
|
|||
* [MUNIN](docs/pipeline/sarek/munin.md)
|
||||
* [UPPMAX](docs/pipeline/sarek/uppmax.md)
|
||||
|
||||
### Pipeline-specific documentation
|
||||
|
||||
Currently documentation is available for the following pipeline:
|
||||
|
||||
* viralrecon
|
||||
* [genomes](docs/pipeline/viralrecon/genomes.md)
|
||||
|
||||
### Enabling pipeline-specific configs within a pipeline
|
||||
|
||||
:warning: **This has to be done on a fork of the `nf-core/<PIPELINE>` repository.**
|
||||
|
|
|
@ -45,11 +45,11 @@ def check_config(Config, Github):
|
|||
if re.search(regex, line):
|
||||
hit = line.split('/')[2].split('.')[0]
|
||||
config_profiles.add(hit.strip())
|
||||
|
||||
###Check Github Config now
|
||||
|
||||
### Check Github Config now
|
||||
tests = set()
|
||||
###Ignore these profiles
|
||||
ignore_me = ['czbiohub_aws_highpriority', 'czbiohub_aws']
|
||||
### Ignore these profiles
|
||||
ignore_me = ['czbiohub_aws']
|
||||
tests.update(ignore_me)
|
||||
with open(Github, 'r') as ghfile:
|
||||
for line in ghfile:
|
||||
|
@ -58,11 +58,11 @@ def check_config(Config, Github):
|
|||
profiles = line.split(':')[1].split(',')
|
||||
for p in profiles:
|
||||
tests.add(p.strip())
|
||||
|
||||
|
||||
###Check if sets are equal
|
||||
if tests == config_profiles:
|
||||
sys.exit(0)
|
||||
else:
|
||||
else:
|
||||
#Maybe report what is missing here too
|
||||
print("Tests don't seem to test these profiles properly. Please check whether you added the profile to the Github Actions testing YAML.\n")
|
||||
print(config_profiles.symmetric_difference(tests))
|
||||
|
|
24
conf/abims.config
Normal file
24
conf/abims.config
Normal file
|
@ -0,0 +1,24 @@
|
|||
//Profile config names for nf-core/configs
|
||||
params {
|
||||
config_profile_description = 'The ABiMS cluster profile'
|
||||
config_profile_contact = 'Gildas Le Corguillé (@lecorguille)'
|
||||
config_profile_url = 'https://abims.sb-roscoff.fr'
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
autoMounts = false
|
||||
runOptions = '-B /scratch:/scratch -B /scratch2:/scratch2 -B /shared:/shared'
|
||||
}
|
||||
|
||||
process {
|
||||
executor = 'slurm'
|
||||
}
|
||||
|
||||
params {
|
||||
igenomes_ignore = true
|
||||
igenomesIgnore = true //deprecated
|
||||
max_memory = 750.GB
|
||||
max_cpus = 200
|
||||
max_time = 24.h
|
||||
}
|
14
conf/bi.config
Normal file
14
conf/bi.config
Normal file
|
@ -0,0 +1,14 @@
|
|||
params{
|
||||
config_profile_description = 'Boehringer Ingelheim internal profile provided by nf-core/configs.'
|
||||
config_profile_contact = 'Alexander Peltzer (@apeltzer)'
|
||||
config_profile_url = 'https://www.boehringer-ingelheim.com/'
|
||||
}
|
||||
|
||||
params.globalConfig = System.getenv('NXF_GLOBAL_CONFIG')
|
||||
if(params.globalConfig == null)
|
||||
{
|
||||
def errorMessage = "WARNING: For bi.config requires NXF_GLOBAL_CONFIG env var to be set. Point it to global.config file if you want to use this profile."
|
||||
System.err.println(errorMessage)
|
||||
}else{
|
||||
includeConfig params.globalConfig
|
||||
}
|
|
@ -7,9 +7,9 @@ params {
|
|||
|
||||
process {
|
||||
executor = 'slurm'
|
||||
module = 'singularity/3.4.1'
|
||||
queue = { task.memory <= 170.GB ? 'c' : 'm' }
|
||||
clusterOptions = { task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' }
|
||||
module = 'anaconda3/2019.10'
|
||||
}
|
||||
|
||||
singularity {
|
||||
|
|
|
@ -14,6 +14,7 @@ process {
|
|||
beforeScript = 'module load devel/singularity/3.4.2'
|
||||
executor = 'slurm'
|
||||
queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
|
||||
scratch = 'true'
|
||||
}
|
||||
|
||||
weblog{
|
||||
|
|
|
@ -13,6 +13,7 @@ process {
|
|||
beforeScript = 'module load devel/singularity/3.4.2'
|
||||
executor = 'slurm'
|
||||
queue = { task.memory > 60.GB || task.cpus > 20 ? 'qbic' : 'compute' }
|
||||
scratch = 'true'
|
||||
}
|
||||
|
||||
weblog{
|
||||
|
|
|
@ -11,7 +11,6 @@ singularity {
|
|||
}
|
||||
|
||||
process {
|
||||
beforeScript = 'module load Singularity/2.6.0-foss-2016b'
|
||||
executor = 'slurm'
|
||||
}
|
||||
|
||||
|
|
|
@ -135,3 +135,12 @@ params {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
profiles {
|
||||
highpriority {
|
||||
process {
|
||||
queue = 'highpriority-971039e0-830c-11e9-9e0b-02c5b84a8036'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,12 +0,0 @@
|
|||
/*
|
||||
* -------------------------------------------------
|
||||
* Nextflow config file for Chan Zuckerberg Biohub
|
||||
* -------------------------------------------------
|
||||
* Defines reference genomes, using iGenome paths
|
||||
* Imported under the default 'standard' Nextflow
|
||||
* profile in nextflow.config
|
||||
*/
|
||||
|
||||
process {
|
||||
queue = 'highpriority-971039e0-830c-11e9-9e0b-02c5b84a8036'
|
||||
}
|
|
@ -8,7 +8,7 @@
|
|||
cleanup = true
|
||||
|
||||
conda {
|
||||
cacheDir = '/ebc_data/nf-core/conda'
|
||||
cacheDir = '/gpfs/space/GI/ebc_data/software/nf-core/conda'
|
||||
}
|
||||
process {
|
||||
executor = 'slurm'
|
||||
|
@ -16,7 +16,7 @@
|
|||
beforeScript = 'module load nextflow'
|
||||
}
|
||||
executor {
|
||||
queueSize = 16
|
||||
queueSize = 64
|
||||
}
|
||||
params {
|
||||
max_memory = 12.GB
|
||||
|
|
39
conf/icr_davros.config
Normal file
39
conf/icr_davros.config
Normal file
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* -------------------------------------------------
|
||||
* Nextflow nf-core config file for ICR davros HPC
|
||||
* -------------------------------------------------
|
||||
* Defines LSF process executor and singularity
|
||||
* settings.
|
||||
*
|
||||
*/
|
||||
params {
|
||||
config_profile_description = "Nextflow nf-core profile for ICR davros HPC"
|
||||
config_profile_contact = "Adrian Larkeryd (@adrlar)"
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
runOptions = "--bind /mnt:/mnt --bind /data:/data"
|
||||
// autoMounts = true // autoMounts sometimes causes a rare bug with the installed version of singularity
|
||||
}
|
||||
|
||||
executor {
|
||||
// This is set because of an issue with too many
|
||||
// singularity containers launching at once, they
|
||||
// cause an singularity error with exit code 255.
|
||||
submitRateLimit = "2 sec"
|
||||
}
|
||||
|
||||
process {
|
||||
executor = "LSF"
|
||||
}
|
||||
|
||||
params {
|
||||
// LSF cluster set up with memory tied to cores,
|
||||
// it can't be requested. Locked at 12G per core.
|
||||
cpus = 10
|
||||
max_cpus = 20
|
||||
max_memory = 12.GB
|
||||
max_time = 168.h
|
||||
igenomes_base = "/mnt/scratch/readonly/igenomes"
|
||||
}
|
37
conf/imperial.config
Normal file
37
conf/imperial.config
Normal file
|
@ -0,0 +1,37 @@
|
|||
//Profile config names for nf-core/configs
|
||||
|
||||
params {
|
||||
// Config Params
|
||||
config_profile_description = 'Imperial College London - HPC Profile -- provided by nf-core/configs.'
|
||||
config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
|
||||
config_profile_url = 'https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/'
|
||||
|
||||
// Resources
|
||||
max_memory = 256.GB
|
||||
max_cpus = 32
|
||||
max_time = 72.h
|
||||
}
|
||||
|
||||
executor {
|
||||
$pbspro {
|
||||
queueSize = 50
|
||||
}
|
||||
|
||||
$local {
|
||||
cpus = 2
|
||||
queueSize = 1
|
||||
memory = '32 GB'
|
||||
}
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
autoMounts = true
|
||||
runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
|
||||
}
|
||||
|
||||
process {
|
||||
|
||||
executor = 'pbspro'
|
||||
|
||||
}
|
44
conf/imperial_mb.config
Normal file
44
conf/imperial_mb.config
Normal file
|
@ -0,0 +1,44 @@
|
|||
//Profile config names for nf-core/configs
|
||||
|
||||
params {
|
||||
// Config Params
|
||||
config_profile_description = 'Imperial College London - MEDBIO QUEUE - HPC Profile -- provided by nf-core/configs.'
|
||||
config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
|
||||
config_profile_url = 'https://www.imperial.ac.uk/bioinformatics-data-science-group/resources/uk-med-bio/'
|
||||
|
||||
// Resources
|
||||
max_memory = 640.GB
|
||||
max_cpus = 32
|
||||
max_time = 168.h
|
||||
}
|
||||
|
||||
executor {
|
||||
$pbspro {
|
||||
queueSize = 50
|
||||
}
|
||||
|
||||
$local {
|
||||
cpus = 2
|
||||
queueSize = 1
|
||||
memory = '32 GB'
|
||||
}
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
autoMounts = true
|
||||
runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
|
||||
}
|
||||
|
||||
process {
|
||||
|
||||
executor = 'pbspro'
|
||||
queue = 'pqmedbio-tput'
|
||||
|
||||
//queue = 'med-bio' //!! this is an alias and shouldn't be used
|
||||
|
||||
withLabel:process_large {
|
||||
queue = 'pqmedbio-large'
|
||||
}
|
||||
|
||||
}
|
51
conf/mpcdf.config
Normal file
51
conf/mpcdf.config
Normal file
|
@ -0,0 +1,51 @@
|
|||
params {
|
||||
config_profile_description = 'MPCDF HPC profiles (unoffically) provided by nf-core/configs.'
|
||||
config_profile_contact = 'James Fellows Yates (@jfy133)'
|
||||
config_profile_url = 'https://www.mpcdf.mpg.de/services/computing'
|
||||
}
|
||||
|
||||
|
||||
profiles {
|
||||
cobra {
|
||||
// Does not have singularity! Conda module must be used, but it is
|
||||
// recommended to set NXF_CONDA_CACHEDIR var in ~/.bash{_profile,rc}
|
||||
// To create common cache dir
|
||||
|
||||
process {
|
||||
beforeScript = 'module load anaconda/3/2020.02'
|
||||
executor = 'slurm'
|
||||
}
|
||||
|
||||
executor {
|
||||
queueSize = 8
|
||||
}
|
||||
|
||||
params {
|
||||
config_profile_description = 'MPCDF cobra profile (unofficially) provided by nf-core/configs.'
|
||||
max_memory = 725.GB
|
||||
max_cpus = 80
|
||||
max_time = 24.h
|
||||
}
|
||||
}
|
||||
raven {
|
||||
// Does not have singularity! Conda module must be used, but it is
|
||||
// recommended to set NXF_CONDA_CACHEDIR var in ~/.bash{_profile,rc}
|
||||
// to create common cache dir
|
||||
|
||||
process {
|
||||
beforeScript = 'module load anaconda/3/2020.02'
|
||||
executor = 'slurm'
|
||||
}
|
||||
|
||||
executor {
|
||||
queueSize = 8
|
||||
}
|
||||
|
||||
params {
|
||||
config_profile_description = 'MPCDF raven profile (unofficially) provided by nf-core/configs.'
|
||||
max_memory = 368.GB
|
||||
max_cpus = 192
|
||||
max_time = 24.h
|
||||
}
|
||||
}
|
||||
}
|
22
conf/oist.config
Normal file
22
conf/oist.config
Normal file
|
@ -0,0 +1,22 @@
|
|||
//Profile config names for nf-core/configs
|
||||
params {
|
||||
config_profile_description = 'The Okinawa Institute of Science and Technology Graduate University (OIST) HPC cluster profile provided by nf-core/configs.'
|
||||
config_profile_contact = 'OISTs Bioinformatics User Group <BioinfoUgrp@oist.jp>'
|
||||
config_profile_url = 'https://github.com/nf-core/configs/blob/master/docs/oist.md'
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
process {
|
||||
executor = 'slurm'
|
||||
queue = 'compute'
|
||||
clusterOptions = '-C zen2'
|
||||
}
|
||||
|
||||
params {
|
||||
max_memory = 500.GB
|
||||
max_cpus = 128
|
||||
max_time = 90.h
|
||||
}
|
20
conf/pipeline/ampliseq/uppmax.config
Normal file
20
conf/pipeline/ampliseq/uppmax.config
Normal file
|
@ -0,0 +1,20 @@
|
|||
// Profile config names for nf-core/configs
|
||||
params {
|
||||
// Specific nf-core/configs params
|
||||
config_profile_contact = 'Daniel Lundin (daniel.lundin@lnu.se)'
|
||||
config_profile_description = 'nf-core/ampliseq UPPMAX profile provided by nf-core/configs'
|
||||
}
|
||||
|
||||
process {
|
||||
withName: classifier_extract_seq {
|
||||
clusterOptions = { "-A $params.project -p core -n 1 -t 7-00:00:00 ${params.clusterOptions ?: ''}" }
|
||||
}
|
||||
|
||||
withName: classifier_train {
|
||||
clusterOptions = { "-A $params.project -C fat -p node -N 1 -t 24:00:00 ${params.clusterOptions ?: ''}" }
|
||||
}
|
||||
|
||||
withName: classifier {
|
||||
clusterOptions = { "-A $params.project -C fat -p node -N 1 ${params.clusterOptions ?: ''}" }
|
||||
}
|
||||
}
|
64
conf/pipeline/eager/mpcdf.config
Normal file
64
conf/pipeline/eager/mpcdf.config
Normal file
|
@ -0,0 +1,64 @@
|
|||
// Profile config names for nf-core/configs
|
||||
|
||||
params {
|
||||
// Specific nf-core/configs params
|
||||
config_profile_contact = 'James Fellows Yates (@jfy133)'
|
||||
config_profile_description = 'nf-core/eager MPCDF profile provided by nf-core/configs'
|
||||
}
|
||||
|
||||
profile {
|
||||
cobra {
|
||||
// Specific nf-core/eager process configuration
|
||||
process {
|
||||
|
||||
withName: malt {
|
||||
maxRetries = 1
|
||||
memory = 725.GB
|
||||
cpus = 40
|
||||
time = 24.h
|
||||
}
|
||||
|
||||
withLabel:'sc_tiny'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 1.GB * task.attempt, 'memory' ) }
|
||||
time = 24.h
|
||||
}
|
||||
|
||||
withLabel:'sc_small'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
|
||||
time = 24.h
|
||||
}
|
||||
|
||||
withLabel:'sc_medium'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
|
||||
time = 24.h
|
||||
}
|
||||
|
||||
withLabel:'mc_small'{
|
||||
cpus = { check_max( 2 * task.attempt, 'cpus' ) }
|
||||
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
|
||||
time = 24.h
|
||||
}
|
||||
|
||||
withLabel:'mc_medium' {
|
||||
cpus = { check_max( 4 * task.attempt, 'cpus' ) }
|
||||
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
|
||||
time = 24.h
|
||||
}
|
||||
|
||||
withLabel:'mc_large'{
|
||||
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
|
||||
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
|
||||
time = 24.h
|
||||
}
|
||||
|
||||
withLabel:'mc_huge'{
|
||||
cpus = { check_max( 32 * task.attempt, 'cpus' ) }
|
||||
memory = { check_max( 256.GB * task.attempt, 'memory' ) }
|
||||
time = 24.h
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -9,11 +9,206 @@ params {
|
|||
|
||||
// Specific nf-core/eager process configuration
|
||||
process {
|
||||
|
||||
maxRetries = 2
|
||||
|
||||
withName: malt {
|
||||
maxRetries = 1
|
||||
memory = { task.attempt > 1 ? 1900.GB : 725.GB }
|
||||
memory = { task.attempt > 1 ? 1900.GB : 725.GB }
|
||||
cpus = { task.attempt > 1 ? 112 : 64 }
|
||||
time = 1440.h
|
||||
queue = { task.memory > 756.GB ? 'supercruncher' : 'long' }
|
||||
}
|
||||
|
||||
withLabel:'sc_tiny'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 1.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'sc_small'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'sc_medium'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'mc_small'{
|
||||
cpus = { check_max( 2, 'cpus' ) }
|
||||
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'mc_medium' {
|
||||
cpus = { check_max( 4, 'cpus' ) }
|
||||
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'mc_large'{
|
||||
cpus = { check_max( 8, 'cpus' ) }
|
||||
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'mc_huge'{
|
||||
cpus = { check_max( 32, 'cpus' ) }
|
||||
memory = { check_max( 256.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
profiles {
|
||||
|
||||
big_data {
|
||||
|
||||
params {
|
||||
// Specific nf-core/configs params
|
||||
config_profile_contact = 'James Fellows Yates (@jfy133)'
|
||||
config_profile_description = 'nf-core/eager big-data SHH profile provided by nf-core/configs'
|
||||
}
|
||||
|
||||
executor {
|
||||
queueSize = 6
|
||||
}
|
||||
|
||||
process {
|
||||
|
||||
maxRetries = 2
|
||||
|
||||
withName:malt {
|
||||
maxRetries = 1
|
||||
memory = { task.attempt > 1 ? 1900.GB : 725.GB }
|
||||
cpus = { task.attempt > 1 ? 112 : 64 }
|
||||
time = 1440.h
|
||||
queue = { task.memory > 756.GB ? 'supercruncher' : 'long' }
|
||||
}
|
||||
|
||||
withName:hostremoval_input_fastq {
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 32.GB * task.attempt, 'memory' ) }
|
||||
time = 1440.h
|
||||
queue = 'long'
|
||||
}
|
||||
|
||||
withLabel:'sc_tiny'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 2.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'sc_small'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'sc_medium'{
|
||||
cpus = { check_max( 1, 'cpus' ) }
|
||||
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'mc_small'{
|
||||
cpus = { check_max( 2, 'cpus' ) }
|
||||
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'mc_medium' {
|
||||
cpus = { check_max( 4, 'cpus' ) }
|
||||
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'mc_large'{
|
||||
cpus = { check_max( 8, 'cpus' ) }
|
||||
memory = { check_max( 32.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
|
||||
withLabel:'mc_huge'{
|
||||
cpus = { check_max( 32, 'cpus' ) }
|
||||
memory = { check_max( 512.GB * task.attempt, 'memory' ) }
|
||||
time = { task.attempt == 3 ? 1440.h : task.attempt == 2 ? 48.h : 2.h }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
microbiome_screening {
|
||||
process {
|
||||
// Assuming NCBI NT-sized DB
|
||||
withName: malt {
|
||||
maxRetries = 1
|
||||
memory = 1900.GB
|
||||
cpus = 112
|
||||
time = 1440.h
|
||||
queue = 'supercruncher'
|
||||
}
|
||||
}
|
||||
}
|
||||
// IMPORTANT this profile is not reproducible due to hardcoded paths. For initial/automated screening ONLY.
|
||||
hops {
|
||||
params {
|
||||
config_profile_description = 'Rough HOPS screening MPI-SHH profile, provided by nf-core/configs.'
|
||||
fasta = '/projects1/Reference_Genomes/Human/HG19/hg19_complete.fasta'
|
||||
bwa_index = '/projects1/Reference_Genomes/Human/HG19/hg19_complete.fasta'
|
||||
fasta_index = '/projects1/Reference_Genomes/Human/HG19/hg19_complete.fasta.fai'
|
||||
seq_dict = '/projects1/Reference_Genomes/Human/HG19/hg19_complete.dict'
|
||||
bwaalnn = 0.01
|
||||
bwaalnl = 16
|
||||
run_bam_filtering = true
|
||||
bam_unmapped_type = 'fastq'
|
||||
run_metagenomic_screening = true
|
||||
metagenomic_tool = 'malt'
|
||||
metagenomic_min_support_reads = 1
|
||||
database = '/projects1/malt/databases/indexed/index040/full-bac-full-vir-etal-nov_2017'
|
||||
percent_identity = 85
|
||||
malt_mode = 'BlastN'
|
||||
malt_alignment_mode = 'SemiGlobal'
|
||||
malt_top_percent = 1
|
||||
malt_min_support_mode = 'reads'
|
||||
malt_max_queries = 100
|
||||
malt_memory_mode = 'load'
|
||||
run_maltextract = true
|
||||
maltextract_taxon_list = '/projects1/users/key/anc5h/soi.backup/List_of_pathogens_KB_fmk12_wViruses1.txt'
|
||||
maltextract_ncbifiles = '/projects1/clusterhomes/huebler/RMASifter/RMA_Extractor_Resources/'
|
||||
maltextract_filter = 'def_anc'
|
||||
maltextract_toppercent = 0.01
|
||||
maltextract_destackingoff = false
|
||||
maltextract_downsamplingoff = false
|
||||
maltextract_duplicateremovaloff = false
|
||||
maltextract_matches = false
|
||||
maltextract_megansummary = true
|
||||
maltextract_percentidentity = 85.0
|
||||
maltextract_topalignment = false
|
||||
}
|
||||
}
|
||||
pathogen_loose {
|
||||
params {
|
||||
config_profile_description = 'Pathogen (loose) MPI-SHH profile, provided by nf-core/configs.'
|
||||
bwaalnn = 0.01
|
||||
bwaalnl = 16
|
||||
}
|
||||
}
|
||||
pathogen_strict {
|
||||
params {
|
||||
config_profile_description = 'Pathogen (strict) MPI-SHH SDAG profile, provided by nf-core/configs.'
|
||||
bwaalnn = 0.1
|
||||
bwaalnl = 32
|
||||
}
|
||||
}
|
||||
human {
|
||||
params {
|
||||
config_profile_description = 'Human MPI-SHH SDAG profile, provided by nf-core/configs.'
|
||||
bwaalnn = 0.01
|
||||
bwaalnl = 16500
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,57 +4,7 @@ params {
|
|||
max_cpus = 24
|
||||
max_memory = 256.GB
|
||||
max_time = 72.h
|
||||
container_version = '1.1.0'
|
||||
|
||||
// Tool versions
|
||||
arriba_version = '1.1.0'
|
||||
ericscript_version = '0.5.5'
|
||||
fusioncatcher_version = '1.20'
|
||||
fusion_inspector_version = '1.3.1-star2.7.0f'
|
||||
pizzly_version = '0.37.3'
|
||||
squid_version = '1.5-star2.7.0f'
|
||||
star_fusion_version = '1.6.0'
|
||||
|
||||
// Paths
|
||||
reference_base = '/data1/references/rnafusion'
|
||||
containerPath = "file:///data1/containers/rnafusion_containers_v${container_version}"
|
||||
containerPathCommon = "file:///data1/containers/rnafusion_common"
|
||||
|
||||
// References
|
||||
fasta = "${params.reference_base}/1.1.0/Homo_sapiens.GRCh38_r97.all.fa"
|
||||
gtf = "${params.reference_base}/1.1.0/Homo_sapiens.GRCh38_r97.gtf"
|
||||
transcript = "${params.reference_base}/1.1.0/Homo_sapiens.GRCh38_r97.cdna.all.fa.gz"
|
||||
databases = "${params.reference_base}/1.1.0/databases"
|
||||
|
||||
star_index = "${params.reference_base}/1.1.0/star_index/star"
|
||||
arriba_ref = "${params.reference_base}/arriba_ref"
|
||||
fusioncatcher_ref = "${params.reference_base}/fusioncatcher_ref/human_v98"
|
||||
star_fusion_ref = "${params.reference_base}/star_fusion/1.6.0/ensembl/ctat_genome_lib_build_dir"
|
||||
ericscript_ref = "${params.reference_base}/ericscript_ref/ericscript_db_homosapiens_ensembl84"
|
||||
}
|
||||
|
||||
process {
|
||||
container = "${params.containerPath}/rnafusion_1.1.0.img"
|
||||
|
||||
withName: "arriba|arriba_visualization" {
|
||||
container = "${params.containerPath}/rnafusion_arriba_v${params.arriba_version}.img"
|
||||
}
|
||||
withName: "star_fusion|download_star_fusion" {
|
||||
container = "${params.containerPath}/rnafusion_star-fusion_v${params.star_fusion_version}.img"
|
||||
}
|
||||
withName:fusioncatcher {
|
||||
container = "${params.containerPath}/rnafusion_fusioncatcher_v${params.fusioncatcher_version}.img"
|
||||
}
|
||||
withName:fusion_inspector {
|
||||
container = "${params.containerPath}/rnafusion_fusion-inspector_v${params.fusion_inspector_version}.img"
|
||||
}
|
||||
withName:ericscript {
|
||||
container = "${params.containerPathCommon}/rnafusion_ericscript_v${params.ericscript_version}.img"
|
||||
}
|
||||
withName:pizzly {
|
||||
container = "${params.containerPathCommon}/rnafusion_pizzly_v${params.pizzly_version}.img"
|
||||
}
|
||||
withName:squid {
|
||||
container = "${params.containerPath}/rnafusion_squid_v${params.squid_version}.img"
|
||||
}
|
||||
genomes_base = '/data1/references/rnafusion/dev/'
|
||||
}
|
||||
|
|
13
conf/pipeline/sarek/icr_davros.config
Normal file
13
conf/pipeline/sarek/icr_davros.config
Normal file
|
@ -0,0 +1,13 @@
|
|||
/*
|
||||
* -------------------------------------------------
|
||||
* Nextflow nf-core config file for ICR davros HPC
|
||||
* -------------------------------------------------
|
||||
*/
|
||||
process {
|
||||
errorStrategy = {task.exitStatus in [104,134,137,139,141,143,255] ? 'retry' : 'finish'}
|
||||
maxRetries = 5
|
||||
withName:MapReads {
|
||||
memory = {check_resource(12.GB)}
|
||||
time = {check_resource(48.h * task.attempt)}
|
||||
}
|
||||
}
|
|
@ -4,14 +4,25 @@ params {
|
|||
config_profile_contact = 'Maxime Garcia (@MaxUlysse)'
|
||||
config_profile_description = 'nf-core/sarek uppmax profile provided by nf-core/configs'
|
||||
|
||||
singleCPUmem = 7000.MB
|
||||
single_cpu_mem = 7000.MB
|
||||
// Just useful until iGenomes is updated on UPPMAX
|
||||
igenomeIgnore = true
|
||||
igenomes_ignore = true
|
||||
genomes_base = params.genome == 'GRCh37' ? '/sw/data/uppnex/ToolBox/ReferenceAssemblies/hg38make/bundle/2.8/b37' : '/sw/data/uppnex/ToolBox/hg38bundle'
|
||||
}
|
||||
if (hostname ==~ "r.*") {
|
||||
params.singleCPUmem = 6400.MB
|
||||
|
||||
def hostname = "hostname".execute().text.trim()
|
||||
|
||||
if (hostname ==~ "r.*") {
|
||||
params.single_cpu_mem = 6400.MB
|
||||
|
||||
process {
|
||||
withName:BamQC {
|
||||
cpus = {params.max_cpus}
|
||||
memory = {params.max_memory}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hostname ==~ "i.*") {
|
||||
params.singleCPUmem = 15.GB
|
||||
params.single_cpu_mem = 15.GB
|
||||
}
|
||||
|
|
18
conf/pipeline/scflow/imperial.config
Normal file
18
conf/pipeline/scflow/imperial.config
Normal file
|
@ -0,0 +1,18 @@
|
|||
// scflow/imperial specific profile config
|
||||
|
||||
params {
|
||||
// Config Params
|
||||
config_profile_description = 'Imperial College London - HPC - nf-core/scFlow Profile -- provided by nf-core/configs.'
|
||||
config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
|
||||
|
||||
// Analysis Resource Params
|
||||
ctd_folder = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/Analyses/scFlowResources/refs/ctd"
|
||||
ensembl_mappings = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/Analyses/scFlowResources/src/ensembl-ids/ensembl_mappings.tsv"
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
autoMounts = true
|
||||
cacheDir = "/rds/general/user/$USER/projects/ukdrmultiomicsproject/live/.singularity-cache"
|
||||
runOptions = "-B /rds/,/rdsgpfs/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
|
||||
}
|
20
conf/pipeline/viralrecon/genomes.config
Normal file
20
conf/pipeline/viralrecon/genomes.config
Normal file
|
@ -0,0 +1,20 @@
|
|||
/*
|
||||
* -------------------------------------------------
|
||||
* nfcore/viralrecon custom profile Nextflow config file
|
||||
* -------------------------------------------------
|
||||
* Defines viral reference genomes for all environments.
|
||||
*/
|
||||
|
||||
params {
|
||||
// Genome reference file paths
|
||||
genomes {
|
||||
'NC_045512.2' {
|
||||
fasta = "https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/genome/NC_045512.2/GCF_009858895.2_ASM985889v3_genomic.200409.fna.gz"
|
||||
gff = "https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/genome/NC_045512.2/GCF_009858895.2_ASM985889v3_genomic.200409.gff.gz"
|
||||
}
|
||||
'MN908947.3' {
|
||||
fasta = "https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.fna.gz"
|
||||
gff = "https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.gff.gz"
|
||||
}
|
||||
}
|
||||
}
|
27
conf/seg_globe.config
Normal file
27
conf/seg_globe.config
Normal file
|
@ -0,0 +1,27 @@
|
|||
//Profile config names for nf-core/configs
|
||||
params {
|
||||
config_profile_description = 'Section for Evolutionary Genomics @ GLOBE, University of Copenhagen - seg_globe profile provided by nf-core/configs.'
|
||||
config_profile_contact = 'Aashild Vaagene (@ashildv)'
|
||||
config_profile_url = 'https://globe.ku.dk/research/evogenomics/'
|
||||
max_memory = 250.GB
|
||||
max_cpus = 35
|
||||
max_time = 720.h
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
autoMounts = true
|
||||
cacheDir = '/shared/volume/hologenomics/data/cache/nf-eager/singularity'
|
||||
}
|
||||
|
||||
process {
|
||||
executor = 'slurm'
|
||||
queue = { task.time < 24.h ? 'hologenomics-short' : task.time < 168.h ? 'hologenomics' : 'hologenomics-long' }
|
||||
}
|
||||
|
||||
cleanup = true
|
||||
|
||||
executor {
|
||||
queueSize = 8
|
||||
}
|
||||
|
|
@ -3,27 +3,6 @@ params {
|
|||
config_profile_description = 'Generic MPI-SHH cluster(s) profile provided by nf-core/configs.'
|
||||
config_profile_contact = 'James Fellows Yates (@jfy133), Maxime Borry (@Maxibor)'
|
||||
config_profile_url = 'https://shh.mpg.de'
|
||||
}
|
||||
|
||||
cleanup = true
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
autoMounts = true
|
||||
runOptions = '-B /run/shm:/run/shm'
|
||||
cacheDir = "/projects1/singularity_scratch/cache/"
|
||||
}
|
||||
|
||||
process {
|
||||
executor = 'slurm'
|
||||
queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
|
||||
}
|
||||
|
||||
executor {
|
||||
queueSize = 16
|
||||
}
|
||||
|
||||
params {
|
||||
max_memory = 256.GB
|
||||
max_cpus = 32
|
||||
max_time = 720.h
|
||||
|
@ -31,13 +10,45 @@ params {
|
|||
igenomes_base = "/projects1/public_data/igenomes/"
|
||||
}
|
||||
|
||||
// Perform work directory cleanup after a successful run
|
||||
cleanup = true
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
autoMounts = true
|
||||
cacheDir = "/projects1/singularity_scratch/cache/"
|
||||
}
|
||||
|
||||
process {
|
||||
executor = 'slurm'
|
||||
}
|
||||
|
||||
executor {
|
||||
queueSize = 8
|
||||
}
|
||||
|
||||
profiles {
|
||||
cdag {
|
||||
config_profile_description = 'MPI-SHH CDAG profile, provided by nf-core/configs.'
|
||||
params {
|
||||
config_profile_description = 'CDAG MPI-SHH profile, provided by nf-core/configs.'
|
||||
}
|
||||
// delete when CDAG will be fixed
|
||||
process {
|
||||
queue = 'long'
|
||||
}
|
||||
}
|
||||
sdag {
|
||||
config_profile_description = 'MPI-SHH SDAG profile, provided by nf-core/configs.'
|
||||
max_memory = 2.TB
|
||||
max_cpus = 128
|
||||
params {
|
||||
config_profile_description = 'SDAG MPI-SHH profile, provided by nf-core/configs.'
|
||||
max_memory = 2.TB
|
||||
max_cpus = 128
|
||||
}
|
||||
process {
|
||||
queue = { task.memory > 756.GB || task.cpus > 64 ? 'supercruncher': task.time <= 2.h ? 'short' : task.time <= 48.h ? 'medium': 'long' }
|
||||
}
|
||||
}
|
||||
// Profile to deactivate automatic cleanup of work directory after a successful run. Overwrites cleanup option.
|
||||
debug {
|
||||
cleanup = false
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,23 +0,0 @@
|
|||
//Profile config names for nf-core/configs
|
||||
params {
|
||||
config_profile_description = 'University of Cape Town HEX cluster config file provided by nf-core/configs.'
|
||||
config_profile_contact = 'Katie Lennard (@kviljoen)'
|
||||
config_profile_url = 'http://hpc.uct.ac.za/index.php/hex-3/'
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
cacheDir = "/scratch/DB/bio/singularity-containers"
|
||||
}
|
||||
|
||||
process {
|
||||
stageInMode = 'symlink'
|
||||
stageOutMode = 'rsync'
|
||||
queue = 'UCTlong'
|
||||
clusterOptions = { "-M $params.email -m abe -l nodes=1:ppn=1:series600" }
|
||||
}
|
||||
|
||||
executor{
|
||||
executor = 'pbs'
|
||||
jobName = { "$task.tag" }
|
||||
}
|
41
conf/uct_hpc.config
Normal file
41
conf/uct_hpc.config
Normal file
|
@ -0,0 +1,41 @@
|
|||
/*
|
||||
* -------------------------------------------------
|
||||
* HPC cluster config file
|
||||
* -------------------------------------------------
|
||||
* http://www.hpc.uct.ac.za/
|
||||
*/
|
||||
|
||||
params {
|
||||
config_profile_description = 'University of Cape Town High Performance Cluster config file provided by nf-core/configs.'
|
||||
config_profile_contact = 'Katie Lennard (@kviljoen)'
|
||||
config_profile_url = 'http://hpc.uct.ac.za/index.php/hpc-cluster/'
|
||||
|
||||
singularity_cache_dir = "/bb/DB/bio/singularity-containers/"
|
||||
igenomes_base = '/bb/DB/bio/rna-seq/references'
|
||||
max_memory = 384.GB
|
||||
max_cpus = 40
|
||||
max_time = 1000.h
|
||||
hpc_queue = 'ada'
|
||||
hpc_account = '--account cbio'
|
||||
genome = 'GRCh37'
|
||||
}
|
||||
|
||||
singularity {
|
||||
enabled = true
|
||||
cacheDir = params.singularity_cache_dir
|
||||
autoMounts = true
|
||||
}
|
||||
|
||||
process {
|
||||
executor = 'slurm'
|
||||
queue = params.hpc_queue
|
||||
// Increasing maxRetries, this will overwrite what we have in base.config
|
||||
maxRetries = 4
|
||||
clusterOptions = params.hpc_account
|
||||
stageInMode = 'symlink'
|
||||
stageOutMode = 'rsync'
|
||||
}
|
||||
|
||||
executor {
|
||||
queueSize = 15
|
||||
}
|
|
@ -15,7 +15,7 @@ process {
|
|||
}
|
||||
|
||||
params {
|
||||
saveReference = true
|
||||
save_reference = true
|
||||
|
||||
max_memory = 125.GB
|
||||
max_cpus = 16
|
||||
|
@ -26,7 +26,7 @@ params {
|
|||
|
||||
def hostname = "hostname".execute().text.trim()
|
||||
|
||||
if (hostname ==~ "b.*") {
|
||||
if (hostname ==~ "b.*" || hostname ==~ "s.*") {
|
||||
params.max_memory = 109.GB
|
||||
}
|
||||
|
||||
|
|
|
@ -12,13 +12,13 @@ singularity {
|
|||
}
|
||||
|
||||
process {
|
||||
beforeScript = 'module load singularity/2.4.5'
|
||||
beforeScript = 'module load singularity/3.2.1'
|
||||
executor = 'slurm'
|
||||
queue = 'genomics'
|
||||
queue = { task.memory >= 32.GB && task.cpu <= 12 ? 'Kim': task.memory <= 24.GB && task.cpu <= 8 ? 'smallmem' : 'genomics' }
|
||||
}
|
||||
|
||||
params {
|
||||
max_memory = 32.GB
|
||||
max_memory = 128.GB
|
||||
max_cpus = 16
|
||||
max_time = 48.h
|
||||
max_time = 96.h
|
||||
}
|
||||
|
|
80
docs/abims.md
Normal file
80
docs/abims.md
Normal file
|
@ -0,0 +1,80 @@
|
|||
# nf-core/configs: ABiMS Configuration
|
||||
|
||||
All nf-core pipelines have been successfully configured for use on the ABiMS cluster.
|
||||
|
||||
To use, run the pipeline with `-profile abims`. This will download and launch the [`abims.config`](../conf/abims.config) which has been pre-configured with a setup suitable for the ABiMS cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
|
||||
|
||||
## Request an account
|
||||
|
||||
You will need an account to use the HPC cluster on ABiMS in order
|
||||
to run the pipeline. If in doubt see [http://abims.sb-roscoff.fr/account](http://abims.sb-roscoff.fr/account).
|
||||
|
||||
## Running the workflow on the ABiMS cluster
|
||||
|
||||
Nextflow is installed on the ABiMS cluster.
|
||||
|
||||
You need to activate it like this:
|
||||
|
||||
```bash
|
||||
module load nextflow slurm-drmaa graphviz
|
||||
```
|
||||
|
||||
Nextflow manages each process as a separate job that is submitted to the cluster by using the sbatch command.
|
||||
Nextflow shouldn't run directly on the submission node but on a compute node. Run nextflow from a compute node:
|
||||
|
||||
```bash
|
||||
# Load the dependencies if not done before
|
||||
module load nextflow slurm-drmaa graphviz
|
||||
|
||||
# Run a downloaded/git-cloned nextflow workflow from
|
||||
srun nextflow run \
|
||||
/path/to/nf-core/workflow \
|
||||
-profile abims \
|
||||
--email my-email@example.org \
|
||||
-c my-specific.config
|
||||
...
|
||||
|
||||
# Or let the nf-core client download the workflow
|
||||
srun nextflow run nf-core/rnaseq -profile abims ...
|
||||
|
||||
# To launch in background
|
||||
sbatch --wrap "nextflow run nf-core/rnaseq -profile abims ..."
|
||||
```
|
||||
|
||||
Or write a sbatch script
|
||||
|
||||
> nfcore-rnaseq.sh
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
#SBATCH -p fast
|
||||
#SBATCH --mem=4G
|
||||
|
||||
module load nextflow slurm-drmaa graphviz
|
||||
nextflow run nf-core/rnaseq -profile abims ...
|
||||
```
|
||||
|
||||
Launch on the cluster with sbatch:
|
||||
|
||||
```bash
|
||||
sbatch nfcore-rnaseq.sh
|
||||
```
|
||||
|
||||
### Hello, world
|
||||
|
||||
nf-core provides some tests for each workflow:
|
||||
|
||||
```bash
|
||||
module load nextflow slurm-drmaa graphviz
|
||||
nextflow run nf-core/rnaseq -profile abims,test
|
||||
```
|
||||
|
||||
## Singularity images mutualized directory
|
||||
|
||||
To reduce the disk usage, nf-core images can be stored in a mutualized directory: `/shared/software/singularity/images/nf-core/`
|
||||
|
||||
The environment variable `NXF_SINGULARITY_CACHEDIR` is set to `/shared/data/cache/nextflow` to point Nextflow to this directory.
|
||||
|
||||
## Databanks
|
||||
|
||||
A local copy of several genomes are available in `/shared/bank/` directory.
|
9
docs/bi.md
Normal file
9
docs/bi.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
# nf-core/configs: BI Configuration
|
||||
|
||||
All nf-core pipelines have been successfully configured for use at Boehringer Ingelheim.
|
||||
|
||||
To use, run the pipeline with `-profile bi`. This will download and launch the [`bi.config`](../conf/bi.config) which has been pre-configured with a setup suitable for the BI systems. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
|
||||
|
||||
Before running the pipeline you will need to follow the internal documentation to run Nextflow on our systems. Similar to that, you need to set an environment variable `NXF_GLOBAL_CONFIG` to the path of the internal global config which is not publicly available here.
|
||||
|
||||
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
|
|
@ -4,13 +4,12 @@ All nf-core pipelines have been successfully configured for use on the CLIP BATC
|
|||
|
||||
To use, run the pipeline with `-profile cbe`. This will download and launch the [`cbe.config`](../conf/cbe.config) which has been pre-configured with a setup suitable for the CBE cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
|
||||
|
||||
Before running the pipeline you will need to load Nextflow and Singularity using the environment module system on CBE. You can do this by issuing the commands below:
|
||||
Before running the pipeline you will need to load Nextflow using the environment module system on CBE. You can do this by issuing the commands below:
|
||||
|
||||
```bash
|
||||
## Load Nextflow and Singularity environment modules
|
||||
## Load Nextflow environment module
|
||||
module purge
|
||||
module load nextflow/19.04.0
|
||||
module load singularity/3.2.1
|
||||
```
|
||||
|
||||
A local copy of the [AWS-iGenomes](https://registry.opendata.aws/aws-igenomes/) resource has been made available on CBE so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline. You can do this by simply using the `--genome <GENOME_ID>` parameter.
|
||||
|
|
|
@ -10,7 +10,7 @@ Before running the pipeline you will need to load Nextflow and Singularity using
|
|||
## Load Nextflow and Singularity environment modules
|
||||
module purge
|
||||
module load Nextflow/19.10.0
|
||||
module load Singularity/2.6.0-foss-2016b
|
||||
module load Singularity
|
||||
```
|
||||
|
||||
A local copy of the [AWS-iGenomes](https://github.com/ewels/AWS-iGenomes) resource has been made available on CAMP so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline. You can do this by simply using the `--genome <GENOME_ID>` parameter. Some of the more exotic genomes may not have been downloaded onto CAMP so have a look in the `igenomes_base` path specified in [`crick.config`](../conf/crick.config), and if your genome of interest isn't present please contact [BABS](mailto:bioinformatics@crick.ac.uk).
|
||||
|
|
|
@ -122,3 +122,11 @@ For Human and Mouse, we use [GENCODE](https://www.gencodegenes.org/) gene annota
|
|||
|
||||
>NB: You will need an account to use the HPC cluster on PROFILE CLUSTER in order to run the pipeline. If in doubt contact IT.
|
||||
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
|
||||
|
||||
## High Priority Queue
|
||||
|
||||
If you would like to run with the _High Priority_ queue, specify the `highpriority` config profile after `czbiohub_aws`. When applied after the main `czbiohub_aws` config, it overwrites the process `queue` identifier.
|
||||
|
||||
To use it, submit your run with `-profile czbiohub_aws,highpriority`.
|
||||
|
||||
**Note that the order of config profiles here is important.** For example, `-profile highpriority,czbiohub_aws` will not work.
|
||||
|
|
|
@ -14,21 +14,21 @@ nextflow run nf-core/rnaseq -profile test,google --google_bucket <gs://your_buck
|
|||
|
||||
### Required Parameters
|
||||
|
||||
#### `--google-bucket`
|
||||
#### `--google_bucket`
|
||||
|
||||
The Google Cloud Storage bucket location to be used as a Nextflow work directory. Can also be specified with (`-w gs://your_bucket/work`).
|
||||
|
||||
### Optional Parameters
|
||||
|
||||
#### `--google-zone`
|
||||
#### `--google_zone`
|
||||
|
||||
The Google zone where the computation is executed in Compute Engine VMs. Multiple zones can be provided separating them by a comma. Default (`europe-west2-c`).
|
||||
|
||||
#### `--google-preemptible`
|
||||
#### `--google_preemptible`
|
||||
|
||||
Enables the usage of preemptible virtual machines with a retry error strategy for up to 5 retries. Default (`true`).
|
||||
|
||||
#### `--google-debug`
|
||||
#### `--google_debug`
|
||||
|
||||
Copies the /google debug directory from the VM to the task bucket directory. Useful for debugging. Default (`false`).
|
||||
|
||||
|
|
22
docs/icr_davros.md
Normal file
22
docs/icr_davros.md
Normal file
|
@ -0,0 +1,22 @@
|
|||
# nf-core/configs: Institute of Cancer Research (Davros HPC) Configuration
|
||||
|
||||
Deployment and testing of nf-core pipelines at the Davros cluster is on-going.
|
||||
|
||||
To run an nf-core pipeline on Davros, run the pipeline with `-profile icr_davros`. This will download and launch the [`icr_davros.config`](../conf/icr_davros.config) which has been pre-configured with a setup suitable for the Davros HPC cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
|
||||
|
||||
Before running the pipeline you will need to load Nextflow using the environment module system. You can do this by issuing the commands below:
|
||||
|
||||
```bash
|
||||
## Load Nextflow environment modules
|
||||
module load Nextflow/19.10.0
|
||||
```
|
||||
|
||||
Singularity is installed on the compute nodes of Davros, but not the login nodes. There is no module for Singularity.
|
||||
|
||||
A subset of the [AWS-iGenomes](https://github.com/ewels/AWS-iGenomes) resource has been made available locally on Davros so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline you want to execute. You can do this by simply using the `--genome <GENOME_ID>` parameter. Some of the more exotic genomes may not have been downloaded onto Davros so have a look in the `igenomes_base` path specified in [`icr_davros.config`](../conf/icr_davros.config), and if your genome of interest isn't present please contact [Scientific Computing](mailto:schelpdesk@icr.ac.uk).
|
||||
|
||||
Alternatively, if you are running the pipeline regularly for genomes that aren't available in the iGenomes resource, we recommend creating a config file with paths to your reference genome indices (see [`reference genomes documentation`](https://nf-co.re/usage/reference_genomes) for instructions).
|
||||
|
||||
All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large. All of the main output files will be saved in the `results/` directory.
|
||||
|
||||
>NB: Nextflow will need to submit the jobs via LSF to the HPC cluster. This can be done from an interactive or normal job. If in doubt contact Scientific Computing.
|
16
docs/imperial.md
Normal file
16
docs/imperial.md
Normal file
|
@ -0,0 +1,16 @@
|
|||
# nf-core/configs: Imperial CX1 HPC Configuration
|
||||
|
||||
All nf-core pipelines have been successfully configured for use on the CX1 cluster at Imperial College London HPC.
|
||||
|
||||
To use, run the pipeline with `-profile imperial`. This will download and launch the [`imperial.config`](../conf/imperial.config) which has been pre-configured with a setup suitable for the CX1 cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
|
||||
|
||||
Before running the pipeline you will need to load Nextflow using the environment module system on the CX1 cluster. You can do this by issuing the commands below:
|
||||
|
||||
```bash
|
||||
## Load Nextflow and Singularity environment modules
|
||||
module load Nextflow
|
||||
```
|
||||
|
||||
>NB: You will need an account to use the HPC cluster CX1 in order to run the pipeline. If in doubt contact IT.
|
||||
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
|
||||
>NB: To submit jobs to the Imperial College MEDBIO cluster, use `-profile imperial_mb` instead.
|
16
docs/imperial_mb.md
Normal file
16
docs/imperial_mb.md
Normal file
|
@ -0,0 +1,16 @@
|
|||
# nf-core/configs: Imperial MEDBIO HPC Configuration
|
||||
|
||||
All nf-core pipelines have been successfully configured for use on the MEDBIO cluster at Imperial College London HPC.
|
||||
|
||||
To use, run the pipeline with `-profile imperial_mb`. This will download and launch the [`imperial_mb.config`](../conf/imperial_mb.config) which has been pre-configured with a setup suitable for the MEDBIO cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
|
||||
|
||||
Before running the pipeline you will need to load Nextflow using the environment module system on the head node. You can do this by issuing the commands below:
|
||||
|
||||
```bash
|
||||
## Load Nextflow and Singularity environment modules
|
||||
module load Nextflow
|
||||
```
|
||||
|
||||
>NB: You will need an account to use the HPC cluster MEDBIO in order to run the pipeline. Access to the MEDBIO queue is exclusive. If in doubt contact IT.
|
||||
>NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
|
||||
>NB: To submit jobs to the standard CX1 cluster at Imperial College, use `-profile imperial` instead.
|
45
docs/mpcdf.md
Normal file
45
docs/mpcdf.md
Normal file
|
@ -0,0 +1,45 @@
|
|||
# nf-core/configs: MPCDF Configuration
|
||||
|
||||
All nf-core pipelines have been successfully configured for use on the HPCs at [Max Planck Computing and Data Facility](https://www.mpcdf.mpg.de/).
|
||||
|
||||
> :warning: these profiles are not officially supported by the MPCDF.
|
||||
|
||||
To run Nextflow, the `jdk` module must be loaded. To use the nf-core profile(s), run the pipeline with `-profile <cluster>,mpcdf`.
|
||||
|
||||
Currently the following clusters are supported: cobra, raven
|
||||
|
||||
>NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. Check the [MPCDF documentation](https://www.mpcdf.mpg.de/services/computing).
|
||||
|
||||
## cobra
|
||||
|
||||
Cobra does not currently support singularity, therefore the anaconda module is loaded for each process.
|
||||
|
||||
Due to this, we also recommend setting the `$NXF_CONDA_CACHEDIR` to a location of your choice to store all environments (so to prevent nextflow building the environment on every run).
|
||||
|
||||
To use: `-profile cobra,mpcdf`
|
||||
|
||||
Sets the following parameters:
|
||||
|
||||
- Maximum parallel running jobs: 8
|
||||
- Max. memory: 750.GB
|
||||
- Max. CPUs: 80
|
||||
- Max. walltime: 24.h
|
||||
|
||||
## draco
|
||||
|
||||
:hammer_and_wrench: under testing.
|
||||
|
||||
## raven
|
||||
|
||||
Raven does not currently support singularity, therefore `module load anaconda/3/2020.02` is loaded for each process.
|
||||
|
||||
Due to this, we also recommend setting the `$NXF_CONDA_CACHEDIR` to a location of your choice to store all environments (so to prevent nextflow building the environment on every run).
|
||||
|
||||
To use: `-profile raven,mpcdf`
|
||||
|
||||
Sets the following parameters:
|
||||
|
||||
- Maximum parallel running jobs: 8
|
||||
- Max. memory: 368.GB
|
||||
- Max. CPUs: 192
|
||||
- Max. walltime: 24.h
|
33
docs/oist.md
Normal file
33
docs/oist.md
Normal file
|
@ -0,0 +1,33 @@
|
|||
# nf-core/configs: OIST Configuration
|
||||
|
||||
The nf-core pipelines [rnaseq](https://nf-co.re/rnaseq) and
|
||||
[eager](https://nf-co.re/eager) have been successfully tested on the _Deigo_
|
||||
cluster at the Okinawa Institute of Science and Technology Graduate University
|
||||
([OIST](https://www.oist.jp)). We have no reason to expect that other
|
||||
pipelines would not work.
|
||||
|
||||
To use, run the pipeline with `-profile oist`. This will download and launch
|
||||
the [`oist.config`](../conf/oist.config) which has been pre-configured with a
|
||||
setup suitable for _Deigo_. Using this profile, a docker image containing all
|
||||
of the required software will be downloaded, and converted to a Singularity
|
||||
image before execution of the pipeline.
|
||||
|
||||
## Below are non-mandatory information e.g. on modules to load etc
|
||||
|
||||
Before running the pipeline you will need to load Nextflow and Singularity
|
||||
using the environment module system on _Deigo_. You can do this by issuing the
|
||||
commands below:
|
||||
|
||||
```bash
|
||||
## Load the latest Nextflow and Singularity environment modules
|
||||
ml purge
|
||||
ml bioinfo-ugrp-modules
|
||||
ml Other/Nextflow
|
||||
```
|
||||
|
||||
>NB: You will need an account to use the _Deigo_ cluster in order to run the
|
||||
>pipeline. If in doubt contact IT.
|
||||
>
|
||||
>NB: Nextflow will submit the jobs via the SLURM scheduler to the HPC cluster
|
||||
>and as such the commands above will have to be executed on one of the login
|
||||
>nodes. If in doubt contact IT.
|
17
docs/pipeline/ampliseq/uppmax.md
Normal file
17
docs/pipeline/ampliseq/uppmax.md
Normal file
|
@ -0,0 +1,17 @@
|
|||
# nf-core/configs: uppmax ampliseq specific configuration
|
||||
|
||||
Extra specific configuration for the ampliseq pipeline.
|
||||
|
||||
## Usage
|
||||
|
||||
To use, run the pipeline with `-profile uppmax`.
|
||||
|
||||
This will download and launch the ampliseq specific [`uppmax.config`](../../../conf/pipeline/ampliseq/uppmax.config) which has been pre-configured with a setup suitable for the UPPMAX cluster.
|
||||
|
||||
Example: `nextflow run nf-core/ampliseq -profile uppmax`
|
||||
|
||||
## ampliseq specific configurations for uppmax
|
||||
|
||||
Specific configurations for UPPMAX has been made for ampliseq.
|
||||
|
||||
* Makes sure that a fat node is allocated for training and applying a Bayesian classifier.
|
|
@ -14,4 +14,23 @@ Example: `nextflow run nf-core/eager -profile shh`
|
|||
|
||||
Specific configurations for shh has been made for eager.
|
||||
|
||||
* If running with the MALT module turned on, the MALT process by default will be sent to the long queue with a resource requirement minimum of 725GB and 64 cores. If this fails, the process will be tried once more only and sent to the supercruncher queue. The module will not retry after this, and pipeline will fail.
|
||||
### General profiles
|
||||
|
||||
* If running with the MALT module turned on, the MALT process by default will be sent to the long queue with a resource requirement minimum of 725GB and 64 cores. If this fails, the process will be tried once more only and sent to the supercruncher queue. The module will not retry after this, and pipeline will fail. Note, this will only work on SDAG.
|
||||
|
||||
### Contextual profiles
|
||||
|
||||
#### Microbiome Sciences
|
||||
|
||||
* `microbiome_screening` runs MALT straight to supercruncher (with no retries!) and full resources requested due to microbiome screening databases often easily reach this size
|
||||
|
||||
#### Human Pop-Gen
|
||||
|
||||
* `human`: optimised for mapping of human aDNA reads (i.e. bwa aln defaults as `-l 16500, -n 0.01`)
|
||||
|
||||
#### Pathogen
|
||||
|
||||
* `pathogen_loose`: optimised for mapping of human aDNA reads (i.e. bwa aln defaults as `-l 16 -n 0.01`)
|
||||
* `pathogen_strict`: optimised for mapping of human aDNA reads (i.e. bwa aln defaults as `-l 32, -n 0.1`)
|
||||
* `hops`: profile with default paths and parameters for automated/initial pathogen screening.
|
||||
* :warning: This is NOT a reproducible profile as it contains hardcoded paths. This should only be used for initial/automated screening where you wish to quickly check for any possible positives; after which you should re-do screening in a reproducible manner for publication!
|
||||
|
|
|
@ -15,5 +15,4 @@ Example: `nextflow run nf-core/rnafusion -profile munin`
|
|||
Specific configurations for `MUNIN` has been made for rnafusion.
|
||||
|
||||
* `cpus`, `memory` and `time` max requirements.
|
||||
* Paths to specific containers
|
||||
* Paths to specific references and indexes
|
||||
|
|
21
docs/pipeline/scflow/imperial.md
Normal file
21
docs/pipeline/scflow/imperial.md
Normal file
|
@ -0,0 +1,21 @@
|
|||
# nf-core/configs: Imperial scflow Specific Configuration
|
||||
|
||||
Extra specific configuration for the scflow pipeline
|
||||
|
||||
## Usage
|
||||
|
||||
To use, run the pipeline with `-profile imperial` or `-profile imperial_mb`.
|
||||
|
||||
This will download and launch the scflow specific [`imperial.config`](../../../conf/pipeline/scflow/imperial.config) which has been pre-configured with a setup suitable for the Imperial HPC cluster.
|
||||
|
||||
Example: `nextflow run nf-core/scflow -profile imperial`
|
||||
|
||||
## scflow specific configurations for Imperial
|
||||
|
||||
Specific configurations for Imperial have been made for scflow.
|
||||
|
||||
* Singularity `enabled` and `autoMounts` set to `true`
|
||||
* Singularity `cacheDir` path set to an RDS location
|
||||
* Singularity `runOptions` path set to bind (`-B`) RDS paths with container paths.
|
||||
* Params `ctd_folder` set to an RDS location.
|
||||
* Params `ensembl_mappings` set to an RDS location.
|
9
docs/pipeline/viralrecon/genomes.md
Normal file
9
docs/pipeline/viralrecon/genomes.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
# nf-core/configs: viralrecon specific configuration
|
||||
|
||||
Extra specific configuration for viralrecon pipeline
|
||||
|
||||
## Usage
|
||||
|
||||
Will be used automatically when running the pipeline with the shared configs in the nf-core/configs repository
|
||||
|
||||
This will download and launch the viralrecon specific [`viralrecon.config`](../../../conf/pipeline/viralrecon/genomes.config) which has been pre-configured with custom genomes.
|
21
docs/seg_globe.md
Normal file
21
docs/seg_globe.md
Normal file
|
@ -0,0 +1,21 @@
|
|||
# nf-core/configs: Section for Evolutionary Genomics at GLOBE, University of Copenhagen (hologenomics partition on HPC) Configuration
|
||||
|
||||
> **NB:** You will need an account to use the HPC cluster to run the pipeline. If in doubt contact IT.
|
||||
|
||||
The profile is configured to run with Singularity version 3.6.3-1.el7 which is part of the OS installation and does not need to be loaded as a module.
|
||||
|
||||
Before running the pipeline you will need to load Java, miniconda and Nextflow. You can do this by including the commands below in your SLURM/sbatch script:
|
||||
|
||||
```bash
|
||||
## Load Java and Nextflow environment modules
|
||||
module purge
|
||||
module load lib
|
||||
module load java/v1.8.0_202-jdk miniconda nextflow/v20.07.1.5412
|
||||
```
|
||||
|
||||
All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
|
||||
The config contains a `cleanup` command that removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully then the `work/` dir should be removed manually to save storage space.
|
||||
|
||||
This configuration will automatically choose the correct SLURM queue (short,medium,long) depending on the time and memory required by each process.
|
||||
|
||||
> **NB:** Nextflow will need to submit the jobs via SLURM to the HPC cluster and as such the commands above will have to be submitted from one of the login nodes.
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
All nf-core pipelines have been successfully configured for use on the Department of Archaeogenetic's SDAG/CDAG clusters at the [Max Planck Institute for the Science of Human History (MPI-SHH)](http://shh.mpg.de).
|
||||
|
||||
To use, run the pipeline with `-profile ssh`. You can further with optimise submissions by specifying which cluster you are using with `-profile shh,sdag` or `-profile ssh,cdag`. This will download and launch the [`shh.config`](../conf/shh.config) which has been pre-configured with a setup suitable for the SDAG and CDAG clusters respectively. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. The image will currently be centrally stored here:
|
||||
To use, run the pipeline with `-profile shh`. You can further optimise submissions by specifying which cluster you are using with `-profile shh,sdag` or `-profile shh,cdag`. This will download and launch the [`shh.config`](../conf/shh.config) which has been pre-configured with a setup suitable for the SDAG and CDAG clusters respectively. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline. The image will currently be centrally stored here:
|
||||
|
||||
```bash
|
||||
/projects1/singularity_scratch/cache/
|
||||
|
@ -10,7 +10,7 @@ To use, run the pipeline with `-profile ssh`. You can further with optimise subm
|
|||
|
||||
however this will likely change to a read-only directory in the future that will be managed by the IT team.
|
||||
|
||||
This configuration will automatically choose the correct SLURM queue (`short`,`medium`,`long`) depending on the time and memory required by each process. `-profile ssh,sdag` additionally allows for submission of jobs to the `supercruncher` queue when a job's requested memory exceeds 756GB.
|
||||
This configuration will automatically choose the correct SLURM queue (`short`,`medium`,`long`) depending on the time and memory required by each process. `-profile shh,sdag` additionally allows for submission of jobs to the `supercruncher` queue when a job's requested memory exceeds 756GB.
|
||||
|
||||
>NB: You will need an account and VPN access to use the cluster at MPI-SHH in order to run the pipeline. If in doubt contact the IT team.
|
||||
>NB: Nextflow will need to submit the jobs via SLURM to the clusters and as such the commands above will have to be executed on one of the head nodes. If in doubt contact IT.
|
||||
|
|
5
docs/uct_hpc.md
Normal file
5
docs/uct_hpc.md
Normal file
|
@ -0,0 +1,5 @@
|
|||
# nf-core/configs: UCT HPC config
|
||||
|
||||
University of Cape Town [High Performance Cluster](http://hpc.uct.ac.za/index.php/hpc-cluster/) config.
|
||||
|
||||
For help or more information, please contact Katie Lennard (@kviljoen).
|
|
@ -2,6 +2,10 @@
|
|||
|
||||
All nf-core pipelines have been successfully configured for use on the Swedish UPPMAX clusters.
|
||||
|
||||
## Getting help
|
||||
|
||||
We have a Slack channel dedicated to UPPMAX users on the nf-core Slack: [https://nfcore.slack.com/channels/uppmax](https://nfcore.slack.com/channels/uppmax)
|
||||
|
||||
## Using the UPPMAX config profile
|
||||
|
||||
To use, run the pipeline with `-profile uppmax` (one hyphen).
|
||||
|
@ -12,14 +16,19 @@ In addition to this config profile, you will also need to specify an UPPMAX proj
|
|||
You can do this with the `--project` flag (two hyphens) when launching nextflow. For example:
|
||||
|
||||
```bash
|
||||
nextflow run nf-core/PIPELINE -profile uppmax --project SNIC 2018/1-234 # ..rest of pipeline flags
|
||||
nextflow run nf-core/PIPELINE -profile uppmax --project snic2018-1-234 # ..rest of pipeline flags
|
||||
```
|
||||
|
||||
> NB: If you're not sure what your UPPMAX project ID is, try running `groups` or checking SUPR.
|
||||
|
||||
Before running the pipeline you will need to either install Nextflow or load it using the environment module system.
|
||||
|
||||
This config enables Nextflow to manage the pipeline jobs via the Slurm job scheduler.
|
||||
This config enables Nextflow to manage the pipeline jobs via the Slurm job scheduler and using Singularity for software management.
|
||||
|
||||
Just run Nextflow on a login node and it will handle everything else.
|
||||
|
||||
Remember to use `-bg` to launch Nextflow in the background, so that the pipeline doesn't exit if you leave your terminal session.
|
||||
|
||||
## Using iGenomes references
|
||||
|
||||
A local copy of the iGenomes resource has been made available on all UPPMAX clusters so you should be able to run the pipeline against any reference available in the `igenomes.config`.
|
||||
|
@ -40,7 +49,7 @@ Note that each job will still start with the same request as normal, but restart
|
|||
|
||||
All jobs will be submitted to fat nodes using this method, so it's only for use in extreme circumstances.
|
||||
|
||||
## How to specify a UPPMAX cluster
|
||||
## Different UPPMAX clusters
|
||||
|
||||
The UPPMAX nf-core configuration profile uses the `hostname` of the active environment to automatically apply the following resource limits:
|
||||
|
||||
|
@ -64,3 +73,15 @@ All jobs are limited to 1 hour to be eligible for this queue and only one job al
|
|||
It is not suitable for use with real data.
|
||||
|
||||
To use it, submit with `-profile uppmax,devel`.
|
||||
|
||||
## Running on Bianca
|
||||
|
||||
For security reasons, there is no internet access on Bianca so you can't download from or upload files to the cluster directly. Before running an nf-core pipeline on Bianca you will first have to download the pipeline and singularity images needed elsewhere and transfer them via the wharf area to your Bianca project.
|
||||
|
||||
You can follow the guide for downloading pipelines [for offline use](https://nf-co.re/tools#downloading-pipelines-for-offline-use). Note that you will have to download the singularity images as well.
|
||||
|
||||
Next transfer the pipeline and the singularity images to your project. Before running the pipeline you will have to indicate to nextflow where the singularity images are located by setting `NXF_SINGULARITY_CACHEDIR` :
|
||||
|
||||
`export NXF_SINGULARITY_CACHEDIR=Your_Location_For_The_Singularity_directory/.`
|
||||
|
||||
You should now be able to run your nf-core pipeline on Bianca.
|
||||
|
|
|
@ -10,7 +10,9 @@
|
|||
|
||||
//Please use a new line per include Config section to allow easier linting/parsing. Thank you.
|
||||
profiles {
|
||||
abims { includeConfig "${params.custom_config_base}/conf/abims.config" }
|
||||
awsbatch { includeConfig "${params.custom_config_base}/conf/awsbatch.config" }
|
||||
bi { includeConfig "${params.custom_config_base}/conf/bi.config" }
|
||||
bigpurple { includeConfig "${params.custom_config_base}/conf/bigpurple.config" }
|
||||
binac { includeConfig "${params.custom_config_base}/conf/binac.config" }
|
||||
cbe { includeConfig "${params.custom_config_base}/conf/cbe.config" }
|
||||
|
@ -20,10 +22,10 @@ profiles {
|
|||
cfc_dev { includeConfig "${params.custom_config_base}/conf/cfc_dev.config" }
|
||||
crick { includeConfig "${params.custom_config_base}/conf/crick.config" }
|
||||
czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" }
|
||||
czbiohub_aws_highpriority {
|
||||
includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config";
|
||||
includeConfig "${params.custom_config_base}/conf/czbiohub_aws_highpriority.config"}
|
||||
ebc { includeConfig "${params.custom_config_base}/conf/ebc.config" }
|
||||
icr_davros { includeConfig "${params.custom_config_base}/conf/icr_davros.config" }
|
||||
imperial { includeConfig "${params.custom_config_base}/conf/imperial.config" }
|
||||
imperial_mb { includeConfig "${params.custom_config_base}/conf/imperial_mb.config" }
|
||||
genotoul { includeConfig "${params.custom_config_base}/conf/genotoul.config" }
|
||||
google { includeConfig "${params.custom_config_base}/conf/google.config" }
|
||||
denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" }
|
||||
|
@ -31,12 +33,15 @@ profiles {
|
|||
gis { includeConfig "${params.custom_config_base}/conf/gis.config" }
|
||||
hebbe { includeConfig "${params.custom_config_base}/conf/hebbe.config" }
|
||||
kraken { includeConfig "${params.custom_config_base}/conf/kraken.config" }
|
||||
mpcdf { includeConfig "${params.custom_config_base}/conf/mpcdf.config" }
|
||||
munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
|
||||
oist { includeConfig "${params.custom_config_base}/conf/oist.config" }
|
||||
pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" }
|
||||
phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" }
|
||||
prince { includeConfig "${params.custom_config_base}/conf/prince.config" }
|
||||
seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"}
|
||||
shh { includeConfig "${params.custom_config_base}/conf/shh.config" }
|
||||
uct_hex { includeConfig "${params.custom_config_base}/conf/uct_hex.config" }
|
||||
uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" }
|
||||
uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" }
|
||||
utd_ganymede { includeConfig "${params.custom_config_base}/conf/utd_ganymede.config" }
|
||||
uzh { includeConfig "${params.custom_config_base}/conf/uzh.config" }
|
||||
|
@ -52,6 +57,9 @@ params {
|
|||
cbe: ['.cbe.vbc.ac.at'],
|
||||
cfc: ['.hpc.uni-tuebingen.de'],
|
||||
crick: ['.thecrick.org'],
|
||||
icr_davros: ['.davros.compute.estate'],
|
||||
imperial: ['.hpc.ic.ac.uk'],
|
||||
imperial_mb: ['.hpc.ic.ac.uk'],
|
||||
genotoul: ['.genologin1.toulouse.inra.fr', '.genologin2.toulouse.inra.fr'],
|
||||
genouest: ['.genouest.org'],
|
||||
uppmax: ['.uppmax.uu.se'],
|
||||
|
|
|
@ -10,4 +10,5 @@
|
|||
|
||||
profiles {
|
||||
binac { includeConfig "${params.custom_config_base}/conf/pipeline/ampliseq/binac.config" }
|
||||
}
|
||||
uppmax { includeConfig "${params.custom_config_base}/conf/pipeline/ampliseq/uppmax.config" }
|
||||
}
|
||||
|
|
|
@ -10,4 +10,6 @@
|
|||
|
||||
profiles {
|
||||
shh { includeConfig "${params.custom_config_base}/conf/pipeline/eager/shh.config" }
|
||||
}
|
||||
mpcdf { includeConfig "${params.custom_config_base}/conf/pipeline/eager/mpcdf.config" }
|
||||
|
||||
}
|
||||
|
|
13
pipeline/rnafusion.config
Normal file
13
pipeline/rnafusion.config
Normal file
|
@ -0,0 +1,13 @@
|
|||
/*
|
||||
* -------------------------------------------------
|
||||
* nfcore/rnafusion custom profile Nextflow config file
|
||||
* -------------------------------------------------
|
||||
* Config options for custom environments.
|
||||
* Cluster-specific config options should be saved
|
||||
* in the conf/pipeline/rnafusion folder and imported
|
||||
* under a profile name here.
|
||||
*/
|
||||
|
||||
profiles {
|
||||
munin { includeConfig "${params.custom_config_base}/conf/pipeline/rnafusion/munin.config" }
|
||||
}
|
|
@ -11,4 +11,5 @@
|
|||
profiles {
|
||||
munin { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/munin.config" }
|
||||
uppmax { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/uppmax.config" }
|
||||
icr_davros { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/icr_davros.config" }
|
||||
}
|
14
pipeline/scflow.config
Normal file
14
pipeline/scflow.config
Normal file
|
@ -0,0 +1,14 @@
|
|||
/*
|
||||
* -------------------------------------------------
|
||||
* nfcore/scflow custom profile Nextflow config file
|
||||
* -------------------------------------------------
|
||||
* Config options for custom environments.
|
||||
* Cluster-specific config options should be saved
|
||||
* in the conf/pipeline/scflow folder and imported
|
||||
* under a profile name here.
|
||||
*/
|
||||
|
||||
profiles {
|
||||
imperial { includeConfig "${params.custom_config_base}/conf/pipeline/scflow/imperial.config" }
|
||||
imperial_mb { includeConfig "${params.custom_config_base}/conf/pipeline/scflow/imperial.config" } // intended
|
||||
}
|
7
pipeline/viralrecon.config
Normal file
7
pipeline/viralrecon.config
Normal file
|
@ -0,0 +1,7 @@
|
|||
/*
|
||||
* -------------------------------------------------
|
||||
* nfcore/viralrecon custom profile Nextflow config file
|
||||
* -------------------------------------------------
|
||||
*/
|
||||
|
||||
includeConfig "${params.custom_config_base}/conf/pipeline/viralrecon/genomes.config"
|
Loading…
Reference in a new issue