diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index c057f2b..3936468 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -88,6 +88,8 @@ jobs:
           - "sanger"
           - "sbc_sharc"
           - "seg_globe"
+          - "tigem"
+          - "ucl_myriad"
           - "uct_hpc"
           - "unibe_ibu"
           - "uppmax"
diff --git a/README.md b/README.md
index c476905..fe38eb9 100644
--- a/README.md
+++ b/README.md
@@ -142,6 +142,8 @@ Currently documentation is available for the following systems:
 - [SANGER](docs/sanger.md)
 - [SBC_SHARC](docs/sbc_sharc.md)
 - [SEG_GLOBE](docs/seg_globe.md)
+- [TIGEM](docs/tigem.md)
+- [UCL_MYRIAD](docs/ucl_myriad.md)
 - [UCT_HPC](docs/uct_hpc.md)
 - [UNIBE_IBU](docs/unibe_ibu.md)
 - [UPPMAX](docs/uppmax.md)
@@ -205,6 +207,8 @@ Currently documentation is available for the following pipelines within specific
   - [SBC_SHARC](docs/pipeline/atacseq/sbc_sharc.md)
 - chipseq
   - [SBC_SHARC](docs/pipeline/chipseq/sbc_sharc.md)
+- demultiplex
+  - [AWS_TOWER](docs/pipeline/demultiplex/aws_tower.md)
 - eager
   - [EVA](docs/pipeline/eager/eva.md)
 - mag
diff --git a/conf/munin.config b/conf/munin.config
index 5f794d6..0fca214 100644
--- a/conf/munin.config
+++ b/conf/munin.config
@@ -29,7 +29,7 @@ process {
 singularity {
     cacheDir = '/data1/containers/'
     enabled = true
-    runOptions = "--bind /media/BTB_2021_01"
+    //runOptions = "--bind /media/BTB_2021_01"
 }
 
 // To use docker, use nextflow run -profile munin,docker
diff --git a/conf/pipeline/atacseq/sbc_sharc.config b/conf/pipeline/atacseq/sbc_sharc.config
index e50695c..7cd0e4a 100644
--- a/conf/pipeline/atacseq/sbc_sharc.config
+++ b/conf/pipeline/atacseq/sbc_sharc.config
@@ -1,35 +1,33 @@
-/*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Sheffield Bioinformatics Core Configuration Profile - ShARC
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Custom Pipeline Resource Config for nf-core/atacseq
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*/
-
+// Sheffield Bioinformatics Core Configuration Profile - ShARC
+// Custom Pipeline Resource Config for nf-core/atacseq
 
 
 // process-specific resource requirements - reduced specification from those in atacseq/conf/base.config
 
 
 process {
 
+    // error and retry handling
+
+    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
+    maxRetries = 2
+
+
+    // process labels
+
     withLabel:process_low {
         cpus = { check_max( 2 * task.attempt, 'cpus' ) }
-        memory = { check_max( 4.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
         time = { check_max( 4.h * task.attempt, 'time' ) }
     }
 
     withLabel:process_medium {
         cpus = { check_max( 4 * task.attempt, 'cpus' ) }
-        memory = { check_max( 8.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 32.GB * task.attempt, 'memory' ) }
         time = { check_max( 6.h * task.attempt, 'time' ) }
     }
 
     withLabel:process_high {
         cpus = { check_max( 8 * task.attempt, 'cpus' ) }
-        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
         time = { check_max( 8.h * task.attempt, 'time' ) }
     }
 
@@ -72,3 +70,4 @@ def check_max(obj, type) {
         }
     }
 }
+
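A note on the retry block introduced above (and repeated in the chipseq, rnaseq and sarek configs that follow): the listed exit codes are the ones a scheduler typically produces when it kills a job for exceeding its limits (137 is SIGKILL, commonly an out-of-memory kill), and because every resource closure multiplies by `task.attempt`, each retry escalates its request. A minimal standalone sketch of the same pattern, with a hypothetical process name and illustrative values:

```groovy
// Minimal sketch of the retry-with-escalation pattern used in these configs.
// EXAMPLE_TOOL and the base values are illustrative, not part of this PR.
process {

    // retry on kill/limit-style exit codes; otherwise let queued tasks finish
    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
    maxRetries = 2

    withName: 'EXAMPLE_TOOL' {
        // attempt 1: 8 GB / 4 h, attempt 2: 16 GB / 8 h, attempt 3: 24 GB / 12 h
        memory = { 8.GB * task.attempt }
        time = { 4.h * task.attempt }
    }
}
```

Incidentally, the untouched header comment in these files still says "reduced specification from those in ... base.config" even though several of the new limits now exceed the base defaults; that comment may deserve a follow-up.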
diff --git a/conf/pipeline/chipseq/sbc_sharc.config b/conf/pipeline/chipseq/sbc_sharc.config
index 60912f3..0e66333 100644
--- a/conf/pipeline/chipseq/sbc_sharc.config
+++ b/conf/pipeline/chipseq/sbc_sharc.config
@@ -1,35 +1,33 @@
-/*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Sheffield Bioinformatics Core Configuration Profile - ShARC
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Custom Pipeline Resource Config for nf-core/chipseq
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*/
-
+// Sheffield Bioinformatics Core Configuration Profile - ShARC
+// Custom Pipeline Resource Config for nf-core/chipseq
 
 
 // process-specific resource requirements - reduced specification from those in chipseq/conf/base.config
 
 
 process {
 
+
+    // error and retry handling
+
+    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
+    maxRetries = 2
+
+
+    // process labels
 
     withLabel:process_low {
         cpus = { check_max( 2 * task.attempt, 'cpus' ) }
-        memory = { check_max( 4.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
         time = { check_max( 4.h * task.attempt, 'time' ) }
     }
 
     withLabel:process_medium {
         cpus = { check_max( 4 * task.attempt, 'cpus' ) }
-        memory = { check_max( 8.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 32.GB * task.attempt, 'memory' ) }
         time = { check_max( 6.h * task.attempt, 'time' ) }
     }
 
     withLabel:process_high {
         cpus = { check_max( 8 * task.attempt, 'cpus' ) }
-        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
         time = { check_max( 8.h * task.attempt, 'time' ) }
     }
 
@@ -72,3 +70,4 @@ def check_max(obj, type) {
         }
     }
 }
+
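For context, the `check_max()` calls throughout these configs refer to the standard nf-core helper that caps each request at `params.max_cpus`, `params.max_memory` and `params.max_time`; only its closing braces are visible in the hunks above. An abridged sketch of the usual implementation, showing just the memory and cpus branches (the copies in these configs also contain an analogous 'time' branch):

```groovy
// Abridged sketch of the standard nf-core check_max() helper.
def check_max(obj, type) {
    if (type == 'memory') {
        try {
            if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
                return params.max_memory as nextflow.util.MemoryUnit
            else
                return obj
        } catch (all) {
            println "   ### ERROR ###   Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min(obj, params.max_cpus as int)
        } catch (all) {
            println "   ### ERROR ###   Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
    return obj
}
```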
diff --git a/conf/pipeline/demultiplex/aws_tower.config b/conf/pipeline/demultiplex/aws_tower.config
new file mode 100644
index 0000000..520487f
--- /dev/null
+++ b/conf/pipeline/demultiplex/aws_tower.config
@@ -0,0 +1,29 @@
+// Profile config names for nf-core/configs
+
+params {
+    // Specific nf-core/configs params
+    config_profile_contact = 'Edmund Miller (@emiller88)'
+    config_profile_description = 'nf-core/demultiplex AWS Tower profile provided by nf-core/configs'
+}
+
+aws {
+    batch {
+        maxParallelTransfers = 24
+        maxTransferAttempts = 3
+    }
+    client {
+        maxConnections = 24
+        uploadMaxThreads = 24
+        maxErrorRetry = 3
+        socketTimeout = 3600000
+        uploadRetrySleep = 1000
+        uploadChunkSize = 32.MB
+    }
+}
+
+process {
+    withName: BASES2FASTQ {
+        cpus = 16
+        memory = 48.GB
+    }
+}
diff --git a/conf/pipeline/rnaseq/sbc_sharc.config b/conf/pipeline/rnaseq/sbc_sharc.config
index 52bf0ff..d9d9878 100644
--- a/conf/pipeline/rnaseq/sbc_sharc.config
+++ b/conf/pipeline/rnaseq/sbc_sharc.config
@@ -1,35 +1,33 @@
-/*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Sheffield Bioinformatics Core Configuration Profile - ShARC
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Custom Pipeline Resource Config for nf-core/rnaseq
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*/
-
+// Sheffield Bioinformatics Core Configuration Profile - ShARC
+// Custom Pipeline Resource Config for nf-core/rnaseq
 
 
 // process-specific resource requirements - reduced specification from those in rnaseq/conf/base.config
 
 
 process {
 
+
+    // error and retry handling
+
+    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
+    maxRetries = 2
+
+
+    // process labels
 
     withLabel:process_low {
         cpus = { check_max( 2 * task.attempt, 'cpus' ) }
-        memory = { check_max( 4.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 12.GB * task.attempt, 'memory' ) }
         time = { check_max( 4.h * task.attempt, 'time' ) }
     }
 
     withLabel:process_medium {
         cpus = { check_max( 4 * task.attempt, 'cpus' ) }
-        memory = { check_max( 8.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 32.GB * task.attempt, 'memory' ) }
         time = { check_max( 6.h * task.attempt, 'time' ) }
     }
 
     withLabel:process_high {
         cpus = { check_max( 8 * task.attempt, 'cpus' ) }
-        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
         time = { check_max( 8.h * task.attempt, 'time' ) }
     }
 
@@ -38,7 +36,7 @@ process {
     }
 
     withLabel:process_high_memory {
-        memory = { check_max( 60.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 160.GB * task.attempt, 'memory' ) }
     }
 
 }
diff --git a/conf/pipeline/sarek/sbc_sharc.config b/conf/pipeline/sarek/sbc_sharc.config
index 204d73b..3a7f5b9 100644
--- a/conf/pipeline/sarek/sbc_sharc.config
+++ b/conf/pipeline/sarek/sbc_sharc.config
@@ -1,38 +1,33 @@
-/*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Sheffield Bioinformatics Core Configuration Profile - ShARC
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Custom Pipeline Resource Config for nf-core/sarek
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*/
-
+// Sheffield Bioinformatics Core Configuration Profile - ShARC
+// Custom Pipeline Resource Config for nf-core/sarek
 
 
 // process-specific resource requirements - reduced specification from those in sarek/conf/base.config
 
 
 process {
 
-
+    // error and retry handling
+
+    errorStrategy = { task.exitStatus in [143,137,104,134,139,140,247] ? 'retry' : 'finish' }
+    maxRetries = 2
+
+    // process labels
 
     withLabel:process_low {
         cpus = { check_max( 2 * task.attempt, 'cpus' ) }
-        memory = { check_max( 4.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
         time = { check_max( 4.h * task.attempt, 'time' ) }
     }
 
     withLabel:process_medium {
-        cpus = { check_max( 4 * task.attempt, 'cpus' ) }
-        memory = { check_max( 8.GB * task.attempt, 'memory' ) }
+        cpus = { check_max( 6 * task.attempt, 'cpus' ) }
+        memory = { check_max( 72.GB * task.attempt, 'memory' ) }
         time = { check_max( 6.h * task.attempt, 'time' ) }
     }
 
     withLabel:process_high {
-        cpus = { check_max( 8 * task.attempt, 'cpus' ) }
-        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+        cpus = { check_max( 12 * task.attempt, 'cpus' ) }
+        memory = { check_max( 192.GB * task.attempt, 'memory' ) }
         time = { check_max( 8.h * task.attempt, 'time' ) }
     }
 
@@ -41,7 +36,7 @@ process {
     }
 
     withLabel:process_high_memory {
-        memory = { check_max( 60.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 240.GB * task.attempt, 'memory' ) }
     }
 
@@ -49,7 +44,7 @@ process {
 
     withName:'BWAMEM1_MEM|BWAMEM2_MEM' {
         cpus = { check_max( 12 * task.attempt, 'cpus' ) }
-        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 192.GB * task.attempt, 'memory' ) }
         time = { check_max( 8.h * task.attempt, 'time' ) }
     }
 
@@ -66,11 +61,13 @@ process {
     }
 
     withName:'GATK4_APPLYBQSR|GATK4_APPLYBQSR_SPARK|GATK4_BASERECALIBRATOR|GATK4_GATHERBQSRREPORTS' {
-        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 72.GB * task.attempt, 'memory' ) }
     }
 
     withName:'GATK4_MARKDUPLICATES' {
-        memory = { check_max( 16.GB * task.attempt, 'memory' ) }
+        cpus = { check_max( 12 * task.attempt, 'cpus' ) }
+        memory = { check_max( 240.GB * task.attempt, 'memory' ) }
+        time = { check_max( 12.h * task.attempt, 'time' ) }
     }
 
     withName:'FREEBAYES|SAMTOOLS_STATS|SAMTOOLS_INDEX|UNZIP' {
diff --git a/conf/sbc_sharc.config b/conf/sbc_sharc.config
index 20b8661..91cf4e3 100644
--- a/conf/sbc_sharc.config
+++ b/conf/sbc_sharc.config
@@ -1,39 +1,25 @@
-/*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Sheffield Bioinformatics Core Configuration Profile - ShARC
+// Sheffield Bioinformatics Core Configuration Profile - ShARC
+// Base Institutional Configuration
 
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Base Institutional Configuration
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*/
+// nf-core specific parameters displayed in header summary of each run
 
 params {
-
-    // nf-core specific parameters displayed in header summary of each run
 
     config_profile_description = 'Sheffield Bioinformatics Core - ShARC'
     config_profile_contact = 'Lewis Quayle (l.quayle@sheffield.ac.uk)'
    config_profile_url = 'https://docs.hpc.shef.ac.uk/en/latest/sharc/index.html'
-
-    // hpc resource limits
-
-    max_cpus = 16
-    max_memory = 64.GB
-    max_time = 96.h
 }
 
-// container engine
+// hpc resource limits
 
-singularity {
-
-    enabled = true
-    autoMounts = true
+params {
+
+    max_cpus = 16
+    max_memory = 256.GB
+    max_time = 96.h
 
 }
@@ -47,11 +33,27 @@ process {
 
     executor = 'sge'
     penv = 'smp'
     queue = { task.time <= 6.h ? 'shortint.q' : 'all.q' }
-    clusterOptions = { "-l rmem=${task.memory.toGiga()}G" }
-
-    // error and retry handling
-
-    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
-    maxRetries = 2
+    clusterOptions = { "-l rmem=${ (task.memory.toGiga() / task.cpus) }G" }
 
 }
+
+
+// optional executor settings
+
+executor {
+
+    queueSize = 10
+    submitRateLimit = '1 sec'
+
+}
+
+
+// container engine
+
+singularity {
+
+    enabled = true
+    autoMounts = true
+
+}
+
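One behavioural change in `conf/sbc_sharc.config` is worth calling out: the old `clusterOptions` requested the whole of `task.memory` as `rmem`, while the new closure divides by `task.cpus`. This matches SGE setups such as ShARC where `-l rmem=` is a per-slot request that the scheduler multiplies by the number of `smp` slots; without the division, a multi-core task would reserve cpus times the intended memory. (The retry handling removed here is not lost; it moves into the per-pipeline configs above.) A sketch with worked numbers, illustrative and assuming the memory divides evenly by the cpu count:

```groovy
// Per-slot memory request for SGE with an smp parallel environment.
// Example: a task asking for 32 GB over 4 cpus submits "-l rmem=8G",
// so the scheduler reserves 4 slots x 8 GB = 32 GB in total.
process {
    clusterOptions = { "-l rmem=${ task.memory.toGiga() / task.cpus }G" }
}
```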
diff --git a/conf/tigem.config b/conf/tigem.config
new file mode 100644
index 0000000..b89a4ed
--- /dev/null
+++ b/conf/tigem.config
@@ -0,0 +1,14 @@
+params {
+    config_profile_description = 'Telethon Institute of Genetics and Medicine (TIGEM) provided by nf-core/configs.'
+    config_profile_contact = 'Giuseppe Martone (@giusmar)'
+    config_profile_url = 'https://github.com/giusmar'
+}
+
+process.executor = 'slurm'
+google.zone = 'europe-west1'
+
+singularity {
+    enabled = true
+    autoMounts = true
+    cacheDir = 'work/singularity'
+}
diff --git a/conf/ucl_myriad.config b/conf/ucl_myriad.config
new file mode 100644
index 0000000..3f9425c
--- /dev/null
+++ b/conf/ucl_myriad.config
@@ -0,0 +1,34 @@
+params {
+
+    config_profile_description = 'University College London Myriad cluster'
+    config_profile_contact = 'Chris Wyatt (ucbtcdr@ucl.ac.uk)'
+    config_profile_url = 'https://www.rc.ucl.ac.uk/docs/Clusters/Myriad/'
+
+}
+
+process {
+    executor = 'sge'
+    penv = 'smp'
+}
+
+params {
+    // Defaults only, expecting to be overwritten
+    max_memory = 128.GB
+    max_cpus = 36
+    max_time = 72.h
+    // igenomes_base = 's3://ngi-igenomes/igenomes/'
+}
+
+// optional executor settings
+
+executor {
+
+    queueSize = 10
+    submitRateLimit = '1 sec'
+
+}
+
+singularity {
+    enabled = true
+    autoMounts = true
+}
\ No newline at end of file
diff --git a/conf/vsc_ugent.config b/conf/vsc_ugent.config
index 0bc6ffd..b46b347 100644
--- a/conf/vsc_ugent.config
+++ b/conf/vsc_ugent.config
@@ -7,9 +7,11 @@ workDir = "$scratch_dir/work"
 // Perform work directory cleanup when the run has succesfully completed
 // cleanup = true
 
-// Reduce the job submit rate to about 5 per second, this way the server won't be bombarded with jobs
+// Reduce the job submit rate to about 3 per second, this way the server won't be bombarded with jobs
+// Limit queueSize to keep job rate under control and avoid timeouts
 executor {
     submitRateLimit = '3 sec'
+    queueSize = 50
 }
 
 // Specify that singularity should be used and where the cache dir will be for the images
@@ -114,4 +116,4 @@ profiles {
         scratch = "$scratch_dir"
     }
 }
-}
\ No newline at end of file
+}
diff --git a/docs/pipeline/demultiplex/aws_tower.md b/docs/pipeline/demultiplex/aws_tower.md
new file mode 100644
index 0000000..eb87186
--- /dev/null
+++ b/docs/pipeline/demultiplex/aws_tower.md
@@ -0,0 +1,19 @@
+# nf-core/configs: AWS Tower Demultiplex specific configuration
+
+Extra specific configuration for the demultiplex pipeline.
+
+## Usage
+
+To use, run the pipeline with `-profile aws_tower`.
+
+This will download and launch the demultiplex-specific [`aws_tower.config`](../../../conf/pipeline/demultiplex/aws_tower.config), which has been pre-configured with a setup suitable for AWS Batch through Tower.
+
+Example: `nextflow run nf-core/demultiplex -profile aws_tower`
+
+## demultiplex specific configurations for AWS Tower
+
+Specific configurations for AWS have been made for demultiplex.
+
+### General profiles
+
+- The general AWS Tower profile runs with default nf-core/demultiplex parameters, but with modifications to account for file transfer speed and optimized Bases2Fastq resources.
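Since the document above only gestures at "file transfer speed", a short note on the `aws.client` block in `conf/pipeline/demultiplex/aws_tower.config`: the raised `maxConnections` and `uploadMaxThreads` parallelise S3 staging of large run folders, and `uploadChunkSize` also bounds object size, because S3 multipart uploads allow at most 10,000 parts, so 32 MB chunks cap a single upload at roughly 32 MB x 10,000 = 320 GB. If that ceiling were ever too low, a user could raise the chunk size in their own configuration; a hypothetical override, not part of this PR:

```groovy
// Hypothetical user-side override (e.g. in ~/.nextflow/config or via -c):
// 64 MB chunks raise the multipart ceiling to about 64 MB x 10,000 = 640 GB,
// at the cost of more memory per in-flight part.
aws {
    client {
        uploadChunkSize = 64.MB
    }
}
```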
diff --git a/docs/tigem.md b/docs/tigem.md
new file mode 100644
index 0000000..e562fe4
--- /dev/null
+++ b/docs/tigem.md
@@ -0,0 +1,7 @@
+# nf-core/configs: TIGEM configuration
+
+To use, run the pipeline with `-profile tigem`. This will download and launch the `tigem.config` which has been pre-configured with a setup suitable for the TIGEM personal biocluster.
+
+---
+
+This configuration profile can be used on TIGEM clusters with the pre-installed SLURM job scheduling system. An additional parameter is `google.zone`, which allows downloading data from GCP from a specific Google Cloud zone. It should not interfere with any local or other AWS configuration.
diff --git a/docs/ucl_myriad.md b/docs/ucl_myriad.md
new file mode 100644
index 0000000..1884a48
--- /dev/null
+++ b/docs/ucl_myriad.md
@@ -0,0 +1,51 @@
+# nf-core/configs: Myriad Configuration
+
+All nf-core pipelines have been successfully configured for use on the Myriad cluster at [University College London](https://www.rc.ucl.ac.uk/docs/Clusters/Myriad/).
+
+To use, run the pipeline with `-profile ucl_myriad`. This will download and launch the [`ucl_myriad.config`](../conf/ucl_myriad.config) which has been pre-configured with a setup suitable for the Myriad cluster. Using this profile, a docker image containing all of the required software will be downloaded and converted to a Singularity image before execution of the pipeline.
+
+## Using Nextflow on Myriad
+
+Before running the pipeline you will need to install and configure Nextflow and Singularity.
+
+### Singularity
+
+This can be done with the following commands:
+
+```bash
+## Load Singularity environment modules - these commands can be placed in your ~/.bashrc also
+module add java/openjdk-11/11.0.1
+module add singularity-env/1.0.0
+```
+
+Then set the correct configuration of the cache directories, where `<username>` is replaced with your UCL user ID, which you can find by entering `whoami` into the terminal once you are logged in to Myriad. Once you have added your user ID, save these lines into your `.bashrc` file in your home directory (e.g. `/home/<username>/.bashrc`):
+
+```bash
+# Set all the Singularity cache dirs to Scratch
+export SINGULARITY_CACHEDIR=/home/<username>/Scratch/.singularity/
+export SINGULARITY_TMPDIR=/home/<username>/Scratch/.singularity/tmp
+export SINGULARITY_LOCALCACHEDIR=/home/<username>/Scratch/.singularity/localcache
+export SINGULARITY_PULLFOLDER=/home/<username>/Scratch/.singularity/pull
+
+# Bind your Scratch directory so it is accessible from inside the container
+export SINGULARITY_BINDPATH=/scratch/scratch/<username>
+```
+
+### Nextflow
+
+Download the latest release of Nextflow. Warning: the self-update step should bring you to the latest version, but sometimes it does not, so please check which release is current (https://github.com/nextflow-io/nextflow/releases); you can pin a version manually by setting `NXF_VER` (e.g. `export NXF_VER=22.10.0`).
+
+```bash
+## Download Nextflow-all
+curl -s https://get.nextflow.io | bash
+export NXF_VER=22.10.0
+nextflow self-update
+chmod a+x nextflow
+mv nextflow ~/bin/nextflow
+```
+
+Then make sure `~/bin` is on your `PATH`, by placing the following line in your `.bashrc`:
+
+```bash
+export PATH=$PATH:/home/<username>/bin
+```
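Regarding the `google.zone` remark in `docs/tigem.md` above: that setting only takes effect when a run actually uses a Google Cloud executor or stages data from GCS, which is presumably why it does not interfere with the local SLURM setup. For illustration only, a hypothetical profile in which the zone would steer compute placement (the executor choice and project ID are assumptions, not part of this PR):

```groovy
// Hypothetical Google Cloud variant, for illustration only: with the
// google-lifesciences executor the zone controls where jobs are placed.
process.executor = 'google-lifesciences'
google {
    project = 'my-gcp-project'   // hypothetical project ID
    zone = 'europe-west1-b'
}
```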
diff --git a/docs/uppmax.md b/docs/uppmax.md
index 891762a..dd664fb 100644
--- a/docs/uppmax.md
+++ b/docs/uppmax.md
@@ -8,7 +8,16 @@ We have a Slack channel dedicated to UPPMAX users on the nf-core Slack: [https:/
 
 ## Using the UPPMAX config profile
 
-Before running the pipeline you will need to either install `Nextflow` or load it using the environment module system (this can be done with e.g. `module load bioinfo-tools Nextflow/<VERSION>` where `<VERSION>` is e.g. `20.10`).
+The recommended way to activate `Nextflow`, `nf-core`, and any pipeline
+available in `nf-core` on UPPMAX is to use the [module system](https://www.uppmax.uu.se/resources/software/module-system/):
+
+```bash
+# Log in to the desired cluster
+ssh <username>@{rackham,miarka,bianca}.uppmax.uu.se
+
+# Activate the modules, you can also choose to use a specific version with e.g. `Nextflow/21.10`.
+module load bioinfo-tools Nextflow nf-core nf-core-pipelines
+```
 
 To use, run the pipeline with `-profile uppmax` (one hyphen). This will download and launch the [`uppmax.config`](../conf/uppmax.config) which has been pre-configured with a setup suitable for the UPPMAX servers.
@@ -94,21 +103,13 @@ Before running a nf-core pipeline on `bianca` you will first have to download th
 
 In this guide, we use `rackham` to download and transfer files to the `wharf` area, but it can also be done on your own computer. If you use `rackham` to download the pipeline and the singularity containers, we recommend using an interactive session (cf [interactive guide](https://www.uppmax.uu.se/support/faq/running-jobs-faq/how-can-i-run-interactively-on-a-compute-node/)), which is what we do in the following guide.
 
+It is recommended to activate `Nextflow`, `nf-core` and your `nf-core`
+pipeline through the module system (see **Using the UPPMAX config profile**
+above). In case you need a specific version of any of these tools you can
+follow the guide below.
+
 ### Download and install Nextflow
 
-You can use the `Nextflow` UPPMAX provided `module`, but if necessary, you can also download a more recent version.
-
-```bash
-# Connect to bianca
-$ ssh -A <username>-<projid>@bianca.uppmax.uu.se
-
-# See the available versions for the module
-module spider Nextflow
-
-# Load a specific version of the Nextflow module
-module load bioinfo-tools Nextflow/<VERSION>
-```
-
 ```bash
 # Connect to rackham
 $ ssh -X <username>@rackham.uppmax.uu.se
@@ -160,19 +161,6 @@ $ export NXF_SINGULARITY_CACHEDIR=/castor/project/proj_nobackup/singularity-imag
 
 ### Install nf-core tools
 
-You can use the `nf-core` UPPMAX provided `module`, but if necessary, you can also download a more recent version.
-
-```bash
-# Connect to rackham
-$ ssh -X <username>@rackham.uppmax.uu.se
-
-# See the available versions for the module
-module spider nf-core
-
-# Load a specific version of the nf-core module
-module load bioinfo-tools nf-core/<VERSION>
-```
-
 ```bash
 # Connect to rackham
 $ ssh -X <username>@rackham.uppmax.uu.se
@@ -254,7 +242,7 @@ And then `nf-core/<pipeline>` can be used with:
 $ nextflow run ~/<pipeline> -profile uppmax --project <project> --genome <genome> ...
 ```
 
-## Update a pipeline
+### Update a pipeline
 
 To update, repeat the same steps as for installing and update the link.
diff --git a/nfcore_custom.config b/nfcore_custom.config
index aec233f..51c1121 100644
--- a/nfcore_custom.config
+++ b/nfcore_custom.config
@@ -70,6 +70,8 @@ profiles {
     sanger { includeConfig "${params.custom_config_base}/conf/sanger.config"}
     sbc_sharc { includeConfig "${params.custom_config_base}/conf/sbc_sharc.config"}
     seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"}
+    tigem { includeConfig "${params.custom_config_base}/conf/tigem.config"}
+    ucl_myriad { includeConfig "${params.custom_config_base}/conf/ucl_myriad.config"}
     uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" }
     unibe_ibu { includeConfig "${params.custom_config_base}/conf/unibe_ibu.config" }
     uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" }
diff --git a/pipeline/demultiplex.config b/pipeline/demultiplex.config
new file mode 100644
index 0000000..46c3e08
--- /dev/null
+++ b/pipeline/demultiplex.config
@@ -0,0 +1,13 @@
+/*
+ * -------------------------------------------------
+ *  nfcore/demultiplex custom profile Nextflow config file
+ * -------------------------------------------------
+ * Config options for custom environments.
+ * Cluster-specific config options should be saved
+ * in the conf/pipeline/demultiplex folder and imported
+ * under a profile name here.
+ */
+
+profiles {
+    aws_tower { includeConfig "${params.custom_config_base}/conf/pipeline/demultiplex/aws_tower.config" }
+}
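Finally, on how `pipeline/demultiplex.config` is actually reached: nf-core pipelines fetch pipeline-level custom profiles from this repository at run time through a guarded `includeConfig` in their own `nextflow.config`, roughly like the sketch below (reconstructed from the standard nf-core template as an assumption, not taken from this PR):

```groovy
// Sketch of the standard nf-core template hook (assumed, not part of this PR):
// the pipeline's nextflow.config pulls its pipeline-level custom profiles from
// nf-core/configs and degrades gracefully when offline.
try {
    includeConfig "${params.custom_config_base}/pipeline/demultiplex.config"
} catch (Exception e) {
    System.err.println("WARNING: Could not load nf-core/config/demultiplex profiles: ${params.custom_config_base}/pipeline/demultiplex.config")
}
```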