
Merge branch 'master' into gis-branch

Alexander Peltzer 2022-10-19 10:38:51 +02:00 committed by GitHub
commit cdc095c4f1
18 changed files with 288 additions and 128 deletions


@@ -88,6 +88,8 @@ jobs:
- "sanger"
- "sbc_sharc"
- "seg_globe"
- "tigem"
- "ucl_myriad"
- "uct_hpc"
- "unibe_ibu"
- "uppmax"


@@ -142,6 +142,8 @@ Currently documentation is available for the following systems:
- [SANGER](docs/sanger.md)
- [SBC_SHARC](docs/sbc_sharc.md)
- [SEG_GLOBE](docs/seg_globe.md)
- [TIGEM](docs/tigem.md)
- [UCL_MYRIAD](docs/ucl_myriad.md)
- [UCT_HPC](docs/uct_hpc.md)
- [UNIBE_IBU](docs/unibe_ibu.md)
- [UPPMAX](docs/uppmax.md)
@@ -205,6 +207,8 @@ Currently documentation is available for the following pipelines within specific
- [SBC_SHARC](docs/pipeline/atacseq/sbc_sharc.md)
- chipseq
- [SBC_SHARC](docs/pipeline/chipseq/sbc_sharc.md)
- demultiplex
- [AWS_TOWER](docs/pipeline/demultiplex/aws_tower.md)
- eager
- [EVA](docs/pipeline/eager/eva.md)
- mag


@@ -29,7 +29,7 @@ process {
singularity {
cacheDir = '/data1/containers/'
enabled = true
runOptions = "--bind /media/BTB_2021_01"
//runOptions = "--bind /media/BTB_2021_01"
}
// To use docker, use nextflow run -profile munin,docker


@@ -1,35 +1,33 @@
/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sheffield Bioinformatics Core Configuration Profile - ShARC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Custom Pipeline Resource Config for nf-core/atacseq
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Custom Pipeline Resource Config for nf-core/atacseq
// process-specific resource requirements - reduced specification from those in atacseq/conf/base.config
process {
// error and retry handling
errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
maxRetries = 2
// process labels
withLabel:process_low {
cpus = { check_max( 2 * task.attempt, 'cpus' ) }
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
memory = { check_max( 12.GB * task.attempt, 'memory' ) }
time = { check_max( 4.h * task.attempt, 'time' ) }
}
withLabel:process_medium {
cpus = { check_max( 4 * task.attempt, 'cpus' ) }
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
memory = { check_max( 32.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
withLabel:process_high {
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
memory = { check_max( 128.GB * task.attempt, 'memory' ) }
time = { check_max( 8.h * task.attempt, 'time' ) }
}
@@ -72,3 +70,4 @@ def check_max(obj, type) {
}
}
}
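For context, `check_max` is the standard nf-core helper that caps each request at the profile's `max_cpus`/`max_memory`/`max_time` params; a sketch of its usual definition, as each of these config files carries its own copy at the bottom:

```groovy
// Standard nf-core helper: cap a requested resource at the profile's max_* params.
def check_max(obj, type) {
    if (type == 'memory') {
        try {
            if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
                return params.max_memory as nextflow.util.MemoryUnit
            else
                return obj
        } catch (all) {
            println "   ### ERROR ###   Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
                return params.max_time as nextflow.util.Duration
            else
                return obj
        } catch (all) {
            println "   ### ERROR ###   Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min(obj, params.max_cpus as int)
        } catch (all) {
            println "   ### ERROR ###   Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
}
```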


@@ -1,35 +1,33 @@
/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sheffield Bioinformatics Core Configuration Profile - ShARC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Custom Pipeline Resource Config for nf-core/chipseq
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Custom Pipeline Resource Config for nf-core/chipseq
// process-specific resource requirements - reduced specification from those in chipseq/conf/base.config
process {
// error and retry handling
errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
maxRetries = 2
// process labels
withLabel:process_low {
cpus = { check_max( 2 * task.attempt, 'cpus' ) }
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
memory = { check_max( 12.GB * task.attempt, 'memory' ) }
time = { check_max( 4.h * task.attempt, 'time' ) }
}
withLabel:process_medium {
cpus = { check_max( 4 * task.attempt, 'cpus' ) }
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
memory = { check_max( 32.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
withLabel:process_high {
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
memory = { check_max( 128.GB * task.attempt, 'memory' ) }
time = { check_max( 8.h * task.attempt, 'time' ) }
}
@@ -72,3 +70,4 @@ def check_max(obj, type) {
}
}
}


@@ -0,0 +1,29 @@
// Profile config names for nf-core/configs
params {
// Specific nf-core/configs params
config_profile_contact = 'Edmund Miller (@emiller88)'
config_profile_description = 'nf-core/demultiplex AWS Tower profile provided by nf-core/configs'
}
aws {
batch {
maxParallelTransfers = 24
maxTransferAttempts = 3
}
client {
maxConnections = 24
uploadMaxThreads = 24
maxErrorRetry = 3
socketTimeout = 3600000
uploadRetrySleep = 1000
uploadChunkSize = 32.MB
}
}
process {
withName: BASES2FASTQ {
cpus = 16
memory = 48.GB
}
}


@@ -1,35 +1,33 @@
/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sheffield Bioinformatics Core Configuration Profile - ShARC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Custom Pipeline Resource Config for nf-core/rnaseq
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Custom Pipeline Resource Config for nf-core/rnaseq
// process-specific resource requirements - reduced specification from those in rnaseq/conf/base.config
process {
// error and retry handling
errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
maxRetries = 2
// process labels
withLabel:process_low {
cpus = { check_max( 2 * task.attempt, 'cpus' ) }
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
memory = { check_max( 12.GB * task.attempt, 'memory' ) }
time = { check_max( 4.h * task.attempt, 'time' ) }
}
withLabel:process_medium {
cpus = { check_max( 4 * task.attempt, 'cpus' ) }
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
memory = { check_max( 32.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
withLabel:process_high {
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
memory = { check_max( 128.GB * task.attempt, 'memory' ) }
time = { check_max( 8.h * task.attempt, 'time' ) }
}
@@ -38,7 +36,7 @@ process {
}
withLabel:process_high_memory {
memory = { check_max( 60.GB * task.attempt, 'memory' ) }
memory = { check_max( 160.GB * task.attempt, 'memory' ) }
}
}


@@ -1,38 +1,33 @@
/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sheffield Bioinformatics Core Configuration Profile - ShARC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Custom Pipeline Resource Config for nf-core/sarek
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Custom Pipeline Resource Config for nf-core/sarek
// process-specific resource requirements - reduced specification from those in sarek/conf/base.config
process {
// error and retry handling
errorStrategy = { task.exitStatus in [143,137,104,134,139,140,247] ? 'retry' : 'finish' }
maxRetries = 2
// process labels
withLabel:process_low {
cpus = { check_max( 2 * task.attempt, 'cpus' ) }
memory = { check_max( 4.GB * task.attempt, 'memory' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
time = { check_max( 4.h * task.attempt, 'time' ) }
}
withLabel:process_medium {
cpus = { check_max( 4 * task.attempt, 'cpus' ) }
memory = { check_max( 8.GB * task.attempt, 'memory' ) }
cpus = { check_max( 6 * task.attempt, 'cpus' ) }
memory = { check_max( 72.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
withLabel:process_high {
cpus = { check_max( 8 * task.attempt, 'cpus' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
cpus = { check_max( 12 * task.attempt, 'cpus' ) }
memory = { check_max( 192.GB * task.attempt, 'memory' ) }
time = { check_max( 8.h * task.attempt, 'time' ) }
}
@@ -41,7 +36,7 @@ process {
}
withLabel:process_high_memory {
memory = { check_max( 60.GB * task.attempt, 'memory' ) }
memory = { check_max( 240.GB * task.attempt, 'memory' ) }
}
@@ -49,7 +44,7 @@ process {
withName:'BWAMEM1_MEM|BWAMEM2_MEM' {
cpus = { check_max( 12 * task.attempt, 'cpus' ) }
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
memory = { check_max( 192.GB * task.attempt, 'memory' ) }
time = { check_max( 8.h * task.attempt, 'time' ) }
}
@@ -66,11 +61,13 @@ process {
}
withName:'GATK4_APPLYBQSR|GATK4_APPLYBQSR_SPARK|GATK4_BASERECALIBRATOR|GATK4_GATHERBQSRREPORTS' {
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
memory = { check_max( 72.GB * task.attempt, 'memory' ) }
}
withName:'GATK4_MARKDUPLICATES' {
memory = { check_max( 16.GB * task.attempt, 'memory' ) }
cpus = { check_max( 12 * task.attempt, 'cpus' ) }
memory = { check_max( 240.GB * task.attempt, 'memory' ) }
time = { check_max( 12.h * task.attempt, 'time' ) }
}
withName:'FREEBAYES|SAMTOOLS_STATS|SAMTOOLS_INDEX|UNZIP' {


@@ -1,39 +1,25 @@
/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sheffield Bioinformatics Core Configuration Profile - ShARC
// Sheffield Bioinformatics Core Configuration Profile - ShARC
// Base Institutional Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Base Institutional Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
// nf-core specific parameters displayed in header summary of each run
params {
// nf-core specific parameters displayed in header summary of each run
config_profile_description = 'Sheffield Bioinformatics Core - ShARC'
config_profile_contact = 'Lewis Quayle (l.quayle@sheffield.ac.uk)'
config_profile_url = 'https://docs.hpc.shef.ac.uk/en/latest/sharc/index.html'
// hpc resource limits
max_cpus = 16
max_memory = 64.GB
max_time = 96.h
}
// container engine
// hpc resource limits
singularity {
enabled = true
autoMounts = true
params {
max_cpus = 16
max_memory = 256.GB
max_time = 96.h
}
@@ -47,11 +33,27 @@ process {
executor = 'sge'
penv = 'smp'
queue = { task.time <= 6.h ? 'shortint.q' : 'all.q' }
clusterOptions = { "-l rmem=${task.memory.toGiga()}G" }
// error and retry handling
errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
maxRetries = 2
clusterOptions = { "-l rmem=${ (task.memory.toGiga() / task.cpus) }G" }
}
// optional executor settings
executor {
queueSize = 10
submitRateLimit = '1 sec'
}
// container engine
singularity {
enabled = true
autoMounts = true
}
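A note on the updated `clusterOptions` line above: SGE's `rmem` request is made per slot (core) rather than per job, so the task's total memory is now divided by its cpu count. A minimal worked sketch (assuming, as the division implies, per-slot memory accounting on ShARC):

```groovy
// rmem is a per-slot request, so divide total task memory by cpus:
// e.g. a 12-cpu, 192.GB task requests "-l rmem=16G" per slot
// (12 slots x 16 GB = 192 GB in total).
process {
    clusterOptions = { "-l rmem=${ (task.memory.toGiga() / task.cpus) }G" }
}
```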

conf/tigem.config Normal file

@@ -0,0 +1,14 @@
params {
config_profile_description = 'Telethon Institute of Genetics and Medicine (TIGEM) provided by nf-core/configs.'
config_profile_contact = 'Giuseppe Martone (@giusmar)'
config_profile_url = 'https://github.com/giusmar'
}
process.executor = 'slurm'
google.zone = 'europe-west1'
singularity {
enabled = true
autoMounts = true
cacheDir = 'work/singularity'
}

conf/ucl_myriad.config Normal file

@@ -0,0 +1,34 @@
params {
config_profile_description = 'University College London Myriad cluster'
config_profile_contact = 'Chris Wyatt (ucbtcdr@ucl.ac.uk)'
config_profile_url = 'https://www.rc.ucl.ac.uk/docs/Clusters/Myriad/'
}
process {
executor='sge'
penv = 'smp'
}
params {
// Defaults only, expecting to be overwritten
max_memory = 128.GB
max_cpus = 36
max_time = 72.h
// igenomes_base = 's3://ngi-igenomes/igenomes/'
}
// optional executor settings
executor {
queueSize = 10
submitRateLimit = '1 sec'
}
singularity {
enabled = true
autoMounts = true
}


@@ -7,9 +7,11 @@ workDir = "$scratch_dir/work"
// Perform work directory cleanup when the run has successfully completed
// cleanup = true
// Reduce the job submit rate to about 5 per second, this way the server won't be bombarded with jobs
// Reduce the job submit rate to about 3 per second, this way the server won't be bombarded with jobs
// Limit queueSize to keep job rate under control and avoid timeouts
executor {
submitRateLimit = '3 sec'
queueSize = 50
}
// Specify that singularity should be used and where the cache dir will be for the images
@@ -114,4 +116,4 @@ profiles {
scratch = "$scratch_dir"
}
}
}
}


@@ -0,0 +1,19 @@
# nf-core/configs: AWS Tower Demultiplex specific configuration
Extra specific configuration for the demultiplex pipeline.
## Usage
To use, run the pipeline with `-profile aws_tower`.
This will download and launch the demultiplex specific [`aws_tower.config`](../../../conf/pipeline/demultiplex/aws_tower.config) which has been pre-configured with a setup suitable for AWS Batch through Tower.
Example: `nextflow run nf-core/demultiplex -profile aws_tower`
## demultiplex specific configurations for AWS Tower
Specific configurations for AWS have been made for demultiplex.
### General profiles
- The general AWS Tower profile runs with default nf-core/demultiplex parameters, but with modifications to account for file transfer speed and optimized bases2fastq resources.

docs/tigem.md Normal file

@@ -0,0 +1,7 @@
# nf-core/configs: TIGEM configuration
To use, run the pipeline with `-profile tigem`. This will download and launch the `tigem.config` which has been pre-configured with a setup suitable for the TIGEM personal biocluster.
---
This configuration profile can be used on TIGEM clusters, with the pre-installed SLURM job scheduling system. An additional parameter is `google.zone`, which allows downloading data from GCP from a specific zone. It should not interfere with any local or AWS configuration.
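For instance, that zone could be overridden in a personal config when data sits in a different GCP region (a hypothetical sketch; `europe-west4` is only an illustrative value):

```groovy
// Hypothetical user-side override of the profile's GCP zone.
google.zone = 'europe-west4'
```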

docs/ucl_myriad.md Normal file

@@ -0,0 +1,51 @@
# nf-core/configs: Myriad Configuration
All nf-core pipelines have been successfully configured for use on the Myriad cluster at [University College London](https://www.rc.ucl.ac.uk/docs/Clusters/Myriad/).
To use, run the pipeline with `-profile ucl_myriad`. This will download and launch the [`ucl_myriad.config`](../conf/ucl_myriad.config) which has been pre-configured with a setup suitable for the Myriad cluster. Using this profile, a Docker image containing all of the required software will be downloaded and converted to a Singularity image before execution of the pipeline.
## Using Nextflow on Myriad
Before running the pipeline you will need to install and configure Nextflow and Singularity.
### Singularity
This can be done with the following commands:
```bash
## Load Singularity environment modules - these commands can be placed in your ~/.bashrc also
module add java/openjdk-11/11.0.1
module add singularity-env/1.0.0
```
Then set the correct configuration of the cache directories, where <YOUR_ID> is replaced with your username, which you can find by entering `whoami` into the terminal once you are logged into Myriad. Once you have added your username, save these lines into your .bashrc file in your home directory (e.g. /home/<YOUR_ID>/.bashrc):
```bash
# Set all the Singularity cache dirs to Scratch
export SINGULARITY_CACHEDIR=/home/<YOUR_ID>/Scratch/.singularity/
export SINGULARITY_TMPDIR=/home/<YOUR_ID>/Scratch/.singularity/tmp
export SINGULARITY_LOCALCACHEDIR=/home/<YOUR_ID>/Scratch/.singularity/localcache
export SINGULARITY_PULLFOLDER=/home/<YOUR_ID>/Scratch/.singularity/pull
# Bind your Scratch directory so it is accessible from inside the container
export SINGULARITY_BINDPATH=/scratch/scratch/<YOUR_ID>
```
### Nextflow
Download the latest release of Nextflow. Warning: the self-update step should update to the latest version, but sometimes does not, so please check which is the latest release (https://github.com/nextflow-io/nextflow/releases); you can then set the version manually by entering `NXF_VER=XX.XX.X`.
```bash
## Download Nextflow-all
curl -s https://get.nextflow.io | bash
export NXF_VER=22.10.0
nextflow self-update
chmod a+x nextflow
mv nextflow ~/bin/nextflow
```
Then make sure that your bin directory is on your PATH by placing the following line in your .bashrc:
```bash
export PATH=$PATH:/home/<YOUR_ID>/bin
```


@@ -8,7 +8,16 @@ We have a Slack channel dedicated to UPPMAX users on the nf-core Slack: [https:/
## Using the UPPMAX config profile
Before running the pipeline you will need to either install `Nextflow` or load it using the environment module system (this can be done with e.g. `module load bioinfo-tools Nextflow/<VERSION>` where `VERSION` is e.g. `20.10`).
The recommended way to activate `Nextflow`, `nf-core`, and any pipeline
available in `nf-core` on UPPMAX is to use the [module system](https://www.uppmax.uu.se/resources/software/module-system/):
```bash
# Log in to the desired cluster
ssh <USER>@{rackham,miarka,bianca}.uppmax.uu.se
# Activate the modules; you can also choose to use a specific version with e.g. `Nextflow/21.10`.
module load bioinfo-tools Nextflow nf-core nf-core-pipelines
```
To use, run the pipeline with `-profile uppmax` (one hyphen).
This will download and launch the [`uppmax.config`](../conf/uppmax.config) which has been pre-configured with a setup suitable for the UPPMAX servers.
@@ -94,21 +103,13 @@ Before running a nf-core pipeline on `bianca` you will first have to download th
In this guide, we use `rackham` to download and transfer files to the `wharf` area, but it can also be done on your own computer.
If you use `rackham` to download the pipeline and the singularity containers, we recommend using an interactive session (cf [interactive guide](https://www.uppmax.uu.se/support/faq/running-jobs-faq/how-can-i-run-interactively-on-a-compute-node/)), which is what we do in the following guide.
It is recommended to activate `Nextflow`, `nf-core` and your `nf-core`
pipeline through the module system (see **Using the UPPMAX config profile**
above). In case you need a specific version of any of these tools you can
follow the guide below.
### Download and install Nextflow
You can use the `Nextflow` UPPMAX provided `module`, but if necessary, you can also download a more recent version.
```bash
# Connect to bianca
$ ssh -A <USER>-<BIANCA_PROJECT>@bianca.uppmax.uu.se
# See the available versions for the module
module spider Nextflow
# Load a specific version of the Nextflow module
module load bioinfo-tools Nextflow/<VERSION>
```
```bash
# Connect to rackham
$ ssh -X <USER>@rackham.uppmax.uu.se
@@ -160,19 +161,6 @@ $ export NXF_SINGULARITY_CACHEDIR=/castor/project/proj_nobackup/singularity-imag
### Install nf-core tools
You can use the `nf-core` UPPMAX provided `module`, but if necessary, you can also download a more recent version.
```bash
# Connect to rackham
$ ssh -X <USER>@rackham.uppmax.uu.se
# See the available versions for the module
module spider nf-core
# Load a specific version of the nf-core module
module load bioinfo-tools nf-core/<VERSION>
```
```bash
# Connect to rackham
$ ssh -X <USER>@rackham.uppmax.uu.se
@@ -254,7 +242,7 @@ And then `nf-core/<PIPELINE>` can be used with:
$ nextflow run ~/<PIPELINE> -profile uppmax --project <BIANCA_PROJECT> --genome <GENOME_ASSEMBLY> ...
```
## Update a pipeline
### Update a pipeline
To update, repeat the same steps as for installing and update the link.


@@ -70,6 +70,8 @@ profiles {
sanger { includeConfig "${params.custom_config_base}/conf/sanger.config"}
sbc_sharc { includeConfig "${params.custom_config_base}/conf/sbc_sharc.config"}
seg_globe { includeConfig "${params.custom_config_base}/conf/seg_globe.config"}
tigem { includeConfig "${params.custom_config_base}/conf/tigem.config"}
ucl_myriad { includeConfig "${params.custom_config_base}/conf/ucl_myriad.config"}
uct_hpc { includeConfig "${params.custom_config_base}/conf/uct_hpc.config" }
unibe_ibu { includeConfig "${params.custom_config_base}/conf/unibe_ibu.config" }
uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" }


@@ -0,0 +1,13 @@
/*
* -------------------------------------------------
* nf-core/demultiplex custom profile Nextflow config file
* -------------------------------------------------
* Config options for custom environments.
* Cluster-specific config options should be saved
* in the conf/pipeline/demultiplex folder and imported
* under a profile name here.
*/
profiles {
aws_tower { includeConfig "${params.custom_config_base}/conf/pipeline/demultiplex/aws_tower.config" }
}
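As the header comment describes, a further cluster entry would be registered the same way; a sketch using a hypothetical `my_cluster` profile:

```groovy
// Hypothetical additional entry: the matching config would live at
// conf/pipeline/demultiplex/my_cluster.config in this repository.
profiles {
    my_cluster { includeConfig "${params.custom_config_base}/conf/pipeline/demultiplex/my_cluster.config" }
}
```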