Mirror of https://github.com/MillironX/nf-configs.git (synced 2024-12-22 10:38:16 +00:00)
Merge branch 'master' into medair

Commit 6aa78833c3
6 changed files with 51 additions and 61 deletions
@@ -61,7 +61,7 @@ profiles {
     params {
         config_profile_description = 'MPCDF raven profile (unofficially) provided by nf-core/configs.'
-        memory = 2000000.MB
+        max_memory = 2000000.MB
         max_cpus = 72
         max_time = 24.h
     }
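The rename matters because nf-core pipelines treat `params.max_memory` (together with `max_cpus` and `max_time`) as a ceiling that per-process requests are clamped against, whereas a bare `memory` param is not consulted by that logic. A minimal sketch of how the cap is typically applied, assuming the `check_max` helper from the nf-core pipeline template (not part of this config):

```nextflow
// Sketch only: check_max comes from the nf-core pipeline template.
// The request grows with each retry but is clamped to params.max_memory.
process {
    withLabel: process_high {
        memory = { check_max( 200.GB * task.attempt, 'memory' ) }
    }
}
```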
@@ -6,11 +6,15 @@ params {
     config_profile_description = 'nf-core/eager EVA profile provided by nf-core/configs'
 }
 
+env {
+    _JAVA_OPTIONS = "-XX:ParallelGCThreads=1"
+    OPENBLAS_NUM_THREADS = 1
+    OMP_NUM_THREADS = 1
+}
+
 // Specific nf-core/eager process configuration
 process {
 
     beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
 
     maxRetries = 2
 
     // Solution for clusterOptions comes from here: https://github.com/nextflow-io/nextflow/issues/332 + personal toMega conversion
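The new `env` scope injects these variables into the environment of every task in the run, so thread limits no longer have to be re-exported process by process. A minimal sketch of the equivalence (the process name is hypothetical):

```nextflow
// Before: every affected process carried its own export (hypothetical name).
process {
    withName: some_tool {
        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
    }
}

// After: one env scope covers all tasks.
env {
    OPENBLAS_NUM_THREADS = 1
    OMP_NUM_THREADS = 1
}
```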
@@ -69,17 +73,17 @@ process {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
     withName: fastqc_after_clipping {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
-        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
+        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
     withName: adapter_removal {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
     withName: bwa {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga())}G,h=!(bionode01|bionode02|bionode03|bionode04|bionode05|bionode06)" }
     }
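The comment above points at the trick from nextflow-io/nextflow#332: `clusterOptions` accepts a closure, so the SGE `h_vmem` request can be computed from each task's declared memory at submission time. A worked sketch under a hypothetical process name:

```nextflow
// With memory = 8.GB the closure resolves to:
//   -S /bin/bash -V -l h_vmem=16G
// i.e. h_vmem is requested at twice the task's memory as headroom.
process {
    withName: some_tool {
        memory         = 8.GB
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${task.memory.toGiga() * 2}G" }
    }
}
```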
@@ -188,26 +192,18 @@ process {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
     withName:get_software_versions {
         cache = false
-        clusterOptions = { "-S /bin/bash -V -l h=!(bionode06)" }
+        beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1 -Xmx512m"; export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
-        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toMega())}M" }
+        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toMega() * 8)}M" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
-    withName:eigenstrat_snp_coverage {
-        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
-    }
-
-    withName:kraken_merge {
-        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
-    }
-
     withName:multiqc {
-        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1;'
         clusterOptions = { "-S /bin/bash -V -j y -o output.log -l h_vmem=${task.memory.toGiga() * 2}G" }
     }
 
 }
 
 profiles {
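Throughout these blocks the retry policy is the same closure: resubmit when the exit status is one commonly produced by the scheduler killing a task (137 is 128 + SIGKILL, 143 is 128 + SIGTERM), otherwise use 'finish' so already-running jobs complete before the run stops. Combined with `maxRetries = 2`, each task gets at most three attempts:

```nextflow
// Retry only on kill/limit-style exit codes; any other failure lets
// running jobs drain ('finish') instead of hard-aborting the run.
process {
    maxRetries = 2
    errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
}
```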
@@ -226,8 +222,6 @@ profiles {
 
     process {
 
         beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
 
         maxRetries = 2
 
         // Solution for clusterOptions comes from here: https://github.com/nextflow-io/nextflow/issues/332 + personal toMega conversion
@@ -279,7 +273,7 @@ profiles {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
     withName: fastqc_after_clipping {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
@@ -404,8 +398,6 @@ profiles {
 
     process {
 
         beforeScript = 'export _JAVA_OPTIONS="-XX:ParallelGCThreads=1"'
 
         maxRetries = 2
 
         // Solution for clusterOptions comes from here: https://github.com/nextflow-io/nextflow/issues/332 + personal toMega conversion
@@ -457,7 +449,7 @@ profiles {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
     }
 
     withName: fastqc_after_clipping {
         clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 6)}G" }
         errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
@@ -13,18 +13,18 @@ params {
             // Please use 'MN908947.3' if possible because all primer sets are available / have been pre-prepared relative to that assembly
             fasta = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/NC_045512.2/GCF_009858895.2_ASM985889v3_genomic.200409.fna.gz'
             gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/NC_045512.2/GCF_009858895.2_ASM985889v3_genomic.200409.gff.gz'
-            nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-01-18T12_00_00Z.tar.gz'
+            nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-06-14T12_00_00Z.tar.gz'
             nextclade_dataset_name = 'sars-cov-2'
             nextclade_dataset_reference = 'MN908947'
-            nextclade_dataset_tag = '2022-01-18T12:00:00Z'
+            nextclade_dataset_tag = '2022-06-14T12:00:00Z'
         }
         'MN908947.3' {
             fasta = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.fna.gz'
             gff = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/GCA_009858895.3_ASM985889v3_genomic.200409.gff.gz'
-            nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-01-18T12_00_00Z.tar.gz'
+            nextclade_dataset = 'https://github.com/nf-core/test-datasets/raw/viralrecon/genome/MN908947.3/nextclade_sars-cov-2_MN908947_2022-06-14T12_00_00Z.tar.gz'
             nextclade_dataset_name = 'sars-cov-2'
             nextclade_dataset_reference = 'MN908947'
-            nextclade_dataset_tag = '2022-01-18T12:00:00Z'
+            nextclade_dataset_tag = '2022-06-14T12:00:00Z'
             primer_sets {
                 artic {
                     '1' {
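Both genome entries now pin the 2022-06-14 Nextclade dataset, with `nextclade_dataset_tag` kept in lockstep with the tag baked into the tarball name. These entries are selected by genome key at runtime; a hedged example of the usual selection flag (other required options omitted):

```bash
# Assumes nf-core/viralrecon's standard --genome lookup into this
# genomes config; --input, --platform, etc. are omitted for brevity.
nextflow run nf-core/viralrecon -profile <your_institution> --genome 'MN908947.3'
```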
@@ -1,35 +1,33 @@
+// Profile details
 params {
-    config_profile_description = 'The Wellcome Sanger Institute HPC cluster profile'
-    config_profile_contact = 'Anthony Underwood (@aunderwo)'
-    config_profile_url = 'https://www.sanger.ac.uk/group/informatics-support-group/'
-}
-
-singularity {
-    enabled = true
-    cacheDir = "${baseDir}/singularity"
-    runOptions = '--bind /lustre --bind /nfs/pathnfs01 --bind /nfs/pathnfs02 --bind /nfs/pathnfs03 --bind /nfs/pathnfs04 --bind /nfs/pathnfs05 --bind /nfs/pathnfs06 --no-home'
+    config_profile_description = 'The Wellcome Sanger Institute HPC cluster (farm5) profile'
+    config_profile_contact = 'Priyanka Surana (@priyanka-surana)'
+    config_profile_url = 'https://www.sanger.ac.uk'
 }
 
+// Queue and retry strategy
 process{
-    executor = 'lsf'
-    queue = 'normal'
-    errorStrategy = { task.attempt <= 5 ? "retry" : "finish" }
-    process.maxRetries = 5
-    withLabel:process_long {
-        queue = 'long'
-    }
+    executor = 'lsf'
+    queue = { task.time < 12.h ? 'normal' : task.time < 48.h ? 'long' : 'basement' }
+    errorStrategy = 'retry'
+    maxRetries = 5
 }
 
+// Executor details
 executor{
-    name = 'lsf'
-    perJobMemLimit = true
-    poolSize = 4
-    submitRateLimit = '5 sec'
-    killBatchSize = 50
+    name = 'lsf'
+    perJobMemLimit = true
+    poolSize = 4
+    submitRateLimit = '5 sec'
+    killBatchSize = 50
 }
 
+// Max resources
 params {
-    max_memory = 128.GB
-    max_cpus = 64
-    max_time = 48.h
+    max_memory = 683.GB
+    max_cpus = 256
+    max_time = 720.h
 }
 
+// For singularity
+singularity.runOptions = '--bind /lustre --bind /nfs'
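The rewritten profile replaces the fixed `queue = 'normal'` with a per-task lookup: the LSF queue is chosen from the task's time request at submission time. A sketch of how the closure resolves for a few representative requests:

```nextflow
// Evaluated per task at submission time:
//   time =  4.h -> 'normal'
//   time = 30.h -> 'long'
//   time = 96.h -> 'basement'
process {
    queue = { task.time < 12.h ? 'normal' : task.time < 48.h ? 'long' : 'basement' }
}
```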
@@ -9,7 +9,7 @@ workDir = "$scratch_dir/work"
 
 // Reduce the job submit rate to about 5 per second, this way the server won't be bombarded with jobs
 executor {
-    submitRateLimit = '5 sec'
+    submitRateLimit = '3 sec'
 }
 
 // Specify that singularity should be used and where the cache dir will be for the images
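Nextflow reads `submitRateLimit` as jobs per time unit, so '3 sec' allows at most three submissions per second; note the unchanged comment above still describes the old '5 sec' value. A sketch of the two accepted forms (values illustrative):

```nextflow
executor {
    submitRateLimit = '3 sec'      // at most 3 jobs per second
    // submitRateLimit = '50/2min' // or: 50 jobs every 2 minutes
}
```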
@@ -2,8 +2,6 @@
 
 To use, run the pipeline with `-profile sanger`. This will download and launch the [`sanger.config`](../conf/sanger.config) which has been
 pre-configured with a setup suitable for the Wellcome Sanger Institute LSF cluster.
-Using this profile, either a docker image containing all of the required software will be downloaded, and converted to a Singularity image or
-a Singularity image downloaded directly before execution of the pipeline.
 
 ## Running the workflow on the Wellcome Sanger Institute cluster
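A hedged example of such an invocation (the pipeline name and options are illustrative placeholders, not taken from this repository):

```bash
# -profile sanger pulls conf/sanger.config from nf-core/configs.
nextflow run nf-core/<pipeline> -profile sanger --input samplesheet.csv --outdir results
```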
@@ -14,10 +12,12 @@ The latest version of Nextflow is not installed by default on the cluster. You w
 A recommended place to move the `nextflow` executable to is `~/bin` so that it's in the `PATH`.
 
 Nextflow manages each process as a separate job that is submitted to the cluster by using the `bsub` command.
-Since the Nextflow pipeline will submit individual jobs for each process to the cluster and dependencies will be provided bu Singularity images you shoudl make sure that your account has access to the Singularity binary by adding these lines to your `.bashrc` file
+
+If asking Nextflow to use Singularity to run the individual jobs,
+you should make sure that your account has access to the Singularity binary by adding these lines to your `.bashrc` file
 
 ```bash
-[[ -f /software/pathogen/farm5 ]] && module load ISG/singularity
+[[ -f /software/modules/ISG/singularity ]] && module load ISG/singularity
 ```
 
 Nextflow shouldn't run directly on the submission node but on a compute node.
@@ -26,16 +26,16 @@ To do so make a shell script with a similar structure to the following code and
 
 ```bash
 #!/bin/bash
 #BSUB -o /path/to/a/log/dir/%J.o
-#BSUB -e /path/to/a/log/dir//%J.e
+#BSUB -e /path/to/a/log/dir/%J.e
 #BSUB -M 8000
-#BSUB -q long
-#BSUB -n 4
+#BSUB -q oversubscribed
+#BSUB -n 2
 
 export HTTP_PROXY='http://wwwcache.sanger.ac.uk:3128'
 export HTTPS_PROXY='http://wwwcache.sanger.ac.uk:3128'
 export NXF_ANSI_LOG=false
 export NXF_OPTS="-Xms8G -Xmx8G -Dnxf.pool.maxThreads=2000"
-export NXF_VER=21.04.0-edge
+export NXF_VER=22.04.0-5697
 
 nextflow run \
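The snippet ends at the truncated `nextflow run \` line; the wrapper script is then handed to LSF so that Nextflow itself runs on a compute node rather than the head node. A sketch of that submission step, with a hypothetical script name:

```bash
# Submit the wrapper; LSF reads the #BSUB directives from the script.
# run_nextflow.sh is a hypothetical name for the script above.
bsub < run_nextflow.sh
```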