diff --git a/conf/cbe.config b/conf/cbe.config
index 6e47361..3332b1e 100755
--- a/conf/cbe.config
+++ b/conf/cbe.config
@@ -12,12 +12,14 @@ process {
   clusterOptions = { task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' }
 }
 
-singularity.enabled = true
+singularity {
+  enabled = true
+  cacheDir = '/scratch-cbe/shared/containers'
+}
 
 params {
   params.max_time = 14.d
   params.max_cpus = 36
   params.max_memory = 4.TB
-  igenomes_ignore = true
-  igenomesIgnore = true //deprecated
-}
\ No newline at end of file
+  igenomes_base = '/resources/references/igenomes'
+}
diff --git a/docs/cbe.md b/docs/cbe.md
index 5b17a15..8d597a0 100644
--- a/docs/cbe.md
+++ b/docs/cbe.md
@@ -13,5 +13,7 @@
 module load nextflow/19.04.0
 module load singularity/3.2.1
 ```
+A local copy of the [AWS-iGenomes](https://registry.opendata.aws/aws-igenomes/) resource has been made available on CBE, so you should be able to run the pipeline against any reference available in the `igenomes.config` specific to the nf-core pipeline. You can do this by simply using the `--genome <GENOME_ID>` parameter.
+
 >NB: You will need an account to use the HPC cluster on CBE in order to run the pipeline. If in doubt contact IT.
 >NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
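
For context, a minimal sketch of how a run on CBE would pick up these changes; the pipeline name `nf-core/rnaseq` and the iGenomes key `GRCh38` are illustrative assumptions only, not part of this change:

```bash
# On a CBE login node, load the modules listed in docs/cbe.md
module load nextflow/19.04.0
module load singularity/3.2.1

# -profile cbe applies conf/cbe.config: Singularity images are cached under
# /scratch-cbe/shared/containers and --genome resolves references against the
# local iGenomes copy rooted at /resources/references/igenomes.
nextflow run nf-core/rnaseq -profile cbe --genome GRCh38
```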