
Merge branch 'master' into bgrande/sage-aws

This commit is contained in:
Bruno Grande 2022-09-01 13:21:36 -07:00 committed by GitHub
commit 80831358f9
11 changed files with 200 additions and 6 deletions


@@ -30,6 +30,7 @@ jobs:
matrix:
profile:
- "abims"
- "adcra"
- "alice"
- "aws_tower"
- "awsbatch"
@@ -49,6 +50,7 @@ jobs:
- "cheaha"
- "computerome"
- "crick"
- "crukmi"
- "denbi_qbic"
- "ebc"
- "eddie"


@@ -67,6 +67,8 @@ Before adding your config file to nf-core/configs, we highly recommend writing a
N.B. In your config file, please also make sure to add an extra `params` section with `params.config_profile_description`, `params.config_profile_contact` and `params.config_profile_url` set to reasonable values.
Users will then see who wrote the configuration profile when executing an nf-core pipeline and can report back if, for example, something is missing.
N.B. If you try to specify a shell environment variable within your profile, in some cases you may get an error during testing such as `Unknown config attribute env.USER_SCRATCH -- check config file: /home/runner/work/configs/configs/nextflow.config` (where the bash environment variable is `$USER_SCRATCH`). This is because the GitHub runner will not have your institutional environment variables set. To fix this, define an internal variable with a fallback value. A good example is in the [VSC_UGENT profile](https://github.com/nf-core/configs/blob/69468e7ca769643b151a6cfd1ab24185fc341c06/conf/vsc_ugent.config#L2).
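For example, a minimal sketch of this pattern (the variable name `USER_SCRATCH` and the `/tmp` fallback are illustrative; the same approach appears in the `cheaha` profile):

```
// Resolve the institutional environment variable at config-parse time,
// with a fallback value so the GitHub runner can still parse the profile
def scratch_dir = System.getenv("USER_SCRATCH") ?: "/tmp"

singularity {
    runOptions = "--contain --workdir $scratch_dir"
}
```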
### Testing
If you want to add a new custom config file to `nf-core/configs`, please test that your pipeline of choice runs as expected by using the [`-c`](https://www.nextflow.io/docs/latest/config.html) parameter.
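For example (the pipeline name and config path are placeholders):

```
nextflow run nf-core/rnaseq -profile test -c /path/to/your_institution.config
```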
@@ -86,6 +88,7 @@ See [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs
Currently documentation is available for the following systems:
- [ABIMS](docs/abims.md)
- [ADCRA](docs/adcra.md)
- [ALICE](docs/alice.md)
- [AWSBATCH](docs/awsbatch.md)
- [AWS_TOWER](docs/aws_tower.md)
@@ -103,6 +106,7 @@ Currently documentation is available for the following systems:
- [CHEAHA](docs/cheaha.md)
- [Computerome](docs/computerome.md)
- [CRICK](docs/crick.md)
- [Cancer Research UK Manchester Institute](docs/crukmi.md)
- [CZBIOHUB_AWS](docs/czbiohub.md)
- [DENBI_QBIC](docs/denbi_qbic.md)
- [EBC](docs/ebc.md)
@@ -203,6 +207,7 @@ Currently documentation is available for the following pipelines within specific
- rnavar
- [MUNIN](docs/pipeline/rnavar/munin.md)
- sarek
- [Cancer Research UK Manchester Institute](docs/pipeline/sarek/crukmi.md)
- [MUNIN](docs/pipeline/sarek/munin.md)
- [UPPMAX](docs/pipeline/sarek/uppmax.md)
- taxprofiler

conf/adcra.config Normal file

@@ -0,0 +1,40 @@
/*
* --------------------------------------------------------------
* nf-core pipelines config file for AD project using CRA HPC
* --------------------------------------------------------------
*/
params {
config_profile_name = 'adcra'
config_profile_description = 'CRA HPC profile provided by nf-core/configs'
config_profile_contact = 'Kalayanee Chairat (@kalayaneech)'
config_profile_url = 'https://bioinformatics.kmutt.ac.th/'
}
params {
max_cpus = 16
max_memory = 128.GB
max_time = 120.h
}
// Specify the job scheduler
executor {
name = 'slurm'
queueSize = 20
submitRateLimit = '6/1min'
}
singularity {
enabled = true
autoMounts = true
}
process {
scratch = true
queue = 'unlimit'
queueStatInterval = '10 min'
maxRetries = 3
errorStrategy = { task.attempt <=3 ? 'retry' : 'finish' }
cache = 'lenient'
exitStatusReadTimeoutMillis = '2700000'
}


@@ -1,3 +1,6 @@
// Define the Scratch directory
def scratch_dir = System.getenv("USER_SCRATCH") ?: "/tmp"
params {
config_profile_name = 'cheaha'
config_profile_description = 'University of Alabama at Birmingham Cheaha HPC'
@@ -6,14 +9,14 @@ params {
}
env {
TMPDIR="$USER_SCRATCH"
SINGULARITY_TMPDIR="$USER_SCRATCH"
TMPDIR="$scratch_dir"
SINGULARITY_TMPDIR="$scratch_dir"
}
singularity {
enabled = true
autoMounts = true
runOptions = "--contain --workdir $USER_SCRATCH"
runOptions = "--contain --workdir $scratch_dir"
}
process {

conf/crukmi.config Normal file

@@ -0,0 +1,52 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'Cancer Research UK Manchester Institute HPC cluster profile provided by nf-core/configs'
config_profile_contact = 'Stephen Kitcatt, Simon Pearce (@skitcattCRUKMI, @sppearce)'
config_profile_url = 'http://scicom.picr.man.ac.uk/projects/user-support/wiki'
}
env {
SINGULARITY_CACHEDIR = '/lmod/nextflow_software'
}
singularity {
enabled = true
autoMounts = true
}
process {
beforeScript = 'module load apps/singularity/3.8.0'
executor = 'pbs'
errorStrategy = {task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish'}
maxErrors = '-1'
maxRetries = 3
withLabel:process_low {
cpus = { check_max( 1 * task.attempt, 'cpus' ) }
memory = { check_max( 5.GB * task.attempt, 'memory' ) }
}
withLabel:process_medium {
cpus = { check_max( 4 * task.attempt, 'cpus' ) }
memory = { check_max( 20.GB * task.attempt, 'memory' ) }
}
withLabel:process_high {
cpus = { check_max( 16 * task.attempt, 'cpus' ) }
memory = { check_max( 80.GB * task.attempt, 'memory' ) }
}
}
executor {
name = 'pbs'
queueSize = 1000
pollInterval = '10 sec'
}
params {
max_memory = 2000.GB
max_cpus = 32
max_time = 72.h
}


@@ -0,0 +1,18 @@
// Profile config names for nf-core/configs
params {
// Specific nf-core/configs params
config_profile_description = 'Cancer Research UK Manchester Institute HPC cluster profile provided by nf-core/configs'
config_profile_contact = 'Stephen Kitcatt, Simon Pearce (@skitcattCRUKMI, @sppearce)'
config_profile_url = 'http://scicom.picr.man.ac.uk/projects/user-support/wiki'
}
// Specific nf-core/sarek process configuration
process {
withName: 'SAMTOOLS_MPILEUP' {
cpus = 1
memory = { 5.GB * task.attempt }
}
}

docs/adcra.md Normal file

@@ -0,0 +1,39 @@
# nf-core/configs: CRA HPC Configuration
The nf-core pipelines sarek and rnaseq have been tested on the CRA HPC.
## Before running the pipeline
- You will need an account on the CRA HPC cluster in order to run the pipeline.
- Make sure that Singularity and Nextflow are installed.
- Download the pipeline's Singularity images to the HPC system using [nf-core tools](https://nf-co.re/tools/#downloading-pipelines-for-offline-use):
```
$ conda install nf-core
$ nf-core download
```
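A hedged example of a non-interactive download (the pipeline name, revision and flags are illustrative and match the nf-core/tools CLI available at the time of writing):

```
nf-core download rnaseq -r 3.9 --container singularity
```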
- You will need to specify a Singularity cache directory in your `~/.bashrc`. Container images will then be stored in this cache directory instead of being downloaded again every time you run a pipeline. Since space in your home directory is limited, using the Lustre file system is recommended.
```
export NXF_SINGULARITY_CACHEDIR="/lustre/fs0/storage/yourCRAAccount/cache_dir"
```
- Download the iGenomes reference to be used as a local copy:
```
$ aws s3 --no-sign-request --region eu-west-1 sync s3://ngi-igenomes/igenomes/Homo_sapiens/GATK/GRCh38/ /lustre/fs0/storage/yourCRAAccount/references/Homo_sapiens/GATK/GRCh38/
```
## Running the pipeline using the adcra config profile
- Run the pipeline within a [screen](https://linuxize.com/post/how-to-use-linux-screen/) or [tmux](https://linuxize.com/post/getting-started-with-tmux/) session.
- Specify the config profile with `-profile adcra`.
- Using the Lustre file system to store results (`--outdir`) and intermediate files (`-work-dir`) is recommended.
```
nextflow run /path/to/nf-core/<pipeline-name> -profile adcra \
--genome GRCh38 \
--igenomes_base /path/to/genome_references/ \
... # the rest of pipeline flags
```

docs/crukmi.md Normal file

@@ -0,0 +1,15 @@
# nf-core/configs: Cancer Research UK Manchester Institute Configuration
All nf-core pipelines have been successfully configured for use on the HPC cluster (phoenix) at the Cancer Research UK Manchester Institute.
To use, run the pipeline with `-profile crukmi`. This will download and launch the [`crukmi.config`](../conf/crukmi.config), which has been pre-configured with a setup suitable for the phoenix HPC. Using this profile, Singularity images will be downloaded to run on the cluster.
Before running the pipeline you will need to load Nextflow using the environment module system, for example via:
```bash
## Load Nextflow and Singularity environment modules
module purge
module load apps/nextflow/22.04.5
```
The pipeline should always be executed inside a workspace on the `/scratch/` system. All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory.
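A typical session might therefore look like the following (the workspace path is a placeholder):

```bash
## Launch from a workspace on /scratch/ so work/ and results/ are created there
cd /scratch/your_workspace
nextflow run nf-core/sarek -profile crukmi --input samplesheet.csv --outdir results

## After the run has finished successfully, remove the large work/ directory
rm -rf work/
```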


@@ -0,0 +1,17 @@
# nf-core/configs: CRUK-MI sarek specific configuration
Extra configuration specific to the sarek pipeline.
## Usage
To use, run the pipeline with `-profile crukmi`.
This will download and launch the sarek-specific [`crukmi.config`](../../../conf/pipeline/sarek/crukmi.config), which has been pre-configured with a setup suitable for the Cancer Research UK Manchester Institute cluster (phoenix).
Example: `nextflow run nf-core/sarek -profile crukmi`
## Sarek specific configurations for CRUK-MI
Specific configurations for `CRUK-MI` have been made for sarek.
- Initial requested resources for `SAMTOOLS_MPILEUP` are only 5 GB of memory and 1 core.


@@ -11,6 +11,7 @@
//Please use a new line per include Config section to allow easier linting/parsing. Thank you.
profiles {
abims { includeConfig "${params.custom_config_base}/conf/abims.config" }
adcra { includeConfig "${params.custom_config_base}/conf/adcra.config" }
alice { includeConfig "${params.custom_config_base}/conf/alice.config" }
aws_tower { includeConfig "${params.custom_config_base}/conf/aws_tower.config" }
awsbatch { includeConfig "${params.custom_config_base}/conf/awsbatch.config" }
@@ -30,6 +31,7 @@ profiles {
cheaha { includeConfig "${params.custom_config_base}/conf/cheaha.config" }
computerome { includeConfig "${params.custom_config_base}/conf/computerome.config" }
crick { includeConfig "${params.custom_config_base}/conf/crick.config" }
crukmi { includeConfig "${params.custom_config_base}/conf/crukmi.config" }
czbiohub_aws { includeConfig "${params.custom_config_base}/conf/czbiohub_aws.config" }
denbi_qbic { includeConfig "${params.custom_config_base}/conf/denbi_qbic.config" }
ebc { includeConfig "${params.custom_config_base}/conf/ebc.config" }


@@ -9,10 +9,11 @@
*/
profiles {
munin { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/munin.config" }
uppmax { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/uppmax.config" }
icr_davros { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/icr_davros.config" }
cfc { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/cfc.config" }
cfc_dev { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/cfc.config" }
crukmi { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/crukmi.config" }
eddie { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/eddie.config" }
icr_davros { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/icr_davros.config" }
munin { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/munin.config" }
uppmax { includeConfig "${params.custom_config_base}/conf/pipeline/sarek/uppmax.config" }
}