
adding adcra configuration

kalayaneech committed 2022-08-25 16:17:09 +07:00
parent 99df08f409
commit 93c4ba67a0
3 changed files with 70 additions and 0 deletions

conf/adcra.config Normal file (40 additions)

@@ -0,0 +1,40 @@
/*
* --------------------------------------------------------------
* nf-core pipelines config file for AD project using CRA HPC
* --------------------------------------------------------------
*/
params {
config_profile_name = 'adcra'
config_profile_description = 'CRA HPC profile provided by nf-core/configs'
config_profile_contact = 'Kalayanee Chairat (@kalayaneech)'
config_profile_url = 'https://bioinformatics.kmutt.ac.th/'
}
// Maximum resources that a single job can request
params {
max_cpus = 16
max_memory = 128.GB
max_time = 120.h
}
// Specify the job scheduler
executor {
name = 'slurm'
queueSize = 20
submitRateLimit = '6/1min'
}
// Use Singularity to manage containers
singularity {
enabled = true
autoMounts = true
}
// Default settings applied to all processes
process {
scratch = true
queue = 'unlimit'
queueStatInterval = '10 min'
maxRetries = 3
errorStrategy = { task.attempt <=3 ? 'retry' : 'finish' }
cache = 'lenient'
exitStatusReadTimeoutMillis = '2700000'
}
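
A quick way to confirm that the new profile resolves as expected is to print the configuration Nextflow would apply. This is only a sketch (the pipeline name is an example, and pulling it requires network access):

```
$ nextflow config nf-core/rnaseq -profile adcra
```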

docs/adcra.md Normal file (29 additions)

@@ -0,0 +1,29 @@
# nf-core/configs: CRA HPC Configuration
The nf-core pipelines sarek and rnaseq have been tested on the CRA HPC.
## Before running the pipeline
- You will need an account on the CRA HPC cluster in order to run the pipeline.
- Make sure that Singularity and Nextflow are installed.
- Download pipeline Singularity images to the HPC system using [nf-core tools](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) (a more specific example follows the snippet below):
```
$ conda install nf-core
$ nf-core download
```
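For example, to fetch a specific pipeline release (the pipeline name and revision below are placeholders; `nf-core download` will prompt for any options left unspecified):
```
$ nf-core download rnaseq -r 3.8.1
```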
- You will need to set a Singularity cache directory in your `~/.bashrc`. Container images are then stored in this cache directory rather than being downloaded again every time you run a pipeline. Since space in the home directory is limited, using the Lustre file system is recommended (see also the example after the snippet below):
```
export NXF_SINGULARITY_CACHEDIR="/lustre/fs0/storage/yourCRAAccount/cache_dir"
```
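For example, assuming the cache directory does not yet exist, create it and reload your shell configuration (the path is illustrative):
```
$ mkdir -p /lustre/fs0/storage/yourCRAAccount/cache_dir
$ source ~/.bashrc
```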
- Download the iGenomes reference to be used as a local copy:
```
$ aws s3 --no-sign-request --region eu-west-1 sync s3://ngi-igenomes/igenomes/Homo_sapiens/GATK/GRCh38/ /lustre/fs0/storage/yourCRAAccount/references/Homo_sapiens/GATK/GRCh38/
```
## Running the pipeline using the adcra config profile
- Run the pipeline within a [screen](https://linuxize.com/post/how-to-use-linux-screen/) or [tmux](https://linuxize.com/post/getting-started-with-tmux/) session.
- Specify the config profile with `-profile adcra`.
- Using the Lustre file system to store results (`--outdir`) and intermediate files (`-work-dir`) is recommended; a combined sketch follows the example below.
```
nextflow run /path/to/nf-core/<pipeline-name> -profile adcra \
--genome GRCh38 \
--igenomes_base /path/to/genome_references/ \
... # the rest of pipeline flags
```
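As a combined sketch of a full launch on this system (the session name, account paths and pipeline are illustrative):
```
# Start a detachable session so the run survives a dropped SSH connection
$ screen -S nfcore_run
# Launch the pipeline, keeping results and intermediate files on the Lustre file system
$ nextflow run nf-core/sarek -profile adcra \
    --genome GRCh38 \
    --igenomes_base /lustre/fs0/storage/yourCRAAccount/references/ \
    --outdir /lustre/fs0/storage/yourCRAAccount/results \
    -work-dir /lustre/fs0/storage/yourCRAAccount/work
```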

nfcore_custom.config (1 addition)

@@ -11,6 +11,7 @@
//Please use a new line per include Config section to allow easier linting/parsing. Thank you.
profiles {
abims { includeConfig "${params.custom_config_base}/conf/abims.config" }
adcra { includeConfig "${params.custom_config_base}/conf/adcra.config" }
alice { includeConfig "${params.custom_config_base}/conf/alice.config" }
aws_tower { includeConfig "${params.custom_config_base}/conf/aws_tower.config" }
awsbatch { includeConfig "${params.custom_config_base}/conf/awsbatch.config" }