mirror of
https://github.com/MillironX/nf-configs.git
synced 2024-11-22 16:29:55 +00:00
Merge pull request #211 from DoaneAS/master
Adding WCM.config for Weill Cornell Medicine cluster
This commit is contained in:
commit
5988c51361
2 changed files with 52 additions and 0 deletions
28
conf/wcm.config
Normal file
28
conf/wcm.config
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
// Shared lab directory holding Singularity images for Nextflow pipelines.
// The same path appears as singularity.cacheDir further down this file.
singularityDir = "/athena/elementolab/scratch/reference/.singularity/singularity_images_nextflow"
|
||||||
|
|
||||||
|
// Institutional profile metadata and reference data location for the
// Weill Cornell Medicine (WCM) Scientific Computing Unit Slurm cluster.
params {
    config_profile_description = 'Weill Cornell Medicine, Scientific Computing Unit Slurm cluster profile provided by nf-core/configs'
    config_profile_contact     = 'Ashley Stephen Doane, PhD (@DoaneAS)'
    // Pre-staged iGenomes reference bundle on the shared filesystem.
    igenomes_base              = '/athena/elementolab/scratch/reference/igenomes'
}
|
||||||
|
|
||||||
|
// Software is provisioned via Singularity containers.
singularity {
    enabled = true
    // Propagate SINGULARITY_BINDPATH from the submitting environment so
    // bind mounts configured there remain visible inside containers.
    envWhitelist = 'SINGULARITY_BINDPATH'
    // Reuse the shared image directory declared at the top of this file
    // instead of repeating the same literal path (original hardcoded it).
    cacheDir = singularityDir
    autoMounts = true
}
|
||||||
|
|
||||||
|
// Task submission settings: every process is dispatched as a Slurm job.
process {
    executor = 'slurm'
    queue = 'panda_physbio'
    // Run each task in node-local scratch. The original block set
    // `scratch = true` and then immediately overrode it with this path,
    // leaving the first assignment dead — only the path takes effect.
    // `whoami` and ${SLURM_JOBID} are single-quoted so they are expanded
    // by the shell on the compute node, not by Groovy.
    scratch = '/scratchLocal/`whoami`_${SLURM_JOBID}'
}
|
||||||
|
|
||||||
|
// Cluster-wide resource ceilings; nf-core pipelines clamp per-process
// requests to these maxima.
params {
    max_time   = 24.h
    max_memory = 32.GB
    max_cpus   = 8
}
|
||||||
|
|
24
docs/wcm.md
Normal file
24
docs/wcm.md
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
# nf-core/configs: Weill Cornell Medicine Configuration
|
||||||
|
|
||||||
|
All nf-core pipelines have been successfully configured for use on the panda cluster at the WCM.
|
||||||
|
|
||||||
|
To use, run the pipeline with `-profile wcm`. This will download and launch the [`wcm.config`](../conf/wcm.config) which has been pre-configured with a setup suitable for the WCM slurm cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
|
||||||
|
|
||||||
|
## Running the workflow on the WCM cluster
|
||||||
|
|
||||||
|
Nextflow is not installed by default on the WCM cluster.
|
||||||
|
|
||||||
|
- Install Nextflow: [here](https://www.nextflow.io/docs/latest/getstarted.html#)
|
||||||
|
|
||||||
|
Nextflow manages each process as a separate job that is submitted to the cluster by using the `sbatch` command.
|
||||||
|
Nextflow shouldn't run directly on a login node but on a compute node or lab-specific interactive server when configured as a submit host.
|
||||||
|
|
||||||
|
1. Run nextflow on a compute node or interactive server with submit host capability:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run nextflow workflow
|
||||||
|
nextflow run \
|
||||||
|
nf-core/chipseq \
|
||||||
|
-resume \
|
||||||
|
-profile test,wcm
|
||||||
|
```
|
Loading…
Reference in a new issue