Mirror of https://github.com/MillironX/nf-configs.git
initial commit

commit ae0adb056f (parent dc8705e7cb)
3 changed files with 67 additions and 0 deletions

conf/hasta.config (new file, 51 lines)
@@ -0,0 +1,51 @@
// Profile config names for nf-core/configs
params {
    config_profile_description = 'Hasta, a local cluster setup at Clinical Genomics, Stockholm.'
    config_profile_contact = 'Clinical Genomics, Stockholm'
    config_profile_url = 'https://github.com/Clinical-Genomics'
    priority = null
    clusterOptions = null
    schema_ignore_params = "priority,clusterOptions"
}

singularity {
    enabled = true
}

params {
    max_memory = 180.GB
    max_cpus = 36
    max_time = 336.h

    igenomes_ignore = true
}

process {
    executor = 'slurm'
    clusterOptions = { "-A $params.priority ${params.clusterOptions ?: ''}" }

    withName:'PICARD_MARKDUPLICATES' {
        cpus = { check_max( 13 * task.attempt, 'cpus' ) }
        memory = { check_max( 130.GB * task.attempt, 'memory' ) }
    }
    withName:'DEEPVARIANT' {
        cpus = { check_max( 16 * task.attempt, 'cpus' ) }
        memory = { check_max( 80.GB * task.attempt, 'memory' ) }
    }
}

profiles {
    dev_prio {
        params {
            priority = 'development'
            clusterOptions = "--qos=low"
        }
    }

    prod_prio {
        params {
            priority = 'production'
            clusterOptions = "--qos=low"
        }
    }
}
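A minimal sketch, in plain shell, of how the dynamic `clusterOptions` closure above resolves once a priority profile is active (values taken from the `dev_prio` profile above):

```bash
# Mirrors the Groovy closure "-A $params.priority ${params.clusterOptions ?: ''}"
priority='development'        # set by the dev_prio profile
cluster_options='--qos=low'   # set by the dev_prio profile
echo "-A $priority ${cluster_options:-}"   # prints: -A development --qos=low
```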

docs/hasta.md (new file, 15 lines)
@@ -0,0 +1,15 @@
# nf-core/configs: Hasta Configuration

## Using the Hasta config profile

Before running the pipeline, `Nextflow` will need to be installed in the conda environment being used.
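A minimal sketch of one way to do that, assuming the `bioconda` channel is available to conda:

```bash
# Install Nextflow into the currently active conda environment
# (assumes the bioconda channel; exact channel setup may differ per user).
conda install -c bioconda nextflow
```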

To use, run the pipeline with `-profile hasta` (one hyphen).
This will download and launch the [`hasta.config`](../conf/hasta.config), which has been pre-configured with a setup suitable for the Hasta servers.
It will enable `Nextflow` to manage the pipeline jobs via the `Slurm` job scheduler.
Using this profile, the `Docker` images containing the required software will be downloaded and converted to `Singularity` images if needed before execution of the pipeline.
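A hypothetical launch command; the pipeline name is a placeholder, not part of this commit:

```bash
# Launch any nf-core pipeline on Hasta; <pipeline> is a placeholder.
nextflow run nf-core/<pipeline> -profile hasta
```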

Recent versions of `Nextflow` also support the environment variable `NXF_SINGULARITY_CACHEDIR`, which can be used to supply images. A use case: run `NXF_SINGULARITY_CACHEDIR=/path/to/images; export NXF_SINGULARITY_CACHEDIR` before running the pipeline.
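A sketch of that use case, assuming `/path/to/images` stands in for a real image directory:

```bash
# Point Nextflow at pre-downloaded Singularity images before launching.
NXF_SINGULARITY_CACHEDIR=/path/to/images
export NXF_SINGULARITY_CACHEDIR
nextflow run nf-core/<pipeline> -profile hasta   # <pipeline> is a placeholder
```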

## Development and production config

Each user on Hasta has a priority based on their allocated team, either development or production. To enable this when submitting a job to `Slurm`, run with `-profile hasta,dev_prio` or `-profile hasta,prod_prio`. This overrides certain parts of the config and submits the job with the corresponding priority.
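Hypothetical submissions under each priority (the pipeline name is again a placeholder):

```bash
# Development-team priority: applies priority = 'development' and --qos=low.
nextflow run nf-core/<pipeline> -profile hasta,dev_prio

# Production-team priority: applies priority = 'production' and --qos=low.
nextflow run nf-core/<pipeline> -profile hasta,prod_prio
```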

nfcore_custom.config (1 line added)

@@ -33,6 +33,7 @@ profiles {
     genouest       { includeConfig "${params.custom_config_base}/conf/genouest.config" }
     gis            { includeConfig "${params.custom_config_base}/conf/gis.config" }
     google         { includeConfig "${params.custom_config_base}/conf/google.config" }
+    hasta          { includeConfig "${params.custom_config_base}/conf/hasta.config" }
     hebbe          { includeConfig "${params.custom_config_base}/conf/hebbe.config" }
     icr_davros     { includeConfig "${params.custom_config_base}/conf/icr_davros.config" }
     ifb_core       { includeConfig "${params.custom_config_base}/conf/ifb_core.config" }