
Merge branch 'master' into master

Phil Ewels 2021-03-24 12:36:15 +01:00 committed by GitHub
commit bfab371c94
11 changed files with 178 additions and 3 deletions

.github/workflows/main.yml

@@ -16,7 +16,7 @@ jobs:
needs: test_all_profiles
strategy:
matrix:
profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'imperial', 'imperial_mb', 'kraken', 'mpcdf', 'munin', 'oist', 'pasteur', 'phoenix', 'prince', 'seg_globe', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh']
profile: ['abims', 'awsbatch', 'bi','bigpurple', 'binac', 'cbe', 'ccga_dx', 'ccga_med', 'cfc', 'cfc_dev', 'crick', 'denbi_qbic', 'ebc', 'genotoul', 'genouest', 'gis', 'google', 'hebbe', 'icr_davros', 'ifb_core', 'imperial', 'imperial_mb', 'jax', 'kraken', 'mpcdf', 'munin', 'oist', 'pasteur', 'phoenix', 'prince', 'seg_globe', 'shh', 'uct_hpc', 'uppmax', 'utd_ganymede', 'uzh']
steps:
- uses: actions/checkout@v1
- name: Install Nextflow

README.md

@@ -113,6 +113,7 @@ Currently documentation is available for the following systems:
* [GOOGLE](docs/google.md)
* [HEBBE](docs/hebbe.md)
* [ICR_DAVROS](docs/icr_davros.md)
* [JAX](docs/jax.md)
* [KRAKEN](docs/kraken.md)
* [MPCDF](docs/mpcdf.md)
* [MUNIN](docs/munin.md)

conf/cbe.config

@@ -2,13 +2,13 @@
params {
config_profile_description = 'CLIP BATCH ENVIRONMENT (CBE) cluster profile provided by nf-core/configs'
config_profile_contact = 'Patrick Hüther (@phue)'
config_profile_url = 'http://www.gmi.oeaw.ac.at/'
config_profile_url = 'https://clip.science'
}
process {
executor = 'slurm'
queue = { task.memory <= 170.GB ? 'c' : 'm' }
clusterOptions = { task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' }
clusterOptions = { task.time <= 1.h ? '--qos rapid' : task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' }
module = 'anaconda3/2019.10'
}

conf/ifb_core.config (new file)

@@ -0,0 +1,24 @@
//Profile config names for nf-core/configs
params {
config_profile_description = 'The IFB core cluster profile'
config_profile_contact = 'https://community.france-bioinformatique.fr'
config_profile_url = 'https://www.france-bioinformatique.fr/'
}
singularity {
// need one image per execution
enabled = true
runOptions = '-B /shared'
}
process {
executor = 'slurm'
}
params {
igenomes_ignore = true
// Max resources requested by a normal node on the IFB core cluster.
max_memory = 240.GB
max_cpus = 28
max_time = 96.h
}

conf/jax.config (new file)

@@ -0,0 +1,21 @@
params {
config_profile_description = 'The Jackson Laboratory Sumner HPC profile provided by nf-core/configs.'
config_profile_contact = 'Asaf Peer (@peera)'
config_profile_url = 'https://jacksonlaboratory.sharepoint.com/sites/ResearchIT/SitePages/Welcome-to-Sumner.aspx'
}
executor.$slurm.queueSize = 250
process {
executor = "slurm"
queue = "compute"
clusterOptions = {task.time < 72.h ? '-q batch' : '-q long'}
module = "slurm"
beforeScript = 'module load singularity'
}
singularity.enabled = true
singularity.autoMounts = true
params {
max_memory = 768.GB
max_cpus = 70
max_time = 336.h
}

conf/utd_ganymede.config

@@ -5,6 +5,11 @@ params {
config_profile_url = 'http://docs.oithpc.utdallas.edu/'
}
env {
TMPDIR = '/home/$USER/scratch/tmp'
SINGULARITY_CACHEDIR = '/home/$USER/scratch/tmp'
}
singularity {
enabled = true
envWhitelist='SINGULARITY_BINDPATH'
@@ -15,6 +20,28 @@ process {
beforeScript = 'module load singularity/3.2.1'
executor = 'slurm'
queue = { task.memory >= 32.GB && task.cpus <= 12 ? 'Kim' : task.memory <= 24.GB && task.cpus <= 8 ? 'smallmem' : 'genomics' }
withName:TRIMGALORE {
memory = 31.GB
}
withLabel:process_low {
cpus = { check_max( 2 * task.attempt, 'cpus' ) }
memory = { check_max( 12.GB * task.attempt, 'memory' ) }
time = { check_max( 6.h * task.attempt, 'time' ) }
}
withLabel:process_medium {
cpus = { check_max( 16 * task.attempt, 'cpus' ) }
memory = { check_max( 31.GB * task.attempt, 'memory' ) }
time = { check_max( 8.h * task.attempt, 'time' ) }
}
withLabel:process_high {
cpus = { check_max( 12 * task.attempt, 'cpus' ) }
memory = { check_max( 120.GB * task.attempt, 'memory' ) }
time = { check_max( 16.h * task.attempt, 'time' ) }
}
}
params {

conf/wcm.config (new file)

@@ -0,0 +1,28 @@
singularityDir = "/athena/elementolab/scratch/reference/.singularity/singularity_images_nextflow"
params {
config_profile_description = 'Weill Cornell Medicine, Scientific Computing Unit Slurm cluster profile provided by nf-core/configs'
config_profile_contact = 'Ashley Stephen Doane, PhD (@DoaneAS)'
igenomes_base = '/athena/elementolab/scratch/reference/igenomes'
}
singularity {
enabled = true
envWhitelist='SINGULARITY_BINDPATH'
cacheDir = "/athena/elementolab/scratch/reference/.singularity/singularity_images_nextflow"
autoMounts = true
}
process {
executor = 'slurm'
queue = 'panda_physbio'
scratch = true
scratch = '/scratchLocal/`whoami`_${SLURM_JOBID}'
}
params {
max_memory = 32.GB
max_cpus = 8
max_time = 24.h
}

docs/ifb_core.md (new file)

@@ -0,0 +1,40 @@
# nf-core/configs: IFB core Configuration
All nf-core pipelines have been successfully configured for use on the cluster of the IFB (Institut Français de Bioinformatique).
To use, run the pipeline with `-profile ifb_core`. This will download and launch the [`ifb_core.config`](../conf/ifb_core.config) which has been pre-configured with a setup suitable for the IFB core cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## How to use on IFB core
Before running the pipeline you will need to load Nextflow using the environment module system on IFB core. You can do this by issuing the commands below:
```bash
# Login to a compute node
srun --pty bash
## Load Nextflow and Singularity environment modules
module purge
module load nextflow/20.04.1
# Run a downloaded/git-cloned nextflow workflow
nextflow run \
nf-core/workflow \
-resume \
-profile ifb_core \
--email my-email@example.org \
-c my-specific.config
...
# Or let Nextflow pull an nf-core pipeline directly by name
nextflow run nf-core/rnaseq ...
```
## Databanks
Local copies of several genomes are available in the `/shared/bank` directory. See
our [databank page](https://ifb-elixirfr.gitlab.io/cluster/doc/banks/)
to search for your favorite genome.
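For example, you can browse the available banks and point a pipeline at a local reference instead of downloading it. The sketch below is illustrative only: the sub-paths and the `--fasta` parameter are assumptions, so check the databank page for the actual layout.
```bash
# List the locally mirrored reference data
ls /shared/bank

# Illustrative only: pass a local FASTA to a pipeline rather than fetching it
nextflow run nf-core/rnaseq \
    -profile ifb_core \
    --fasta /shared/bank/<species>/<assembly>/fasta/genome.fa
```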
>NB: You will need an account on the IFB core HPC cluster in order to run the pipeline. If in doubt, contact IT or visit the [account page](https://my.cluster.france-bioinformatique.fr/manager2/login).

docs/jax.md (new file)

@@ -0,0 +1,8 @@
# nf-core/configs: JAX Configuration
All nf-core pipelines have been successfully configured for use on the JAX Sumner cluster at The Jackson Laboratory.
To use, run the pipeline with `-profile jax`. This will download and launch the [`jax.config`](../conf/jax.config) which has been pre-configured with a setup suitable for the JAX Sumner cluster. Using this profile, a docker image containing all of the required software will be downloaded and converted to a Singularity image before execution of the pipeline, and Slurm will be used as the job scheduler.
>NB: You will need an account on the JAX HPC cluster in order to run the pipeline. If in doubt contact IT.
>NB: Nextflow should not be executed on the login nodes. If in doubt contact IT.
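For illustration, a typical launch from an interactive compute node might look like the sketch below; the `srun` invocation and the pipeline parameters are assumptions, so adjust them for your project.
```bash
# Start an interactive session on a compute node (Nextflow must not run on a login node)
srun --pty bash

# Launch an nf-core pipeline with the JAX profile; Singularity is loaded by the profile itself
nextflow run nf-core/rnaseq \
    -profile jax \
    --input samplesheet.csv \
    --outdir results
```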

docs/wcm.md (new file)

@@ -0,0 +1,24 @@
# nf-core/configs: Weill Cornell Medicine Configuration
All nf-core pipelines have been successfully configured for use on the panda cluster at Weill Cornell Medicine (WCM).
To use, run the pipeline with `-profile wcm`. This will download and launch the [`wcm.config`](../conf/wcm.config) which has been pre-configured with a setup suitable for the WCM slurm cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
## Running the workflow on the WCM cluster
Nextflow is not installed by default on the WCM cluster.
- Install Nextflow: [instructions here](https://www.nextflow.io/docs/latest/getstarted.html#)
Nextflow manages each process as a separate job that is submitted to the cluster by using the `sbatch` command.
Nextflow shouldn't be run directly on a login node; run it on a compute node, or on a lab-specific interactive server configured as a submit host.
1. Run nextflow on a compute node or interactive server with submit host capability:
```bash
# Run nextflow workflow
nextflow run \
nf-core/chipseq \
-resume \
-profile test,wcm
```
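If no submit-host server is available to you, one common pattern is to request an interactive compute node first. This is a sketch; the `srun` resource flags are assumptions, so check with SCU support for recommended values.
```bash
# Request an interactive shell on a compute node before launching Nextflow
srun --pty --mem=8G --cpus-per-task=2 bash

# Then launch the workflow as shown above
nextflow run nf-core/chipseq -resume -profile test,wcm
```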

nfcore_custom.config

@@ -25,6 +25,7 @@ profiles {
ebc { includeConfig "${params.custom_config_base}/conf/ebc.config" }
eddie { includeConfig "${params.custom_config_base}/conf/eddie.config" }
icr_davros { includeConfig "${params.custom_config_base}/conf/icr_davros.config" }
ifb_core { includeConfig "${params.custom_config_base}/conf/ifb_core.config" }
imperial { includeConfig "${params.custom_config_base}/conf/imperial.config" }
imperial_mb { includeConfig "${params.custom_config_base}/conf/imperial_mb.config" }
genotoul { includeConfig "${params.custom_config_base}/conf/genotoul.config" }
@@ -46,6 +47,7 @@ profiles {
uppmax { includeConfig "${params.custom_config_base}/conf/uppmax.config" }
utd_ganymede { includeConfig "${params.custom_config_base}/conf/utd_ganymede.config" }
uzh { includeConfig "${params.custom_config_base}/conf/uzh.config" }
jax { includeConfig "${params.custom_config_base}/conf/jax.config" }
}
// If user hostnames contain one of these substring and they are