
Fix binac resources

Alexander Peltzer 2019-10-28 13:22:12 +01:00
commit eb6d23254d
8 changed files with 34 additions and 40 deletions


@@ -20,7 +20,7 @@ params {
   igenomes_base = '/nfsmounts/igenomes'
   max_memory = 1000.GB
   max_cpus = 28
-  max_time = 168.h
+  max_time = 48.h
 }
 weblog{
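These `max_*` values are ceilings rather than requests: Nextflow does not enforce them on its own, and nf-core pipelines apply them through a `check_max` helper defined in each pipeline's `nextflow.config`. A simplified sketch of that pattern (the `BWA_MEM` selector is hypothetical, and the real helper also guards against malformed values):

```groovy
// Simplified sketch of the nf-core check_max pattern -- not the verbatim
// helper, which additionally catches malformed params values.
def check_max(obj, type) {
    if (type == 'time') {
        def limit = params.max_time as nextflow.util.Duration
        // Clamp the requested walltime to the site-wide ceiling.
        return obj.compareTo(limit) == 1 ? limit : obj
    }
    return obj
}

process {
    withName: 'BWA_MEM' {  // hypothetical process name
        // 24.h * 3 = 72.h would exceed BinAC's new 48.h cap and is clamped.
        time = { check_max(24.h * task.attempt, 'time') }
    }
}
```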


@@ -8,25 +8,16 @@ params {
 process {
   executor = 'slurm'
   module = 'singularity/3.2.1'
-  queue = 'c'
+  queue = { task.memory <= 170.GB ? 'c' : 'm' }
+  clusterOptions = { task.time <= 8.h ? '--qos short': task.time <= 48.h ? '--qos medium' : '--qos long' }
 }
 singularity.enabled = true
 params {
-  target_qos = 'medium'
-  params.max_cpus = 36
-  params.max_memory = 170.GB
-  igenomesIgnore = true
-}
-if (params.target_qos == 'short') {
-  params.max_time = 8.h
-  process.clusterOptions = '--qos short'
-} else if (params.target_qos == 'medium') {
-  params.max_time = 2.d
-  process.clusterOptions = '--qos medium'
-} else {
   params.max_time = 14.d
-  process.clusterOptions = '--qos long'
+  params.max_cpus = 36
+  params.max_memory = 4.TB
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
 }
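The old `target_qos` switch pinned a whole run to a single QOS; the closures above are dynamic directives, re-evaluated for every task, so each job's queue and QOS follow its own `task.memory` and `task.time`. A minimal sketch with a hypothetical process showing how a retry that scales its resources migrates automatically:

```groovy
// Hypothetical process (not part of this commit) illustrating the
// dynamic directives above: each attempt re-evaluates queue/clusterOptions.
process ALIGN {
    errorStrategy 'retry'
    maxRetries 2
    time   { 6.h * task.attempt }    // attempt 1: 6.h    -> '--qos short'
    memory { 90.GB * task.attempt }  // attempt 2: 180.GB -> queue 'm'

    script:
    """
    echo "attempt ${task.attempt}"
    """
}
```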


@@ -55,7 +55,8 @@ params {
   awsregion = "us-west-2"
   awsqueue = "nextflow"
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
   fc_extra_attributes = 'gene_name'
   fc_group_features = 'gene_id'
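The same rename recurs in each remaining config: snake_case `igenomes_ignore` becomes canonical while the camelCase key is kept as a deprecated alias so existing command lines do not break. A sketch of how a pipeline-side `nextflow.config` can honour both spellings during the transition (the `conf/igenomes.config` path follows the usual nf-core layout, assumed here):

```groovy
// Sketch: accept both spellings while the camelCase form is deprecated.
params {
    igenomes_ignore = false
    igenomesIgnore  = false // deprecated alias, kept for old command lines
}

// Skip loading the iGenomes reference paths if either flag is set.
if (!(params.igenomes_ignore || params.igenomesIgnore)) {
    includeConfig 'conf/igenomes.config'
}
```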


@@ -16,7 +16,8 @@ process {
 }
 params {
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
   max_memory = 750.GB
   max_cpus = 80
   max_time = 336.h


@@ -18,7 +18,8 @@ process {
 }
 params {
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
   saveReference = true
   max_memory = 64.GB
   max_cpus = 20


@@ -19,5 +19,6 @@ params {
   max_cpus = 32
   max_memory = 128.GB
   max_time = 192.h
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
 }


@@ -16,7 +16,8 @@ process {
 }
 params {
-  igenomesIgnore = true
+  igenomes_ignore = true
+  igenomesIgnore = true //deprecated
   max_memory = 256.GB
   max_cpus = 28
   max_time = 24.h


@@ -4,35 +4,33 @@ All nf-core pipelines have been successfully configured for use on the tars clus
 To use, run the pipeline with `-profile pasteur`. This will download and launch the [`pasteur.config`](../conf/pasteur.config) which has been pre-configured with a setup suitable for the Pasteur cluster. Using this profile, a docker image containing all of the required software will be downloaded, and converted to a Singularity image before execution of the pipeline.
 ## Running the workflow on the Pasteur cluster
 Nextflow is not installed by default on the Pasteur cluster.
 - Install Nextflow : [here](https://www.nextflow.io/docs/latest/getstarted.html#)
 Nextflow manages each process as a separate job that is submitted to the cluster by using the sbatch command.
 Nextflow shouldn't run directly on the submission node but on a compute node.
 The compute nodes don't have access to internet so you need to run it offline.
 To do that:
 1. Create a virtualenv to install nf-core
 ```bash
 module purge
 module load Python/3.6.0
 module load java
 module load singularity
 cd /path/to/nf-core/workflows
 virtualenv .venv -p python3
 . .venv/bin/activate
 ```
 2. Install nf-core: [here](https://nf-co.re/tools#installation)
 3. Get nf-core pipeline and container: [here](https://nf-co.re/tools#downloading-pipelines-for-offline-use)
-4. Get the nf-core Pasteur profile: [here](https://github.com/nf-core/configs#offline-usage)
+4. Get the nf-core Pasteur profile: [here](https://github.com/nf-core/rnaseq/blob/master/docs/usage.md#--custom_config_base)
 5. Run nextflow on a compute node:
 ```bash
 # create a terminal
 tmux