
Merge branch 'nf-core:master' into feat/hasta_java_memory_fix

Commit c8c795595a by Emil Bertilsson, 2022-06-09 15:10:46 +02:00 (committed by GitHub)
22 changed files with 434 additions and 13 deletions

.github/workflows/fix-linting.yml (new file)

@@ -0,0 +1,55 @@
name: Fix linting from a comment
on:
  issue_comment:
    types: [created]

jobs:
  deploy:
    # Only run if comment is on a PR with the main repo, and if it contains the magic keywords
    if: >
      contains(github.event.comment.html_url, '/pull/') &&
      contains(github.event.comment.body, '@nf-core-bot fix linting') &&
      github.repository == 'nf-core/configs'
    runs-on: ubuntu-latest
    steps:
      # Use the @nf-core-bot token to check out so we can push later
      - uses: actions/checkout@v3
        with:
          token: ${{ secrets.nf_core_bot_auth_token }}

      # Action runs on the issue comment, so we don't get the PR by default
      # Use the gh cli to check out the PR
      - name: Checkout Pull Request
        run: gh pr checkout ${{ github.event.issue.number }}
        env:
          GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }}

      - uses: actions/setup-node@v2

      - name: Install Prettier
        run: npm install -g prettier @prettier/plugin-php

      # Check that we actually need to fix something
      - name: Run 'prettier --check'
        id: prettier_status
        run: |
          if prettier --check ${GITHUB_WORKSPACE}; then
            echo "::set-output name=result::pass"
          else
            echo "::set-output name=result::fail"
          fi

      - name: Run 'prettier --write'
        if: steps.prettier_status.outputs.result == 'fail'
        run: prettier --write ${GITHUB_WORKSPACE}

      - name: Commit & push changes
        if: steps.prettier_status.outputs.result == 'fail'
        run: |
          git config user.email "core@nf-co.re"
          git config user.name "nf-core-bot"
          git config push.default upstream
          git add .
          git status
          git commit -m "[automated] Fix linting with Prettier"
          git push
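For contributors who prefer to fix the formatting locally rather than triggering the bot, the same checks can be reproduced with the commands the workflow uses (a minimal sketch, run from the repository root; it assumes Node.js/npm is available):

```bash
# Install the same formatter and plugin the workflow installs
npm install -g prettier @prettier/plugin-php

# See whether anything needs reformatting, then apply the fixes in place
prettier --check .
prettier --write .
```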


@@ -66,6 +66,7 @@ jobs:
- "jax"
- "lugh"
- "marvin"
- "mjolnir_globe"
- "maestro"
- "mpcdf"
- "munin"
@@ -86,6 +87,7 @@
- "utd_sysbio"
- "uzh"
- "vai"
- "vsc_ugent"
steps:
- uses: actions/checkout@v1
- name: Install Nextflow


@@ -121,6 +121,7 @@ Currently documentation is available for the following systems:
- [LUGH](docs/lugh.md)
- [MAESTRO](docs/maestro.md)
- [MARVIN](docs/marvin.md)
- [MJOLNIR_GLOBE](docs/mjolnir_globe.md)
- [MPCDF](docs/mpcdf.md)
- [MUNIN](docs/munin.md)
- [NU_GENOMICS](docs/nu_genomics.md)
@@ -139,6 +140,7 @@ Currently documentation is available for the following systems:
- [UTD_SYSBIO](docs/utd_sysbio.md)
- [UZH](docs/uzh.md)
- [VAI](docs/vai.md)
- [VSC_UGENT](docs/vsc_ugent.md)
### Uploading to `nf-core/configs`
@@ -192,13 +194,18 @@ Currently documentation is available for the following pipelines within specific
  - [UPPMAX](docs/pipeline/ampliseq/uppmax.md)
- eager
  - [EVA](docs/pipeline/eager/eva.md)
- mag
  - [EVA](docs/pipeline/mag/eva.md)
- rnafusion
  - [MUNIN](docs/pipeline/rnafusion/munin.md)
- rnavar
  - [MUNIN](docs/pipeline/rnavar/munin.md)
- sarek
  - [MUNIN](docs/pipeline/sarek/munin.md)
  - [UPPMAX](docs/pipeline/sarek/uppmax.md)
- taxprofiler
  - [EVA](docs/pipeline/taxprofiler/eva.md)
  - [HASTA](docs/pipeline/taxprofiler/hasta.md)
### Pipeline-specific documentation


@@ -17,7 +17,7 @@ process {
    executor = 'sge'
    penv = 'smp'
    queue = 'all.q'
    clusterOptions = { "-S /bin/bash -V -j y -o output.log -l h_vmem=${task.memory.toGiga()}G" }
    clusterOptions = { "-S /bin/bash -V -j y -o output.sge -l h_vmem=${task.memory.toGiga()}G" }
}
executor {
@@ -37,7 +37,7 @@ profiles {
    process {
        queue = { task.memory > 700.GB ? 'bigmem.q' : 'archgen.q' }
        clusterOptions = { "-S /bin/bash -V -j y -o output.log -l h_vmem=${task.memory.toGiga()}G" }
        clusterOptions = { "-S /bin/bash -V -j y -o output.sge -l h_vmem=${task.memory.toGiga()}G" }
    }
    singularity {


@@ -15,7 +15,8 @@ google.zone = params.google_zone
google.lifeSciences.debug = params.google_debug
workDir = params.google_bucket
google.lifeSciences.preemptible = params.google_preemptible
if (google.lifeSciences.preemptible) {
    process.errorStrategy = { task.exitStatus==14 ? 'retry' : 'terminate' }
    process.errorStrategy = { task.exitStatus in [8,10,14] ? 'retry' : 'terminate' }
    process.maxRetries = 5
}


@@ -2,23 +2,22 @@
params {
    config_profile_description = 'The IFB core cluster profile'
    config_profile_contact = 'https://community.france-bioinformatique.fr'
    config_profile_url = 'https://www.france-bioinformatique.fr/'
    config_profile_url = 'https://ifb-elixirfr.gitlab.io/cluster/doc/cluster-desc/'
}
singularity {
    // need one image per execution
    enabled = true
    runOptions = '-B /shared'
}
process {
    executor = 'slurm'
    queue = { task.time <= 24.h ? 'fast' : 'long' }
}
params {
    igenomes_ignore = true
    // Max resources requested by a normal node on genotoul.
    max_memory = 240.GB
    max_cpus = 28
    max_time = 96.h
    max_memory = 252.GB
    max_cpus = 56
    max_time = 720.h
}

conf/mjolnir_globe.config (new file)

@@ -0,0 +1,25 @@
// Profile config names for nf-core/configs
params {
    config_profile_description = 'Section for Hologenomics and Section for Molecular Ecology and Evolution @ Globe Institute, University of Copenhagen - mjolnir_globe profile provided by nf-core/configs.'
    config_profile_contact = 'Aashild Vaagene (@ashildv)'
    config_profile_url = 'https://globe.ku.dk/research/'
    max_memory = 500.GB
    max_cpus = 50
    max_time = 720.h
}

singularity {
    enabled = true
    autoMounts = true
    cacheDir = '/maps/projects/mjolnir1/data/cache/nf-core/singularity'
}

process {
    executor = 'slurm'
}

cleanup = true

executor {
    queueSize = 10
}


@@ -197,6 +197,10 @@ process {
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }
    withName:eigenstrat_snp_coverage {
        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
    }
    withName:kraken_merge {
        beforeScript = 'export OPENBLAS_NUM_THREADS=1; export OMP_NUM_THREADS=1'
    }


@@ -0,0 +1,14 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_description = 'nf-core/mag EVA profile provided by nf-core/configs'
}

process {
    withName: FASTQC {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }
}


@@ -0,0 +1,29 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'James Fellows Yates (@jfy133)'
    config_profile_description = 'nf-core/taxprofiler EVA profile provided by nf-core/configs'
}

process {
    withName: BBMAP_BBDUK {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 2)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: MALT_RUN {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 4)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: METAPHLAN3 {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 3)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }

    withName: MEGAN_RMA2INFO {
        clusterOptions = { "-S /bin/bash -V -l h_vmem=${(task.memory.toGiga() * 4)}G" }
        errorStrategy = { task.exitStatus in [1,143,137,104,134,139,140] ? 'retry' : 'finish' }
    }
}


@@ -0,0 +1,16 @@
params {
    // Specific nf-core/configs params
    config_profile_contact = 'Sofia Stamouli (@sofstam)'
    config_profile_description = 'nf-core/taxprofiler HASTA profile provided by nf-core/configs'
}

process {
    withName: 'BBMAP_BBDUK' {
        memory = { check_max( 80.GB * task.attempt, 'memory' ) }
    }

    withName: 'MALT_RUN' {
        memory = { check_max( 80.GB * task.attempt, 'memory' ) }
    }
}

conf/vsc_ugent.config (new file)

@@ -0,0 +1,117 @@
// Define the Scratch directory
def scratch_dir = System.getenv("VSC_SCRATCH_VO_USER") ?: "scratch/"

// Specify the work directory
workDir = "$scratch_dir/work"

// Perform work directory cleanup when the run has successfully completed
// cleanup = true

// Reduce the job submission rate to about 10 per second so that the server won't be bombarded with jobs
executor {
    submitRateLimit = '10 sec'
}

// Specify that singularity should be used and where the cache dir will be for the images
singularity {
    enabled = true
    autoMounts = true
    cacheDir = "$scratch_dir/singularity"
}

env {
    SINGULARITY_CACHEDIR = "$scratch_dir/.singularity"
}

// Define profiles for each cluster
profiles {
    skitty {
        params {
            config_profile_description = 'HPC_SKITTY profile for use on the Skitty cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 177.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'skitty'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }

    swalot {
        params {
            config_profile_description = 'HPC_SWALOT profile for use on the Swalot cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 116.GB
            max_cpus = 20
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'swalot'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }

    victini {
        params {
            config_profile_description = 'HPC_VICTINI profile for use on the Victini cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 88.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'victini'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }

    kirlia {
        params {
            config_profile_description = 'HPC_KIRLIA profile for use on the Kirlia cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 738.GB
            max_cpus = 36
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'kirlia'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }

    doduo {
        params {
            config_profile_description = 'HPC_DODUO profile for use on the Doduo cluster of the VSC HPC.'
            config_profile_contact = 'Nicolas Vannieuwkerke (@nvnieuwk)'
            config_profile_url = 'https://www.ugent.be/hpc/en'
            max_memory = 250.GB
            max_cpus = 96
            max_time = 72.h
        }

        process {
            executor = 'slurm'
            queue = 'doduo'
            maxRetries = 2
            scratch = "$scratch_dir"
        }
    }
}


@@ -6,7 +6,8 @@ To use, run the pipeline with `-profile ifb_core`. This will download and launch
## How to use on IFB core
Before running the pipeline you will need to load Nextflow using the environment module system on IFB core. You can do this by issuing the commands below:
Here is [the link to the cluster's documentation](https://ifb-elixirfr.gitlab.io/cluster/doc/quick-start/).
Before running the pipeline you will need to load Nextflow and other dependencies using the environment module system on IFB core. You can do this by issuing the commands below:
```bash
# Login to a compute node
@@ -14,7 +15,10 @@ srun --pty bash
## Load Nextflow and Singularity environment modules
module purge
module load nextflow/20.04.1
module load nextflow
module load singularity
module load openjdk
# Run a downloaded/git-cloned nextflow workflow from
nextflow run \\

docs/mjolnir_globe.md (new file)

@@ -0,0 +1,31 @@
# nf-core/configs: Section for Hologenomics at GLOBE, University of Copenhagen (Mjolnir server) Configuration
> **NB:** You will need an account on Mjolnir to run the pipeline. If in doubt contact IT.
Prior to running the pipeline for the first time with the [`mjolnir_globe.config`](../conf/mjolnir_globe.config), users **must** create a hidden directory called `.tmp_nfcore` in their data/project directory on Mjolnir, to which the temp files from nf-core pipelines will be redirected via the `NXF_TEMP` environment variable (see below).
The contents of the `.tmp_nfcore` directory should be periodically deleted manually to save on space.
If `NXF_TEMP` is not set to redirect temp files properly, the `/tmp` directory on the compute nodes will be used and quickly filled up, which blocks anyone from working on those nodes until the offending user removes their files.
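For example, the directory could be created once with the following command (a sketch; the path mirrors the `NXF_TEMP` value used below):

```bash
# Create the hidden temp directory in your project space on Mjolnir
mkdir -p /maps/projects/mjolnir1/people/$USER/.tmp_nfcore
```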
The following lines **must** be added by users to their `~/.bash_profile`:
```bash
# Redirect temp files away from the /tmp directories on the compute nodes and the head node
export NXF_TEMP=/maps/projects/mjolnir1/people/$USER/.tmp_nfcore

# Limit the memory of the Nextflow Java virtual machine
export NXF_OPTS='-Xms1g -Xmx4g'
```
Once you have created the `.tmp_nfcore` directory and added the above lines to your `~/.bash_profile`, you can run an nf-core pipeline.
Before running a pipeline you will need to load Java, Miniconda, Singularity and Nextflow. You can do this by including the commands below in your SLURM/sbatch script:
```bash
## Load Java and Nextflow environment modules
module purge
module load jdk/1.8.0_291 miniconda singularity/3.8.0 nextflow/21.04.1.5556
```
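For reference, a minimal sketch of what such an sbatch script could look like (resource values, the pipeline name and its parameters are placeholders, not recommendations):

```bash
#!/bin/bash
#SBATCH --job-name=nf-core-run
#SBATCH --cpus-per-task=2
#SBATCH --mem=8G
#SBATCH --time=24:00:00

## Load Java and Nextflow environment modules
module purge
module load jdk/1.8.0_291 miniconda singularity/3.8.0 nextflow/21.04.1.5556

# Launch an nf-core pipeline (replace <pipeline> and the parameters with your own)
nextflow run nf-core/<pipeline> -profile mjolnir_globe --input samplesheet.csv --outdir results
```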
All of the intermediate output files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
The `mjolnir_globe` config contains a `cleanup` option that removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully, the `work/` directory should be removed manually to save storage space.
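If a run does have to be cleaned up by hand, this can be as simple as the following (assuming you are in the directory the pipeline was launched from):

```bash
# Remove the intermediate files of a failed or abandoned run
rm -rf work/
```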

docs/pipeline/mag/eva.md (new file)

@@ -0,0 +1,15 @@
# nf-core/configs: eva mag specific configuration
Extra specific configuration for mag pipeline
## Usage
To use, run the pipeline with `-profile eva`.
This will download and launch the mag specific [`eva.config`](../../../conf/pipeline/mag/eva.config) which has been pre-configured with a setup suitable for the MPI-EVA cluster.
Example: `nextflow run nf-core/mag -profile eva`
## mag specific configurations for eva
Specific configurations for eva have been made for mag, primarily adjusting the SGE memory requirements of Java tools (e.g. FastQC).


@@ -0,0 +1,19 @@
# nf-core/configs: eva taxprofiler specific configuration
Extra specific configuration for taxprofiler pipeline
## Usage
To use, run the pipeline with `-profile eva`.
This will download and launch the taxprofiler specific [`eva.config`](../../../conf/pipeline/taxprofiler/eva.config) which has been pre-configured with a setup suitable for the MPI-EVA cluster.
Example: `nextflow run nf-core/taxprofiler -profile eva`
## taxprofiler specific configurations for eva
Specific configurations for eva have been made for taxprofiler.
### General profiles
- The general MPI-EVA profile runs with default nf-core/taxprofiler parameters, but with modifications to account for issues SGE has with Java and Python tools, namely: BBDUK, MALT, MetaPhlAn3, and MEGAN


@@ -0,0 +1,19 @@
# nf-core/configs: hasta taxprofiler specific configuration
Extra specific configuration for taxprofiler pipeline
## Usage
To use, run the pipeline with `-profile hasta`.
This will download and launch the taxprofiler specific [`hasta.config`](../../../conf/pipeline/taxprofiler/hasta.config) which has been pre-configured with a setup suitable for the hasta cluster.
Example: `nextflow run nf-core/taxprofiler -profile hasta`
## taxprofiler specific configurations for hasta
Specific configurations for hasta have been made for taxprofiler.
### General profiles
- The general hasta profile runs with default nf-core/taxprofiler parameters, but with modifications to account for memory issues with BBDUK and MALT.

docs/vsc_ugent.md (new file)

@@ -0,0 +1,35 @@
# nf-core/configs: University of Ghent High Performance Computing Infrastructure (VSC)
> **NB:** You will need an [account](https://www.ugent.be/hpc/en/access/faq/access) to use the HPC cluster to run the pipeline.
First you should go to the cluster you want to run the pipeline on. You can check which clusters have the most free capacity on [this overview page](https://shieldon.ugent.be:8083/pbsmon-web-users/). Use the following commands to switch between clusters:
```shell
module purge
module swap cluster/<CLUSTER>
```
Before running the pipeline you will need to create a PBS script to submit as a job.
```bash
#!/bin/bash
module load Nextflow
nextflow run <pipeline> -profile vsc_ugent,<CLUSTER> <Add your other parameters>
```
All of the intermediate files required to run the pipeline will be stored in the `work/` directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large, and all of the main output files will be saved in the `results/` directory anyway.
The config contains a `cleanup` option that removes the `work/` directory automatically once the pipeline has completed successfully. If the run does not complete successfully, the `work/` directory should be removed manually to save storage space. The default work directory is set to `$VSC_SCRATCH_VO_USER/work` by this configuration.
You can also add several TORQUE options to the PBS script; more about these can be found in [the HPC-UGent documentation](http://hpcugent.github.io/vsc_user_docs/pdf/intro-HPC-linux-gent.pdf#appendix.B).
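For illustration, a few commonly used TORQUE directives could be added at the top of the script (a sketch only; the resource values are placeholders and should be adapted to your run):

```bash
#!/bin/bash
#PBS -N nf-core-run            # job name
#PBS -l nodes=1:ppn=4          # one node, four cores for the Nextflow head job
#PBS -l walltime=72:00:00      # maximum wall time
#PBS -l mem=16gb               # memory for the head job

module load Nextflow

nextflow run <pipeline> -profile vsc_ugent,<CLUSTER> <Add your other parameters>
```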
Submit your job to the cluster with the following command:
```shell
qsub <script name>.pbs
```
> **NB:** The profile only works for the clusters `skitty`, `swalot`, `victini`, `kirlia` and `doduo`.
> **NB:** By default, both the `work/` directory and the `singularity/` image cache are located under `$VSC_SCRATCH_VO_USER`.
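If you run from a Virtual Organisation, it can be worth checking that this variable is actually set before launching, since the config falls back to a relative `scratch/` directory otherwise (a minimal sketch):

```bash
# Print the VO scratch path, or fail with a clear message if it is not set
echo "${VSC_SCRATCH_VO_USER:?VSC_SCRATCH_VO_USER is not set}"
```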


@@ -49,6 +49,7 @@ profiles {
lugh { includeConfig "${params.custom_config_base}/conf/lugh.config" }
maestro { includeConfig "${params.custom_config_base}/conf/maestro.config" }
marvin { includeConfig "${params.custom_config_base}/conf/marvin.config" }
mjolnir_globe { includeConfig "${params.custom_config_base}/conf/mjolnir_globe.config" }
mpcdf { includeConfig "${params.custom_config_base}/conf/mpcdf.config" }
munin { includeConfig "${params.custom_config_base}/conf/munin.config" }
nihbiowulf { includeConfig "${params.custom_config_base}/conf/nihbiowulf.config" }
@@ -68,4 +69,5 @@
utd_sysbio { includeConfig "${params.custom_config_base}/conf/utd_sysbio.config" }
uzh { includeConfig "${params.custom_config_base}/conf/uzh.config" }
vai { includeConfig "${params.custom_config_base}/conf/vai.config" }
vsc_ugent { includeConfig "${params.custom_config_base}/conf/vsc_ugent.config" }
}

pipeline/mag.config (new file)

@@ -0,0 +1,13 @@
/*
 * -------------------------------------------------
 *  nfcore/mag custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/mag folder and imported
 * under a profile name here.
 */

profiles {
    eva { includeConfig "${params.custom_config_base}/conf/pipeline/mag/eva.config" }
}


@@ -0,0 +1,14 @@
/*
 * -------------------------------------------------
 *  nfcore/taxprofiler custom profile Nextflow config file
 * -------------------------------------------------
 * Config options for custom environments.
 * Cluster-specific config options should be saved
 * in the conf/pipeline/taxprofiler folder and imported
 * under a profile name here.
 */

profiles {
    hasta { includeConfig "${params.custom_config_base}/conf/pipeline/taxprofiler/hasta.config" }
    eva   { includeConfig "${params.custom_config_base}/conf/pipeline/taxprofiler/eva.config" }
}