/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    nf-core/taxprofiler Nextflow config file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Default config options for all compute environments
----------------------------------------------------------------------------------------
*/

// Global default params, used in configs
params {

    // TODO nf-core: Specify your pipeline's command line flags
    // Input options
    input                       = null

    // References
    genome                      = null
    igenomes_base               = 's3://ngi-igenomes/igenomes'
    igenomes_ignore             = false

    // MultiQC options
    multiqc_config              = null
    multiqc_title               = null
    max_multiqc_email_size      = '25.MB'

    // Boilerplate options
    outdir                      = null
    tracedir                    = "${params.outdir}/pipeline_info"
    publish_dir_mode            = 'copy'
    email                       = null
    email_on_fail               = null
    plaintext_email             = false
    monochrome_logs             = false
    help                        = false
    validate_params             = true
    show_hidden_params          = false
    schema_ignore_params        = 'genomes,fasta'
    enable_conda                = false

    // Config options
    custom_config_version       = 'master'
    custom_config_base          = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
    config_profile_description  = null
    config_profile_contact      = null
    config_profile_url          = null
    config_profile_name         = null

    // Max resource options
    // Defaults only, expecting to be overwritten
    max_memory                  = '128.GB'
    max_cpus                    = 16
    max_time                    = '240.h'

    // Databases
    databases                   = null

    // FASTQ preprocessing
    perform_shortread_qc          = false
    shortread_qc_tool             = 'fastp'
    shortread_qc_skipadaptertrim  = false
    shortread_qc_mergepairs       = false
    shortread_qc_excludeunmerged  = false
    shortread_qc_adapter1         = null
    shortread_qc_adapter2         = null
    shortread_qc_minlength        = 15
    perform_longread_qc           = false
    longread_qc_run_clip          = false
    longread_qc_run_filter        = false
    longread_qc_minlength         = 1000
    longread_qc_keep_percent      = 90
    longread_qc_target_bases      = 500000000
    save_preprocessed_reads       = false

    // Complexity filtering
    perform_shortread_complexityfilter                   = false
    shortread_complexityfilter_tool                      = 'bbduk'
    shortread_complexityfilter_entropy                   = 0.3
    shortread_complexityfilter_bbduk_windowsize          = 50
    shortread_complexityfilter_bbduk_mask                = false
    shortread_complexityfilter_prinseqplusplus_mode      = 'entropy'
    shortread_complexityfilter_prinseqplusplus_dustscore = 0.5
    shortread_complexityfilter_fastp_threshold           = 30
    save_complexityfiltered_reads                        = false

    // Run merging
    perform_runmerging          = false
    save_runmerged_reads        = false

    // Host removal
    perform_shortread_hostremoval = false
    perform_longread_hostremoval  = false
    hostremoval_reference         = null
    shortread_hostremoval_index   = null
    longread_hostremoval_index    = null
    save_hostremoval_index        = false
    save_hostremoval_mapped       = false
    save_hostremoval_unmapped     = false

    // MALT
    run_malt                    = false
    malt_mode                   = 'BlastN'
    malt_generate_megansummary  = false
    malt_save_reads             = false

    // kraken2
    run_kraken2                      = false
    kraken2_save_reads               = false
    kraken2_save_readclassification  = false

    // centrifuge
    run_centrifuge              = false
    centrifuge_save_reads       = false

    // metaphlan3
    run_metaphlan3              = false

    // kaiju
    run_kaiju                   = false
    kaiju_taxon_name            = 'species'

    // diamond
    run_diamond                 = false
    diamond_output_format       = 'tsv'   // TSV is currently the only DIAMOND output format that includes taxonomic information
    diamond_save_reads          = false   // setting this to true overrides the default DIAMOND output format, so no taxonomic profile is generated!
    // krona
    run_krona                   = false
    krona_taxonomy_directory    = null

}

// Load base.config by default for all pipelines
includeConfig 'conf/base.config'

// Load nf-core custom profiles from different institutions
try {
    includeConfig "${params.custom_config_base}/nfcore_custom.config"
} catch (Exception e) {
    System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config")
}

// Load nf-core/taxprofiler custom profiles from different institutions.
// Warning: Uncomment only if a pipeline-specific institutional config already exists on nf-core/configs!
try {
    includeConfig "${params.custom_config_base}/pipeline/taxprofiler.config"
} catch (Exception e) {
    System.err.println("WARNING: Could not load nf-core/config/taxprofiler profiles: ${params.custom_config_base}/pipeline/taxprofiler.config")
}

profiles {
    debug { process.beforeScript = 'echo $HOSTNAME' }
    conda {
        params.enable_conda    = true
        docker.enabled         = false
        singularity.enabled    = false
        podman.enabled         = false
        shifter.enabled        = false
        charliecloud.enabled   = false
    }
    docker {
        docker.enabled         = true
        docker.userEmulation   = true
        singularity.enabled    = false
        podman.enabled         = false
        shifter.enabled        = false
        charliecloud.enabled   = false
    }
    singularity {
        singularity.enabled    = true
        singularity.autoMounts = true
        docker.enabled         = false
        podman.enabled         = false
        shifter.enabled        = false
        charliecloud.enabled   = false
    }
    podman {
        podman.enabled         = true
        docker.enabled         = false
        singularity.enabled    = false
        shifter.enabled        = false
        charliecloud.enabled   = false
    }
    shifter {
        shifter.enabled        = true
        docker.enabled         = false
        singularity.enabled    = false
        podman.enabled         = false
        charliecloud.enabled   = false
    }
    charliecloud {
        charliecloud.enabled   = true
        docker.enabled         = false
        singularity.enabled    = false
        podman.enabled         = false
        shifter.enabled        = false
    }
    test                 { includeConfig 'conf/test.config'                 }
    test_full            { includeConfig 'conf/test_full.config'            }
    test_noprofiling     { includeConfig 'conf/test_noprofiling.config'     }
    test_nopreprocessing { includeConfig 'conf/test_nopreprocessing.config' }
}

// Load igenomes.config if required
if (!params.igenomes_ignore) {
    includeConfig 'conf/igenomes.config'
} else {
    params.genomes = [:]
}
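// Illustrative example: the parameters defined above can be overridden on the command
// line or via a custom config passed with `-c`. The sample sheet and database CSV file
// names below are placeholders, not files shipped with the pipeline, e.g.:
//
//   nextflow run nf-core/taxprofiler -profile docker \
//       --input samplesheet.csv \
//       --databases databases.csv \
//       --outdir ./results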
// Export these variables to prevent local Python/R libraries from conflicting with those in the container
// The JULIA depot path has been adjusted to a fixed path `/usr/local/share/julia` that needs to be used for packages in the container.
// See https://apeltzer.github.io/post/03-julia-lang-nextflow/ for details on that. Once we have a common agreement on where to keep Julia packages, this is adjustable.
env {
    PYTHONNOUSERSITE = '1'
    R_PROFILE_USER   = "/.Rprofile"
    R_ENVIRON_USER   = "/.Renviron"
    JULIA_DEPOT_PATH = "/usr/local/share/julia"
}

// Capture exit codes from upstream processes when piping
process.shell = ['/bin/bash', '-euo', 'pipefail']

def trace_timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss')
timeline {
    enabled = true
    file    = "${params.tracedir}/execution_timeline_${trace_timestamp}.html"
}
report {
    enabled = true
    file    = "${params.tracedir}/execution_report_${trace_timestamp}.html"
}
trace {
    enabled = true
    file    = "${params.tracedir}/execution_trace_${trace_timestamp}.txt"
}
dag {
    enabled = true
    file    = "${params.tracedir}/pipeline_dag_${trace_timestamp}.html"
}

manifest {
    name            = 'nf-core/taxprofiler'
    author          = 'nf-core community'
    homePage        = 'https://github.com/nf-core/taxprofiler'
    description     = 'Taxonomic profiling of shotgun metagenomic data'
    mainScript      = 'main.nf'
    nextflowVersion = '!>=21.10.3'
    version         = '1.0dev'
}

// Load modules.config for DSL2 module specific options
includeConfig 'conf/modules.config'

// Function to ensure that resource requirements don't go beyond
// a maximum limit
def check_max(obj, type) {
    if (type == 'memory') {
        try {
            if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
                return params.max_memory as nextflow.util.MemoryUnit
            else
                return obj
        } catch (all) {
            println "   ### ERROR ###   Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
                return params.max_time as nextflow.util.Duration
            else
                return obj
        } catch (all) {
            println "   ### ERROR ###   Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min( obj, params.max_cpus as int )
        } catch (all) {
            println "   ### ERROR ###   Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
}
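
// Illustrative sketch: check_max() is meant to cap per-process resource requests
// against params.max_cpus / max_memory / max_time, and is typically called from
// process selectors in conf/base.config. The label name and values below are
// assumptions for illustration only, not taken from this pipeline's base.config:
//
//   process {
//       withLabel:process_high {
//           cpus   = { check_max( 12                  , 'cpus'   ) }
//           memory = { check_max( 72.GB * task.attempt, 'memory' ) }
//           time   = { check_max( 16.h  * task.attempt, 'time'   ) }
//       }
//   }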