From 710a7bb7741253f2b236e57fe2f42d1988820d74 Mon Sep 17 00:00:00 2001 From: Niklas Schandry Date: Mon, 27 Jan 2025 09:54:55 +0100 Subject: [PATCH] update biohpc_gen config to reduce load on slurm DB --- conf/biohpc_gen.config | 9 +++++++++ docs/biohpc_gen.md | 4 +++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/conf/biohpc_gen.config b/conf/biohpc_gen.config index ab61b59cb..0b117ae1f 100755 --- a/conf/biohpc_gen.config +++ b/conf/biohpc_gen.config @@ -14,6 +14,15 @@ process { executor = 'slurm' queue = { task.memory <= 1536.GB ? (task.time > 2.d || task.memory > 384.GB ? 'biohpc_gen_production' : 'biohpc_gen_normal') : 'biohpc_gen_highmem' } clusterOptions = '--clusters=biohpc_gen' + array = 25 +} + +executor { + $slurm { + queueStatInterval = '10 min' + pollInterval = '30 sec' + submitRateLimit = '25sec' + } } charliecloud { diff --git a/docs/biohpc_gen.md b/docs/biohpc_gen.md index 2025e2acb..c41e823c4 100644 --- a/docs/biohpc_gen.md +++ b/docs/biohpc_gen.md @@ -44,4 +44,6 @@ These are then available as modules (please confirm the module name using module module load nextflow/24.04.2-gcc12 charliecloud/0.35-gcc12 ``` -> NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. +> NB: BioHPC compute nodes are submit hosts. This means you can submit the Nextflow head job via sbatch. + +> NB: Sometimes you may want to have jobs submitted 'locally' in a large Nextflow job. Details on this can be found here: https://doku.lrz.de/nextflow-on-hpc-systems-test-operation-788693597.html