diff --git a/simulatingrisk/hawkdovemulti/simrisk_batch.slurm b/simulatingrisk/hawkdovemulti/simrisk_batch.slurm
index 9d234ee..fae4efb 100644
--- a/simulatingrisk/hawkdovemulti/simrisk_batch.slurm
+++ b/simulatingrisk/hawkdovemulti/simrisk_batch.slurm
@@ -3,7 +3,7 @@
 #SBATCH --nodes=1 # node count
 #SBATCH --ntasks=1 # total number of tasks across all nodes
 #SBATCH --cpus-per-task=20 # cpu-cores per task
-#SBATCH --mem-per-cpu=600M # memory per cpu-core
+#SBATCH --mem-per-cpu=525M # memory per cpu-core
 #SBATCH --time=02:00:00 # total run time limit (HH:MM:SS)
 #SBATCH --mail-type=begin # send email when job begins
 #SBATCH --mail-type=end # send email when job ends
@@ -19,6 +19,7 @@
 # (and make sure the directory exists)
 # - add an SBATCH array directive if desired
 # - customize the batch run command as appropriate
+# - configure the time appropriately for the batch run
 
 module purge
 module load anaconda3/2023.9
@@ -27,12 +28,12 @@ conda activate simrisk
 
 # change working directory for data output
 cd /scratch/network//simrisk
 
-# test run: one iteration, max of 125 steps, no progress bar
-# (typically completes in less than 15 minutes when running on 20 CPUs)
-#simrisk-hawkdovemulti-batchrun --iterations 1 --max-step 125 --no-progress
+# test run: one iteration, max of 200 steps, no progress bar
+# (completed in ~18 minutes on 20 CPUs)
+#simrisk-hawkdovemulti-batchrun --iterations 1 --max-step 200 --no-progress
 
-# longer run: 10 iterations, max of 125 steps, no progress bar
-#simrisk-hawkdovemulti-batchrun --iterations 10 --max-step 125 --no-progress
+# longer run: 10 iterations, max of 200 steps, no progress bar
+#simrisk-hawkdovemulti-batchrun --iterations 10 --max-step 200 --no-progress
 
 # To generate data for a larger total number of iterations,
 # run the script as a job array.