diff --git a/pyproject.toml b/pyproject.toml
index aafbc23..f1bd1bf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,11 +13,14 @@ classifiers = [
dependencies = [
"mesa==2.1.5",
# "mesa @ git+https://github.com/projectmesa/mesa.git@main",
- "matplotlib",
+ "matplotlib", # required by mesa
"altair>5.0.1"
]
dynamic = ["version", "readme"]
+[project.scripts]
+simrisk-hawkdovemulti-batchrun = "simulatingrisk.hawkdovemulti.batch_run:main"
+
[tool.setuptools.dynamic]
version = {attr = "simulatingrisk.__version__"}
readme = {file = ["README.md"]}
diff --git a/simulatingrisk/hawkdove/README.md b/simulatingrisk/hawkdove/README.md
index 4bdec59..3e38869 100644
--- a/simulatingrisk/hawkdove/README.md
+++ b/simulatingrisk/hawkdove/README.md
@@ -1,6 +1,6 @@
# Hawk-Dove with risk attitudes
-Hawk/Dove game with variable risk attitudes
+Hawk/Dove game with risk attitudes
## Game description
@@ -14,6 +14,7 @@ This is a variant of the Hawk/Dove Game: https://en.wikipedia.org/wiki/Chicken_(
BACKGROUND: An unpublished paper by Simon Blessenohl shows that the equilibrium in this game is different for EU maximizers than for REU maximizers (all with the same risk-attitude), and that REU maximizers do better as a population (basically, play DOVE more often)
We want to know: what happens when different people have _different_ risk-attitudes.
+(See also the variant simulation [Hawk/Dove game with multiple risk attitudes](../hawkdovemulti/).)
GAME: Hawk-Dove with risk-attitudes
@@ -49,7 +50,7 @@ This game has a discrete set of options instead of probability, so instead of de
r | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
Plays H when: |
- never |
+ never |
$\geq1$ D |
$\geq2$ D |
$\geq3$ D |
@@ -62,7 +63,7 @@ This game has a discrete set of options instead of probability, so instead of de
|
risk seeking |
- EU maximizer (risk neutral) |
+ EU maximizer (risk neutral) |
EU maximizer (risk neutral) |
risk avoidant |
diff --git a/simulatingrisk/hawkdove/model.py b/simulatingrisk/hawkdove/model.py
index f8929d0..5cc9aa0 100644
--- a/simulatingrisk/hawkdove/model.py
+++ b/simulatingrisk/hawkdove/model.py
@@ -201,7 +201,7 @@ class HawkDoveModel(mesa.Model):
agent_class = HawkDoveAgent
#: supported neighborhood sizes
neighborhood_sizes = {4, 8, 24}
- #: minimu risk level
+ #: minimum risk level
min_risk_level = 0
#: maximum risk level allowed
max_risk_level = 9
@@ -212,7 +212,7 @@ def __init__(
play_neighborhood=8,
observed_neighborhood=8,
hawk_odds=0.5,
- random_play_odds=0.01,
+ random_play_odds=0.00,
):
super().__init__()
# assume a fully-populated square grid
@@ -276,10 +276,12 @@ def step(self):
self.datacollector.collect(self)
if self.converged:
self.running = False
- print(
- f"Stopping after {self.schedule.steps} rounds. "
- + f"Final rolling average % hawk: {round(self.rolling_percent_hawk, 2)}"
- )
+ # FIXME: this output is annoying in batch runs
+ # print(
+ # f"Stopping after {self.schedule.steps} rounds. "
+ # + "Final rolling average % hawk: "
+ # + f"{self.rolling_percent_hawk: .2f}"
+ # )
@property
def max_agent_points(self):
diff --git a/simulatingrisk/hawkdovemulti/README.md b/simulatingrisk/hawkdovemulti/README.md
new file mode 100644
index 0000000..031720f
--- /dev/null
+++ b/simulatingrisk/hawkdovemulti/README.md
@@ -0,0 +1,63 @@
+# Hawk-Dove with multiple risk attitudes
+
+This is a variation of the [Hawk/Dove game with risk attitudes](../hawkdove/).
+This version supports multiple risk attitudes across the population, with
+options for periodically updating risk attitudes based on comparing the
+success of neighboring agents.
+
+The basic mechanics of the game are the same. This model adds options
+for agent risk adjustment (none, adopt, average) and for the period of
+risk adjustment (by default, every ten rounds). The payoff used to
+compare agents when adjusting risk attitudes can be either recent
+(points since the last adjustment round) or total (points for the whole
+game). The adjustment neighborhood, i.e. which neighboring agents are
+considered when adjusting risk attitudes, can be configured to 4, 8,
+or 24 neighbors.
+
+Initial risk attitudes are set by the model. The risk distribution can
+be configured as normal, uniform (random), bimodal, skewed left, or
+skewed right.
+
+As in the base hawk/dove risk attitude game, there is also an option to
+add some chance of agents playing hawk or dove randomly instead of
+choosing based on the rules of the game.
+
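+For illustration, a minimal sketch of constructing the model directly in
+Python (parameter names follow the batch run script described below; the
+specific values here are illustrative, and defaults are used for options
+not shown):
+
+```python
+from simulatingrisk.hawkdovemulti.model import HawkDoveMultipleRiskModel
+
+model = HawkDoveMultipleRiskModel(
+    grid_size=25,             # 25x25 grid, fully populated with agents
+    risk_adjustment="adopt",  # or "average"
+    adjust_every=10,          # adjust risk attitudes every ten rounds
+    adjust_neighborhood=8,    # compare 4, 8, or 24 neighboring agents
+    hawk_odds=0.5,            # odds of initially playing hawk
+)
+for _ in range(100):          # run for at most 100 rounds
+    model.step()
+```
+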
+## Batch running
+
+This module includes a custom batch run script that runs the simulation
+across a large set of parameter combinations and generates data files
+of collected model and agent data.
+
+To run the script locally from the root project directory:
+```sh
+simulatingrisk/hawkdovemulti/batch_run.py
+```
+Use `-h` or `--help` to see options.
+
+If this project has been installed with pip or similar, the script is
+available as `simrisk-hawkdovemulti-batchrun`.
+
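+For example (using options as defined by the script's argument parser;
+the `test_` prefix here is arbitrary), a short test run might look like:
+
+```sh
+simrisk-hawkdovemulti-batchrun --iterations 10 --max-steps 200 --file-prefix test_
+```
+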
+To run the batch run script on an HPC cluster:
+
+- Create a conda environment and install dependencies and this project.
+  (Major mesa dependencies that are available as conda packages are
+  installed first.)
+
+```sh
+module load anaconda3/2023.9
+conda create --name simrisk pandas networkx matplotlib numpy tqdm click
+conda activate simrisk
+pip install git+https://github.com/Princeton-CDH/simulating-risk.git@hawkdove-batchrun
+```
+For convenience, an example [slurm batch script](simrisk_batch.slurm) is
+included for running the batch run script (some portions are specific
+to Princeton's Research Computing HPC environment).
+
+- Customize the slurm batch script as desired, copy it to the cluster, and
+  submit the job: `sbatch simrisk_batch.slurm`
+
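+To generate more data, the batch can also be run as a slurm job array
+(see the notes in the script); the array directive can be supplied at
+submission time instead of editing the script, e.g.:
+
+```sh
+sbatch --array=0-9 simrisk_batch.slurm
+```
+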
+By default, the batch run script will use all available processors, and will
+create model and agent data files under a `data/hawkdovemulti/` directory
+relative to the working directory where the script is called.
+
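+Output filenames combine the file prefix (if any) with a timestamp; an
+illustrative example: `data/hawkdovemulti/2024-01-15T120000_123456_model.csv`
+and a matching `_agent.csv` file.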
diff --git a/simulatingrisk/hawkdovemulti/batch_run.py b/simulatingrisk/hawkdovemulti/batch_run.py
new file mode 100755
index 0000000..e7efab7
--- /dev/null
+++ b/simulatingrisk/hawkdovemulti/batch_run.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+
+import argparse
+import csv
+from datetime import datetime
+import multiprocessing
+import os
+
+from tqdm.auto import tqdm
+
+from mesa.batchrunner import _make_model_kwargs, _collect_data
+
+from simulatingrisk.hawkdovemulti.model import HawkDoveMultipleRiskModel
+
+
+neighborhood_sizes = list(HawkDoveMultipleRiskModel.neighborhood_sizes)
+
+# combination of parameters we want to run
+params = {
+ "grid_size": [10, 25, 50], # 100],
+ "risk_adjustment": ["adopt", "average"],
+ "play_neighborhood": neighborhood_sizes,
+ "observed_neighborhood": neighborhood_sizes,
+ "adjust_neighborhood": neighborhood_sizes,
+ "hawk_odds": [0.5, 0.25, 0.75],
+ "adjust_every": [2, 10, 20],
+ "risk_distribution": HawkDoveMultipleRiskModel.risk_distribution_options,
+ "adjust_payoff": HawkDoveMultipleRiskModel.supported_adjust_payoffs,
+ # random?
+}
+
+
+# run a single model with one set of params; used by the multiprocessing pool
+def run_hawkdovemulti_model(args):
+ run_id, iteration, params, max_steps = args
+ # simplified model runner adapted from mesa batch run code
+
+ model = HawkDoveMultipleRiskModel(**params)
+ while model.running and model.schedule.steps <= max_steps:
+ model.step()
+
+ # collect data for the last step
+ # (scheduler is 1-based index but data collection is 0-based)
+ step = model.schedule.steps - 1
+
+ model_data, all_agents_data = _collect_data(model, step)
+
+ # combine run id, step, and params, with collected model data
+ run_data = {"RunId": run_id, "iteration": iteration, "Step": step}
+ run_data.update(params)
+ run_data.update(model_data)
+
+    # add run identifiers to each agent record
+    agent_data = [
+        {
+            "RunId": run_id,
+            "iteration": iteration,
+            "Step": step,
+            **agent_record,
+        }
+        for agent_record in all_agents_data
+    ]
+
+ return run_data, agent_data
+
+
+def batch_run(
+ params, iterations, number_processes, max_steps, progressbar, file_prefix
+):
+ param_combinations = _make_model_kwargs(params)
+ total_param_combinations = len(param_combinations)
+ total_runs = total_param_combinations * iterations
+ print(
+ f"{total_param_combinations} parameter combinations, "
+ + f"{iterations} iteration{'s' if iterations != 1 else ''}, "
+ + f"{total_runs} total runs"
+ )
+
+ # create a list of all the parameters to run, with run id and iteration
+ runs_list = []
+ run_id = 0
+    for run_params in param_combinations:
+        for iteration in range(iterations):
+            runs_list.append((run_id, iteration, run_params, max_steps))
+ run_id += 1
+
+ # collect data in a directory for this model
+ data_dir = os.path.join("data", "hawkdovemulti")
+ os.makedirs(data_dir, exist_ok=True)
+ datestr = datetime.today().isoformat().replace(".", "_").replace(":", "")
+ model_output_filename = os.path.join(data_dir, f"{file_prefix}{datestr}_model.csv")
+ agent_output_filename = os.path.join(data_dir, f"{file_prefix}{datestr}_agent.csv")
+ print(
+ f"Saving data collection results to:\n {model_output_filename}"
+ + f"\n {agent_output_filename}"
+ )
+ # open output files so data can be written as it is generated
+ with open(model_output_filename, "w", newline="") as model_output_file, open(
+ agent_output_filename, "w", newline=""
+ ) as agent_output_file:
+ model_dict_writer = None
+ agent_dict_writer = None
+
+ # adapted from mesa batch run code
+ with tqdm(total=total_runs, disable=not progressbar) as pbar:
+ with multiprocessing.Pool(number_processes) as pool:
+ for model_data, agent_data in pool.imap_unordered(
+ run_hawkdovemulti_model, runs_list
+ ):
+                    # initialize dict writers and write csv headers when the first result arrives
+ if model_dict_writer is None:
+ # get field names from first entry
+ model_dict_writer = csv.DictWriter(
+ model_output_file, model_data.keys()
+ )
+ model_dict_writer.writeheader()
+
+ model_dict_writer.writerow(model_data)
+
+ if agent_dict_writer is None:
+ # get field names from first entry
+ agent_dict_writer = csv.DictWriter(
+ agent_output_file, agent_data[0].keys()
+ )
+ agent_dict_writer.writeheader()
+
+ agent_dict_writer.writerows(agent_data)
+
+ pbar.update()
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ prog="hawk/dove batch_run",
+ description="Batch run for hawk/dove multiple risk attitude simulation.",
+        epilog="""Data files will be created in data/hawkdovemulti/
+        relative to the current working directory.""",
+ )
+ parser.add_argument(
+ "-i",
+ "--iterations",
+ type=int,
+ help="Number of iterations to run for each set of parameters "
+ + "(default: %(default)s)",
+ default=100,
+ )
+ parser.add_argument(
+ "-m",
+ "--max-steps",
+ help="Maximum steps to run simulations if they have not already "
+ + "converged (default: %(default)s)",
+ default=125, # typically converges quickly, around step 60 without randomness
+ type=int,
+ )
+ parser.add_argument(
+ "-p",
+ "--processes",
+ type=int,
+ help="Number of processes to use (default: all available CPUs)",
+ default=None,
+ )
+ parser.add_argument(
+ "--progress",
+ help="Display progress bar (default: %(default)s)",
+ action=argparse.BooleanOptionalAction,
+ default=True,
+ )
+ parser.add_argument(
+ "--file-prefix",
+ help="Prefix for data filenames (no prefix by default)",
+ default="",
+ )
+ # may want to add an option to configure output dir
+
+ args = parser.parse_args()
+ batch_run(
+ params,
+ args.iterations,
+ args.processes,
+ args.max_steps,
+ args.progress,
+ args.file_prefix,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/simulatingrisk/hawkdovemulti/model.py b/simulatingrisk/hawkdovemulti/model.py
index 9b919b0..e511d76 100644
--- a/simulatingrisk/hawkdovemulti/model.py
+++ b/simulatingrisk/hawkdovemulti/model.py
@@ -127,6 +127,11 @@ def category(cls, val):
return "majority risk avoidant"
return "no majority"
+ def __str__(self):
+ # override string method to return just the numeric value,
+ # for better serialization of collected data
+ return str(self.value)
+
class HawkDoveMultipleRiskModel(HawkDoveModel):
"""
diff --git a/simulatingrisk/hawkdovemulti/simrisk_batch.slurm b/simulatingrisk/hawkdovemulti/simrisk_batch.slurm
new file mode 100644
index 0000000..fae4efb
--- /dev/null
+++ b/simulatingrisk/hawkdovemulti/simrisk_batch.slurm
@@ -0,0 +1,44 @@
+#!/bin/bash
+#SBATCH --job-name=simrisk # job short name
+#SBATCH --nodes=1 # node count
+#SBATCH --ntasks=1 # total number of tasks across all nodes
+#SBATCH --cpus-per-task=20 # cpu-cores per task
+#SBATCH --mem-per-cpu=525M # memory per cpu-core
+#SBATCH --time=02:00:00 # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin # send email when job begins
+#SBATCH --mail-type=end # send email when job ends
+#SBATCH --mail-type=fail # send email if job fails
+#SBATCH --mail-user=EMAIL
+
+# Template for batch running hawkdovemulti simulation with slurm.
+# Assumes a conda environment named simrisk is set up with required dependencies.
+#
+# Update before using:
+# - EMAIL for slurm notification
+# - customize path for working directory (set username if using Princeton HPC)
+# (and make sure the directory exists)
+# - add an SBATCH array directive if desired
+# - customize the batch run command as appropriate
+# - configure the time appropriately for the batch run
+
+module purge
+module load anaconda3/2023.9
+conda activate simrisk
+
+# change working directory for data output
+cd /scratch/network//simrisk  # NOTE: add your username to this path
+
+# test run: one iteration, max of 200 steps, no progress bar
+# (completed in ~18 minutes on 20 CPUs)
+#simrisk-hawkdovemulti-batchrun --iterations 1 --max-steps 200 --no-progress
+
+# longer run: 10 iterations, max of 200 steps, no progress bar
+#simrisk-hawkdovemulti-batchrun --iterations 10 --max-steps 200 --no-progress
+
+# To generate data for a larger total number of iterations,
+# run the script as a job array.
+# e.g. for 100 iterations, run with --iterations 10 and 10 tasks with #SBATCH --array=0-9
+# and add a file prefix option to generate separate files that can be grouped
+simrisk-hawkdovemulti-batchrun --iterations 10 --max-steps 125 --no-progress --file-prefix "job${SLURM_ARRAY_JOB_ID}_task${SLURM_ARRAY_TASK_ID}_"
diff --git a/tests/test_hawkdovemulti.py b/tests/test_hawkdovemulti.py
index 7638082..98a6a41 100644
--- a/tests/test_hawkdovemulti.py
+++ b/tests/test_hawkdovemulti.py
@@ -113,6 +113,12 @@ def test_riskstate_label():
assert RiskState.category(13) == "no majority"
+def test_riskstate_str():
+ # serialize as string of number for data output in batch runs
+ assert str(RiskState.c1) == "1"
+ assert str(RiskState.c13) == "13"
+
+
def test_most_successful_neighbor():
# initialize two agents with a mock model
# first, measure success based on total/cumulative payoff