Commit

create do_max.py
alanlujan91 committed Apr 12, 2023
1 parent 8c8ef20 commit a06c0d3
Showing 12 changed files with 99 additions and 59 deletions.
Empty file added Code/Options/__init__.py
Empty file.
14 changes: 14 additions & 0 deletions Code/Options/all_options.py
@@ -67,3 +67,17 @@
"do_agg_shocks": True, # Solve the FBS aggregate shocks version of the model
"do_liquid": False, # Matches liquid assets data when True, net worth data when False
}

all_options["LCSpecPoint"] = {
"do_param_dist": False, # Do param-dist version if True, param-point if False
"do_lifecycle": True, # Use lifecycle model if True, perpetual youth if False
"do_agg_shocks": True, # Solve the FBS aggregate shocks version of the model
"do_liquid": False, # Matches liquid assets data when True, net worth data when False
}

all_options["LCSpecDist"] = {
"do_param_dist": True, # Do param-dist version if True, param-point if False
"do_lifecycle": True, # Use lifecycle model if True, perpetual youth if False
"do_agg_shocks": True, # Solve the FBS aggregate shocks version of the model
"do_liquid": False, # Matches liquid assets data when True, net worth data when False
}
6 changes: 3 additions & 3 deletions Code/Tests/test_results.py
@@ -1,8 +1,8 @@
"""
To be run after `do_min.py`.
Depends on having files in Results.
Depends on having files in results.
Will test the output files in Results for similarity with the results in the original
Will test the output files in results for similarity with the results in the original
cstwMPC paper.
"""
import pathlib
@@ -128,7 +128,7 @@

for filename, targets in results_files_and_targets.items():
with open(
pathlib.PurePath(pathlib.Path(__file__).parent, "../Results", filename),
pathlib.PurePath(pathlib.Path(__file__).parent, "../results", filename),
encoding="utf-8",
) as f:
data = f.read()
2 changes: 1 addition & 1 deletion Code/agents.py
@@ -526,7 +526,7 @@ def show_many_stats(self, spec_name=None):
# Save results to disk
if spec_name is not None:
with open(
self.my_file_path + "/Results/" + spec_name + "Results.txt",
self.my_file_path + "/results/" + spec_name + "Results.txt",
"w",
encoding="utf-8",
) as f:
29 changes: 17 additions & 12 deletions Code/estimation.py
@@ -42,11 +42,7 @@
from HARK.utilities import get_lorenz_shares
from scipy.optimize import minimize, minimize_scalar, root_scalar

from Code.agents import AggDoWAgent, AggDoWMarket, DoWAgent, DoWMarket


def mystr(number):
return f"{number:.3f}"
from code.agents import AggDoWAgent, AggDoWMarket, DoWAgent, DoWMarket


def get_ky_ratio_difference(
@@ -145,7 +141,7 @@ def find_lorenz_distance_at_target_ky(


def get_target_ky_and_find_lorenz_distance(
x, economy=None, param_name=None, param_count=None, dist_type=None
x, economy, param_name, param_count, dist_type
):
center, spread = x
# Make sure we actually calculate simulated Lorenz points
@@ -192,7 +188,7 @@ def calc_stationary_age_dstn(LivPrb, terminal_period):
for t in range(top):
MrkvArray[t, 0] = 1.0 - LivPrb[t]
MrkvArray[t, t + 1] = LivPrb[t]
MrkvArray[t + 1, 0] = 1.0
MrkvArray[t + 1, 0] = 1.0

w, v = np.linalg.eig(np.transpose(MrkvArray))
idx = (np.abs(w - 1.0)).argmin()
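In this hunk, the terminal-period reset MrkvArray[t + 1, 0] = 1.0 moves outside the loop so it is applied once, to the last age, and the stationary age distribution is then the eigenvector of the transposed transition matrix associated with the eigenvalue closest to 1. A minimal, self-contained sketch of that idea (a hypothetical helper written for this note, not the repository's exact calc_stationary_age_dstn):

import numpy as np

def stationary_age_dstn_sketch(LivPrb):
    """Stationary age distribution implied by per-period survival probabilities LivPrb."""
    T = len(LivPrb)
    MrkvArray = np.zeros((T + 1, T + 1))
    for t in range(T):
        MrkvArray[t, 0] = 1.0 - LivPrb[t]  # death: restart as a newborn
        MrkvArray[t, t + 1] = LivPrb[t]    # survival: move up one age
    MrkvArray[T, 0] = 1.0  # terminal age always resets; set once, outside the loop
    w, v = np.linalg.eig(MrkvArray.T)      # left eigenvectors of MrkvArray
    idx = (np.abs(w - 1.0)).argmin()       # pick the eigenvalue closest to 1
    dstn = np.real(v[:, idx])
    return dstn / dstn.sum()               # normalize to a probability distribution

print(stationary_age_dstn_sketch([0.99, 0.98, 0.95]))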
@@ -321,10 +317,10 @@ def make_agents(options, params, agent_class, param_count):
dropout_type = agent_class(**params.init_dropout)
dropout_type.AgeDstn = calc_stationary_age_dstn(dropout_type.LivPrb, True)
highschool_type = deepcopy(dropout_type)
highschool_type(**params.adj_highschool)
highschool_type.assign_parameters(**params.adj_highschool)
highschool_type.AgeDstn = calc_stationary_age_dstn(highschool_type.LivPrb, True)
college_type = deepcopy(dropout_type)
college_type(**params.adj_college)
college_type.assign_parameters(**params.adj_college)
college_type.AgeDstn = calc_stationary_age_dstn(college_type.LivPrb, True)
dropout_type.update()
highschool_type.update()
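The change above replaces the older idiom of calling the agent instance to set parameters (highschool_type(**params.adj_highschool)) with HARK's explicit assign_parameters method. A toy, self-contained illustration of the pattern (AgentLike is a stand-in written for this note, not a HARK class):

import copy

class AgentLike:
    """Minimal stand-in for a HARK AgentType, for illustration only."""

    def assign_parameters(self, **kwds):
        # Set each keyword as an attribute, mimicking how parameter
        # overrides are applied to an agent type.
        for key, value in kwds.items():
            setattr(self, key, value)

dropout_like = AgentLike()
dropout_like.assign_parameters(CRRA=2.0, DiscFac=0.96)

highschool_like = copy.deepcopy(dropout_like)
highschool_like.assign_parameters(DiscFac=0.97)  # education-specific override
print(dropout_like.DiscFac, highschool_like.DiscFac)  # 0.96 0.97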
@@ -491,14 +487,23 @@ def estimate(options, params):
economy.center_estimate = center_estimate
economy.spread_estimate = spread_estimate
economy.show_many_stats(spec_name)
print(f"These results have been saved to ./Code/Results/{spec_name}.txt\n\n")
print(f"These results have been saved to ./code/results/{spec_name}.txt\n\n")

return economy


class Estimator:
def __init__(self, options, parameters):
self.options = options
self.parameters = parameters

def estimate(self):
return estimate(self.options, self.parameters)
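A possible usage of the new Estimator wrapper, assembling an options dict from the specifications added in Code/Options/all_options.py; this mirrors the __main__ block below and the new do_max.py, and is a sketch rather than part of the commit:

import code.calibration as parameters
from code.estimation import Estimator
from code.options.all_options import all_options

# Layer the baseline options, then the lifecycle beta-point specification.
options = all_options["UseUniformBetaDist"].copy()
options.update(all_options["DoStandardWork"])
options.update(all_options["LCSpecPoint"])

# Estimator(...).estimate() is equivalent to estimate(options, parameters).
economy = Estimator(options, parameters).estimate()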


if __name__ == "__main__":
import Code.calibration as parameters
from Code.Options.all_options import all_options
import code.calibration as parameters
from code.options.all_options import all_options

basic_options = all_options["UseUniformBetaDist"].copy()
basic_options.update(all_options["DoStandardWork"])
46 changes: 23 additions & 23 deletions Code/figures.py
@@ -1,6 +1,6 @@
"""
This module makes some figures for cstwMPC. It requires that quite a few specifications
of the model have been estimated, with the results stored in ./Results.
of the model have been estimated, with the results stored in ./results.
"""

import matplotlib.pyplot as plt
@@ -12,7 +12,7 @@
my_file_path = os.path.dirname(os.path.abspath(__file__))


f = open(my_file_path + "/Results/LCbetaPointNetWorthLorenzFig.txt")
f = open(my_file_path + "/results/LCbetaPointNetWorthLorenzFig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
lorenz_percentiles = []
@@ -27,7 +27,7 @@
scf_lorenz = np.array(scf_lorenz)
beta_point_lorenz = np.array(beta_point_lorenz)

f = open(my_file_path + "/Results/LCbetaDistNetWorthLorenzFig.txt")
f = open(my_file_path + "/results/LCbetaDistNetWorthLorenzFig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
beta_dist_lorenz = []
@@ -36,7 +36,7 @@
f.close()
beta_dist_lorenz = np.array(beta_dist_lorenz)

f = open(my_file_path + "/Results/LCbetaPointNetWorthMPCfig.txt")
f = open(my_file_path + "/results/LCbetaPointNetWorthMPCfig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
mpc_percentiles = []
@@ -48,7 +48,7 @@
mpc_percentiles = np.asarray(mpc_percentiles)
mpc_beta_point = np.asarray(mpc_beta_point)

f = open(my_file_path + "/Results/LCbetaDistNetWorthMPCfig.txt")
f = open(my_file_path + "/results/LCbetaDistNetWorthMPCfig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
mpc_beta_dist = []
@@ -57,7 +57,7 @@
f.close()
mpc_beta_dist = np.asarray(mpc_beta_dist)

f = open(my_file_path + "/Results/LCbetaDistLiquidMPCfig.txt")
f = open(my_file_path + "/results/LCbetaDistLiquidMPCfig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
mpc_beta_dist_liquid = []
@@ -66,7 +66,7 @@
f.close()
mpc_beta_dist_liquid = np.asarray(mpc_beta_dist_liquid)

f = open(my_file_path + "/Results/LCbetaDistNetWorthKappaByAge.txt")
f = open(my_file_path + "/results/LCbetaDistNetWorthKappaByAge.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
kappa_mean_age = []
@@ -82,7 +82,7 @@
age_list = np.array(list(range(len(kappa_mean_age))), dtype=float) * 0.25 + 24.0
f.close()

f = open(my_file_path + "/Results/LC_KYbyBeta.txt")
f = open(my_file_path + "/results/LC_KYbyBeta.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
KY_by_beta_lifecycle = []
@@ -94,7 +94,7 @@
KY_by_beta_lifecycle = np.array(KY_by_beta_lifecycle)
f.close()

f = open(my_file_path + "/Results/IH_KYbyBeta.txt")
f = open(my_file_path + "/results/IH_KYbyBeta.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
KY_by_beta_infinite = []
@@ -152,7 +152,7 @@
plt.show()


f = open(my_file_path + "/Results/IHbetaPointNetWorthLorenzFig.txt")
f = open(my_file_path + "/results/IHbetaPointNetWorthLorenzFig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
lorenz_percentiles = []
@@ -167,7 +167,7 @@
scf_lorenz = np.array(scf_lorenz)
beta_point_lorenz = np.array(beta_point_lorenz)

f = open(my_file_path + "/Results/IHbetaDistNetWorthLorenzFig.txt")
f = open(my_file_path + "/results/IHbetaDistNetWorthLorenzFig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
beta_dist_lorenz = []
@@ -177,7 +177,7 @@
beta_dist_lorenz = np.array(beta_dist_lorenz)


f = open(my_file_path + "/Results/IHbetaPointLiquidLorenzFig.txt")
f = open(my_file_path + "/results/IHbetaPointLiquidLorenzFig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
beta_point_lorenz_liquid = []
@@ -186,7 +186,7 @@
f.close()
beta_point_lorenz_liquid = np.array(beta_point_lorenz_liquid)

f = open(my_file_path + "/Results/IHbetaDistLiquidLorenzFig.txt")
f = open(my_file_path + "/results/IHbetaDistLiquidLorenzFig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
beta_dist_lorenz_liquid = []
@@ -195,7 +195,7 @@
f.close()
beta_dist_lorenz_liquid = np.array(beta_dist_lorenz_liquid)

f = open(my_file_path + "/Results/IHbetaPointNetWorthMPCfig.txt")
f = open(my_file_path + "/results/IHbetaPointNetWorthMPCfig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
mpc_percentiles = []
@@ -207,7 +207,7 @@
mpc_percentiles = np.asarray(mpc_percentiles)
mpc_beta_point = np.asarray(mpc_beta_point)

f = open(my_file_path + "/Results/IHbetaDistNetWorthMPCfig.txt")
f = open(my_file_path + "/results/IHbetaDistNetWorthMPCfig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
mpc_beta_dist = []
@@ -216,7 +216,7 @@
f.close()
mpc_beta_dist = np.asarray(mpc_beta_dist)

f = open(my_file_path + "/Results/IHbetaDistLiquidMPCfig.txt")
f = open(my_file_path + "/results/IHbetaDistLiquidMPCfig.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
mpc_beta_dist_liquid = []
@@ -276,43 +276,43 @@
plt.show()


f = open(my_file_path + "/Results/SensitivityRho.txt")
f = open(my_file_path + "/results/SensitivityRho.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
rho_sensitivity = np.array(raw_data)
f.close()

f = open(my_file_path + "/Results/SensitivityXiSigma.txt")
f = open(my_file_path + "/results/SensitivityXiSigma.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
xi_sigma_sensitivity = np.array(raw_data)
f.close()

f = open(my_file_path + "/Results/SensitivityPsiSigma.txt")
f = open(my_file_path + "/results/SensitivityPsiSigma.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
psi_sigma_sensitivity = np.array(raw_data)
f.close()

f = open(my_file_path + "/Results/SensitivityMu.txt")
f = open(my_file_path + "/results/SensitivityMu.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
mu_sensitivity = np.array(raw_data)
f.close()

f = open(my_file_path + "/Results/SensitivityUrate.txt")
f = open(my_file_path + "/results/SensitivityUrate.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
urate_sensitivity = np.array(raw_data)
f.close()

f = open(my_file_path + "/Results/SensitivityMortality.txt")
f = open(my_file_path + "/results/SensitivityMortality.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
mortality_sensitivity = np.array(raw_data)
f.close()

f = open(my_file_path + "/Results/SensitivityG.txt")
f = open(my_file_path + "/results/SensitivityG.txt")
my_reader = csv.reader(f, delimiter="\t")
raw_data = list(my_reader)
g_sensitivity = np.array(raw_data)
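figures.py repeats the same open / csv.reader / parse pattern for every file under the results directory. A hypothetical helper that captures the pattern (a refactoring sketch, not part of this commit):

import csv
import os

my_file_path = os.path.dirname(os.path.abspath(__file__))

def read_results_table(filename):
    """Read one tab-delimited file from the results directory into a list of rows."""
    path = os.path.join(my_file_path, "results", filename)
    with open(path, encoding="utf-8") as f:
        return list(csv.reader(f, delimiter="\t"))

# Example: raw_data = read_results_table("SensitivityG.txt")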
8 changes: 3 additions & 5 deletions cstwMPC.ipynb
@@ -30,18 +30,16 @@
"source": [
"# This cell does some standard python setup!\n",
"\n",
"import os # Tools for navigating the filesystem\n",
"import code.calibration as parameters\n",
"import warnings # The warnings package allows us to ignore some harmless but alarming warning messages\n",
"from code.calibration import SCF_wealth, SCF_weights\n",
"from code.estimation import estimate\n",
"\n",
"# Import related generic python packages\n",
"import matplotlib.pyplot as plt # Plotting tools\n",
"import numpy as np\n",
"from HARK.utilities import get_lorenz_shares\n",
"\n",
"import Code.calibration as parameters\n",
"from Code.calibration import SCF_wealth, SCF_weights\n",
"from Code.estimation import estimate\n",
"\n",
"warnings.filterwarnings(\"ignore\")\n",
"\n",
"\n",
8 changes: 3 additions & 5 deletions cstwMPC.py
@@ -24,18 +24,16 @@
# %% code_folding=[]
# This cell does some standard python setup!

import os # Tools for navigating the filesystem
import code.calibration as parameters
import warnings # The warnings package allows us to ignore some harmless but alarming warning messages
from code.calibration import SCF_wealth, SCF_weights
from code.estimation import estimate

# Import related generic python packages
import matplotlib.pyplot as plt # Plotting tools
import numpy as np
from HARK.utilities import get_lorenz_shares

import Code.calibration as parameters
from Code.calibration import SCF_wealth, SCF_weights
from Code.estimation import estimate

warnings.filterwarnings("ignore")


4 changes: 2 additions & 2 deletions do_custom.py
@@ -2,8 +2,8 @@
This module runs a custom model specification, with all options specified by the user.
"""

import Code.calibration as parameters
from Code.estimation import estimate
import code.calibration as parameters
from code.estimation import estimate

param_name = "DiscFac" # Which parameter to introduce heterogeneity in
dist_type = "uniform" # Which type of distribution to use
27 changes: 27 additions & 0 deletions do_max.py
@@ -0,0 +1,27 @@
"""
This file runs the two main specifications of the cstwMPC project: FBS-style
aggregate shocks, lifecycle model (the LCSpecPoint and LCSpecDist options),
matching net worth. It runs both the beta-point and beta-dist versions.
"""

import code.calibration as parameters
from code.estimation import estimate
from code.options.all_options import all_options

basic_options = all_options["UseUniformBetaDist"].copy()
basic_options.update(all_options["DoStandardWork"])

# Run beta-point model

point_options = basic_options.copy()
point_options.update(all_options["LCSpecPoint"])

estimate(point_options, parameters)

# Run beta-dist model

dist_options = basic_options.copy()
dist_options["do_combo_estimation"] = True
dist_options.update(all_options["LCSpecDist"])

estimate(dist_options, parameters)
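After either call to estimate finishes, the summary statistics are written to text files (see the show_many_stats and estimate changes above). A small, hypothetical check of what a run produced, assuming the lowercase results path used elsewhere in this commit:

import pathlib

results_dir = pathlib.Path("code") / "results"  # assumed location, per the print statement in estimation.py
for results_file in sorted(results_dir.glob("*.txt")):
    print(results_file.name)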
