diff --git a/HARK/model.py b/HARK/model.py
index 9a199dfe0..4f4ab6715 100644
--- a/HARK/model.py
+++ b/HARK/model.py
@@ -270,7 +270,11 @@ def get_dynamics(self):
         return self.dynamics
 
     def get_vars(self):
-        return list(self.shocks.keys()) + list(self.dynamics.keys())
+        return (
+            list(self.shocks.keys())
+            + list(self.dynamics.keys())
+            + list(self.reward.keys())
+        )
 
     def transition(self, pre, dr):
         """
@@ -314,6 +318,9 @@ def get_decision_value_function(self, dr, continuation):
         Given a decision rule and a continuation value function,
         return a function for the value at the decision step/tac,
         after the shock have been realized.
+
+        ## TODO: it would be better to systematize these value functions per block,
+        ## and then construct them with 'partial' methods
         """
         srvf = self.get_state_rule_value_function_from_continuation(continuation)
 
diff --git a/HARK/simulation/monte_carlo.py b/HARK/simulation/monte_carlo.py
index ad99f0c0e..4cb8f4f64 100644
--- a/HARK/simulation/monte_carlo.py
+++ b/HARK/simulation/monte_carlo.py
@@ -16,6 +16,8 @@
 from HARK.model import DBlock
 from HARK.model import construct_shocks, simulate_dynamics
 
+from HARK.utilities import apply_fun_to_vals
+
 
 def draw_shocks(shocks: Mapping[str, Distribution], conditions: Sequence[int]):
     """
@@ -29,6 +31,7 @@ def draw_shocks(shocks: Mapping[str, Distribution], conditions: Sequence[int]):
     conditions: Sequence[int]
         An array of conditions, one for each agent.
         Typically these will be agent ages.
+        # TODO: generalize this to a wider range of inputs.
 
     Parameters
     ------------
@@ -51,6 +54,7 @@ def draw_shocks(shocks: Mapping[str, Distribution], conditions: Sequence[int]):
             draws[shock_var] = shock.draw(conditions)
         else:
             draws[shock_var] = shock.draw(len(conditions))
+            # this is hacky if there are no conditions.
 
     return draws
 
@@ -396,3 +400,240 @@ def clear_history(self):
         for var_name in self.vars:
             self.history[var_name] = np.empty((self.T_sim, self.agent_count))
             self.history[var_name].fill(np.nan)
+
+
+class MonteCarloSimulator(Simulator):
+    """
+    A Monte Carlo simulation engine based on model blocks.
+
+    Unlike the AgentTypeMonteCarloSimulator and HARK.core.AgentType,
+    this class does not make any assumptions about aging or mortality.
+    It operates only on model information passed in as blocks.
+
+    It also does not have read_shocks functionality;
+    it is a strict subset of the AgentTypeMonteCarloSimulator functionality.
+
+    Parameters
+    ------------
+
+    calibration: Mapping[str, Any]
+
+    block : DBlock
+        Has shocks, dynamics, and rewards
+
+    dr: Mapping[str, Callable]
+
+    initial: dict
+
+    seed : int
+        A seed for this instance's random number generator.
+
+    Attributes
+    ----------
+    agent_count : int
+        The number of agents of this type to use in simulation.
+
+    T_sim : int
+        The number of periods to simulate.
+    """
+
+    state_vars = []
+
+    def __init__(
+        self, calibration, block: DBlock, dr, initial, seed=0, agent_count=1, T_sim=10
+    ):
+        super().__init__()
+
+        self.calibration = calibration
+        self.block = block
+
+        # shocks are exogenous (but for age) but can depend on calibration
+        raw_shocks = block.get_shocks()
+        self.shocks = construct_shocks(raw_shocks, calibration)
+
+        self.dynamics = block.get_dynamics()
+        self.dr = dr
+        self.initial = initial
+
+        self.seed = seed  # NOQA
+        self.agent_count = agent_count  # TODO: pass this in at block level
+        self.T_sim = T_sim
+
+        # changes here from HARK.core.AgentType
+        self.vars = block.get_vars()
+
+        self.vars_now = {v: None for v in self.vars}
+        self.vars_prev = self.vars_now.copy()
+
+        self.shock_history = {}
+        self.newborn_init_history = {}
+        self.history = {}
+
+        self.reset_rng()  # NOQA
+
+    def reset_rng(self):
+        """
+        Reset the random number generator for this type.
+        """
+        self.RNG = np.random.default_rng(self.seed)
+
+    def initialize_sim(self):
+        """
+        Prepares for a new simulation. Resets the internal random number generator,
+        makes initial states for all agents (using sim_birth), clears histories of tracked variables.
+        """
+        if self.T_sim <= 0:
+            raise Exception(
+                "T_sim represents the largest number of observations "
+                + "that can be simulated for an agent, and must be a positive number."
+            )
+
+        self.reset_rng()
+        self.t_sim = 0
+        all_agents = np.ones(self.agent_count, dtype=bool)
+        blank_array = np.empty(self.agent_count)
+        blank_array[:] = np.nan
+        for var in self.vars:
+            if self.vars_now[var] is None:
+                self.vars_now[var] = copy(blank_array)
+
+        self.t_cycle = np.zeros(
+            self.agent_count, dtype=int
+        )  # Which cycle period each agent is on
+
+        for var_name in self.initial:
+            self.newborn_init_history[var_name] = (
+                np.zeros((self.T_sim, self.agent_count)) + np.nan
+            )
+
+        self.sim_birth(all_agents)
+
+        self.clear_history()
+        return None
+
+    def sim_one_period(self):
+        """
+        Simulates one period for this type: draws this period's shocks, then
+        computes the dynamics and rewards for all agents from the calibration,
+        the previous period's variables, and the decision rules.
+        """
+
+        # state_{t-1}
+        for var in self.vars:
+            self.vars_prev[var] = self.vars_now[var]
+
+            if isinstance(self.vars_now[var], np.ndarray):
+                self.vars_now[var] = np.empty(self.agent_count)
+                self.vars_now[var][:] = np.nan
+            else:
+                # Probably an aggregate variable. It may be getting set by the Market.
+                pass
+
+        shocks_now = {}
+
+        shocks_now = draw_shocks(
+            self.shocks,
+            np.zeros(self.agent_count),  # TODO: stupid hack to remove age calculations.
+            # this needs a little more thought
+        )
+
+        pre = self.calibration  # for AgentTypeMC, this is conditional on age
+        # TODO: generalize indexing into calibration.
+
+        pre.update(self.vars_prev)
+        pre.update(shocks_now)
+
+        # Won't work for 3.8: self.parameters | self.vars_prev | shocks_now
+
+        dr = self.dr  # AgentTypeMC chooses rule by age;
+        # that generalizes to age as a DR argument?
+
+        post = simulate_dynamics(self.dynamics, pre, dr)
+
+        for r in self.block.reward:
+            post[r] = apply_fun_to_vals(self.block.reward[r], post)
+
+        self.vars_now = post
+
+    def sim_birth(self, which_agents):
+        """
+        Makes new agents for the simulation. Takes a boolean array as an input, indicating which
+        agent indices are to be "born", and draws their initial states from self.initial.
+
+        Parameters
+        ----------
+        which_agents : np.array(Bool)
+            Boolean array of size self.agent_count indicating which agents should be "born".
+
+        Returns
+        -------
+        None
+        """
+
+        initial_vals = draw_shocks(self.initial, np.zeros(which_agents.sum()))
+
+        if np.sum(which_agents) > 0:
+            for varn in initial_vals:
+                self.vars_now[varn][which_agents] = initial_vals[varn]
+                self.newborn_init_history[varn][self.t_sim, which_agents] = (
+                    initial_vals[varn]
+                )
+
+    def simulate(self, sim_periods=None):
+        """
+        Simulates this agent type for a given number of periods. Defaults to
+        self.T_sim if no input.
+
+        Records the histories of all model variables (shocks, dynamics, and rewards)
+        in self.history[varname].
+
+        Parameters
+        ----------
+        sim_periods : int
+            Number of periods to simulate.
+
+        Returns
+        -------
+        history : dict
+            The history tracked during the simulation.
+        """
+        if not hasattr(self, "t_sim"):
+            raise Exception(
+                "It seems that the simulation variables were not initialized before calling "
+                + "simulate(). Call initialize_sim() to initialize the variables before calling simulate() again."
+            )
+        if sim_periods is not None and self.T_sim < sim_periods:
+            raise Exception(
+                "To simulate, sim_periods cannot exceed the maximum data set size "
+                + "T_sim. Either increase the attribute T_sim of this agent type instance "
+                + "and call the initialize_sim() method again, or set sim_periods <= T_sim."
+            )
+
+        # Ignore floating point "errors". Numpy calls it "errors", but really it's excep-
+        # tions with well-defined answers such as 1.0/0.0 that is np.inf, -1.0/0.0 that is
+        # -np.inf, np.inf/np.inf is np.nan and so on.
+        with np.errstate(
+            divide="ignore", over="ignore", under="ignore", invalid="ignore"
+        ):
+            if sim_periods is None:
+                sim_periods = self.T_sim
+
+            for t in range(sim_periods):
+                self.sim_one_period()
+
+                # track all the vars -- shocks and dynamics
+                for var_name in self.vars:
+                    self.history[var_name][self.t_sim, :] = self.vars_now[var_name]
+
+                self.t_sim += 1
+
+            return self.history
+
+    def clear_history(self):
+        """
+        Clears the histories.
+        """
+        for var_name in self.vars:
+            self.history[var_name] = np.empty((self.T_sim, self.agent_count))
+            self.history[var_name].fill(np.nan)
diff --git a/HARK/simulation/test_monte_carlo.py b/HARK/simulation/test_monte_carlo.py
index 7f2329703..c8ed97103 100644
--- a/HARK/simulation/test_monte_carlo.py
+++ b/HARK/simulation/test_monte_carlo.py
@@ -159,3 +159,49 @@ def test_simulate(self):
         b1 = history["m"][1] - self.dr["c"][1](history["m"][1])
 
         self.assertTrue((a1 == b1).all())
+
+
+class test_MonteCarloSimulator(unittest.TestCase):
+    def setUp(self):
+        self.calibration = {  # TODO
+            "G": 1.05,
+        }
+        self.block = DBlock(
+            **{
+                "shocks": {
+                    "theta": MeanOneLogNormal(1),
+                    "agg_R": Aggregate(MeanOneLogNormal(1)),
+                },
+                "dynamics": {
+                    "b": lambda agg_R, G, a: agg_R * G * a,
+                    "m": lambda b, theta: b + theta,
+                    "c": Control(["m"]),
+                    "a": lambda m, c: m - c,
+                },
+            }
+        )
+
+        self.initial = {"a": MeanOneLogNormal(1)}
+
+        self.dr = {"c": lambda m: m / 2}
+
+    def test_simulate(self):
+        self.simulator = MonteCarloSimulator(
+            self.calibration,
+            self.block,
+            self.dr,
+            self.initial,
+            agent_count=3,
+        )
+
+        self.simulator.initialize_sim()
+        history = self.simulator.simulate()
+
+        a1 = history["a"][5]
+        b1 = (
+            history["a"][4] * history["agg_R"][5] * self.calibration["G"]
+            + history["theta"][5]
+            - history["c"][5]
+        )
+
+        self.assertTrue((a1 == b1).all())
diff --git a/HARK/utilities.py b/HARK/utilities.py
index 7f9216ffa..fae7aa4d5 100644
--- a/HARK/utilities.py
+++ b/HARK/utilities.py
@@ -14,6 +14,8 @@
 import numpy as np  # Python's numeric library, abbreviated "np"
 from scipy.interpolate import interp1d
 
+from inspect import signature
+
 # try:
 #     import matplotlib.pyplot as plt  # Python's plotting library
 # except ImportError:
@@ -112,6 +114,22 @@ def distance(self, other):
         return 10000.0
 
 
+def apply_fun_to_vals(fun, vals):
+    """
+    Applies a function to the arguments defined in `vals`.
+    This is equivalent to `fun(**vals)`, except
+    that `vals` may contain keys that are not named arguments
+    of `fun`.
+
+    Parameters
+    ----------
+    fun: callable
+
+    vals: dict
+    """
+    return fun(*[vals[var] for var in signature(fun).parameters])
+
+
+# =======================================================
+# ================ Other useful functions ===============
+# =======================================================
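
Note on the new reward handling: the loop added to MonteCarloSimulator.sim_one_period computes each reward variable from the post-dynamics values via the new apply_fun_to_vals helper. Below is a minimal, self-contained sketch of that interaction; the reward name "u" and the log-utility function are hypothetical examples, not part of this diff.

import numpy as np
from inspect import signature


def apply_fun_to_vals(fun, vals):
    # As added in HARK/utilities.py: call `fun` with only the entries of
    # `vals` that match its named arguments; extra keys are ignored.
    return fun(*[vals[var] for var in signature(fun).parameters])


# Post-dynamics values for two agents, using the variable names from the test block.
post = {"m": np.array([2.0, 3.0]), "c": np.array([1.0, 1.5]), "a": np.array([1.0, 1.5])}

# Hypothetical reward mapping, as a DBlock's `reward` field might define it.
reward = {"u": lambda c: np.log(c)}

# Mirrors the loop added to sim_one_period.
for r in reward:
    post[r] = apply_fun_to_vals(reward[r], post)

print(post["u"])  # computed from the `c` entry of `post` only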