From 50b017af4b8053874955e5d03e6a5d565d22d4fc Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 26 Mar 2020 10:11:17 -0700 Subject: [PATCH 01/57] dagger initial implementation --- flow/dagger/Untitled.ipynb | 486 +++++++++++++++++++++++++++ flow/dagger/env_params_test.py | 47 +++ flow/dagger/imitating_agent.py | 25 ++ flow/dagger/imitating_controller.py | 78 +++++ flow/dagger/imitating_controller2.py | 94 ++++++ flow/dagger/replay_buffer.py | 60 ++++ flow/dagger/run.py | 73 ++++ flow/dagger/trainer.py | 113 +++++++ flow/dagger/useless.py | 147 ++++++++ flow/dagger/utils.py | 114 +++++++ 10 files changed, 1237 insertions(+) create mode 100644 flow/dagger/Untitled.ipynb create mode 100644 flow/dagger/env_params_test.py create mode 100644 flow/dagger/imitating_agent.py create mode 100644 flow/dagger/imitating_controller.py create mode 100644 flow/dagger/imitating_controller2.py create mode 100644 flow/dagger/replay_buffer.py create mode 100644 flow/dagger/run.py create mode 100644 flow/dagger/trainer.py create mode 100644 flow/dagger/useless.py create mode 100644 flow/dagger/utils.py diff --git a/flow/dagger/Untitled.ipynb b/flow/dagger/Untitled.ipynb new file mode 100644 index 000000000..a6153ffc6 --- /dev/null +++ b/flow/dagger/Untitled.ipynb @@ -0,0 +1,486 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as 
(type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + } + ], + "source": [ + "import tensorflow as tf\n", + "import numpy as np\n", + "import gym" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n" + ] + } + ], + "source": [ + "from env_params import flow_params as flow_params" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departLane in InFlows is deprecated, use depart_lane instead.\n", + " PendingDeprecationWarning\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", + " PendingDeprecationWarning\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departLane in InFlows is deprecated, use depart_lane instead.\n", + " PendingDeprecationWarning\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", + " PendingDeprecationWarning\n" + ] + } + ], + "source": [ + "from flow.utils.registry import make_create_env\n", + "from flow.controllers import IDMController, ContinuousRouter\n", + "from flow.core.experiment import Experiment\n", + "from 
flow.core.params import SumoParams, EnvParams, \\\n", + " InitialConfig, NetParams\n", + "from flow.core.params import VehicleParams\n", + "from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS\n", + "import numpy as np\n", + "from flow.core.experiment import Experiment\n", + "from flow.core.params import InFlows\n", + "from flow.core.params import SumoLaneChangeParams\n", + "from flow.core.params import SumoCarFollowingParams\n", + "from flow.core.params import VehicleParams\n", + "from flow.controllers import ContinuousRouter\n", + "from flow.benchmarks.bottleneck0 import flow_params\n", + "from flow.benchmarks.bottleneck0 import SCALING" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# sim_params = flow_params['sim']\n", + "# env_params = flow_params['env']\n", + "# net_params = flow_params['net']\n", + "\n", + "# # we want no autonomous vehicles in the simulation\n", + "# vehicles = VehicleParams()\n", + "# vehicles.add(veh_id='human',\n", + "# car_following_params=SumoCarFollowingParams(\n", + "# speed_mode=9,\n", + "# ),\n", + "# routing_controller=(ContinuousRouter, {}),\n", + "# lane_change_params=SumoLaneChangeParams(\n", + "# lane_change_mode=0,\n", + "# ),\n", + "# num_vehicles=1 * SCALING)\n", + "\n", + "# # only include human vehicles in inflows\n", + "# flow_rate = 2300 * SCALING\n", + "# inflow = InFlows()\n", + "# inflow.add(veh_type='human', edge='1',\n", + "# vehs_per_hour=flow_rate,\n", + "# departLane='random', departSpeed=10)\n", + "# net_params.inflows = inflow\n", + "\n", + "# # modify the rendering to match what is requested\n", + "# # sim_params.render = render\n", + "\n", + "# # set the evaluation flag to True\n", + "# env_params.evaluate = True\n", + "\n", + "# flow_params['env'].horizon = env_params.horizon" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = 
make_create_env(flow_params)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/subprocess.py:786: ResourceWarning: subprocess 28341 is still running\n", + " ResourceWarning, source=self)\n" + ] + }, + { + "data": { + "text/plain": [ + "array([0. , 0.1 , 0.05 , 0. , 0. ,\n", + " 0. , 0.05 , 0.05 , 0. , 0.05 ,\n", + " 0. , 0.05 , 0.05 , 0. , 0.1 ,\n", + " 0. , 0.05 , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0.05 , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0.05 ,\n", + " 0. , 0.05 , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0.3234498 , 0.35290716, 0. , 0. ,\n", + " 0. , 0.39880784, 0.41698796, 0. , 0.4171411 ,\n", + " 0. , 0.49073983, 0.40911561, 0. , 0.43184929,\n", + " 0. , 0.41929399, 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0.27513936, 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0.44301522,\n", + " 0. , 0.44301522, 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. 
])" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "state = env.reset()\n", + "state" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'bottleneck_0'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "flow_params[\"exp_tag\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = {}\n", + "for veh_id in env.k.vehicle.get_ids():\n", + " print(veh_id)\n", + " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)\n", + "print(flow_params.env['horizon'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "state, reward, done, _ = env.step(env.action_space.sample())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from env_params_akash import name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from env_params_akash import flow_params_akash" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params_akash)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "state = env.reset()\n", + "state" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(1000):\n", + " env.step(env.action_space.sample())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = {}\n", + "for veh_id in env.k.vehicle.get_ids():\n", + " print(veh_id)\n", + " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.step(rl_actions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(200):\n", + " rl_actions = {}\n", + " for veh_id in env.k.vehicle.get_ids():\n", + " # print(veh_id)\n", + " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)\n", + " print(env.get_state())\n", + " env.step(rl_actions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flow.controllers.car_following_models import IDMController\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vehicle_id = env.k.vehicle.get_ids()[0]\n", + "vehicle_id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "car_following_params = SumoCarFollowingParams()\n", + "idm_controller = IDMController(vehicle_id, car_following_params=car_following_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "idm_controller.get_action(env)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ob, rew, done, _ = env.step(rl_actions)" + ] + }, + 
{ + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from env_params import flow_params\n", + "create_env, _ = make_create_env(flow_params)\n", + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.k.vehicle.get_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flow", + "language": "python", + "name": "flow" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flow/dagger/env_params_test.py b/flow/dagger/env_params_test.py new file mode 100644 index 000000000..ffd870e86 --- /dev/null +++ b/flow/dagger/env_params_test.py @@ -0,0 +1,47 @@ +from flow.networks.ring import RingNetwork +name = "ring_example" + +from flow.core.params import VehicleParams +vehicles = VehicleParams() + +from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import ContinuousRouter +from imitating_controller import ImitatingController +vehicles.add("human", + acceleration_controller=(IDMController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=22) + +from flow.networks.ring import ADDITIONAL_NET_PARAMS +from flow.core.params import NetParams +net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) + +from flow.core.params import InitialConfig +initial_config = InitialConfig(spacing="uniform", perturbation=1) + +from flow.core.params import TrafficLightParams +traffic_lights = TrafficLightParams() + +from flow.envs.ring.accel import AccelEnv +from flow.core.params import SumoParams 
+sim_params = SumoParams(sim_step=0.1, render=False, emission_path='data') + +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.core.params import EnvParams +env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) + +flow_params_test = dict( + exp_tag='ring_example', + env_name=AccelEnv, + network=RingNetwork, + simulator='traci', + sim=sim_params, + env=env_params, + net=net_params, + veh=vehicles, + initial=initial_config, + tls=traffic_lights, +) + +# number of time steps +flow_params_test['env'].horizon = 3000 diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py new file mode 100644 index 000000000..1abe33995 --- /dev/null +++ b/flow/dagger/imitating_agent.py @@ -0,0 +1,25 @@ +import numpy as np +import tensorflow as tf +import time +from imitating_controller import * +from replay_buffer + +class Imitating_Agent(object): + def __init__(self, sess, env, params): + self.env = env + self.sess = sess + self.params = params + + self.policy = Imitator_Policy(sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate']) + + self.replay_buffer = ReplayBuffer(self.params['replay_buffer_size']) + + + def train(self, obs, acts): + self.policy.update(obs, acts) + + def add_to_replay_buffer(self, rollout_list): + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py new file mode 100644 index 000000000..c3e03415f --- /dev/null +++ b/flow/dagger/imitating_controller.py @@ -0,0 +1,78 @@ +import numpy as np +import tensorflow +from tensorflow import keras +import tensorflow as tf +from utils import * +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Dense 
+from tensorflow.keras.activations import * + + + +class ImitatingController(BaseController): + + def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + + BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + self.model = Sequential() + self.build_network() + + + + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + def build_network(self): + self.model.add(Dense(self.size, input_dim=self.obs_dim, activation='tanh')) + for _ in range(self.num_layers): + self.model.add(Dense(self.size, activation='relu')) + # No activation + self.model.add(Dense(self.action_dim)) + self.model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) + + + def train(self, observation_batch, action_batch): + assert(self.training, "Policy must be trainable") + #print("Training: observation_batch is ", observation_batch) + #print("action_batch is ", action_batch) + print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) + print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) + + # print("ACTION BATCH: ", action_batch.shape) + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + # print("TEST BATCH: ", observation_batch) + history = self.model.fit(observation_batch, action_batch) + # print("LOSS: ", ret) + + def get_accel_from_observation(self, observation): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + # print("OBS: ", observation) + 
ret_val = self.model.predict(observation) + # print("ACCEL: ", ret_val) + # print("RET_VAL SHAPE", ret_val.shape) + return ret_val + + def get_accel(self, env): + # TODO make this get_accel(self, env) + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + observation = env.get_state() + return self.get_accel_from_observation(observation) + + def add_to_replay_buffer(self, rollout_list): + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py new file mode 100644 index 000000000..64b6798ca --- /dev/null +++ b/flow/dagger/imitating_controller2.py @@ -0,0 +1,94 @@ +import numpy as np +import tensorflow as tf +from utils import * +import tensorflow_probability as tfp +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer + + + +class ImitatingController(BaseController): + + def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + + BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + + + + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() + + + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + + def build_network(self): + 
self.define_placeholders() + self.define_forward_pass() + if self.training: + with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + self.define_train_op() + + def define_placeholders(self): + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + # print('DEBUG ', self.obs_dim) + self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + + if self.training: + self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + + def define_forward_pass(self): + pred_action = build_mlp(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.action_predictions = pred_action + print("ACTION PREDICTIONS TYPE ", type(self.action_predictions)) + + def define_train_op(self): + true_actions = self.action_labels_placeholder + predicted_actions = self.action_predictions + + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) + + def train(self, observation_batch, action_batch): + assert(self.training, "Policy must be trainable") + # print("ACTION BATCH: ", action_batch.shape) + print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) + print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + # print("TEST BATCH: ", observation_batch) + ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + # print("LOSS: ", ret) + + def get_accel_from_observation(self, observation): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + # print("OBS: ", observation) + ret_val = 
self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] + # print("ACCEL: ", ret_val) + # print("RET_VAL SHAPE", ret_val.shape) + return ret_val + + def get_accel(self, env): + # TODO make this get_accel(self, env) + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + observation = env.get_state() + return self.get_accel_from_observation(observation) + + def add_to_replay_buffer(self, rollout_list): + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/replay_buffer.py b/flow/dagger/replay_buffer.py new file mode 100644 index 000000000..c7bbb56d0 --- /dev/null +++ b/flow/dagger/replay_buffer.py @@ -0,0 +1,60 @@ +import time +import numpy as np +import tensorflow as tf +import gym +import os +from utils import * + + +class ReplayBuffer(object): + def __init__(self, max_size=100000): + + self.max_size = max_size + + # store each rollout + self.rollouts = [] + + # store component arrays from each rollout + self.observations = None + self.actions = None + self.expert_actions = None + self.rewards = None + self.next_observations = None + self.terminals = None + + + def add_rollouts(self, rollouts_list): + """ + Add a list of rollouts to the replay buffer + """ + + for rollout in rollouts_list: + self.rollouts.append(rollout) + + observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) + if self.observations is None: + self.observations = observations[-self.max_size:] + self.actions = actions[-self.max_size:] + self.expert_actions = expert_actions[-self.max_size:] + self.rewards = rewards[-self.max_size:] + self.next_observations = next_observations[-self.max_size:] + self.terminals = terminals[-self.max_size:] + else: + self.observations = np.concatenate([self.observations, observations])[-self.max_size:] + 
print("SHAPES: ", self.actions.shape, actions.shape) + self.actions = np.concatenate([self.actions, actions])[-self.max_size:] + self.expert_actions = np.concatenate([self.expert_actions, expert_actions])[-self.max_size:] + self.rewards = np.concatenate([self.rewards, rewards])[-self.max_size:] + self.next_observations = np.concatenate([self.next_observations, next_observations])[-self.max_size:] + self.terminals = np.concatenate([self.terminals, terminals])[-self.max_size:] + + def sample_batch(self, batch_size): + """ + Sample a batch of data (with size batch_size) from replay buffer. + Returns data in separate numpy arrays of observations, actions, rewards, next_observations, terminals + """ + assert self.observations is not None and self.actions is not None and self.expert_actions is not None and self.rewards is not None and self.next_observations is not None and self.terminals is not None + + size = len(self.observations) + rand_inds = np.random.randint(0, size, batch_size) + return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds], self.rewards[rand_inds], self.next_observations[rand_inds], self.terminals[rand_inds] diff --git a/flow/dagger/run.py b/flow/dagger/run.py new file mode 100644 index 000000000..40e200505 --- /dev/null +++ b/flow/dagger/run.py @@ -0,0 +1,73 @@ +import os +import time +import numpy as np +import tensorflow as tf +from trainer import Trainer +from flow.controllers.car_following_models import IDMController + + +class Runner(object): + + def __init__(self, params): + + + # agent_params = { + # 'n_layers': params['n_layers'], + # 'size': params['size'], + # 'learning_rate': params['learning_rate'], + # 'max_replay_buffer_size': params['max_replay_buffer_size'], + # } + # + # self.params = params + # self.params['agent_class'] = BCAgent + # self.params['agent_params'] = agent_params + + # initialize trainer + self.params = params + self.trainer = Trainer(params) + + + + def run_training_loop(self): + + 
self.trainer.run_training_loop(n_iter=self.params['n_iter']) + + +def main(): + import argparse + parser = argparse.ArgumentParser() + # parser.add_argument('--expert_policy_file', '-epf', type=str, required=True) # relative to where you're running this script from + # parser.add_argument('--expert_data', '-ed', type=str, required=True) #relative to where you're running this script from + # parser.add_argument('--env_name', '-env', type=str, help='choices: Ant-v2, Humanoid-v2, Walker-v2, HalfCheetah-v2, Hopper-v2', required=True) + # parser.add_argument('--exp_name', '-exp', type=str, default='pick an experiment name', required=True) + # parser.add_argument('--do_dagger', action='store_true') + parser.add_argument('--ep_len', type=int) + + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) + parser.add_argument('--n_iter', '-n', type=int, default=5) + + parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=5000) + + parser.add_argument('--train_batch_size', type=int, + default=100) # number of sampled data points to be used per gradient/train step + + parser.add_argument('--num_layers', type=int, default=2) # depth, of policy to be learned + parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned + parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning + parser.add_argument('--replay_buffer_size', type=int, default=1000000) + + args = parser.parse_args() + + # convert args to dictionary + params = vars(args) + + assert args.n_iter>1, ('DAGGER needs more than 1 iteration (n_iter>1) of training, to iteratively query the expert and train (after 1st warmstarting from behavior cloning).') + + + # run training + train = Runner(params) + 
train.run_training_loop() + +if __name__ == "__main__": + main() diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py new file mode 100644 index 000000000..f48b9058d --- /dev/null +++ b/flow/dagger/trainer.py @@ -0,0 +1,113 @@ +import time +from collections import OrderedDict +import pickle +import numpy as np +import tensorflow as tf +import gym +import os +from flow.utils.registry import make_create_env +from env_params_test import flow_params_test +from imitating_controller2 import ImitatingController +from flow.controllers.car_following_models import IDMController +from flow.core.params import SumoCarFollowingParams +from utils import * + +class Trainer(object): + + def __init__(self, params): + self.params = params + self.sess = create_tf_session() + + # TODO: replace this with appropriate Flow env + # print('ERROR CHECK ', flow_params_test['exp_tag']) + create_env, _ = make_create_env(flow_params_test) + self.env = create_env() + self.env.reset() + + self.vehicle_id = self.env.k.vehicle.get_ids()[0] + + obs_dim = self.env.observation_space.shape[0] + + # TODO: make sure this is correct + action_dim = (1,)[0] + self.params['action_dim'] = action_dim + self.params['obs_dim'] = obs_dim + + car_following_params = SumoCarFollowingParams() + self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params) + self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + + tf.global_variables_initializer().run(session=self.sess) + + + def run_training_loop(self, n_iter): + """ + :param n_iter: number of (dagger) iterations + :param collect_policy: + :param eval_policy: + :param initial_expertdata: + :param relabel_with_expert: whether to perform dagger + :param start_relabel_with_expert: iteration at which to 
start relabel with expert + :param expert_policy: + """ + + # init vars at beginning of training + self.total_envsteps = 0 + self.start_time = time.time() + + for itr in range(n_iter): + print("\n\n********** Iteration %i ************"%itr) + + # collect trajectories, to be used for training + if itr == 0: + training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) + else: + training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) + + paths, envsteps_this_batch = training_returns + self.total_envsteps += envsteps_this_batch + + # add collected data to replay buffer + self.controller.add_to_replay_buffer(paths) + + # train agent (using sampled data from replay buffer) + loss = self.train_controller() + + def collect_training_trajectories(self, itr, batch_size): + """ + :param itr: + :param load_initial_expertdata: path to expert data pkl file + :param collect_policy: the current policy using which we collect data + :param batch_size: the number of transitions we collect + :return: + paths: a list trajectories + envsteps_this_batch: the sum over the numbers of environment steps in paths + train_video_paths: paths which also contain videos for visualization purposes + """ + + if itr == 0: + collect_controller = self.expert_controller + else: + collect_controller = self.controller + + print("\nCollecting data to be used for training...") + paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) + + return paths, envsteps_this_batch + + def train_controller(self): + print('Training controller using sampled data from replay buffer') + for train_step in range(self.params['num_agent_train_steps_per_iter']): + # TODO: fix this + ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) + self.controller.train(ob_batch, expert_ac_batch) 
+ + + # def do_relabel_with_expert(self, paths): + # print("Relabelling collected observations with labels from an expert policy...") + # + # for i in range(len(paths)): + # acs = self.expert_policy.get_action(paths[i]["observation"]) + # paths[i]["action"] = acs + # + # return paths diff --git a/flow/dagger/useless.py b/flow/dagger/useless.py new file mode 100644 index 000000000..86f3ee9ad --- /dev/null +++ b/flow/dagger/useless.py @@ -0,0 +1,147 @@ +# """Benchmark for bottleneck0. +# Bottleneck in which the actions are specifying a desired velocity in a segment +# of space. The autonomous penetration rate in this example is 10%. +# - **Action Dimension**: (?, ) +# - **Observation Dimension**: (?, ) +# - **Horizon**: 1000 steps +# """ +# from flow.envs import BottleneckDesiredVelocityEnv +# from flow.networks import BottleneckNetwork +# from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ +# InFlows, SumoCarFollowingParams, SumoLaneChangeParams +# from flow.core.params import TrafficLightParams +# from flow.core.params import VehicleParams +# from flow.controllers import RLController, ContinuousRouter +# +# # time horizon of a single rollout +# HORIZON = 1500 +# +# SCALING = 1 +# NUM_LANES = 4 * SCALING # number of lanes in the widest highway +# DISABLE_TB = True +# DISABLE_RAMP_METER = True +# AV_FRAC = 0.10 +# +# vehicles = VehicleParams() +# vehicles.add( +# veh_id="human", +# routing_controller=(ContinuousRouter, {}), +# car_following_params=SumoCarFollowingParams( +# speed_mode=9, +# ), +# lane_change_params=SumoLaneChangeParams( +# lane_change_mode=0, +# ), +# num_vehicles=1 * SCALING) +# vehicles.add( +# veh_id="rl", +# acceleration_controller=(RLController, {}), +# routing_controller=(ContinuousRouter, {}), +# car_following_params=SumoCarFollowingParams( +# speed_mode=9, +# ), +# lane_change_params=SumoLaneChangeParams( +# lane_change_mode=0, +# ), +# num_vehicles=1 * SCALING) +# +# controlled_segments = [("1", 1, False), ("2", 
2, True), ("3", 2, True), +# ("4", 2, True), ("5", 1, False)] +# num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] +# +# additional_env_params = { +# "target_velocity": 40, +# "disable_tb": True, +# "disable_ramp_metering": True, +# "controlled_segments": controlled_segments, +# "symmetric": False, +# "observed_segments": num_observed_segments, +# "reset_inflow": False, +# "lane_change_duration": 5, +# "max_accel": 3, +# "max_decel": 3, +# "inflow_range": [1200, 2500] +# } +# +# # flow rate +# flow_rate = 2000 * SCALING +# +# # percentage of flow coming out of each lane +# inflow = InFlows() +# inflow.add( +# veh_type="human", +# edge="1", +# vehs_per_hour=flow_rate * (1 - AV_FRAC), +# departLane="random", +# departSpeed=10) +# inflow.add( +# veh_type="rl", +# edge="1", +# vehs_per_hour=flow_rate * AV_FRAC, +# departLane="random", +# departSpeed=10) +# +# traffic_lights = TrafficLightParams() +# if not DISABLE_TB: +# traffic_lights.add(node_id="2") +# if not DISABLE_RAMP_METER: +# traffic_lights.add(node_id="3") +# +# additional_net_params = {"scaling": SCALING, "speed_limit": 23} +# net_params = NetParams( +# inflows=inflow, +# additional_params=additional_net_params) +# +# flow_params = dict( +# # name of the experiment +# exp_tag="bottleneck_0", +# +# # name of the flow environment the experiment is running on +# env_name=BottleneckDesiredVelocityEnv, +# +# # name of the network class the experiment is running on +# network=BottleneckNetwork, +# +# # simulator that is used by the experiment +# simulator='traci', +# +# # sumo-related parameters (see flow.core.params.SumoParams) +# sim=SumoParams( +# sim_step=0.5, +# render=False, +# print_warnings=False, +# restart_instance=True, +# ), +# +# # environment related parameters (see flow.core.params.EnvParams) +# env=EnvParams( +# warmup_steps=40, +# sims_per_step=1, +# horizon=HORIZON, +# additional_params=additional_env_params, +# ), +# +# # network-related parameters (see 
flow.core.params.NetParams and the +# # network's documentation or ADDITIONAL_NET_PARAMS component) +# net=NetParams( +# inflows=inflow, +# additional_params=additional_net_params, +# ), +# +# # vehicles to be placed in the network at the start of a rollout (see +# # flow.core.params.VehicleParams) +# veh=vehicles, +# +# # parameters specifying the positioning of vehicles upon initialization/ +# # reset (see flow.core.params.InitialConfig) +# initial=InitialConfig( +# spacing="uniform", +# min_gap=5, +# lanes_distribution=float("inf"), +# edges_distribution=["2", "3", "4", "5"], +# ), +# +# # traffic lights to be introduced to specific nodes (see +# # flow.core.params.TrafficLightParams) +# tls=traffic_lights, +# ) diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py new file mode 100644 index 000000000..9074cb205 --- /dev/null +++ b/flow/dagger/utils.py @@ -0,0 +1,114 @@ +import tensorflow as tf +import os +import numpy as np +import math + +# class agnostic helper functions + +def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): + observation = env.reset() + + observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] + traj_length = 0 + + while True: + observations.append(observation) + action = controller.get_action(env) + assert action is not None, "action is None" + assert (not math.isnan(action)), "action is a nan" + + actions.append(action) + + expert_action = expert_controller.get_action(env) + assert expert_action is not None, "expert actio is None" + assert (not math.isnan(expert_action)), "expert action is a nan" + expert_actions.append(expert_action) + + rl_actions = {} + for veh_id in env.k.vehicle.get_ids(): + if veh_id == vehicle_id: + rl_actions[veh_id] = action + else: + rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) + + observation, reward, done, _ = env.step(rl_actions) + traj_length += 1 + next_observations.append(observation) 
+ rewards.append(reward) + terminate_rollout = traj_length == max_trajectory_length or done + terminals.append(terminate_rollout) + + if terminate_rollout: + break + + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + + +def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batch_timesteps, max_trajectory_length): + total_envsteps = 0 + trajectories = [] + + while total_envsteps < min_batch_timesteps: + trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + trajectories.append(trajectory) + + traj_env_steps = len(trajectory["rewards"]) + total_envsteps += traj_env_steps + + return trajectories, total_envsteps + +def traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals): + return {"observations" : np.array(observations, dtype=np.float32), + "actions" : np.array(actions, dtype=np.float32), + "expert_actions": np.array(expert_actions, dtype=np.float32), + "rewards" : np.array(rewards, dtype=np.float32), + "next_observations": np.array(next_observations, dtype=np.float32), + "terminals": np.array(terminals, dtype=np.float32)} + +def unpack_rollouts(rollouts_list): + """ + Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays + rollouts: list of rollout dictionaries + rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" + return separate np arrays of observations, actions, rewards, next_observations, and is_terminals + """ + observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) + actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) + expert_actions = np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) + rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) + next_observations = 
np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) + terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) + + return observations, actions, expert_actions, rewards, next_observations, terminals + + +# Below are tensorflow related functions +def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): + """ + Builds a MLP + + arguments: + input_placeholder: placeholder variable for the state (batch_size, input_size) + scope: variable scope of the network + + n_layers: number of hidden layers + size: dimension of each hidden layer + activation: activation of each hidden layer + + output_size: size of the output layer + output_activation: activation of the output layer + + returns: + output_placeholder: the result of a forward pass through the hidden layers + the output layer + """ + output_placeholder = input_placeholder + with tf.variable_scope(scope): + for _ in range(n_layers): + output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) + output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) + return output_placeholder + +def create_tf_session(): + config = tf.ConfigProto(device_count={'GPU': 0}) + sess = tf.Session(config=config) + return sess From 1fc027d95ac18248ca0df1e7e5c0dbc622c6ca19 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 26 Mar 2020 12:48:08 -0700 Subject: [PATCH 02/57] Changed environment to be single agent RL --- flow/dagger/.idea/dagger.iml | 12 ++++ flow/dagger/Untitled.ipynb | 103 +++++++++++++++++++----------- flow/dagger/env_params_test.py | 110 +++++++++++++++++++++----------- flow/dagger/env_params_test2.py | 47 ++++++++++++++ flow/dagger/trainer.py | 7 +- flow/dagger/utils.py | 30 ++++++--- 6 files changed, 221 insertions(+), 88 deletions(-) create mode 100644 flow/dagger/.idea/dagger.iml create mode 100644 flow/dagger/env_params_test2.py diff --git 
a/flow/dagger/.idea/dagger.iml b/flow/dagger/.idea/dagger.iml new file mode 100644 index 000000000..0bc0e0321 --- /dev/null +++ b/flow/dagger/.idea/dagger.iml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/flow/dagger/Untitled.ipynb b/flow/dagger/Untitled.ipynb index a6153ffc6..0f1ac9809 100644 --- a/flow/dagger/Untitled.ipynb +++ b/flow/dagger/Untitled.ipynb @@ -43,29 +43,14 @@ "metadata": {}, "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n" + "ename": "ModuleNotFoundError", + "evalue": "No module named 'env_params'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0menv_params\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'env_params'" ] } ], @@ -283,7 +268,7 @@ "metadata": {}, "outputs": [], "source": [ - "from env_params_akash import name" + "from env_params_test import name" ] }, { @@ -297,25 +282,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ - "from env_params_akash import flow_params_akash" + "from env_params_test import flow_params" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "create_env, _ = make_create_env(flow_params_akash)" + "create_env, _ = make_create_env(flow_params)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -324,9 +309,31 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "-----------------------\n", + "ring length: 265\n", + "v_max: 5.37714246265477\n", + "-----------------------\n" + ] + }, + { + "data": { + "text/plain": [ + "array([ 0.31246011, -0.00413767, 0.04496073])" + ] + }, + "execution_count": 8, + 
"metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "state = env.reset()\n", "state" @@ -334,24 +341,42 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ - "for i in range(1000):\n", - " env.step(env.action_space.sample())" + "vehicle_id = 'rl_0'" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ - "rl_actions = {}\n", - "for veh_id in env.k.vehicle.get_ids():\n", - " print(veh_id)\n", - " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)" + "from flow.controllers.car_following_models import IDMController\n", + "car_following_params = SumoCarFollowingParams()\n", + "idm_controller = IDMController(vehicle_id, car_following_params=car_following_params)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(array([ 0.31200989, -0.00526746, 0.04493147]), 0.6436939709782903, False, {})" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.step(idm_controller.get_action(env))" ] }, { diff --git a/flow/dagger/env_params_test.py b/flow/dagger/env_params_test.py index ffd870e86..20ced1ce9 100644 --- a/flow/dagger/env_params_test.py +++ b/flow/dagger/env_params_test.py @@ -1,47 +1,85 @@ -from flow.networks.ring import RingNetwork -name = "ring_example" +"""Ring road example. +Trains a single autonomous vehicle to stabilize the flow of 21 human-driven +vehicles in a variable length ring road. 
+""" +from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams +from flow.core.params import VehicleParams, SumoCarFollowingParams +from flow.controllers import RLController, IDMController, ContinuousRouter +from flow.envs import WaveAttenuationPOEnv +from flow.networks import RingNetwork -from flow.core.params import VehicleParams +# time horizon of a single rollout +HORIZON = 3000 +# number of rollouts per training iteration +N_ROLLOUTS = 20 +# number of parallel workers +N_CPUS = 2 + +# We place one autonomous vehicle and 22 human-driven vehicles in the network vehicles = VehicleParams() +vehicles.add( + veh_id="human", + acceleration_controller=(IDMController, { + "noise": 0.2 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0 + ), + routing_controller=(ContinuousRouter, {}), + num_vehicles=21) +vehicles.add( + veh_id="rl", + acceleration_controller=(RLController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=1) -from flow.controllers.car_following_models import IDMController -from flow.controllers.routing_controllers import ContinuousRouter -from imitating_controller import ImitatingController -vehicles.add("human", - acceleration_controller=(IDMController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=22) +flow_params = dict( + # name of the experiment + exp_tag="stabilizing_the_ring", -from flow.networks.ring import ADDITIONAL_NET_PARAMS -from flow.core.params import NetParams -net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) + # name of the flow environment the experiment is running on + env_name=WaveAttenuationPOEnv, -from flow.core.params import InitialConfig -initial_config = InitialConfig(spacing="uniform", perturbation=1) + # name of the network class the experiment is running on + network=RingNetwork, -from flow.core.params import TrafficLightParams -traffic_lights = TrafficLightParams() + # simulator that is used by the experiment + simulator='traci', -from 
flow.envs.ring.accel import AccelEnv -from flow.core.params import SumoParams -sim_params = SumoParams(sim_step=0.1, render=False, emission_path='data') + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.1, + render=False, + restart_instance=False + ), -from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS -from flow.core.params import EnvParams -env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=750, + clip_actions=False, + additional_params={ + "max_accel": 1, + "max_decel": 1, + "ring_length": [220, 270], + }, + ), -flow_params_test = dict( - exp_tag='ring_example', - env_name=AccelEnv, - network=RingNetwork, - simulator='traci', - sim=sim_params, - env=env_params, - net=net_params, + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + additional_params={ + "length": 260, + "lanes": 1, + "speed_limit": 30, + "resolution": 40, + }, ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) veh=vehicles, - initial=initial_config, - tls=traffic_lights, -) -# number of time steps -flow_params_test['env'].horizon = 3000 + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/dagger/env_params_test2.py b/flow/dagger/env_params_test2.py new file mode 100644 index 000000000..7140af720 --- /dev/null +++ b/flow/dagger/env_params_test2.py @@ -0,0 +1,47 @@ +from flow.networks.ring import RingNetwork +name = "ring_example" + +from flow.core.params import VehicleParams +vehicles = VehicleParams() + +from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import ContinuousRouter +from 
imitating_controller2 import ImitatingController +vehicles.add("human", + acceleration_controller=(IDMController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=22) + +from flow.networks.ring import ADDITIONAL_NET_PARAMS +from flow.core.params import NetParams +net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) + +from flow.core.params import InitialConfig +initial_config = InitialConfig(spacing="uniform", perturbation=1) + +from flow.core.params import TrafficLightParams +traffic_lights = TrafficLightParams() + +from flow.envs.ring.accel import AccelEnv +from flow.core.params import SumoParams +sim_params = SumoParams(sim_step=0.1, render=False, emission_path='data') + +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.core.params import EnvParams +env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) + +flow_params_test = dict( + exp_tag='ring_example', + env_name=AccelEnv, + network=RingNetwork, + simulator='traci', + sim=sim_params, + env=env_params, + net=net_params, + veh=vehicles, + initial=initial_config, + tls=traffic_lights, +) + +# number of time steps +flow_params_test['env'].horizon = 3000 diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index f48b9058d..9dd7ae326 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -6,7 +6,7 @@ import gym import os from flow.utils.registry import make_create_env -from env_params_test import flow_params_test +from env_params_test import flow_params from imitating_controller2 import ImitatingController from flow.controllers.car_following_models import IDMController from flow.core.params import SumoCarFollowingParams @@ -20,11 +20,12 @@ def __init__(self, params): # TODO: replace this with appropriate Flow env # print('ERROR CHECK ', flow_params_test['exp_tag']) - create_env, _ = make_create_env(flow_params_test) + create_env, _ = make_create_env(flow_params) self.env = create_env() self.env.reset() - self.vehicle_id = 
self.env.k.vehicle.get_ids()[0] + assert 'rl_0' in self.env.k.vehicle.get_ids() + self.vehicle_id = 'rl_0' obs_dim = self.env.observation_space.shape[0] diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py index 9074cb205..00ae864aa 100644 --- a/flow/dagger/utils.py +++ b/flow/dagger/utils.py @@ -6,32 +6,42 @@ # class agnostic helper functions def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): + print("CONTROLLER: ", controller) observation = env.reset() + print("VEHICLE ID: ", vehicle_id) + print("VEHICLE IDS: ", env.k.vehicle.get_ids()) + assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" + observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] traj_length = 0 while True: observations.append(observation) action = controller.get_action(env) - assert action is not None, "action is None" - assert (not math.isnan(action)), "action is a nan" + #assert action is not None, "action is None" + #assert (not math.isnan(action)), "action is a nan" + assert not (len(env.k.vehicle.get_edge(vehicle_id)) == 0), "Case One" + assert not (env.k.vehicle.get_edge(vehicle_id)[0] == ":"), "Case Two" actions.append(action) expert_action = expert_controller.get_action(env) - assert expert_action is not None, "expert actio is None" + assert env is not None, "environment is None" + assert expert_action is not None, "expert action is None" assert (not math.isnan(expert_action)), "expert action is a nan" expert_actions.append(expert_action) - rl_actions = {} - for veh_id in env.k.vehicle.get_ids(): - if veh_id == vehicle_id: - rl_actions[veh_id] = action - else: - rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) + # rl_actions = {} + # for veh_id in env.k.vehicle.get_ids(): + # if veh_id == vehicle_id: + # rl_actions[veh_id] = action + # else: + # rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) + + # observation, reward, 
done, _ = env.step(rl_actions) + observation, reward, done, _ = env.step(action) - observation, reward, done, _ = env.step(rl_actions) traj_length += 1 next_observations.append(observation) rewards.append(reward) From d01aeb5548ab583c4aae03ad28e4c5607dd4fcbe Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 26 Mar 2020 13:26:44 -0700 Subject: [PATCH 03/57] cleaned up code --- flow/dagger/imitating_agent.py | 3 ++- flow/dagger/imitating_controller.py | 12 +++--------- flow/dagger/imitating_controller2.py | 9 ++------- flow/dagger/replay_buffer.py | 1 - flow/dagger/run.py | 19 ------------------- flow/dagger/trainer.py | 3 +-- flow/dagger/utils.py | 9 +++++++-- 7 files changed, 15 insertions(+), 41 deletions(-) diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py index 1abe33995..4e9e3f443 100644 --- a/flow/dagger/imitating_agent.py +++ b/flow/dagger/imitating_agent.py @@ -1,10 +1,11 @@ import numpy as np import tensorflow as tf import time -from imitating_controller import * +from imitating_controller2 import * from replay_buffer class Imitating_Agent(object): + # ignore this class! 
def __init__(self, sess, env, params): self.env = env self.sess = sess diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py index c3e03415f..f85040856 100644 --- a/flow/dagger/imitating_controller.py +++ b/flow/dagger/imitating_controller.py @@ -12,6 +12,7 @@ class ImitatingController(BaseController): + # Implementation in Keras just for testing def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): @@ -44,29 +45,22 @@ def build_network(self): def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") - #print("Training: observation_batch is ", observation_batch) - #print("action_batch is ", action_batch) + print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) - # print("ACTION BATCH: ", action_batch.shape) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - # print("TEST BATCH: ", observation_batch) history = self.model.fit(observation_batch, action_batch) - # print("LOSS: ", ret) def get_accel_from_observation(self, observation): # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays if len(observation.shape)<=1: observation = observation[None] - # print("OBS: ", observation) ret_val = self.model.predict(observation) - # print("ACCEL: ", ret_val) - # print("RET_VAL SHAPE", ret_val.shape) + return ret_val def get_accel(self, env): - # TODO make this get_accel(self, env) # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays observation = env.get_state() return self.get_accel_from_observation(observation) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py index 64b6798ca..ce3b26b0e 100644 --- 
a/flow/dagger/imitating_controller2.py +++ b/flow/dagger/imitating_controller2.py @@ -8,6 +8,7 @@ class ImitatingController(BaseController): + # Implementation in Tensorflow def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): @@ -43,7 +44,6 @@ def build_network(self): def define_placeholders(self): self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) - # print('DEBUG ', self.obs_dim) self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) if self.training: @@ -67,22 +67,17 @@ def train(self, observation_batch, action_batch): print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - # print("TEST BATCH: ", observation_batch) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - # print("LOSS: ", ret) def get_accel_from_observation(self, observation): # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays if len(observation.shape)<=1: observation = observation[None] - # print("OBS: ", observation) ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - # print("ACCEL: ", ret_val) - # print("RET_VAL SHAPE", ret_val.shape) + return ret_val def get_accel(self, env): - # TODO make this get_accel(self, env) # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays observation = env.get_state() return self.get_accel_from_observation(observation) diff --git a/flow/dagger/replay_buffer.py b/flow/dagger/replay_buffer.py index 
c7bbb56d0..a6717fc90 100644 --- a/flow/dagger/replay_buffer.py +++ b/flow/dagger/replay_buffer.py @@ -41,7 +41,6 @@ def add_rollouts(self, rollouts_list): self.terminals = terminals[-self.max_size:] else: self.observations = np.concatenate([self.observations, observations])[-self.max_size:] - print("SHAPES: ", self.actions.shape, actions.shape) self.actions = np.concatenate([self.actions, actions])[-self.max_size:] self.expert_actions = np.concatenate([self.expert_actions, expert_actions])[-self.max_size:] self.rewards = np.concatenate([self.rewards, rewards])[-self.max_size:] diff --git a/flow/dagger/run.py b/flow/dagger/run.py index 40e200505..162059d5f 100644 --- a/flow/dagger/run.py +++ b/flow/dagger/run.py @@ -10,24 +10,10 @@ class Runner(object): def __init__(self, params): - - # agent_params = { - # 'n_layers': params['n_layers'], - # 'size': params['size'], - # 'learning_rate': params['learning_rate'], - # 'max_replay_buffer_size': params['max_replay_buffer_size'], - # } - # - # self.params = params - # self.params['agent_class'] = BCAgent - # self.params['agent_params'] = agent_params - # initialize trainer self.params = params self.trainer = Trainer(params) - - def run_training_loop(self): self.trainer.run_training_loop(n_iter=self.params['n_iter']) @@ -36,11 +22,6 @@ def run_training_loop(self): def main(): import argparse parser = argparse.ArgumentParser() - # parser.add_argument('--expert_policy_file', '-epf', type=str, required=True) # relative to where you're running this script from - # parser.add_argument('--expert_data', '-ed', type=str, required=True) #relative to where you're running this script from - # parser.add_argument('--env_name', '-env', type=str, help='choices: Ant-v2, Humanoid-v2, Walker-v2, HalfCheetah-v2, Hopper-v2', required=True) - # parser.add_argument('--exp_name', '-exp', type=str, default='pick an experiment name', required=True) - # parser.add_argument('--do_dagger', action='store_true') parser.add_argument('--ep_len', 
type=int) parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index 9dd7ae326..3c72b0e63 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -18,12 +18,11 @@ def __init__(self, params): self.params = params self.sess = create_tf_session() - # TODO: replace this with appropriate Flow env - # print('ERROR CHECK ', flow_params_test['exp_tag']) create_env, _ = make_create_env(flow_params) self.env = create_env() self.env.reset() + # might need to replace this hardcode assert 'rl_0' in self.env.k.vehicle.get_ids() self.vehicle_id = 'rl_0' diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py index 00ae864aa..c5af25c61 100644 --- a/flow/dagger/utils.py +++ b/flow/dagger/utils.py @@ -6,7 +6,9 @@ # class agnostic helper functions def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): - print("CONTROLLER: ", controller) + + print("COLLECTING CONTROLLER: ", controller) + print("EXPERT CONTROLLER: ", expert_controller) observation = env.reset() print("VEHICLE ID: ", vehicle_id) @@ -22,6 +24,7 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec #assert action is not None, "action is None" #assert (not math.isnan(action)), "action is a nan" assert not (len(env.k.vehicle.get_edge(vehicle_id)) == 0), "Case One" + # point of error: assert not (env.k.vehicle.get_edge(vehicle_id)[0] == ":"), "Case Two" actions.append(action) @@ -40,6 +43,7 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec # rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) # observation, reward, done, _ = env.step(rl_actions) + observation, reward, done, _ = env.step(action) traj_length += 1 @@ -93,9 +97,10 @@ def unpack_rollouts(rollouts_list): # Below are tensorflow related functions + def 
build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ - Builds a MLP + Builds a feedfoward neural net arguments: input_placeholder: placeholder variable for the state (batch_size, input_size) From 920dd7346175e4f76b4e46b6bb2bacb34b0041ae Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sat, 28 Mar 2020 18:13:41 -0700 Subject: [PATCH 04/57] check for None or Nan action before adding to buffer --- flow/dagger/env_params_test2.py | 2 +- flow/dagger/imitating_agent.py | 4 ++-- flow/dagger/imitating_controller.py | 2 +- flow/dagger/imitating_controller2.py | 6 +++--- flow/dagger/replay_buffer.py | 2 ++ flow/dagger/run.py | 2 +- flow/dagger/trainer.py | 3 ++- flow/dagger/utils.py | 31 +++++++++------------------- 8 files changed, 22 insertions(+), 30 deletions(-) diff --git a/flow/dagger/env_params_test2.py b/flow/dagger/env_params_test2.py index 7140af720..ffd870e86 100644 --- a/flow/dagger/env_params_test2.py +++ b/flow/dagger/env_params_test2.py @@ -6,7 +6,7 @@ from flow.controllers.car_following_models import IDMController from flow.controllers.routing_controllers import ContinuousRouter -from imitating_controller2 import ImitatingController +from imitating_controller import ImitatingController vehicles.add("human", acceleration_controller=(IDMController, {}), routing_controller=(ContinuousRouter, {}), diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py index 4e9e3f443..8aabadace 100644 --- a/flow/dagger/imitating_agent.py +++ b/flow/dagger/imitating_agent.py @@ -1,11 +1,11 @@ import numpy as np import tensorflow as tf import time -from imitating_controller2 import * +from imitating_controller import * from replay_buffer class Imitating_Agent(object): - # ignore this class! + # ignore this class! 
def __init__(self, sess, env, params): self.env = env self.sess = sess diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py index f85040856..eadeb0ab8 100644 --- a/flow/dagger/imitating_controller.py +++ b/flow/dagger/imitating_controller.py @@ -47,7 +47,7 @@ def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) - print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) + assert (not np.any(np.isnan(action_batch))), "TRAIN ERROR ACTION NAN" action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) history = self.model.fit(observation_batch, action_batch) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py index ce3b26b0e..4b877877e 100644 --- a/flow/dagger/imitating_controller2.py +++ b/flow/dagger/imitating_controller2.py @@ -8,7 +8,7 @@ class ImitatingController(BaseController): - # Implementation in Tensorflow + # Implementation in Tensorflow def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): @@ -64,8 +64,8 @@ def define_train_op(self): def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") # print("ACTION BATCH: ", action_batch.shape) - print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) - print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) + # print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) + # print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) diff --git a/flow/dagger/replay_buffer.py 
b/flow/dagger/replay_buffer.py index a6717fc90..1213b985e 100644 --- a/flow/dagger/replay_buffer.py +++ b/flow/dagger/replay_buffer.py @@ -32,6 +32,8 @@ def add_rollouts(self, rollouts_list): self.rollouts.append(rollout) observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) + assert (not np.any(np.isnan(expert_actions))), "REPLAY BUFFER ERROR" + if self.observations is None: self.observations = observations[-self.max_size:] self.actions = actions[-self.max_size:] diff --git a/flow/dagger/run.py b/flow/dagger/run.py index 162059d5f..67bac9dda 100644 --- a/flow/dagger/run.py +++ b/flow/dagger/run.py @@ -22,7 +22,7 @@ def run_training_loop(self): def main(): import argparse parser = argparse.ArgumentParser() - parser.add_argument('--ep_len', type=int) + parser.add_argument('--ep_len', type=int, default=3000) parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) parser.add_argument('--n_iter', '-n', type=int, default=5) diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index 3c72b0e63..b532c04c6 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -7,7 +7,7 @@ import os from flow.utils.registry import make_create_env from env_params_test import flow_params -from imitating_controller2 import ImitatingController +from imitating_controller import ImitatingController from flow.controllers.car_following_models import IDMController from flow.core.params import SumoCarFollowingParams from utils import * @@ -100,6 +100,7 @@ def train_controller(self): for train_step in range(self.params['num_agent_train_steps_per_iter']): # TODO: fix this ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) + print(expert_ac_batch) self.controller.train(ob_batch, expert_ac_batch) diff --git a/flow/dagger/utils.py 
b/flow/dagger/utils.py index c5af25c61..43f1f5fc2 100644 --- a/flow/dagger/utils.py +++ b/flow/dagger/utils.py @@ -19,31 +19,20 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec traj_length = 0 while True: - observations.append(observation) action = controller.get_action(env) - #assert action is not None, "action is None" - #assert (not math.isnan(action)), "action is a nan" - assert not (len(env.k.vehicle.get_edge(vehicle_id)) == 0), "Case One" - # point of error: - assert not (env.k.vehicle.get_edge(vehicle_id)[0] == ":"), "Case Two" + expert_action = expert_controller.get_action(env) + if (expert_action is None or math.isnan(expert_action)): + print("HIT CASE") + observation, reward, done, _ = env.step(action) + traj_length += 1 + terminate_rollout = traj_length == max_trajectory_length or done + if terminate_rollout: + break + continue + observations.append(observation) actions.append(action) - - expert_action = expert_controller.get_action(env) - assert env is not None, "environment is None" - assert expert_action is not None, "expert action is None" - assert (not math.isnan(expert_action)), "expert action is a nan" expert_actions.append(expert_action) - - # rl_actions = {} - # for veh_id in env.k.vehicle.get_ids(): - # if veh_id == vehicle_id: - # rl_actions[veh_id] = action - # else: - # rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) - - # observation, reward, done, _ = env.step(rl_actions) - observation, reward, done, _ = env.step(action) traj_length += 1 From 630a100f6ad37ed8b26600002a4e49108cf4e25e Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sat, 28 Mar 2020 18:39:36 -0700 Subject: [PATCH 05/57] Fixed dimension bug --- flow/dagger/env_params_test2.py | 2 +- flow/dagger/imitating_agent.py | 2 +- flow/dagger/imitating_controller.py | 6 ++---- flow/dagger/imitating_controller2.py | 4 ---- flow/dagger/trainer.py | 3 +-- flow/dagger/utils.py | 7 ++++--- 6 files changed, 9 insertions(+), 15 
deletions(-) diff --git a/flow/dagger/env_params_test2.py b/flow/dagger/env_params_test2.py index ffd870e86..7140af720 100644 --- a/flow/dagger/env_params_test2.py +++ b/flow/dagger/env_params_test2.py @@ -6,7 +6,7 @@ from flow.controllers.car_following_models import IDMController from flow.controllers.routing_controllers import ContinuousRouter -from imitating_controller import ImitatingController +from imitating_controller2 import ImitatingController vehicles.add("human", acceleration_controller=(IDMController, {}), routing_controller=(ContinuousRouter, {}), diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py index 8aabadace..f5b09dee3 100644 --- a/flow/dagger/imitating_agent.py +++ b/flow/dagger/imitating_agent.py @@ -1,7 +1,7 @@ import numpy as np import tensorflow as tf import time -from imitating_controller import * +from imitating_controller2 import * from replay_buffer class Imitating_Agent(object): diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py index eadeb0ab8..2537d70b8 100644 --- a/flow/dagger/imitating_controller.py +++ b/flow/dagger/imitating_controller.py @@ -40,14 +40,12 @@ def build_network(self): self.model.add(Dense(self.size, activation='relu')) # No activation self.model.add(Dense(self.action_dim)) - self.model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) + self.model.compile(loss='mean_squared_error', optimizer='adam') def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") - - print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) - assert (not np.any(np.isnan(action_batch))), "TRAIN ERROR ACTION NAN" + assert (not np.any(np.isnan(action_batch))), "NANs in training labels" action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) history = self.model.fit(observation_batch, action_batch) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py index 
4b877877e..65c7c9d1d 100644 --- a/flow/dagger/imitating_controller2.py +++ b/flow/dagger/imitating_controller2.py @@ -52,7 +52,6 @@ def define_placeholders(self): def define_forward_pass(self): pred_action = build_mlp(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) self.action_predictions = pred_action - print("ACTION PREDICTIONS TYPE ", type(self.action_predictions)) def define_train_op(self): true_actions = self.action_labels_placeholder @@ -63,9 +62,6 @@ def define_train_op(self): def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") - # print("ACTION BATCH: ", action_batch.shape) - # print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) - # print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index b532c04c6..3c72b0e63 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -7,7 +7,7 @@ import os from flow.utils.registry import make_create_env from env_params_test import flow_params -from imitating_controller import ImitatingController +from imitating_controller2 import ImitatingController from flow.controllers.car_following_models import IDMController from flow.core.params import SumoCarFollowingParams from utils import * @@ -100,7 +100,6 @@ def train_controller(self): for train_step in range(self.params['num_agent_train_steps_per_iter']): # TODO: fix this ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) - print(expert_ac_batch) self.controller.train(ob_batch, expert_ac_batch) diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py index 
43f1f5fc2..177fc620f 100644 --- a/flow/dagger/utils.py +++ b/flow/dagger/utils.py @@ -11,8 +11,6 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec print("EXPERT CONTROLLER: ", expert_controller) observation = env.reset() - print("VEHICLE ID: ", vehicle_id) - print("VEHICLE IDS: ", env.k.vehicle.get_ids()) assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] @@ -20,9 +18,12 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec while True: action = controller.get_action(env) + + if type(action) == np.ndarray: + action = action.flatten()[0] + expert_action = expert_controller.get_action(env) if (expert_action is None or math.isnan(expert_action)): - print("HIT CASE") observation, reward, done, _ = env.step(action) traj_length += 1 terminate_rollout = traj_length == max_trajectory_length or done From 722f4393515247907bd14260b803fd5fb6510533 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 12 Apr 2020 18:32:00 -0700 Subject: [PATCH 06/57] Added testing for imitation learning with dagger, more code cleanup --- flow/controllers/dagger/.idea/dagger.iml | 12 ++ flow/controllers/dagger/bottleneck_env.py | 150 +++++++++++++++ .../dagger/imitating_controller.py | 104 ++++++++++ flow/controllers/dagger/replay_buffer.py | 64 +++++++ flow/controllers/dagger/ring_env.py | 85 +++++++++ flow/controllers/dagger/run.py | 79 ++++++++ flow/controllers/dagger/trainer.py | 179 ++++++++++++++++++ flow/controllers/dagger/utils.py | 173 +++++++++++++++++ 8 files changed, 846 insertions(+) create mode 100644 flow/controllers/dagger/.idea/dagger.iml create mode 100644 flow/controllers/dagger/bottleneck_env.py create mode 100644 flow/controllers/dagger/imitating_controller.py create mode 100644 flow/controllers/dagger/replay_buffer.py create mode 100644 flow/controllers/dagger/ring_env.py create mode 100644 
flow/controllers/dagger/run.py create mode 100644 flow/controllers/dagger/trainer.py create mode 100644 flow/controllers/dagger/utils.py diff --git a/flow/controllers/dagger/.idea/dagger.iml b/flow/controllers/dagger/.idea/dagger.iml new file mode 100644 index 000000000..0bc0e0321 --- /dev/null +++ b/flow/controllers/dagger/.idea/dagger.iml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/flow/controllers/dagger/bottleneck_env.py b/flow/controllers/dagger/bottleneck_env.py new file mode 100644 index 000000000..820244a87 --- /dev/null +++ b/flow/controllers/dagger/bottleneck_env.py @@ -0,0 +1,150 @@ +"""Bottleneck example. +Bottleneck in which the actions are specifying a desired velocity +in a segment of space +""" +from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ + InFlows, SumoCarFollowingParams, SumoLaneChangeParams +from flow.core.params import TrafficLightParams +from flow.core.params import VehicleParams +from flow.controllers import RLController, ContinuousRouter, \ + SimLaneChangeController +from flow.envs import BottleneckDesiredVelocityEnv +from flow.networks import BottleneckNetwork + +# time horizon of a single rollout +HORIZON = 1000 +# number of parallel workers +N_CPUS = 2 +# number of rollouts per training iteration +N_ROLLOUTS = N_CPUS * 4 + +SCALING = 1 +NUM_LANES = 4 * SCALING # number of lanes in the widest highway +DISABLE_TB = True +DISABLE_RAMP_METER = True +AV_FRAC = 0.10 + +vehicles = VehicleParams() +vehicles.add( + veh_id="human", + lane_change_controller=(SimLaneChangeController, {}), + routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + speed_mode="all_checks", + ), + lane_change_params=SumoLaneChangeParams( + lane_change_mode=0, + ), + num_vehicles=1 * SCALING) +vehicles.add( + veh_id="followerstopper", + acceleration_controller=(RLController, {}), + lane_change_controller=(SimLaneChangeController, {}), + 
routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + speed_mode=9, + ), + lane_change_params=SumoLaneChangeParams( + lane_change_mode=0, + ), + num_vehicles=1 * SCALING) + +controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), + ("4", 2, True), ("5", 1, False)] +num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] +additional_env_params = { + "target_velocity": 40, + "disable_tb": True, + "disable_ramp_metering": True, + "controlled_segments": controlled_segments, + "symmetric": False, + "observed_segments": num_observed_segments, + "reset_inflow": False, + "lane_change_duration": 5, + "max_accel": 3, + "max_decel": 3, + "inflow_range": [1000, 2000] +} + +# flow rate +flow_rate = 2300 * SCALING + +# percentage of flow coming out of each lane +inflow = InFlows() +inflow.add( + veh_type="human", + edge="1", + vehs_per_hour=flow_rate * (1 - AV_FRAC), + departLane="random", + departSpeed=10) +inflow.add( + veh_type="followerstopper", + edge="1", + vehs_per_hour=flow_rate * AV_FRAC, + departLane="random", + departSpeed=10) + +traffic_lights = TrafficLightParams() +if not DISABLE_TB: + traffic_lights.add(node_id="2") +if not DISABLE_RAMP_METER: + traffic_lights.add(node_id="3") + +additional_net_params = {"scaling": SCALING, "speed_limit": 23} +net_params = NetParams( + inflows=inflow, + additional_params=additional_net_params) + +flow_params = dict( + # name of the experiment + exp_tag="DesiredVelocity", + + # name of the flow environment the experiment is running on + env_name=BottleneckDesiredVelocityEnv, + + # name of the network class the experiment is running on + network=BottleneckNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + print_warnings=False, + restart_instance=True, + ), + + # environment related parameters (see 
flow.core.params.EnvParams) + env=EnvParams( + warmup_steps=40, + sims_per_step=1, + horizon=HORIZON, + additional_params=additional_env_params, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflow, + additional_params=additional_net_params, + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig( + spacing="uniform", + min_gap=5, + lanes_distribution=float("inf"), + edges_distribution=["2", "3", "4", "5"], + ), + + # traffic lights to be introduced to specific nodes (see + # flow.core.params.TrafficLightParams) + tls=traffic_lights, +) diff --git a/flow/controllers/dagger/imitating_controller.py b/flow/controllers/dagger/imitating_controller.py new file mode 100644 index 000000000..0adffb6cd --- /dev/null +++ b/flow/controllers/dagger/imitating_controller.py @@ -0,0 +1,104 @@ +import numpy as np +import tensorflow as tf +from utils import * +import tensorflow_probability as tfp +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer + + +class ImitatingController(BaseController): + """ + Controller which learns to imitate another given expert controller. 
+ """ + # Implementation in Tensorflow + + def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + + BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + self.inject_noise=inject_noise + self.noise_variance = noise_variance + + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() + + + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + + def build_network(self): + """ + Defines neural network for choosing actions. 
+ """ + self.define_placeholders() + self.define_forward_pass() + if self.training: + with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + self.define_train_op() + + + def define_placeholders(self): + """ + Defines input, output, and training placeholders for neural net + """ + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + + if self.training: + self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + + def define_forward_pass(self): + pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.action_predictions = pred_action + print("TYPE: ", type(self.obs_placeholder)) + + if self.inject_noise == 1: + self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) + + def define_train_op(self): + true_actions = self.action_labels_placeholder + predicted_actions = self.action_predictions + + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) + + def train(self, observation_batch, action_batch): + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + + def get_accel_from_observation(self, observation): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] + + return ret_val + + def get_accel(self, env): 
+ # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + observation = env.get_state() + return self.get_accel_from_observation(observation) + + def add_to_replay_buffer(self, rollout_list): + """ Add rollouts to replay buffer """ + + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + """ Sample a batch of data from replay buffer """ + + return self.replay_buffer.sample_batch(batch_size) + + def save_network(self, save_path): + self.saver.save(self.sess, save_path) diff --git a/flow/controllers/dagger/replay_buffer.py b/flow/controllers/dagger/replay_buffer.py new file mode 100644 index 000000000..4e362bd41 --- /dev/null +++ b/flow/controllers/dagger/replay_buffer.py @@ -0,0 +1,64 @@ +import time +import numpy as np +import tensorflow as tf +import gym +import os +from utils import * + + +class ReplayBuffer(object): + """ Replay buffer class to store state, action, expert_action, reward, next_state, terminal tuples""" + + def __init__(self, max_size=100000): + + self.max_size = max_size + + # store each rollout + self.rollouts = [] + + # store component arrays from each rollout + self.observations = None + self.actions = None + self.expert_actions = None + self.rewards = None + self.next_observations = None + self.terminals = None + + + def add_rollouts(self, rollouts_list): + """ + Add a list of rollouts to the replay buffer + """ + + for rollout in rollouts_list: + self.rollouts.append(rollout) + + observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) + + assert (not np.any(np.isnan(expert_actions))), "Invalid actions added to replay buffer" + + if self.observations is None: + self.observations = observations[-self.max_size:] + self.actions = actions[-self.max_size:] + self.expert_actions = expert_actions[-self.max_size:] + self.rewards = rewards[-self.max_size:] + self.next_observations = 
next_observations[-self.max_size:] + self.terminals = terminals[-self.max_size:] + else: + self.observations = np.concatenate([self.observations, observations])[-self.max_size:] + self.actions = np.concatenate([self.actions, actions])[-self.max_size:] + self.expert_actions = np.concatenate([self.expert_actions, expert_actions])[-self.max_size:] + self.rewards = np.concatenate([self.rewards, rewards])[-self.max_size:] + self.next_observations = np.concatenate([self.next_observations, next_observations])[-self.max_size:] + self.terminals = np.concatenate([self.terminals, terminals])[-self.max_size:] + + def sample_batch(self, batch_size): + """ + Sample a batch of data (with size batch_size) from replay buffer. + Returns data in separate numpy arrays of observations, actions, rewards, next_observations, terminals + """ + assert self.observations is not None and self.actions is not None and self.expert_actions is not None and self.rewards is not None and self.next_observations is not None and self.terminals is not None + + size = len(self.observations) + rand_inds = np.random.randint(0, size, batch_size) + return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds], self.rewards[rand_inds], self.next_observations[rand_inds], self.terminals[rand_inds] diff --git a/flow/controllers/dagger/ring_env.py b/flow/controllers/dagger/ring_env.py new file mode 100644 index 000000000..20ced1ce9 --- /dev/null +++ b/flow/controllers/dagger/ring_env.py @@ -0,0 +1,85 @@ +"""Ring road example. +Trains a single autonomous vehicle to stabilize the flow of 21 human-driven +vehicles in a variable length ring road. 
+""" +from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams +from flow.core.params import VehicleParams, SumoCarFollowingParams +from flow.controllers import RLController, IDMController, ContinuousRouter +from flow.envs import WaveAttenuationPOEnv +from flow.networks import RingNetwork + +# time horizon of a single rollout +HORIZON = 3000 +# number of rollouts per training iteration +N_ROLLOUTS = 20 +# number of parallel workers +N_CPUS = 2 + +# We place one autonomous vehicle and 22 human-driven vehicles in the network +vehicles = VehicleParams() +vehicles.add( + veh_id="human", + acceleration_controller=(IDMController, { + "noise": 0.2 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0 + ), + routing_controller=(ContinuousRouter, {}), + num_vehicles=21) +vehicles.add( + veh_id="rl", + acceleration_controller=(RLController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=1) + +flow_params = dict( + # name of the experiment + exp_tag="stabilizing_the_ring", + + # name of the flow environment the experiment is running on + env_name=WaveAttenuationPOEnv, + + # name of the network class the experiment is running on + network=RingNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.1, + render=False, + restart_instance=False + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=750, + clip_actions=False, + additional_params={ + "max_accel": 1, + "max_decel": 1, + "ring_length": [220, 270], + }, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + additional_params={ + "length": 260, + "lanes": 1, + "speed_limit": 30, + "resolution": 40, + }, ), + + # vehicles to be placed in the network at the start of a rollout (see + # 
flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/controllers/dagger/run.py b/flow/controllers/dagger/run.py new file mode 100644 index 000000000..c647f37cd --- /dev/null +++ b/flow/controllers/dagger/run.py @@ -0,0 +1,79 @@ +import os +import time +import numpy as np +import tensorflow as tf +from trainer import Trainer +from flow.controllers.car_following_models import IDMController + + +class Runner(object): + """ Class to run imitation learning (training and evaluation) """ + + def __init__(self, params): + + # initialize trainer + self.params = params + self.trainer = Trainer(params) + + def run_training_loop(self): + + self.trainer.run_training_loop(n_iter=self.params['n_iter']) + + def evaluate(self): + self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) + + def save_controller_network(self): + self.trainer.save_controller_network() + + +def main(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--ep_len', type=int, default=3000) + + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) + parser.add_argument('--n_iter', '-n', type=int, default=5) + + parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=3000) + + parser.add_argument('--train_batch_size', type=int, + default=100) # number of sampled data points to be used per gradient/train step + + parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned + parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned + parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning 
rate for supervised learning + parser.add_argument('--replay_buffer_size', type=int, default=1000000) + parser.add_argument('--save_path', type=str, default='') + parser.add_argument('--save_model', type=int, default=0) + parser.add_argument('--num_eval_episodes', type=int, default=10) + parser.add_argument('--inject_noise', type=int, default=0) + parser.add_argument('--noise_variance',type=float, default=0.5) + parser.add_argument('--vehicle_id', type=str, default='rl_0') + + args = parser.parse_args() + + # convert args to dictionary + params = vars(args) + print("INJECT: ", params['inject_noise']) + assert args.n_iter>1, ('DAgger needs >1 iteration') + + + # run training + train = Runner(params) + train.run_training_loop() + + # evaluate + train.evaluate() + print("DONE") + + if params['save_model'] == 1: + train.save_controller_network() + + # tensorboard + if params['save_model'] == 1: + writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + +if __name__ == "__main__": + main() diff --git a/flow/controllers/dagger/trainer.py b/flow/controllers/dagger/trainer.py new file mode 100644 index 000000000..03364f528 --- /dev/null +++ b/flow/controllers/dagger/trainer.py @@ -0,0 +1,179 @@ +import time +from collections import OrderedDict +import pickle +import numpy as np +import tensorflow as tf +import gym +import os +from flow.utils.registry import make_create_env +from bottleneck_env import flow_params +from imitating_controller import ImitatingController +from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.params import SumoCarFollowingParams +from utils import * + +class Trainer(object): + """ + Class to initialize and run training for imitation learning (with DAgger) + """ + + def __init__(self, params): + self.params = params + self.sess = create_tf_session() + + create_env, _ = make_create_env(flow_params) + self.env = create_env() + self.env.reset() 
+ + print(self.env.k.vehicle.get_ids()) + assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() + self.vehicle_id = self.params['vehicle_id'] + + obs_dim = self.env.observation_space.shape[0] + + action_dim = (1,)[0] + self.params['action_dim'] = action_dim + self.params['obs_dim'] = obs_dim + + car_following_params = SumoCarFollowingParams() + self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) + # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) + + tf.global_variables_initializer().run(session=self.sess) + + + def run_training_loop(self, n_iter): + """ + Trains controller for n_iter iterations + + Args: + param n_iter: number of iterations to execute training + """ + + # init vars at beginning of training + self.total_envsteps = 0 + self.start_time = time.time() + + for itr in range(n_iter): + print("\n\n********** Iteration %i ************"%itr) + + # collect trajectories, to be used for training + if itr == 0: + # first iteration is standard behavioral cloning + training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) + else: + training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) + + paths, envsteps_this_batch = training_returns + self.total_envsteps += envsteps_this_batch + + # add collected data to replay buffer + self.controller.add_to_replay_buffer(paths) + + # train controller (using sampled data from replay buffer) + loss = self.train_controller() + + def collect_training_trajectories(self, itr, batch_size): + """ + 
Collect (state, action, reward, next_state, terminal) tuples for training + + Args: + itr: iteration of training during which functino is called + batch_size: number of tuples to collect + Returns: + paths: list of trajectories + envsteps_this_batch: the sum over the numbers of environment steps in paths + """ + + if itr == 0: + collect_controller = self.expert_controller + else: + collect_controller = self.controller + + print("\nCollecting data to be used for training...") + paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) + + return paths, envsteps_this_batch + + def train_controller(self): + """ + Trains controller using data sampled from replay buffer + """ + + print('Training controller using sampled data from replay buffer') + for train_step in range(self.params['num_agent_train_steps_per_iter']): + ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) + self.controller.train(ob_batch, expert_ac_batch) + + def evaluate_controller(self, num_trajs = 10): + """ + Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout + + Args: + num_trajs: number of trajectories to evaluate performance on + """ + + print("\n\n********** Evaluation ************ \n") + + trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_imitator_reward = 0 + total_imitator_steps = 0 + average_imitator_reward_per_rollout = 0 + + action_errors = np.array([]) + average_action_expert = 0 + average_action_imitator = 0 + + # compare actions taken in each step of trajectories + for traj in trajectories: + imitator_actions = traj['actions'] + expert_actions = traj['expert_actions'] + + average_action_expert += np.sum(expert_actions) + average_action_imitator 
+= np.sum(imitator_actions) + + action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) + action_errors = np.append(action_errors, action_error) + + average_imitator_reward += np.sum(traj['rewards']) + total_imitator_steps += len(traj['rewards']) + average_imitator_reward_per_rollout += np.sum(traj['rewards']) + + average_imitator_reward = average_imitator_reward / total_imitator_steps + average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) + + average_action_expert = average_action_expert / total_imitator_steps + average_action_imitator = average_action_imitator / total_imitator_steps + + + expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_expert_reward = 0 + total_expert_steps = 0 + average_expert_reward_per_rollout = 0 + + # compare reward accumulated in trajectories collected via expert vs. via imitator + for traj in expert_trajectories: + average_expert_reward += np.sum(traj['rewards']) + total_expert_steps += len(traj['rewards']) + average_expert_reward_per_rollout += np.sum(traj['rewards']) + + average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) + average_expert_reward = average_expert_reward / total_expert_steps + + print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) + print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) + print("AVERAGE REWARD PER STEP DIFFERENCE: ", np.abs(average_expert_reward - average_imitator_reward), "\n") + + print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: ", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + + print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") + + 
def save_controller_network(self): + print("Saving tensorflow model to: ", self.params['save_path']) + self.controller.save_network(self.params['save_path']) diff --git a/flow/controllers/dagger/utils.py b/flow/controllers/dagger/utils.py new file mode 100644 index 000000000..a5bf7acfa --- /dev/null +++ b/flow/controllers/dagger/utils.py @@ -0,0 +1,173 @@ +import tensorflow as tf +import os +import numpy as np +import math + +""" Class agnostic helper functions """ + +def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): + """ + Samples a trajectory for a given vehicle using the actions prescribed by specified controller. + + Args: + env: environment + vehicle_id: id of the vehicle that is being controlled/tracked during trajectory + controller: subclass of BaseController, decides actions taken by vehicle + expert_controller: subclass of BaseController, "expert" for imitation learning + max_trajectory_length: maximum steps in a trajectory + + Returns: + Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples + """ + + print("COLLECTING CONTROLLER: ", controller) + print("EXPERT CONTROLLER: ", expert_controller) + observation = env.reset() + + assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" 
+ + observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] + traj_length = 0 + + while True: + action = controller.get_action(env) + + if type(action) == np.ndarray: + action = action.flatten()[0] + + expert_action = expert_controller.get_action(env) + if (expert_action is None or math.isnan(expert_action)): + observation, reward, done, _ = env.step(action) + traj_length += 1 + terminate_rollout = traj_length == max_trajectory_length or done + if terminate_rollout: + break + continue + + observations.append(observation) + actions.append(action) + expert_actions.append(expert_action) + observation, reward, done, _ = env.step(action) + + traj_length += 1 + next_observations.append(observation) + rewards.append(reward) + terminate_rollout = (traj_length == max_trajectory_length) or done + terminals.append(terminate_rollout) + + if terminate_rollout: + break + + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + + +def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batch_timesteps, max_trajectory_length): + """ + Samples trajectories to collect at least min_batch_timesteps steps in the environment + + Args: + env: environment + vehicle_id: id of vehicle being tracked/controlled + controller: subclass of BaseController, decides actions taken by vehicle + expert_controller: subclass of BaseController, "expert" for imitation learning + min_batch_timesteps: minimum number of environment steps to collect + max_trajectory_length: maximum steps in a trajectory + + Returns: + List of rollout dictionaries, total steps taken by environment + """ + total_envsteps = 0 + trajectories = [] + + while total_envsteps < min_batch_timesteps: + trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + trajectories.append(trajectory) + + traj_env_steps = len(trajectory["rewards"]) + total_envsteps += traj_env_steps + + return 
trajectories, total_envsteps + +def sample_n_trajectories(env, vehicle_id, controller, expert_controller, n, max_trajectory_length): + """ + Collects a fixed number of trajectories. + + Args: + env: environment + vehicle_id: id of vehicle being tracked/controlled + controller: subclass of BaseController, decides actions taken by vehicle + expert_controller: subclass of BaseController, "expert" for imitation learning + n: number of trajectories to collect + max_trajectory_length: maximum steps in a trajectory + + Returns: + List of rollout dictionaries + + """ + trajectories = [] + for _ in range(n): + trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + trajectories.append(trajectory) + + return trajectories + + +def traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals): + """ + Collects individual observation, action, expert_action, rewards, next observation, terminal arrays into a single rollout dictionary + """ + return {"observations" : np.array(observations, dtype=np.float32), + "actions" : np.array(actions, dtype=np.float32), + "expert_actions": np.array(expert_actions, dtype=np.float32), + "rewards" : np.array(rewards, dtype=np.float32), + "next_observations": np.array(next_observations, dtype=np.float32), + "terminals": np.array(terminals, dtype=np.float32)} + + +def unpack_rollouts(rollouts_list): + """ + Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays + rollouts: list of rollout dictionaries, rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" + return separate np arrays of observations, actions, rewards, next_observations, and is_terminals + """ + observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) + actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) + expert_actions = 
np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) + rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) + next_observations = np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) + terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) + + return observations, actions, expert_actions, rewards, next_observations, terminals + + +# Below are tensorflow related functions + +def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): + """ + Builds a feedfoward neural network for action prediction + + arguments: + input_placeholder: placeholder variable for the state (batch_size, input_size) + scope: variable scope of the network + + n_layers: number of hidden layers + size: dimension of each hidden layer + activation: activation of each hidden layer + + output_size: size of the output layer + output_activation: activation of the output layer + + returns: + output_placeholder: the result of pass through Neural Network + """ + output_placeholder = input_placeholder + with tf.variable_scope(scope): + for _ in range(n_layers): + output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) + output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) + return output_placeholder + +def create_tf_session(): + config = tf.ConfigProto(device_count={'GPU': 0}) + sess = tf.Session(config=config) + return sess From e9d763487ff998ddba40ef7e222c080749d1c872 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 12 Apr 2020 18:33:51 -0700 Subject: [PATCH 07/57] Code cleanup, added testing/eval for imitation learning --- flow/dagger/.idea/dagger.iml | 12 - flow/dagger/Untitled.ipynb | 511 --------------------------- flow/dagger/env_params_test.py | 85 ----- flow/dagger/env_params_test2.py | 47 --- flow/dagger/imitating_agent.py | 26 -- flow/dagger/imitating_controller.py 
| 76 ++-- flow/dagger/imitating_controller2.py | 85 ----- flow/dagger/replay_buffer.py | 61 ---- flow/dagger/run.py | 32 +- flow/dagger/trainer.py | 133 +++++-- flow/dagger/useless.py | 147 -------- flow/dagger/utils.py | 119 ------- 12 files changed, 183 insertions(+), 1151 deletions(-) delete mode 100644 flow/dagger/.idea/dagger.iml delete mode 100644 flow/dagger/Untitled.ipynb delete mode 100644 flow/dagger/env_params_test.py delete mode 100644 flow/dagger/env_params_test2.py delete mode 100644 flow/dagger/imitating_agent.py delete mode 100644 flow/dagger/imitating_controller2.py delete mode 100644 flow/dagger/replay_buffer.py delete mode 100644 flow/dagger/useless.py delete mode 100644 flow/dagger/utils.py diff --git a/flow/dagger/.idea/dagger.iml b/flow/dagger/.idea/dagger.iml deleted file mode 100644 index 0bc0e0321..000000000 --- a/flow/dagger/.idea/dagger.iml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/flow/dagger/Untitled.ipynb b/flow/dagger/Untitled.ipynb deleted file mode 100644 index 0f1ac9809..000000000 --- a/flow/dagger/Untitled.ipynb +++ /dev/null @@ -1,511 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" - ] - } - ], - "source": [ - "import tensorflow as tf\n", - "import numpy as np\n", - "import gym" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "ename": "ModuleNotFoundError", - "evalue": "No module named 'env_params'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m 
\u001b[0;32mfrom\u001b[0m \u001b[0menv_params\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'env_params'" - ] - } - ], - "source": [ - "from env_params import flow_params as flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departLane in InFlows is deprecated, use depart_lane instead.\n", - " PendingDeprecationWarning\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", - " PendingDeprecationWarning\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departLane in InFlows is deprecated, use depart_lane instead.\n", - " PendingDeprecationWarning\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", - " PendingDeprecationWarning\n" - ] - } - ], - "source": [ - "from flow.utils.registry import make_create_env\n", - "from flow.controllers import IDMController, ContinuousRouter\n", - "from flow.core.experiment import Experiment\n", - "from flow.core.params import SumoParams, EnvParams, \\\n", - " InitialConfig, NetParams\n", - "from flow.core.params import VehicleParams\n", - "from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS\n", - "import numpy as np\n", - "from flow.core.experiment import 
Experiment\n", - "from flow.core.params import InFlows\n", - "from flow.core.params import SumoLaneChangeParams\n", - "from flow.core.params import SumoCarFollowingParams\n", - "from flow.core.params import VehicleParams\n", - "from flow.controllers import ContinuousRouter\n", - "from flow.benchmarks.bottleneck0 import flow_params\n", - "from flow.benchmarks.bottleneck0 import SCALING" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# sim_params = flow_params['sim']\n", - "# env_params = flow_params['env']\n", - "# net_params = flow_params['net']\n", - "\n", - "# # we want no autonomous vehicles in the simulation\n", - "# vehicles = VehicleParams()\n", - "# vehicles.add(veh_id='human',\n", - "# car_following_params=SumoCarFollowingParams(\n", - "# speed_mode=9,\n", - "# ),\n", - "# routing_controller=(ContinuousRouter, {}),\n", - "# lane_change_params=SumoLaneChangeParams(\n", - "# lane_change_mode=0,\n", - "# ),\n", - "# num_vehicles=1 * SCALING)\n", - "\n", - "# # only include human vehicles in inflows\n", - "# flow_rate = 2300 * SCALING\n", - "# inflow = InFlows()\n", - "# inflow.add(veh_type='human', edge='1',\n", - "# vehs_per_hour=flow_rate,\n", - "# departLane='random', departSpeed=10)\n", - "# net_params.inflows = inflow\n", - "\n", - "# # modify the rendering to match what is requested\n", - "# # sim_params.render = render\n", - "\n", - "# # set the evaluation flag to True\n", - "# env_params.evaluate = True\n", - "\n", - "# flow_params['env'].horizon = env_params.horizon" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": 
"stream", - "text": [ - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/subprocess.py:786: ResourceWarning: subprocess 28341 is still running\n", - " ResourceWarning, source=self)\n" - ] - }, - { - "data": { - "text/plain": [ - "array([0. , 0.1 , 0.05 , 0. , 0. ,\n", - " 0. , 0.05 , 0.05 , 0. , 0.05 ,\n", - " 0. , 0.05 , 0.05 , 0. , 0.1 ,\n", - " 0. , 0.05 , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0.05 , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0.05 ,\n", - " 0. , 0.05 , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0.3234498 , 0.35290716, 0. , 0. ,\n", - " 0. , 0.39880784, 0.41698796, 0. , 0.4171411 ,\n", - " 0. , 0.49073983, 0.40911561, 0. , 0.43184929,\n", - " 0. , 0.41929399, 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0.27513936, 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0.44301522,\n", - " 0. , 0.44301522, 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. 
])" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "state = env.reset()\n", - "state" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'bottleneck_0'" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "flow_params[\"exp_tag\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions = {}\n", - "for veh_id in env.k.vehicle.get_ids():\n", - " print(veh_id)\n", - " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)\n", - "print(flow_params.env['horizon'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "state, reward, done, _ = env.step(env.action_space.sample())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from env_params_test import name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "name" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "from env_params_test import flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "-----------------------\n", - "ring length: 
265\n", - "v_max: 5.37714246265477\n", - "-----------------------\n" - ] - }, - { - "data": { - "text/plain": [ - "array([ 0.31246011, -0.00413767, 0.04496073])" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "state = env.reset()\n", - "state" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "vehicle_id = 'rl_0'" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.controllers.car_following_models import IDMController\n", - "car_following_params = SumoCarFollowingParams()\n", - "idm_controller = IDMController(vehicle_id, car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(array([ 0.31200989, -0.00526746, 0.04493147]), 0.6436939709782903, False, {})" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.step(idm_controller.get_action(env))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(200):\n", - " rl_actions = {}\n", - " for veh_id in env.k.vehicle.get_ids():\n", - " # print(veh_id)\n", - " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)\n", - " print(env.get_state())\n", - " env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.controllers.car_following_models import IDMController\n" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vehicle_id = env.k.vehicle.get_ids()[0]\n", - "vehicle_id" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "car_following_params = SumoCarFollowingParams()\n", - "idm_controller = IDMController(vehicle_id, car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "idm_controller.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ob, rew, done, _ = env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from env_params import flow_params\n", - "create_env, _ = make_create_env(flow_params)\n", - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.k.vehicle.get_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/flow/dagger/env_params_test.py b/flow/dagger/env_params_test.py deleted file mode 100644 index 20ced1ce9..000000000 --- a/flow/dagger/env_params_test.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Ring road example. -Trains a single autonomous vehicle to stabilize the flow of 21 human-driven -vehicles in a variable length ring road. 
-""" -from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams -from flow.core.params import VehicleParams, SumoCarFollowingParams -from flow.controllers import RLController, IDMController, ContinuousRouter -from flow.envs import WaveAttenuationPOEnv -from flow.networks import RingNetwork - -# time horizon of a single rollout -HORIZON = 3000 -# number of rollouts per training iteration -N_ROLLOUTS = 20 -# number of parallel workers -N_CPUS = 2 - -# We place one autonomous vehicle and 22 human-driven vehicles in the network -vehicles = VehicleParams() -vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, { - "noise": 0.2 - }), - car_following_params=SumoCarFollowingParams( - min_gap=0 - ), - routing_controller=(ContinuousRouter, {}), - num_vehicles=21) -vehicles.add( - veh_id="rl", - acceleration_controller=(RLController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=1) - -flow_params = dict( - # name of the experiment - exp_tag="stabilizing_the_ring", - - # name of the flow environment the experiment is running on - env_name=WaveAttenuationPOEnv, - - # name of the network class the experiment is running on - network=RingNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.1, - render=False, - restart_instance=False - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=750, - clip_actions=False, - additional_params={ - "max_accel": 1, - "max_decel": 1, - "ring_length": [220, 270], - }, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - additional_params={ - "length": 260, - "lanes": 1, - "speed_limit": 30, - "resolution": 40, - }, ), - - # vehicles to be placed in the network at the start of a rollout (see - # 
flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) diff --git a/flow/dagger/env_params_test2.py b/flow/dagger/env_params_test2.py deleted file mode 100644 index 7140af720..000000000 --- a/flow/dagger/env_params_test2.py +++ /dev/null @@ -1,47 +0,0 @@ -from flow.networks.ring import RingNetwork -name = "ring_example" - -from flow.core.params import VehicleParams -vehicles = VehicleParams() - -from flow.controllers.car_following_models import IDMController -from flow.controllers.routing_controllers import ContinuousRouter -from imitating_controller2 import ImitatingController -vehicles.add("human", - acceleration_controller=(IDMController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=22) - -from flow.networks.ring import ADDITIONAL_NET_PARAMS -from flow.core.params import NetParams -net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) - -from flow.core.params import InitialConfig -initial_config = InitialConfig(spacing="uniform", perturbation=1) - -from flow.core.params import TrafficLightParams -traffic_lights = TrafficLightParams() - -from flow.envs.ring.accel import AccelEnv -from flow.core.params import SumoParams -sim_params = SumoParams(sim_step=0.1, render=False, emission_path='data') - -from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS -from flow.core.params import EnvParams -env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) - -flow_params_test = dict( - exp_tag='ring_example', - env_name=AccelEnv, - network=RingNetwork, - simulator='traci', - sim=sim_params, - env=env_params, - net=net_params, - veh=vehicles, - initial=initial_config, - tls=traffic_lights, -) - -# number of time steps -flow_params_test['env'].horizon = 3000 diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py deleted file mode 100644 index f5b09dee3..000000000 --- 
a/flow/dagger/imitating_agent.py +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np -import tensorflow as tf -import time -from imitating_controller2 import * -from replay_buffer - -class Imitating_Agent(object): - # ignore this class! - def __init__(self, sess, env, params): - self.env = env - self.sess = sess - self.params = params - - self.policy = Imitator_Policy(sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate']) - - self.replay_buffer = ReplayBuffer(self.params['replay_buffer_size']) - - - def train(self, obs, acts): - self.policy.update(obs, acts) - - def add_to_replay_buffer(self, rollout_list): - self.replay_buffer.add_rollouts(rollout_list) - - def sample_data(self, batch_size): - return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py index 2537d70b8..0adffb6cd 100644 --- a/flow/dagger/imitating_controller.py +++ b/flow/dagger/imitating_controller.py @@ -1,20 +1,18 @@ import numpy as np -import tensorflow -from tensorflow import keras import tensorflow as tf from utils import * +import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer -from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense -from tensorflow.keras.activations import * - class ImitatingController(BaseController): - # Implementation in Keras just for testing + """ + Controller which learns to imitate another given expert controller. 
+ """ + # Implementation in Tensorflow - def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) self.sess = sess @@ -24,9 +22,11 @@ def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning self.size = size self.learning_rate = learning_rate self.training = training - self.model = Sequential() - self.build_network() + self.inject_noise=inject_noise + self.noise_variance = noise_variance + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() if self.training: @@ -34,27 +34,54 @@ def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning else: self.replay_buffer = None + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + def build_network(self): - self.model.add(Dense(self.size, input_dim=self.obs_dim, activation='tanh')) - for _ in range(self.num_layers): - self.model.add(Dense(self.size, activation='relu')) - # No activation - self.model.add(Dense(self.action_dim)) - self.model.compile(loss='mean_squared_error', optimizer='adam') + """ + Defines neural network for choosing actions. 
+ """ + self.define_placeholders() + self.define_forward_pass() + if self.training: + with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + self.define_train_op() - def train(self, observation_batch, action_batch): - assert(self.training, "Policy must be trainable") - assert (not np.any(np.isnan(action_batch))), "NANs in training labels" + def define_placeholders(self): + """ + Defines input, output, and training placeholders for neural net + """ + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + + if self.training: + self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + + def define_forward_pass(self): + pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.action_predictions = pred_action + print("TYPE: ", type(self.obs_placeholder)) + if self.inject_noise == 1: + self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) + + def define_train_op(self): + true_actions = self.action_labels_placeholder + predicted_actions = self.action_predictions + + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) + + def train(self, observation_batch, action_batch): action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - history = self.model.fit(observation_batch, action_batch) + ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) def get_accel_from_observation(self, observation): # network expects an array of arrays (matrix); if single observation (no batch), convert to array of 
arrays if len(observation.shape)<=1: observation = observation[None] - ret_val = self.model.predict(observation) + ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] return ret_val @@ -64,7 +91,14 @@ def get_accel(self, env): return self.get_accel_from_observation(observation) def add_to_replay_buffer(self, rollout_list): + """ Add rollouts to replay buffer """ + self.replay_buffer.add_rollouts(rollout_list) def sample_data(self, batch_size): + """ Sample a batch of data from replay buffer """ + return self.replay_buffer.sample_batch(batch_size) + + def save_network(self, save_path): + self.saver.save(self.sess, save_path) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py deleted file mode 100644 index 65c7c9d1d..000000000 --- a/flow/dagger/imitating_controller2.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import tensorflow as tf -from utils import * -import tensorflow_probability as tfp -from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer - - - -class ImitatingController(BaseController): - # Implementation in Tensorflow - - def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): - - BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.sess = sess - self.action_dim = action_dim - self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training - - - - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() - - - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name 
and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) - - def build_network(self): - self.define_placeholders() - self.define_forward_pass() - if self.training: - with tf.variable_scope('train', reuse=tf.AUTO_REUSE): - self.define_train_op() - - def define_placeholders(self): - self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) - self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) - - if self.training: - self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) - - def define_forward_pass(self): - pred_action = build_mlp(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.action_predictions = pred_action - - def define_train_op(self): - true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions - - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) - self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) - - def train(self, observation_batch, action_batch): - assert(self.training, "Policy must be trainable") - action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - - def get_accel_from_observation(self, observation): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - if len(observation.shape)<=1: - observation = observation[None] - ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - - return ret_val - - def get_accel(self, env): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - observation = 
env.get_state() - return self.get_accel_from_observation(observation) - - def add_to_replay_buffer(self, rollout_list): - self.replay_buffer.add_rollouts(rollout_list) - - def sample_data(self, batch_size): - return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/replay_buffer.py b/flow/dagger/replay_buffer.py deleted file mode 100644 index 1213b985e..000000000 --- a/flow/dagger/replay_buffer.py +++ /dev/null @@ -1,61 +0,0 @@ -import time -import numpy as np -import tensorflow as tf -import gym -import os -from utils import * - - -class ReplayBuffer(object): - def __init__(self, max_size=100000): - - self.max_size = max_size - - # store each rollout - self.rollouts = [] - - # store component arrays from each rollout - self.observations = None - self.actions = None - self.expert_actions = None - self.rewards = None - self.next_observations = None - self.terminals = None - - - def add_rollouts(self, rollouts_list): - """ - Add a list of rollouts to the replay buffer - """ - - for rollout in rollouts_list: - self.rollouts.append(rollout) - - observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) - assert (not np.any(np.isnan(expert_actions))), "REPLAY BUFFER ERROR" - - if self.observations is None: - self.observations = observations[-self.max_size:] - self.actions = actions[-self.max_size:] - self.expert_actions = expert_actions[-self.max_size:] - self.rewards = rewards[-self.max_size:] - self.next_observations = next_observations[-self.max_size:] - self.terminals = terminals[-self.max_size:] - else: - self.observations = np.concatenate([self.observations, observations])[-self.max_size:] - self.actions = np.concatenate([self.actions, actions])[-self.max_size:] - self.expert_actions = np.concatenate([self.expert_actions, expert_actions])[-self.max_size:] - self.rewards = np.concatenate([self.rewards, rewards])[-self.max_size:] - self.next_observations = np.concatenate([self.next_observations, 
next_observations])[-self.max_size:] - self.terminals = np.concatenate([self.terminals, terminals])[-self.max_size:] - - def sample_batch(self, batch_size): - """ - Sample a batch of data (with size batch_size) from replay buffer. - Returns data in separate numpy arrays of observations, actions, rewards, next_observations, terminals - """ - assert self.observations is not None and self.actions is not None and self.expert_actions is not None and self.rewards is not None and self.next_observations is not None and self.terminals is not None - - size = len(self.observations) - rand_inds = np.random.randint(0, size, batch_size) - return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds], self.rewards[rand_inds], self.next_observations[rand_inds], self.terminals[rand_inds] diff --git a/flow/dagger/run.py b/flow/dagger/run.py index 67bac9dda..e53030bf7 100644 --- a/flow/dagger/run.py +++ b/flow/dagger/run.py @@ -7,6 +7,7 @@ class Runner(object): + """ Class to run imitation learning (training and evaluation) """ def __init__(self, params): @@ -18,6 +19,12 @@ def run_training_loop(self): self.trainer.run_training_loop(n_iter=self.params['n_iter']) + def evaluate(self): + self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) + + def save_controller_network(self): + self.trainer.save_controller_network() + def main(): import argparse @@ -28,27 +35,44 @@ def main(): parser.add_argument('--n_iter', '-n', type=int, default=5) parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=5000) + parser.add_argument('--init_batch_size', type=int, default=3000) parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step - parser.add_argument('--num_layers', type=int, default=2) # depth, of policy to be learned + 
parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning parser.add_argument('--replay_buffer_size', type=int, default=1000000) + parser.add_argument('--save_path', type=str, default='') + parser.add_argument('--save_model', type=int, default=0) + parser.add_argument('--num_eval_episodes', type=int, default=10) + parser.add_argument('--inject_noise', type=int, default=0) + parser.add_argument('--noise_variance',type=float, default=0.5) + parser.add_argument('--vehicle_id',type=str, default='rl_0') args = parser.parse_args() # convert args to dictionary params = vars(args) - - assert args.n_iter>1, ('DAGGER needs more than 1 iteration (n_iter>1) of training, to iteratively query the expert and train (after 1st warmstarting from behavior cloning).') + print("INJECT: ", params['inject_noise']) + assert args.n_iter>1, ('DAgger needs >1 iteration') # run training train = Runner(params) train.run_training_loop() + # evaluate + train.evaluate() + print("DONE") + + if params['save_model'] == 1: + train.save_controller_network() + + # tensorboard + writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + if __name__ == "__main__": main() diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index 3c72b0e63..141021ef3 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -6,13 +6,17 @@ import gym import os from flow.utils.registry import make_create_env -from env_params_test import flow_params -from imitating_controller2 import ImitatingController +from bottleneck_env import flow_params +from imitating_controller import ImitatingController from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import 
SumoCarFollowingParams from utils import * class Trainer(object): + """ + Class to initialize and run training for imitation learning (with DAgger) + """ def __init__(self, params): self.params = params @@ -22,33 +26,31 @@ def __init__(self, params): self.env = create_env() self.env.reset() - # might need to replace this hardcode - assert 'rl_0' in self.env.k.vehicle.get_ids() - self.vehicle_id = 'rl_0' + assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() + self.vehicle_id = self.params['vehicle_id'] obs_dim = self.env.observation_space.shape[0] - # TODO: make sure this is correct action_dim = (1,)[0] self.params['action_dim'] = action_dim self.params['obs_dim'] = obs_dim car_following_params = SumoCarFollowingParams() - self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params) - self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) + + # initialize expert controller + # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) tf.global_variables_initializer().run(session=self.sess) def run_training_loop(self, n_iter): """ - :param n_iter: number of (dagger) iterations - :param collect_policy: - :param eval_policy: - :param initial_expertdata: - :param relabel_with_expert: whether to 
perform dagger - :param start_relabel_with_expert: iteration at which to start relabel with expert - :param expert_policy: + Trains controller for n_iter iterations + + Args: + param n_iter: number of iterations to execute training """ # init vars at beginning of training @@ -60,6 +62,7 @@ def run_training_loop(self, n_iter): # collect trajectories, to be used for training if itr == 0: + # first iteration is standard behavioral cloning training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) else: training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) @@ -70,19 +73,19 @@ def run_training_loop(self, n_iter): # add collected data to replay buffer self.controller.add_to_replay_buffer(paths) - # train agent (using sampled data from replay buffer) + # train controller (using sampled data from replay buffer) loss = self.train_controller() def collect_training_trajectories(self, itr, batch_size): """ - :param itr: - :param load_initial_expertdata: path to expert data pkl file - :param collect_policy: the current policy using which we collect data - :param batch_size: the number of transitions we collect - :return: - paths: a list trajectories + Collect (state, action, reward, next_state, terminal) tuples for training + + Args: + itr: iteration of training during which functino is called + batch_size: number of tuples to collect + Returns: + paths: list of trajectories envsteps_this_batch: the sum over the numbers of environment steps in paths - train_video_paths: paths which also contain videos for visualization purposes """ if itr == 0: @@ -90,24 +93,88 @@ def collect_training_trajectories(self, itr, batch_size): else: collect_controller = self.controller - print("\nCollecting data to be used for training...") + print("\nCollecting data for training") paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) 
return paths, envsteps_this_batch def train_controller(self): + """ + Trains controller using data sampled from replay buffer + """ + print('Training controller using sampled data from replay buffer') + for train_step in range(self.params['num_agent_train_steps_per_iter']): - # TODO: fix this ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) self.controller.train(ob_batch, expert_ac_batch) + def evaluate_controller(self, num_trajs = 10): + """ + Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout + + Args: + num_trajs: number of trajectories to evaluate performance on + """ + + print("\n\n********** Evaluation ************\n\n") + + trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_imitator_reward = 0 + total_imitator_steps = 0 + average_imitator_reward_per_rollout = 0 + + action_errors = np.array([]) + average_action_expert = 0 + average_action_imitator = 0 + + for traj in trajectories: + imitator_actions = traj['actions'] + expert_actions = traj['expert_actions'] + + average_action_expert += np.sum(expert_actions) + average_action_imitator += np.sum(imitator_actions) + + action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) + action_errors = np.append(action_errors, action_error) + + average_imitator_reward += np.sum(traj['rewards']) + total_imitator_steps += len(traj['rewards']) + average_imitator_reward_per_rollout += np.sum(traj['rewards']) + + average_imitator_reward = average_imitator_reward / total_imitator_steps + average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) + + average_action_expert = average_action_expert / total_imitator_steps + average_action_imitator = average_action_imitator / total_imitator_steps + + + 
print('Evaluating controller on reward') + expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_expert_reward = 0 + total_expert_steps = 0 + average_expert_reward_per_rollout = 0 + + for traj in expert_trajectories: + average_expert_reward += np.sum(traj['rewards']) + total_expert_steps += len(traj['rewards']) + average_expert_reward_per_rollout += np.sum(traj['rewards']) + + average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) + average_expert_reward = average_expert_reward / total_expert_steps + + print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) + print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) + print("AVERAGE REWARD PER STEP DIFFERENCE: \n", np.abs(average_expert_reward - average_imitator_reward)) + print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout)) + print("MEAN ACTION, IMITATOR: ", average_action_imitator) + print("MEAN ACTION, EXPERT: ", average_action_expert) + print("MEAN ACTION ERROR: \n", np.mean(action_errors)) - # def do_relabel_with_expert(self, paths): - # print("Relabelling collected observations with labels from an expert policy...") - # - # for i in range(len(paths)): - # acs = self.expert_policy.get_action(paths[i]["observation"]) - # paths[i]["action"] = acs - # - # return paths + def save_controller_network(self): + print("Saving tensorflow model to: ", self.params['save_path']) + self.controller.save_network(self.params['save_path']) diff --git a/flow/dagger/useless.py b/flow/dagger/useless.py deleted file mode 100644 index 86f3ee9ad..000000000 --- a/flow/dagger/useless.py +++ /dev/null @@ -1,147 +0,0 @@ -# 
"""Benchmark for bottleneck0. -# Bottleneck in which the actions are specifying a desired velocity in a segment -# of space. The autonomous penetration rate in this example is 10%. -# - **Action Dimension**: (?, ) -# - **Observation Dimension**: (?, ) -# - **Horizon**: 1000 steps -# """ -# from flow.envs import BottleneckDesiredVelocityEnv -# from flow.networks import BottleneckNetwork -# from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ -# InFlows, SumoCarFollowingParams, SumoLaneChangeParams -# from flow.core.params import TrafficLightParams -# from flow.core.params import VehicleParams -# from flow.controllers import RLController, ContinuousRouter -# -# # time horizon of a single rollout -# HORIZON = 1500 -# -# SCALING = 1 -# NUM_LANES = 4 * SCALING # number of lanes in the widest highway -# DISABLE_TB = True -# DISABLE_RAMP_METER = True -# AV_FRAC = 0.10 -# -# vehicles = VehicleParams() -# vehicles.add( -# veh_id="human", -# routing_controller=(ContinuousRouter, {}), -# car_following_params=SumoCarFollowingParams( -# speed_mode=9, -# ), -# lane_change_params=SumoLaneChangeParams( -# lane_change_mode=0, -# ), -# num_vehicles=1 * SCALING) -# vehicles.add( -# veh_id="rl", -# acceleration_controller=(RLController, {}), -# routing_controller=(ContinuousRouter, {}), -# car_following_params=SumoCarFollowingParams( -# speed_mode=9, -# ), -# lane_change_params=SumoLaneChangeParams( -# lane_change_mode=0, -# ), -# num_vehicles=1 * SCALING) -# -# controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), -# ("4", 2, True), ("5", 1, False)] -# num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] -# -# additional_env_params = { -# "target_velocity": 40, -# "disable_tb": True, -# "disable_ramp_metering": True, -# "controlled_segments": controlled_segments, -# "symmetric": False, -# "observed_segments": num_observed_segments, -# "reset_inflow": False, -# "lane_change_duration": 5, -# "max_accel": 3, -# 
"max_decel": 3, -# "inflow_range": [1200, 2500] -# } -# -# # flow rate -# flow_rate = 2000 * SCALING -# -# # percentage of flow coming out of each lane -# inflow = InFlows() -# inflow.add( -# veh_type="human", -# edge="1", -# vehs_per_hour=flow_rate * (1 - AV_FRAC), -# departLane="random", -# departSpeed=10) -# inflow.add( -# veh_type="rl", -# edge="1", -# vehs_per_hour=flow_rate * AV_FRAC, -# departLane="random", -# departSpeed=10) -# -# traffic_lights = TrafficLightParams() -# if not DISABLE_TB: -# traffic_lights.add(node_id="2") -# if not DISABLE_RAMP_METER: -# traffic_lights.add(node_id="3") -# -# additional_net_params = {"scaling": SCALING, "speed_limit": 23} -# net_params = NetParams( -# inflows=inflow, -# additional_params=additional_net_params) -# -# flow_params = dict( -# # name of the experiment -# exp_tag="bottleneck_0", -# -# # name of the flow environment the experiment is running on -# env_name=BottleneckDesiredVelocityEnv, -# -# # name of the network class the experiment is running on -# network=BottleneckNetwork, -# -# # simulator that is used by the experiment -# simulator='traci', -# -# # sumo-related parameters (see flow.core.params.SumoParams) -# sim=SumoParams( -# sim_step=0.5, -# render=False, -# print_warnings=False, -# restart_instance=True, -# ), -# -# # environment related parameters (see flow.core.params.EnvParams) -# env=EnvParams( -# warmup_steps=40, -# sims_per_step=1, -# horizon=HORIZON, -# additional_params=additional_env_params, -# ), -# -# # network-related parameters (see flow.core.params.NetParams and the -# # network's documentation or ADDITIONAL_NET_PARAMS component) -# net=NetParams( -# inflows=inflow, -# additional_params=additional_net_params, -# ), -# -# # vehicles to be placed in the network at the start of a rollout (see -# # flow.core.params.VehicleParams) -# veh=vehicles, -# -# # parameters specifying the positioning of vehicles upon initialization/ -# # reset (see flow.core.params.InitialConfig) -# 
initial=InitialConfig( -# spacing="uniform", -# min_gap=5, -# lanes_distribution=float("inf"), -# edges_distribution=["2", "3", "4", "5"], -# ), -# -# # traffic lights to be introduced to specific nodes (see -# # flow.core.params.TrafficLightParams) -# tls=traffic_lights, -# ) diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py deleted file mode 100644 index 177fc620f..000000000 --- a/flow/dagger/utils.py +++ /dev/null @@ -1,119 +0,0 @@ -import tensorflow as tf -import os -import numpy as np -import math - -# class agnostic helper functions - -def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): - - print("COLLECTING CONTROLLER: ", controller) - print("EXPERT CONTROLLER: ", expert_controller) - observation = env.reset() - - assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" - - observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] - traj_length = 0 - - while True: - action = controller.get_action(env) - - if type(action) == np.ndarray: - action = action.flatten()[0] - - expert_action = expert_controller.get_action(env) - if (expert_action is None or math.isnan(expert_action)): - observation, reward, done, _ = env.step(action) - traj_length += 1 - terminate_rollout = traj_length == max_trajectory_length or done - if terminate_rollout: - break - continue - - observations.append(observation) - actions.append(action) - expert_actions.append(expert_action) - observation, reward, done, _ = env.step(action) - - traj_length += 1 - next_observations.append(observation) - rewards.append(reward) - terminate_rollout = traj_length == max_trajectory_length or done - terminals.append(terminate_rollout) - - if terminate_rollout: - break - - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) - - -def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batch_timesteps, max_trajectory_length): - 
total_envsteps = 0 - trajectories = [] - - while total_envsteps < min_batch_timesteps: - trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) - trajectories.append(trajectory) - - traj_env_steps = len(trajectory["rewards"]) - total_envsteps += traj_env_steps - - return trajectories, total_envsteps - -def traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals): - return {"observations" : np.array(observations, dtype=np.float32), - "actions" : np.array(actions, dtype=np.float32), - "expert_actions": np.array(expert_actions, dtype=np.float32), - "rewards" : np.array(rewards, dtype=np.float32), - "next_observations": np.array(next_observations, dtype=np.float32), - "terminals": np.array(terminals, dtype=np.float32)} - -def unpack_rollouts(rollouts_list): - """ - Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays - rollouts: list of rollout dictionaries - rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" - return separate np arrays of observations, actions, rewards, next_observations, and is_terminals - """ - observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) - actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) - expert_actions = np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) - rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) - next_observations = np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) - terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) - - return observations, actions, expert_actions, rewards, next_observations, terminals - - -# Below are tensorflow related functions - -def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): - 
""" - Builds a feedfoward neural net - - arguments: - input_placeholder: placeholder variable for the state (batch_size, input_size) - scope: variable scope of the network - - n_layers: number of hidden layers - size: dimension of each hidden layer - activation: activation of each hidden layer - - output_size: size of the output layer - output_activation: activation of the output layer - - returns: - output_placeholder: the result of a forward pass through the hidden layers + the output layer - """ - output_placeholder = input_placeholder - with tf.variable_scope(scope): - for _ in range(n_layers): - output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) - output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) - return output_placeholder - -def create_tf_session(): - config = tf.ConfigProto(device_count={'GPU': 0}) - sess = tf.Session(config=config) - return sess From 83a78876df6243582c06db0be6163e6a47b80938 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 12 Apr 2020 18:35:43 -0700 Subject: [PATCH 08/57] Moved file to within controller class --- flow/dagger/imitating_controller.py | 104 ---------------- flow/dagger/run.py | 78 ------------ flow/dagger/trainer.py | 180 ---------------------------- 3 files changed, 362 deletions(-) delete mode 100644 flow/dagger/imitating_controller.py delete mode 100644 flow/dagger/run.py delete mode 100644 flow/dagger/trainer.py diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py deleted file mode 100644 index 0adffb6cd..000000000 --- a/flow/dagger/imitating_controller.py +++ /dev/null @@ -1,104 +0,0 @@ -import numpy as np -import tensorflow as tf -from utils import * -import tensorflow_probability as tfp -from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer - - -class ImitatingController(BaseController): - """ - Controller which learns to imitate another given expert 
controller. - """ - # Implementation in Tensorflow - - def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): - - BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.sess = sess - self.action_dim = action_dim - self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training - self.inject_noise=inject_noise - self.noise_variance = noise_variance - - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() - - - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) - - def build_network(self): - """ - Defines neural network for choosing actions. 
- """ - self.define_placeholders() - self.define_forward_pass() - if self.training: - with tf.variable_scope('train', reuse=tf.AUTO_REUSE): - self.define_train_op() - - - def define_placeholders(self): - """ - Defines input, output, and training placeholders for neural net - """ - self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) - self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) - - if self.training: - self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) - - def define_forward_pass(self): - pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.action_predictions = pred_action - print("TYPE: ", type(self.obs_placeholder)) - - if self.inject_noise == 1: - self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) - - def define_train_op(self): - true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions - - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) - self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) - - def train(self, observation_batch, action_batch): - action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - - def get_accel_from_observation(self, observation): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - if len(observation.shape)<=1: - observation = observation[None] - ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - - return ret_val - - def get_accel(self, env): 
- # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - observation = env.get_state() - return self.get_accel_from_observation(observation) - - def add_to_replay_buffer(self, rollout_list): - """ Add rollouts to replay buffer """ - - self.replay_buffer.add_rollouts(rollout_list) - - def sample_data(self, batch_size): - """ Sample a batch of data from replay buffer """ - - return self.replay_buffer.sample_batch(batch_size) - - def save_network(self, save_path): - self.saver.save(self.sess, save_path) diff --git a/flow/dagger/run.py b/flow/dagger/run.py deleted file mode 100644 index e53030bf7..000000000 --- a/flow/dagger/run.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -import time -import numpy as np -import tensorflow as tf -from trainer import Trainer -from flow.controllers.car_following_models import IDMController - - -class Runner(object): - """ Class to run imitation learning (training and evaluation) """ - - def __init__(self, params): - - # initialize trainer - self.params = params - self.trainer = Trainer(params) - - def run_training_loop(self): - - self.trainer.run_training_loop(n_iter=self.params['n_iter']) - - def evaluate(self): - self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) - - def save_controller_network(self): - self.trainer.save_controller_network() - - -def main(): - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('--ep_len', type=int, default=3000) - - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) - parser.add_argument('--n_iter', '-n', type=int, default=5) - - parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=3000) - - parser.add_argument('--train_batch_size', type=int, - default=100) # number of sampled 
data points to be used per gradient/train step - - parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned - parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned - parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning - parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--save_path', type=str, default='') - parser.add_argument('--save_model', type=int, default=0) - parser.add_argument('--num_eval_episodes', type=int, default=10) - parser.add_argument('--inject_noise', type=int, default=0) - parser.add_argument('--noise_variance',type=float, default=0.5) - parser.add_argument('--vehicle_id',type=str, default='rl_0') - - args = parser.parse_args() - - # convert args to dictionary - params = vars(args) - print("INJECT: ", params['inject_noise']) - assert args.n_iter>1, ('DAgger needs >1 iteration') - - - # run training - train = Runner(params) - train.run_training_loop() - - # evaluate - train.evaluate() - print("DONE") - - if params['save_model'] == 1: - train.save_controller_network() - - # tensorboard - writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) - - -if __name__ == "__main__": - main() diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py deleted file mode 100644 index 141021ef3..000000000 --- a/flow/dagger/trainer.py +++ /dev/null @@ -1,180 +0,0 @@ -import time -from collections import OrderedDict -import pickle -import numpy as np -import tensorflow as tf -import gym -import os -from flow.utils.registry import make_create_env -from bottleneck_env import flow_params -from imitating_controller import ImitatingController -from flow.controllers.car_following_models import IDMController -from flow.controllers.velocity_controllers import FollowerStopper -from flow.core.params import SumoCarFollowingParams -from utils import * - -class Trainer(object): - """ - 
Class to initialize and run training for imitation learning (with DAgger) - """ - - def __init__(self, params): - self.params = params - self.sess = create_tf_session() - - create_env, _ = make_create_env(flow_params) - self.env = create_env() - self.env.reset() - - assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() - self.vehicle_id = self.params['vehicle_id'] - - obs_dim = self.env.observation_space.shape[0] - - action_dim = (1,)[0] - self.params['action_dim'] = action_dim - self.params['obs_dim'] = obs_dim - - car_following_params = SumoCarFollowingParams() - self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) - - # initialize expert controller - # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) - self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) - - tf.global_variables_initializer().run(session=self.sess) - - - def run_training_loop(self, n_iter): - """ - Trains controller for n_iter iterations - - Args: - param n_iter: number of iterations to execute training - """ - - # init vars at beginning of training - self.total_envsteps = 0 - self.start_time = time.time() - - for itr in range(n_iter): - print("\n\n********** Iteration %i ************"%itr) - - # collect trajectories, to be used for training - if itr == 0: - # first iteration is standard behavioral cloning - training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) - else: - training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) - - paths, envsteps_this_batch = training_returns - self.total_envsteps += 
envsteps_this_batch - - # add collected data to replay buffer - self.controller.add_to_replay_buffer(paths) - - # train controller (using sampled data from replay buffer) - loss = self.train_controller() - - def collect_training_trajectories(self, itr, batch_size): - """ - Collect (state, action, reward, next_state, terminal) tuples for training - - Args: - itr: iteration of training during which functino is called - batch_size: number of tuples to collect - Returns: - paths: list of trajectories - envsteps_this_batch: the sum over the numbers of environment steps in paths - """ - - if itr == 0: - collect_controller = self.expert_controller - else: - collect_controller = self.controller - - print("\nCollecting data for training") - paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) - - return paths, envsteps_this_batch - - def train_controller(self): - """ - Trains controller using data sampled from replay buffer - """ - - print('Training controller using sampled data from replay buffer') - - for train_step in range(self.params['num_agent_train_steps_per_iter']): - ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) - self.controller.train(ob_batch, expert_ac_batch) - - def evaluate_controller(self, num_trajs = 10): - """ - Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout - - Args: - num_trajs: number of trajectories to evaluate performance on - """ - - print("\n\n********** Evaluation ************\n\n") - - trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) - - average_imitator_reward = 0 - total_imitator_steps = 0 - average_imitator_reward_per_rollout = 0 - - action_errors = np.array([]) - average_action_expert = 0 - 
average_action_imitator = 0 - - for traj in trajectories: - imitator_actions = traj['actions'] - expert_actions = traj['expert_actions'] - - average_action_expert += np.sum(expert_actions) - average_action_imitator += np.sum(imitator_actions) - - action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) - action_errors = np.append(action_errors, action_error) - - average_imitator_reward += np.sum(traj['rewards']) - total_imitator_steps += len(traj['rewards']) - average_imitator_reward_per_rollout += np.sum(traj['rewards']) - - average_imitator_reward = average_imitator_reward / total_imitator_steps - average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) - - average_action_expert = average_action_expert / total_imitator_steps - average_action_imitator = average_action_imitator / total_imitator_steps - - - print('Evaluating controller on reward') - expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) - - average_expert_reward = 0 - total_expert_steps = 0 - average_expert_reward_per_rollout = 0 - - for traj in expert_trajectories: - average_expert_reward += np.sum(traj['rewards']) - total_expert_steps += len(traj['rewards']) - average_expert_reward_per_rollout += np.sum(traj['rewards']) - - average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) - average_expert_reward = average_expert_reward / total_expert_steps - - print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) - print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) - print("AVERAGE REWARD PER STEP DIFFERENCE: \n", np.abs(average_expert_reward - average_imitator_reward)) - print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT 
DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout)) - print("MEAN ACTION, IMITATOR: ", average_action_imitator) - print("MEAN ACTION, EXPERT: ", average_action_expert) - print("MEAN ACTION ERROR: \n", np.mean(action_errors)) - - def save_controller_network(self): - print("Saving tensorflow model to: ", self.params['save_path']) - self.controller.save_network(self.params['save_path']) From c668336689d0b061244b43ce72fe3c4928bcbc9f Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 12 Apr 2020 18:37:55 -0700 Subject: [PATCH 09/57] Renamed directory, code cleanup, evaluation script --- flow/controllers/{dagger => imitation_learning}/.idea/dagger.iml | 0 flow/controllers/{dagger => imitation_learning}/bottleneck_env.py | 0 .../{dagger => imitation_learning}/imitating_controller.py | 0 flow/controllers/{dagger => imitation_learning}/replay_buffer.py | 0 flow/controllers/{dagger => imitation_learning}/ring_env.py | 0 flow/controllers/{dagger => imitation_learning}/run.py | 0 flow/controllers/{dagger => imitation_learning}/trainer.py | 0 flow/controllers/{dagger => imitation_learning}/utils.py | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename flow/controllers/{dagger => imitation_learning}/.idea/dagger.iml (100%) rename flow/controllers/{dagger => imitation_learning}/bottleneck_env.py (100%) rename flow/controllers/{dagger => imitation_learning}/imitating_controller.py (100%) rename flow/controllers/{dagger => imitation_learning}/replay_buffer.py (100%) rename flow/controllers/{dagger => imitation_learning}/ring_env.py (100%) rename flow/controllers/{dagger => imitation_learning}/run.py (100%) rename flow/controllers/{dagger => imitation_learning}/trainer.py (100%) rename flow/controllers/{dagger => imitation_learning}/utils.py (100%) diff --git a/flow/controllers/dagger/.idea/dagger.iml b/flow/controllers/imitation_learning/.idea/dagger.iml similarity index 100% rename from flow/controllers/dagger/.idea/dagger.iml 
rename to flow/controllers/imitation_learning/.idea/dagger.iml diff --git a/flow/controllers/dagger/bottleneck_env.py b/flow/controllers/imitation_learning/bottleneck_env.py similarity index 100% rename from flow/controllers/dagger/bottleneck_env.py rename to flow/controllers/imitation_learning/bottleneck_env.py diff --git a/flow/controllers/dagger/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py similarity index 100% rename from flow/controllers/dagger/imitating_controller.py rename to flow/controllers/imitation_learning/imitating_controller.py diff --git a/flow/controllers/dagger/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py similarity index 100% rename from flow/controllers/dagger/replay_buffer.py rename to flow/controllers/imitation_learning/replay_buffer.py diff --git a/flow/controllers/dagger/ring_env.py b/flow/controllers/imitation_learning/ring_env.py similarity index 100% rename from flow/controllers/dagger/ring_env.py rename to flow/controllers/imitation_learning/ring_env.py diff --git a/flow/controllers/dagger/run.py b/flow/controllers/imitation_learning/run.py similarity index 100% rename from flow/controllers/dagger/run.py rename to flow/controllers/imitation_learning/run.py diff --git a/flow/controllers/dagger/trainer.py b/flow/controllers/imitation_learning/trainer.py similarity index 100% rename from flow/controllers/dagger/trainer.py rename to flow/controllers/imitation_learning/trainer.py diff --git a/flow/controllers/dagger/utils.py b/flow/controllers/imitation_learning/utils.py similarity index 100% rename from flow/controllers/dagger/utils.py rename to flow/controllers/imitation_learning/utils.py From f54eebc7c9ff97a54b06951c5c8047c0fbd81eaa Mon Sep 17 00:00:00 2001 From: Akash Velu <31679538+akashvelu@users.noreply.github.com> Date: Mon, 13 Apr 2020 12:23:50 -0700 Subject: [PATCH 10/57] Delete dagger.iml --- flow/controllers/imitation_learning/.idea/dagger.iml | 12 ------------ 1 
file changed, 12 deletions(-) delete mode 100644 flow/controllers/imitation_learning/.idea/dagger.iml diff --git a/flow/controllers/imitation_learning/.idea/dagger.iml b/flow/controllers/imitation_learning/.idea/dagger.iml deleted file mode 100644 index 0bc0e0321..000000000 --- a/flow/controllers/imitation_learning/.idea/dagger.iml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - \ No newline at end of file From eb7b3a2596dbd430f58fe200863fda1eb2c7ba0b Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 14 Apr 2020 10:54:26 -0700 Subject: [PATCH 11/57] initial multiagent imitation learning implementation --- flow/controllers/dagger/run.py | 78 +++++++ flow/controllers/dagger/trainer.py | 179 +++++++++++++++ .../imitation_learning/Untitled.ipynb | 215 ++++++++++++++++++ .../imitation_learning/bottleneck_env.py | 2 +- .../imitation_learning/i210_multiagent.py | 192 ++++++++++++++++ .../imitating_controller.py | 93 +------- .../imitation_learning/imitating_network.py | 102 +++++++++ .../imitation_learning/multiagent_ring_env.py | 99 ++++++++ flow/controllers/imitation_learning/run.py | 8 +- .../controllers/imitation_learning/trainer.py | 62 +++-- flow/controllers/imitation_learning/utils.py | 102 ++++++++- 11 files changed, 1012 insertions(+), 120 deletions(-) create mode 100644 flow/controllers/dagger/run.py create mode 100644 flow/controllers/dagger/trainer.py create mode 100644 flow/controllers/imitation_learning/Untitled.ipynb create mode 100644 flow/controllers/imitation_learning/i210_multiagent.py create mode 100644 flow/controllers/imitation_learning/imitating_network.py create mode 100644 flow/controllers/imitation_learning/multiagent_ring_env.py diff --git a/flow/controllers/dagger/run.py b/flow/controllers/dagger/run.py new file mode 100644 index 000000000..faa7d4ee6 --- /dev/null +++ b/flow/controllers/dagger/run.py @@ -0,0 +1,78 @@ +import os +import time +import numpy as np +import tensorflow as tf +from trainer import Trainer +from 
flow.controllers.car_following_models import IDMController + + +class Runner(object): + """ Class to run imitation learning (training and evaluation) """ + + def __init__(self, params): + + # initialize trainer + self.params = params + self.trainer = Trainer(params) + + def run_training_loop(self): + + self.trainer.run_training_loop(n_iter=self.params['n_iter']) + + def evaluate(self): + self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) + + def save_controller_network(self): + self.trainer.save_controller_network() + + +def main(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--ep_len', type=int, default=3000) + + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) + parser.add_argument('--n_iter', '-n', type=int, default=5) + + parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=3000) + + parser.add_argument('--train_batch_size', type=int, + default=100) # number of sampled data points to be used per gradient/train step + + parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned + parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned + parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning + parser.add_argument('--replay_buffer_size', type=int, default=1000000) + parser.add_argument('--save_path', type=str, default='') + parser.add_argument('--save_model', type=int, default=0) + parser.add_argument('--num_eval_episodes', type=int, default=10) + parser.add_argument('--inject_noise', type=int, default=0) + parser.add_argument('--noise_variance',type=float, default=0.5) + parser.add_argument('--vehicle_id', type=str, default='rl_0') + + args = 
parser.parse_args() + + # convert args to dictionary + params = vars(args) + assert args.n_iter>1, ('DAgger needs >1 iteration') + + + # run training + train = Runner(params) + train.run_training_loop() + + # evaluate + train.evaluate() + print("DONE") + + if params['save_model'] == 1: + train.save_controller_network() + + # tensorboard + if params['save_model'] == 1: + writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + +if __name__ == "__main__": + main() diff --git a/flow/controllers/dagger/trainer.py b/flow/controllers/dagger/trainer.py new file mode 100644 index 000000000..03364f528 --- /dev/null +++ b/flow/controllers/dagger/trainer.py @@ -0,0 +1,179 @@ +import time +from collections import OrderedDict +import pickle +import numpy as np +import tensorflow as tf +import gym +import os +from flow.utils.registry import make_create_env +from bottleneck_env import flow_params +from imitating_controller import ImitatingController +from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.params import SumoCarFollowingParams +from utils import * + +class Trainer(object): + """ + Class to initialize and run training for imitation learning (with DAgger) + """ + + def __init__(self, params): + self.params = params + self.sess = create_tf_session() + + create_env, _ = make_create_env(flow_params) + self.env = create_env() + self.env.reset() + + print(self.env.k.vehicle.get_ids()) + assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() + self.vehicle_id = self.params['vehicle_id'] + + obs_dim = self.env.observation_space.shape[0] + + action_dim = (1,)[0] + self.params['action_dim'] = action_dim + self.params['obs_dim'] = obs_dim + + car_following_params = SumoCarFollowingParams() + self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], 
self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) + # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) + + tf.global_variables_initializer().run(session=self.sess) + + + def run_training_loop(self, n_iter): + """ + Trains controller for n_iter iterations + + Args: + param n_iter: number of iterations to execute training + """ + + # init vars at beginning of training + self.total_envsteps = 0 + self.start_time = time.time() + + for itr in range(n_iter): + print("\n\n********** Iteration %i ************"%itr) + + # collect trajectories, to be used for training + if itr == 0: + # first iteration is standard behavioral cloning + training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) + else: + training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) + + paths, envsteps_this_batch = training_returns + self.total_envsteps += envsteps_this_batch + + # add collected data to replay buffer + self.controller.add_to_replay_buffer(paths) + + # train controller (using sampled data from replay buffer) + loss = self.train_controller() + + def collect_training_trajectories(self, itr, batch_size): + """ + Collect (state, action, reward, next_state, terminal) tuples for training + + Args: + itr: iteration of training during which functino is called + batch_size: number of tuples to collect + Returns: + paths: list of trajectories + envsteps_this_batch: the sum over the numbers of environment steps in paths + """ + + if itr == 0: + collect_controller = self.expert_controller + else: + collect_controller = self.controller + + print("\nCollecting data to be used for training...") + paths, envsteps_this_batch = 
sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) + + return paths, envsteps_this_batch + + def train_controller(self): + """ + Trains controller using data sampled from replay buffer + """ + + print('Training controller using sampled data from replay buffer') + for train_step in range(self.params['num_agent_train_steps_per_iter']): + ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) + self.controller.train(ob_batch, expert_ac_batch) + + def evaluate_controller(self, num_trajs = 10): + """ + Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout + + Args: + num_trajs: number of trajectories to evaluate performance on + """ + + print("\n\n********** Evaluation ************ \n") + + trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_imitator_reward = 0 + total_imitator_steps = 0 + average_imitator_reward_per_rollout = 0 + + action_errors = np.array([]) + average_action_expert = 0 + average_action_imitator = 0 + + # compare actions taken in each step of trajectories + for traj in trajectories: + imitator_actions = traj['actions'] + expert_actions = traj['expert_actions'] + + average_action_expert += np.sum(expert_actions) + average_action_imitator += np.sum(imitator_actions) + + action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) + action_errors = np.append(action_errors, action_error) + + average_imitator_reward += np.sum(traj['rewards']) + total_imitator_steps += len(traj['rewards']) + average_imitator_reward_per_rollout += np.sum(traj['rewards']) + + average_imitator_reward = average_imitator_reward / total_imitator_steps + average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / 
len(trajectories) + + average_action_expert = average_action_expert / total_imitator_steps + average_action_imitator = average_action_imitator / total_imitator_steps + + + expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_expert_reward = 0 + total_expert_steps = 0 + average_expert_reward_per_rollout = 0 + + # compare reward accumulated in trajectories collected via expert vs. via imitator + for traj in expert_trajectories: + average_expert_reward += np.sum(traj['rewards']) + total_expert_steps += len(traj['rewards']) + average_expert_reward_per_rollout += np.sum(traj['rewards']) + + average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) + average_expert_reward = average_expert_reward / total_expert_steps + + print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) + print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) + print("AVERAGE REWARD PER STEP DIFFERENCE: ", np.abs(average_expert_reward - average_imitator_reward), "\n") + + print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: ", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + + print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") + + def save_controller_network(self): + print("Saving tensorflow model to: ", self.params['save_path']) + self.controller.save_network(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/Untitled.ipynb b/flow/controllers/imitation_learning/Untitled.ipynb new file mode 100644 index 000000000..875fe73b6 --- /dev/null +++ b/flow/controllers/imitation_learning/Untitled.ipynb @@ -0,0 +1,215 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + 
"outputs": [ + { + "ename": "ImportError", + "evalue": "cannot import name 'energy_consumption'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mregistry\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmake_create_env\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mi210_multiagent\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params_multi\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mring_env\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params_single\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontrollers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcar_following_models\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mIDMController\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/i210_multiagent.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0mSumoParams\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSumoLaneChangeParams\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrewards\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0menergy_consumption\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnetworks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mi210_subnetwork\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mI210SubNetwork\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mEDGES_DISTRIBUTION\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menvs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmultiagent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mi210\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mI210MultiEnv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mADDITIONAL_ENV_PARAMS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mImportError\u001b[0m: cannot import name 'energy_consumption'" + ] + } + ], + "source": [ + "import time\n", + "import pickle\n", + "import numpy as np\n", + "import gym\n", + "import os\n", + "from flow.utils.registry import make_create_env\n", + "from i210_multiagent import flow_params as flow_params_multi\n", + "from ring_env import flow_params as flow_params_single\n", + "from flow.controllers.car_following_models import IDMController\n", + "from flow.controllers.velocity_controllers import FollowerStopper\n", + "from flow.core.params import SumoCarFollowingParams\n", + 
"from utils import *" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params_multi)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "-----------------------\n", + "ring length: 264\n", + "v_max: 5.329679917416892\n", + "-----------------------\n" + ] + }, + { + "data": { + "text/plain": [ + "{'rl_0_0': array([0.30672195, 0.00223007, 0.02625558]),\n", + " 'rl_1_0': array([ 0.34392208, -0.00785657, 0.02819709])}" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.reset()" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['human_0',\n", + " 'human_1',\n", + " 'human_2',\n", + " 'human_3',\n", + " 'human_4',\n", + " 'human_5',\n", + " 'human_6',\n", + " 'human_7',\n", + " 'human_8',\n", + " 'human_9',\n", + " 'human_10',\n", + " 'human_11',\n", + " 'human_12',\n", + " 'human_13',\n", + " 'human_14',\n", + " 'human_15',\n", + " 'human_16',\n", + " 'human_17',\n", + " 'human_18',\n", + " 'human_19',\n", + " 'human_20',\n", + " 'rl_0']" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.k.vehicle.get_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = {'rl_0': env.action_space.sample()}\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.step(None)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + 
"text/plain": [ + "{'rl_0_0': array([0.25527085, 0.00670868, 0.02368258]),\n", + " 'rl_1_0': array([ 0.24537913, -0.00482127, 0.02289928])}" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.get_state()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['rl_0_0', 'rl_1_0']" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list(env.get_state().keys())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flow", + "language": "python", + "name": "flow" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flow/controllers/imitation_learning/bottleneck_env.py b/flow/controllers/imitation_learning/bottleneck_env.py index 820244a87..c0fabedda 100644 --- a/flow/controllers/imitation_learning/bottleneck_env.py +++ b/flow/controllers/imitation_learning/bottleneck_env.py @@ -111,7 +111,7 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( sim_step=0.5, - render=False, + render=True, print_warnings=False, restart_instance=True, ), diff --git a/flow/controllers/imitation_learning/i210_multiagent.py b/flow/controllers/imitation_learning/i210_multiagent.py new file mode 100644 index 000000000..6efbf1e3c --- /dev/null +++ b/flow/controllers/imitation_learning/i210_multiagent.py @@ -0,0 +1,192 @@ +"""Multi-agent I-210 example. +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. 
+""" +import os +import numpy as np + +from ray.tune.registry import register_env + +from flow.controllers import RLController +from flow.controllers.car_following_models import IDMController +import flow.config as config +from flow.core.params import EnvParams +from flow.core.params import NetParams +from flow.core.params import InitialConfig +from flow.core.params import InFlows +from flow.core.params import VehicleParams +from flow.core.params import SumoParams +from flow.core.params import SumoLaneChangeParams +from flow.core.rewards import energy_consumption +from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION +from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS +from flow.utils.registry import make_create_env + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 4000 + +VEH_PER_HOUR_BASE_119257914 = 10800 +VEH_PER_HOUR_BASE_27414345 = 321 +VEH_PER_HOUR_BASE_27414342 = 421 + +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10 + +# SET UP PARAMETERS FOR THE ENVIRONMENT +additional_env_params = ADDITIONAL_ENV_PARAMS.copy() +additional_env_params.update({ + 'max_accel': 2.6, + 'max_decel': 4.5, + # configure the observation space. Look at the I210MultiEnv class for more info. 
+ 'lead_obs': True, + # whether to add in a reward for the speed of nearby vehicles + "local_reward": True +}) + +# CREATE VEHICLE TYPES AND INFLOWS +# no vehicles in the network +vehicles = VehicleParams() +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) +vehicles.add( + "av", + acceleration_controller=(RLController, {}), + num_vehicles=0, +) + +inflow = InFlows() +# main highway +pen_rate = PENETRATION_RATE / 100 +assert pen_rate < 1.0, "your penetration rate is over 100%" +assert pen_rate > 0.0, "your penetration rate should be above zero" +inflow.add( + veh_type="human", + edge="119257914", + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * (1 - pen_rate)), + # probability=1.0, + depart_lane="random", + departSpeed=20) +# # on ramp +# inflow.add( +# veh_type="human", +# edge="27414345", +# vehs_per_hour=321 * pen_rate, +# depart_lane="random", +# depart_speed=20) +# inflow.add( +# veh_type="human", +# edge="27414342#0", +# vehs_per_hour=421 * pen_rate, +# depart_lane="random", +# depart_speed=20) + +# Now add the AVs +# main highway +inflow.add( + veh_type="av", + edge="119257914", + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pen_rate), + # probability=1.0, + depart_lane="random", + depart_speed=20) +# # on ramp +# inflow.add( +# veh_type="av", +# edge="27414345", +# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414345 * pen_rate), +# depart_lane="random", +# depart_speed=20) +# inflow.add( +# veh_type="av", +# edge="27414342#0", +# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414342 * pen_rate), +# depart_lane="random", +# depart_speed=20) + +NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") + +flow_params = dict( + # name of the experiment + exp_tag='I_210_subnetwork', + + # name of the flow environment the experiment is running on + env_name=I210MultiEnv, + + # name 
of the network class the experiment is running on + network=I210SubNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # simulation-related parameters + sim=SumoParams( + sim_step=0.5, + render=False, + color_by_speed=False, + restart_instance=True, + use_ballistic=True + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + sims_per_step=1, + warmup_steps=0, + additional_params=additional_env_params, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflow, + template=NET_TEMPLATE + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig( + edges_distribution=EDGES_DISTRIBUTION, + ), +) + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = test_env.action_space + +POLICY_GRAPHS = {'av': (None, obs_space, act_space, {})} + +POLICIES_TO_TRAIN = ['av'] + + +def policy_mapping_fn(_): + """Map a policy in RLlib.""" + return 'av' + + +custom_callables = { + "avg_speed": lambda env: np.mean([speed for speed in + env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), + "avg_outflow": lambda env: np.nan_to_num( + env.k.vehicle.get_outflow_rate(120)), + "avg_energy": lambda env: -1*energy_consumption(env, 0.1) +} diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 0adffb6cd..9c7cb0b71 100644 --- 
a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -12,93 +12,16 @@ class ImitatingController(BaseController): """ # Implementation in Tensorflow - def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + def __init__(self, veh_id, action_network, multiagent, car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.sess = sess - self.action_dim = action_dim - self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training - self.inject_noise=inject_noise - self.noise_variance = noise_variance - - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() - - - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) - - def build_network(self): - """ - Defines neural network for choosing actions. 
- """ - self.define_placeholders() - self.define_forward_pass() - if self.training: - with tf.variable_scope('train', reuse=tf.AUTO_REUSE): - self.define_train_op() - - - def define_placeholders(self): - """ - Defines input, output, and training placeholders for neural net - """ - self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) - self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) - - if self.training: - self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) - - def define_forward_pass(self): - pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.action_predictions = pred_action - print("TYPE: ", type(self.obs_placeholder)) - - if self.inject_noise == 1: - self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) - - def define_train_op(self): - true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions - - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) - self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) - - def train(self, observation_batch, action_batch): - action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - - def get_accel_from_observation(self, observation): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - if len(observation.shape)<=1: - observation = observation[None] - ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - - return ret_val + self.action_network = 
action_network + self.multiagent = multiagent def get_accel(self, env): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - observation = env.get_state() - return self.get_accel_from_observation(observation) - - def add_to_replay_buffer(self, rollout_list): - """ Add rollouts to replay buffer """ - - self.replay_buffer.add_rollouts(rollout_list) - - def sample_data(self, batch_size): - """ Sample a batch of data from replay buffer """ - - return self.replay_buffer.sample_batch(batch_size) + if self.multiagent: + observation = env.get_state()[self.veh_id] + else: + observation = env.get_state() - def save_network(self, save_path): - self.saver.save(self.sess, save_path) + return self.action_network.get_accel_from_observation(observation) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py new file mode 100644 index 000000000..383b10beb --- /dev/null +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -0,0 +1,102 @@ +import numpy as np +import tensorflow as tf +from utils import * +import tensorflow_probability as tfp +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer + + +class ImitatingNetwork(): + """ + Neural network which learns to imitate another given expert controller. 
+ """ + # Implementation in Tensorflow + + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars'): + + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + self.inject_noise=inject_noise + self.noise_variance = noise_variance + + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() + + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + + def build_network(self): + """ + Defines neural network for choosing actions. + """ + self.define_placeholders() + self.define_forward_pass() + if self.training: + with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + self.define_train_op() + + + def define_placeholders(self): + """ + Defines input, output, and training placeholders for neural net + """ + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + + if self.training: + self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + + def define_forward_pass(self): + pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.action_predictions = pred_action + print("TYPE: ", type(self.obs_placeholder)) + + if self.inject_noise == 1: + self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) + + 
def define_train_op(self): + true_actions = self.action_labels_placeholder + predicted_actions = self.action_predictions + + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) + + def train(self, observation_batch, action_batch): + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + + def get_accel_from_observation(self, observation): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] + + return ret_val + + def get_accel(self, env): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + observation = env.get_state() + return self.get_accel_from_observation(observation) + + def add_to_replay_buffer(self, rollout_list): + """ Add rollouts to replay buffer """ + + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + """ Sample a batch of data from replay buffer """ + + return self.replay_buffer.sample_batch(batch_size) + + def save_network(self, save_path): + self.saver.save(self.sess, save_path) diff --git a/flow/controllers/imitation_learning/multiagent_ring_env.py b/flow/controllers/imitation_learning/multiagent_ring_env.py new file mode 100644 index 000000000..538679ed0 --- /dev/null +++ b/flow/controllers/imitation_learning/multiagent_ring_env.py @@ -0,0 +1,99 @@ +"""Ring road example. +Trains a number of autonomous vehicles to stabilize the flow of 22 vehicles in +a variable length ring road. 
+""" +from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams +from flow.core.params import VehicleParams, SumoCarFollowingParams +from flow.controllers import RLController, IDMController, ContinuousRouter +from flow.envs.multiagent import MultiAgentWaveAttenuationPOEnv +from flow.networks import RingNetwork +from flow.utils.registry import make_create_env + +# time horizon of a single rollout +HORIZON = 3000 +# number of rollouts per training iteration +N_ROLLOUTS = 20 +# number of parallel workers +N_CPUS = 2 +# number of automated vehicles. Must be less than or equal to 22. +NUM_AUTOMATED = 2 + + +# We evenly distribute the automated vehicles in the network. +num_human = 22 - NUM_AUTOMATED +humans_remaining = num_human + +vehicles = VehicleParams() +for i in range(NUM_AUTOMATED): + # Add one automated vehicle. + vehicles.add( + veh_id="rl_{}".format(i), + acceleration_controller=(RLController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=1) + + # Add a fraction of the remaining human vehicles. 
+ vehicles_to_add = round(humans_remaining / (NUM_AUTOMATED - i)) + humans_remaining -= vehicles_to_add + vehicles.add( + veh_id="human_{}".format(i), + acceleration_controller=(IDMController, { + "noise": 0.2 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0 + ), + routing_controller=(ContinuousRouter, {}), + num_vehicles=vehicles_to_add) + + +flow_params = dict( + # name of the experiment + exp_tag="multiagent_ring", + + # name of the flow environment the experiment is running on + env_name=MultiAgentWaveAttenuationPOEnv, + + # name of the network class the experiment is running on + network=RingNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.1, + render=True, + restart_instance=False + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=750, + clip_actions=False, + additional_params={ + "max_accel": 1, + "max_decel": 1, + "ring_length": [220, 270], + }, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + additional_params={ + "length": 260, + "lanes": 1, + "speed_limit": 30, + "resolution": 40, + }, ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index c647f37cd..b88f7fbd7 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -1,7 +1,7 @@ import os import time import numpy as np -import tensorflow as tf +#import tensorflow as tf from trainer import Trainer from 
flow.controllers.car_following_models import IDMController @@ -34,8 +34,8 @@ def main(): parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) parser.add_argument('--n_iter', '-n', type=int, default=5) - parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=3000) + parser.add_argument('--batch_size', type=int, default=10000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=30000) parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step @@ -50,12 +50,12 @@ def main(): parser.add_argument('--inject_noise', type=int, default=0) parser.add_argument('--noise_variance',type=float, default=0.5) parser.add_argument('--vehicle_id', type=str, default='rl_0') + parser.add_argument('--multiagent', type=bool, default=False) args = parser.parse_args() # convert args to dictionary params = vars(args) - print("INJECT: ", params['inject_noise']) assert args.n_iter>1, ('DAgger needs >1 iteration') diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 03364f528..d9c1b3164 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -2,15 +2,16 @@ from collections import OrderedDict import pickle import numpy as np -import tensorflow as tf import gym import os from flow.utils.registry import make_create_env -from bottleneck_env import flow_params +from multiagent_ring_env import flow_params from imitating_controller import ImitatingController +from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import 
FollowerStopper from flow.core.params import SumoCarFollowingParams +import tensorflow as tf from utils import * class Trainer(object): @@ -19,34 +20,51 @@ class Trainer(object): """ def __init__(self, params): + + # param setup self.params = params self.sess = create_tf_session() + # environment setup create_env, _ = make_create_env(flow_params) self.env = create_env() - self.env.reset() + init_state = self.env.reset() - print(self.env.k.vehicle.get_ids()) - assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() - self.vehicle_id = self.params['vehicle_id'] + # vehicle setup + self.multiagent = params['multiagent'] - obs_dim = self.env.observation_space.shape[0] + # TODO: remove print + print("MULTI: ", self.multiagent) + + if self.multiagent: + self.vehicle_ids = list(init_state.keys()) + else: + print("IDS: ", self.env.k.vehicle.get_ids()) + assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() + self.vehicle_ids = [self.params['vehicle_id']] + # neural net setup + obs_dim = self.env.observation_space.shape[0] action_dim = (1,)[0] self.params['action_dim'] = action_dim self.params['obs_dim'] = obs_dim - car_following_params = SumoCarFollowingParams() - self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) - # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) - self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], 
inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) tf.global_variables_initializer().run(session=self.sess) + # controllers setup + car_following_params = SumoCarFollowingParams() + self.expert_controllers = [] + self.controllers = [] + for vehicle_id in self.vehicle_ids: + self.expert_controllers.append(FollowerStopper(vehicle_id, car_following_params=car_following_params)) + self.controllers.append(ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params)) + def run_training_loop(self, n_iter): """ - Trains controller for n_iter iterations + Trains imitator for n_iter iterations Args: param n_iter: number of iterations to execute training @@ -70,7 +88,7 @@ def run_training_loop(self, n_iter): self.total_envsteps += envsteps_this_batch # add collected data to replay buffer - self.controller.add_to_replay_buffer(paths) + self.action_network.add_to_replay_buffer(paths) # train controller (using sampled data from replay buffer) loss = self.train_controller() @@ -88,14 +106,14 @@ def collect_training_trajectories(self, itr, batch_size): """ if itr == 0: - collect_controller = self.expert_controller + collect_controllers = self.expert_controllers else: - collect_controller = self.controller + collect_controllers = self.controllers print("\nCollecting data to be used for training...") - paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_ids, collect_controllers, self.expert_controllers, batch_size, self.params['ep_len'], self.multiagent) - return paths, envsteps_this_batch + return trajectories, envsteps_this_batch def train_controller(self): """ @@ -104,8 +122,8 @@ def train_controller(self): print('Training controller using sampled data from replay buffer') for train_step in 
range(self.params['num_agent_train_steps_per_iter']): - ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) - self.controller.train(ob_batch, expert_ac_batch) + ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.action_network.sample_data(self.params['train_batch_size']) + self.action_network.train(ob_batch, expert_ac_batch) def evaluate_controller(self, num_trajs = 10): """ @@ -117,7 +135,7 @@ def evaluate_controller(self, num_trajs = 10): print("\n\n********** Evaluation ************ \n") - trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) + trajectories = sample_n_trajectories(self.env, self.vehicle_ids, self.controllers, self.expert_controllers, num_trajs, self.params['ep_len'], self.multiagent) average_imitator_reward = 0 total_imitator_steps = 0 @@ -149,7 +167,7 @@ def evaluate_controller(self, num_trajs = 10): average_action_imitator = average_action_imitator / total_imitator_steps - expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) + expert_trajectories = sample_n_trajectories(self.env, self.vehicle_ids, self.expert_controllers, self.expert_controllers, num_trajs, self.params['ep_len'], self.multiagent) average_expert_reward = 0 total_expert_steps = 0 @@ -176,4 +194,4 @@ def evaluate_controller(self, num_trajs = 10): def save_controller_network(self): print("Saving tensorflow model to: ", self.params['save_path']) - self.controller.save_network(self.params['save_path']) + self.action_network.save_network(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index a5bf7acfa..6e694ea01 100644 --- a/flow/controllers/imitation_learning/utils.py +++ 
b/flow/controllers/imitation_learning/utils.py @@ -5,17 +5,15 @@ """ Class agnostic helper functions """ -def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): +def sample_trajectory_singleagent(env, vehicle_id, controller, expert_controller, max_trajectory_length): """ Samples a trajectory for a given vehicle using the actions prescribed by specified controller. - Args: env: environment vehicle_id: id of the vehicle that is being controlled/tracked during trajectory controller: subclass of BaseController, decides actions taken by vehicle expert_controller: subclass of BaseController, "expert" for imitation learning max_trajectory_length: maximum steps in a trajectory - Returns: Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ @@ -61,7 +59,85 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) -def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batch_timesteps, max_trajectory_length): +def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length): + """ + Samples a trajectory for a given set of vehicles using the actions prescribed by specified controller. 
+ + Args: + env: environment + vehicle_ids: id of the vehicle that is being controlled/tracked during trajectory + controllers: subclass of BaseController, decides actions taken by vehicle + expert_controllers: subclass of BaseController, "expert" for imitation learning + max_trajectory_length: maximum steps in a trajectory + + Returns: + Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples + """ + + print("COLLECTING CONTROLLER: ", controllers[0]) + print("EXPERT CONTROLLER: ", expert_controllers[0]) + observation_dict = env.reset() + + for vehicle_id in vehicle_ids: + assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" + + observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] + traj_length = 0 + + while True: + rl_actions = dict() + invalid_expert_action = False + expert_action_dict = dict() + + for i in range(len(vehicle_ids)): + vehicle_id = vehicle_ids[i] + controller = controllers[i] + expert_controller = expert_controllers[i] + + action = controller.get_action(env) + + if type(action) == np.ndarray: + action = action.flatten()[0] + + expert_action = expert_controller.get_action(env) + expert_action_dict[vehicle_id] = expert_action + + if (expert_action is None or math.isnan(expert_action)): + invalid_expert_action = True + + rl_actions[vehicle_id] = action + + if invalid_expert_action: + # invalid action in rl_actions, so default control to SUMO + observations_dict, reward_dict, done_dict, _ = env.step(None) + traj_length += 1 + terminate_rollout = traj_length == max_trajectory_length or done_dict['__all__'] + if terminate_rollout: + break + continue + + for vehicle_id in vehicle_ids: + observations.append(observation_dict[vehicle_id]) + actions.append(rl_actions[vehicle_id]) + expert_actions.append(expert_action_dict[vehicle_id]) + + observation_dict, reward_dict, done_dict, _ = env.step(rl_actions) + 
terminate_rollout = done_dict['__all__'] or (traj_length == max_trajectory_length) + + for vehicle_id in vehicle_ids: + next_observations.append(observation_dict[vehicle_id]) + rewards.append(reward_dict[vehicle_id]) + terminals.append(terminate_rollout) + + traj_length += 1 + + if terminate_rollout: + break + + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + + +def sample_trajectories(env, vehicle_ids, controllers, expert_controllers, min_batch_timesteps, max_trajectory_length, multiagent): """ Samples trajectories to collect at least min_batch_timesteps steps in the environment @@ -80,15 +156,20 @@ def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batc trajectories = [] while total_envsteps < min_batch_timesteps: - trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + + if multiagent: + trajectory = sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length) + else: + trajectory = sample_trajectory_singleagent(env, vehicle_ids[0], controllers[0], expert_controllers[0], max_trajectory_length) + trajectories.append(trajectory) - traj_env_steps = len(trajectory["rewards"]) + traj_env_steps = len(trajectory["rewards"]) / len(vehicle_ids) total_envsteps += traj_env_steps return trajectories, total_envsteps -def sample_n_trajectories(env, vehicle_id, controller, expert_controller, n, max_trajectory_length): +def sample_n_trajectories(env, vehicle_ids, controllers, expert_controllers, n, max_trajectory_length, multiagent): """ Collects a fixed number of trajectories. 
@@ -106,7 +187,12 @@ def sample_n_trajectories(env, vehicle_id, controller, expert_controller, n, max """ trajectories = [] for _ in range(n): - trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + + if multiagent: + trajectory = sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length) + else: + trajectory = sample_trajectory_singleagent(env, vehicle_ids[0], controllers[0], expert_controllers[0], max_trajectory_length) + trajectories.append(trajectory) return trajectories From cb4cae82166743642cca71d7ea6ed5eb2e00f1c8 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 23 Apr 2020 10:40:00 -0700 Subject: [PATCH 12/57] Added multiagent capabilities for imitation learning --- .../imitating_controller.py | 1 - .../imitation_learning/imitating_network.py | 46 ++++- .../imitation_learning/replay_buffer.py | 24 ++- .../imitation_learning/replay_script.py | 80 +++++++++ flow/controllers/imitation_learning/run.py | 20 +-- .../controllers/imitation_learning/trainer.py | 46 +++-- flow/controllers/imitation_learning/utils.py | 160 +++++++++--------- .../imitation_learning/utils_tensorflow.py | 35 ++++ 8 files changed, 285 insertions(+), 127 deletions(-) create mode 100644 flow/controllers/imitation_learning/replay_script.py create mode 100644 flow/controllers/imitation_learning/utils_tensorflow.py diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 9c7cb0b71..a3f6864ae 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -1,6 +1,5 @@ import numpy as np import tensorflow as tf -from utils import * import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer diff --git a/flow/controllers/imitation_learning/imitating_network.py 
b/flow/controllers/imitation_learning/imitating_network.py index 383b10beb..8c7d35b27 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -1,6 +1,6 @@ import numpy as np import tensorflow as tf -from utils import * +from utils_tensorflow import * import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer @@ -12,7 +12,7 @@ class ImitatingNetwork(): """ # Implementation in Tensorflow - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars'): + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', load_existing=False, load_path=''): self.sess = sess self.action_dim = action_dim @@ -24,16 +24,21 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.inject_noise=inject_noise self.noise_variance = noise_variance - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() + if load_existing: + self.load_network(load_path) + + else: + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() if self.training: self.replay_buffer = ReplayBuffer(replay_buffer_size) else: self.replay_buffer = None - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + if not load_existing: + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) def build_network(self): """ @@ -46,25 +51,45 @@ def build_network(self): self.define_train_op() + def load_network(self, path): + """ + 
Load tensorflow model from the path specified, set action prediction to proper placeholder + """ + loader = tf.train.import_meta_graph(path + 'model.ckpt.meta') + loader.restore(self.sess, path+'model.ckpt') + + self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/obs:0') + self.action_predictions = tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') + + if self.inject_noise == 1: + self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) + + def define_placeholders(self): """ Defines input, output, and training placeholders for neural net """ - self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="observation", dtype=tf.float32) self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) if self.training: self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + def define_forward_pass(self): + """ + Build network and initialize proper action prediction op + """ pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) self.action_predictions = pred_action - print("TYPE: ", type(self.obs_placeholder)) if self.inject_noise == 1: self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) def define_train_op(self): + """ + Defines training operations for network + """ true_actions = self.action_labels_placeholder predicted_actions = self.action_predictions @@ -72,6 +97,9 @@ def define_train_op(self): self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) def train(self, observation_batch, action_batch): + """ + Executes one training step 
for the given batch of observation and action data + """ action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) @@ -100,3 +128,5 @@ def sample_data(self, batch_size): def save_network(self, save_path): self.saver.save(self.sess, save_path) + # tensorboard + writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py index 4e362bd41..77902814c 100644 --- a/flow/controllers/imitation_learning/replay_buffer.py +++ b/flow/controllers/imitation_learning/replay_buffer.py @@ -1,9 +1,6 @@ import time import numpy as np -import tensorflow as tf -import gym import os -from utils import * class ReplayBuffer(object): @@ -33,7 +30,7 @@ def add_rollouts(self, rollouts_list): for rollout in rollouts_list: self.rollouts.append(rollout) - observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) + observations, actions, expert_actions, rewards, next_observations, terminals = self.unpack_rollouts(rollouts_list) assert (not np.any(np.isnan(expert_actions))), "Invalid actions added to replay buffer" @@ -61,4 +58,21 @@ def sample_batch(self, batch_size): size = len(self.observations) rand_inds = np.random.randint(0, size, batch_size) - return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds], self.rewards[rand_inds], self.next_observations[rand_inds], self.terminals[rand_inds] + return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds] + + + + def unpack_rollouts(self, rollouts_list): + """ + Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays + rollouts: list of rollout dictionaries, rollout dictionary: 
dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" + return separate np arrays of observations, actions, rewards, next_observations, and is_terminals + """ + observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) + actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) + expert_actions = np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) + rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) + next_observations = np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) + terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) + + return observations, actions, expert_actions, rewards, next_observations, terminals diff --git a/flow/controllers/imitation_learning/replay_script.py b/flow/controllers/imitation_learning/replay_script.py new file mode 100644 index 000000000..5e3984e0d --- /dev/null +++ b/flow/controllers/imitation_learning/replay_script.py @@ -0,0 +1,80 @@ +import time +import numpy as np +import gym +import os +from flow.utils.registry import make_create_env +from i210_multiagent import flow_params as flow_params +from utils import * +from imitating_network import * +from utils_tensorflow import * +from flow.core.experiment import Experiment +from flow.core.params import SimParams + + + +def run_experiment(): + create_env, _ = make_create_env(flow_params) + env = create_env() + + obs_dim = env.observation_space.shape[0] + action_dim = (1,)[0] + + sess = create_tf_session() + action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/') + + def get_rl_actions(state): + rl_actions = {} + for vehicle_id in state.keys(): + obs = state[vehicle_id] + action = action_network.get_accel_from_observation(obs) + rl_actions[vehicle_id] = action + return rl_actions + + exp = 
Experiment(flow_params) + exp.run(num_runs=1, rl_actions=get_rl_actions, convert_to_csv=True) + + + +def run_rollout(): + + create_env, _ = make_create_env(flow_params) + env = create_env() + + obs_dim = env.observation_space.shape[0] + action_dim = (1,)[0] + + sess = create_tf_session() + action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/') + + init_state = env.reset() + + test_state = np.array([[1.0,1.0,1.0]], dtype='float32') + + reward = 0 + while(True): + rl_vehicles = env.k.vehicle.get_rl_ids() + if len(rl_vehicles) == 0: + observation_dict, reward_dict, done_dict, _ = env.step(None) + reward += sum(reward_dict.values()) + if done_dict['__all__']: + break + continue + + rl_actions = {} + observations = env.get_state() + + for vehicle_id in rl_vehicles: + obs = observations[vehicle_id] + action = action_network.get_accel_from_observation(obs) + rl_actions[vehicle_id] = action + + + observation_dict, reward_dict, done_dict, _ = env.step(rl_actions) + reward += sum(reward_dict.values()) + if done_dict['__all__']: + break + + print("Final Reward: ", reward) + +if __name__ == "__main__": + run_experiment() diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index b88f7fbd7..2b7e823cc 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -32,10 +32,10 @@ def main(): parser.add_argument('--ep_len', type=int, default=3000) parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) - parser.add_argument('--n_iter', '-n', type=int, default=5) + parser.add_argument('--n_iter', type=int, default=5) - parser.add_argument('--batch_size', type=int, default=10000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, 
default=30000) + parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=3000) parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step @@ -46,7 +46,7 @@ def main(): parser.add_argument('--replay_buffer_size', type=int, default=1000000) parser.add_argument('--save_path', type=str, default='') parser.add_argument('--save_model', type=int, default=0) - parser.add_argument('--num_eval_episodes', type=int, default=10) + parser.add_argument('--num_eval_episodes', type=int, default=30) parser.add_argument('--inject_noise', type=int, default=0) parser.add_argument('--noise_variance',type=float, default=0.5) parser.add_argument('--vehicle_id', type=str, default='rl_0') @@ -63,16 +63,14 @@ def main(): train = Runner(params) train.run_training_loop() - # evaluate - train.evaluate() - print("DONE") - + # save model after training if params['save_model'] == 1: train.save_controller_network() - # tensorboard - if params['save_model'] == 1: - writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + # evaluate + train.evaluate() + print("DONE") if __name__ == "__main__": diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index d9c1b3164..937ab4793 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -5,7 +5,7 @@ import gym import os from flow.utils.registry import make_create_env -from multiagent_ring_env import flow_params +from i210_multiagent import flow_params from imitating_controller import ImitatingController from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController @@ -13,6 +13,7 @@ from flow.core.params import SumoCarFollowingParams import tensorflow as tf from utils import * +from 
utils_tensorflow import * class Trainer(object): """ @@ -36,12 +37,7 @@ def __init__(self, params): # TODO: remove print print("MULTI: ", self.multiagent) - if self.multiagent: - self.vehicle_ids = list(init_state.keys()) - else: - print("IDS: ", self.env.k.vehicle.get_ids()) - assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() - self.vehicle_ids = [self.params['vehicle_id']] + self.vehicle_ids = self.env.k.vehicle.get_rl_ids() # neural net setup obs_dim = self.env.observation_space.shape[0] @@ -55,11 +51,11 @@ def __init__(self, params): # controllers setup car_following_params = SumoCarFollowingParams() - self.expert_controllers = [] - self.controllers = [] + self.controllers = dict() for vehicle_id in self.vehicle_ids: - self.expert_controllers.append(FollowerStopper(vehicle_id, car_following_params=car_following_params)) - self.controllers.append(ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params)) + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + imitator = ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params) + self.controllers[vehicle_id] = (imitator, expert) def run_training_loop(self, n_iter): @@ -105,13 +101,8 @@ def collect_training_trajectories(self, itr, batch_size): envsteps_this_batch: the sum over the numbers of environment steps in paths """ - if itr == 0: - collect_controllers = self.expert_controllers - else: - collect_controllers = self.controllers - print("\nCollecting data to be used for training...") - trajectories, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_ids, collect_controllers, self.expert_controllers, batch_size, self.params['ep_len'], self.multiagent) + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0) return trajectories, 
envsteps_this_batch @@ -122,7 +113,7 @@ def train_controller(self): print('Training controller using sampled data from replay buffer') for train_step in range(self.params['num_agent_train_steps_per_iter']): - ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.action_network.sample_data(self.params['train_batch_size']) + ob_batch, ac_batch, expert_ac_batch = self.action_network.sample_data(self.params['train_batch_size']) self.action_network.train(ob_batch, expert_ac_batch) def evaluate_controller(self, num_trajs = 10): @@ -135,7 +126,7 @@ def evaluate_controller(self, num_trajs = 10): print("\n\n********** Evaluation ************ \n") - trajectories = sample_n_trajectories(self.env, self.vehicle_ids, self.controllers, self.expert_controllers, num_trajs, self.params['ep_len'], self.multiagent) + trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False) average_imitator_reward = 0 total_imitator_steps = 0 @@ -146,7 +137,9 @@ def evaluate_controller(self, num_trajs = 10): average_action_imitator = 0 # compare actions taken in each step of trajectories - for traj in trajectories: + for traj_pair in trajectories: + traj = traj_pair[0] + traj_len = traj_pair[1] imitator_actions = traj['actions'] expert_actions = traj['expert_actions'] @@ -157,7 +150,7 @@ def evaluate_controller(self, num_trajs = 10): action_errors = np.append(action_errors, action_error) average_imitator_reward += np.sum(traj['rewards']) - total_imitator_steps += len(traj['rewards']) + total_imitator_steps += traj_len average_imitator_reward_per_rollout += np.sum(traj['rewards']) average_imitator_reward = average_imitator_reward / total_imitator_steps @@ -167,16 +160,18 @@ def evaluate_controller(self, num_trajs = 10): average_action_imitator = average_action_imitator / total_imitator_steps - expert_trajectories = sample_n_trajectories(self.env, self.vehicle_ids, self.expert_controllers, 
self.expert_controllers, num_trajs, self.params['ep_len'], self.multiagent) + expert_trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, True) average_expert_reward = 0 total_expert_steps = 0 average_expert_reward_per_rollout = 0 # compare reward accumulated in trajectories collected via expert vs. via imitator - for traj in expert_trajectories: + for traj_pair in expert_trajectories: + traj = traj_pair[0] + traj_len = traj_pair[1] average_expert_reward += np.sum(traj['rewards']) - total_expert_steps += len(traj['rewards']) + total_expert_steps += traj_len average_expert_reward_per_rollout += np.sum(traj['rewards']) average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) @@ -188,8 +183,9 @@ def evaluate_controller(self, num_trajs = 10): print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: ", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + print("MEAN EXPERT ACTION: ", average_action_expert) print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") def save_controller_network(self): diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 6e694ea01..499e06f1d 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -2,10 +2,15 @@ import os import numpy as np import math +from flow.core.params import SumoCarFollowingParams +from imitating_controller import ImitatingController +from imitating_network import ImitatingNetwork +from flow.controllers.car_following_models import IDMController +from 
flow.controllers.velocity_controllers import FollowerStopper """ Class agnostic helper functions """ -def sample_trajectory_singleagent(env, vehicle_id, controller, expert_controller, max_trajectory_length): +def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert): """ Samples a trajectory for a given vehicle using the actions prescribed by specified controller. Args: @@ -18,25 +23,57 @@ def sample_trajectory_singleagent(env, vehicle_id, controller, expert_controller Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ - print("COLLECTING CONTROLLER: ", controller) - print("EXPERT CONTROLLER: ", expert_controller) + vehicle_ids = env.k.vehicle.get_rl_ids() + print("VEHICLE IDS: ", vehicle_ids) + assert len(vehicle_ids) <= 1, "Not single-agent" observation = env.reset() - assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" + if len(vehicle_ids) == 1: + vehicle_id = vehicle_ids[0] + else: + vehicle_id = None observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] traj_length = 0 while True: - action = controller.get_action(env) + # update vehicle ids and make sure it is single agent + vehicle_ids = env.k.vehicle.get_rl_ids() + if len(vehicle_ids) == 0: + observation, reward, done, _ = env.step(None) + if done: + break + continue + + assert len(vehicle_ids) == 1, "Not single agent" + + # init controllers if vehicle id is new + vehicle_id = vehicle_ids[0] + if vehicle_id not in set(controllers.keys()): + + expert = FollowerStopper(vehicle_id, car_following_params=SumoCarFollowingParams()) + imitator = ImitatingController(vehicle_id, action_network, False, car_following_params=SumoCarFollowingParams()) + controllers[vehicle_id] = (imitator, expert) + + # decide which controller to use to collect trajectory + expert_controller = controllers[vehicle_id][1] + if use_expert: +
controller = expert_controller + else: + controller = controllers[vehicle_id][0] + + + print("COLLECTING CONTROLLER: ", controller) + print("EXPERT CONTROLLER: ", expert_controller) + + action = controller.get_action(env) if type(action) == np.ndarray: action = action.flatten()[0] expert_action = expert_controller.get_action(env) if (expert_action is None or math.isnan(expert_action)): observation, reward, done, _ = env.step(action) - traj_length += 1 terminate_rollout = traj_length == max_trajectory_length or done if terminate_rollout: break @@ -56,10 +93,10 @@ def sample_trajectory_singleagent(env, vehicle_id, controller, expert_controller if terminate_rollout: break - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length -def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length): +def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert): """ Samples a trajectory for a given set of vehicles using the actions prescribed by specified controller. @@ -74,25 +111,45 @@ def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controlle Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ - print("COLLECTING CONTROLLER: ", controllers[0]) - print("EXPERT CONTROLLER: ", expert_controllers[0]) observation_dict = env.reset() - for vehicle_id in vehicle_ids: - assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" 
- observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] traj_length = 0 while True: + vehicle_ids = env.k.vehicle.get_rl_ids() + if len(vehicle_ids) == 0: + print("NO RL VEHICLES") + observation_dict, reward, done, _ = env.step(None) + print(env.k.vehicle.get_rl_ids()) + if done['__all__']: + break + continue + + # actions taken by collecting controller rl_actions = dict() invalid_expert_action = False - expert_action_dict = dict() + # actions taken by expert + expert_action_dict= dict() for i in range(len(vehicle_ids)): vehicle_id = vehicle_ids[i] - controller = controllers[i] - expert_controller = expert_controllers[i] + + if vehicle_id not in set(controllers.keys()): + car_following_params = SumoCarFollowingParams() + + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + imitator = ImitatingController(vehicle_id, action_network, True, car_following_params=car_following_params) + controllers[vehicle_id] = (imitator, expert) + + expert_controller = controllers[vehicle_id][1] + if use_expert: + controller = expert_controller + else: + controller = controllers[vehicle_id][0] + + if traj_length == 0 and i == 0: + print("COLLECTOR: ", controller) action = controller.get_action(env) @@ -109,8 +166,7 @@ def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controlle if invalid_expert_action: # invalid action in rl_actions, so default control to SUMO - observations_dict, reward_dict, done_dict, _ = env.step(None) - traj_length += 1 + observation_dict, reward_dict, done_dict, _ = env.step(None) terminate_rollout = traj_length == max_trajectory_length or done_dict['__all__'] if terminate_rollout: break @@ -134,10 +190,10 @@ def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controlle if terminate_rollout: break - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + return traj_dict(observations, actions, 
expert_actions, rewards, next_observations, terminals), traj_length -def sample_trajectories(env, vehicle_ids, controllers, expert_controllers, min_batch_timesteps, max_trajectory_length, multiagent): +def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert): """ Samples trajectories to collect at least min_batch_timesteps steps in the environment @@ -158,18 +214,17 @@ def sample_trajectories(env, vehicle_ids, controllers, expert_controllers, min_b while total_envsteps < min_batch_timesteps: if multiagent: - trajectory = sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length) + trajectory, traj_length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert) else: - trajectory = sample_trajectory_singleagent(env, vehicle_ids[0], controllers[0], expert_controllers[0], max_trajectory_length) + trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert) trajectories.append(trajectory) - traj_env_steps = len(trajectory["rewards"]) / len(vehicle_ids) - total_envsteps += traj_env_steps + total_envsteps += traj_length return trajectories, total_envsteps -def sample_n_trajectories(env, vehicle_ids, controllers, expert_controllers, n, max_trajectory_length, multiagent): +def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert): """ Collects a fixed number of trajectories. 
@@ -189,11 +244,11 @@ def sample_n_trajectories(env, vehicle_ids, controllers, expert_controllers, n, for _ in range(n): if multiagent: - trajectory = sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length) + trajectory, length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert) else: - trajectory = sample_trajectory_singleagent(env, vehicle_ids[0], controllers[0], expert_controllers[0], max_trajectory_length) + trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert) - trajectories.append(trajectory) + trajectories.append((trajectory, length)) return trajectories @@ -208,52 +263,3 @@ def traj_dict(observations, actions, expert_actions, rewards, next_observations, "rewards" : np.array(rewards, dtype=np.float32), "next_observations": np.array(next_observations, dtype=np.float32), "terminals": np.array(terminals, dtype=np.float32)} - - -def unpack_rollouts(rollouts_list): - """ - Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays - rollouts: list of rollout dictionaries, rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" - return separate np arrays of observations, actions, rewards, next_observations, and is_terminals - """ - observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) - actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) - expert_actions = np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) - rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) - next_observations = np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) - terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) - - return observations, actions, expert_actions, 
rewards, next_observations, terminals - - -# Below are tensorflow related functions - -def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): - """ - Builds a feedfoward neural network for action prediction - - arguments: - input_placeholder: placeholder variable for the state (batch_size, input_size) - scope: variable scope of the network - - n_layers: number of hidden layers - size: dimension of each hidden layer - activation: activation of each hidden layer - - output_size: size of the output layer - output_activation: activation of the output layer - - returns: - output_placeholder: the result of pass through Neural Network - """ - output_placeholder = input_placeholder - with tf.variable_scope(scope): - for _ in range(n_layers): - output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) - output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) - return output_placeholder - -def create_tf_session(): - config = tf.ConfigProto(device_count={'GPU': 0}) - sess = tf.Session(config=config) - return sess diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py new file mode 100644 index 000000000..57000323f --- /dev/null +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -0,0 +1,35 @@ +import numpy as np +import tensorflow as tf + + +# Below are tensorflow related functions + +def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): + """ + Builds a feedfoward neural network for action prediction + + arguments: + input_placeholder: placeholder variable for the state (batch_size, input_size) + scope: variable scope of the network + + n_layers: number of hidden layers + size: dimension of each hidden layer + activation: activation of each hidden layer + + output_size: size of the output 
layer + output_activation: activation of the output layer + + returns: + output_placeholder: the result of pass through Neural Network + """ + output_placeholder = input_placeholder + with tf.variable_scope(scope): + for _ in range(n_layers): + output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) + output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation,name='Output_Layer') + return output_placeholder + +def create_tf_session(): + config = tf.ConfigProto(device_count={'GPU': 0}) + sess = tf.Session(config=config) + return sess From 23e2ba328c964609f111bafdd9af1463e66d8a91 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 1 May 2020 00:28:38 -0700 Subject: [PATCH 13/57] Multiagent changes, added stochastic policies --- flow/controllers/dagger/run.py | 78 -- flow/controllers/dagger/trainer.py | 179 ---- .../imitation_learning/Untitled.ipynb | 787 ++++++++++++++++-- .../imitation_learning/Useless/Untitled.ipynb | 438 ++++++++++ .../Useless/Untitled1.ipynb | 96 +++ .../imitation_learning/bottleneck_env.py | 2 +- ...ents.1587254017.Akashs-MacBook-Pro-2.local | Bin 0 -> 265723 bytes ...ents.1587339098.Akashs-MacBook-Pro-2.local | Bin 0 -> 267581 bytes ...ents.1587776769.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587779365.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587780241.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587781276.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587789385.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587841939.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587848505.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587855757.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587860905.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587860969.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes .../imitation_learning/i210_multiagent.py | 3 +- 
.../i210_multiagent_ghost.py | 181 ++++ .../imitating_controller.py | 23 +- .../imitation_learning/imitating_network.py | 79 +- .../imitation_learning/multiagent_ring_env.py | 2 +- .../imitation_learning/replay_script.py | 2 +- flow/controllers/imitation_learning/run.py | 9 +- .../singleagent_straight_road.py | 163 ++++ .../controllers/imitation_learning/trainer.py | 48 +- flow/controllers/imitation_learning/utils.py | 101 ++- .../imitation_learning/utils_tensorflow.py | 2 +- 29 files changed, 1771 insertions(+), 422 deletions(-) delete mode 100644 flow/controllers/dagger/run.py delete mode 100644 flow/controllers/dagger/trainer.py create mode 100644 flow/controllers/imitation_learning/Useless/Untitled.ipynb create mode 100644 flow/controllers/imitation_learning/Useless/Untitled1.ipynb create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587776769.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587779365.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587780241.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587781276.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587841939.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local create mode 100644 
flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587855757.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860905.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/i210_multiagent_ghost.py create mode 100644 flow/controllers/imitation_learning/singleagent_straight_road.py diff --git a/flow/controllers/dagger/run.py b/flow/controllers/dagger/run.py deleted file mode 100644 index faa7d4ee6..000000000 --- a/flow/controllers/dagger/run.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -import time -import numpy as np -import tensorflow as tf -from trainer import Trainer -from flow.controllers.car_following_models import IDMController - - -class Runner(object): - """ Class to run imitation learning (training and evaluation) """ - - def __init__(self, params): - - # initialize trainer - self.params = params - self.trainer = Trainer(params) - - def run_training_loop(self): - - self.trainer.run_training_loop(n_iter=self.params['n_iter']) - - def evaluate(self): - self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) - - def save_controller_network(self): - self.trainer.save_controller_network() - - -def main(): - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('--ep_len', type=int, default=3000) - - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) - parser.add_argument('--n_iter', '-n', type=int, default=5) - - parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=3000) - - parser.add_argument('--train_batch_size', type=int, - default=100) # number of sampled data points to 
be used per gradient/train step - - parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned - parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned - parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning - parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--save_path', type=str, default='') - parser.add_argument('--save_model', type=int, default=0) - parser.add_argument('--num_eval_episodes', type=int, default=10) - parser.add_argument('--inject_noise', type=int, default=0) - parser.add_argument('--noise_variance',type=float, default=0.5) - parser.add_argument('--vehicle_id', type=str, default='rl_0') - - args = parser.parse_args() - - # convert args to dictionary - params = vars(args) - assert args.n_iter>1, ('DAgger needs >1 iteration') - - - # run training - train = Runner(params) - train.run_training_loop() - - # evaluate - train.evaluate() - print("DONE") - - if params['save_model'] == 1: - train.save_controller_network() - - # tensorboard - if params['save_model'] == 1: - writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) - - -if __name__ == "__main__": - main() diff --git a/flow/controllers/dagger/trainer.py b/flow/controllers/dagger/trainer.py deleted file mode 100644 index 03364f528..000000000 --- a/flow/controllers/dagger/trainer.py +++ /dev/null @@ -1,179 +0,0 @@ -import time -from collections import OrderedDict -import pickle -import numpy as np -import tensorflow as tf -import gym -import os -from flow.utils.registry import make_create_env -from bottleneck_env import flow_params -from imitating_controller import ImitatingController -from flow.controllers.car_following_models import IDMController -from flow.controllers.velocity_controllers import FollowerStopper -from flow.core.params import SumoCarFollowingParams -from utils import * - -class 
Trainer(object): - """ - Class to initialize and run training for imitation learning (with DAgger) - """ - - def __init__(self, params): - self.params = params - self.sess = create_tf_session() - - create_env, _ = make_create_env(flow_params) - self.env = create_env() - self.env.reset() - - print(self.env.k.vehicle.get_ids()) - assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() - self.vehicle_id = self.params['vehicle_id'] - - obs_dim = self.env.observation_space.shape[0] - - action_dim = (1,)[0] - self.params['action_dim'] = action_dim - self.params['obs_dim'] = obs_dim - - car_following_params = SumoCarFollowingParams() - self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) - # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) - self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) - - tf.global_variables_initializer().run(session=self.sess) - - - def run_training_loop(self, n_iter): - """ - Trains controller for n_iter iterations - - Args: - param n_iter: number of iterations to execute training - """ - - # init vars at beginning of training - self.total_envsteps = 0 - self.start_time = time.time() - - for itr in range(n_iter): - print("\n\n********** Iteration %i ************"%itr) - - # collect trajectories, to be used for training - if itr == 0: - # first iteration is standard behavioral cloning - training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) - else: - training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) - - paths, envsteps_this_batch = training_returns - 
self.total_envsteps += envsteps_this_batch - - # add collected data to replay buffer - self.controller.add_to_replay_buffer(paths) - - # train controller (using sampled data from replay buffer) - loss = self.train_controller() - - def collect_training_trajectories(self, itr, batch_size): - """ - Collect (state, action, reward, next_state, terminal) tuples for training - - Args: - itr: iteration of training during which functino is called - batch_size: number of tuples to collect - Returns: - paths: list of trajectories - envsteps_this_batch: the sum over the numbers of environment steps in paths - """ - - if itr == 0: - collect_controller = self.expert_controller - else: - collect_controller = self.controller - - print("\nCollecting data to be used for training...") - paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) - - return paths, envsteps_this_batch - - def train_controller(self): - """ - Trains controller using data sampled from replay buffer - """ - - print('Training controller using sampled data from replay buffer') - for train_step in range(self.params['num_agent_train_steps_per_iter']): - ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) - self.controller.train(ob_batch, expert_ac_batch) - - def evaluate_controller(self, num_trajs = 10): - """ - Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout - - Args: - num_trajs: number of trajectories to evaluate performance on - """ - - print("\n\n********** Evaluation ************ \n") - - trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) - - average_imitator_reward = 0 - total_imitator_steps = 0 - average_imitator_reward_per_rollout = 0 - - action_errors = np.array([]) - 
average_action_expert = 0 - average_action_imitator = 0 - - # compare actions taken in each step of trajectories - for traj in trajectories: - imitator_actions = traj['actions'] - expert_actions = traj['expert_actions'] - - average_action_expert += np.sum(expert_actions) - average_action_imitator += np.sum(imitator_actions) - - action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) - action_errors = np.append(action_errors, action_error) - - average_imitator_reward += np.sum(traj['rewards']) - total_imitator_steps += len(traj['rewards']) - average_imitator_reward_per_rollout += np.sum(traj['rewards']) - - average_imitator_reward = average_imitator_reward / total_imitator_steps - average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) - - average_action_expert = average_action_expert / total_imitator_steps - average_action_imitator = average_action_imitator / total_imitator_steps - - - expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) - - average_expert_reward = 0 - total_expert_steps = 0 - average_expert_reward_per_rollout = 0 - - # compare reward accumulated in trajectories collected via expert vs. 
via imitator - for traj in expert_trajectories: - average_expert_reward += np.sum(traj['rewards']) - total_expert_steps += len(traj['rewards']) - average_expert_reward_per_rollout += np.sum(traj['rewards']) - - average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) - average_expert_reward = average_expert_reward / total_expert_steps - - print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) - print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) - print("AVERAGE REWARD PER STEP DIFFERENCE: ", np.abs(average_expert_reward - average_imitator_reward), "\n") - - print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: ", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") - - print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") - - def save_controller_network(self): - print("Saving tensorflow model to: ", self.params['save_path']) - self.controller.save_network(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/Untitled.ipynb b/flow/controllers/imitation_learning/Untitled.ipynb index 875fe73b6..d412275b8 100644 --- a/flow/controllers/imitation_learning/Untitled.ipynb +++ b/flow/controllers/imitation_learning/Untitled.ipynb @@ -6,40 +6,554 @@ "metadata": {}, "outputs": [ { - "ename": "ImportError", - "evalue": "cannot import name 'energy_consumption'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m 
\u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mregistry\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmake_create_env\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mi210_multiagent\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params_multi\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mring_env\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params_single\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontrollers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcar_following_models\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mIDMController\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/i210_multiagent.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSumoParams\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSumoLaneChangeParams\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrewards\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0menergy_consumption\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnetworks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mi210_subnetwork\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mI210SubNetwork\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mEDGES_DISTRIBUTION\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menvs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmultiagent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mi210\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mI210MultiEnv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mADDITIONAL_ENV_PARAMS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mImportError\u001b[0m: cannot import name 'energy_consumption'" + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", + " PendingDeprecationWarning\n" ] } ], "source": [ - "import time\n", - "import pickle\n", - "import numpy as np\n", - "import gym\n", - "import os\n", - "from flow.utils.registry import make_create_env\n", - "from i210_multiagent import flow_params as flow_params_multi\n", - "from ring_env import flow_params as flow_params_single\n", - "from flow.controllers.car_following_models import IDMController\n", - "from flow.controllers.velocity_controllers import FollowerStopper\n", - "from flow.core.params import SumoCarFollowingParams\n", - "from utils import *" + "import numpy as np\n", + "import gym\n", + "from i210_multiagent import flow_params" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from flow.utils.registry import make_create_env\n" + 
] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.reset()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "len(env.get_state())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.sample()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.k.vehicle.get_rl_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.step({})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.shape[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flow.controllers.velocity_controllers import FollowerStopper\n", + "from flow.core.params import SumoCarFollowingParams\n", + "car_following_params = SumoCarFollowingParams()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expert = FollowerStopper('followerstopper_0', car_following_params=car_following_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [ + "expert.get_action(env)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "len(env.k.vehicle.get_ids())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expert2 = FollowerStopper('flow_10.1', car_following_params=car_following_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expert2.get_action(env)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow as tf" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t = tf.convert_to_tensor(np.array([1,2]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t.get_shape()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t[0:1]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mean = tf.convert_to_tensor(np.array([1.0,2.0]))\n", + "cov = tf.convert_to_tensor(np.array([1.0,1.0]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "tf.random_normal(tf.shape(tf.convert_to_tensor(np.array([1, 1]))), np.array([0,0]), np.array([1,1]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cov" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tf.cast(tf.shape(mean), tf.int64)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mean" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": 
{}, + "outputs": [], + "source": [ + "np.diag(np.array([1,1]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow_probability as tfp" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tfd = tfp.distributions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tfd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sess = tf.Session()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mvn = tfd.MultivariateNormalDiag(loc=mean, scale_diag=cov)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cov" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mvn.prob([-1, 0]).eval(session=sess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sess.run(mean)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mvn.prob([1, 2.5]).eval(session=sess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sess.run(mvn.sample(1))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "inp = tf.placeholder(shape=[None, 2], name=\"obs\", dtype=tf.float32)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "out = inp\n", + "for _ in range(2):\n", + " out = tf.layers.dense(out, 30, 
activation=tf.tanh)\n", + "out = tf.layers.dense(out, 2, activation=None, name=\"output\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pred = out" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "type(pred)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tf.global_variables_initializer().run(session=sess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "obs = np.array([1,1])\n", + "obs = obs[None]\n", + "ret = sess.run([pred], feed_dict={inp:obs})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "obs = np.array([[1,1], [1,1]])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ret = sess.run([pred], feed_dict={inp:obs})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ret" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "type(ret)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batch = np.array([[3,3],[4,4],[1,1]])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "log_likelihood = sess.run(mvn.log_prob(batch))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "log_likelihood" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sess.run(tf.reduce_mean(log_likelihood))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [ + "np.mean(log_likelihood)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "labels_batch = tf.placeholder(shape=[None, 2], dtype=tf.float64)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ll = mvn.log_prob(labels_batch)\n", + "loss = tf.reduce_mean(ll, axis=-1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "b = batch.reshape(batch.shape[0], 2)" + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "create_env, _ = make_create_env(flow_params_multi)" + "sess.run([loss], feed_dict={labels_batch:b})" ] }, { @@ -48,33 +562,41 @@ "metadata": {}, "outputs": [], "source": [ - "env = create_env()" + "from singleagent_straight_road import flow_params\n", + "from flow.utils.registry import make_create_env\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "-----------------------\n", - "ring length: 264\n", - "v_max: 5.329679917416892\n", - "-----------------------\n" - ] - }, { "data": { "text/plain": [ - "{'rl_0_0': array([0.30672195, 0.00223007, 0.02625558]),\n", - " 'rl_1_0': array([ 0.34392208, -0.00785657, 0.02819709])}" + "array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0.])" ] }, - "execution_count": 4, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -85,63 +607,61 @@ }, { "cell_type": "code", - "execution_count": 19, + 
"execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "['human_0',\n", - " 'human_1',\n", - " 'human_2',\n", - " 'human_3',\n", - " 'human_4',\n", - " 'human_5',\n", - " 'human_6',\n", - " 'human_7',\n", - " 'human_8',\n", - " 'human_9',\n", - " 'human_10',\n", - " 'human_11',\n", - " 'human_12',\n", - " 'human_13',\n", - " 'human_14',\n", - " 'human_15',\n", - " 'human_16',\n", - " 'human_17',\n", - " 'human_18',\n", - " 'human_19',\n", - " 'human_20',\n", - " 'rl_0']" + "[]" ] }, - "execution_count": 19, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "env.k.vehicle.get_ids()" + "env.k.vehicle.get_rl_ids()" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "Box(8,)" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "rl_actions = {'rl_0': env.action_space.sample()}\n" + "env.action_space" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ - "env.step(None)" + "for i in range(100):\n", + " env.step(None)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": 10, @@ -150,8 +670,7 @@ { "data": { "text/plain": [ - "{'rl_0_0': array([0.25527085, 0.00670868, 0.02368258]),\n", - " 'rl_1_0': array([ 0.24537913, -0.00482127, 0.02289928])}" + "24" ] }, "execution_count": 10, @@ -160,7 +679,39 @@ } ], "source": [ - "env.get_state()" + "len(env.get_state())" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.1', 'rl_highway_inflow_10.2']" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ 
+ "env.k.vehicle.get_rl_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = {}\n", + "for vehicle_id in env.k.vehicle.get_rl_ids():\n", + " rl_actions[vehicle_id] = 1.0\n", + " " ] }, { @@ -171,7 +722,7 @@ { "data": { "text/plain": [ - "['rl_0_0', 'rl_1_0']" + "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.2', 'rl_highway_inflow_10.1']" ] }, "execution_count": 13, @@ -180,7 +731,97 @@ } ], "source": [ - "list(env.get_state().keys())" + "env.get_sorted_rl_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = [1,1,1,0,0,0,0,0]\n", + "rl_actions = np.array(rl_actions)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "rl_highway_inflow_10.0\n", + "1\n", + "rl_highway_inflow_10.2\n", + "2\n", + "rl_highway_inflow_10.1\n" + ] + }, + { + "data": { + "text/plain": [ + "(array([0.54393322, 0.06077194, 0.56137638, 0.40959813, 0.0259221 ,\n", + " 0.4041333 , 0.42759098, 0.02818569, 0.42912874, 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. 
]),\n", + " 0.1718155323023197,\n", + " False,\n", + " {})" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.step(rl_actions)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "error\n" + ] + } + ], + "source": [ + "try:\n", + " test(1)\n", + "except:\n", + " print(\"error\")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "ename": "AssertionError", + "evalue": "blah", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"blah\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mAssertionError\u001b[0m: blah" + ] + } + ], + "source": [ + "assert False, \"blah\"" ] }, { diff --git a/flow/controllers/imitation_learning/Useless/Untitled.ipynb b/flow/controllers/imitation_learning/Useless/Untitled.ipynb new file mode 100644 index 000000000..982ef03a7 --- /dev/null +++ b/flow/controllers/imitation_learning/Useless/Untitled.ipynb @@ -0,0 +1,438 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", + " PendingDeprecationWarning\n" + ] + } + ], + "source": [ + "import time\n", + "import pickle\n", + "import numpy as np\n", + "import gym\n", + "import os\n", + "from 
flow.utils.registry import make_create_env\n", + "from i210_multiagent import flow_params as flow_params_multi\n", + "from flow.controllers.car_following_models import IDMController\n", + "from flow.core.params import SumoCarFollowingParams\n", + "from utils import *\n", + "from imitating_network import *\n", + "from utils_tensorflow import *" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params_multi)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "obs_dim = env.observation_space.shape[0]\n", + "action_dim = (1,)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "3" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "obs_dim" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "sess = create_tf_session()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Restoring parameters from /Users/akashvelu/Documents/models2/model.ckpt\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Restoring parameters from /Users/akashvelu/Documents/models2/model.ckpt\n" + ] + } + ], + "source": [ + "action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/subprocess.py:786: ResourceWarning: subprocess 11185 is still running\n", + " ResourceWarning, source=self)\n" + ] + }, + { + "data": { + "text/plain": [ + "{}" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "init_state = env.reset()\n", + "init_state" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{}" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.get_state()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "1\n", + "OBS: [[0.4 1. 0. ]]\n", + "SHAPE: (1, 3)\n", + "TYPE: float64\n" + ] + }, + { + "ename": "InvalidArgumentError", + "evalue": "You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", 
line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, 
allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, 
import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], 
_device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[{{node policy_vars/obs}} = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]", + "\nDuring handling of the above exception, another exception occurred:\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;31m# print(len(obs.shape))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# print(obs[None].shape)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0maction\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_accel_from_observation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0mrl_actions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mvehicle_id\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0menv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrl_actions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\u001b[0m in \u001b[0;36mget_accel_from_observation\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m 88\u001b[0m 
\u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SHAPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TYPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mret_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_predictions\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobs_placeholder\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mret_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m 
\u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m 
run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node 
policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n 
self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if 
self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n" + ] + } + ], + "source": [ + "for i in range(100):\n", + " print(i)\n", + " rl_vehicles = env.k.vehicle.get_rl_ids()\n", + " if len(rl_vehicles) == 0:\n", + " env.step(None)\n", + " continue\n", + " \n", + " rl_actions = {}\n", + " observations = env.get_state()\n", + "# print(observations)\n", + " for vehicle_id in rl_vehicles:\n", + " obs = observations[vehicle_id]\n", + "# print(len(obs.shape))\n", + "# print(obs[None].shape)\n", + " action = action_network.get_accel_from_observation(obs)\n", + " rl_actions[vehicle_id] = action\n", + " env.step(rl_actions)\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "dtype('float32')" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "t=np.array([[1.0,1.0,1.0]], dtype='float32')\n", + 
"t.dtype" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OBS: [[1. 1. 1.]]\n", + "SHAPE: (1, 3)\n", + "TYPE: float32\n" + ] + }, + { + "ename": "InvalidArgumentError", + "evalue": "You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File 
\"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m 
\u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[{{node policy_vars/obs}} = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]", + 
"\nDuring handling of the above exception, another exception occurred:\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0maction_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_accel_from_observation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\u001b[0m in \u001b[0;36mget_accel_from_observation\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SHAPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TYPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mret_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_predictions\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobs_placeholder\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m 
\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mret_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m 
\u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m 
\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 
438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = 
shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n" + ] + } + ], + "source": [ + "action_network.get_accel_from_observation(t)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "for i in range(40):\n", + " env.step(None)\n", + " env.render()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.get_state()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def test(d):\n", + " d['asdf'] = 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t = dict()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test(t)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "set(t.keys())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "b = np.array([1,2,3])\n", + "print(b.dtype)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/obs:0')" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "placeholder" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flow", + "language": "python", + "name": "flow" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": 
"python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flow/controllers/imitation_learning/Useless/Untitled1.ipynb b/flow/controllers/imitation_learning/Useless/Untitled1.ipynb new file mode 100644 index 000000000..b93658a05 --- /dev/null +++ b/flow/controllers/imitation_learning/Useless/Untitled1.ipynb @@ -0,0 +1,96 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: 
Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + } + ], + "source": [ + "import tensorflow as tf" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from i210_multiagent import flow_params" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from imitating_network import ImitatingNetwork" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flow.utils.registry import make_create_env" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from utils_tensorflow import *" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flow", + "language": "python", + "name": "flow" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flow/controllers/imitation_learning/bottleneck_env.py b/flow/controllers/imitation_learning/bottleneck_env.py index c0fabedda..820244a87 100644 --- a/flow/controllers/imitation_learning/bottleneck_env.py +++ 
b/flow/controllers/imitation_learning/bottleneck_env.py @@ -111,7 +111,7 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( sim_step=0.5, - render=True, + render=False, print_warnings=False, restart_instance=True, ), diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..0e64e0dc20ed4f255f8e9f110f2f9c0bf6a3ad75 GIT binary patch literal 265723 zcmeIb3y>Vgc^`hWdpI0EfFLm>C1~_8gOX+GC~9#BfR9tjJRB)f{Qu2|2#^#-QLDw> z0^D%1d)nOt38fM%iOW$OJ8`U(lh{txKXECx<0P^yl@u!~TIEEFttfuPaU{i-6gzsQ zWGQ+$~lxH{Cj(00@v=0 zhl8z)-3z_Rv@}~a*8V|Sola>9jXM1*WrysYDMk_x62fO3l+0O2)JAJCR z)1SsS!u9Ki;{D6tWB)~QYq`Dh64;Y{AXfbqb|L1+>}|~kS5i9m#!R3r~O?49Wk z18^G|xsBlWBe<7mchsLfGoC!%oo3}~tMh|V?9bDUjHd&eupGq; zD~tc)wtxH_&r7uOf(kE-&{|&z#YgYirug_lH$L9qf(QNe@o;8zk5HS7uMvJsLRoRj z(6ry*UW;3Y1&fPTURKf40f6=8G^UV$8!G^d>66)mdm%baqEg$#8{mVj5LP_hJrlQ2 z?Vd?Octr2nAY=xkm0UoPhk&$h^P52gcYb#mx9{H_CZSC?gb0aNt@ao|#PH!3ob>kG zxP5YaI|SE6B|*ppvOh~efIo=f6>h?d;nCh?&^t5iKe{G{RLdAEhhRU+zLWlTchoyi z3L%nUJQDD=uy1QTnvnvnKgnoa&uS$u(7}6KaElvXos-k);M^!)qF)n7aU}jWmkMTg z+V7I`y=E3U+-3x3O|tQAFMXH^;AXDk9UNS6;-*t?M=q zl5#7p2RBw0)BXZ}D}tB1OkZ$D^h7GIbS-lwTBUA?er^&*)yLsyT6TlVR3SN_lK>K= zmRHoyVPQ?{rC8C!uSM`G7e#j@Uvm;p8AY)3Gqf%n@NtKnaer?{>XLYgeof>aIjIwB zN_HIk=Z!S7&PW3|-5Hbk+9UCALHyO3^jGlP5&VEn37f=YNof%8&A+inyPRcr^{(6q z`$@3wb<;+*H;b3z#RuN-*j=t@!A2n;_*)L)<8*KIRJ@q}f8xq~F@W=w;8PLYXAy08 zwQk$~+9KkvLMOh(CE^x?Xd3slX;1(0klMrodCQm z!~fL#pL{M&H(_`w+xPI(bZp>%tp#rkVR1swF1PUEY?}F0+fT6=C}2Z|0^qq;7&|B$ z`A}A6#Y|jXjfp$*6Su}W;dcWGuxRMRn{pUYrcHg(w0rrwz8}f#6MmxwcLnfg+SrG& zN7CAdAw-&c3;SV&44;YMm99b*dI}*W+2&d;{{6hH(w9grJ~faJwV>n5o806(3nd9* zgPNwpL9E)w!^ic~v~L4P%D5^uAkLBtW5G5*fsaOTyWI-1K9gS^5?X!)8b#!j(lqq) zd_@@<`>FqA@2B$N^e$>?U<2T9jN^)P%Af)KGduQ{6O-N2hP~yFC1{P8M(LsR5D)_V 
zx*cv~LXtetQAj~pHJBBA-YK_8K!6p$$Qsjn(W&G#Fh1~K>>v%%Q3WAz2$4BPU_(K8 z3n;ntgbU#BIK>+dC$K>or4Y(Vx~jqnDOj5aQosh8D^kD)bwmmZdu3l2pIg$%f;KVXp`nMtlF4P254kJo$q=3!a+5)&B1&o-% zpB<3`2TGlE6ToF=kCwRMgcLB=g*HUho7W3jf#3r+yA`=Z0T-lzJuSB+)?$u5Xh#az zCk-}unIHvhrcOu!`>?!J=`6XjX!MG>HVbz_3f8i&UQm!XQb1|W4Jn|*2`(ytd4`rQ z&`zCh&av!=#4+<-&H45MC!`=(iL^<;+62?b>_|aQzJd*f-kigPQX4#QLJD#&>LLa7 zvC{uAph!-`U{>(Q4oJaTM?wm$8eZ+VVFf~ba$dm}_Y3H@5EP;V7p!0nffX>hSa(e1 z29`zPa61ZIu!6Nr5&511D^^el!Ce(>!O00LI5rQgfDJNNtbh$Fq$mBZ0!%VxL#X@f zfLH+=$jFNyR=|c(^AZp%U;~+W5gM%8y9!)JD6oRsuLe~11F)!u+nc~9>x31s51VA? zzN^56niE#wM93E_V1sC15^%u^7!i{I-FFqZP!mkq-sZZH^u`L!7eYiGOz#g%8axDpvqSV69mx>x~y z%#Ic0n1U}1)K(X8)u z?^NC1J8;1Yjv=rDCKo$az}Y~lwR!9-I93ZQD1=Z>GU&d7<8`qDE;<_D5GeFZMmoUj5XLcUl58^jAMU_?yD=Z+P)P!mkqjup6&^u`L< zAhu@N=?w%%%pg)ntiXYi8&=>%D40E3;)WAez-BBoB9)HdvcU@2be*sQ_Tk!C0ejGn z6|helY_P1Kyh66RA+eZ%VkOlHD=65KIu$Ecz#gEPFMk>WvCx~u>v-PqQ8FE z-~HKuSOFWz$crCVz=lxs5)dn31DSXc8m!t_fy)RKE677RZbtwcCfyc{+MN*b@PpI7 z0yd;ccJBKMT&Ou=1x|!~downO7goTCm;~sK6}V6nOxcbVxRCV53fLf)Rw8o33K%hi zNFA{P2TE>OffJ!%_GoStBPXnY%~)te0=+@NXs=W{372A(XoC zLF9xL{^9)_~6`Vj|1xzk> ztbntDQloyLK?L%duHmB$NoZEYRx?O0FD1od|w363?gBPQN#u_9wIMnaQ}fz15q}(zBL3&-4f9o|>so zr`5b;tY}OZ7T$R${FrpwA8I1vMGA-5kpjz`OX(5w9ro>;j~)LDz<4j zC8$*z-a%L`_%dc)qN zyY*Cmi(K)syS3FPw{$E%Fn(|c{$m8MaiV{Ee9v&)n|0Wo63L@)b$U)IAj7;gj>Zd1 ze{lvmQA1eC(yHx#_NKpZ=MYD(XjyTRaCZGl?y!w%MSVpJK|>c( zNR-!(aSvB|BD$M~PVJsg@2@zYxP6=`G^rsj9gLH9sZaO&JKgQU`Du2{1W5_L83PFC3SpHJs7z`R>l7KP9KScV162>9BLLX-!p6q)D~JYe?@upO8*^cjqhK zld)IVFT^_DeYnm=tp*DveL8~IIyn%pr0mV~?r`vQziB;jIbEv|WiTWw5PukfaJQ0^ z;Krl=G~=w3CDf+*!*)pY(c-aREnOa zoA)yvWv#{jn5{WozE1ytBG_(P1YAMhSfRv)^|n^=ina=Vwbh&e0`^ml36ACM6{#09 zHE{6V2)3HCnrau<81GekaO@6AlR)m{D6h(NDg+z@hoMQM&0|yIfS+!|!vVM|QYI^S zynuwJxJ_nmjWnOn{54oMq;=fwi><(lnMbX5i3H3gf_n8mK@~sRf*%U$0C~;WfKw%5 zY0|o~NClPp(ewP3HvL6NZ-T^DtVvKMmM-gzA(7*x-2g5=xXi;RHCyabq@o_4 z^vwv4)>Sjbl>%4?q7#t}T!Z==VJP?y5xm-o=WMX<>g8MX6HeSfI|2j}K5F2BDRNj# zNZMN64H0z4(eEW|xwHP{^2Px_>}p{-H}T&5o20fgIwFRJ*vcrV@tt&Ui<{=32Y~Dc 
zTpc9ig@Lg3l~7Ayb`|_i1jn5sRHa1ELRM3yHd)`#PiwEewEBDXIhsoz^}!*@Du&xK zR-vW5jzsA;sr!_aDyy}iE&zN%x&}ns6ic|GTPmuv&QS=ieq6LW*}rmKO4}Y>i1$9+ z?+x!BTyR_@%kr(bHN_%DZT@7%JbW?|@0QXMe15FKe9kpAXE;&;Ub{tFmQLLkKZ$}9 z;o2?-(c0~68~#!?hM$}Cwg+T2bILFk>ZSO=`j_GRO{1jabuUi4NAn_9xgzGe@baDr zZZZv)F0AAs&dz$`Ht$l-?g8v)BY4y_&+6Y48@DpE>!$MSR2)hvMb|~*_b5T2(lF}I zPKv_y9K@>6q#;R`dex<&UuePUsw{lvMA{&>4pvW64ap&~YQUZuezFB`2`Q=B?he`I zcWyG?B^x>VJNaIxtMJQh)oKuawp1ir4W{^7V9VA@%XXIE!H$4`m&?5WO!d2&W1POs>o9cRK}_a z>(|8lm%qpUi=-ccB)@=D%G ze{~QSoktX$z+XggDx7QN(SoFi0v`&f)0BDPHdZYxL94}rOPv_tueX}uey!xb3E2jg z!LI}jj>T~U^T`tp$al?dHYA^fEF;Rb$bim56p{48dA72cT3`onBhEQIDKMNJ*L#aOv|x z>d^XT5R`}$XQp1Zz=mJSdw&Za3TcF=2g5$O0g?QF;vX6~cA3%mX9kWabrG`0cbZgk zvjJIvAn%WoX449G{9*);1(8|`>vWWktA2Sm-Dp{Fv*mLk;Dl6W{8URP4sJY2Q-UqE zDmtwGz^W*2El2R75SVizz*FhD2=Simi~DQR<=pG+{3oh^Ae@zQo*l*sd4mdoBZ(q* z-gKV6@2GKq_*e+llegT3eJsPM1rJl%(D<~h2lp;AWp@U$T#$@ioo&2aU*eYj)}WhvlI^hKByo_kfG}m{wNYJ z+yHA51v#Z3Fk^~zClMWvj4%^?Bt#ie%rUQc$}%FLIEjh^WRqg5nR}Pw6Ke|J0rD1fb>Lx8khd6(NhSbrSkROeS~%NEBOq@vZqAEysS{?~VKKZ;aU>X& z+t~b}onpKq)`UQP3#t|6X@Wqh)JD+YVm<|z@bL(q^l!@L13ncUru+Gxr4Z_%3-5~p z>@TYnCLANg;&PnCc>ZXeIseTzoNr3_%G9pwKbtjy0PTcXG0b9cz3IDJ;J+BBnIJtj zlf6B?|3WCY)6&>9O+sh$i3zZ7Bvye9JH^<3E>H)hKzoYk0K!+=|V)?m2caGl1aK&^Y+R326qh5<02 zKW%~k=0mf6^9WF5;-`3!|E_!uD-0c~9kWFk3I054u2d`boj0%0A|*K|G5D1hJk^wQ z$c=gqo4uv6nl;_v{34%_*?{84}Q%y>dB z@Yovf^gHCO?P;G}Ew(x6O;2ubQ>-ih;a;*6EZ)!kkjy=RlpdtHsmOG%)O%|*I1W;g z!$6)F9&?U64rB1be!DYa{R>fQP*#TqO{jb5b>S!5`3twIw(H4o`r1CP=K13J_kie@4}Pu6SYFRgCTK_Z#bzu0`d-; z^Zr)#u=n?71nHuIo_-%M(XWZ(4D{eO(e#CU8{+MIz3lcKa#Mnyv)iC1t-jGY z``L%c&D;Ip9#@iNkHQCP>`}0jb;y{(-M_{vw<`l&LUhD9HcBJiDkAOU@L2pvfJj^Q z)byww=U0_oxvuuVLIP|c+%&5?TeK)vaeR(VvI{rY-YWczXYQjRmu9}xz0p(mrv1$_ z`z~<}+0JW=cGh(6UOI!T`@pwr9QLQ0*m1`e&@23?G(Z-s_iy)Rz3$oZrX$-E?^X$n~()8}hq}l2$G*m)T z@!;8$DLLRWQW4fd_hc$w=GT)c*|wx4ubxaAkhg2Uo=hSvJ7pO#Lic3Sz#K=7_GHHz z>&YaBbk1z=$&LvCC$)Qwg$8Pqr4ICvy~eDX{ttm?*k1>tv$n zWS0xllZivn64tXPQ;`hNlZj+?UK1__R{Imr!d}I|d@^8_755D2yH2L!+L8dh*2%>2 
z0`+7fQP~m8uo1c^6USxDG~ScZkc;oh@{sM-lZmYB+}+uZ3mtj)WNY*2$y6FBSU@b8 zdG}-z*Rl%)*$OF*!Bu*mJ()_H-aVN#Tb+f5N=PamJbN-F2lL2CDqfsD*?XN#$+jgW zdG%z{fV^G%^<)xZ*(u9_5xOUn2Ie?wv?p6@tS6Hg(mAueCtDMGGLOEi+D}Ap=VW>sfHa0>dB-5dAs)O$t1$EQb7p%_wkq^wO|6rC(DQvVl_0sE?B$mC$pUm_jsh=D*j8f+qHe4s69p$bT#$}T z93n_ZrXm@jBNNH$d?s81thO-{?1-gCLU&x^xQrPaUIaf~<0=}(-s3FbYWp#_aP8HNiEQg^#<_+o^aKAZg42Aq zFrtP}y*Ghv6?yWe%Ubt9i?72SbnR%*3S8?ow^I?5x*A=fJ zSCK)yDb}v8-k9GM`zL;{f-9O_>mU8iRYqj4(ClwlhFJ!4?h6?-VabzY83uu8{QUH49(>-g+w0r%4Ix3S%-zC z1W#rBsPuc+RMv)H@;ggKumPQ?37O32)pcRCoa@zO9x4mm(kdCT1tj>T2yU*r)|vjS zw@Qxse5OC?3`T?5pf~JZ=nZ$t{i1h`N7LELZAAjU1#XUCRY}0A2zrK`O{loi;OyD6 zce-6^@ZktvYMg5fkM<^m-kD+l(X}+LPM_*c`rF-6?|eUQ#S8dZUkm%vh|wF2`jd>( ze%M2ZllxX%>kDp^|84|AH%UV|FX)rg>EPTbUZP(UGpl0$t`ZZpwsHd;B5^Tzo zetNXgedKi4*v(=>MCE^D;ZSfiMegz7 zvyy$TXW0kVOQr?qwz2!w7TnD#bMyc8@-?XFhUdYJ`{uSakv&rxl?k}ifXS-t&Q zRrGr3SPne!%@*9_$*>$ad&>v>M0slRM*YZ5Lg^YqLAzUb` z3^Z@DtCKt1^A2}uoiZxa7bX@z-@;_Of<-w%_=qbBYeAa8U=W5eC}5QZUyfkI!-(W? z;3Hwv3P~9z(477Os_`gn$y7sN=M*Fi{0XYv;P?^(0oqqp^#WQhatMJhqM8hjNjVfC z%3h8drpe$QuU4atICuRR17Ah87d&fHec&f&f44UUm2_W=VtJGghl-Mq1Og)((aBKJ zZg1M1k(+eJ6V_;ONv~~~Rcqz71&px@o%j|P7^9ez2xPRg!lHF#d4qr$=|A{P1ds9j zIn@jWbxk$Y$$)&ur`PmvWOdPjO(1pA!iu6M(qPT-6KzDBXyYwjyk{^Rs@8jXF$OS` zq$$0WKIEmD>i`C$?f$df9QAD*)`QZkBR1e0n3h)5hme{$2a0TitFn<7`*KEFks55u z^0zTft;n*RsOmRZB0WS?D-uv7t72iqOQdxHuuQ~?LiA*MEPM?E*a*x? 
zLBdbcL8M^fmjD>`*MfO|$@i6d_c93%UqUv_Mc9+pfMvJjYwXAXL1lZ(8;lq5HDsI2 zF)9ZLRKv#6!?npAfvQyn@rBwEK@pnLHWAez^Xy4s!dKowgkuU6IBd5#txdh|8?rHG zLs=kO-f7K^F%H{h({RFe8*7W%EEJo%32eQ9#C91~4R3gC*Fsn}6AZR%8-i}0_kxkw z5G(V-cG-ZLK7>lPQc|kmRo)fmh5)lU0}k6|k5dzrb~+-0s6Y>sv;+c*q-EOeh{6BZ zc$&pXTPujecG;j*>i(%qpjW0YFL~HF$6>p|OjWh69Gh6}%Y}Ahnn9tSY*vk)wvecDctD;lfk_W3b&^ zvg-I@&@2+$<(ZR$grB61h+z?9@e)cmry;Oij$c*n$Ld@Byn+M=9JVX)_WTGBIBZv7 zR1OfRHkrdNacwe3;BpQUIBZvxw-hFP<=uFoz+t<^?TF1|yKE>+zst9c#-NhSq8nkL_AX zR}vXw->!X-0N5@Yq?)R%eul$#*?_rLo^QzvhwVyC%jpb(FG>_K*serEk1dk04JJwB_U-b_NkPI-(q(|~Fhmo-IBb{W zH#D}JYc9k=EI4de;7te*9x4bI3l!^0$bGv4qjG>iV2uJjT$?;!Y&SPGb&+9m8XUGO z$VYw$$XDKt2MQdvd;BsylY!020s5uhPvLm(Pr+?fn)z)T-PsuId_3*M*5D5&)WHXFySPB?6r4MI)Of8VY^ z50kV60#$!6<|V7n5D%Ri;z_U+1SN=eOM_O?ZkFli5P*zWO5j_uMJ zgvWL{&V(Q$RK$knUg6y+>^kSK5`JV57>Vt2Y|3E+fjDy1FxW2lxFTGH#&&bbs!L*n zW|8}LdFG@b;V0?lw&T$$Blqoc{3@%X{CX%17@BMD(qX#-Z_khLfZMk#Fe(QKRKvzW zv$!^yBcKQpwY@ow(^7L=W>ma!BF|(HE?UwMb%%M@(jf1oz_pD<&)K8-LVEkd;P z{-5+Hsz`PIthsEDkozauRJ<^@NL5u}+8Q<+CyXsZz8lX535Br<^f19gAfN~yrcmG| zWpz}t85-{0c#%o}5F*ekLx`6=Y@B~F!km&R%+yqKc+FkF)I3884&#vc6zUIR!t&dK zUffpt4h8HH)qy=CB*q~zE!QH5K3k%Q!8jxmmw!mXVH`4>Qc&@iy#?cl!h{_p#&N<0 zZg_4BV43uevBxX^=_RL^J6hRW`O*AH>_>{-W4!OcUgu_1&2rkSo{ z2|m4?9Jb(P8ZP=f)4^~&VuMsg^~@)4UUL)GNA|!2no)Llvb*g0zbdZtDKMy=o#Dk) z#__(B?AzoOZVxB*b6hbj88N9JYxu{ztH}|5p7dsk%u>2L<7dca+G$4CeTKhAJb8JQ zPSwH_qj4Jlx|Uc>A~{o?!MRCqdq8eCpLY1asZ@D?Z+8Fgu$w;6neFZj`<<=vXf_!S zyW9PpKDp#*v~{t&S_OU=yn;uriYKa~V!&lB>@@vsK)`;%>H^L=Pvox2<-oGBAB< zn;rHoE^u&mj=jMVesd2zUS`+qsjT*uJaL897LOyMX7OwWOeYBVqMSR4+gDWJ z-R9*^!;rHD6S)Qmy%@NX>7!=wrMHp%Ti@D&@U0KOw%Q?o7!gB@VR5ws|M(SU!hJsz zz7$~?)3s5f3lNCQlP6Jum_3dO#OzsgAQlK*QC92~h}SMvAZGfwM1hzcd!{mEYapf% zV*)W778!`S$FC?8_6o#n9oIlyfy21^786*hI+PY{D6<0u;_{dXmHk>PV)i&D5VL2| zfmk4LMOm>|AU<}f0x{Ev(u~d9FE+rxB4+O>s|?v1i0Q+aK+J|k24e2BqzdMZEWVPIB5!SrofB#!GW1yC>lLtM?$*YUL z1dH_&=E2Du{booA2A?B?5`)c=0lJ-XH;kuZx~$6%Hb;ha1e_x)K!eSZwE+R=$YOJ_ zIkKc#&avDOgWV{wqS-A5EjlS7_QExfUHTwR~2)@r4eMw+i 
zqH#FU#NRwNX*YUi(M@x5vL;wI60hL{%kTm4(3?y2}#og!;E?G&jq*(tID#ZHlS zeRhhhnX^-*hH9tClD}w3dYI<`{1tc!Z%g^jP^0bcl!Tzto^EF5bY%0U!k88|mH%@e z{IiSXn1R$es3pfs^lQR-OrkaQ#5i&By=91sFF2!ykOF*!Mu2qSv+oJ5wp5V2MVCU69sct4Hgxj%-#tjdl(I}p)?HE)!?lK zlU5Dp#j4EWtwt6x8l(eh7_8%iw-wA;HCR;GGJCg`@dTqTMpwg)MO_NfR-)+b1(Q|{ z=FNf3;_aHr8APp^SScrr&gs-DQS^?2IjaVXjz(th4h?(tWnZ$0F$Fe3_#;-9u|$8g z;L@tXyc;_tXZoYTH9|+y$WCt>*^y^H zZPL%TOF!QM--=;zFdEEqc3v$kljx9}{D7t8HP%nF%`wyI;M~am`Mf4$^pD!;lh4;3 zfz}M}Bd{dQp0M&;w zCifTJ$Jm~U{9|`1sJUS2bT2_pdiO`a@XtSW^8fkO-&%$rxiS9S4X_Sva+XxQ z1ktlcqSNWui{xuUzP^KeJ@R58zg$e0U)HnLmxI?wt;MB1?S0GfUif+h<>7bnerS{Z z4CJAAMp3*EqVwgq7xHhfgZ`ru%E&5t?YM@zX+PZXluFVTU?4-VYl0{h7%dfl_*$@$(a zUYgD(gMOLXhSk!yhdg}MT4!k*W^eYZEPxXm8oHizNJ z_Z0)K-vII2b*0xF*niDlh<}o3^sTUX&w#Xeq&M0goSz37;yo!lcv&j)o zI7@mN@K0neDscBY*n2NI$dQ~asGFti+tfw&9Xv$516~8g0%|&)!H6ueq(aq92XLCIIrexO@Y29Xronu5{Q$t)ayA-?c0jBq-2@1C z!TyJay{-OJIjvOla$?g7Kg!vzYy$|<$ z!@CC;m;lnR4w0YA51vS$r+g8E$en};jvd5)Wa)JFSDm?#IGBHedX!+fV<=Y@iC~s? zNF*IRdw4HN76BPka;>_xy~$fzk~atrxOZ}b%7r3esZGFb;$URz3eAmL#hmeY@C*)eyeG$m~d zKBakfE1kqp&}aNY*he--&j#dFT9O-17vGC~i}|4Zl!Birz@xp%pm%0SwyBU9lhrRY ztEKhQK`~BA8IOd;)Kq7^!6?lqd9l!n7mmD0yhn!n6Y;E-rPbH0uD#~y+KVpGZ*G}v zt=)3$mUF#Zw{Jgtw%0#;NBrgA+Y1x8mWNu#XQnY9WcmGO(aH~0mi$S9w|*$zzx+M+ zUqrS-wpU)lSW^8Jb|L1+>}59QKWtN4vqOM8MZ9geN3I1Qi{SPC zT%8{nju!tHxBcVi0`U^*_M3&`qxUS+P4$R$!p9Fb&d2*(@Swjw#L7r4l#)&TH7Y;EDU=j~Yod}MWCGcrO?ZD0fo-8ZUwja* z$5C9thuG@Cw$)WG#gZ=Iqk)V|t&#nL{BFy-$IOv+jmA zdOyRzi{NHQiAmR9IU3Mhg26{3xYfo*3OiQsC1dr6;*zP2{0d#{epWGI>o<&Sz;8w3 z)jKM?N~bS4qu>|@>Z!dE$bodF+51>A>48?zELD7#;V1FZdipxo2K-FRZcv#jBnNcc zxs24ZW`~PKYT{8z+-4iB_~6$fc$Jf#O1|bKoYK><^E0$A8}M<5oWTH9`ZbYz3$T~s1b&L(2HUCwHCZFgvA+K_;5Cz8eGvw z^VVmoZL?PoR+JsRkFmgWtuQuSH1c6QgtYQu2=UF7kD#Q^eg zg@gytH?Tg`f{wF+NIizLP?8WfsA)PJ#HwvPeB_&UnKp2wEMcSu#94B43KJiV;C8ze zpj&;VpnPP2#r4n&3dm*57f8Hylo2gEC4X z6oa^97A{CZu_xbef!_-$U~}N(x<69Ds0w(rv~{5wq=1dg^W%pUFp5TAN~EB=(2oWI z*vM|YxF7|LqKOwjq`+l_Y=fZ4l&xqC1`e?KrNb9Mg%eW1o;1nM9Vu|3cHRXka3VxH 
zGaYE<)z-4X!38N`A2tcl9Vu|3CYZ9l$#o$qEdOKL6*h>ik#Ir^7%_uL9gzYDN}Y5Q zfE6ilB1G)b=J8?d(gi7CGZxwq#iF8@wk!RGs`;-K6Tt;3V7$pKiM5zx589Ce_DO>c zYWfqK2@oA9XdiSO zrMn;nITw-#Itt0u<(IyR=NbOk0V!bDdB^#?Sk`rR(sa6E1wwo>_50%A>fs1ZSV3{$ zbGe(qAxOGn1)L4c7b{rH6cN)s1y-z}5P~yAP}rc;u7cv-bl#<HZ9QPqON$0;@ykAJGt! zvO#o+gA-Q3h?oTEjup626HM9O=DLvd#tPUV)+WGZR{+4T&!>SC174s4o2*lR%|2|XA9$=FAPm4p7W+mG z{t%9r5-U*hf`b9-@#3dB;Di;hCrz?*#|m7iIbj7( zgnWB5Hi#Eiz=)Ux=#CY*P!mkqjup6&^u`Lab*n@VgfPKP zsaUZB_Mpwd!wAJ`UqOC6F<)k07FYqDmlIY%KPtFrAP9g`uKKJD0f-f)sfl!X10h#Q zU95mUX2%M0@)eC=23P@|niE!#b0K-46)T{RmHvgn3UV4IHM{I9I9?kope?E$D_~)U zVg&+3AihV?_`ZVU2&{m~#f}wlHc)DG9$3NgT3A6LgmRLuFM&AiD=5w?=Uv)_73d*= zXn`Cuh)Kk34tyju;t)>#HKS^%A9$>Q!$7p}3nu!rPFR5x zA>ZDN4dR6rFd`-ax?=?{)C5zuV+AfGy|Dr|h^3W?oZdlT#0(;J#0nfJxnTuPgo4?l z&9Pmu0yblz5fM_7Wt31m2G~TZeTxEXh@7wjMzS_mz#g?k!oK-l%HgXX~^ijoUj7=QN4@mo4QEl zp7_~Tol^o8B5=V9*v(1Vm4)u770}1*SV2y{qH&`qdI&}BhX$oMVFfuCk_TF`0{WQY z9R%*UMLf?=`wC9f#tLYQYR3v#m{DK_wcazx+@suzAmR*8`wC7VumUC*J66EiKp9c} zu!0EWGhGS)gHh*vzc=bm>D3?I{$w(qbgCDDRPP3{p3B%RB$NoZEYRx?TsI<=Mhib0 ziRV*mr{9}S`xA0A6}v1dz3-y)On-3hsaf#?T6J-IVd0&3iv5#rt^LQOefg+~h!-gw zV%r>+HRZ_9oq}r}|q@cc;5sAYjcG*580HrT_T0l-jchgXM^;3HPQ{wh-vRQ$GZ>LxmYMer%L#A1g~{+AYMsXSJ)j6p6*}Hdiui% zHfpmr9`&agXPqpeHq9TtjQOk|cXNq{Dlc=b8NWj=#=~nb&h80Ky!G!zA(L_Gg4YR` zkEXFxSI)e<1-I4aJ-;6@zbDbL66C!)3%!X?G)O!&rV~aXKX_*g`n9D}^fcYPpXn%T zE%wK3&FS)W`u`KbcGDu@3i8GZC3MI1tF7h)5U`(WOmM8YTB*~geK&%wrmUvg#Wlt| zSDa}>=StPGT4|F&u1+2fwk~$bW$V+9Qw8N1I1Ei1Z62Ev2mEv!9uB}&rQq=b5}M*R znYlI6d_MEnpb7mb@{;U|@WobO#mu8tyF>!^sSrQff*%U$0C~;WfKw%5XlM0U-MUR|m;> zVIXXMCDam_T?M}r!EvVuRVmT4kkwSFP4<4|Z;;nsTK&EH9L*(<`c#=@6~k>AtI$$j zN1}9_)O|`ymF^hh7o=-Iv`w*uE4rm3iY3+*T>ZGH%|{F;fq&(?l(s#%5bu4s-y7aN zxZt=*mgQS1CcW(eSt398kLE?JvSM*vczI6*H<<>zXD}S* zAr@;{DMi;s;`b;)pwckv&Q6NL^&G^i z&!izqmU`9Ip})|A(^Xmc%89f=xKPrN91^Pr?2`J)7Q7{-q-MK2WS8H$$#{3CyWQW( z_c|q~6hN!5I+8~kAlhApUv8^bgYdJZBH0>`W|Q$S6-s}!-5+gTjF;~tUuT2aMHP~V zACEe9@?=oy$a*a;s}D=vtsz26hGdA`@@+V>18Q$9A3;x(_Ud* 
z2w--ng?HhX1KSqeLqxEI?3*O4l7hRsF7%gjI{-_kDNkh28vN5Jq}e^tKNq*j|J6ZU z%9-h!iob~9R5;hjqXkJ31wIr|rz!KoZLC^Yf>w)m?*DqL3GUZQ?wgQpU>W>M(BN1c zH!z<((SUr{>}EsqNysvyT#F3oEJP7WFPvv9i>dWhtE`L(2i_Oh|FYOiqYup}q1Du2 zwHK`j*+Z9X;Ky6==5TtITi|>r-oyTVcF{Gk?0z~tO#*DZrwwO9i=)~ob8T|as*hUf z69FY7E41{`?V>||GrTE&G)#vqYgucyK>(;?G+pB$02)D)Lmpa^`RM9wp{uvRo8vb% zqbmcL@pBP8?(J3Cq927b@)*G0z@7{iW^~gDdl%zQUmp<>7Qi7mF zoH#S}vIRE0mfzokhe8_R>A|p1Za^gepZJFcj$LLn{+WRzN?n94&g;VJ3Rwg^O-jAl zfUHnV)3;3F7bAEqh}2S8r=xUSZYvmOvt{~*Z~8AiBrc51_^Fmo9Nc)4rUa{VSp88& zQQTT42Udl^oKvIA1b8Yv7a`u0eQ|$Hx}1BR7j1~@9|&iqoM(qOr~o*UDB@CLuDj0_ zZvL?lswZ!`D^%~c#XHc+7(sH6$Va(CfzLfbRw#;TDZA(9Rw#;bsVn=tgh-IojfjN_ zX7z3O(GZAJF(vX1eXAmHJaODeT%2RV_=}w1tAKwWR1hS{&~*ua6p0sZfHjGNoYD_U z!D-eI4@YJc=_4V^h+>X;#Z#6M0mVsF6d;=vQ_bAF6rWh^yXKa3!O$NjE9R7bL*A?) zUmV|NspOF)up^fOau&%af{G-~{`t!$p#>cP;w?0%bWQ^CI=!%>J4Nhp6b9riW|_Mu z5opn@2NY_issJEwG2P5XNy01|InhaV%W^?L-eM9mCKwLnErwt{Z~%~3rb$5F4~77F z3%WY+Feu1djK(Ar05~jY$_g#bs}Yd57&qs|y}}8C`WD0M6i0$VxsA;q+9}2>VoeCt zx1g$5UL&Y)F>b+)4orfNNARS7Q%;ZjynAr=Y=1(wg!UC3=BxNoM*Z0{;|V!PaBIBN z?~vW2(|&orXKC4W(1rI!0rr|TDVDd&(I^&B>POJg-_y21TzjZx3@0(`v< zJI#vBxPP)SXUh3Y28~HjHi*IuS0=@Ny{F)8yd7r0o+fDId^$S|e5DPOc`ICiK+y9-i9;r+Zt6j;n7{y2AJfSQ*+2Sx1f!jth6wPL@6 zA#sjxI8!+S@(!Bw{#Ny{_xEN5>7s$2ejhK#$oDB(ZlUO6%Q<`SpbJ5V}MY$H*fcYdt6D9JqjPFu}8s9)*)jCcmEoz+^!6$65>`7Kp%(4;zt4m(5eHv zR?HjeQ9I7BD!X!B?SF*?*g&|Mlj?kqMY0PoSL3QJCc)2m<~|y7Y34iK8$ESz+TSd* z?-JLL?Yy>VXIPIX<&{cLr=Eh-IE>9?=}zLlSvHeoY~%!9q&BdpN#rL#U<%t zUnMpUwJuQK0NN*L8h8ZHG^neZ&^RSe8016Ema&w#!?nTqSG+QRi@;&?gdt8-Fa z=$=f?jHM^r@b1ZI$i??$dC2za$wbz5NwSRCbL+{D&7&t%X`tlw67*ye*P2}9*ON(O zaFw2CPo|QlcTXnGR%fB15|WAs&z?-l!8|gOiWm2c&YaBbk1z=$&Lv&S5GFg zu5NLJ@H;Zk6= zk@4)wR1C}~16Ema&w#$`WGb#L3DB!26USq1OtY%fAECTKj!@nx^IzzmP0g(LIvEYQ z_?|2e*c$xF+FNrC-Iqy|6QE;-u1?kAd zA%b*dDv|*@GLfv#XTl}GY8&HO)~gtpPyVa2;-3F}J2Dm5mh|Vc~W*vLlvZ zBXq|lj?0*_u9E$9jjL!Bdylh#tL?|!!nId7CbF$_b!Qtc^aKAZg42AqF`Y=O5=>rR 
zP}y*`hIo{S^J)L8A<}L7qU_0Oru(;hvtIY?cyhisi{89urS6%B&f7V+iM}0oipL7PJ!EDeQb}#gXyX1b+yT+sGOmXSJ*|TTwT)C~t zy|=*4@vADiR~1;#kW&hsZaRPuNAObPTw{2&HyQNK4Ev9+Igw0(tm#v|Nq@UL>YeY$ zt#|>?!?mz44J*CDs6WXlk&AW-adIhZYkdK}8-dVG(ooI|`s8#vI5&!y=-0%|s+hm4 z#00IC-j7z$4RDYcz<~5-{cg?_IetoW0ahy3U2;|z(f`wf(Yf@bD)@y6ju@4fy_3#$ z6lVSdu%wy1!6nw}xx_Qi{qUu8iF~flPnTeVc5{jR(>j-6#6*{7vOF7bKgb;Z0pt0H^eL7d#~o$uU7v&i5bB%_dj<0;D| zFNHlcv)r&5FGuk*DTQesTqJjlFRa{AWRwov65s5UQRuj00Kyh<5F9nNIyQ@{TUS>h#>k-_^b5vWe zq>JH8R+X#P3S$k&68L5d?(t+;4jjG$R9*MVPZ42dit;sx%b;Qs{O`y!R1-&%!iTTS zgGj>?i{bYpP~6>ZQ*GK!cFYyWi&({R-R413aV%!w8?83b?Gb79wr3a9R_|zJTC9Tq z9>HsQ9^67k!Icm$TnAM}1vICX`r=wvUo5R>qJ+=4FxjqPQ4SD3;!46=kTEczC8`b! zhP4j(as(S5MkI#=9|@PC0L{ON(?38p9)&HLY6xfVX&_#}pP3GHFL>6Z`oK@l4Tl7>uSKyu z%7;Tm$wvZ#5sm0%m}a*(?as&z*5e6lG`JwqHstlziU4D@v!bGPWO;)?80o*2*A_6w zDsQUVrS>beG6G-8-utJL_FW!Eljc5~X zyv2+642DD1dM_`=0A`XjrI*r&t^*j1w)@X^^YXIoWlM&(J$s=FzJY0JMSTdV$;UWW zDz&E6Ak%liI3Kkh=A%e>@NG;}E3zyns`?Fmy>vP+5m|(hqJfH|-D^cAB8Li~u1jGJW+uMQG^R)R!$`F?)1PM{ zi8YX)cn-CR5#HT~Q#>z-1g+9&p2p^3EXm^y%%7GRrB}^4m3=FMlipkj$xN|Zz$;RO z&u{RZ7QDfmX*r$wi!o`!Ka6mUmts&^4*nuSpyOgnBR@$=&0qGmny++FADO|YFi;KQ zx9CHwDUEd}1_TDNDwmHbhIT{K4qw24H9TiR5b>CCC0>LCe&EX(xQ1s_4jc7h1fCiK zuHhfAT9YwccxXi(EKCOVH4I=QFee2GKS>*a4Q^jOUP38=QGYF%=a+n6sdq1v;P546 z!(4>%xkTwz3AoJ`=VZvA5 zWu@R@lBU3j!*+|)+SKd5b8L*+O!(Tu+!*7qT{eyrwi~c5X5;ew>Dyu)w#&xQ@P@~B zErew=!CdoTpPC{e^sRuE3}qAW&^Ghh5^@WR8F$NYq}bQ8;)Ahf3Q-1hy;6 zTM84t@@{SsD0e0twp-kec*(I{HWR+KFgD%DeYGomKwkL?Qd zFiA@wph#L?*e)ZksckKYgChMQv0XL~Ds}(VCD1EVvX_Zkgn`6%g_){qT{$+f+P8sy zyKF9eh&QJ&+`e5lPNr=o&jPk9l*O7ONE9bdk@sqFFV?W>A@}XF$F@hOg1gRVY}?{Tr1DFWQN0bC8p&b41q676fxMYMB?&KX*g_GW>ZRP z{<1F}tkJZF!*-9kV7o$KH-Jc?;Tt+R@=TN@1m9Jb3dCj|*VNtXe_Lkb4BL}I%fzpC1g)wh8m zTRJLV*%~2MB(^K?_WTGBxP7|iV2uJjT$^mhc8fOI!xw5t1XY8K!*&Jv$nOC8 z%DeGEfx~u>XS|t)yjR!C<@gK`w17!(qD;S3)wYY`MGavK#VD28n682SebC z5=9KQE0Iv7w4x95ir1+8rRv6T*sjc`l+^rXKc}>Y!*-8f&e$%U91`2*I5P(k8Dtf6 zjsY`MmBtuEB4-na?Q(3&VFQ6Ua?~)`F88=1T&Nx0BH*BCa4u;aw#zdo1qnY%H@6)x 
z=NES-1IMqjI?Aty3XJX2QJsAdiR}u!JwL(&4%-zNl>-E-VdJ1#T${`hPy~t43qZL& z)s8*LeY>K(r7+rx{bQf&YOzyyb)$+hvWgu&8Qo(j)ODR(e%Xdc0)haSkn; zCK6*~k5@6aT8ssP)7We(aN3HZCyJ058yh4P#wO6i1P_6LB6xT;B#by}|74_UNd6qP z^`FgyO5Hz%2=vMjg6Y{9Da#h+2)-C$PRV5C^@vPfa|oDP*ytcu9+ohL+`lIADbyd& zgJ6Q&za}v)*CL2MTcU`;I3yB^kYcJwyj)0Kk=P6lhjGYkN?j3&x>n zV_30)DYodCVldHnx)BoN;5gF<5yP;d5R9DL3O*}AD-rVDWR6WaY#=&sjv5Bz;2y6T zE!3tm6O*|TTw3^Bx- zeu(VCav`qixY&Z1X}IX`Ob5g9$S}w!Z(eib<0E_E0nI48JK0V2{9hH<`4kw`&d%^+ zD&u(HN%n2>3b%)o`Z=x`mW-Ixk2U<`-PPm>KTmqIL}n@7o$)i|GVL@Y>psI@Bc8mx zN~db!iP1QXe_cx~CXt+}&fwgnw>==Yn@>CZ-&Cr+zc;&oci2rI=*)I^hW*agcr=@g zhu!V|PM_S6G}^k@UG<6-x!_k~IFPO|jr!9WxvBg@+}Sba_%HxE6t#b67i*<+A2rbGC*zO@74TOWRHwL|_eB8C>j;%W!}@hi%N z`+g?u8iu{PFlsS%qi(eaTL=({%abQjftWpx3B>GKbRZT8Tv1l+6^Ped194%*mGxSJN8Uv$ksqiAI1b?HY_p_bB|w9ChQf6*E+6&*i;W=5HhL`rBOFq z5tqkAsO;BT5wpiJftWpu4#Wb1E6R$!0`W1|Kx{H%Ol)TQP@1tNHV3GP**nTAL$(HD z`Y~TyWX3wGnu|VL8vSP15 zeB3ntr5Vimk*h?ok? zE1@+^1x?i#0GcQ_o~i8D8i?8Bm_W>)MF(Pmz!hc1UV-?;Tmmr@J|Yk+wqh>?sEFAv z1(hLN12KIV6NuTc$Uw|JenpwES0Fy&9EeR_9VW0+btsKmAKxSR`}-iNg5ufo^7-9i z+$O8#PK&VS761F+su=^dgq=L-Nlso}^d(rVmoN`b-sm?&LNNFo8I%}ojtt}Nl)GU( z71L#1cCa}ztRvtYSpgbsj;u2XI7b$ngUyj8&2o-q8HnCkpxYKkUh#RUCSFLVF)2RA z?P^L4kf)#y7IWwm<8E!|v=zI30X zcNV)EeY5?d-g2C4^!=X^e4jD;lEAb?<8Y#hzj`pbllkwYF}fOjuLfVT>7Q&^9S+Fe;tRdWw4>Uzc(wGUvC_ANm_z9c4*^Vj z7y9BQYA=927kb0petga5{^h;#!sedyMM9$dV>34gh z?e3I>pwgbWS9JCo&Tcudax=5LBbzrBR=2RJ{2zRFk(}zAItR7nc!_>ZIFCuRrk)rl zF1~!C2%?##JHL`+^N8Hi6K6}ej@+e|Zl}mB-A<8Ox}74kbUQ_A>2`|D((M$frQ0bM zmY%@U_Uz&gG1cqf;M#$R`w#vqyf8Db>U%r!@_pp}o!MY^Q82axZ{^I9p%#0|F+fY* ztzmCEjjxA8@wLP8*7`>FL3*^QE>Qo`BD^^J*u&(#p7BHztB+jM+3sH;&bEHnLyxQ< zUAY>%ozu(z-J$y@Nb@C`mnMA6Ce}&HlAc=?uLWi@$PJA zch=3=pT=u&biH*rs`HQJG<_KP&rzPHBt+4vm`Ws>XwG%g=V-+q=ZJ--Xax&D!sjBm zCes#Wifhzuxh{9l#Nl+otr3PohlzlBQgCxnC$&X_-_(}YXuTvV~{0tnZdPE z>Kg{k?R@Q5H)@v|(RjZ=G;{}se4d%s@%;IrB z4N|HaZag)3qF~Od!J^`m**j6Phfx=!t6|WhE`{|}G2r%-0?Arj_!a9HcEQ zlSXo_`14Wu7pu|}Ytqlhq@RyVKcA3(zE%49HtFZvrJwJBZ^f`U7!77QJFga&Np#3f 
ze!x=l8tbRo=9uYpaBgJ(d|sw8`bTZ_$>-~iK+6dC5m;VkPgr>__Qp`2?L4r&;XVS5 zx$N`y5m-24PuO{&{>?sbAAva-d*b^A%~g-F$^Aw5F}7zS|JYp$YA#qh-Aj-YdA$!2 zVrU*@n2nMRoQ%x9h}m#tyyjDQncsYPGUa>_jFE>q)|iCut1lsuP&p@r>T?8GupkU; zEZh)Lm-E393M8X4pb2xr$cR?&-ZJ9I_{pe@q?~1A>_{L~&IuzU7OS}#Vbsr&Dd&SD z6v&Rsq~dKyEZca>h$G`CqcWRuGs4)BK&Y70k=F8yk9_uexIW8^y)(mpy4zroelv|f zc76OC*Te5bU@M^Wn7Mv5zQvXcnZ{{F;`;IQsJZo{Yj-(P=dZV&kVVY#`R?v$aCSU7 zzkc-g2%d|;*70Pkf`p%&Z6nf?dMGz;AI^EX-H9^K$veoAaMO$da`5zu<#D26Dr&-n z?S#AHvn=1aKaex?de_+qXA~<`^KrYek=dp~cQCg6lZ-2yv9XWmJgwV#%9M{+5leqA zDq^2y%ve=aJm9LL;sKr^q>5;%;i4i|l(@wMRtmYQD5uVIRL_f8_H|Ja`>eByiU(X( zR6M{lgj5l&bX-)#Y9dD!k9E33b+-XDME{;SoWX#;Lnh%iIFTK#GFDYIJi zYW24@-szKVUL$hIGpn~;f+G7Yv$d<$AL$0I1^U(MPwSCat3R>^Wc8w=m~$;HiU;^a zj4Um#R(~!kDvCK*6%`KzP|@?D`h%ZSzMIFr@=Owtu!B*%L}vL^t^omNsnQyd72ZK_ zNMJ_t-pl$?!}Ri%H~sXar8&*?*r&|XW1o_z_y4uvP96xN4Dr8FtxPkNlVG-F`gijm zuT6mEd`~Jl`)~A{3-2^}g8{$Tg8FCP^UG}ARU)K&ye3AlyL?g9n@%pMQ@-<2iRvG< z;C1!sQjD3}(x%7bOhjHZa`8OqGY^$$aE?}pCbJ=gi*N*(cs!}#9gUxhN<=6}Bc35f zqxudal3-%;rh|4gX+A1Z{Yndj#UiJuE7Hv@>J7EcTqeTA6@XGMut)soq!Jm*0SUK- z)uJ*TPk@Q4z5@zW&<-e2QHcoafQ5*VnU0?r`5v9=BsxA0$gw1#I$lLVfr(`MI8a4_ zNWw!@CFGJWyyHbB(otCzE>;P2v9U@b!o-D$RWc=ftdc3IM23r1Y^@Pnl@r@4%$|t~ z5vzDA_*lhLQHcl_t2nCISS65PVnP%PA{`$GER;s#UpP?}i?Qj!hqzcJknj+dNCz(} zk&a4KxL767#l|X$2oo0~R>_p`u}Y?-5*aR5u|;=utl|hTQ6XX#PX!;Vcq%Fp;bIj> z6&tGr5==~pSS8Z&almF`Re?+6W0gR{LsTLiyr@JvDyzc9DuFIGR!KydxDc^Qri71G zG9{JBaIuQ*K0(JSjsO!CB3AKK@Ue=gq7o4ZB_j{x6^zFO7 WNq@WBpG?M+PQHZs+kf_@$NztIWgI^M literal 0 HcmV?d00001 diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..d1f73ec681fceb20a96903b3512fd110cce38d52 GIT binary patch literal 267581 zcmeIb3zTF>eIGoxdvN?CE1dG-BQ{onupuj*D^HHZKE`5*d$rG0aAZ~ff$zwpe> z$t9=8-Tv^}+s^fd)7AEmzUa1Z-}cQY``~Z?;Pu^ozjep|oqcZU*FL`&#&GHOXwcs{ 
z-#OPEPglX(BVgYHJ}>}asr8!zvNJ)6_>+r7APBwGGHIM^BOOt*KYoypnmc5f12 z374-PiuW)6HTf@!8;i~57r~zF1F`I{umdqWW_M%S9}WG7jkLo?;;@76soo#}x29o# zKY}}0c89&`lcVtyoyo>%yVu(64JW`(jMLH{kiackTiZjQD(JH!6z z(Rizc#kuE}(QqaH&e|_AS8QuNZ=Hg4ZACpj4%q!+4 zCB+;7SY1rR2>Q3S3@|_G^)}^Z3rG zB!owFp9(^zKUA4Kkz(W_MfhY8!ENmf;^tjDgCw-+h7cjrY6wv+8w?zNy#Y6Sdv4sk zd2=%a*GMHn$RPX6z17FgjKceD1TVJ}UJNI@<9_$lpm(ypdg?$^N;S|+ey>UkCa#H@Vfs9KGCZl+70}k8q)w+2y>7NqFxn@1S%&%SL0YBX^8&swW!2zuVkT_~t z>vOKGF3UYmr|6GF@CqA6w*+6a5>DYs&K&9H{t1hmaaVVW>XLW?e@(<5IjItAY}@9w zRJl{s_f57(D6Do-*qh^jbt?N6$*b`1Bltd(64sGxqtYPWn}1`Db~(%T>MhiDCG1C0 zyW2@S*6uW3i0AKq-2=B+^AB&g2p=c9!?W>x`u~Y7^Th-$0gN?xCW1Q+qV23yZQEZ> zL-Pu>;=?wfnGT|8+|#B#{l|t#edCTow5gG!gK}9$03?{!ovUnAEw2~z<)-(U4?OmC znr_1IQnc^kr+E{r8#NH|kp{dWgvD_^yV$~qvuWZ}&1El%vE4W?Cw0BICgK9{bR&!% z&_+I#RT(i8TURqT^FC^T&jb=6ZRo?Bau`u^O}&rEG~GaEpYWRvxFvu;y-Z3Jr+0x{1Ti&Nbs=;ZZKOx)@QP-LtM+x8M~(QalyEdAk)D$ z=m7jXb3c_+QM;&a))M!00P6@NCFc~;2KYHM_LdV9-O_};<&P!K8rQb}9zv-O0Q_q+ z+(v{Xc%Y?_3jg!qOJi2>C9B*b00Bn)B5O>mMJHX4Z{0idL^A9E{=f{U1fevCVcPpfzJqx6O3^G%sfDCGh6u6{)G6XW9 zeCq;T=1)NYzH3yV4^~J48A!{EA5uVukn<7{DIf#sc=1CDY(~g7t?&vm)I;*+elN3O zAqFd?z=|`nArTLhwLKAr=1TK17At6HipcjA7_kB>gzBoW!U~Sg0xKYc%oHmigL3I<7j_kpdB~e^ zyIlo@hL#sUtbhz5pJqU;fDEMLg=?@X?<%kv0b>PphrdfHMTSWW61vBquTBg_77IUE zVFhGJo$Ty)71&U-!V0Vi`CPHVDc5>{9N`LMbXG0j!djCcjLQad8U8mzDa!dqpmfIMi% z3dkolHmL14yd+g0RzTSjIu#>UKprfLMlTKxwr91g;Ajn40j4=KtN;(kxv0eJT)M)X zk&@4e5CC6YXu4~wT?M&Hs$vEBF*8<>lfMwL0z5S0p|A(Z1xo#b-)UUT+CPjV*`aoJvSIw?JGE12`ivN zuvZ1CaICO`W3#{t$RIPt3do?E`k}?Vf#8x%$q@4XI^eznGLV)RKdgWZA?GC^RzL>Q z@xnD&m9YYw5q5K50hu2MGFV{+sA_felu0tTI-BTU0YvK*9{h3P|5+->D)) zFdtT6wXfiq16DxfV#W#>8z?k73#{N+C9Hr7!CnJIXXyr&yVWm|-GfhTe0}#kI##ga zNrP+j$b#$OSll)oSs)ongBqkqQnCe*{@em|jNU2#2_2qy$Kd)6EyLxUBun*~T@X)gOqp546w|Dh4k>?yhs(38-s z3jwLIID@|=Rc`}*HKb4};!u;x2NW-dJ@|`P-&sWPxdyy0R2aZ0L_kYJuQ&vx7S?V} zg8hp21$apo=GqPYpNzqWAY@$tG|rkcOrO=C2K(rQizssqGjb8J0lo~zPWr=>CpWJm|sng!9GL| zSy)_ONKfF8=inU?+|Jss+hoSw;S=PHVs0)987#+SCi~X+qM6LCMsw-$$R1P4F20x$ 
zu^dltFyYVPRdQA`(89P5!r}FP^`>KNuw5`uC!MZ#N^ZwtN{tf4JFu zswl1$wIcjP1Fo;k`|;kOw}DQn$6qZgLDs8rZ1RZ)iHF9D5>zFXw>F?xSt_)r>E!)P zOInPQKW1BTFI=bpUlDB9Edr(>udPs$G(&vcQ`x}3Xw)Zwfc;c$f}_EtN}u*u5p2|D zHRUd@Hr~rd)37rd)FhxgNQzrQtqK9lz+r42>kqffX~0T5XpE>{B6rOxX6|=+@K$@|v**r%J-oG}e{2h(G+T2CVzX z5Pa#SYsJd%Tkut4;j-33KqqHc@8@UXlZwrT)$ycn3oC9mLdLjK0PAq*M6iJ?YCuc$ z>EA@~N-NXO2J5U`xJ5tW#16DWKp^6y0v?zmhn0k6Cwmp@WRIiYfmSf5XnSw%I3Tlh zqXhhPy z5-JJItb*T-_zhu7L!cE3Dbcf#RadFa_f(aZR)4QLM>ENzK2;`K#o$)&&05OqNN%&A z`6;a1r0P>RRZ>{M9%KBfurW(HLjhN`OGQ=IIjDJ8kBep}`{%YxX`B6X@!tD--NCK> zbC!!_S-$1p*fB_v#$VOM;nNX+jh2$&?ABm>a^x3nLvw~BQSjO>(voy)xA=(^oNzak zS%}uGuTA(HcxVza$tC8-afi!9CtOLzNd!vFw%_%hE?uG{HZN z_`AiO?314q#s4JZSzl@d-z`_YU_r{T?G9&zxD5Tll-8&OE(f?IJ zTvSt|oWOS@I3CV5@@Rol1jUB}>NI7Zd!-FWTS>2q%P2GVKisH;`*y*79kLB9gZBju zj`U1~+2n~DT*-a@eKzeHNOnIFo+bh|-qVCrp~X>dl$ka;Xw`?U z^pSv)5fxf^=qBxuABG3xlVLh!QOnAh83cf$qp2DP4$ugi9P-eT%tlwIsjeP{hvNt9 z(Uk_w_}K^^^7g81(GSBJc?=+L;NqoN_cwYz9U^a(rf2mX6#f8Ck}2rNnL`a+7plk z^=VI0+*pj@!yzzdD!}9Exd`!|?2G+t(#71X?EFWn=T2~R0#5178P5*mgsedYz>(-g zGudjq*ZRGF_;?7_qqp3-nEg1K7lb7X8DUl^=(MEW zbA2lmbX=@Yf0y71qPo#xVS<=P6W$gAaiUW~-{7|@0>=}}jl@1ja{P8sL7*Um*CqT> zBv!Zq)+8u7sUHx-h*XCYT@~q3q>qLuBj_CSil-e$cY=x}&HnkzCZPo#0OF;ZlR760@hUdB zo7^)9$V+FLyC)JTZPo({HB(gpke5z3b5S+WQrIT%lR-dUItdXI3E zB9Qk(AwXVAR|Osh1$pUcL^1(@LrPOrXkkXRfV_0voEQ5pB?#(EhgT^M1%qN6n?JNe z$D^?(1nNtvs+Cs@>PyF^+$b@6ba>xm{?L(l%BQ5mjyh6R z7Ba!#?Q`S5s!_D`zL$!TR(70nG7}@MF2B#9!)$9b&9r4hYESAk|N4k?7rt2cy?I0Vwj5t2(DAM54ZoV#&E$P zJhe#yWy{V=GO8=$Nvp#E=+8GB;J*z~Z{I8yRGauQGUUHMU%?8&iEKkP4IuoAG5n9H zzIu)BK(Ak+1$(vvf%i4wY+cSFw(L28_GoQ|Yr56_jkQtF^8);76SnIWnRX{-ZO)YQ zm-K6splA@*X{a#lG2P;K7EZ^TVYc`wq6<;@e`&&a)(Ypqx!MYl)L~|*Zd!~CsTMck zRQ$$TwTO7Mwlnz23Z=Xmfy1a*4#(YrHipIfnIDpwN0Gvf6xSGOF4}^RG~kW4USKO4 zF(6dV5VixcVt;eZ@vR)a;m?}(S=N>%6>!CX`ZG7Mq-!nPYHSv#NE9}_3#q_9kbJ5M zZ>q;z%I4*kD{K|Oy3Q=s>yslKc+=hF2cK=o?teQui7v6j9FK|{#$o2a_q2#?2))~9Z?E9oCsct433DI2bF zk~`5Ip3VBIMe)PiMciYbH*D4%OE1zXq 
zkX4$xqpj30lUaz|(9cOErtjz8S>dP>v*;9g&^KL`wNv3>SM#iM@+DX+G$~rS(ndkH*!fIXQ^0|l?un=|j8 zOyF9Pi~M>rVGO3y^X$oF()8}hgxRVrG*nL}So=hMtI%N?sLic3Czzj#V_GCv}<=h^oCleS_IkUMZJKB0S^kgzYR_V#w0eUh^ zkrzg+$G|O{Gw|)nc+rV2mv4LV>&f^bu*rM&WHOQn!T0USc(N+536}!P?(bRH%NSrX zAT8`egN^*Sm@R7z)Vn9+M^~vWTu;W2mn+e?C*z5Vj#z|^&^;MHE@7tjo(zXv_ns^d z*Rs!^7mLlib=QlB$3L*a4Y)_M729oor*ADerG3bA@1jhQOi%VQ!w$^=9hs%TO{I>c>*9aKH*ww_880}|;evEz{18DpG8xGL z9T`tnHTh zCwn@5hu6vA?>6kO;FC=!FSf8Xh=!kMp!5E4X!2R)Y-xrT0vBcVM;k|w@{PCux3L_jCSW9%6jpEKw#XSa( z#1DBZj7}eVt@0COdOnvsQgP)tu)bF_m(>j3;Fy=10H0#sG7rB6H_EKzs$o9fWP+x z6r~!4Uun7?KZZ*gc9tsDh00~@Sh++V^2%i_K~aa)QkFxfGImt_y=^LM!awtSZ;!J9 zmAbf0=JVPvH(Jj1N-_(T1#W4jjFApV!wGZ>7NTOS z+Uw-(q9d#nmomezMzEw+Woz6+CubH{73syG8m9=8p6qGdr9E4BdGhHW@^+Ve%AC7s z&33n(N#ia=>~`*we_G`(gxIssUC7Ms+$H~XFte4@bRTHblwyxHAq z-HEeL|1BsJp?{+Z$v`*K4Ag?d@im4F#1dZSE@O}4MO2yN>U=F28 z1g~NF!LgV9je$eG_gzks2CNp-WbSB=ObK3=+T2A4-)O+CEOV9FZ3&L*Cp_8Ju9^#ThT ze;UDSS&k~}6?H$#8rLA;umK^xW|}W^Wf2R$-GJLY85RSFuK;CN*0NJ{u(As)c>MRs zGgKo-62gbC%#F#4wju5T27W&R$^GLd)uvr(OJ8wtX;*iw;q|mMISkjtudpgjvaa|A zum^Y9m#rUMjN_=qbBYeCw;;4FH1O`$PN_<96u9!4aF10Mf zwRiI^hfafk?9^^Bd_5Zi!Jjxa84QzRC~zoy8EP&~2J?8i8of}$!8e@R3zjvZKJb&X zzuW7AO1d9LvaHI6LrKYp0)ZBdXk{2@r#tCP(R~7=F=;ejO-q0kXvK$ZN&q?(k54qS zGN7@vxQ4Ka^dB26ni`H9@P$Zr!-6lz*#bZiKCdL@&VhA6<<8u)1b$3Nb5XcMW3 zRh8cS?ft<(wodFTG{H;~$-(lzL*RuOT0whR;AH^);b!lt&h&h`U~#0Wsoc}!L>y}{z?IW)CA0ZFowmzipm zfyzs{#^j@0K-YCvM<4|zg|y`>^&)ZkaAtHdga%5EG_DjjkQ_>Y`URcB{xFhl>GbFM z!boC2(S+kHFIG)GZ_6@`#yE}5!dQ}rYM6g(cT?Gajo@Z)uDE1YNBUXziZx<}guiIO z>%5s3)0w{*lREsP@@h=YDeb$F13F#`142^sm;H>=+A}Ut4dR~mq2-iDx)TQ!IVT(h ztof=7Si^G01rb{uHP8YN5rh~i(27sKk0}Ru;p;AN4a=q&Hmbu2EHwwXhJCzjO@$wL z*ps}9)b7(m@2H5kU%IR9_2b`9fH+@IDE~qVWweESOYft zDc|pE#2z+)z_%RRWQI{OKsYsQ3_Z6tnITZNDn(6U5VNvHc8@fG+C--YnPpE16Tb4U zJy5t|yY!Sd`MPh&#+VGX%h)cNh64Y@jTZ@!E)515K(fIbLDw91t=<*vhV7D}6};t5 
zvBMI8fv{*MF4(SVhyV>S8De=>*e)3`(}%i}wEz+kc+PFj_cF{!905tv(!B}bC1rWJYtXeLw#zf=pSn1DMe6dBhlz7HY?qsyxJBSXyv+hrb?gbTV~#zWrnVz(Of_bD%3n{nUMulnPQ9HM+^}7aH!eI_voqs3;)v~XjEVum0c+&wxwXkG_*q&p zxB#$S%%UT<%gIOF#P2Kb+5?3fwo7kStRLGYL+vuQOQxZ~KmD;?GJscG?1t@zwh01Yu2VrJMb8L*7)%FtB4+%!O8veicy=#5OV^oFV7*GO7478h*SJjm{*GB<2j z;7Z+hSwzkd3zcL(rM$H)ORbLkcF7>b6oqu=fG-LZU9eq&#D$;I+^}7dO(CiI%iffb z!lXTL!*-9>h3#U(yNK;FoT-C|1`(4LzAo@oE(jwC?1=3$Y>Lp01LDX~bHR3*$0gxH zx4XeZvhu=Oqgh96mt{@}5`K~{0)U4Ubo{#Q+hzEb)qbSD^_a67vgKP;9dB;fF2@@e z9s)tOcz|-z;J)K? zH4PDoXTr%?t3ng&HJ9A0UA4hCgU|Q_K z9PmYfq6@Ywkht(unj5w&vMD4rf7#C{t+`>l$Lhj%G2varb{WpNAQA|?$@wcq6?gE) z?Mw!SO)+dZAdU<*7i^b#ykfX;K?rjd$m?Gn9_V%^1IwHcB>W_;IS_KK2p;Yda@@De z@GGs3X0dNKSFsudcHFni@%HQp4{q2l$EX+}oZ4gtnssZF83K|ZQQ4a%xNH!oal>|b zc?)5}SKhS;3KSSmRyk3OyChC~=q~Ao*XSX;*TJ#4Z8&6CLZ1g~@l8PKSznRtBw9Tb z5W!zoJaAM=)a;A3O{Kew)MS&|?D2w(MDJ{b$qC2-g;*3w zAG3@`=vuQAZ0s=|^~+fit#ZpXE~$mMLP6cHM&9?@O4_HqTmp2hS*7dh;?rMIa(}02fglih;EMW zjL|LcqyQ;j<^#|C{Hu9KS6$}Qg%_0rc{bFk-n6@-9^&-r)0VfO>xXEohp@kBeFI*i z;G(xZ=?_Lj%^;t?rfnyxkM4oH6{GBIZz6qk*uU}}E_(@f>*nq4!TD6i@jm)(@^ZU} zlj>^}?3T@>I(ErE-dRbO*m>eoICX0`*V-5jr{mF}v)SA3p$nXb8|OPK z%P|~CaTbvo zFQ-hIxY9=xpGzxU?4HSbDwAA!_L;5ZZW(*CT}OJjd2`cfw>leizJ*if4C*wv2A|vm z_j*|@DgwxAhRkZOW{FFzmOkzfHR-e2!b0M({iK*X$`sG3z`M!H9fu)n3r2Fa488R2 z5|`~}MEDg0xZ<{&Irx{pwFBZ?9ez!>!~)-ez>k3XkO*eejXTeg9i785Q}7t6kC)Rp)^fQq;{0z+oM#)_Cc z?h=T}v(AB-Bk-KEVy}w$sBIwDHFYjQmFPoi*@g%oAP|%HuVsd848-_hmq1L0bqvJJ zp-k4CR~bS ztPZJBYvMb6Z4caE#$xPNHMmVsI-vM4I|2_BOK2pwvLCOs+l)*iHR;E8xY}WE{6wCy2ljoFy zzll|V{lW-m;I@c{&cc0npULc79mFH}xqXn7S|A#|X;t5m6+GgJy9|voo5TW7lO&k|XM07H=bMHvo+f64cJ#B_AV>J9sI#OT$PbLj_96j zx2C<}WHjy!yIZ{pdG+G$^JIj-YOeof6OA#C&igp9UIlepuZF~~zl@EOJfc-}$?K*y zioCHjelG86*NuyJq*XOj#2u)aBJmb8Mcgl%DUyQ2Oc8f?W{SiTd6QD(LI>an;YF-f zp*w!Id!6oZvojg=Hww$Th8Wh;`b()<&8C6W-b@kOyqO}k`2-d= zr{}kk(Ow1z+Xo`%Klq#Q{LIkGZxF?cccM5o?N85hW_RFm#xO}Oy%!y+w9wfYbSIPe zayS%UnjOlNeGnfYs|wyAG;5|r8F8V;`h=eXy4~KByo|Si)fGjjZ&;tzipP7!7?7!Q?6xVWVO8QLBA4w#ch0Z#RaKf~SAPUw6c{^7 
z88cNLc~xFw?Wk524qH?5fhyZY_BJp4M}fg()(n!;O4Qvk#Z1)hB9nY!01hI_rv`s4 zF!w6ToP9kgviGWDW-i;~NRCwqN$lN*P%U^wT|=A$&dpY-wZMX?D7ULB3{@x3pNj$wA!U5@jcs@;@ID zf3YGw(H4F_D*Sv*`1w`B&({e*UoZT8gYff>@UF{YzCY|wb9Rn$ODsGJBiT(fne@*L zlULb3M3#N=W3SXaR-l~ukyOdG#!vIqji1kJ0Yd+Xi9Y&#)e*=(9L%qI1QPhkoXkAn zfJ{Db9)W~2@`QN=92>~z%_9(}Ay0fSCs({xfZ0`I`WV@fkvrJ;OQ@-_a*4&Ft(RoT z*9V}5j3S?z2Q^|=%-rsFpBqE1nB6Uv=&|0%OzJitBE?)>$S_P1f#2aU8-|S6e9*&q z*^rTr!+cmW#eCcdF{}{@-C12iXci2iVonY^%WHeQ8k;+b3u`2ZB2gFfaU;aA<|Csb zOmlN`kr9dCo_55L@sm-JAD9`r*bzsln3Ic)NCskN_S=dS;SXKw!fpZ~o@_>n8)FJ1ww&_pL<#S0KU zwG^F5zn({56ZG{h=w347P^;brV-Dfejx!Q3z4fzb~Z+#iPhXcFDd>Ct$rJB=46 z(=j?&Gg&|t{OcaLW%VE&z~7<&zvXogpz-(T2H>mtsWOHXDeLgjERJ{);+aq)^WS3h19RG(?%paat2y;K|Ywt~k*<6g_ zHITj;Q({gZxejHaE0zDX(7d~MhCH4IyfpBkX%JssJpj;N%tk}e4v^KP+Y8|q*njV! zyU{y48f^B)W!vFRW$6lX8Z$nWIhjwFxsW?uav$2sG1|I+*iSt$6>%00Qh`a#aemV4 zZN|<1aGEr(np?UIeH#6uUHsZujp%%-;li@9T93xAxBw z0i<6oB0rWNJd!?-;}}t&!Tb~0qd3bQL9q^r1hcS>B5D7r ztM;N~ae&=gg?@7dLR5rpF<0$P4#8ghxk)=;B}6futQW$dKbc;gy4fqPj*djBGzO6G zZgCk6#lmW%y%vqu8}v4&@g=YiZF<=3Py3_cRgh{Rb&>RIFSP@etKAEGj;Bm>eu)e; z5C4m4WKQ{Gqs5DW{#qjOJK)f96wk)J&0G7YPxr>?_|~4%FjO9rIu)uWj_;gOJ}O2W z92aw&`mrc|ICBLOZQ>WeAv7rzI*^1qC_twBo|fiM^f#a4rE?|hd!pCd?xc~F2_5Dl z;3~+dNn~;u{iQoROL(CHqDUsI0E_K~lm`?ZR$(#yc$OV=S4vaTCg)R%XE)MG3^;wp z&xd_z=k>IY&elb_;ROAj=bO(5#iu0vOaM-H$Nlc90ovk%VoX-Q;H;L`O9%NlC1gAl z7E@E5cKgFLpX9|tBc5A&0a|N6n_5n`-I4th@~q{Bm1|ep*B)uV;2i$u@OYzr_~_v? 
z-Rm}QIDNX?J91W4M%sUq+`UF&kvr-8Rwk_mwsfpaO68P`rQfugQNA+FIFM zei30w_E*?}m>sj5*_i*Zk#^Wf9Cpw>)f)uh)->$z3s+X~m@GzW@_vLpAs=OGl%zU4 zI7{LCzR8YO?oOy?ww<-HwAe(w#v1y!?687BmYSI>Wty6sC>rqb2ySDQ=*HDnUvohE zFJ1prPY2>9Qf?p)#7FljqMP!W_=u02YUkqv4Yi0)HC$n=Lq%#n|nRa-Yd8ANa> zwc@uqLz`|05hAS`Pt2$1DY=JS`1J;Ghjcr6R-8gfA-F~=2|^~2{TZAYJ{y5)89q;K zrptibHCVbMY+1NH5yu@Swd$^Q#hbU@+W^gWLau1Q$h`Hggj^i_!SGHtr`T>_P+91` z4>>>ZTM=k>nQ{^nZvV10pdj(oARPE;1lO6;c(!oGmWppdH+;~yoj5boZ2ef4+|;#} z-+}D9GZX%8#9tsJv#WIaCTDb=K-m=uED|rQrf+?%@wI|RKJe2Gvq5F55FF5MU#z2+ z3nO$EQIOrUABo@O zxW9F?zgc$KYw+(Q_&$>o@}+~AtYH}6Fv^dTOzhQLsOw5$WdpBc*un;D@OF#vks(7F zK9txpUrf+iw9iERjoOkxhSng1wC!ojp0({RyBo3 z{)`*@FlK~W`!Ix%J*pd-kV5=I1jcRXBn6s!3LkBj`wF4O$4qm*mq9}C;Rdv<4MgZM ztc8+OiBoWMTpE{5fk9bT0p_2FG}#ER`G_x2~1E%A%tR*L6HKw-<(Bl zt1s|-Aq8X(Y+Uz83JBGfhO>H*0x~koj~`M%C~A4B1t}mS+wo$96cCC!Ui^>(n-TDG zKdOpptIl(1DD;+t!3JcqR!9N)uugXNNP!KtEgPi3iV*6|yxLbXhzf78LJ9~GodE5T z0vl?aDVv*I8+E4M-lEM{zp6j)GdrJDeZNP!ihVD@Mdq770&X3VuAReJWc z$YQr#rhSG>|4Sxf2P;@11%#y7k{F9Q@}LUC{Np?_{cY1k2WF&lr5o8 zv6kG7!h{V{kiTll7GvDzfWSxr9*)oHnqCxakOEATb5WNDFrQjDSU}?YV1pFoDiHuY zV-t)YGb05#`I5#9qx6uT9G=r?SmMc6NI}kp;DMGx3c{*}DflA`q<~yX9_R038Q0lS z(`knlaPcV>aIu%5+I|84S$-da6;?oRtuA&GSOiI1tbnnBLZh?53fh?>V!Wroh!s#F z6q5{!70~ytS(lzMRzT*!Ms|O!fKb)c53PEz0x~koj~`Y*C~A2rumY~gR4wI}-yCq> zdO)TJHdq1qu#Ok5!HR_!1B|;0Y(}71LG~VS+&D7BT?J%*seMqW!U`)OPwHf6j}_QZ zv%(6j2>JGAWDqZ`fDq9M&>kzWp~jiAxy`j9>5UbTL5$0VHoFQ4F^xzqu>uQ9c36QG zA!LuHM$rq9A(H?btblx&Yecv(ZklT#tbj};AXY#~R>lg*gJ!INd{SeBl3t-Gsa(2# zGNu~{VTBctAzDJGVl0EngMm0S*q+s{g8axKw#?+P0?ybq?UC+90+-$s&&vucz#rvY zwAO~-x6(OrcKKh&6#6;_aQA$Xt>E5MHx z{^!A$jsqL4;Apx9H9Huro3U$1q*79xu`p2D~;PkjbWr*0{JtKCG!9 zYQhT0$luQ9c36QG zpZEM20y1N+5n)oT8OaQU6_AMp#0m(>%2)w;(2Nz3PikyX+o8B%1!PP`k7BY4 zn|%djh{{+2d9WxNy*M=SVzaLxKYEufGdZjv@V){(9ACBQnl77t1(+u1qRy`fq~<8@ z19Isnb3R4P!U`+MRZ?|d0e;Mk73Abg8ZV5}gW;L&E6BO1iWT6;g0QMHTN|w4SY@mL zx2R^UfP|Ud#|n-)U#wi$4Bn*GxJJO19!Jbl4&r2`Srgm3bnFrSkZ z;sSI!+{s|N;(9O)-K8ST-)QX 
zc(Gv-4?IjyLx`%y(sD-ypK8FdYUU#}Lz=Ds5*a1IY*mX^_)A~jJ(+9Fu2mS5cLsG+7y&wY zO!HbbXSXRcoM&^jtY#1ye&NetDJBT$N|YY9Y-@p;ngnmIB9+9?9<&bZD=4}9YP6M} z^LM0zt%-nR6~Ps5>&vE&p>NpdFX8+J<2Dc6bSJik#va4K?ULYMk4O!|@wJNxP`19- zQhlYf7;J-X{*pyyGb!}#hloV*8^X!8NL>A4Ypd5Cb|(0OlTL3u9*tY&>rToypd=Lq zmT(tJ;F}!^)+}+92;LU)M{z58KzX1yMwf7rd$H21Jz7up`e)8gO@zNQg2@fd<*P~;d7vALR@2v650Q-@i|Y$$;|KZ`yd#3!S^IUH3__Wn zAa^9hWRQL9d(j|ss}XmGe^a?ijE+b4m`ZlB*o27XczTgf!kxpbfBZuJ=J# zoN11nX{6_ya#sW>$CKJuK7G9)8*HVa7UW}azq?8&)9$pt(V1*?2ie=ZUD+|>iU!r@|2987~MV?b*C+IeMWLpa^_~ipr-#4W zX(+wrD!u9|vHLhuXi`C3RLrUXmO_T<6(yE4O?;w3;-RrZPAaLqwE?}#QlULfC+}xk(proBG24oJ;X3{QieR&D5ikXLZH1C^ z$Mi26^$8$gKUJIH=o_O|`n11_V52UpDR*(T@m@9>%kB_03FwySL4V_X2i+?_X;~El zmVralq~YeVE^)xSn{ZzMu4u|61&Mr`rD-pbU zm1`J)II#om5DHI9A<+N?1}+k0!r0cH8e7GL^xTMNs% ziTCE;B;2~DjWb#zhE!}+mV5~1ca@wgh~Q4tKfGdenXhj z5NL%$O7tvb)m3U+h9%k3>hD$OXeN17W?Z&7BwEGbR`1PP%IipOv!D4XtlOmOQ$nh= z#~8mVY|K*5P{0-KQW3@yD+;c9Tr@k`Ket^<+w7l<_ukj*4sPwAvs@(0@-6?yjzNm% z`F=X$uhCKxe0HqCe9<;EXE;(!z-zlmOVX*`;wMsY!rf42AzD@XqPAV&>zto-j^stGbmh`E;Jqh;m+A()y+0V_A?14Spu#-^#)2EJR=CL9+28GrLauP^;unQYqRNhwqVsK<={>qHr|_G3qlpS(cXj zTnkAW^$NexfD<9d#&SY!kXtCJNe+oo12m$jL8|UN2JJHu^e|lbq zDqSXPg2`hNr-iXSL#`uB~Xm>s17zrP8m!y9l}Tgdqfebo|NlA1N9 z0=($z-x;dO$Ka9pp_(*_JnSDtaF4gnQ00YFEW4$~qZ;~$5r4P1lil)@qWGU>MJAa& ze4zoIaI#t_o3Zc6i^-eEXD0EL(qAW`b(C-yUI&pc2}8~0c4@cvOW`G}m&E%Q|C;<4 z2|uREFW{8&Hm~USB%3PSqq-`KRO=K?F#tZ%fV;w(8|q~-avWaFN!?zd$~cy&^e+dt zExd;aVF}qcNmwNXcXi^I(eeRUIDzM`+ne@JqL5~HckfKxME_R>aZ#$9q(}H}1joa< zMjkCtilF#VK%J({bFZ}FXe;Se^YV&tsS*SHaH9_H+XeS^$TqMH-WN1D(lZfelP7AB zZ(C(8)cwa?Vrq4T%0wwN?RyXJ5A1(gOvcd%XOz%tszqVG8j$Q!tLDb{_T&!!;x($2e3_?m$pMVRWw`){_phS!~Gxd@M zHuv|oKG1-BLmJ_U{-B30dqn@A_=g6DU1Bu;nSmjST?DP=oj{e`w2u}bCfU2A#-8RE zBX}T))KXZdC3Rf+9TDmKxY?19oW?cp_NMpjg^?LM)xwE`8&6V~U{wyQKL036588qc zhrpauqss(%JUtg7-jjW?e@(iWdzGF4NcDGzvr@*h!y8lp9ElgP{mc7m_lJ*%P(6Cf zU7~usE#BpmF*L5OG!h7sdw4#I6$?$5 zgtvu2oamI$H~6iJ!12U#BeBns9KRh@5GcstbqRkIi4|^uH3>>i>IcLyBGutUS7it8 
zs!z}OXoxa`&M~ieiZUXgIFX71WD`2o%)RZi2`wGDt{`17^oPmloYHU5n-%Db<=ZT! zJdy-<nc&X;3&WRvi#p^Z(ucT6kARsTDW$vCxptM;J zDAY_<0YF|l-ONQ%!j$`-5`PH-^3q9&m|!@NmkvRC-~b@6NE3m)9|{5TQo1VeFeu1N zMZjrK(n5EvPRY zmvW64=|I!JJ1wB2iQ=N#v!q88>| z1!8!A6kwBCsXAe}L1veOCE7zstIYXtH({$T4M_95stK*v4g&NPdc_cnaoa-ojt2Oz z$SEd>&(0*TP;dB;L;YJ79px`)tg+iS)hEM!z5$QaWsqB>bN3g5R@jOEYK~dRw7|1& zGX7PKqNVq}RE)H;N>%e;cCSzF91&Ht}O*$bWynf)xW!WF5i(M^s ztT{*fn9tVb9Ae9!189%dR=B2H-QQRn^*k@YpEhB;UXf{cQr6~7Ie$sNHVKLbQP}I4 z?%X>Ir{m2qJNFbpBj?i|j{7f77|&Ya95`270g^h*Zm4@m)x8O);y2c+Zp5R#s(-RV zDQ`yLFzS`Vad)7NVex+Ehh*ka1YsKcH@nkr=k#d2)t$x*C?db^fm<|purN1ae53(y zwDkg8(TD+|a)z)Sh!y*rYmRT_=na3?w9m4(EUADi2GpOqfhApQ*;Zq-I7Q-Fk~2Ql zgg4dWEoJj^%N4c?U|naH>h;MH4!r4tAbhqVyZp)b%;Lh7bMT2^mo&h8C{pQ?a`Sqf2+mt~iY1#p7o!4O$PW(4rbCY)iNQ+LutCE1jt zFq3yBcg2MelYMJji4|FHJxdGp$KlcV;YtO%<=|Lfwx#(Re7p($8L5&>sMc>URN}j{ zQUR&3q%3-E5mnwq)%iwvJbtu3b)r36rf~I-E4-h?GmehPn)EvO##)@@PIQN7v%YGP zd}kf*XB8F-C0Jr|Yw(>42M@|6#8m=J0mt{B)^GaA2T4WFtD)eva-+Ehf8V+FCU(Jb zEjmNqT0Y>IYNTT@l1<*=Nw_S{bpM{@S{<8ABAu01h_0*;`&9MertBiI;ELnYl=Z=q7IJ3ox?TmUUw&FEl-sAu?Z9L4BLS4l zSrx-=dPa$-50uML+KJ4)66) zz%ncL84w-CYJHTIE!X9in@fO_E3m3$+%mVQMVpQ4e2&dE3y-*xp)p&7pZ3gsIOOsj zL0q9R%R{#Hwo^0fDt9j;_RM;+qb*g<5wItdX`tYB7kV;*Yeg>d>&b*Mm}<_mCzDCj zyC)N7tFq8gJ(-LL&z?-m0h5tzU8*q5(5XwtH2C&pQnn2#$*U(524wBpuO|}-i%wbS zC3N2<49sw(S$tdb?#Yg}%DFvEPbM&=a%OW+cC__u=*eV)tkRRU1N3B;A}@^i!sy9( z(TOgXW0QV889xLzdC#6qMiRYO?Aw#^WK~`hE(Mm|-?OlnF~DR%im8N8Y-Ltv*OSS( zHY7l=o{S$a=iIj^0YrmdMAS^m%5immcWWvA=WfgzPMn|rc0*OPH= zM7DXc&*M)syk#kv3*Vstetd z$(gCWC&MAvy(h~#x~sFqXZT_00ozvM=~_=*0eR!~P0B*>v(^3tNL|_<07p-{(~?rA(81 zyvUZ{SAt({+8-ikDT{I>V-iqcJ4hb(5{Oolg^hLT%S8{s{qb?%my0B^#2!3(uip!Q zylJVz2)Ajzmgq1W#hsywdkh|lAM#clXD_xr*T+0FPq_f0I}@#s3v_l}{z`NinZUae zn|#B8yRCoZ_u7G^X=Qq?OWEu;U+CKR{8cQTUan>+xf_lwoJ7QeCKA#*URs z4}0T`AiCohje)L+yU>z~x%rdEC6&-D=&5vrqpmC=;Q7qY24C+>Po{yogF@ zTt?5MOV#I=Z={*11&8Bn44H@>m(LjL6RmLm{AL8NVfn$am;H@_L(cn3N#k;oG+?#h zkhv}1s;dxFo4ah_8x6RXWv(*2Ex}R!geSWimYtqK_GRl@mgg^n6-a!w@GTE%e>;M8 
zmYwR-7UF@Qv_tS@EJ*(-0?F;$tP~7N?W~m5dVvLvKaJqEEJu~~inr01!T}BsI~2bwD-I+%m3*Lh+|Yj(@!A&?ZtDsw%zt+xvroY@OIwXo8s} zl7r=ahrkOn6s)`qpg-K~J=K|>PuCodH2sWDU3I4yQLMqAyR@{tKDg9miE(9v%j#4w z3oNBK0fxgYf5)Y%}4RD-yueP}tQk?uqT)i|umEW(bt6>cR;Us7-WgkXiPGFySlj5WzqfY#gy&ddi!8 z-S5TjBD(32Sr1cFBye2?{$M9fBxF&n0Pb1SCnz ztAiuN6}uY@B9s)eDqv*e)4E$(=j4Ya}fiiwm}E9t5W{gFh5r7iJlgObgtw zUGjLYm1kQrbHjE8ro|r20bdj-x?sBk2}w%Rz1-#%!sT#{!H~fX+ZEXqlA6ElO*LQP zfIc#F#CF@efbHTj`OvO|;@q%ZhBGdRaJ8vsSP8!3@}^TK=hg`PfE%{UuqlQO2gH$~ z=7Q}qk5>#AE|6L-S$VL~XcpYCU6wf^Ncc&*2ml^Z(DCbr?K1qzYClrn28L{NrP_L_ zBeu)&_Us4`ZrCozs2Cue+GGa1bZe6t0+Jv>+hh-4s2mYw4YC`y%gb8`6Tb4+;j$ru zh$FU3Z&iHuV7p|>j@T}F9K#@`Wv|+>T{13TJB7t|$ruXW+_7B);ZjTNuy5Bi#I7{N zWKxdUE_poD2bZc!C8ccZEN?1?7D`t0x-=zZ&ftdalE<+L`eVBsJ(r}#5zr+qA7Sk_ zw#zf=pSn1DMSAiQk->f)v0ZMavRYS+O{Dg%0ox^WVg0raY?q9a`IwaFy0Bd`GRt9T zY?q9oBEkaJrq#KjH1 zj@T~4rWiIH5J!fZ3%1KVE(sU9r63Q^_9OLe zU~CtUY9Acj_U&@KJv+jK8@9_aDh3D#tdXPV)+Xz*-OAo9!DWk#>mF9pHql|>y`f z3_gz7F2kl6HXIN~hMEhu%RDX#7jm!GXg)Y>gXian?Xt`XLBdbcMS$=yCyf{#v0a8= zS?wpOZ)bWYL#|6VY?tGW3lAPD5Iq|8zxnl1`El7kp~mq{298lNKsYsQ3^eQ3CNl&i zK|)tkc<82b>~X|)d3g(A!dKq42MY8;!^tWqigCBWX%F2c(SqCQgs%WfU-gdiLul9N zA-mVXvAAtGWLGjg?_&CDJ%Ycih~G;F^n%VqLoYRO#9dHTz<1Q|ay^HQA&#dz1u%kgnzwoZy{} zFgXD^pb(1!>BF#q76lX`pli)eu(8K<)Gud6w8|~lxTF@6?1~|Ekndfn%%X#?DT@GI zYgXyHIysMA?D3s@De?$yRK!Oeb>l0muHd^qxwcTMQ1b+kVN#2&DP9IK2u)ZlES$TNq;aJ`VR8xYufP9J#e>Tl%4HOq>m2!SH8n#Z@F&WyuCd* zpUODiN54&8Zuf9feT{8WLJQ{R1 zd)qy9`_pjad}pN`TIY%g;sAr^x>%0kKw8@kdy^@;nEhPbxOz_0QQ_;--@6B1&vJ!z zm&x4Hd~p_$884?ynYhwN6Q4^fUF@F8dMcA#dG?vD6R5H;zu*}_8Nu>GW%JIWN#sldC*${mLx zYYRqlwG6#p8+D2BD@Iy;+;M5`a}NHcZ|#8iR)=3x?T|n05JTy($OhfK;JF!udwwQt z8-}S3JJbrf>ZBH2EtCfc#KozaPJx&_?qY)ES?55^5x5wLd#$XQ>9lPFv8IgIWcF*Uh{@wFftWn&9Edpr&nYYRs)&!;24bBNy98CD52+a&B7A^AOy0ki8L}}D zL~vgRkv@`^#92-Kqw+2}%bPA7)43fno`bHHGabtv)1Rmkr%$O2P7A=I~Y0!WM#4X8dkm-r5<&O|;T(wScSNEpXy-#Tdrr zZ%)9x|0E~FP$iL{_+@gJGhMvbM40A1p5#jN^CU>`(ZjM*ZUXhNN~ zO7>+;z7x~SYvE=J`(+9`IfJD#+v@5l-s@x)yQ$;GJb 
zqL|LbsGMzZRRz;v(K;@)^}eb3ftvo~O0@z|>h=kN#fa8*X~IHG&9-J14> zlhL>{>~8fYl2AIUGL^q-uK#5dO=cb)EplMJ3Ou!54YFN-85>Y}=&NX_ z*G-EVc?W9zT;BYy8&~;APiv-#TU9ef;#OvgxP&oNB>ke9B5niC6p6{g55kLBtD2nc zZudIf;bvzt=x;b!)-}YimeyZN&1!C4`ak&kJnUyij~B3QCyehTN~2G-6Hls`a#GEd z`4?+crfmJXk;&w9Q%iSnZ%dFq4?75 z)TQi$_yAc&tqNb8hZklayAQoiG#V@7_tGV3-{(2xcB{ACd;jW@i&1*CO?JZm?;WN2 z3PHUDd;oqN^=g8lDMGqvY*hlc{d_(b!9jjL@jmJUoWtq-BQfzLGIJ{&?2LA%+dI=v z#{ML3!;#g-RZ*3H+JN%qC{JAiFj*<>2%;&sw9Fwb^+1zqTZ&RN_L59@m_??`ub3_` z<1Juyg~Zcn*?ZAe0UA_ZNR9xRD#vb{f*MwpYa(*lR!mJTa><^2#sHk(@%y5{*ip)u zsq)B+?GkH8wU+9z)g&KCAu+Ww@oTsLQDE?xHG`zI5_NYhG=up-3I;VM0e>to_bSSq zeLX0$_bM%WF8h>8epWE3)?oHJABY;fjxuR7QBul^EMBK&(ZNLXffNi@(ctxzIim(? z{VlS0eGx)k)J5njxG|_pBHEG$Z=g&XHJG;!B8xW^EV`%*Q&ljiQJ0K4(Fbp&%o#OE z`yG+J8zlo39J?r$45i@Npj7xzq*Np6{s`sLsKUHgJ0vFhBLzn;I>clZ{8ZE-y7m;8 zTZFx0(ZZ-i(qbkh2ANXEMP-Drf<=SMBo+jpj{sZR7!D(=07=z8(bod zujr4l{X`atf=e*ot;w8%$ zEGKV4C*Vyo2GG@M%ZA61#bQw-CQN7i(a(|?Xa7LX%x$)_;SLv=_k``=KL_SNHF{+3@V5=hf0BduOD#9t)Mnxo9+rs)#-iKt&g;#NK#_l1yGZU91w(cHMHt7pzKzpOwCd$gI$zM8)mZQ!e2dK9x_I ztGCOv`e-e>-yI;>61~&2dPFn5d}UBQJz?okF+K7r{q)GE#OeLN4d5zu%#gZA;Bd|K zpb#|uTiK6SCO{m;lSUjw+sCo4ze z>1G!7y2@rQ5+ULWKq(iP%5#bSwcPs_G8NQE_%aj*3h~ zt`0~=h0bF5+r5K%eADwc|StYWFiMC2B$7^<#y6-R=I$;koP7~|uBES+$oqWHCYtl~&` zh>EAdEVUa+1c5DSBv%vq2=;0nS(L3SoKctqQ`KMMkAqzjUH%MEA(u7XL?tS zdb)@1o|Tq7j1vb4u@g@0 zZI4e+`;&9M*d$SY z9@D~qiMhWM1L~s58HQ+2D3qLICx8c(w_82TjTBS z?r3mkJlSqzaqhWgJetm0H^5SNbJ&|s<2D?LuO5y!*VgTZXuSyboa+sD`|)!(;^k(% zh}6|sS#BaLSx5g^$u?U_N!`n^w3a;rpNQbKc0=m8+8&Hzf1a)+JRR78;*w$x z0IV&iVFdkKZviY$`~9uBad@G%7ow9W5H)=q00p#^UMD38A8v%O;_2?`xOrmtbP~cN zdQSx*GZ?8%o=7qBkRp6Ch~Tz&hjH_+-C+{ibVG;`X*GnXS_T7$UunS2-kuvbZ{FGp z!8KAz5HiUAa&PsqGo$c68^O!$gcrl9-ek}_J?x+AREt1|U_Z*flm1qB)Z0dd5K1r} z3HUj%Z*x4Fp+c=c$!J|cY9%J>!TTC;#E!4_&C}`N>?mHsUlU7lB>pzndM+G9wZven zH|yhiFmAj&GO4VE#JM}|cTv&u#9(wbdnACRz`u{+YDDZ1&K&9H{+}#z#$CM`s!QS}{54Va%1M=wW7|Hjr%Ilo z{%^W7MuD=2LgWJftJB%9NYn(x&<~iDv4LD1l?d_P{2O!B%UQQqbL&djk8(k;n>MVy 
zS-cc4-hIadx7f2y8G5@#7&+M+JsvNn|DV`0U)Yr;kg*QWL~y4;wB6OJ?fa`~h+c(u ze8eV1(?JvseA>LH|JYEiZ{Sg=HZ^o~P%hgDzy#C6d+}9n;-C58qn1s4FkXr_K0Gg$ zjr>O&@cIxI$Mx=FD<96LS%cHne(UOH-b>V6JO6Yej7`&qK8%N;mOcz2wyE-xnhFj3 zOdtW$#y-3$hY@Ak+^ZXNY^)?5wR}Vkzutgb0{Am-@WYr9YVpGm3TBT6Rlw&Xc)6_* zx!yw3GWck-RcafA?j2hWw`gMG?IR6n+wvwh`_@89LfByCh965q*YNu&BkeX#AW2ce z2#tuf|Ap{m7GRFu^C3H!&3T#HmHm~q`-B1SSk+ z6xdMPwm}N42>EtqWDwOR0Jx(3=k^x7kR%{A=)4Xgms|}(XY!_@9fj+OkA(@;T>#{0>Ycvk{F9Q@}LJSYZWZP%b@%-Bmz_koVUCcNLI< zw7mFX1!M?0F9ERvGLViJuEDCjtH5Rij1@?C{d-wB+y1lmY}~W}HduibXXG^jV@qX2 z%?c~9BIJt|kU_k#0zyP*eD+v@4K>b`&26p?NpGxx3}RYzw89DqF^xzqu>uQ9c36QG zp`|9RzN*&P646I*mjm{tq@-LC%HXfyQ<_pJoVFHB5mGR&cB` zR)Aa7#JI13gqaM>v~N(6(^Pryz-C{;F$b)G$i<8mFg8#`%z0o1$0}h3R0zc+ZDAX$ zHxQ0j#R?cCRol-y?JFR&F@OqISOKA>sUO;&Zy=B%g(E3lwshZR^63TBU{uFwW6 zAT#C~5iX1yMhS%#kckAu3JA%{SOIy^j1`biYHU!_D-k@+f}DIA zD+tl&7nbcVMk}l!=b|cBfFBDnOWk%AE3DvnWvl?VsAjBygc*z#P+Y{eK~0^)3>sLy zgK*pdDw@vfKnMODmfPuf{DYsf#1F!9Re%UJab>yd z0G$$dDwwXg9!!K20-HT+j;0L)5eonir){?UCC?%f@Sht2&7Qus5YUs*s|x|Cp}tOW zMgfs<N|@F{-6POgbD)~g$QVA=oN>6)L1;Ce!+f4 z`vSZq3#;26gAQe_k3WB<4oGsELIj^`!0~G4BQ!&r|WpqxN>cH|kE`yqg`P+@DOwllBSx)2)LuXZn-=XtN(5 zD*Y;{D6oXPSOVYhV6RVteZkuz{zz^m54|b6He=R3JsJ15Hha@qdc1V|$^PK%<1>07 zZy6R?Sa|DAe21y%b>N-C(F;x!;qQ((L{3jw-dIYH*hi;XGfPy>*5T55{7bO7hCT-G zh~PHXe%&TB>5ZNsXBBe;D`e1$$%OZ9>_rn^Sc?__9h2-amFxnHDG|$w^d=MjJYFSd zB?B$|<{&JO(Y+}7T`77D3!QJuT@|1lPikNJ^aBX80guA{?kYw0J{xSJi%fdM-lV(v zcz+Y!YO=ez*+&ev7hb3a4x>Zh?KE^^ zcRRhZV>_|?I8ta*L0m;tPz5zqB|p*c?{v2Y+tchYXY|6;$-&lB>Fp+fE;Zmz22&cq zXm`82GwDy!LH6y}qI5cJpRZd}vTVk~_sY#`X@6$t5GB6iJsEpd{eoZKtRhAu`k407 zueNd^mYM7&{O)k@M89r5F{xFl5NR-6Sb#r_fV)o0N^s**f0}XD&JyYz`P0-a2mr~? 
zilWsH{5L=D<`NHOUS?Y}{&@szl?{_>FJ8QDFdQb{`uC!MZ#g67cHl8@TXl#~}17grna zWus}>84YR@&>bYjEumJ0fMwt?Hjj0Q1AeLr_XXgJrc6@scmWA@ahuHS8fiA4`D;+K z-K|kHM}jNDA2$LkW*)WL1ro4Nh4^t{+k>N9QwPXv#u}U|2}{#hS6c40i{NiJV8cI# z;L9&vD^`Brg0B(_x3m@lIyt*~KR*wjRBSe^jwgLbSaGuvGRBnxScgL=f(=|z1Ez0- zC_yKxe)z2jUTJ08*IDnLi5+N%fI!4Y1w1fC4l46BxS zqu-8JFlT6cZ|yjs55Dv(wicFi6YtHxNw{@Q8)vjc45`?Z>LL9@`pS=;=D!C3*$>$| zNX82RVXG^llEBO=_??K~5T-N)TA`2&Shq>lr*NtyZe0Om{Hm}qOF2UUSF}q-Rn|GEc~_5%#(V_-%62JjYj7^! zdtbjdymfHSa*-^{xBMGB1}U27`{{_kMoUR>erqr?@3CDg`+{v~&Tu3OUfV@ll1}Xw zKaqkH?uIf8(VBz!7W}nxm3em3+Zv$N%qbVJJp${S;QMu>q~mqZPrFC+B38O0X1nn6 zo(Nu|8!TN|$wQo(^~7%8Mmd)Q8~j|vzm$RiJ z^YQYX=<94SJ1;}>+{eq8Y99?M9Z|1^W%cR%01v~1@dKsz0et-)v!jaT@PQ_r32(q< zZ6W6`^i@l6N#>=ovs7b`!o%@{HEFE7s?^y(ir^k^ouSGLr&xANjYl=~lM#QnxRc%T zlcM;a$Fx4*fNnThZII2_x97#=&6Bg!_)6)olh8U!xZ|dR$hXxg?Z|!!ym;-hc>nTW zk^ds$M-sZ4fRbOpDdlZm(eFt%Rk%lWRTin%DXP$K((1!+HsG#s=7xG%j2wp-b5gfg z*sTaCW85zVwk^De2w@4?H%V9}1$TAgV^Qk>ESQHS|Eq$y zsHR3af$v6eBAjdF(E_CiiVp?UY05nJN*j*0l3rC`UKQN`NTUw!I|cW3$hMjcNDx$+ z0=z$HaHMA<%qLIOAm5HkQia)|d=j#ZDApnaItx-n(hKL=(qd|DmC8gZH0>(q2Lk(F z7L#%G!5JmAn(C#CkMkmy6h4Cf8rk+7lc2^rqBFRO-zJXoUj3SBl;xeQyL01d&<_>$IhgD}Q-6eQ~$i%e&~Fy)ZIkr&>61 zaN|kp60FK$)n{}?abr1xkA%RSQ=`iScp^O)A>NaHv42h4ZRbByJ$Hhm6L3mr&Ukis zg9?Bn(TQfVO;_l7r!Dkvy%qZtAykjva+j#yZi{#631h0@C~Upl-> zaVQuR+t~b}9XcM3H6c)6N>#1AT2Nm)F6Bmv(WArr9`%Qg#8W;c9VYjh(5OFqay&r? 
z32u&e`fcu<{(2$*feJFx()s%fS-aA*8e{t1{=m(}eB1G$76I zswT8vI|$HI=oRDAml7*$!aEw^zapoYAU->jyh6R{KMwV8RdkfUoUzWnsi-_K4nEg_ zhwC!PZPK~>3qdRF#D6u%EM)5L)=kF0tx>e}zL$!TR(70nG8ZGY6&vx=2tI=jv#rrI z)0PdXJ*m_D>m#0C_)=Y@e_EqSQh2Ifr6u#tpWfZ^*FtQCSA@U3;ww7k+H5%mwrbOB z7?|sS)fg@qgr_zsplsP)O~!RaJZW{vZtznAJX#lkZ#KYx8=~I6SuChF@ndAje}BG$ z6$4IW8_MDTMD^8cbO(C<3Z+@zI*FzGm>;jpImDJd2hbj=t#D1Zy1%hD>Umy(KWV~F zy&}`@q^!-Ea{iJ*Z4wj>qOjKq-MRNToQb!>?A%iXjhs(=ANlW1n9N(@95`270g^h* z9Mx?bj|;mPQM=fJ)A1W?wF@K+_~{CzycvPRs8zA#4X?#s21+<6Aj;!?&CES=N>%6>!CX`g1q1q-QPLYHSv# zNKk`pUDP?_Q%!hNJ>F6_FSlG_s{qz@=BZwv(C@&TW>F-9&o*TDKc(A0@hMEKrZr>Z zTjy|A5H9MoF2XhBb4_9Ts_;+*;@QEf2=A=q4p<2JaP>F8@O3qQ8Z=&jaBRm#AQlC1}o-IRAgI}od zeiAQIHeBN*cd|EnJnO5L$#;ouxSv;8ur_d-7U0h+96TtO5LXE>1svahTEFQdA0!ny zuLjCO^+t0Y{-Ja0P3(f>T9LvYa7;DQF&N1vulFQemS(ztPjan}O(v1fR#g!)i7U88 zB=%sh+{m;#E$nTR2Dk^|f%txp07pbu)`ztx!$+F3i^PH}j!RS42TNMWnYHVB6{tp@ z)(hnhsB*jTX#8*h<#JZVaGRca<>>?EGL&{A^KjG z@>!MzS*58v)=nKbnTN;=L)Di)646Qfxp!7Ls>CchMIQ7`S7q%~IM~%Z>zw>5e5k@6 zb~D-fQ=mjH_FLF(8DKg#%Ow>inubr6WuDWcSgnt;vgNwma&rk#as^hEj9X>`wP>}f zj)q>JV{^^IBd%m<%+}#&J#!xpxqL?uS7^-gkZrx~)Xcid-HV8A>ptE+*|D~&<_Or6 z$u!{5ic#@+_hbUsid^K^lL=!m)tqNfCX=RjPbSP(Wuc*ZG8qq^J(-jPCL__X7CO<$ zc$r^MCS}`@lDv8{VL;Zd{dzKiu;`SUg)pXy2;GwjlVZ5_?#Yg|%eg&FPbN%S<;>=u z>{$DIp(m3GvPw_Z3DA>Sikzk}Q`KX+#^&3T@uCx5F5mXz*OT!>V3YUk$z&wad&Ryz z8BbQ_HQ`cV+5J5Wdl>`s$$({6>@%QmPbTBqkN~}UGJd?AbKjnfCn`E(5jH~iWc;{< z8B=uf?#Xb-b??dYknPoz@vN)dy%7D}da}+udNP>?E@)3Ca4ot(o{Ri?GGPp+n)B?* zWYYBR$%NUeEHqR?lJVf#lSw(4M@EwIVxN(G*U6-88&Z;2PbLh=+O=O#CJ+{#vIvf$ z`z~Q%h9g}c=-rcbYU{}ahE&dM?#Vh_Po~+$Xl?K1USq*~olGXkDm~e1fS$}!Jd?nT6&TTiw+kDg4XfeYG` z30y03kzY?HjKNfTo;{gNn%+H`Fk6*{hDt~>9z1(8DF^e&NHSjRGm>vlCS}`@lDv8{ zVL;Zd{dzKiu;`RUa17m(2?H}6)!LJ-*4C2=45^&i+>@w%JM2fbqY|UQdG$ zd+uG736kl_UTWB(8K5Jx6nJ66rW%WDf7AE+YT3ES3r=*nARQS$M39b5MlwK0#*5?DL=RDw&LHL;CaT$oTOBb!0qI(GiQV5xV2z$0f`dSIK^= z!nHV(U4vP`RrX_c;o7Sk|cM)UW2jh#jI~0u#~qii{9gFuO-mIqeTbk3%OuTFTaZnB}fBIX&o2(dx_|Jm`T9SdQX_ z*5a8nXKsRDi{M4tDaUZCHyQL!5BsM&>5PI@D`Mb(7*ANWS|>p 
za%sbn_-aFPVF|amOWC7%8CBo70$)V;vI{xEP_)B0BX|wV501U;ZwwrA-ggDbO|VwX zigQWBd}OZaAt^C=!Z#XlE6ZGEcH4rZ`Uy{VMJzi#gY3)JrOk~Vf|RF4gEHYe5p1yR zRF}385B#KE#0w8en0Wr92qd>~vr;f9wYyqY>qQnc{v?8%SdJ>|6?H#sje8MAh?Xpb z!FL*PnuAMs1g?%>VO4QteeCl|AA9-6LDa`CW@w+J*3cY`249Nc)hrKoA%k+I4i}n1WyOTE z5X4dfRs4A$zOWOCh%p$*C&6#oGqMp=_PlS7?HnB$5N>eTTpcGwSv_yeV0oz)RYfk`24`AXerd34FwM_wr!C^_1=QrJLp zC;{phbqf2#NVc`ppXZAsiTy+qPO!XKHTAqL%QPC}G&T=oNgk|W{%xGff_P1lG#O-?I4o1;goA)JUv&X%SkAa0QZHEJE-LuC3tYppDTa;e zFak@>0j^;mFI$tuWeeAEmzyGfXh1gjmJ49RF((8GKS^tV4W`j_mrx2|R9_2b`9fMVZIDE~qVWweESOaz!bEz08EzS7A!0;`{Hkn~m3=mEY8$-{nO=bv`t?J?l5~xjd zYLHp>gfQVN?;zn)7Z}~JU3%J^eBD>o7`qgWWGDk<%Q~7CH+IB!$ut!BCvLn*faJh6 z1rF2Up2@ zIZ&w#uA{kj#CAEB10*euo=ei=2vkg3URtWU-!Vhz9kE@WN&nQv(JNAympn{Oup73^ z%~V$Fim^!&+O=uMFT8X6tjo)jZrHAwXAaxFU`fmk+ck3Mj_n#ri^k%D?V1O{sm$OH zfxNDS{W@a10#{rz;|$EiR-h$FGM{qiCQF8n*e)3)%ik!t2pjhu@I`^53$`nekfb!P zRRcm^fz4Q`SJ<@Yi0zVj5ZMb!&0qGWMUXHK2yWPJr!H(46TX1hF2k8Rh-eyJ*^XQn z;3pSEsg^G7$#uha88*eR;ea?Y)LgJ#=5a~5(Cu#UkgUA^)o9ic+hv&(f`p%>ivZvu z1s%U`*e=7btllN{t;d{0?9z3^b~)a@H^PG(w#zXp1_%eNk)!9jM zC^B?TDgsOr!cWl={ zxYQCm?AtXB5dhmIL(C1^B?D&q;8Inf4wnpI*Wt3uCdPfhj@T}l5jH`g8FmPw96gt$ z#Sy4j*}7nvWIRRMGW5od*e)3qOI@gNeWhNcE-!hQICsN#InHIZt{9t0?d!#h9Xr?lpV?H;qicDXebO+Van!FliuG$1gz?b~HI z^-X-;|$DA)5w#)JMy%8SVuw9N(F+eyqYz#E()+RFqBte3%7kk)Mh(MFR z-s!M!mzTE?CVb^xd!TT`c8|NA$>7p1lA$gzwo9g=z(09x*Nab|w%Bdou7NjqY}Y`z z)Dk;jyQU!mG{j_xxna9xz%sVWRk8-uOSV(N%P>pztaPiB+nEd;%K?%WN6#f`aRhWp z%S)v4v}K`Kag&qVnG8IW{;7+jSEMd45gCNxcqRikQ(3Jm#wJqxdhv2$v0XFI-qIaOY}ZIyG!_?Z*F4CDO=WJ_uD})7cUk2V-ee5HAau%e z+_x(*Ev7REd{Ln2g6#?d|5zi~^Ib}J=eY+x?LQ?aW{hZR88@7A=f@8aQ2H~+? 
zhBG0E@MqZa@j?TU$zp{I0Fg5+1~bPq85lOju;G9>GSpnKUFPwM;le|Z@?fD+q$9S= zGA9HHKS>t>!b1u=e%(V%pmgX5VD9HU}@ zaKIWldTwp99^2K`6dtBlJxI7=yPSLi?Azt&)gCC&VKt|!oG8ZK|E4{3mvqDH^z7Vg z;dtCJoSiG7&!f8dCZP09UmxmdX8NRIncLvdfP?iMIgyFTrD4_V#^S&i_ z=dz;V&-+$ckFH+f61+h(Eq{yP5WG^jg8y6b96>e5TovO#Z>t!>_f95ZmN(0!$Hr8; zwnNRnSld*(yGTtosm&fQ%1HFiMwpy{98icwf%FkD+L!_xpli)eva!c>)Gud6w8||v z!lV}Ryy4I}Hb#-zcx!6;7bMG5!OTkEZ={e#9k&fZ-`sCU|sY1;Y zK!!;zwx*3Tz6L0-c68E5>F)LbE+xd40Makwimy;~?0?0lzoN#ct8RQ}A0nopv&Y;k zPln6yp{VjBKD`vfvRK6gk)ua1#ZIl-JJ!C2SUtA9HArMHS1v(xpgDJ!>7YD?1_Rj* z_1W9h!+vivLN~{EC+L=UQh<~eWRS9`p8WLH9XnBdY!BS67-e^73+bcF z{*~`=*-Nn7H}C8W&!;ku_t9^Ym)Sj>R9~ZDw`?ZWv10b|?rO5a&J&l?nWZ4+pF|gh zrzu_a8TO|1T+w9+Ds=TW=yK8sYO&@5_ zc6WyT_U3prn~aCut^Q6QUC=bzJl|a{mu(A0h2j8%=elUca3Dp$M*ZmwUCe$iZoGU! z(*|WXti${E!0TA9uFX{C$ZGg(h%lB?f)W-GZ{#@=i< zkREQ{+A`WL-J0)ulcdx{)iw3H#8cptd*EI#i$z5MS604<;J48+TY__nF zIBY*D=8iJOvnudzv2w>@$l8LDTmyt&mt9RnA5zQKW2WoytG=}Z;#(blO|?V*utN-` z!y+4W^MYq*5bpb#ux%KY<@sts)T`XcwI3i57pH1E1!D5JiwTlvodYpP;8Gy&x3gxZ z+qqDInCRmI1!8ifo6L}nffzsR5{SvLj)9nY{8?qfUV*sNwhhFk?FN2*9CDsCFG?*~ z6W;>_;^GJlnf)3oV)D34ASTZ`2V#!Ev&xFSD&k`oDi9NWNX@wZikQ5AEi+_eAjS{7 z1Y$C*V<2W8e^!~WS0Fxi!2&T>htz`W4aCL!Dl+>u24eEKOCToCItOBoz_ZGVy#n!Z z+dxdwH5VL$2w!TmhNvq;+5DjivbkAi$i_g7A9e}EWLU>Q%sl?AGGVVkeEfn1Vyq6S zQR@Qn*Y?2uWh};SRfF3Er2~qOup{t5v4lo)EBo^{vB8Zw z6Wl=laDFJ zFfM;{0_OcEITeN~iTuPble?Vh;=LxqH1F{wSDK$ELAtO+8GgOr3=zTLb0nZ?usIUw zy4V7g_{x#s3Bl$_P@RBtB&Bw+Ig%czm}5(|V~qA%o3xC|?T7qilJgZETLnZV;j2@J$?-O64qq zdMDs=sSNi5Onc}0{L#hFgFWYZ!`*&-*~b3mz45}vp6&5gf5^TXu~$`O!|yM`;f&-7 zME6vuJ?oFA<4Jea+wM=vs~2xyBqRJ~bK@_XXpBX4-p7HBDyY*&H6(W9rEHw!5v`(2 z-Y~6EyHKk+gwkinzNoQzVwio0J;dKLCFn zUdUS2^zq(KzuOyab*IC@W?@-Z6T@2BcnLMDg$?Qd><9n)BJ5{IkC(7*CyehTN~2G- z6Hltxd{WKk`4?+cHgEm9k;yl5n@_B5-Z*l;H_A+r*u0q{ws|u}V)JH-*yha?sm&*_ zv^6`wgN*iaIM_K5G5^8egsU<`E5AV$FW-sc)NC+2&zaqU#~8yTmFZq|q|#D%bJ&|s z<164${G9AirtE|G09i$e313@;7i1s154}z_o+#q?%4KNZ=Q-qdYq#8c|Ju>krEtDO zcEVl?=b358jiY-t@5dg1A4k2KU}%bvE*e`s5&jP%ILOZ@-ba0ab2y!UBqp9jW^RRp 
z-SO^hXLr`k*q_E7IJ(w2998*;1g#DzUyAb7B>U z8hcr$JIo@}YthL@qmpI-TF}tExxI zJAM&-L163{Wz1B0-XI~GB?7d1cHN6AMrdPLm?cOPMs8C@Ez{7OyR4=Ayy; z<$)mnCu;CI%A8SywEh;^yRKl*Meu}fFgqw|@OsLmQG->F;xYFo<4X3 zWzMKU+V6<$-Jp$zE=nasDR?$GH~d#ps*!YmlyYfQVcx495)=JV#ayb%-_H>6+|p}L zak)jt}%X^r*8awUQiJFM@{t6=c|rD_TgZD%_EQuLgr-V z0S9FAdGiP)oRKHYBVbP_pEr*{oQ6E{{hVC!Rsm*LiRoixM@H^oKPaK5#>yoYi?&{p zAzv4O7BY%_W**dtSuu0F+kI{fwPJR+RHDavA2X@f`VcAR;zEXDiU|C6huJV>yyk-* z#><9`bR6cxk}2lnMu=gJNa)V$5<;_J2o-a3&{^I@;MLgNNnBVXK@^F)n2#GFhBY4< z6=9m2lZ%W<{PwgXhK!$#iu}OL$i^V_fd3{G28=pV0? zPpw2J)34{z*93ii3;Me90zki9OqXufvXz^IS454)r9I7k%kf@7$1WE~_{IC7iT3fK zhu#`R@ji&Qi*L{6-(C*;nWT+=iI#)1-{ko@{mouz3{xsGz@F^G%=ex2`vb5yKh`k) zX&)@6Z^58%6Epz?ETiwx1Xf;BO#Di?qH`!}G#B?QE$`cZ;Ie~<;wzqa)yh@;9SV!5 z$Kzpwj*?qBEF3RK{{l}zzL_dzcG5qC$4Y0KZ5~J`fqux*ZTj~WB#c~$E2z6dA01@p zC1&t^9_+3b&kHO+?1JnS-6H+3~u#mK73>~RP9zxy$*8p97O{X&$p#_^%sEX;p4lLo9%hJ($ z=#%N!)9Dk)Uy+S$?QW*er$&yd_tA7`j81UuQR`pe|8Sc5!}AJZ?uUJyy{Ryp%Q3tL z(l=vD%;_W7p)7Qz^1lX}clXbd$J2n920kzA>0D{?;Z9w z`;U)@Tm4Ddc6d`+c`oD^_2DncV!F(Q-08CW&`yr=_Wh$l>Vc_o38MjRX$ zbDR3HD1A6{1rlxI=ffd1DHJ-8gt{m|ru&{&7Ecbgp5mo*CG2~m-{0w`k(3D?<|5!Q zWYi=wIfDMu8$C{Vp#h>uCTjr8ou!lq6du-KIsJH+9dlPoQ_>daQ;KId(@6|Dea2V8 zKD6_CHb7_VqTFzje$VsG=Y!%?5`Lxtr+Sk?@AMFDaX~RAt6y+dOY5bBe4G+89tw-8 zsm^+XQJPQkVxbW)tUMpBwLhL(PPW~V{S)%6*3#-VtDS3(cAkF@e{*EA**S9T$l2bt zTi2gC)9W9-A^yzg_QG4>Di(ejpPu$7=X$BqV>Z~byVs)D50o|%ph9o$P`rQnugHH9 z+G5#ky^ydb`z!21%#Pj5Y>gjQM?ko=!*WuQIP9=@x<8!OKkV-bS6A@ZEJkedzJxs~ zA7yNmth&26Tj6`Z$<9{hQyf}3;0+B*P7CJwt&T!if=(za?rP)I5X4j zERo#UwVvOF?7Bk}{zJrHBqXz|bo!h#mfVLx8u8Lv`o`8eUnA&vl3XF^%*)R<%m$UI zLU2I4{jrW()_RXCs|rWf2pT>b!7HrnRPZ$`;S`?a%#m*H|H&d}5MUL5O;o+I?!Br~ z$SK%DX38S6>18^WU2L{js(AKvdJzvGhJL`LjC?5}CTkkTH;(e7B@=r!7yMl*taRXY z4Qn3Z?G|AqLy9uID6wVUdIFRacqZa+*p>t|v?f`xedn%rrd@g3k!M}G%Wg+92R2k{ zp@B41n;JSR&a^H3Gar2P=~gIyB4sxcj2B%Sk9fIp4!nG{0k1bWvz1=7tvq*Yyq8V0 z2B#})*Rw<}?caUvgJKErbR&#S(}q5bhoF`|3?bB1DR3+rvAir4J`+fQw6PCw%3(yw zHTPZysBU*MlZRh#z%2p%88`T0%m}skVFO>)9lZmvf<%`N$0vAQU-ci&eN|4AbJ3vPz6YP8 
zx5K;57+lUzz9-chgUcUFoHbq;r3b@7Ug!IFX4s7gN$@}u?3OH*0x~koj~`M%C~A4B1t}mS+wo$9 z6cCC!Ui^>(n-TD`KV7hQDf-ATsc2}!&&$bFhce)U6;eQk)XB~sDX^ioZG#k85vmI* zAcI>W1?0mz0oo%4Hq5@}0gOn26(L`w zfDB?%1=t`3gc#R`aOhabpcyG3pVYXu zcA3)0N!%a^E2Mz3CG;uQk}E1(FAfd12R2AS{^})LjB%R;0wV?a`7oRd=~^xuqyW=Y zyQqe~1Z8vC1DC%CTXs$nv9Ll4$lI{lkplKe0e;Mk6y)Sf8rOQF3wVGBQuv`pDPV&X z0k8ski*>P^z#>T6 zVg-y16kN^&E9hj3i1D5RBUV6#P)yQx4~NyR0{R{{>(X<^3dkJTKFc2~AXGK=L%SZV zfQ-!Yjy9#VZz*qs(s}uifI+fUD$W%;j zxPcW`K!(xD&K@hUp=N~@SP}B=&B!3Cy$M!W0U@FjpgmS#Lya?KbDL{J(tB3{8N}G; z+UzPI#55wc#0o4Z*ep;IxI!Q{a}OdHs}(s*#JAeVvM zHFZk2!3uIN1P`6*u`WGl ztbojc4YdAP0imj?A8Nu1$jB@|epmsasO6;=tbmMc$BPYCKq%^X;To*USb@z50rnM; zVN%h!;R7uOtFXcf$dEeO*<%GZ)U2=qD?+}#85zV2D`|9RzNE5MHhVO7Hv*kA?6D`N$?MKxmuB+N*#0>0n(!4a&mg5wTY0g;Ot zD`0G((5NS905({`@k&?$6@oQHK)Fc`>2CE)WcT1JAYa!z-=C28PFTNo0}P_{AVKf@ z2H6@$j~bxk6kivNN9{I)cl?8&dpay@adqRoxI(AIoeHKat_Kt0gurI+ zj0Qw38bBPg=`U~i&y9d)Ptk>do`haq2uO|X>->X@;?M;6YDl3{#GxjW4=7#^d+-;p zzHy4+4;pYss4#$0h=7)cUU3LWEv(&|1p5{33-FSB&$T_0eFMVL%7*bJSb~3~E44-N zsRkUcW}jAgTjFg0#rJ$r|qQY{2i@eYgt1d zFW-rd8kh}c=XD)&$(#ze^_;ZU#=l{ozl8G_ETEh0tqNO13w#Vi=19e!3w%r?%GTFd zs;_hwgKf~wU$UrdCWXHJ5RnLeO*p|8NntQ*Z})qn?i62m((O+s<4L=G{Ym*ol%%4- z67FJ&VRplVEsj#0N(66<_#?TMJfJ+#pP);*$URx&eYozC!a#sZ?$CKJuK7G9)8@yIkf_xP2cUS3jhAxcR>`pg(!``I3`FMZxiSBfF z6I~dDZUoA1?)sDv@|2989N#t^_hxN!jYe`xas{d+h)4ybnU})RSYfF_I&|I`w`iy2 z<}PxtfumLNVNW>>68)4k)mMq#$B{yl3gSvZ!1TtfI1L1-{w*%bO7~vU*Hl$bi7qwZP6ksNz-V{7 zi>_*$q5`j--HzBkU$>?hRVW=r>SPNGX@6$t5GB51eEDNlzu=cQtB6sxKZz(;umA5vaUs)=&r|IVXOj}xOkw0czaqD(^#w|3B3WBBcyM$?r z1OG3At-3|P6y&uPO3oeAzi!kgfPno}ZGxjkSCu~PFC*Bj%WBG9Ty4B_Zleeit+Hh` z(k21j8a*6rp6{Z2=BI6|LclU`Xqq(KJk})+_^Bq`7l138GD*SX1tiqPZ8EcKq}hDt zuR$I9QRFMZ72%H?ffX~4TI~V}nDOaqeHf|`KQ3&0aCB?x0C~+=gHt78X&UQFYxv}E zH(##1+Wf>P6Qjc zD#cr&Qt(?5ywb|Fv%$Knbr=2RYMw!aY%MJ3Cf=KWlW^;rHqK~?7*eqr^^_hDVyF2z0kS?o--m1+B;$pE zu+^1NNnmCb{7%Gg2vZsYtx!mbo`tNsN^J@5U>m7^w>n32$)hskdh1OrSIXiO&?*ME zdT-WJUPp49{mf5c-6mC^!l{zN0`?f=SA~sP${7l{qFpM&SYk!NRga5iC;L~nOKDq! 
zbMfB$`n}<;gL9UPWLduD_ahmks6u=xAMojjzeY<*aDHnr&OI7hEBk_NXwGn?n1I)I zk(Q)WyTwnW;Do!O%tEwOFDk{s7W}nxm3em3+Zv$N%qbVJJp${S;QMu>q~mqZPrFC+ zB37}cVi)k<6TwS#gWWb54)YLaW<9Z+cOhr{0QPec|5grGXCeAB50Z@+nb~#IhuS5F zl1kBb@$o%U5XgOYLKLp%AVz&Ar^?cDpKBq>La&N`0sdJ7PKF#C%L%nXZlRt2%2tEiXNyI$ zIUda><6$b4{s`?l-aOCjK9(SP?&Iasc{HeWM7>ovNkGIZH<%Lr$yQRjX z8v4nIzgyhNZuv=3{Ew}XG@zVh_VD=zbi>JNgKWmWJufD2o}8V=S4w}Kgw|2QU3eWt zzQlN&txjoc_DkT!YnR3Qm;Z|V7YRQCN`3*Sl(%_BzbDyL;U3jhStM|$_?r#5E1bEZ zUKS(A;l-TP?G?ndlpv0FBP;w;VB5lbh!B>LeUpS$QgBxsF&Rf6oKZrnsg7z-5u6I?aoGlbtN{;&)2rA5=R5K4^Y621*FduS$?!B$ z+x4C%oDMCHa-+<&$w8|=Y^4tel#HlQmG2eiwM9GRhu{tIsW2U~sQoYi6s4+a95_HD zXmZFyOH!+@Xu)!Z>f{l4D1JjdI_a*ej01i?f(O05DqHl!a7G>j$Q!tLDb{_T&!!;x($2e970M`pMVSB=2C}J+d)twMx2>?$pRa*CR!0C`d|a@4QYfY2g5$P>=FHc z;vX6qc8SsWX9k8Sb`iA3cM?@{vjJLwm}W1R(h#928{oYWJP<@`DXi0$I|c{E<}U2+ z*S!*uUYl@tI4fm5JB$;u1{DBD;zgXk-l+0w>#f+I2%&oPmOHoHfK_j|#XHc+7(sFm z&quLB;a*_`S)riQl6KGatx(W$u|EA>f+vXT#yktTtx&uz1mZ-egucOVRRoSFmK%wE zj^y}_pn^a_2Cqx_qe!f91FT61PBnAy>Ze_`xUL{wF!YDX=$z7T(3=(Li{;xar96@ZcI3jotdr|+2Ng-0{qvVi zLJK+o#7i|Nbxs8FD$WNpcqNs37?79FGIviTP}-~q6l$ib03a`&Zswvz!i;#|Q+DJ* zKwdft5fcmt^3owl4;%pG6=@=n_roDTUP@O59tH(@>1aeU0f0kFQ&cE*wpJ}*E*&@L z#X9ALL4E1)D#f8-P;6uKhj!?AG}eSbeJNG7@@heS>9~{|Wz<109X{&cl;bl$ZylUD z)1RO%p?yh*$-V6|>d&4WPtZYvo8z5+n>&~`KNVGnBowAEzz^_&D8MGOQgy;`gUl`m zOJY`P`5~kfs1xR;V+Q_C6Si4ePW#CebC%|JRTEmT9R%np6if>F)~!K(M+5v<*C@U;+I;T7R8ulR~ixi(u)fvwu~ieWAqAUO2Oz7_djHHHfY;i*ju*t*Ac zC1YuI7y$kGW&`}UA?od$#e!-RKSqZ9_vb5E5$aYEC$f&<|0k-iUZXqE>sM$IR;@Wl z`81T~- zN_jH^hf%K_j=LRg42$hYGcdAa2ZTLrMLGf(yUgnkF!bU_e4+mPM=lx}y#r!cXa){KpBox@o{xTw#% z2-lF$HHGP`!b8%roNVB${?^fn2K&*%%xJnZp7y)xReurudc%I#wTAJsT#-vG;ol~< zS0K(!14RVmB+^SIV!FGvEW`!)nF?5#r4SW)S$5f204GS^5F%^Hi~v5_gtM%@>sAIU z$)+5InY=5xD=vhX>|5JPtjKcf#rlPbA{YW?;? 
zCBD0>6_5%`%A(g6f#ywAoo|H4;z#OJC)%^6e*0oQd;8)SD!iY>GmehRLYd=uJH4?M zC%N>ytgl)o-&u$Id4&aQ1E<=8KdW%?pj<*+CBPJLeE(_vrjLA(vKlB0)f>%q_=nD| zH?a$jYtb3<*75Pt%33*`=|a=Y+o z{BQu}a#qD~o1RhP=>z35ly)NXaOUI)TO-MJJ&(@r-&8oBRzAzJAgeTW$FeFRnTNRGls@&9e&m` z_u-K1-k9Yf+j`rnnRS)B7oxXZkzDug$&R&EHAldnOs0W?*UF23{ymw%wdevtdNN@Q zrkeBY$z;;>?#YDNsw^~APbTBRvnP{sFprERt zkX3rJPJo`wQsjjZmxO6rW0NdYE_nB3yy!%i%k!XLPsR^{P2RI7laWO475nyNJXw|3 zgiC>C_xCL9Wem(G1D08_4h`VjlgYR?BtWm8j2|!Oe05H$3*D2+nW?=e!y(tbC(A>& zS5L;Xu5$MxV$ZE7>&&AklWD+Vd5nt3d!0<+T9J$V*2#o1m`cyHCzDCjyC)N7tFq8g z2}#C-XHO>OU>+Gs#*2MM^6kl_Y#UONS5GDk$lA4EPbLr+osxQgD|AmL49sv;Yfsjx zttS&0QaQ7^C+l!M8GVnk)K7RdXW-kD$pl%YCtD5BlUa(q+?y^Gc3l#yu}$;t$#~I; zE*GRHC;T=RLZlF3L0=*W1oDxV3L0LyKRXGbPufXROpQ3;>c%B;+< zBa?A$NPk`(89yFrV&fka8DmHGQx&eok?b1G0*k8dXn@(PAVQWxip!1-66aAuoYwHCfnLkG@>=0Nfv+@O`;R5=-oJ&-?sd_~T7W z6-KyC^R+~W`6%uzRotWSaQvXR;y8P;?YTbgnR&{U3f-A#eO#ck>+)Bk%gBV;mDnuz z8O*N4_xQbbAZc2eUh670`;AXe`;&9M*uepY7Mb-AmF-kV>CA8o)J zSZ=E3a@N!oiOnwqu>;`m{eVTOX5n8pU5_EdB@H`EmFhy}GT-lR-JkVV z(V?SH_9yMZXfPY}hTU_$;V!!K^p^2xIztO5d+^T(Hefl57g~#F&YZcabtBEs$DtD+ zE#+rEuyPleoF4S1b{8%DS_CiBPC15Cy~&_=de}eJX*~z_r3GwnFzQd@2AV0lWqF!i z-`ZHqe%P9wM|Zh4jw)|QTIm>g6n>wHDFk>Ir$S)g5*h9qr!!6G~ia2xytOe1xNK0p6rTPc6tWcm#wRC z2@g@q^Q(nAg**%2iC}|er@FL-c;F{(Q!yDrc<@INNN(X~rC?BMceSk6i!5mTNdz~s z997mU>VDW7S5lyQn6FhNs1_x^(}3GN85RSFuK;CN*RoS|urft9ukQFk9O-{Yo}n5! 
zk`O+8WgdbrQxJmRi$HP@xkJ`R7!AG@!K+yw>_P_R$`me~87Kvd=a8~tX$?z=>nLmEiVh65i7>vpvjVZxcy&pI_8)RuHL#03olUv_FY7{0hbVC$N) zR>M^_iHzTwnfu&fF7fuEdf4hd*q zjAVJ04TqAF4+R1(8qv=1({69touPxO#}m?MFrL-a@YQH>Fh(;gDjF-x>j=U~|FvFC z3yW1~$46`m3p!=^d?dSR!I$G~0f6vXUI#$4_M0}}Stw9x`0=Jgn@B~Ys`M6b8w`iC zbz)zk31*T=4w&~H0x!%^X#R45!Dy@hR5xGVXnNU_rs_^lcwdJF%R96ac zR5mJaD(XH9e^}Bbd%4%2xiqyr%VMIc-eB?c9GY65fFxN}MYE`|1vIOu>b#_muI;Rj zKnhF>Y0FpYCg%vmIYI*^M;liP8%Pc%KwXu>YRpWm-)hq%{9z>9+Ud{p#gW8*q6sHh zUaXpW-j-z=jd2>Ahp{9N)-eAzPGzQeUQ?=r{~5u}-du6XjHw2VM;yp{EuVl{?zd!_ z6#l#ccX%@`rZayrCUy8nn0TycxJ*`CUQLGYMh@tBDGUfn&0qF`#AFZ%JmUh@Ans`& zT25)CJJCQj4(lWNm~8TjG`-zAeANZ4VL4L=5d(B6?#}Swwp56h;F_U(XP(j1DTmT!6IUz{+NjivNG$Kd=jOuH_EWhacQoVbT z1c$FVHq12a32VSyjiC{$tiJHDYjb$`mSdaDFe(NJr-qH8=hh}O1j<&W#dJO7TRCo| zn?{@(WR^W4O!&&X_CVo=?b6fUdfF*xEwh7`tJ+WE=(li5o8xAi0R5z+ojD z!JPqMGC4~u3Xw- zG8@Zw*bUnygJ2UBb~-u)QI4KV(&7kIOj=%2mZvR)uE7o4<(c$PT^zk4b$Q9d#JL-` z%gt0)>x!|7g!US+T{0K@6y}EQl5sL0lk!{_wo684IaIzQ0dCkX8AHjPJGN^iEgFjp zwrd{b!lp7eY**llOJ;#q70D2G`H43{Q*d#^cFBy0DGKS#0bdj-x?sBk2}w$$s{&r} z+9`iYdO$~PS7cL2YW}h})qG*no+Gx~xd7NM9+USg4T|E3?J}HkK}5t+;tk6qA) zoP80^0XJ-yVN--|91ur_nhUneJgy5D9#WGB3yt;20)-p4%Q7bf2|q~}0l-5r8WA{R zy9~dw+K<$?9CV?XMJwhJYkUgmCZ>4&{jL^70nKgs;4HE1*GmcEfh*?TQy1+a)t$b9x=vE*S@d zPo-tA3ybZN$vI-XHbXpa|kVx^cvIc_#f+7e~+J9Dz7@ z!*;ou%4%IPHc4t$+ph&CSws_#m#wMVbu-W6_^%#Fb8~5py-0_3M4N6l;*Z? 
zS7cL2YW}hhleSFB(GlA{W`pfo;~jzF8$3BjY?tAT3nGCq8BD%h;)<9IAC5HGmm{{z zuqlQO2gH$~=7Q}qk4wS@T?%r+z;bzqfPNhJ?Xt`XLBdbc^+7gx%8uAB!>_FNBlT@9 zK5y?eZrCoz8y6l*HHNc8%i3vo%Xh?fIYz|*;nXHG(5zdV%n-PkgM{0@U0&WonDCW% zQAxPlkuKPzca`+2zjb!p?M#LmHa(G4QsETvd*aYt;I z41!J2AKT^Vxg;%)fFx;^Um`7x%mq#$WJ-gFh5r z7dF*7p2;9^#U-;+-zCc@yvdlDmmobp!PxOk27zg@2Xnv|1&S`%u0TSP(&Rvkg~)?0 zSCV-;?%O4glQ1A8HGkO`4(4N7#>&y{OornZGq#H-=ZNhxoN+-!;_)t>GA3Uxah;(c zM{Jj2Qw$pph$BPI1>0pFmxK#1Y&Vy;JmhE~^KkoiS>}Ww;U{Sw+)@M&cO!Mgb{T%9 z)lq(}RA6j3*E83*YSinvZ5CrgVP?mEA58y6+r3XlTm&s|2jQ8_gXj} zcMNCeN`|je{YUT@70=y}449RTTm+F682)UP;)A61iK;RG_9pyL5M>MCTcwjsR1!^U zvO_1dERqxvg)e$57v9!@JA-IKC`*E9A|RuN9wtiHmLzxQvZCS7`&Lz-X0TYj%>2J*K06IV+-7Zn+UAwGbDH)IClld*hK5OrDpf0=m|$ z(sgxm9=X_K&6cE4@?^&d$3*a5pIlogRj7Fa$S|qJ*0fQ^SK3v3wJ1$@w+C=3A+`jN zei1i(g`#m}_=-<|MUBt8y3tIf&X|JsFt}Hq43}I~DOEHAKq53hI@ZI-9rzKSUW#E^ ztYU)5(W94Qr&ij+bdklDw+4yq<;o?94(1bI2C^IKv$v^-{oZ7R?t<@5&<*mW04eV} zgJ*v6)jW!zI7AY9@WOH+&xShPpY>MB5L_{!pd~qw^30htHyMWLkRi%kWEZvDj0~Ib zVg(odo#|jW9%%;o^wk|ZQGIL=+^ra8cV`Rfqs#u4?{L{$uG=^7>Q%sl?A zGGVVk+-cheVuUsFo1OjoIOIHOUX)sJy%lkB1cuChjTJF@+$9i`XPpBvN8njy#abfgcPU)uxsm$4YTRSj+vlny99 z!j8ZL#S$9Ht?b7u?KUHmNKN{-5LBL3mOUs-#0EF!R1^R__TDZlwR?TN9Kq-JfaF4U z2SdkztSnYv&B|9}<|&0Fz-BEEN5M=WH+fbW_?uV-*e{H52EGwsh0k~IT(u%Sf_LwO zq|AC|+uOUtxQSNUtrl?gy9G`@rWnJx{LKlN_n+ic7^)=l6TeLEa;A&-nh4Xp$CF%X zex3yB!V+cp^?ox%1cT3!fTqFbNTBOt3sB-KM}j8=nhO#$dduZt0W4!!9U%n+!=^sa(7i5mARc> zx~SYqE=A=5+9`IfH<`Ap@5l-s@x)yQ$)%|5qL}WbsGMzZtv@QGmlVEF80iLe{F#?L9mFZf~^Joel?^g=Jk$3~OcMCDg1IHl+WvAN=c! 
zu%8({Uc$DWFus#0jXu#%JgH*yNj012U#wBty!GowCf~?yKC!lWSOWHlM)K*6jQaGTO`GVCO)@{0Dy%uF4Fp{033Hd?$)iv%%~?Yiuk>98QS-G z4!PahE%)BPcC>XVobQmGu$RJlX4-M%=w8jSWC!5KQLiQ#nj)l&#>Tt^svV+LB^AIQ zL~xLwPrQ%%0OxQz|42+ciOk#z2fO3l+0O2)o3THQJ8*QZaX6~-4+&ZwP`(u9sY?JR zE2SMlG$hPKt$J`sbsZo(wkt(rFUxd?S!BBWis|xl-U8NE6`j6eeYV{3M}@?^SBz6r z<=E{|P{XRkO++p`3NF=JSl4ps`c5ExL163{Wz1B0{pb8k2x83(UQWGG|{8itN2g%bv@QIg+0h45~GV-`x{6 zcr9hpWTK>$6$leXb%v{t(=qiL{P?yAd;J=bm zjimddluM%u^Iq+cnCOodOuC34lU1--%?{F|@o~9D*eez-j7lUeW>R9%t(4IxdKYsb zGlb7YfGur;JI!uyILy~9@|G6!pB%&uE|Es^9RBB{;xAT(CpyB<$Aq7c3qQX~`1xAl z=j()@uNQv40p4{5EDlD4SsDd zZufhm?)1&Oy-9zo+n-FvlXkWYzIJrw?>-l<$Uf9NJ?y7D^aklS)A(Cg#Q)<8_?-w$ zrFk9#){e#+OcLh^A_-jK*nT`cY;Wyo=N6VNzSiWZvnPK}&@$?HySqCYoEcBH*N$Ev z!P60#dZ{d0BYreMGMn=-H+CP+dAZ)iOO`KKPTqn}z?)_apsUkbhR2b`Vo@U|OlSPj z&ypBt|3J>nYi(!49WE|W&2O5G4b3(cjfJu0pCnwFjE#Ie=V{H%lc$PETChI?GWtFCtmVMn&Ya)+(Y8*s6#=z%t~h zB3uF3sEE`GmMY3WZmXjF<19zjR78@Zjf%);tyM%HuvHO#fMv*0MYsU5Q4uLsELD_$ z+*U>T$61aF6@4QDO)TOXYMPLat3#{g0ivvmPvzYGV70MWCM9Y^OC*{!!c9I;QXR|j zvs{vQJ(Zrx`Q*+!=4+$Uw74uxRk`vqSkB78?sz9o;IL}*^~!hdhJ8hq_?5fkFWr5q zN~HU!dD&m868ppKtZY=iU#b#^1LSRO>TzIG$dkIvCclx-l8~v1%$Z@LdMa5)%~)BDE_;3{>@kh(|UaLx3f5H$T;*^gHyKpe%BO3wc4{pP~Pzq~Ah z_coyVEdr#DNk!CMMMApGYhnls%;SkSom^0oGmY38%DM)Em1D49CaRxqz-w44cE*5P zO>9e*PB&tk8T2?2kr$0zJok98A!}#B&C#f6G8=Mm5rzN}k0%v(M`PzAvmPf$L!QCd zrIVxaB#4-*JDNbp+0g_#GEu$10o>w~m80==GmE;TvYCrSh`0h!$^|AlR*;>gBr=ue zBompF10u@QYMnA2OMr-~x&v}loE?y(A`_9T15yz=W;%9a=zHf(C(!Y6K!zo0ICm2j zMJw{k$sxvYBs@gL({UFSPe*1|Zn27^>l&*BB1Bvcu}Y-m9;-x3GLgB(DzbKoD=CL4 z!w?{%@}%M(tJt~7MC9s#jAOeM3(Um4Viixv#{n6Z0>&x|+quUojs&qNhnUCHaTgU& zM`l%Sv5KSX8mj~%L|hKBN~Gi-t3*mNk-5bxvWV{-s~7@AR1UFgmiPshgrO|j}@ z4X89fZWV+Ft$v6_I80!UiVhK47-G5P-VkJFGE4i722=_dDz-C&yDP*ri+Vm3xm<`3 z1)7~qj2~)1rC`E|k(xN_h3<5oB&9+uFa)Vwbf*H$N+vch=cNp1w&(6|3>_l6FvPr_ Pmz|kJe82GJ&o%x(Y2dEVUa+1c5DSBrEdA#r(FnS(KmSoKctqQ`K6(MafFqm|IER&O=EGrhZw zdb)@1o|Tq7j2#Csv13l0<6~o=lh_U?Bu;n)4j7v#*f9<^B*w9^Q0xFUubcq3iIbCK 
z6UV80>sH;Wy7jtMb$eDiE6y^nzJJyK{{OG)R$V=Z|NF%s{DGx?b8~CI^|kjseRFcj zsd2YIyzceqdc)~z`^eA#=>K^1TT%AGV|%}K+rGPQKbC!N>2qJ$3vY$1wnu~h#`(^< z?s(D~otpH<=epDWXc+ImXVBf~ogED}d*kK(uxE36e!CYp4oA!12M0T&o$2<@v@Kcg`9-iN`#>!FE9^kbj@{h|Gi;g^dacKM<6&>mx^vi{_Pc}rTYKZyxI5e&ZFP2r z{nMlIRtt-B&n=_jWV(D4EOa&o-N__w!J+ut!DwT3&2EV07sH-&-N8;TzG6LIY{v6Q zU5%y1CbE(>^p90{b2%xgdl42^vq#`l5xm}RNF7&O{bB6S)0Kp$1M9FD#dFK^|LTUH zd5YyFT7G_smw9Nc&V}Nm`(#~wyw;A74>#a$e|_AW8Qp!@=Hg4ZACpj4%ro?ol41@3 ztS+Wu1pQlE2AH4pdYf_M>bd2;5S>7QsOjSXD4?bE+9^T!Xd{FbPj*hl&Eq?#k`Nxz zeKH7{{*Xv3@{yLxZ9W}Da9cZrxOvykAPH@{Aw-C@8bpK!1BYK}z|G#C8#iy>+zi1r zQb`apf$YyDyw6AQGCSeLaI!n@cTWv^C)?E`&}Fb6W#4gcvoq{&p+X2H7>@+}JlMA} z8ctE6)*ENEE+@4T6ZPN&4LD@SSL^1}@FW|3ZK99H|u5j1jJq@!#WvUPy&`JP_qn5Qk=PV4l$7xgn zACKS_Hi~WuzGfwy!qc2N(#`!pTjY$px>Hn_#0&UqqUx2CDj_G;-4wNTlkE|Tnx{}y zqZO@nYrV!3>83;gG4y>VWvn9?MQ^>Wtj)m*6UO4yI0cej&vuH9+85YHdG zHgmajl7 zK4cTi>1-68e%j@y|I$!xYUn5}F53ve1k=KMO}E^{KmFlHEt~jYycBJGcwQ_U`Hwf? zjUg)pjxKAcUbwE^v?`9eWm>#V6edmp*OQ;je-O&j_!9)eo>Fof8q%12O2H0*PM z1V|hE@TMF_lxcHsvd00fLU zM%I{Ci%tbMw(Y3|4B&s2fei5bX0Qh7sDuz$gvcBtFrgr<1uT1l15`@z53S-2gA5PQtR1}SJ)MG9P!DjCE80I*S2i$Yi-1!Pc5q<}#{luPqYWjC1zva)=i1Kksl z!U^Gc@k0vq(+r3dkb!i1@k0u1M#wg=5T9!LDK-e+V1*P|aYmN@jSZCzwJjT@z>1J> zXGR9`LJ9~Go$=Wt1vb<;Q#LobHYB-i0ABIf)Xvx-1r{^5L<%e@wbIQ1VGJADqk(-A z9^Bv-!i^PDKt9a1A(F*KFKw6F8d>wtCgI2=AuFVS@Fun-#$t{0 zpxaR3qYNvgfU+g@Db|uR=`WCl+aLw)tgGh~#32P-{iW*~UHU3KxCT-H8>9eJF}bM6 z{7Me$j5}B%1-VLu%>u?I7(Zr43Ucx#Y)IjWI-Tph&|ThENI}kp;DMGxst+l^(}F*? 
zKnmI|0Vyy}ex+rH6>#xM(&iW8#RERD!V20BSOJlXamPf5v4MgmPtd>$D`;nm$oCW& zu>vZDVv@FUx7t;3q$*ax#B^;x@6>*g*%&|ttNxnM;?k3PR{_IUSge4|!v`xMG_<_< zVFhFe`7{G!1!N!{FI~|H| zP_x1ctO)sH1!NE}tbh>F3D6!Zu%X78vboK*A?b}3kU>nXgw?JBLQEr4ORT_xk{woH zMW}A9fK0*)DH6mW&rqqhe1}h-ERmKX)gJ!INd{SeBX(_8*ne-B?e38_UO{}m2 z%9hZn7_kEKU{N%BacJVj1}ivXzzSSiK1@>s0r0?=rW>`w3h;28i*f~ohJI>&;!6b{ z9G5&<1Qo2Xf?Oq4u>$;<87s)im$3r68z{J(1y*pR z5>`NkV7;Aya$|)R9Ic8KxFj7ihyem%BfA!du)+$+pql!j<@p8z8A9G)2Yds845a17 z4=W%;$ax8f6_9~+yl@RxWvswvgx$mn$o#5f1>{Mc?Ch}u8){ZqffXTNtbh#Sg%uDY zIsw{a1vb<;Q#NA-HYB~V0y0Pttbh>Hh}04*u%Kjz6<84pW{;*$%H|COGGlckVi-l% zjCiH$Qad687OeIa5Z)?d1>`|9RzNC!&t9G9Ha2`k7|QWYz}kD0N8oO~H8 z2+`*kmhCP^E36>rqAFH^9}6){-QEN%tl(&6tN^#DW~_jO8H^R609(r$1vc*>9Cg47 zh+NEA0b>J&MrVN)9Ib>EP$3kPw1sU_Lpr8@iR^BCa^oAi=h5+k6;B*oqbCtu4@cv+ z;UogdNE*~2J)+VcH9*HHz9ATo40PbnVfmGVcmAWFwZsp?a#er`HF0H$B%o8`P6pEz z*Mo_0LSVC(pE~F-Om<%ZYx1VOzY+YGMnJQt=t4kGLa#0aq{jAj{$f|1V))gNLZyg9 zO(q{uyd3u6FJ66T5y2lc;EqsX0HY8AEe*Zm5Rh6}yEO^+E7}*}C0UqjdrJL>1RRJe zMJZW>|UDas{QMIEKqjcR*^IA1$x9OOiXIpN5$V^S#{NT%b zDXIvlMwA}9Y-@p;ngnmIB9+9?9uy9&hKTge>nYmwoWH|W>|1c$DuOGTPtQqffR6~i zX`jEUzEy#n?!?y6@^(0tSi!nw-<>JF08NzJB~Z4$&Qg8r;%SYxK{r2;p|Y72O#2}s z5&W8Pf-MqEf7sgUb%&kFTXwQzlzZdxXxuuEf4a4Q`gCvH8*cRC%SyjWDhe#&E|$PI zJQS?i>J#1`@keqidFW2iwHedSsqv`0xzU|W)8nOEPxSg{&Q9rpyk%HmZtiWbhfB(#Syv}CBG|0 zzw-s1Z^~U2pd3$XU-|S|L2?>Y2yMA6?FKvw_qnSS+55D=fi5!X4!Yyc#@XHmy47T7 zW21*ICYe7rx_cWwBZNF9qbEkU4MyE*i(I3ToP4a7d9k*xew#?{vgv9cw`iy2<}Pxt z0e8x?^5Z3|;=`WuIV!Zm3avvN0vwmEB%IwilRIo}T9MyMgK*)wYTz(B1l~?V$9J~U zD?7FlyN@GW`Apvo%4)dQ99Azd@{SdgzmCcI4Yn^4Y-rRlm;-| z+3IYMdlPh!ee3looeo;(>(-RaHF)@5=~jUHGdqVU@fGjM*sJOn{PJcMF{<__QBMQZ zN59(2fmmj;m+(7-{^Pw1SxxK3BSv zWRGWBSi)aMuv*zLsrKUe+xmk+;;nx#3YmlOi1kk?iym1}s*j_Kbv>Jvb~eyTRXsWH0-&FRZR4+no8!A4zHQ|{tw z#SV3ML*)i4zxo+AmXC}9+)DBm4sAymjd+DLWcG@`t4{1 zbBeb2UeGw;uh?2x&P}{G|0dzqHEo>H5;3G=GwLZ_1hCWmvjC9&h^>QUybut!x)Lf0 z%&daniTDj+N<*L(3MtXEkX2WyEx{f5HcP#<`n%ORnn@n@sWQ2+aa-03k zPhs6ARiDDCQd0{m0yTV9*qEi9p@1verJ^e99Mrt4$3?S~{Tth*w9Wpxc<;Tv?%>w` 
zIm<<|EZ_2P>=>kIp6_QP{u(VM!P%|BFmPox6dzsB`9<5%oZ(0myta$9B%RtVej)`Y z+zn+GqBZMl6aH4Y$~-gfZuZe?=7bB_mQhp^f8ovWy}D7-@jB-xox^z%t5{QEC!o|M zyf=btbc3Y}D|v`Bv!2+^yO49anNNDY9_6R4*La!<<{)+~jC~L}BPN)qc>tNL+Rg)YNqXx{9dUpdJ z2`Q=R&NkZRcV;}=K^r`J+xcFnlDU;G>LUnKlULRS+|@(Vbnyv-~6J;|mD_o%MQB7r-_-)z8L;mi&7vKToI zFXp6fuh4?d#D>%j&o2eGExd;aVF}qcNmwNXcXd_hFXVOr7EWNE8i$+k^HE5%JJvfB zH_`u9L0pQNshW!KMsPfwYvj=ar3i`-1=MNEJoic)j<%9sH7~Cy)~DkBhZ=Qo-!8bX zL$-ls@WG(Lk)DY#n>P%@%I3p3iJ9r8o)KzuSxhb(GY6+FWLP;@j^NM5WBk)lCKs~zBfEhm*!Gqpjl`Z;VI3te%z zjellfh+-E(YkVhAB{%J(1qk$B>EULfT1D9a?~CC6AW}9T)4<-z9j0sBX-WwWiBb1-v~3;zXx}zQJ!*1db<` z8;O06L>WQnm{&YS84*yNNJRm% z37sm=CKTo7YX|F>deDNQKTJmFlzxNWtUzBZ-)1T0ktDDqlVBBD=Uw>ipdv}LfBv#b zXh8>nc&X;3&WRviYF~=o>;^v#1M<>Y=I)6EN}Kh7Ld{ea0OY09&0Mrdn9(xtq^c$^ z2*^t(A!35zKwdfo>45`)ydq5m@_sY~$V=&}z{8**FCC3YCIE0qX^ILhe6$vjmyVnB zVjoC@Y&)dGs}zTVL9va^AKIbg(O44#^`%rx%2Nb^LaDW&!E`>9%k-tgNByBA@sv+V zhsnJrH0({E7>&_Ef*YglUW+^DI6oCt*FY9h5+Q~UMFBRMm8uhl8)SAlSfV|IRG62J z8Tk86*s4nd()_M!LhH4I06m3XF)n>6v7{!vvjP4qa*7G!vopyn)EoZeP(M~hNBPSc zYwTK!ddJrn8t`yk2DwE#cYh&hg`N1X=9q;{t7PlG>)+KVT6*70#YihVPC1#0kye-A z=g?ucHJWDHvLUr6b((*D#JLM!tWEOIYcxp;Pt~imWVZQ}B#p=s#e%Pg*b1)*e|cqB zbjr2aatdtKrq?hq*Z-<9TrdbvZBjtlva^zm>WX;M>JXtnZlDi!3;0$8{I?#ofm{?6~#>ThK;jAEB)Ms6UYslxC!t_<)!Re|$%SLJq?ML5__M{Fc z+oMUZqqyOZ$tl0yu-|p9VZ1C?-Y%0XwYplRk;AkIt!ypT?1_a0FkSG{+6 zE#14!Kbs0Tf}g2?g;@$wk(XtcjRkOm5IkR?MuL5b^+oyWzz@zcQ0rbLI4#SbA(OXN;DDm`xav4fHky$u%a)_;w zW;Khzf5K!@*=qCOCO2ognlmT1>s#4jw&&WPLT(F z(^XkJ6%KYa&pIc+0w1ZchuuuJ{uC&Yi~SaMTLzep&9XCpwnM?x(uay`7*1UT%fonxB^6SZjF_=ovvnP{D)4L}VW~;K$ zP(7K92hW~N$^ny+XjluKXk@%tcYxsAlS$b&q$IDNOc;>0YrmdMAS^m%5immcWWvA< zN4553M_T3F9;PP~7*aX2xhFf)VtX=f5iRWriWM_!doM2(&3X4^GC@}9$=U&WGE0#c zMtouPWW4A^m&>=k`1NG`5ZL5BdomeG^j@)VPsWo~c}=(!SayHU!d}L}Y%*Y(73+xL z-IK|o=nQYEHaXe z7yFFl+mlJzHl!r4o=g~!wQIkgOdu>eWf3q!_hiDr3`e#0WbN8|GJzqLGn;#|HrJC; zRKqqqNg6QzS6O4{-IK`#S*0gi3DA>SiriG}@Z!n#HuAsXw@P}ilkuVxT`ovZ#t(t3 zKF^*^MlwK8#*gb{2OMZfwfA5}*C#q6gxOFmqJkrM0 
zt2(NY&^?))8B>_@UMIsL*S#mpL$+5>#@B#sYf^=juk^wq0o~+7e!X?1q9hr;)CjZg0K6InSkBix| z$0U7sF7l(R)E2HIHThC;Ng=hfeHA8}?W5$)=MRTi6;z!_PC& zeLk;xDP@}6?L}7d8q)il_J_z>%A#DzGYKfL9V8EV2}G+&>wHicW0$_6bU)k|ANPGj zNfLD61AZ_3@usB;BV6jKB|6lqI9fKGq560f9*!UM)(2-VwmsJ;Ju^?a=%PClt&a^C|!>5b2Ir~T2;{stJCSU6Kr`I)9_ zX`-V%EZ+vB@dtjS0S~a;RL$kAsVNehUj||az~B1;i&D+Pzizr7LxxKlc9tsDh01B{ zSh++V^vY>0K~aa)QkFw!Dt1)-y>0qz!oT!;e~+^PmAbf$=JWb4H(Jj1N-_(T1@3{R zjFOZ`xf!hmJnc8@Kwy{+CLC__YXLtetWUC%fZ*_tc7N0lF6xjcMHdi65v@qD7d+`QS{YTb#mPyekb z6QO^j3CTbxj2Dqxy_A8=?we@3YQdrST0^>GXR*xP;2y<`sEo%n=g}qZbMUPQUc>T( zV=wy~1BaaVRboowsvdln^5o?tPr@o?8ot?pTUq8Rv)d9J)lYb`D`MH{8Dw9!uEHfe zL@5s#CL5Kc$L~b2&azWo+Cn_=leVdt3?V%D;|L_TaI;b{D7CXvRx1e_9!k~60lsoI zs2ct>g4eMeRn{x&e%KmU=BkxuBr9<}WF^KD_)Y_E^JG{I9KHgSU0usg(ZR}6DGC*C z2$xWl*(M|V-;rmiMvf$e4_}!Fk%o!y--|$U54lOTX;<3PR~$FcBH|ES8^6M;h{&4V z7r-9eJYTwga50MKGbAu8A9yEC;9zI8Gu_^q;%;gZFT%oV_Hp=f1g~a!unQTKD=u7c zRfc9zSyXTqf*2D}TafqRgvBp4T(TWyQ4A11;!46=kTx(lOH^KFX!Hr+h+xgbh{SN< zBjF+xIP%QjHWH3yMp}?W+ zWvIC{8O-D5YBYfDSN45L2!d}qwHGXFLVe&T=Kx~Sg#@%OMzXxhhC@lohXR2Xjc8@C zXQw;qOwqjpqcLeTUQKI-6==nWY-$BM6c0HxvwESiw77=ghV&onM@`M<;08Ps$!=Qk z6;7|7O%eT62NNg_F5-ggMR zFhjRWcv;}30R7=+@5xTSHqz9z{^=bRYw+hTEiJDPE;W_XiO}O_qq43LnWiYKW_^SN zf8o;9@+^yqs(ORP({pHQc>6w;Qj)Qy%G#3=!x zfs&(*D}@athZ3NEL8q`kjAUCo{dvAHlGsl+;W*2SRa4K~vP`2fPGhq$mgK=2=HJ@g zRQ5k3xY?U4E}30$xvR7$3xC;wJG_||)0w{*lRErE(hC*6hfi{AR5wI$jC` zLQ?aW{fyGu(=JdA;-2=Q<&;Lc6Ne>?obYl8u;!~SU=7O|7eq=Gu?V2@)v%@&mQ}*; zqJnR@z%?wJV%Vq-Be2vQ;2QSvvNaX4!Cgp-$Y4;Cu$Tnjb^&ZS=7b>OC+Q%9(TE@g zFsiQwv;3m(OZDyr5*)tn*f7(uC#(T`*k-Mf1-|XrCNqqR0m7+aW9YfH$qa$ARRwYI z5Dt~LiB1hN%bpM>eB~X2FAdVUmcH}ofbG)L-sJ1PAsb^d)GlMYWEu+m6E|KYK)O`I zWB|zqZvLBeqKjX8PbN zS#JM;s{8RR%n!#jHk$k23><2wo3-Zj_;qk zIC}Lq!DJ{$Y?nNqaW1QMFX$BJhV7bp=CEBZ)+iPem%Rdtd=(Siuw5f}?%1x8v}i0Y z*sggHjO`jCK~XSzsivID+^}7Nt7ls>bHjE8rp0vTfG-LZU9eq&ge0YfDS|>RdV(9a zE3zphHGkO$axjA%gB!Nnt_$16gm)3!WjN!4NU12@b!->U)Dhcd*c8Kt1LDX~bHR3* z$0gxHR{^^~YIrn7{j0+|-LPGjIUz{+NxBFS9#YWp>xS(z{L1QG0$q5_ImE(`8@9{w 
z#)St-5A4BVxM1Um?Q)EY0m1=mjrMD|q!*-c=QMv1ckTQ9fBxF&n0Pb z1SFNMS0hY_D+*ad-zQ0!j@T}l2bQ`};rdFwNL?-{6PcRP#oYGo67m`6vRYS+O{Dhq z;)Q$5-Q(30H*D9;Gl%VZaVm#x`8`w$yx0-jHFD>U?HWmo#^QqQng`k4ROW{53S4o? zOwxCG*<7jflI17f1Pzhaao?`MwAh0=;EMu97i?D`AxUXEu*OR<@=nYodpB%XWK&3L z{<5D@T64p8kJN?jV#2$K?J}HkL4>P_HO+mfUAeg+O0h((YS1SCO1S5tUcQ03U;xNn!2w-6?LIT92F&!qrK(a%>D|7{YF?K{j?CF{-!6F^o1j0o%h7X5S{#9jNy|rmZg8(7w#zf= zpSn1DMe6dBhY133!*;ou%4%IPHj&!bip|n7=ESIQGTtI$DDU>-!8`+7aq78LnBmKec>SrV;phZx63gq1_%eN zk)!9Ldjswaq6wia38D$ZUdbq+MAI{2BzNbs zqT$c`R#}g(Uh&sE2g-Ryp#NL(96>e5TovVsmUg_*`pKg z5lO;FNC$jZBTP;}4k*N;K>7&CtSPXi*P5MRV~^>mU(SkXm0NCvNiF1im8+zEl2$?L zE+$H^HLG-8ot#H5_LNE$0TL0)6wT~pJ(j1?;JZG#wos~2^8}D#Qj4uAqKrZ4$_;hC z(nsm;_5dy=#FhZkFXD5h!G8b$fuWLSQe|8AaeBRrP!&JwlHaH5nn@;H&>dt1#b%y*~^tn5FN}WzVu}` z)Msx~4|?745MANk8KaBeNdZ!Rp9-G-#aHtvf?^#Xy6~cMAkT(6)th!#)I*#;ecJLC zbo~%*^$_+Kt#80f6kPPSC;h=_s2Sw5*S76M^@%-jOfkyN_9oIthy5$x;j;Hyw{G6v z9-L2Q9PgvwCNHylIH|ry!EV`1s$<3MND(3>B&pW zbjlW(pp!n(n(k~5daaGoa5^3h zI-9-i9=gD3xN*L-vK+&Kv@!=*-7@xOyN>j5^X8_}Zt0c~ z=SwMNmak6xYvm$j4nDmH?(wo%R0NRK44KuwoFy)?TKc#{)TGa53k!+E_LE}nC{sMA z0`Dd(cN~VSEf~qwGW1dtcG<8rpqm#w zH-m7`&xCEmaNtrP*QV5{HL*EBATCbTbPB}eaTgOL&pHQUj=;r0+-qgcOs74QKum-$ zwORcdt+b}_55(k1H<=+D12KNsB@mNg9Ro4*_;bpHy#jH&WgCe7t68re4mn$zM*{@n z;s^|x{TeG`^0-SNCeJztVvfLb%8I=z;v=?!SXWKB6v;#%F0na4MNHnmmKm}!5aWkk z0x=oZF%UD4Kc`ICD-a*C4#c#pb3np6SoTtL)s(;WKoiCLDl+>u24eEKOCToCItOBo zz;nuqy#n#k-3r7+A5zOU6v+VsG1=TKGh|~R#t*v$Vlu2_AZ8waPMNS*AUn8PKI)Z2RfaF4U2SdkztSnYv%gR?{<|&0Fz-BEEN5M=W zH+fDO_?uY;*e{H52EJ~&!Y8vqb)c1OSY}qGJvRx#FYJS)%z9?qTRVfeiB{UJ7I5{u z1x`Gs7{j>y%?X(IpX6j1swDCgzfA6Ori=HQ2-CdVlU!+jo&@Q_5@q;}eltV_gU^wG zrorY&(ClIhP~s~`f+qxBX0q zQA-P*jX`%Zi7$uC;^$?DHDv^{gJt3C^YFs#$9vIRN~5tNelJ~u_I;j1Znt{NJ@>61 zUcMO4x5-Y}i{U&o?YMDxujaM+1MuUhR}&0P5z<9tt0%($K?Dc+`NaFE4{#2r^N+;D zlgP}iaIiDlnQre)I~n_vxDAI_8&^kF{vkoD1Im}9Jaq}cWTmtth(^(*s?)b*$%r{* z$`cO&gvMTy=?=5Vbomw2<)yp@tge(heP-JJYrpar;!Uf+$W%FY+Z5EWDsdB$%eIzF z=a>HE%&Bc;4ZbKac7!r!syynUmQM#`j7gL&&9 
zvUp>`qKgJGRRx2d8oY@zXVf6=cSQDXl1Q=O*hQ&iCrJ zz8vQJ!~Qg9=LomN!lN*f-9(c~|I9FXmF+`h*%v?dO3h;h%9$TYm0V~1G*8|5`MjVY z^becpqt90zf$YP<{F+A~8HCKq%mWU{@|k&1BWA_S?U?)A7;44rZmC3%^*&}& zulFHR%*BNa!xRzt?GCeH$au{MJ&czP8Rx!4N9u ziNLF|xs$lCMuI33buk||LJVs@GAhC}HzyYvk@)RtM+_N185Q|~nURYfafFIFxyXoQ zAZA7`>SxFl^Kl~-$c~Cs$=cCUWATOWdD%^8KmGE?T>9Vp{?0Q8_Wi{B@BXc?z3=Ip zzyDW%cM*Q%%J_>{z$!G+iCFOhL{Bb7C(^Iy(boiheJlF9^g=+toKKf-RX^!o#_H$T=e{b?V}r*FZaZxb{D1T3TP&;*vQDJFg;T;9GcYBcBf zEG+KZf8dgXm&KPq|Ei^{U_X08@cgOKXpo>2>Xxn+POYPVfhQr~OqDV_?w!VCr8CVo z52TYoKji2({reIUMlQr9)ES|V_OtU6Gx$3u)Fah|qMwc;_gfJpO1e# zl6HI<96*D0PIWgP-yV;)>mUDKxN?0w>TYgyC)0EW=`n>jkIzg}bD11Q<7eNjq!U5E z8K=L=zDa+xbS0I4(m32)U4Z#^qqz@m1bq2Z@;o4-+mkKjETNNe@ota>yp?0=62O1K zt^n(>=k;&d=?+S6vj@4&)v)wEbimasAZ}k;c+G+Rm+Xc3-AJR4!TfD~)BwgXzuDiK zEI}U~oA_iWbv0JS$g0WY5GRZ!y#&}NG8g5zdkyTp9UVuB4qjBvQuJ-?BKrv|2k+M>zyHwrvWbwd}tcP zS62@Jv=_6{P_zSNHR<+3xCQp#Gw5#g&W;9~y>Z!gcvD%r0`_1($6u29beRje(%$F~|8yueTXD`@?C{xO#5sQuJx`i+1swt2hH)U4(`9{N)Fu z#?oa4Ke^q%6EXi<*n4lUJGixfjtC(AY7zOd{NUmAdCV6wh}=n#;K-5EOA9BmzpBgy z#lid&*rPbh9YL`Ui3GE-jUs9P$*cFGWO0DqT7`ad1wvGWZ82BxO)i7I_;ZtXzDkH< zI$1A-L4PuRdFp1bczJXXebs|8|`&ywBDe%F^w;QeQ49eW`Eir4X=h& z1F4IoUwf$?s9fzH*mFE(n)6F!pn3RTOe1s39~&)R1oYPuiQf*F9Y^tO+}pghfBJN9 zjE-;ZDGfvAA*oZLYU23LDdnSL#KCbfx2YeC(uXrwAkij%0bGVAg+d3CPzMFbbl=m` z{E7bNle~1Ugnf_qdfS~ek}{#gTm)PV88wMa4xztvhi3^dG(Z%|WEEhsy^!*N!ow;o zrXSC;W9~|6O4{UnO7ZMQI*9?N&-g0Xhjw01`{-<4lp9Xa?|HuYd{BH!!p{WYWOv-} zo*JMnE-1!i^$X5wX}xrik5fX%Lt!yB)oHgsO!G-zEHvV|r5B*J_Oq$wWZNCtKOxUr zURb$qrG4Gu_6yG8Zw`$&+J}xDI@7&=^TyMsyS>9V#h?4aUU(~9#lkP6QtK*>|E12H>xH?uW@L-IcKz< zF*XG zDLl<}ewx;04gRx5&LF@l{+g(IW!-yKrI3?uaVvn>Q`yx#^mTRaZ{6%~mThmw(D#{? 
zkuN30WKF~P#!-H>WMZ%8LTy(HD;;=U!xkV|gLhbjkqjxy@S?<)`C@|BqJ28zZ`hUu zG_)oeqn(-e_=UE4^r2dG6MDFPkPl)$|q(?aEmj))mkA?i&UlfTtQ^Y??OoVLSx2^kE3G zO_i5dsoMI_1ri`_?8BRK7*TS~y_YD}9V&jA&4KQ8)SOk2b*RU2!62b<9zoS7^rs!%+l0#|V z1de{Y7}h>!#iG{J89W4czp@J)A~;Y(Jz zMF0ehI7Zr@>JC!L&;i6^-a4kT;qifL7y$n)k1fFOo5311lM+H;5h8PV#00#u7EtKS z7bWAq8X(Y?tMa6cDN{4QC8ez{Pw;GqPM> zNq2@5$n;Wx+F1IM4{Ld;1t}mSv$|2E9yUk;p{V1<4=J!20WbUKSf?9OK!!<0Gb5yc z45^cyJyKvpZOaBJup)#yGnHFW$l0Y4B7=hsQb0be6QDg(U_*^FWpk5jL(&^5AcJU@ zf$fn3LQEr4OQgVpQY+mI5GGtAdo*-FMauQEOBONxG8_8=E@xp3)D(o6xa#e#Nzy>KGZ^LFs3fLnB_%Sn5kdrTITDI&&u3XE6*6+$t|pjZKY51Vyq9ag~f)Y^U?oz9xSYI((a zyhz_3@B$UcWUcyZ@?lN=(Aq_;fJ`qOFSTF=WSV^9QZ#*FgB1{pI=yfWRxG?AVBA$; zGXlm67-O!q1%NoC8^}QB$AJu1y9&sYI@#G{1vb>IumUSWs5kS%1jryN#K8(HAVhQm zw8si;sBxxjZgXu&dSeA-5MvWyv#Wp*(}>g(E3lwshZR^63TBT6!~txu0y1N+5vdfN z0s#wTBIQ;_1}uQht^z``GFCtyG-CzilNuY;b|@}b0U5JKtbh#B5;_%Q8B88DIe3_e zY=sr%M-Z`PCWjSh-V<=?v+!_y)uL;`NkKnJmZ7sBQZ z1bQAB>(V-`pauwl%z=&US{%Zvza~^Q^#h9)1l%$}MrQd@uf^N!D2E5V@GK z0>%akje3FxR#?H&N>~9ELNQ6}eFeynj;UWFyBl8t`G)TK-k7|1!uquvU=X2W3zq$F zDP+ndJ!*iCQ+z`(9{sl&yz?LZ>{I^iLs+f~5P=b^615A*Y zL^vU^+4CibzH*9|nIi~@(>DF3#~~8%Um5|;o}vo@Jqf+K5Re+%*ZBt*)vlPYh7>AA z9BMN8fa2w_2Y>Mz`~&`=0e6H70~mz}Xldvbhk(?O-5R7pB1GgLA>b?87vLrNo@;x` z4Tt}ftPqE&Qh0^WG~j48^AVaM%^?92870ANRof%^yPdpMGMFErD-qk(q$zYw)ub8F z+)bVZza7DAELqbZq#)Vy5amZBdR`;WTfVv}i~z0sX^5bF^K4;vg5YnOQ6(V6 zvM_9gbt)c^{k^SZ`i7c)!pqE6_GpXMh!ELPlx=jY5OplYh5@IsQzV*Fmkh#@p zj+IMOxk`+VN%ojZcG1{`h~;>Cu}{LC$E)P5WV3~F9b~TiL06t>HkdWi^G&&{0+i!P z?JJ+YUXTqtK|TuixvO+CMHj|wbS4|!L3iBQINRHJyffL^Ko-gmQd^FuUwz7HmIp=;J^8CHlT_errGk*;#T}lg!e60~!F|(-Ec1gfK72+p_Z4ZuaO&uVw8EbH= zBrHv1U1<%U{BQ%-{bLBe{L-~z<@YW4D$(|hL}Y{G?#}+#LLht%c>>#C!8^5^i17#u+UULn=0< zddQ-1`qr|Y=AQ+C>_==JB;$pEu+^1NNnl1-f!~Sv4Pi<{pcTq&FiFNID#8=LTb-ks z{=$eM+AyiMyAvNBF9+F-ti^0avt3MHox0 zD7flz(d=aZ#&#)fvwtq$dvC8hxV3-Ia*-^{xBMGB1}U23@@FId8Z9NkXU7`M7i~jx zh9kuUyta$9B%RtVej)`Y+zn+GqBYysCj6~(m3d~|-Rz^)%n28;C4jE&V$Yl5dv&9v 
z<8{tYI*0QjmL*#`;Jr73YjlI%)*lS=5NBpRv72`xXZuBxpN;spabUMUFVK06@_S91`fK9iGWX}Qm}kYu4(MKcHgq5&sDj*aDn+90=3 zQj;7KqXx{9dUpdJ2`Q=R&NkZRcV;}=+3sxiw)4GC$#DtL>TU{H9m#z)5bdnMFEwSW zLGH7~BH0)Xr{mEe6-sZ2E;!pbA1~gCzE1no^D-pQeY{*cj|P>FsMo@>da%&h7@+mf z_+fZ3zJD;bk0VThHQ_9=CqTiEjs&Eg3 zsw@(?Q~b>a+!fB;P%n#-h=m^TIyCui%S1eVB5lbh!B>LeUpS$QgBxUD_0Ggi^nXNI7Zd!;HYlHjEF+4w$bim*6p?hpdA78e zT3s>9O1lIN9}4V$Sxm;!2WOPfYO15!Q{X*?Xt`_yKh}VU!s%6Pf%Bbs_xktQv}+*Q z{X}@02-tXU6HbK|N4ZgE+T@^BAGXqm14>3zXkkX1v_pOf9*9qd>5xS&D^qO{0E&*L zY8*H~BWQBSLrXFnU7e=7dITPdAE-xH8ZhJMB6!f-tFlEu3}@sqfV_c=mtx)b`Fu7+ z-Y8Emt=q6i&LE^D^$ED}c`kJ*eKQD3#E3IfFIix7*YbxOa8F1hJkcNY&}EP4{}cbv zz_3e<#y>MKM6rvgY6G_|5m}KvilKl(tz6kCQBDECOX-OTI*(9QU?Om}e zGkwE1{g>Xe7e;36R0}5#Zahg{f>k-J`e>&JU11!-$3kGvsnKNuJf5D55bw#p*uN%S z%w2eyM!WMLss30vD`h-8yg>!Pk$4f?Z@kvH`KLmt9=+u*QN7(3??5MG1j#)-AH@oV zd+rIcLP4h`?Vjsfp`hbpSN3-ao*=3l9Tp}?oM^(^Lm*CcO6VK>Rz=`=V!4sn=SYs< z2r38^WbnF#KZ?W(H^7<%B`5U*)tN+BXXorH56vL<6u~D#lo51}dBszd5dp=CR1_ea z(5YtbP35r67yH(4HlYPWf0&HUDg6e$S%JP-zRgm~BS~OKE(ByPlHU$0k~I70FPnrG zbO4B#YEJ5$EX1ogJIvsxVL)Cw%iKMYKxwlcP^g)z0)V`9x|xfjgh|>n!ATYUT^Nv; zPC~>4!-2eX2+{)w0C`242;}`}2#}Z3Re^^=L0&ob)2Dl5 zv?a7B=`gvsU535s6QeOYNN{7c-D`2@9OtK^7Uo?AV)#%LV3S#?I$^j$W|xB{+CxaI z%=zy(VXH0;Nb|d@39Z)-0`wGm#Sn{e+d}uw2KcYYDJF=|&Lpo;Z}^Wx{aY0sNNlQh^H66*jge9mJ$4UjV4LqsoGLYrnF?X`IE}d5ygV9hu8|Q z2!DCSS9Hp?*>VbO)uvYrb5*uxzSV~RRb#kd5T4qkfUSE}S2C7XhXK%^Z#BSw8=~I6 zSuChF@ndAje}BG$6$4IWYy9wkqWbDJx&ys_g*xOew2%30UCtr4>^Xq;NNt5{y4C$n zwNcOW0{m$cw(Av{b|+dA2cC;}p-p~Ay%sh&yZZA?F z`pcYR`0)n3$<_;OMI#1;${E6TAXe;esX4xtqc{9n(>}}EvZMm87*K!a29|WKWm}ES z;uMKzNzV986W&~px0KDxEmznofOVZ2sFxyIKB3=%H_g7w2tMDC-T#zs|HP*-v6|M5 zjc=X9SwXm{&$8rv+(zcu|d)1%&4S#Fs7zXBc!1RVcCa3&*!+zJbhVimo zkxMM(cl_C2fjBb_lqE7(;9;D!(z_HR<8`h-QvnOJ6rv(8%Pt!W-~`D7A+m2WC-ID< zBeEvF&e2wSV=Yc{>33OQwMf28Y{UJm!a|`0OEhu~{-VOcgK`ORl>k$~@%^Xun?CYE z%4#Thy%P3scBkFW>Ct$rJB=4mV|T~>w`gjtHTVhV)|=P`$F=ATd24yUW2%vk!ALfF zqbK3AG}HZil52HrGKqAys)~?FT){0Ou?KtQMyAziVQ(8Wz&!}}$M<;zI3l{TKI~J~ 
z$C|Q>#DXi1OH*_hd&}Y){6uV*H93-)p4nL-uW&cTXl0WR;$*9iS((6nSB&oG@(` z)FdmGbKX4}FFMiXa%|GCC*y~}Chysk$w;F2ihX-Bo~+7i!ll5n`+FAlG6rUo0n4n| zXF%VcOvbe#0ebah{CGL%zC9UFRCL55Y=rK~_;Cp{wfAH=OU=|rk z#*2MM^6kl_Y#UONS5GDk$lA4EPbLr+opN`2GGS5-x86NjySAQ8n6%27%{^KBS<#cp z1X-mgTM5vUS&F>eyPEbk%33j+JH2}{UUZ_%1?kE7A#l~_*^|jg2I$FnvMR3$mjcU; zjAu_KV_-HJu*`~m2K4R8WLz5(pjS`Ek4M^=8L2LGPbO!k_MQxfT=$+V57}Nl8PB@P z-D#|~H)!9ziz~C}$z&QRc-@7bOyF9Pi~QEfgfWksmKmN5&Hs9kI|z=#GpZ zmoQ`O$bPEAwK$SpgIT~;_G5P8+N&Gm*;d(1A$D8Wfj^7j1e<;pZ9XKA%^;lrl~3_99z;UkTpdv_C}7QWoVxo=HG~ z?I3x`OCVZJ7B<$UFCN_w_r=G3Up$h;68qlk1AZ_3@usB;BV6jKB|6MTac8LF9)*YF z2fY=?*^6z@^-0gnQ!cvb&P40u0-ar#zY<+WhTWAI;7ze6-*9AG8{xfvuN_F5R;Jgw zip_qbQ zY(S+hE~ELpzRQi4bG?$(s513g88{x$m>anDl`;w+jNsaGggMolc304$qfhk4t^Tk- z?RN*AbKSuXy7Tmw(Qq=AT$FJ7^y$|vzn13bqtK2Im+~_oSh)*KPW8JJyNecnErJ(o zryRq{?zrDQHRzperz9uYZLN*fIV<{U3`b9ldz+nMcdHjS;yG`=o(KEVD!SVr_Qoke zK!Kg>F>zpL#eVZ-(mykd7x33a&oQ6X%fvVxrVQ?&LsgJ=9`6s&aHn&5b9x0FM26Bw z2Z8iDx!lolR;tUp-XFn|R+X)B51pV{Tv?0D73=Y06fdG$8t2b> zbYJ@1@=Y`owct>EtsxV!K{nE_|~Ax3bJtX166cs-N&=SH!Z@GswPdT@tS=yu?Ela@RK**LNaVXW6MP zZ6O}`Njr!nOg#T_1d?00St%Hl+F2>9^#ThTe;UE-SdJ>|6?H#sjeEg|jqfzzHcy7d zz~L)E+10h|6dkPW!U`V$JMs+G$dQEb;Vbi*1d!j0KynYcNwsPF+0s`WFCZ1irRxV# z#WA0ucevWPfmR!b;M({VR@Fwi;Z+~WzXo59;MFV-b|HguWeOM043uh&=aCv?VHHb= zt1omceyQP-?I??4fbbDl64rvWfx%hy@|r>?;crB+=3zu)IPj5h5el67*KzuJr^bWY zlCFl>)iCfUPVEN67Z(VeeKj>`5hHWfqZmTqYfeoD!=xAr9LipXnoEC|4ZtO@mjpPXwB320x8WO-u!L-!9cc7>?<_EOcKcf^S(pig&7J~UJB43ZuXw+`7d0WTApPwQB`lSczO;^ zElo3MNo3Fz9+>IkI3q>#3JrCua1AI^+BUJ)87Ioh~V*g$e90qPfY3j4!I zwzbor=L;i={bUo4v%FX}^}H?1G#cYHHVb1(9;{*hEu6|sA*QBO2mdpIo4vW>k{Jhq znAqBt%m;tjfIGaI7Sox(7?V2uqqgLvXzUCchwnxX=y)j%2uaOf_A^RrPrE=hhb7(nWyqkb;h11YlHO3ugI6->u^$ZVEN2}{@%>4WW( zL9hu5I~^T@C`ZpFX>kN3NlTt^EL5U=f51mKj@T~Gq<`w-=oP8Uhcly#xnaB9Ol7sM z7@J6F=QdA=S)L?wVMDw=g}Gt7WSq>$q&(M!?UIpM4%JD_4cjGSD7kaTc8#P(V{yTD z&4cW2Ds#hj1+JcL$;=Jg6_^%#Fb8~5py-0_3M3>cjjjrK#p_Us(GlAf*%XqRzwBp} z*4(h&whgu$7_!A<@}XVVak^o<3}@;fq8V0J4ZDChIk%-+ScH_p4cled6vKuC;>b{Q 
z!FHL)CE)_K7s^`JL$dPtq0y`(w#zan1PMP$7XiXU3Oat>uw8~ZwaE;D3pq%*VY|G%g)reO@7e=}8@5YtSFDEZ zGSHN^d0nodgCGSpnKUFLB~xR86bM)PSy5)=)_ z^K-;@S>}Ww;V0=LKzJCUMvN(d;jnL);a6H6Llj2ZoufBXq6V2{!haj{4=Sh*r7f8kf{UT%n-uoFZB0L<%O)`z--oYgXyHIysMA z?2(|fp0g}9L+BUz4Z--vqKGg1G_r*6`sCU|sY1;YK!!;zwx*32zG{&dKsxE8ba#6I zml9%20O=QT#aAdg_P^rOUs2<;u5L6_sWYaa_tV@fPlih_s#pih-3UMMLq5F}!?IY# z1d*dhFU3x+w1w#ci!E;p64}d@OAsAsF8zsu#UAL(Zm7@RrXKXV;~}~^zB5L*ypsZ? zd|D(t{fn>WbE)d3;4ZwV9LTewPW7hU74;COPoI8Wh#}hQA+n3wxqzYKVgp{H;G(xZ z=?_Lj%^;t>wr$79C-%TG#V9-5n@ArW_OE=0%ieO`x_Ntha6XlBypMjHyv*+5r1}~K zyJa(}j$N{kcUF=mcAmJD&MXBn{{*@yJWc7U&#*V8Coe72DO-4=HBRkc+lj#>l2hdw zoEdjF`@P|G(qjK6Q{`RV>0LX6PWnJ=y0bm#wKhh>>3B5gZ1%Q$==P`K#`(@lIke6d zQNm%BDA&bu3OG8Gz-i4{_qd%fj6*RVclgiw=`dzMP$ayDN`n{ z^wGrU(n=S*XR@BkBv+n&W-EE2+1_l|ksfZ|-1M?r=i4S_R;#XU*WlB8;2tlFMMVHv z&5&8`%UR+QtEG=SL{0i^wy=;mY(FXHjxxn_D)4Tya>rrF+Jcc>1B70eT}?zE6@xE6 z)c99@YX`))I{cbyhx}oO7)pmlHt6OB&&?p*^D|-FFkCTWhAua9?FR_N#i^Q3ftWn* zVuIvZ=RnL6xEP3gt*n{pw0A2I6MgJbASOq;$qd;Ti1EWNftU>I7>Jq2pHn966^Pp{ z+dy2}BJJ15A?Hc+qSS)xt%!>wFl6>?tcc0uE`gXl>l}zV0?#Qc_Ns`F*al)*HKE@* zv8JskT&ifIL-4PN$@|wbLpBCt{IE+PCc`=gV&?JZlnHwU;v>5jh_O0$ArKeutH|uv z7>LQ^E`gXl>l}zV0?#Qc_6o#DZ3D5cns5oKL?2Q!HbnRUftYNTmKm}!5aWkk0x=oZ zF%UD4Kc`ICD-a*uwLpy3A+=y#Mf~+Wa9<2nNlOyhv^8O*#nXb*&PfW1G2JMeJv|rjhUwu zk^q~vJRAixf!yRdW#DgS6=1(G!WsB_fE7O9y>qfgQys)3_=SCtlv&SgduwM9H_=MF z)dH@5x4?aim@N;qjLi|*4I(rgzKP>vshmYn?*v>f zmEj(MN%vfjzn1R#u;*NNu+xh#S>M07H=bMHvo+f64cJ#B_AaZ6#owQYt22_v5#5vR z*0eXAjK-Z|cdIucuU@=ko{aF<&Go-(qA}*ti6IBptDsKn)sWcrm$Gq^N3@DAdEK-| zkvEpc&*eSsx^eN2w5n!`xC1p)B;I1Ch$|B_MbZYEDdO(VOp#b3Z&GSp=m7j}coAz= zle69JUZ*?U>`Vszjl#07C5E-MehoFNxpnFP><9nGJnUyij~B3QCyehTN~2G-6Hltx zd{WKk`4?+cHgEm9k;yl5n@_B5-Z*l;H_A+r*u0q{ws|u}V)JH-*yha?sm&*_usJ=y zjg0nEIM_ZAG5^8eg{v|{E5AV$FW!mb)U-c6&zaqU#~8y@1!You??tDdEOa&o-N_`r z94?EWmz~Cx5y%dfg|E-U3$q{ZMXwW$#)|m8bP3w`c@DYV>Mi%&w|aQ_VmRL>J7F(| z^USp4#^Jr1_hS#hkE32qFf>I-7mcl+2>%BW9OUN{@1s7zIh@Wv5))4%Gq=LQ&S+=4 
zy)*4(>`&r09A0f)9aZ^<1g#DzUykzBB>FR^w+%bLeaw{qsx%EYhT{#Sv)qt*HaX~(x}3`SG!D1^oI+MTy%)ZD)_0W zLv%DgF1HAK#iE5#iKN9$N(` zAQ^APnbu*;eveLJOXhV^2GOYa>ZK(m|Z2NkC7c2xr2Sbgqj*F zmsl*?dP#~5( zRLR=WQe*LjOD}`PELC{djtu-s<7@E$ZCNQD@Kg9HWKO(N=7_F@mQeFm+2= zJVyLzpkg+wVQ%a`obz&{iI*%Ru$;UVopCqG72AGcLe{&AM0 zLPg(u5eQQT=oSUc*wcGX@+)uq{=3 zGo5W_n8w(PMlPPaJ=lZrm7UbY)EERW0W2wlj$H~!pVP`3cOr<%=MCRmx zh%&WWCkJE+5K&ckK#q#D19DVkB64*=Dx%D$oE(QCLB!-u$HxKL`GgY{MJw{k$$>?V zgomhjI_{$4>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABB+#WRZeiK?Ch4j+;oUl z%rvqaWe@~jy(;JCA`_8YtYYTo8ml-G#G;%WnT;_%4#?68Cn^ftxyLGwgomhjI_{$4 z>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABJiwptYQcdQ8~mamP#%rhgii@k%`DH zRxwmvV--h&h{+*V@pOC~u%1{&@oV>3#gXt36;H=qR6HG-Rk_6~j;?F05{M9SIm9ZF zl6$NYDal0U7OTiQg>$T82oO;@#447Gd#qxq$VB88s~D=Tv5F%>#N-gGcsf1~Xo^*z zXh5a;aSJUxX!Qdu!eIh)RCI{Y!Vt?P_dt-D$t>+R8c-=mK( zXC@Kf(kd(rMw1EJZ+953gIAW7wAJelJCnEUbjQ8TPH#LOja&JW=+Zy90xr*DT)JT| zJv}eqx%chMl^Z}M3M-}1D$woz_$FfvW{&8Cs1AULa>Q~9pex~VjLR_;)I96BI^P+ z#L3FCiQ`*)*RI-Cwd=L3YWIw^My%nydjD1b`~SbHU3K*W{_humB z_SWQ*6O-Oxbp0F7_D8d|&M*JpTX#JEttk88y}Q44$DXe~b}swe%IChY8{Q6AZI6e8 z&2!zey~(saJ~8c2&h}=5@hIMV|FE~&KRq69^(U>D!mjD*-gZB}eCE?L5-`?tvrv3Jl{mH05Y~MW^%m%&T;O+fMd(sdwAo5Z3SEY!wd@i2R0MCZ8&b#BnZYQ|dE!L>qSDBV&{$jW zVf;$M_`VHTj^c&Z;y=CdXPybgNAGFEhlg&YQ@n-MNz_tE20q+?`~3CsKxT9gVw;OE z;eJd)S#fJOM8{CXX!@uVnai}_-|EDTt2wKUS}!lz&OU&(-BIrhDs@oq@Cfpk!k*3XXod>4{v@MyIjOLiAO#<2z(G5{+P6-pgHxk;34cv2 z#gX{iTGis>JP4tQde;6ts7|(c@VCPUu9K9 zrbD-0Oe(3%Huj@RDy;|CTZ?I*0sk(7SJ+ISb4H1OInS-+U6&H?!iDQP@A?|~z|S_! 
z29>Eoa6l^oB#v5E_?(81G<+%j%~j6u@d#dJqv*EaYgWQ3Jk51}n$~3<{)0u%xTiNm zbxFL0zb2|)IjItI3brge<@Ho|Gt>x9x5p@A_E4-`;D2=@{Z-1)515p(fjk$L2=VUx z8*|jlS+`en!QYjz7bS&WH|<<|vv?_9y!Wn$ZnFgl*5O?iVdPkEbUI#4|39&1zL=n` z0^r#Q?ly?FyIQqk|8{YH{JPOsOhK>%(Wg7vQU|M)D zzRJz-vmbu^8I~V)^BatpqKyyFi)ADK@dmspgvD{ayV%NyvuR#I>hfk??7fLIHd|hz zW^n*`rV+-bX+s~zLr_Z}h7fA16b(6%X5=MJ_*@_X(#Af#DTfiIzPZQ7O1lyt1BKsg zz-MeY<*(&uHLidg>hwH%%5`vF4pl!>W z*z8*iB?)1J+E&6#tW}YOOw4JwX#z=#5=Ll5tR)vFvYPw^J{iGHW-G}0O?H)tYyBNi zDI}kirlzOo6TGI;gnw`DtMcJE7v-EqLqD~?05}}sxa7$qQUJeT#^7>dqFb6Uxcsri zS>swy^e_XtA8M2W|Jn?@5g`d4Xep$^|2+88aqwlU+#&!1MjRt+Oshqwf>Yb}lm`Qp z_`n~S!5XBa5<*}RB6Ezugo3aZ(DDQasFdLETE!a%Con-7g%Ic<_LzkYQqZZ26fm(- z+s!*w{bV)A z>NdpCv&&i$FE1-KBQjXQ3MnAGi7kn-m?ICGkpl8bjSVW6DJ{gRT&nbuuN6{2*%JB` zYst;Y!flX(PS(|P3gVCgrml>1Yo#t5UAlO14WwX&6ksZ97uB?MGWT?89&<)Y&gp~{ zYM?-YJn7V+5%Ew zocwCr4lCf|6W{q~+W-poJ>Ubd!3sJKSOJlXamPd+KC+D?^Ti4}nIiH%1xBoZ3c(s8 zpbE0uRdBc}R^XB#$sh&@fW?5dID{2eKnCT~Q`lVvWC(eG9dK6x8A!`ZffaChSGAPu zrAR3_atqk(DzM>&Yp^QsDzF&=V+EX`Ol2dpqo4Sg8^}QB$AJu1SOIxbX9xCJfeke) ztiXzpFIGSX@xlrS5uE_-u>u=voGF{zTpN-pR^YYZO$ITxxi-5B2r-RFEwKU%N_JR* z6`^4EXowqNgB6e&bB%~(CDJQsmpTkt`_Cp}-7b5)Hg2=4fbdosD$f*jSZ%y ztg_$m603YQ(_|Gky9y{dzH)c&YYK0Zx;W!ud zn03h;#eUy<@)97zYF9z7lB!q%e$0#&1SQgTp26CEox%iS3trH?xH>L03FU?g%uojzzT?5%vb?q1BFICHyBv$D>z&U zE1*IsCTXh^E3Dv1Rjhz%ueANV)4l>S8w03dg%uE5n);#5yn(>*rF>h!k0)t={Y*Gs z3ar4T^daJs;~oL`6_9~+df^(Z%2p&= z0)vpO-at4~16F`(&J8QT!*MR^(gNnQE_vhJv4UJBRj~s6m>Da`$=8n+5A*YL^vU^*%RMu zw)_Q%Fe#@O79#GX!vEL^X!aCc2FrHDgKCLd6|9QNQZ zUVUc~!5=o@u25kBqYwct4ZY$JkQ%aEgEUBJiTpzgd`AZI1+*@VA9hLnMJG zA~xHbwlYNUnFbuGW}Ph#LpgzSnbKP0=F~>Uf30tp7VF8ihT=?TSai;IM?t9 z-?YzPRo|+>O?P5zXn8xFN~~brvhR*8ArO?UueVg+hIm?|ZP3jRWT=@<#WHO$#kK&(hADledpY%tY{rEuXS4l;ICEUdl_=bmq zHCuhcJ0t!`ZY2-BDY`ae);%#9_qH~B(^-1Fbo;6P;MD1v{6JnsCn8y|Us!m@>)}1Z z(F;x!;qQ)Mwn#KvSC`JpUxLN8^n~jJy47T7bF+^w zCRx09eBU;FMhJOIMvsl}7>;|hHn~P4IsQZ~^HMk(UzRBZn6CD5i*`zG?jiRYaHl*g zKVGsbKI|!XaPokPl`{>tN zIS|WC_7Z+~IC!#uA?xXnBH*r*vJ%{Q)SqUYwX=j;H-F{&gpJ{v$_D<6A9r(!ht6Jx 
zYR12aV6C!YQticycMOKZ#9RMf6fzl8S8H7cFc@w1pDv0kMOcNOY`~3`c|Y18_BYWf z_4uo0CCGX;j!iz%Ao0*xAt#ko-qC=5WvS4frknROZD}z|{+MmWy>OlWe@C!Yw+NVm zytYEAT*F&-O#h-$p8x{(Q?&_>?rW*ir~Op~n{`=Dxr?if_p;G6_++UH9H>b^SC168 zgjy8>mVv|AJk})+_^Bp55P&P1GD*SX1tiqPZ8EcKq}hDtufZZGRdKgJX#`fxJZiP` zBw$&K^^VuGZmn$3;FH3(2S>N24v^Q3H8@ogmL{$%Z84Ic z%I{n7Rif=NMiCVYpp&z!_w)1cNyTQv>Uh$3g%vj&A!A%AfOR-@B0}S&$m@Fa>EA~1 zYAe&u2J5a~xJ5tW#16DWKp^6y0v?zmhn0k+#FKPDVBu-jIQpGv1#^bB_g>IA;IG+Q zSk6tnJO3u()-`RM(GoGFVxuBgDJ)$Cu+#jU09jcf!0)G47Xlx#b&!k~0>V~TLM4Hj zRq(qJzadO%2(&^WC3+UJ>MFG*Yo=^Hro$wEuR2F_$)hskvaK@FDh9WDZ`M*?M{=9} z%uiw6CRLxpsZy08DO^x4{k|@2%u>!!z!mLMQI&NLYTni3qS?v*x$RQg*5GWs`+Cb2qbe(>z=!1ZeeDNAh3|M7yi-D^1yIko#=0NH)i#*ufMPCqwew$IF*$9}g-WQLlw%_38TnkHW+8L#6iteElA?qsp1^p(dOR zZ@^`3A?GjjRZDP5=B2SyRAZ0Bqw&KvX{@`d)Y(6X;687ip~?%VSawSdTcc=tN=eZK z|1je37I(5+eo_?wlZraF6P$EK;pgq^-NEa{BEC+!M~+ zP%n#-h=m%^wKU65POW^R|4A>-a~}2gzTFntdfGeI+a6fA1ocib4R;(RWgNt z9ECKyd;6#2Ci=fBh>L1!loR+~1V_WUMjkCtilF#VK%J({bFZ}FXe;Se^YY3V7=E-- z2lt(V`#NM>O$LgXSXjUZg9b->Cc=F3L=E!oP|>Ko2T(opu2Bhs5;5Y;)JqoF+_n7S2HYRg2#*bheRSC)`v1f~G%)NE zqw&uS3{mVNG$5$Tc(hyL7%KH<1GGYc-YZ4#lD~9e$7Nnt)4uku*p->S z;hX+T@7W6@Gj^(l69+e*q%Of~&P|c{E zf=!j3|48-R364&{DV;gv+2IW;0FFc_n#nd@x#yj>kfH5Xb@)^W)uXrEC91dE;$1q! 
zpkShkAq2@iJRcX#3I&~(w0o{^g@TTYUD@9yc!H>IbXb@m=Fxk>gk@LNFzfr1QPm+(iCSm6d(lc40Jen91+I-Kb0NRJ|YB19QM=a^SK zMHvxLoJd6hvI(6k&L*@*RC7}2L=dmy$S{LfQmKaldFd>3_e27v&3ZtgW~vGR^3v&ME{YPS z!bufv9RvY+=_Eu&(0*TP;dH=L;YJ79px`)th0MH zm2bYl7aH(rT?ToEbngB_&8U(GQK*|xW!u?+rIjiROZy;O{}vg4GKxfm%ekZlDC zpF@Y))@Yh(%ZAjR)M@_p5$7&^u@>ofYBWg-Pt~imWWM>+yE}d^#8!Ak_{%H4qEoKT zmQ!G>Hob;{x&C{N;etVUYLf!Wmfh84Tvx=CR)_2cKQ|Z%zSRK#ZHRjNX0f2!#E+37 z|NZ$2Rtz|ibp-#PQGN9q-GN@eLTQy`4>h2D%%|&e4zXp=0kp?zD_qm9?r*7$dY%{H zPn)n^ugJ7JDQk14oWEpHn*>FJDC~7YckZ2rlkrxVoqLL)k@IQqBmcDtlX)wg1LtZh zKvIV}AL<@ab#K9m_${@n8}Vqb>YuJq%9{~5jC$p8+?{A+SiG0{A(?v=L72wgt=_EH zJvpA7>7k>%QAED$q1#%TyQip&!p9r%7F#c{6^$4WDrX4WfmpG>t>*Yvj^6NRP5Ufs z%aRJXVnF@58(7k_mTfgQi&G@1LAEaFobj0^ytN*0DVvvDuCP@A>pJsPuTPF};7u0< z;qwjI{ZHxkPkag!t7*;H_|`d`6@-iWtc!3B`CL<&zA8M-<*L7RbjrbAv^jM&-5yW- z-ClNe5BIAR=vNW^X2X8hwTAJsT#-vG;ol~&R4WIaxq@bwUx7F`4HOZ~a0hK!h{6?$ zsSp?7XDVP}mO@nIW!Yt80h}OtbBL@VGXnT@6Hc-E*KL@tB%5*+X7aA&uDB3lvTtoG zu_DW@r)h!y1Uwc$QmH_<931P*wlrUdPc>mMCslF@)%xv)N_-!!=HF$9kjFSzonGzOxSZ^9l<^w6{bf*Wu4A96TtO5LXE> z1svahTEFQdA0!nyuZDux%8lka{C(%vo7e@%wIYQ*L(kBInz*tS2i1lfD{~NHc=>R5+@{EILIV^i5Y~?Nm6} z)jaE*{3?8;!X9=r+4@tUL@xGQ*lih*b!_1?V3`&B42X_mwLZ$qmg{oM%_Ts|6d9n0c=lvc4w#H& z3v(2P89H^zm&bZ0i7uCKd-3bZ_#v># zd-h~9lIXo+-=2&otMZy~DX{GRo`t=PfeS1HmU&LE$MEXOWLz5(pjS`EkC!v#+mrD` zMMtEuvK6`~=uti$zWRbRB0tn4h5o07=8 zCzA=XN>8>LpeM5wd10v47sbk=Tku{d<3%UBT#%lO9|Bi>o;{h2WPqNGC#&+Ba4E3d zpLq6UG6t9oNc}f7*vOBI*|J_=@b1a@(N$^-w@${7N7|SXI?--IGb#uCpf-24wBpuO|}-i%wbSC3N2<49sw(>jS-e zvenvpGJzqLGn;#|)w%R!ANAb3C=(>plfA;QLo+}}W+`w}sl#_&=>ntd1vKgi-W?e) zIMLyPbY%PxK{_%S$p9T0PgdnK;Syl*j!eb?lmBY#$oO&R-I4L5tJD^*Bjd*l)RFN- zMMtC#AAFUJAD1v=3RCb?6|TjR>>A7huCgDq3)f!V7|*uKW~@;Ksvr2X2#&GY#{Tu! 
z>@^t6Ud;OD0Xx~#={qcw!QW}vU%@AvPF`$bYY+`T&p;3Qyy~TtX>y+z+4B2J@T*Px zL*y)FQ7+_}1Qgf~l1ID*qSa(!V?Fxf(L?ZHeAM^FBS|c=_FMRX-wS`dX{o{pmwIZ6 z4)ammDXO@~;nDbEZ^d!;V%u|l(lhgvi!Qn|(fYVRXV>MgM3<3q*p=8UHx06dFY18s zKEKxvBuy*RYhA@=zwwD_e{!}r8;nQxH^9im!kLQ7&oos_6WvZISs>ImS3lN(H?!PS z&E>4ADH5As24V-m-}?cJQq96YYq}mohD#cDmMYbS%4zIaxkMiJ%4sY?QHQo^rb%6| z6*~8^)5G7xKk<8ikFx=ly10zy^ZG6~TF%UBQlrZ1>Vc(}n~ zjy~0&vFwKq zE5Kj)gATD^y3DakS~Xq z!k)Bn?hQu$Nk$M|1&qc(*Ty#1761iyuE)fIofZ47)9K*UC|<%}6FtX#RxcCdbeM9x zgU-N0N_%oJI+Y=O)F^$~z5@0m#SYMkll^YaE&>o$mv{YY1S?uqwkLgbf@X1LkzNc% zl=EdnHDR^8bmr?WPd)P^-tLl5nR6Fk!D-Fj)fqMJLd0(8F8QZb?m~z?|J;Sl+|FI{ zPpjNAm>a>DrB^Sr9?vHVT(0FE$gNwwGwr)^_8Gh#Wg_%%JS7?EW}1Q8a4^2kkbzjj z{9Wr&yo@SyT%9kXtJW8IBUNu(%N~JmMesV79~^tx-xxULypOpqJ~E|lS<>?%T9G^3-Q2D z+C^OOkc5fnuSOubg`1UvL8;xyq@K#vR+a5qpWca0zNNc-!Y2n0Rp~9>F&GYI>%_i76U-!$95C-Y1YVe-K)#pIF9R5iw)#(ZXXnxd zi$hHf7`2|3v+y04mX_BCmzpFiGF}{&%1;5_%D7-%ONYNK$% z)ba!*$x2>ks!;|kN!=Qgk0JqG+gTlf6qpp!mao){eD1@U(ZvuNC^_1=QrJLpC;{rK z6vl*hZJO~5?=K_0@f8~}LgCX+kjZuMN0Dr6r$5hJ67$lUJU{V~M3ETbC!27T<;AM0 z=WSW0(HN((c^FIba1HZspWjsW-y*oxn=3Avc^O#v%q78f;4d0*mp9X5I`bD}Qip#i zx~KD&(!LitpyQ=5AS5+^+0QAhJ?jG1Ans`&T25)CJ8@X1$_WPnYrgIR*07v$L4?W2 zk*4j-_L!n}gWJsHA&64=DdD!?aDi)BHpQ?}9Y$cOIlwjS<7I17go|omOW{oJx`1!H z05%+RLXhy2bO@ewmYf0@)z^Yqe$n@(diMee4$nC@%rxxj)_@JRIoI&;ZO1m5VN?td zP7NDF&#g^n2$Zc#g0BW~4|(^k(J!Dj(WyaZ*%QKque^guLdUZkwo6ZYldt=#8e^Ag zo(y%Kv0X9^1^$T}FA^ZV`1EOu-LPE)Z+TPfumoTrESiZ6wrd&!H^im}#S8l-Q)rd2 zUq@`05X@|it7N?lt5lDA1G%zx*bUp|SPqc1IC?Hgiz6UOT45|ko}*0GT1JQ+v0a`? 
z|J23ND^i!2)f$A6CT$0}j+?2h))iwD3GGZT86Mjuvohp5M{Ji+&%%n7XAawCI90xe z;)Gz6c}QMm$`&Zyuw6n`$(=j4Ya}fiiwm}E9wY#^O9m;YD&rc88@5XZ%(cQLv$^CW zFAH_Vb_J%zbmo9B3KU(iU4g`fpVHj0U6D;8srk!(PHD{z+wIha?P9{`5!+=rtw#%?7h7AYAk)h^-?J|!`!bQ0XEnLGxvhtExqgh96mt{@} z5`K~nA_X14ZrCovudMbX^{t2ILhRCY!*)5|xbPs^4CTS0(d+rm3pypbVY?ioVt{bK z8aaAyZ88gfmR1acW4n05)z_6Av0Y9+;wFAydDk8&+^}7GyJG#=E*Z+u@3LN{ZMq%z z?UHF^O-eb_@z^d`a45DXIW-+*7OJ+`4cjGD6}-7)y9UCgme^t6u4#w>4KW$Qp&@qL zw@U`h^ueVn&ae!qm#ms~LD|S`TBXb;!-l~P+a-fw6BOQJcL<^!J(r}#5h(k65tqG0 zDlcR$6f2xCRXRFpFeDuOOa?O4dB%3hG!*#foUmOoi?G-(p{n4`9osb!F15rC*sf`a z01Yu2Vs6+j8SuiucFAmN#dgUcp|D+!o=ei=2v{er>ZoiH;9c5hGD}BnmpqQ8?w`6i zdZzX@5a(|DcDb3#YF#llk=oZsOTki@+nEezp1rYMGK3;O@r}1)(bMrv1~PPJ%QB6+ zW4lJuqOrJOyXHa8Zz^-cb_K4uzDokGczKryue4-qkGu&Q;^Ot>?v}H<;j@a%I8*JAFzQKewkS*_5I%vub z+hsW8f`~OxEr~WBf+)or5o315b{RIsu;G9>GSpnKUFPwM;le|*l_6tCY?ozD2oiph zE&_yy6m$;cwr`i=S6UtA*GhTJ`TSwK9B*8BF#4%=+>&jTau*i3VY?ioVt{bK8aaAy zZL%KQ)zuX4(o<9vI;U~Nb~*V3*tg5kt36Pl!)lIKIZ=$eBu;ziF6oBX=^?u}z>&CP zIAm8spKporO+e}Kl~GG_I={YRjNmUTp1UELPgXYSBZ#D{i1;XCn5Qg~cncEV)r21k zqHJMfWu=o$R1!^UvctlKyalL;DHggE_gTKP0e1({giw|Q(L_jz8Ds-!ZAo%>E-M=T zyl<8D=;{@&{x?jhb1rpr>eg8xe5TnzG`cc~7lBZ{R*f>*k>L(RTe+f=%{ zNKH1W%^tNISN!!S?}`le4)1A%$qC2-g;*3wAM=bx=vuR5Z0s=|^~+fit#ZqaFsX%n z@9mVdPtqz#UFAlAt~IN4U7egqF7|K%Rx`G&s__*`DQXEQO9bEZ$+d-2g_FlfkYD4;2l9Cd%1E6qJv;DL>H)F zAiJSHdz*UL?@dPN=J@Ue-SSQfkn;Of@a!+XmWL)(YwkUGX*rN*L!Ic)daLRoPM$nz zc?-IJh>m&)`-|2$;pGZ0`rFgNa6Hls^4aS;cB1;kF1S}Q%Fgx{(npv5E8pR=mtePV z-QFIaOJyAIq2DI2w0k(IzDB`r*-WZq#q8tV)ntX8CoZKkOF_&(g)RzDQ@ZLi>`m#( zE6Q}r7M^I0Q~TFWVlav1M0p0MCcUjee>9u6*}utDc~5V4&(5%$KG2@+Y!CbG&GBe9 z84tT#{p~)wz-hF3uDe=}Zwo~Pae%>dU9@7@mx52D{&a>eW?xOHpGXt%T^ zaK3U=W}@nrBD*?)$5R*J)4Sk)FN;M*09nnDS?#qfaf#K^#~q?3eKuQINF27G6mv(J z;sq6Ww^+I3Fl24PNUoNlS7v~E+)QSKMYwj^qDFKGzwTQ*AimY%*Hk;?4?DzAIxMn5 zH!pZ$2I0P+3EPI@(ki-N`H*W>$A);SP!1YS^9>{SsTK3{>D=;J&EV)FjA%#e+N z7(eV1h{>>yftY#x1!clsf%x$ACJ=KB{u#SP4dHe-s!d`*+h;<;Q2(iO5 zMiv0K&PHbLot;)Co6JWwp5k=)LHywYwnGKtir?+8KV1!dWT 
zvP5ifV@^f2fya?HLVIrng)i;`$%X6=hK>PQS**T}m9NIkQwm9d%~~Fgf|)>W@`5t( zx3UVbUl`#Ge8X~uPlw9+?gNt5H|n?^!7uEAq|AC|+h=x$aTBeyTP@(~cMBYQLNSJM z`I{3k??1`$FjPt8Cw`gS<4hOtH4&zHpC`G}{5%QLg(b@HoBU>o2nL@c0ZoI=kwDkQ z7NEpejs#B#Hb;W$1e_x&wS&!(G%v**i*;Jx;HBCr_pJzCrU)RyE^^7om@QI?Ce(%F zLcSGK!JDLXfnVCA+!=^sa(7i5mARc>x~SYqE=J`6+9`InH<`Ap@5l-s@x)yQ$;GJb zqL}W*sGMzZZK(ndMGriqb$UVi_i3Xtid0KDJ|~K}oChoAZqh8=NkZg#3fAzzsi%e&)+ja_HPTm2#XYQ%0;IEO!2gsU@>M-koQo%XCh znvN&kQSVHDN?yHq*CH9=ubLZw*+gS3qVqoXZB#*>HmV`98?RvFB#&qnUGj!$jUsO> zji1YV+70949cfj~6mbV?rbxWSOc7TmW{RW@G*iUgotYxBMBb#-xX?cMVR$KPRnybG z?S8j6+Uib+gU!OSt|NxEvT+SHtA!2e|Lh0<#v<%xMvs@UZ6}QHBub-Cv=dLN*nCpW z=J^+ER5ow@x{=8@a+^=AZQeL?zSqf2k=VSMBDQ%mMPl=2irD7O6sgT8u(UNhw~dVU zGT7hQ7cu|A--N3&Lo2^Q6ffV6;?!&~JI9&bz9$&NR0U;Hp6*6REiH97hrQ`Ez8nt3 zmu81GWdyQ=W#PF+cuDr-1L$?4@k9~7S1v*OKF=bzTf6Q42iFd@E{5}MvJ>`VIL}Nw zZXDXJd98IH`~>RN1VdAVbkW%AiSU0I!G3-|@gC{}oWtq-BQfzLGIKlZ?~Zq7+dH#v z#{M+!z@fFq)lrpyNYLtl@|7r0T>>y!DeVZNDL2634pdMa!KE$@B0y;DC7JFpi%gea zFYthL@qm8bvnQFCr6#tdR&Ju35*@4 zjF~EryecoTc95*R#U%^)eQMBN?nVlbzwU{Iqf8FPOu zF!vhDoP9kgviBOr)buVQd3TR-TbB)TB&(M+cmrk9WTK>$6r+Mq`{jglSU2Zt%JzoOyjlv|NO|L!0 zkGZ z{LhEPU#tpGbcCM|3qKzbetwPc^9{n!Hwr)BB>a3cy!Ucg9E=9DoSnnm5(|&QNOluV zr-M_Y<6)0zZBvo>~@zXqYOZz1DUF63*;Lz~O>?-pm7WHS)ywb8^L71(;nWrjL;w8M%Y~poE$lE0w_ahMNFrkIZ#A%-;~p}VU~2+e{aRLsdiXL%EWS7UQ0abb-FQ6%bOK5m2<)_i1C zglTS0E;1tV+tZF1GJY~D@&hv?7dzqz6?1Zt5y?Qzj9k>ukSXTlMktUS6{(W7qm{<; zORoEWzw??;U)xwn|9k)c@x^_6e&p~E|I%~sfA-cN{Po{kh9A2!{?Zk&22FG#R=fn! 
z(<{-j^y@kFH9=qBj=rwE1kf)R)1{lWY~^PE8A8VNY zv37P-`meF@;0xQ=P6TcEJ?;MC4&Ba|y%X{|jyJY`?`0^KBwQ?2gWslri zJTV>*6Lgf^%GJX0a`Z3oG~}D9Qf4RplX$FjrrG9!bQ0)?9NngWUqQmig}8z`BlOWh zc3xrzf5(J+q?%Cl(@_NfC+yz9uQsrwrQD|(1arTH2Sz_waDNc?qDl0+C&!aBy;;08 zolVfen#mHX;O~0qwzd7R4}XXL|F*jxLgVkv$G;OvJ3avW&|uvYz0D`LC*$q<$A18> z+?b4eTbsS2I=c(%-CHN#&n34mH=7V6oF^ z?tz;CU;dO_3Pg0fvZb6Ablx`J4YGu{a;#he_%GNMU;}o&;cYv;VaaWFA-B02R=$r8 zxON4^oy!WZ*|+zS-4MSQY4izLykmeGz!(;{24|)#FhIv9KHW`Sja4zSYBD**31dkw z2lk1~MLF(X2fOb?$5Ema5>>MleH**Tp8W@qcfj>P7hluq3`S_dCKak;Iy6o`t?Nm1oBs8BU?M0>GP?Pqw0M$-5#S89DCII7x+J%VE*u;LYRADPiJ>3%;s_o zuY>f>m=bgP$aNqKU8(%9hvvQgQ{?e9;H7~NO@sLA+CG5JayA-@c7UuV-ChW{!QT6a zz0LmV@o=j@DccTjDl1pOF6`&{OR|_Qb0K%SskV|?bp(IEA}RK(fGQh`a#adF!3 zZ^g~QXqGgtURb#deH#6uQ~c&C&Oq0eVX3ot`TnS}a-iTRxBGV^=5K-B5A=J(+XrWf z0Mf4(ksr$s9!;Oed?AC#odgMv98|xubS(R;%3M$!%s+uWinH7?6zh;kFiYDgk`A7} zdN)cI``E2j=r>m&L`B#ZbM@}z0PMz}n|AV5LKM@(OZaVSjTLUjlp3riZP;Y%m^O4XFlF7fHYNQ#(+(+WoNWXv#F_m&ic# z@V}f!=9E7+TD%nKuO$+{6Am0j@oduHx_xl+WPgH=Z|y4$L**fFZW>9M&|xkD zu7-@7L?#E(UwWg{gclkhie$0|u-sWnc|hS|4VKf7XW21#r8FgNaXzJZb~ByCkke;; z73@JfuV({vwl2yI$LRMw-+VqOJ|*F23UItP8T3vJ(H0jJW3u`MXSK9m+Rw)+A>*O2 zn40RWHyEY)Brg^k@xscB(OUcI)N-=zj_jY1XSJ4AuV3w4f2i}~v-q2Xlg-Y-!v|0G zZrHl%YUS@0l!$#U+BXQVa??it%t$*0x7p|`0v004RcgQcn2TGN0}MeqhziEdn-8E6hl z|ED+p%rk*_iBx;mLh;din&_r{Ha_CxmfHFFa0Bl1*9TuA@dZ=DUBKd|@2qm*V~TnS z2k&qIz?B#(!pQPj(^HzbkV6FFqm2+&JVp68O~OeCkLW!egv?+>q~*0MMe#Znd^(8W zP>RKGb%r+G5F$ic4I)B=fy1vgfIF<)$+O}VQVPK}Qb`apf$YyDyw68qT8__garLD@ zhB4CRbGF_>C74hADcv+C+>e;wozMWywnDCGzzDtd?u1+%`~mS!HW$P@Uy))P~~MVbn3U3O#F zdVUwO>vl}|cM*S)kj$>q>2uCl@(2QH#7k@G8(Zsq!C=_wBD$(GFF)Hb8&swW!2#{| z$2w|Ru@A-|6Tw#sOLZ2`$0K-^m7NN{W+fc|`RNQF{)0u%AiygAny7kZ-FsD~kdr17 z7Klyn(6Q{Ov(2W(vnR6qcgQR4DMLSCQbxX%5R)|x;~Pi$(UOV1nhUjEDXethbq#A~ z;awJCBtwcayeP3{zL=n`*x}iTzhPSv(9oJ>#rB;E(b^`Pzp|e8`Du@z_4zIv>d0i% zP;F}HtT@xQ@Xvnu@n>2-)8s?Hsv8N$OVP$NI#PlMt#YrA7{0}eVfc6h-ehoQE4^r2 zdG6MDFPm;j`*UBF(+-{2zq%}3PwNDDrV+-bX+s~zLr_Z}h7j9Sd5Ku%NS_NNK-$=c 
zH{~#*HydzU0Dr~}ei$=CEq)k6!R*l_#4ko*+?Gx<8`oP1&2<(gD#a98 z_wQvdVz&byYk=wU$0Fbhy@s_=k`Oj1d6A$fsl0?z6$QZI9;JN~NK!~nXhf_f7l=cX zg?}=Fo6KRN{?^~+O(MLf7XH1tuR`i5wf|hDWoTr@=H7j_Ezu9rFY;|8IqCqvV8-Bb ze)2u3))-v=SmLblz%YQj7~x-=VK*Wq!2?aOTmG1?l`nkLgM%+yO)X z6PTckLI~DS1?9#HDWLn$S@gF0LcbRPK<2tc(}?8+Q<1^J1}PvP)(OxaDX^i&nXxa0Vu|Wz5F^xzqkpc@! z?Q}DM5h<`Dl4k;-00~ZW*K&ESj6p#;#Es3$1BM+L90`f_X4TcFa ziY~{{N=s0RH0>La0?L-qr&voa5YqMEU%h0DF>aH7{jP!f11qE;=R)v6OCbf}OUJ=iEsz3oNqL;Vn`K;QM@^?4R=~xlg4eK{3cv;{pto2T zy9q3Uq%Bs!*Z^8!> zo7-F)lHOPW8N}G;+F%8Qm`0?QSb+s4JFLKp5VA*uOBN~DW!0HX0&K7X@?oJ7VP4Hl z-v_v0umUn&E3ANgxH48i9yDVGR^F8+VSOIQP&HD;Sm}#fd zDb#u%u!6%5`wEC$jBi(vTMv0IJwXF2tl)4Ztbhu^8X}-Nv%(7Kd1S0h>#za_snz!L zb-aN4j^sDq{sU zBVeo`59Lj#1#L1+s%AHQfPDkPYF_~vQYSlmtiXnv6;@zH$QLUhgLq*DgosXn_E>=p zHO`dHSb+^mZ>)d}VrnJ81}h-MG$OUc3M?quVFgx%>c$GlB&@Ik@?m`=a=~B)WV%*Z z0r_xctbjad#tO(MH8x1gBEMKKU%WJdPN;eoUZl3#S3ubkIu#>UKpr$Xco?Br?JLNS z;$_RsII;1<=46_7A<{;`51 z4p;$^iy13mY@o2zd0+)cDq#gw2*o6=U-UqRbg%j)vitBAkZrcpgC#+xV00a4n zgXxP15&GBizon3g!CFKD&~b`y4925&o58#P-p@S~mNR7^ADF8GL@2+UfVjG4URF!v@?HDhyy0BA}(AR~!OT3v0I~!G2A{Z#bn6NP>+-@R8U$B5~vbQR14UIj9fxKSU&g-w;l)MN$}y+GqN`QFn?jJn8l)lkuco zzW$_qBT7l^o$*B}QxLWAN?> z?qKcLZ88XDdXn6gzzwXBK`SN`-m|eAO?Y7~T1byc_Lxd`(b$xT`r$!(SbEb=~YMY`0ubthF*gjXcrexU+EdVeYQiI&6KeKa)5??XC{IRNE@XMQ3#AsF$ z>Z4z4i_^T!xmv!$$AP#Kv3himSc|X&Z)>`C`*;d>O*XjRv1Y32B zfGNmpE0oHGK9_sdp7Adl^$8$gKUJIHXwg-rPy4F~HtVvQau-({?`5O0><&?rfNqT* z4mQtq(LM9iwpAfu88|de8g3rz5(oTL6CMb_6-}9>;PCf$z;*)`H^KJ(Y04*e+d zmEel-Cyl_0nMbX5o&@YuA%0TW_TcE&)B*CEu?D9~!qPO>mDcdd-)g{ye+}6u>$hIuUH(e(2M` zjo{T*rkxGeUA=INe#D6#XorA6#76}@Fhve43CR-?w8qizM4L5cXnSw%I3R<+C;>ky z*uJm;zh-M;IXCg{{F{VZ*R*j)OT>_hO@%BJK@Tkg*lGTG0LXsC)AK^{Dv^4A3bknNu!cdj!_E!uRV&NyqD+n|2T7MXa=9v0Zq1Uj*0a2D@W0 z9Ofa;%z9!sZz|80eE|Eph<_^wtFsV&nFq5tH^HSqGaOX9uDe@*_2 zgda&#YywJt0jHF=c}2e`*;L^k)m2#}aHsg&4Y((qxuISbBgf&zoYd_Vs_12hR>Q9Z zwk^De2w@4?H%V9}1$TAgV^M1#EFHsh*X>RF$5BYLySINTZleFIg19KvP0}WOFM^}t zTqBPbC`C|wD49O;<|^T`u6 z$hTuQ8#Q^nyPW&0F9u@ArCFdd~|h^>gq9gBz|)}y3&9dKOe!v 
z-d>e0`e8UDj{)QjT)Y%pepSa3Qh?8f$Q$M9rF9$j$T@_Rq&@)`K5y5k1VM=yac1fz z3vBLM{%`~C4{3zQ2E#tO>=FHc;vX6qc8SsWX9k8Sb`iA3cMMf>vjJLwm}W1R(rQ7~ z)4V@|hk{5gg>~9e$Cbalo1P1qoejzC6w~g-OwY~_BQtiYg%bxio}@0pY7Tsk;>L2Z z1DCH6;bS2%=hWyj0Uk}yMTmE0U+iC#cH8-nRDW+cD`h-8yg>!Pk$4fOEB7j|w%(8a zsSv71Z@EiUZ@0xe(8(ALOllp-rhNRd_dvye# z2vJ7RIp!5lQAPw5CsI*>Y(l4+xi|4eaaF=Plb{7df0&HUDg6e$S%JP-zRgm~BS~OK zE(ByPlHUm`k~I70FPnrGbO4B#YEJ5$2;x;bo}J@{0eR^xbN567rOkRkp=PQI0P@o5 zW-eMJOwmbIO&1jB*6bO_P|2LO3Rnh50mXb6y((p7W3 z6jGFr8q|beL_2l&VT`C>Rvm*!-a#Iv$NRAy8jRwWK@>>C}Sy(s3y_N{k-8 zbojV`Q;yI4ynS%;WPgITg!UyJCik|>s6TsZJV6HuZjQJ6ZSG*&{8ZE;g9`@;=&%R) zP!wR3S*bc`d|s^``$g)W22HkzG`&_~!09e4zo4)@6`qNayY^1g)?W|J59` zkZBONZZiH=jiROZy;O{}vg4GKxfrRf*l6s8&o%1HR>(BdmJO*rsnh)HBhFp;Vk=wt zPK_o>;i=kEOQy7BzWLLx2Uhpc@LY(k@QU!4SA0dMT$?SYz*cQ~4Fhxi_Zq_mgYeWQ z1#I2px{|T9Iz;FX&1-Hj5PYiv{@W1s_RV5JwTT}iL;m~o6|4w#tB4a>NAUj{)mN|4 z9q9Edv@kns&e1;R({(wA*s|vU+GDj9uIX0yx70?Rpfi8kgzb7orrk+dn=|G7C4<@| zC>lgzuM@g+?=+l@x5Dh)Qv{8iPiyoK{%aE^^Hw+q&ec|cqz-dF)IFl=-hvbHTWVD| z;?Z8!KV6}eHzRNu^~&M6JJH6lcrWuqGWRG__?zPPA~pIJ!N(i$7F#c{6^$4WDrX4W zfmpG>t>*Yvj^6NRP5Ufs%aRJXVnF@58(7k_mTfgQi&G>D8@`}(#%G%F)_T09Y+i1; z!d3yS>&!vDnh;c|-Y0)J@C~1D$nJkiw}0YOm{?6~=;K@Ga8?j5>a#AwHRN+mVfw1@ zkaR338~Ccfb#$VEaMfP~zuB1gdmE08+q>24CbUtE}>e#y--6QSv##)@@((kgqYMFeO*oOOgg@r;1 zmYCc+{CS0g2jvptDgmZ|cVd>eM=UP%CR~(n$C)JQP3Z5ebQmC5ro4Q+AP9aK&+H%KBhQ3pulPU9SS^I@y5g zr3;V8j|R{SXE_W>hcPj4L6ImZa z^a=m6!tu27S(XJ^rKvldRSC&FL|z0JP9QdoounyVfcI25s>CchMIQ7`S7q%~IM~%Z z>zw>5e5Ar2b~D-fQ=mjH_FLF(8IW~s;S*b#73r>Xl$9;l<(8XEfYt}+n#Uv7 zS78IYnW*T9hDNEVjJ=M}HP4GM3qR|b`*6s0Z_M(LZN2T(%$myGi-B)pKm`cyHCzDCjyC)N7tFq8gJ(-LL&z?-m!8|gOj2HWi zSo=hMtI%N?sLic3Czzj#V_GE|KV3YUk$z&wad&Ryz z8BbQ_HQ`cV+5J5Wdl>`s$$({6>@%QmPbTBqkN~}UGJd?A^VK=2&gaS!Sz{t+#?%9P z_hdNay7y#x$oA^Vc-B?!UPSD<^<%y3j|Pu8ieCleS_IkUMZ>pU-dGMON&^kl06dNNCqm)lv>zDL>e7&awX8xo*bPsWcI zs3+rzijGJfKKMEqKQ3XW_MQxfT=$+V57}Nl8PB@P-3!s%7LeY17gy)elgTu2UiU5v zT#GIcWSvYHgQ@g9dor0cy?ZiYwkit^m5^jSc=lvc4w#H&>r#aihE82FroDSIDcgpW z^TqTo{4A7DBWK}*B 
zE&-O?7|)JO#sHK5YU{}OaWPx->&W=gRcZ^jbCDm9G%<5hUFcOZIWxwN?58SRiz8W_ z%L1;lAF~VBUfmeaw#sITqQbUxgg=Ym7@KYEUw_TsZn5metZyE$lYPOb!|K@IY1m)E zC!0=QP+@COWS|FqUiDJSG`Y`(LjF z9)btsqrNX5Nn(k8?)iY<3xB+6slo`CdTNOd^HJO>s<_AD(fDC+#c}q+^i{e(4?gLc zdCEl>-I-{8T%fb-@>inE$OPV%*yNjv?3FQipWkZ-lBSjEwXR~b-}uC|KRMf*4aOt; z8(?H&;Y>y4XGMsqv<0z=&ijz9d%=%2;LR*IRdYFOYKp|>mx0&;@b`YeqExf+&zi2s zkl~Vsoux{3p>i5KRxXi;y>c2$P}HGqnrRk>89Mi|)5G7xKk<8ikFx=ly10zy^ZG6~ zTF%UBQlrZ1>Vc(*(I;5Q<8nRd!C z9PdpAy%WR!@s1VAG(}BM_a^y>tt#7-J~~0OxUxtuh9b)OYELy`wYzkxxyw_}{K$ECmwd{c zyJ*e!d^wZGU5ME2+$I0C%3TPt=byWfncKNb{^|PN<;&8mmsyYJGXgHx@($$It=^gT z-8lOU-i|U6`Zu1E48+~69>vS3gvMp`BD!3Cp>;FOL~S@2UuVcf?6|ylvjMlW%vEN$EjX&5@MKrS zvePrjzHD9E+~^@)d0I3X*LNe>VA-iIZ6O}`NxO&_9+EKe{M85~w{WvkFetUVT2|`? z7Bv1eg4eSgRn{x&e%KmU=Bky#n5+Qy(6Jax;JXdD!;@h#aQF&Pc6BW~MF%Sj;vY_t zLK3#%e?*?48aa{>K73^!L_nqh^7|1;?jbj+Hf=xK`ikSlr02bCV?XM77cSM6i;VTimmgT`NWKgcSaKTj>nn7hz!I@JrCbTXk^~KU!CQA5n z!zJ5M7R3PJBd#Q@1-0Z(Z4kY(M|af$d?SK&4r01!UQ>i}rhe$(a~7Zniu6HSLUk%~rD z=`G$d7!GCY#J)lk%p{QhmgM0Y=HEWQsqDW+aH}_0Tr%?t3u4}B z)sY(EFB)){H`8J|^A}@Mhkq#G7(@J(V60&RKYTB8K*vjAKuBu-vJbZ z(>}DE(nxorL0}wKH}Wyr(#91eTfuT*E$IwkECPdq`GZ5^F$V__hmR!!aiW2|r2ejK2sT?h;A?jOuH_EWhac zQoVbD1c&Dw8)h2zgf(DyF_${q($b7|$dkk#__kx4%rGhj2&aaPq36~nGX%<3b>RdF z)FwJL$Siw8nDCW%h+v=#HjdaXJ?%}t?mNfEn9PKAG<{?2hV7DZ6!<4@yhwoL3JwJh zE7=I{3;>hKfg83<9#`;|H^mN100zRMnYduPrXgx+h{>SF3f&FcCFC=GaFuKzWR?t2 z&R^agvLm)jW`s>p*y-pHL^*mcNsA*;F=@Gg7GylP4%-piC4*wA`=>6Bp2<0i)aD~E zM{JjysjSu&W0NGbYtu{}%adf%j@T}FJkz$6=en?6GBTT_)vva=VY_4uC3o)Fu9388 zEH2otd64s)%G|JBfh#VVaaEE9geY#=_X!(zna>g16_^%#Fb8~5py-0_3M4N4l;(!* zifjr=&0qF&N^5S|ZpQ}O<@``IDj7icJF(+%5Y z*c8Kt1LDX~bHR3*$0gySG6)+S=*cDRhV8P<2|>b7(nVeCZarGa#tqwL_?6Xuq`s}i z=dFDZ+^}7ax93NAaKm;vM#TW()Fw07rCXcK5Re23-3#*`?om1Rq-~-Dw#&;~2ot{Y zu02q=?c1fdE1q|3m&}9>AKIqdao;W(2g4wxWiKAv4Y+TYjLY(;Zj0TpT{4D(H+O8; zK)BQrJM7yv4N*%&Oa?X8`HuT`3HeMPT&j|kpDrCP8NjZ?K%yOtsVs6+j8AHjPJGN^iEgFjpwrd{b{H8KDY**llOJ*F# zWC(DQtuIb*+>rU$Wj;r2mkdHoQAlSF_@Y431=|%!NKzVoxydIUyRA)1U?0bQyCR!H 
zQuCL6n6za|j^Kvv9=5@D1H(6XOg^-0AXGHA}#o6rNINWWtRBkx#+vOM)1B3(C$kB6aljn==;t4B?3{C8D#CAFPhzGRz%DeVJ z;fC!VaXXWthQ^r8gmqedW9+tXmyCnKr}<&KWL%a%bzAI)?UFGRyt!k$2EwJ5*a6!$ z4N*%&Oa?Vp=x+OV3HeMPT&i;Ex(KyWeyQg*R<|=5$Rx1|3eB)X5asB(BrT4Bq_XvD zgb8s)jZBm({vrY$CPq1(Cwsuw66Hbz!?? zWVZsM|g1Cx63gq1_-Byje%y} z+GK{ng&ZW@_U-cW7Q%$DylW2>C@>zca-tY_8=UsgU1>LruK-Gq{)}4qQ2up#cJ2*u zB<>i_&Xo+$R~7nFG=jgZxHnreU{kmTw5qrsCfd&Fsa4X3V-oci@dZk zNMDREDBax|z@>!P5E#tdMSowv5E;IM~_~Lomy!NleU+dd$@sj1c~hB$|Z;n<`Z8AvK#8Nx2cEy-eiPs zj_*#;E$^fNDZfz+&;H_T`ShwdL=t-N(sCfrhC0!o^;XqGoIH8*^&y7nsE5cdYUcum zii=Hnxq^%S_H-~DkH{c4)FcOMKK0q_I`D~IaIa#No$W28k1qRHzQbj2xo+ROy*)ga z$~fLbzfE3g_i$2uje^~>nN-Iv*~h!9$qGA9TuNt_f|!2_T@;?Cbk%3ro6?h4l5p5E-9onbe9pgr5!9`@UtCfR(-XhZs{&Z=PN>G zCaSKf*Cn0;pWX%cds!?h0?2BH%xbS?iA$`OKJE}T>9g6wLgKLfq?kL(6fda2yT!^K zhaqbVMsl?bz0`zVc9|04SB$ju`a8a1UVvZstsM~G>hNo-9rA}AVkjLJ*`S*jyfA}s z-_L|?!*FS z+N>V=m(~>iftVcWCNpGXAjS{71Y$C*V<2W8e?ghBS0L`RZ38haCLEeJef>c7e`>o?AKTklgC{GF?rTG5OV}xP*&_!5g(pQASS|>n(=uF#N_>JnIRhkF@D%3 z5R+jY12OaX3(ADO0`cMb1Y#_EsRhqH5Et*O$n4h`h{@wFftWn&9EdprFDNVa3dBcj z1F;Mns~$oNB-sVwAUX^HO_0sfGD9{7V*Id6ASS~)24d#%7nBKm1>z&tfjBfYfz?qp zHp6qf;K4E$W4EfoZGzGP#Ru6Dc%WE9Be|XZc%|KDWD==K-w}e!3(B$wWr^6}#+-^Q z!DF3ud+(2y8o56HNASg6Ai0p;!O$@vD~r|FvGUcJc}gJ(uvyE)Q7{w8O^U~?ouPrx~nQajijN%K<7v1o>TgO_TL*|#EinIeD)yT~OUW41^onouXfe8Vv& z-}$X#_m}o4cLt)E++Ed1Wp1aJE-H7Di&1%ic8Zv-BLy2KUjpTGm=LU-Q%71tUsELC*4u+On*vV zy?ECm8R4&*8-LkEV=ST*L-uV{L7g_LA+Z~;VB;i@Xcb-ZhG~r=Z!C?U%X``lR4APKSfd z!m^l`dUk6rN7&c8j#%Kz#x>Ob7B-~+gKsRtUS{-o3EOtU_)elU`b0bNq>9Za)oh-B zu|{R{)~_3xd?UB{#METC-b|6&d;&{bvvb?XXfK2P zoqZAWAN)1Ah!2oe z1@Gq;;U(F}9zd@XjVFruy>bcK_jwk%-P&#UKe%?NbupZ8lbx^^!+B=fapRCS{K8M5 zUQIAGMMxKot)2+~hY{@O=M(RtKEOGg&OZ_pPa-q7!~X7gXSTgF>t^gv;|?5JYg`>w z`G@kHI-qU5vAAd*&n><76qj3sy<*YA zs6^6YCM5=$QpQDPgs?(P29-%f1z(5&TiOJ7n%&-Tn6FvnEiL9h*^e7sB8}uy{^vvD zFII&oI>OI~g`bZIKfgx!`3B+V8-<^55`MlJ-g`ML4n~7n&dyZK(m|Z2NkC7c2xr6Lv~G9vNY(~cN2eljZZ12ZERJK_ixb8?Xp$w17ET-48yDdyuwD3Bc$sgkv$mB#W* 
zR{pOm;PNb2_D&4@=`OoL`pq={_T};acRBoS1g7#jcjL7~u?APfah`C4s}$Rhrw8V( z9qQbs&b}OV_VmsPS_&PX>F$gMC&!aBYlm)%;F$0pSwW{I=QH&}>uD zSQuOWNy3%M*vQ9op4QAfd8&wnb{iFu&k|;gDxweAs)#nzDk7h?RuO%`Rz>symLW$K;Vf>WB9iYdRg{0+Rz>;8S&piyh$J=} z6_L+ctB5{et0MXU%aEgra1mjnB2sQxswn@st%~xGvm6yF`ep>0Sj08OG$9>PhgQi0 zL|GM|%DMZ&YGbiXO4NoHsx)bYn|z+6I+o*Sxg_sgDm|0)$(>nj!qQkSsVY}q3Cmd- z*d1@j30$q(K)v$adtpyeC4TMR_$&8btP<&7XkPXgtHk~=JMS8m?-#4Y;Q)DCn|fS& zkFl|kCv}-kej}eHAyX5XGs8snRNj!;<~RBP8$u+3(x#BVSS6zM45Nx@(Pyh7`al2` zU91vqR78tDTNTj<0;uR>mDrz*P?E`Or;Al0+HzYeL`e$vuM*+sr7sgQt86Gyar^C* z%W#HI<<7*_+htk7Z(9{H4hdgN2$^!}d)a5}&Y zse1$t*Gv!lK-0gS{di>p#8Et{7%gZ8oe*>!D1VHMTR7BlXB&0jMCWf%U zJf3*d$ps}jmx!IAtZN`xIR@-yqWb9uypE+}XAHR2#I{uFgd(mJ$j#BHXfhjea1n+85sxPocSmFABC{SRM?;>$*rk)B@g#_tsymuM$Jx;YIxLIIc7R`V(5G4OefIsaX^M8X*hQi6-6uZ%E=+da3nlL#nW*Y6;DTI zRc^70qw5;01R_LS4zWt4l&*BB1Bvcu}Y-m z9;-x3GLgB(Dzb?09IF@tL{tv3ilyQnt5_;B5xK=GhN^2_#gQOla&ka6#`riOODCMD zD1Pl8t2h!KqT=bei;AZs6O~)6;^?}@DuD<0Zp;$6Ah>|KW-I-2d#dHML0}gj*1Qu zS{P!vdvZ6uDf8j|7^XOpG6CK&4>9iIJK(>V@uh zo+PD0EHDJATy)0+%t|IUFXyEUXSV0=ZwwtGx-i7NotK@NM0_i2urwS`r|7MWLvTI3 hx~!x#{obfMecMiN(%c^^Kvdv7D7_ zZPe2}boZ>ZsHn6S?RyxEc5F7RekU8`&HeltLN~4zx2aDw6qU>`^(>W@3Xfh zmz)}R`@`#Ad#*Q}uC{Oa{Eu%v{;eqc;Jtglb?3f!{sm;8Tl(x5_QG4?s_oIBzj3~E zt~;KzMyDpd@wx7_KN`mS?;mtGdS^$2&E9xy zcDH+z_)55Z^-#Qj@vq5$QQTN;F25M|WFLrSe}x^0*|EDD1>X#8YnvN783wKjXhNw4)}Z#?V`T6Yio(|&i*e`{~t8h3}A zqpi-)uzz|q-fCfS?zw$5oJ^N*f`!h;pgWnwEjS!sI~Z-OuGtN-{1Vu6t~=Q2#aFDy zi_Lf*sjIQH*hE&chW@dVZ7wGzbuYrgYW4_xGJ@CI4XNX5t3Ql$o;X8$L22Z9Xspip zFn%Rr{J=UaM)BP8{J*&2XP*hhNB3#Mhlg&YQ@n-MNwlnx41A~o_xbDNfz0S0#5NaS z!u^4p#?(rOS98VnqM zwE?$zdv4skZF4gO*GMHn$RPVOFdjm<@VN+HVJEy8PIkxr?x{iVWPAA#>_-W8+}rF7 zyIZKXLFvFFZeI%fHb%oKs=a#SjMn9({9ywbm*5Ptf%iAyupM8m+a{C#nPI$uzb2OA zNc?RsmEz8%*FojSll|eDxN%JsLZ{1Dz(G`^^f$ZH9xgL8$|i~Z`v|VJl-Rh}-HaFS zMy1!ZKRs_Nu^_yexcGPk*PFOF)$dNYq6(FZqY0@$ZlJZ{VYoJarB!X1Qe1u^DUdE( zKZpvXw9H#uo=@8cK4Gh?!ZrAh5xm@H`kXU5&$#4wmlD6jRqC4c#3ca%ey(9Qs7w`t z16m0nan!Qb=bQy8_c)!RKNi6&Z4})Se9cNYg{L`lq?`MHw#XUxbf>5;i5KwKMAa)N 
zRYFdxyD4hxCfg$vHBX_aMk`wD)_RR6(oKl~V(14<%2-D(j!J}hZ~l!r>gBB4tGQ6y zm9QU0?`|jUT)WeFA)deYbr0Qc3m>e(J1oM;iSF=hJfHr5V#~bs1Slu)Yy@{3MB7=Z z+P>dRL-Y!?;=?u}nhv69;M3+k{l`W;eFKj|wW*<_gL2tM04A6g-ixnt^ZV?F9)E`A zN8S7eZwO&=T<AY*gCE9>P>UaiP%wKm z3Gs^&Tw^OluD9?~ZK=Ny`uZ&BREkitKI5YeXxZ{6Hv85>NkZ75w)yaqw7dZ^Sp`b_ zCXl2kVT4A+T5_}&T=wG7#LFilxY29{S-;7y5^=4+1B8X-lhO>*{QoC&UzHEXxo8+H zjxbX4S!YZDe$kA<<)eyjX~N+0#}a3aYdz7!4CH>OYo`C58FnK=5?Fo4GgGLQj&-wf6u9hDFQix8P(1SS-OwSa<4UzFhQ zTg4j&Con-7g%FBK+N#0|DQM3EDIkN)6e%EsS|SD9T8N?g$WD6ey1skNmV#bz8fd!>j zx)~r$xYTY3zNz=Zu7N=|peTuc@W)voDkb-vB)pH8+MhY;^nIQ#uIL<}6s!`L@Nl-SI z?LD2G0g;Pw$3$)} z*~Ssn9(ipfpdtYqte~AKBHvSB#0sboYT8wBv?^A>L`Ut)zf*lhW@7*qtomz0i%U;o zcNLH!b`&26p?N$*_+WDrv;VTBbCVj7WJVg(kI?63kW zLc#3O0NYq$1!TrtBO+O{bZJis?Hb?%u-R2W#;=SOkO$3J0r{lH2Gc53*>8A>Rlb^O zvI-llfU+fYDn_h;JV+~3ziQ0Hiw#zAv<9pI)0`PrfQRE;l&OXzZDT0$WtSE)=Qs>j zkgKFBR)8NfV+A?+3lS^8Q?tSfaxMf9G`8FLF|A2ZB--5&tgwQkm9YZcq9#TxlY|-E zMI#fh<-G$Ntl+2vRzT!p#tIl4C?cvSXkdjE9Ib>EP$3kPw8dzw_7xnfiWRt|RWgVH z0$`VUwK#+oRzL>T)DJD@4FoRghul{{<{@vwp9#l{A6B5BW-uR2yhp47?C9xJe+W`z}45%TTL$RJ)=0U@FjpgmS#Lya?KGge?j(i`|9RzNQgRs&XmY0eBQz{7Da8VCZ& z88JctY=x}FCam@qOJw)qlN(>(J&%qTta#!e(^zYp`sFoxw!!sqEN&alHjqq7123d!S*FJ+ zzCIX_40PZ>@$xGN@B9ZpZ@JeXELR1HP!m^{NCG+~?qo1saXpv_Cj>TouBqqqwZ7Jj z63cjNm%3r8o3n^5JIMihF0maK<58h%l^d}K~ zz5%Zb6$UU05zx}mD-HpvA-gpNh=BI{RqeKRNpj`d9tkob9Ib3(MbbF=Ta(y7!KWK= zteW`<&5&lfOd_Ksn5`BSoWE4$t&)X;gsm0ug$UZ!q$zYw)ub6x1_o~7w@jG^N2(ODs?!vrYDXzX>AIihwQA08(=j>EW~y}^z!0U4ns^SXFYluJBDgU> z3mzZJ7^R0U+gf0zCc&GlNG0*J2Q6^uD=4`~t8s!TJ?HO875f$(w~F92`y-e`UOhzs z-?YzPRo|+>O?P5zXn8xFO00-jmVI~TM#Tn;>^%w@1cI{lb(ZQ|7f)-n4Z8V(43*8K zVA>B6iQw0T6Ks)S`oq>%uRH8a-m;UOx!fC%N8{FU{L>x%)2Dmm-f*KAA1eJSsVK07 zyI2C>@KCU3t50})#2?A6#r1{sh<$YQHIpuy%U74q<6nUJRrE1=6;Ju$j-FzQZQg-s<#SYMwy&TSsXK*Hm#rk6-8hpwY;9VR-%5kv-nkU># zAvFLU0&l0G<2zgFl^t7&-N%talM3Q0JYV5==^Ff#z20_bv%fXT4uVGKot)@zKAqlf zl3r@SoeZWlfZ@(oXM5b6po8pNuSMx}&^lkYrkIeeJdaoEWC*%P{h6IZl=zDGWb9S- z3x0XCiWrR=O#A4YtsIDDCVL6LGw46rt6NX9s)P3=rTX9xBH*r*vJ%{A*qdaWwX%d- 
zH-GMB%zyXeZYJ^2*~{K`D||PC)yjrRwHMFd*&hrNZ~c2w$Yfl&;AH^);b!k?T0p7R z2H>X}a6@I@kM{<>4RlI9{%TnXvR;j2lTS2AJTz9wNs(;eZ4KyEmJ01@I(a|Sk`|-n zkJ(n-3)kuYZv>lli-0M}Yb%t>g}zW7schh1H|i5Wz<#PW!D*_-!hys-?XM!(sLN`~ zU0iLvGufo63Z0TRyOA~t=(3XHmQbrgz%p728J|`b^3pyGRfwMuwmmqyHFbczW~{-flCU(5b)_{z z{G$z6_m3g?@=Mo>mEX7EtHdG;S_=W4oL#-2pM_5<_7GOblfEOYxY-C9<4OUn!=V!q z3|vtR>d~kF5W%agOgkH_vr>1_&qO-S>I7PF%P+R3>l%PKu>?dtG?P3k zGcMb;YqW~Nt=^lpl-H5mW&Shq>lr-W2#k1>8t*qEi9p@1verJ^e99Mrt4$3?S~ z{VUt0w9Wpxc<%$f?%=>k|ge}N!TKG)FU!$cY_}o~7`I2pD&Tu3O zUfV@ll1}XwKaqkH?uIf8(VFdR6aGfI$~-gfZuZe?=7bB_9)b1E@cp_`((yXyC!HgC z5v%B!*)Xo`w&6Vyyi7M(y0DUmI5X>s-MkAq+Xt|pkNCH8usRFTmwAxv-pb6blRnfc zIh2*Sq@qTP#P5-UK<={>qHr|_G3qnnUiJtlNprmW^DXHnsHrnNPW<1(K`!{;q`Cg~wxCChRRY&q*4MaOD@GDK(YLNSEu}C&X z!|8Z5NQKfv>$Ss;^YP-{=cvCv|%TF)d|JqlK;ER|4A>-a~}2gzTFntdfGedLg_6uy6v;9qry#n%zH* zLYm#Zy)$tW{a+QtrI@j5o#A^C91rIjd9*+&g5pB~b(%8Iz0!uGt)y4gmseCO@FR^n zxNjHS*CE?#GB5_m4+IU4^h|`=1%;q)rD!1y(;cdw0Xh6w`3N6fNlXl3Dz#HR}VLD_{%gUG;1c0KWsTv0k z&sKZ{Xsk*zyJ*b{#>p z^#?u^B5#zZm)33ABWDm&lKKR!a>61O@F^t-O2mjWQ!iOybJy~R8gPF|BRtU`^w4FG z=>HS{(7>=ujK)7RFhsG7r~(37`#XUuylEdTOrY0EQ4>n)e0XmJ4+W803hT6_jw^q8 zH+^xp+RMAogn$!LnXywXoH)4gBy|avYHyiyOwGw+QQTOJ;G-chXDYzs>A48;p6rYL zYtqHsg_mh`uLJ;6J$Hhm6L3mr&UkhhCu9vO0FFc_n#tDw<$aBte=>yX(Od4^asyVq z-4^fC83t~K5G42Td|WUq6m(kB?zz4d3OX*=}}jl@1ja{OjcL7*Um*CqT>Bv!Zq)+8u7sUN5gAi6rzf=9##BlviTGJ?)AuXu_w zBA_^tiUMR4I#rxaNaZKg4%RRApanyJn2gRT{RX{RfxcM2%~HxENnl5&hGLy};kSc| zB+dT$%O;@(9RT8`nv*&w3-Ky;vm5*rf`GhqmbrT(fzoC@pinba1ps;JbTbz%5@sYg zsiMCN1M<>Ih?rnFke3cYdf)&cuSgTYQ6C8b@>04g@GvOIOGhJ;2>={YnxaCB5LXMx zOUKQ5u@9s{P+vN{N^vL{6x-PRp&dFNjWr=qUrJT2yjsv*Ixgi#1rMe#9X{?49f_xW zN;*vLHKAc|`qXHQ4ielLZTDK-Imh{_sD*h~ffzm*1=wU(s!kYgklE#6iS`iEDs%q3 zP1veS1JeAiYC`L^g8)5+UNOXC+#1w(Ho$*HPBB4zb|!g+dc%Jl>ffs9$S$g=_kGD; z{F_^YFErrMx(srQbngB_&8U(GQK*|yyxmKnjntx>e}zL$!TR(70nG7}@MF2Bz< z>dRKhG}D$1sXeLF{OcpmUHD=vTlljYO_IV>wWXF!X~}H!Cv|5>6brr{Vk^8N{N)v2 z(J9wv%PFu`n_k1fT>q=aaKRuvwMhY6_o%L9EUgX$pg-Sgfd4i`y?wJ-P;KJJ$dLd3 
zd<826C$bIIMcm<5jNyMr_0?;12YUSqwMH1I9N+^DI9r!rTUkTBq9DwOhO1P-HKIUIKv+87q^XMRX#9z_a& zQ`}ypKK(a>k2T;;wq9T>8ZjVL&JeZ(v0{Hq&GD@qz2Q%r_F2}JB^7YRfci5xu%v4( z+iGkUr$`hwd_m`oPdDMs^>|C!yxekytpZrrnWcJtLcarVnthiMe6At8|0&(>h)-c+ zHLV#N-#Ul0f^bovbrG&1pKA)!SA_?stNzgamuRc$aI!s`^g5@|MLfj~Nw#Iw8anuZ zA(Gwx2fxv<-*v5FyewDb5)1hqf3Hw02c5Zs_{pz8oS6oS2xhpbwJb5=3dK|tbMUhj zurNy@D)O@IvatY8ki0QO){q$ie5wg&SpDlZOjnXkISMm*S8`We2r=2Wwv||s<<_&b z?tTIuiyx^}cUumQ^<^DC;FC@0&q$SALbZN-p%UMnl?q6OC1ufTi$L>cs?ImT6Y*p9 zsT1wlG6XgFCl%gL;u&YdHBNFTy2GvljNN2062${qc+#(WtuvczmTAdd5wm}2j!|+i2phtisqATmeK2?3RDZ5B4xZ=1p zWqq)ug`8Qto{<9kRDm6MJbpBQ0y)cJIMOtFYk~mSvYfYn5~iIx@IqN zw@pgrUSy8{72g%Yzo~FMt$dbcK~`z%jra6ax!7-Ew`G9o*sT4RzV8%1RhD^9k7Bhx%F34O za?8ynK*<$YRWfdw3Dlz1syeC>pJQ{)!XvI^Xw25&=R9*C4!L|s5Lal-@{n!4?bOV= z%H4~IZR6Vr0Lz03A0sMXsDh{ z#)D^1Cgp(1NHna4PBb!JX4jKR**2skubxa8khN>So=hMtI%N?sLic3Czzj#V_GCv} z<=h^oCleS_IkUMZJKADnE7lRgyC;)zZAgG# zJsCe<&be<-#uF7C(bOXqVI_3W#?Ol|>)n&#kn7%)?#YDNsw^~ALXz>|*^@~*U^0@eOBIG0x_Of^ z4ZiDSQnn2#$*U(524wBpuO|}-i%wbSC3H_F49sv;Yfsj$ttS&0QaQ7^Cu=_^dNP?H ztMp_m0eUh^k@GESws*0x>*{S9_^y-jq7z*%NKeKOfvY;to=iqEKu^Y#Re4Rg6j*L# zJbN-30~c5ZEc5K$lgYR?BtWm8j31A*DfOz30?MI#E;%!%9?-id!y(tbC(A>&S5L;X zu5$MxJlhtK-s@y5v*^iW8Yp<(g`P~{T9J$V*2#o1m`cyHCzDCjyC)N7tFq8g2}#C- zXHO>OfXPTStc7m1WW3m;guXqQlx;&w^6JTi0a?5D>&XPdqEi+DBXmzD49sv;YfrXP zTTdo1q;h6+PqxDKWYmLfvlHKeWQ$__ud>B9_=xA;MVTO(p6um@9hw0;GE0G*N*!J} zsk~$x7O5k6cVxWaM28E~k?}(W>BwXx19W6OS(VR(OMvC>#IvlIF)*9_S7yaN|M~7* zlyPlHe_kCKKVG1Yj3+8OB6axX&>a~+E@8$Lrr@V5T#F;wHJAllWj|&YuD!Z3o^6$@ z7h<=~ckrhXoM5w!{p+vUYcQ6*nDxyAcCs({bm+u>ykUO@pKLmLv4yQcH2gdRJ?QhQ zmr|z5eO_eC?<>Lkn)ZjtS<0du$(RHb*bb6Myab}vWMN}n`r^?;@L+u0_r)VgEV0i$ z@ArG*k2ft<7~wX}*Ag9Oqqs9vagW2J@x$JVaR%U_8u zBg5`Wv`jk9a?_xHQ|vu{uN_F5R;Jgwip_qbQ;U+CKVVU+S@@St*JH?VNyE-krMgf#jU6kO z$irSajU_1RkXp)e=#<8eiodr_X-)X&e(&#bHlR`$m(hG)-{nTjxn4C>lgwYq-+em#PhXr~;*$?mw{JvHc^Y^RxNa<)6}ZFYv; ztzO)S=e#F>DeOyY;%$v>@f z7eeg0=PqRCcJ7jYI$R)7xIBDWdi65v@qD7d+_u@>YTb>qPyekb6QO^j3CTb= z(G1jr!|}C-48#&%<}PKA;zd-M~9Pl>b>uBl38H2 
zm?la$o73}Yx;YZrbR1&-a~B zKWvS=djTQ6cbcz-Wf2R$(||iY85RSFuK;CN*RoS|urfCK73^! zB$S$h=I=!yxrf}O+O#We=_`&KXvJ|Du8m)5RdHm^?h9cLZk{h&Ke!mh^BEE-sWsFm z@vp&GBDk65!7gM_uGHZ|GpMYRcNRh^P+Uq%n}yX(BJkygOSYpdiUGn$TuE39(z>{_ zMCCPwMxXGF2-ZA|NDK!)5-vi4GyghHKj+kVP+QW~5YF=r5eELqsoh}sdM*TlKXz&| z7$(I~;86B5)Lfbj=J9eh8o>4|`@SRu!8e`S3zjvZKJb%s0I}#o0@@cNSzcwsp`_$P zfk2B!v@+PU)17pt=w5-*m^2zU(^_E#TJd3Wt|4q9{l@~#P0i-u z27ED+-L&A#akc;ugwOIK0Ai2hPYEW&Pc|LeL@Hucr8j?Pe=v}(6Z;BHFq1@bz`XAe zcwvUN;9eGZ89;xy*?YQ^!%R(0>%W$kv+!pwEiJDPE;W_PMWL_EMrB<=hlJq|OICt> zgam)?($w-Si;1dwgT>QxXli)^l4K=MHR>*m3R_t0DZ;y#)X}w_)e%U6Ng-|dN<9o` zMi)b9pyX)dN?`-Zp#-R_QWyg&Y17QIk2(#x4*npLZSC~u`NBwIKh=ceEH73~J#Wi0 zjm9{Q&B9ochijOB3#T$uh^Z;n!T*flHgB%DWai}|Gs%ST-3GkQn`tqf`HL~B!#^aw zP@oX*lq=zTkpntj3Ijq?^Ot=fnHby{o^^q05cjkXEvGcnoj5E*<%E|*fHhxp0c%*! zxF8}aUp@FMMHLZ_(#91eTfuT*E$Iwx+@lJmf8p3>vXYf)sq) z1+d|m6M}@Fq&48fS_q+~07mt-V3uF>eW~8PK!U^99UEpE_JlRya1NbXUVtqlxgOx# zj%_l-s2Cue8a9TWTbs-fC|i|c?YEkP2XPwICOS39EPFzj@RfH6zI4GRedo~u+oh+y z$=7{Vjj>BLPlht|yKK_cHrbmkgL|<++y3+^}7NX|V@$z!wFI zF4(R>;=)g9ZrHBKrjXS9Wj~{|=7#OI>%w+1;a$Xb8P2#Ma^ZnDc&6Zn?J{hNVZ#A& zWT?4dyUgQ~a8Z6KwQvm=xEGIB9Sw5Cc3I|xAmJzJAX3nvUyj%=!>_bD%3n|S&|HXx zA2)25Ws)9FnY}Y`z)Dk=F+cgalpdlth zI5fm=`*z8InLfBw<%0haYIfKxyGG&yA3N^bC6mM^=#TAk^jwk_M?jLayjBYcaYZ3p z9U*qacF8=j)csQzN3Td-URG-m2Dt6phjN`st zfoZV^bHEn`iZ0l$K;ptrX>QoA$fl6g{AE9*wC0BG9<2-8#e{be+hsW8f`|(il`~Tc z0nVFrCDF+d+hy1k!-fOm$WU{^cA3W|;UYA)n`@==R$fPJmt{@}5`K~{0)&TchR1dp zer2^Esc${zynEO##~T+O0%5zkZH9{&j@T~8s2Cs|uttuaTbr!Mb}M_ccKegNai(pe z!@gZkKH>o_zVfa;P`F{c$7XjX0~sndLP;|UJ=BC(2<#;9o z8Ct=cJGN^eTxy9OuwBy-0UBa5#LTc=GGL|;E>-PDJ4|L{hYdSoyJQe-g8tYpN6#f` zaRelltvqR2AQVB*WFWJ2#CFN!Sn5KB>yWxQdPVB;3Wo-fI%2!rOl7sM7@J7#>%|Lq z?6JqOLT=cundjMHyJQ}UOy{_7mr%`YS*B5UY}ZIyG!_?Z*F4DXrZP8dSK#WomdxDt z?FvkbJ(vT&C{T34b_EiWl%@k~yuzMSu5>(;L1a@%YW}jHQCf4uc8}R$yIc{e0g;wn zT4_834G0WwXEHFHaX};yc#}^%Ab8`pZyxJBSXyv+hrb?gbTX6!NW}CkwIsZ zLYVNCcMwVFct(fSoUC%97f)P#()YikWy$IMdMF@*zpRMgOXgFC+$05tKi8!? 
zL$b3Z;-jXC`FAwohk__u0N*ODh^Qo*)MST>KwyxScKyg(x$yP|+#N&{Lc9s0i6ZED zGirFHK1$Dok=&ijiiSV$TV*}Edc~g$4wUnZK>w%WIf81AxhjV1*er}&JUhx?NLB$W zU4E)`ZHJnDv9_sncafTGQky;28&llk9K5R$CMO^V6k<^zeatc%p=-@fu(8K<)Gud6 zw8|~lxTF^Hyx3IMK1r(}b;TC}y4I}Hb#-zc)v-r2wq$=>_-lTJ-FFdu&nMRwN)>9J z05VK!u{A{;FsMbj_cW=%v`Hm9{X2^O>rsZX_w5;B7%7d%1E6qJwa< z(u8j4%dXtc-liV(y5k|b!o4#_7r&DNq>f_4uTijDHk0buCHr`1C0SzUiA(9sQV{b`p^L)Pl&<;= zdsBMy@-m&Wg(q6$)c&=d7)&BLRi43_ad)%d8%`%J_HQy(-qW4lvoq+V545H`+k;+f zV>FzOM}y90Z@Y&sa2jr$@2nKz!eQAb7yNPz2hu`r*qcnzE$rvw#x-*q1O{KHUV`7} z;Qf2x^(O751iH`{fjhubzc zjdokMpTd-#Z{d^~vN{c}!Ke1X{azM}iU6{jA+y?RSmF|^rH?yAP5NxMu#h-xKPl#p zGR5;M@NTkl$6?6Yf{|P;L$A!*^bqP=xg4D<%-&@u6wx94ns4oZ_*REsQ|*vH><~lg zu*e48yx{p6gnND_93l)mR1~>3r4>cJfw(wT($6ZX2JnI~YIRY00aj%s%GoAKx zDG(DK1_;FDNH>`w8v`+Z*d-8?VI2c8^Z4`1guMcByR}<^7^`C!0&#H!hRlAA6)}0- zB@mNmodYpP;CW@mUKR0C+d!;q>RhT@q7SJV8%pf}ftb91Ei+_eAjS{71Y$C*V<2W8 ze_oleS0FyRYk?T6Lu$eGR>Z~oDl+>u24eEKOCToCItOBo!1KzAy#n#E-3r7+A5t@} zKM<46(lSFf24eiMOCTo0ItF6q@#mEZdj;ZSyB3JCI;0k?3&daF0}qz57`s&sZWELa zC_c=Nzyrk+8p$2($1CkNBa=u?`pytko>!LLFH6J*H|A7i2_DmT-28XYR>z?w+al)R zi+ezFA-jX2t6}78S@~+rJf)BX*sSH@D3}T4CeJGae>1B9`-KtCz-t;{5g zr4i5FYVeEuAStt++4k1XAa0_ScB=(k{ceF1PbkJPE`M_Z=KUu*8HOr}{G^6T?s2Az z_nHXPyw8(dX?~sr>B16a_ziwDLD@TGS1e+s4bpp_z!$XSl(?;YK@oxD_4N#$~Qnf~vn zrMeiDF9zjdwmi@>Hb-PPh|qBOCXS1xauz|o6L7gyhWh~~-E%$VwRC%xVfzKJ=UjKN z(~B=z-@mvwo?G9uHQMYA;P>a@>Wt)ZME7L7HSG;2qj6{0-Re!qs~60>?0YqD*Bw}X z$2=M3ubS(B*+k>aqk};XtXF}j)~i9b>n~>mDi3`X?ew~7F(dClji1Y#-*w|EAL(h$ z6mhF+rbyh%Oc9qbW{RX=G*iTFpqU~uS@@gqV%DnA=?2@qPItK3nGE_Hg=Jk!>}6^F zWz<^c)}{YrU-i+NA$(&V_A{f$3)r?3#&;5>(I?u8Csj;2sbm;`h=eXy4~K#B~U{=3LCjw-u z9J_4_YFJgSiO6MJfjOwPAbB~8k&Ej)f$$}Pv7?kRQ{|Bt+a=bHDp*sCSn{BNBS;4c~ zFwz4l*Gd|^o-%1NQBul^EMD)Y!TjX`c_i`F;0=^HqXudHEwXn*a1D~70%`C@%A`?) 
zdFvpuc%zm@7q`R%DHzl!wd8|0QRa*qr2US_-c4HeTzrrWrC`wHga3_`Y9!qsp<1;()DXDDV$s%1GUV$6&_YI$&&-1wF)Li7GL!2OTPP}PhHcPOaFWC-}>T#eZTbRfnWZ{ zd!N1S2Y>B%7vaaQjK6dRtU?o=h!rnD^z>46BK>+EeNE8Ux1z60F9P(-`E==KHCwqk zczM*AU)a;!w;1mQbnJ3*gkQWLnrI&%dgyIY6z_v*tN8X@{_SP3pGn&2muNXC`%Rvo z)8Fic#vr8<1MJB@%zWQYzdrzb^J5LupZ39g`W6iOHbE0Wz%u#{O4{_!7xE7!-P?&d~!GEHZY9#eSR_{=0Vm&p+{e)i2u zIuZ1nar&F=oAftJS5o;WjU&y~1(R#HTx{tFbCZR!t^{IAJX5rNBOsxhTioYhdqP=r~GrXrgMCqHkjt*>~^| z@(#ET=;CWSo&FFl*rY;LOb51M0l!?9j@Ct=OuwE=pFsYKY-Dq1BYi$Ka#Xz!C)*=* zsAHE}{~Z5^Q_LS;PzZBB>}&5$h1pz;;Wd!H8B=0TAGr=?p(~aDbA(GHN+q}vPOcG!Rapu5pKI~r{E#%0^#O=al{*n|BXe@W)kWiI4S zmpp)Wa*Vbf9QIQWOhuf1EESl<9Ooy!-e%nF52s1v>ba%M(5KNa+Qo0K;tX_k5f<9> zmmiE8ONR=6a=U*gV*VD``#`TdxTAlL2q67x5&5zF;L-GX%oj3<+)0q&$U*f>3n#L_ zs>}t&!Tb~0qd3bQL9q^r1hcS>B5D8WtM{U0ae&=gg?@7dLR5rpF<0+R4#8ghxk)?U zc7S3!*?<6p{$zSh>SnLJCOQ(O(ilL#yTxTR6bq}3b}JgKH|T9l<4a&4+Vrs5pY}(? zt0C1u>LTgaUTOy_SGynf98a0%{1O>x9{v~8$ei-WMvE5%{k25mcfq0KD4vabn|Jh2 zpYDy(@vS|jVW>PLbt+U%9N#&md{m4$I4IY&elb_;ROAj=bO(5#iu0vOaM-H z$Nlc90ovk%VoX-Q;H;L`O9%NlC1gAl7E@E5cKgFLpX9|tBc5A&AzEucn_5n`-I4th z@~q{BmFrg8*BxoU@Erc;@OYzr_~_v?-Rn1RJbk*`J91O}*)Qycx58B{{4zQ<>5b2I zQ>Djjux0mJM9V)=+DL#3z12hU{>8s0|3zp8tGWDQ!kX-_umdqWb~m#%|6wETu#q_I zpnIw}nAAV)?+I5|@YpOyZ1TQ@Jt-e$Y?Q1zJ2+e6d%wxfR_-Dxdb@iQ^&D&HpUZ8! 
zIe5UTyQ$eT+kj6-@LE=hZd`5kH3y~tiyMCSnLxZmsy%C=_~<@Obi?dAR*ovUj~+35 ziwCptp$6RNuMfUL;tQsPyMV<_-&y6r8d+pKSR4RXVyFls%V$jwL8XJ^LJkpxk2FG9 z@dV}HGzljmJfiz_5HkHCkyhj*E!8VQp9&&4lw$E)ouN%Pgb0yVgNRVQMwx8`f?sU_ zcUZTRXT>R`6oPA{k|1OP*`G;xpNqh>9G|B)^`$_DG1BF8w%$S|n2$%9?l;6n@%{#A zwiR+k14ihrcPHfH;17s*vbiAMA>UJEp%NBIY+Mt;zmGt(+mw@-a0{5F0R>^#z!-cy zg6mCbJX^qGOU1XMTRP}lPm+tc*77TkUH44#30t$%1pZ^hUnC^6t91IDGZF+NURX`v zyIQjzPUe};Jc@n^Ki4oDRHh2S0qyq3I%>HvLT5(h9;eZ&Z2!>5B6y{hoeI8YC7b|E zYS@AQY>_hvu!_GXs$N<5UR5dN6l|f4!XmQi{W_Mdb+#$Gc=l9!H&4pY515pZFD1le zO~d%cQGT>!Vy|Y@)$j`(csawKHF5?Tyu%`lWJpnl7bUjL7YeYIHar{gH*8A+8d{UA z*uFC@qPEHAudJtie%j+_eZI?vIx^WbRGS()E6%hn{Ief={F!B+Y4W*1)r|z>rD)^J zSKJ|9ZkYiuA8WuH49;w&7i}xg-5T#@)4YPzSfZGH3Gj8z1m=S96!w^FDXkfoc zxjsUMFGgV8mQFGo*IO{DRtGiW=p}{WqHo$cGV-fI$ zUc*`_NeCO1Gy-At>$tlgrF|1fQbHE@Q4X`Wi6m!$rCPszi$<97@WWaWfVdvCTT5X8>E2lKWEX~ z>I?l|005Z-8`rfcgcVXisJ1kmF-QSdmMNMM$s)NkoIs`*j+a`H0y0fjH_A-}V1pD8 ziaNdcAq6%gR9f~Aj1-Xhr82FC6p$x%va?4DY^ZJ7AO%*0e31e&h;rYI4N^de=mcnw z6xdMXOxfJz+K}`{3dkU)R@n+EAjC8xwL}UmD7Dhf07j(1icm0nH1$z7NCBDgt|J9x zA_0*CLQ-r=jKv&z(2Nw2PikyX+n=~#Dr8JWhhnk{8>D~?(GvO;YsnQwqZfxJUTlzp z{MAdg7_;3Z;L=Iu=fiMSovvxLK?*QUwTo)#r`8u>$4>qpY}q+QfWitXAaBEFM+(>@ z1^6*DQjn7`X}mB>55)%AGGK!gvZDVv<3z0{R{{ z>(VpE3dkJTe$*c;AXGK=LyN%*q$}}my9&t2EI;c0+GbY)p{V7hzzVnwSqb-W+j>Bz zXN47z59@g08mw4&LBP1Hz-ENq#0tp#s$&J@NuBKMu>u=vR#<@*A>ZDN4B~|q5F$DO z+G7Pa)HqW%x4AYXQ>?&Hz8BfTrAZ)@u)+$+hczO##0o4Z*bV+FWH zHSa4RVJ5>e3to2tD>&+~uYkzK_;v-k^)R)8g3DQ81xG7k1yl&dB!glF^gJ@wrFB>V z6CJhvd>wBfkjYxD#gh+f>W7-J0x~k6sT4hXI9>{@K#F^~!2s2GvDsHZDC+dWHCUCg z0-F&qRuJr^0x~4m{`2}b6Dg8BDA@*LS%5T*;hb5 ztP`L;R$xPoGi5VYU_;V-UjZ4!gg97X1%#MJq?TBL1tmMIz>1JBRzL!H3h;1z2G%uQHdq0s$+;*~4M*BVWC`x)(iZ1zl_G-^R*0Jo@Stbl}>-Ny=!Iba1u zE@rHNv4KLPv%m_DRl*9W5FE};K!$X$`X#da@D-4+@1F0C$$KZPU+Vw^`H6$+iwF_= zx9opQArphOhyb(YGxzd3Mh^t#>#T7aw?qo1saXpv_ zCj>TozU0t1{gM1sgCH(Zys+?J8Uf9oq6+~%3B9@ykQ&?9`Abp(S-{sq3Y8)bHJN-s z@p9OMzjzJ)0iSQc>q3PAj6wvoH1vu?Kx$#_)+E@kYL}`d`Ci@j7>1Hz4*rHy>VPEJ 
zNCcm5z_DuPBQ!&r<$Q^Zl3=zf=~jx$(_eb>?#ZA?KvyERt4UMnnyN|D0A4Ux9(xM} zemjEKSh806AcZJDL-7_^=xhwSlSzCG+VPRWXk&G)!kBzvp>7JJ^yD$kYt@|HmX1_l zifJ975y`yTm%)-x3#dkv9=dF6fti{FZ>}Ph#LpfIZ}pXm+*dXFm&A_roWCO#Y)yQT zsfysjajxMJzGCk6|Fk5T+_9&SR)_=0fIK#k2UBMwG3uvs7Q{ zEC$=4o4;gH*-Q$3`ynC`{JL<0Es{ci*xKrKhn)$&@TAilk4NKH`TCRcjVMV)fhF9< z68MIPf;G!M7ZJQY;*aE3@__O{Z;USGBKKsaSADde>h;f@ozeq&%j@`ab8owqPXww4 zb{klyCp-fMb zyAonDmwoGd(Ol+MBVjTsxu9c`J*JXfG&Uh(Ii6nZlW^zpDmg3JtY*2OQ5fDq<|-g` z9a?(6DR)(Xay+Sh<?hRVdMHsgupkrTv+mLzMW6@#T+I{eoZKtRhCE2Gc(JW-A9`naLK*JA?j{ zy}I=zt2)p^jg9)?437=mpN~<0C?|$6PBp%AN zXInFV7u^Y1*)Xa0;_Qml#9RMf6fzkXE_fMt?`j${6=4;AssT4t<~_d}GQT3yvJzyy zIt#stPc%q8G*-w-C6%`|pjTNcw5RFh{Y*<*Ymq-@TXE}l`m#5(0ZkJJ;QvOjS+@w7 zg1ojui3@uQeJ@MeD)`rp`UDWLpQ=r86x0!VF;%ei)Y!8;*A!-uP zt_h&8VmJOn2N$)hskvc)0MDh9WDZ`M*?M{=9}%uiw6CRLvj zQl&k{_%&f;mU4yyu4tEvFqT+RaMj}?btaX!!N0OyO55z8i}yaz>kjVdpR-&f%knM% z#*RUX=J|dm;;+$C5`1o~!F^fNx~QY{Cbda5}sJm$ikQztC4L z!6m6#V|1184Asoz@M!#SO`7SfsB8rOQ3UsS>kL(1IK{GCY9#$5n^|?M;Qu({?-qBm zPkvGq|FfKfWcKjI26V#7YMpGxzAG;#ZyTSP#8*mxorKm=!d-YBM83=I(vIwx!An;! 
ziT5x5HTf?Rek38P2`KpmoKoKA75$!MQ-ym}S7njFo#Jmb;GS^ihI(0y9ETTkQny!V zx1kanQuaacD}ikb?;%21LiSA(R!PBKT^0HZxgCIo6PTxT)Bit>LYm#Zy)$tW{a+Qt zrI?vw5dyv!!SQgekw*)ZA}Br-P^T&L+$(K3+Ddv=Tt=C>|B*%=+_wwv>yT|=8GImU zaHMA<%qCCNAm6sxY*0Q4Sw<9VkpZ0rDI)2H^K5A`wYp-Il{VqP2Lt1%;q)rD!1+$R2mJeN+BJ~uej+?g1Z=#g38zAfqueMnZF116 z4_oP@0VN|UwD8bP+95vzZ;Vfd>5xS&E6p|t07XYrH4Yr05i~jEp(UA(u1-^3JqC}& zZ>&dG8ZhG*B6!%_tFlEu3}@sqfV_c=mtx&F_kSit-Y8Emt=q6i&LE^D^$ED}dApPn z1SMj`nW>j7u(@mbLk+k;q!FIz4|?dbNA&-Re`sLXB}U_)85pA2MbKK_2~^2V`)C1T zlD#`hs|8g@_udE|3L>=>)@ex{SN`&DdM;#kHYE4TitgoIbas9inXywXoH)4gBy|Z^ z<*@3bouat07{NzFV9u%0Wdb~&o{JFg$-daXCSAHSnnCjpd^|)MLFbrPJVhB1P@G6b0kR35YUbWl4$FM8Z=Ffdf}uZ5M(31% zgWjw_Uo78dDdmwQup<`&vKGm22Ng-0{qvViLJK+o#7i|Nbxs!IRU8y%@Y66LFP&xX zo=BjySq~`GOjQ9uUOL^(MNz_}wGr>6swOT7$V(?7VuIm7UOEKnfdhcNB25JHek26Q zOX;e>!=NB99gRpP0B}fYiV7{vs}_)#j+^shdr?IcX4@ehUZpq`42o@R{?HB`kH(r1 zs4u0eR$eWrFCCY1qr~XZONWp9H|6-u&pZ03Pxr=XOK4BhVRCP~413e3Mq_l4;KpdX z*W%7O&QC=x%)3fw`N1f_CbLp?!f=DkE(c4rhmcm8^WSa4R$Us9=66*STCW`h=qdDy zAr{kg=~UsJ4e(!)Q%n$_ok?Dy-tZrX`nM`N%3sb{W4CXrcYJ-J0gu*YkXxj4_ZNaz z*opsYj#)gt22 z+Ror-DwOhO1P-HKIUIKv+87q^XMRX#9z|%_N8rZ(&F-|@IXxP0b*J$H3eB&3=yuH^ zzY%<_0dKPP0$b6D0ikk+upNjM`&(*`Z{_F>f7-OqvbHR#fGY;npSgi0U2EA^W3xC# zqOjpzNCozRO}+t|7bsDc$~wPhny;tr;8N zI)}4@a8aLi5w0PhYYNj>g@>eVIa&6qKldB{*3dx)%#>;X= zF0p`ro7i4~I5Q0t5sVW}dU0Z8yatwg1mBnVvlXx~OCc)qvh1?408Ws+F+|po83BB% z31?V)*PS#`NjBvu%;a6kU2!4AWZ&9WVnvo)&(Z?@33x1iq*8%yIXKprZAM*#Pd1@H zBUN$<)%xv)N_=-#Dj*e>ltr&K1{2zky_u@}P4GngSbge7yORD%h4+(q#?cX3lV0ao zE4{H6C%N>ytgl)m-&u$IS%rln+FPQLYw+h44jz!Lt>5&K4^mb`!E5FI zat;2WbL&m)g5z3rhP<_W$T8JO$6zFzyup)jS(@qoJ;}8?Hkm{^TUABKB(C5Vk=TR1 zawF5~w6M1g8o(ZghvEl40vHipSs(VP>Z48BMPk7f$E7LjgC#BG%-Z#g6xgQ%uHLvCHsj|#-dK9bmQC7BGms@Tw0ZOjGs*-WbOrRF6R@Kq&%jbApv+#&3 z8M?tW_&LwqheNJ=W0r?(>uslI)>ZCaID@VGc=u#STdH~_U{5B~K*8%S^kf3pid^K^ zlL=!mm7Zr$CX=RjPbSP(Wuc*ZG8qq^J(-jPCL__X7CO<$c$r;KCS}`@lDv8{VL;Zd z{dzKiu;`S!{-_8hq5CyqQVh4=J=xJ#Ik$)D$%IL(oY~xy9c{5a8TBA;u_tXT=B(|V 
zZ%VRdH}LJrWP+^HleGi%WR@Z?vtKk={NJqlHSeB`7oF&GIX3Cnlkr1fllSb&WF!%S z@7t5{WK~`hE(Mm|-?OlnF)*79SZ2jKB6#;?GOi5?(5olo$IChQ?a6qeq9YdhEp$)D zk4u=Ty(hyV*S#mpL$+5>#&e=)=*eUnsJ{Jjjf%&6olM|bbb%o2WWpFs zHRsur$)xGslL@m`S!k$)B;&!eCzEn8i;N`W#XckXu9HdGHl!r4o=g~!wQIkgOdu>e zj}NrO0_X zWqTKCR^$IRo1Tmpo#=8wdNO_pT=jYOWHOQgdNQ7@%4@=QCV1I zX!w$!Am-n@C*z5#)D~`?j31A*G4-mB!eOC%GC4D~_hdNay7y#x$oA^Vc-B>RQ#gaI z`*^RDt<0h)lWAaAdoqD*MK1DNClkhCsyWY|OeRh5o=ljn%0fdWBpDB$J(-k)3o0YY zmvzB%{|%5OnS19 zcHK3(}GCLj>u_WF!N0WIS1w&xA{W z!8RKQ3X$*pdBo zg==vny9Tp>tL(?@!nId7#3S;cjHK+?1_z1CH1_8Xm=^v379 z)Bb2^e*=t6ES#yR{H%x^oa9VvTTOIDq3r!I_^}4Ok>#dpE@w?mk=Xn)5IX?=-Va!m zY8L)w)AblKT+*JR(Ves|D0 z*B$JjJ5O&P4JT8{MG2=*pT2eZ7Mh=rK|4ND%FldY*TzlV$1@DVsNvq1%xQ9;AEUqlliy<6m3LZ4>(yrz%Pd)R)yY4Rel%Lb? zLd0(8F8QZb?m~$1?ozhfaCTP7gEa0!W^U&$`KQCNp>mfmORrvLJ)X}9xLnISklQx9 zTdlis_UXSBWg_%%G$9#?yIDPo7f}g~%jkJ@x%%AlO*9j=;Bb7cArrCV@)<*YuQ<-1 z--_ThEI&B*vcEBKsP;a?(6XfE!F4HLT~6{9tWx2^HydyV%Uor4TY{td2~T!KEIU1e z?90|ANm7NEct}_7=LQYKcOqD4*{LpVAs+ZiJBTDqJpW+?l3Tc0DHxR6St+ZP1PxS! 
zZBEapHP{i|v70!M)D~gjPa?RL<*2e=QTL;)aSa0Am9X)h2Hfe%uoyUe1t`0^mYt%5 zm0eiD<1Zr5P>mc(2p_&OpGg4uy$B@tkegJSwx2D1#qmPegL~l1)(CmUK15u7-g>a%wjizPLaT(tVd*b+s%;75HPPCWB#834ZQHe=rS%9}PE?C#n;RBfEKX++rd6vaQ zRlULD={YpDJON3vq88;Pe|fQLuqsKS1axg@bp%piQb=3AQZEvhmxv6)NYOya(Z-d+ z29iSwP`{v4*dIi)t)2cnUl>X3rV z%@vo-yuw2G%q0U~zT1G;c{44hGk-BAb@+!Qtpp0;PFW7V7dfEgr7$2QHGkO$l8Gs; zJ?jG1Ans`&T25)CJK>glG{D%h2Gem^^vfp)UvmL#SkAa0;y#fQ!h(L0??noM+kV3Z zu3^~}!$x%&fu-gE*RYS5t!Y{A)jE69Z5eUbBYfKhu;G{!f`p%>ivZ!ylLo+8O971P zYr!nP==)N=dw~RpuRAu(H0LXy&S(2G|Hmv>?&*}Gx8 zBAY@|^OyaM(wZB#+qS`W14FiWOx~|FD2gMt%W%d85f^|UpRo~uvF1rqYJ(fL%djaz zHx7s+L(K)-Wgf2>EI66=7b>OC+Q+Uct}CVuN$_@@GGl#NqxJ^!NCpN z<#^-516Raqgevi1mlq*BcqZjiBoMaCF)9WKr#6|vF5TK>hQNg!B;2rFUfx2O@RfJ% zfx->jrMD|q!**+cY{^X6JgjZH9kE?9PFAm%jhM%Fxrm~`VMDgXWO9!CcFE%k-rTWW z1L0Cj?67awG(;`fE*aDaJh|=LCFC=GaH%R#J4^;B=O7te%es~kQFg?3$&9cG3eB)X z5asB(BrT3W+21AjY7wek8euY?8{F%N?UF&U)csQzM=y{(Ob~z@w#&^_R_ltfiPXMK zXxFBhI+iENq`?i_C68y?mh#MDy8#y{$;f;pQ6xr3Y?q9oXqqOFR z?H=6)Y!{EohjtB$;)v}soN+;f$;Xjyx1%gpxL|KNw_>C~l{CNS*Ad%g*c8Kt1LDX~ zbHR3*$0gySG6<`!nca2axNnzbP6!fyk}d*-J5M@GcHFni@GGnRB=xP{p8|uxczWQ5 z?Q*<5H^PJ4zFm${F+e!A$qY2>)+RFqBtb&AUhLr>m1B?NzFl74LYVNCckO|~4ck3- z!JNrJX2RAM+NRqP+a=>*7^L({1CQ+nY>UabEPu*vRF2p#8AHLFJGN^eTxy9OuwBy- zwKT+J(3-GaLO#<6m#P9`yJUb12iqkx!Y1gC?Q-;7k`_mxV$yQK7sz-s#dgV{SnB?% zi=$VhE-!i1gY9xNmDRdpY?9Qz+B8!Kwo4}Mi0zWcGi^(Gt_#~GBiki0w|%>03?+B& z*shVZXe=(+u6dB%O=WJ_uE5oEEt$FP+ZC7=doTxlQK0C8?Fu9$DUI&<_KMf?w#yL3 zC9sdGSpnKUFLCJxbRScJXjbcEnZ>cwr`hZP6!fyk}d*-hhQ`!aKv^Q zer2^Esc&oXd22^<#CAE}xbPrpm_4*&T+y*-)D7F^7!?DA1J=mVb8D0J*eVtCzU?dT+5-g&j3=v{D8}6er#*C+L<_Ez(ARho-x1^cg3`C!qx{6$HF_fN z^>8e18&2ev%oYB!;<+0V8d>*fiXh%IEtejWeJ&9nObpV6cQoOLf+$-6-zuGCqLOG* zlO0O>hQh1-)g+HRCQZD(0e1({giw|Q(L_k`m;`~=mLzxQvZCS7`&LDQ1WIYc$Hm1_G9cuQ)+NRRoMQXB1ZT1+_ubje;BoxBC z8ewt*azG=2@IshOe?y4LIj8+%Mg{c=`BtK4!UOll!X=R`^y-&4~La9Q{6F`PZEw(0c^1_9DtxO(vGztZj z?(PcUQbKGAApLMc(XszkpZ 
z{D@C4#jq?^F+t?$(Mz#YD{W!Az+%hWf<*RmQryqT~QBl`t<2rLk!VY50PEe&IJq=7aQTNq;aJY6khtwQV~-KE4O;RgAK;y@~YEVgJf^xa=+0t=qP@2j^26$NT8F$t&z0 zPO7g_uv<2h>ewawcxNS9V&{oV>C93P^G~6R!qb$l`V4zhdh+rzow9`|TI1CIwVfDD zA~{u_!I^P)v)>y|CoT4GGF9Hwo!+xE=%f#{raRk%UTb4CoQ_9>&Sr1Bhi-owZk+F| zltb%W5kVY~@mv?nF&s!2!-l=d6kW`IE^b^ir`eK-uSM zoHAwNN*_&pF0FL2dnW6tOmgM9XSR~NW$ev%9qHk=%}t}-$_z|h5wB_O-1T-!_|zV_ z-^*fA5kOWmWLA3(OI%{L^l^u%NuSLY77~Z;C&k=Rrg&Zj-c45SI1E`^Fp_J4(Cf0% ziRhzZ@Wtmi|C(>@fcRF2UsLUnKkN`g>9EKK-MrxW8H9U&CcGPA*rD9WwI3i57pH1E z1!D5JiwTlvodYpP;9?-|wX$ZW)3y!7vRXoq+3V{Gm*7ow2>yYX9O))AWMd%454!|n zGOS}DW*&cDnXp$NZnvH*ff%bVKp-xTz>wLmu_7jqy98qLtaBjd2t2Q>*sCHwY8!}4 z?^`oqgiBB*`q-t4n7n^2Gh|~R#t*v$Vlu2_AZ8waUYW30AU^t>3B*`^0RnOHzKYC# zje(dv?h=T}v(AB-Bk;VkVy{4a%r+2T2o*8WVStL5Y?hW8vM~_jhg||O8P+inGmk&7 zOxPlPCMX?He3%`92Z|*$k~`RsSK4hx zCXt%-ogt_^uPnP?mWT~*%&Eu{Jgy$0BkdZ9_eNg$;vSG($nIe1Y8d%iR=yfDPbnk; zHfwn}3T6Vi$@9v<-^?n&eqn?&a9c!!zM$p1t4ii7>WD3K6Zpk_kd#@^Y~mcl_S9ug3XbjIsxZMO6_2CB+W}P$6{NJZ#+}&G5b~oFHr;# zVHdgNW6Tz*L=);1hd?Q&q^&qS#Q^_opK@m)ipiZ7ZB*uVdg-EaJGmH@`)H@wx$bz< zs=gyDc*GNT86+2@vWsFm7o&2v!L|OVj9yasK5bM+k!k_Q=R^^g^I)aiO`3(fI%93M zs1MI9!VBaq$Mg4&?%PgYs;Z=NIlN5&chpi{49XXS@-SN-Xc?O$vKvHbID8Yw#Zozo zpxz0%Tq?u;0F&;y9)EN(^Pa`tWmUoX0@!n|JJ{*Pm#pt!+#AoW@7Wq{_6G3#^Kf-W z@;IV`Vszjl#07C5E-M{xWJ-bL-Oo*$@7WdDzd49xq_qP8i=w zlt!OuC!SQX`J|f7^Dow@Y~K2HBa?6BHlJAAym4f-u`o>~W{Sk-%@ncCn<)~TH&etm zZ>C6XK7obJ>G^GBw3orb_JN4`5B@e>l^I(34WfAQZWO1c{poqm><&D^7$&K)_o8!_ z7CIY)?qm{Q4u|4PvxAhf58?x46+s=oJ`XR-KK1~5ooF;x#P6j`(7w-e$n92dzyHD2 zBg+@V`8L@Jdoi46rX4qq=)?I>qFzlfG(|`kjjf&t|M>_G^7D!JQ6JzOPUjzqi6@bn zJK$hvv@_k_nRYVvCvh8&tTwKWs{BKFQyoyg66L8&046J?9YHkZ1~?*-C-ObbL(u?h zR*?Xqv6p1J!z?mge#LZo8E*lrl6N*4M!nZ@6|Cy_!$*Kjm1DO}K@F=CHxapP7rJwO zJx{@jT4|D9{SkagVC*Pm%v5>gRe6cEqXlaY`%rSK3g*;m%HFY+_z3)0fx%oip%b1lGRHZyq+>?GEq{> ziY#95r!HI|E5xKxmqbJ*4cgEvv;j2fiz6k3ib?w4z71HLHKW^R3qvB2<6hK!n{{I zBqsVJ3MOZ%Lp-&;pbz* KzFzqG2I1!$g`aPNcV7Li0+am+*we}9%{&mNAy0fiCs({xfZ0`I`WV@fkvrHA zN~o!^a*4&Ft(RoT*9V}5j3S?z2Q^|=%-rsEpBqE1nBDD_=&|0%OzO2hM2fk%kYSi2 
z0>8^)HVhfB`JjjKvLPcKhxxE%iut$^Vpt;*y1TlB&@322#he^;mNyZ2H8yt=7uHA+ zMWQa|<3@;K%|}K>nC9l>A|n#NJ?)4g<0qpcKQJ?Lu_KO9F((%pkqpGl$VL4OnPNU} zgaX-7kt$g`T52r5XlWG|2BXOYExR0n>)=&vio+>%tJfWNCU4p4j(eM(-grD3x3U%P z)gw!P?+Um)`%w4PpqK8@>!;sL;%{Fb|G$^R??hlK&GQhjdL-6hk~l{YN#Od!_T%Yc zd#gvX;L-XvoHU7faUcpOuDSQuOWNy3%M*vQ9oo>t8~d8&w{1sfHS&k|;gDxweAs)#t%~RaEJKbe!WDpxib$>u zMI+ioaLxc(KjQ|#3HVtrU~ho zI9zBVdNi_5}P zl}oRH#jFhMjJD$hu2yZnUi!|xu&<~Rzj|-{m3uE%iF6+|FZ+vCVsDV0m5s{xi&f&F zkG!o#Jq~ONc~Y0zuI z04lmzCEBQn7JarVq7MX6(ZwpUHy)xSlh;lct3@EVqioiX566Wday(~a0>20czhEviU2_mNIjwaA?b~J&GOjI9e z0Jr#LEL|g$V9~uErz5i} zw^+r|b&XX55h5;!SS3<&k5wWinaJE?6<~?c8G(M}kESjEwGja32>A})tmB~o&a zRU##s$lPKTS;TjaRSW?lDu-CbQgM$}EESoE++r0&)wQnTNDwhOIUpNjd>oLa6HZhV zzjlvR90?Cm@pRlp#nX|A$}LuLbX{YWK!k|PAy$c$++&qUNhUJ4SVeZTIL9i601=f# ztYWFS$10YJOhj(6ilOQnt2h!wOb)S%r{m**rdajy22`3Kw+h08RzJid940VFMTZD2 z46$5tZwxXsnWg<^11bd!72BD?-4kM(MLi#iTrR{%0?ke)#t%23QZV7fNKG8|LU%h) vl2RcS7=lzTx|0EBB@>&M^HPR0+jI9fh7J*37-HVe%g#(9zR$kveTV-)ZVZHo literal 0 HcmV?d00001 diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..19e8b20ce62d6ee2ec51efec446381bb897032c0 GIT binary patch literal 267629 zcmeIb3zTF>eILBHdvXtC;@-bIh$0Hcx6!$uFZs}*`Sy)(V5 zMm^m_ch5>ozI=wn9}r?E9NWjo#y%&B9ZpD`7zi8)Hc_x+9AZd}V`HH>0c)vc;quUl2OccpK|S?1OEU-f_g|G%nRb!Zp<@0Wkpb$UDEr`Dd%kt+-uDiFEc@K@=fAiI-Ue4~kB5Ve zbKSGO$+SH_HSJH%_GW|eDBgF^u(#2FY&_iTPg*a8-P6Z<+x__R)r0ZArN1KoMR8-P z+1dxYH)rS2UmHiF)(^vh?s#Xmy))}(k4)n$LG~2vK+KNa+b9p~LO9Y+HWG&&_D=PO zQ=Z#8hW-5rZe!UU^=D6vCy#fh8{_SMd$T{9_S=v5C!_wbeaC1p8}x>QxAiCONpG|{ z-s>8y1lEOs}Bz3DV=!{PXv;doVne6Isa``o}7?*-A=F2M1h2OnQpeTSU=;iFbS2?w|2iy1@vhdw zKfV6vpJI85TF)!-vH*?MU7`5sJxTcBOh_@Rj{>F4z7{^xfV=(mac^dH_hFlhFX4Vn zLRoRDm}$Si*@+v6IE#x~FD_|mKfvlz8f(zMwHCla`egRt9*9n$xYP6yCVZ?B!iuLm 
zr{d=Eol{8&kLW!agv?;n$^{g82uSHRp9vzkt({@qymM!mgf`s}B1BpZB0_^r!>={q zCU4J;n>TH4hTs~hBnX*6_Gc2_7b1AMo$z8f*_#Y{r-uELoz~@~s$qi*7r6}sw+_NS zl#(a?&F-kTh3X%abv&~2rLcEnJer~Etv^X=!3P^~*p9FEP1EV%%qU*OUlU7lB>pzn zx&jWMT4J!-oAq(U7dNgp$8aRMooT;|ik8O*qchnf0U{Orn+UG4l-Q)-+l-g)K&97g zFgs@}F=KKH!Nn&dxX#4IsX=eb6;-HQ98XF8aXqaK55qO_E39h6lw#}oq>8$1{Q#<> z(lT$YwUD+Cd?lr>3fJJ@MetIa>2uELJY&&oT9*>H##QQ?^~5Fa4DV~04JuQG;DA;F zNF24CPE_eV&VrOXvQE*Th~O1Aif#+OW+j{=?K*R$oBMyT$QgI`W~eTS7xC9b)hj1e zLXK^Nyp}3?hWfwh_80}q9tx4W_+OpMensL9AclU}q>Ods;;2N3_vGK0qh8Lsy_#EB z!akG>dfl{P?akuFc;T+w@4wleZOYKQEW*f%-srJ-A^rcvmifZ2ETM`ucshbR45IC> zRBhjxLPOhRw_Z(S`3kh-!#1&;&PLJcr(J&fFAdeEhK|zWvW);tFfBa8m$KpZ zN;63F|L@IxRX!Z&qR2b~AZdh=lFvC~0`N;_3@#s4bW0Nkmp_&`YrHT@51m);`X%54 z{A)ApMua4Iprw#BtB`9U!~kEm$}IvQV8k)9#U%zYXeP#fu+OK!!k&S-u^EZe2itMnI&145ZVGA5vg5LbiE@_>>z` zK!!=pw({lyH>7|Jsgs>OQeZ=E%LXa1BIMhdkwKIj0Bn!~LPRG(d!)dI8fVJpCf9}} zw++B6ZkbvD8>GNu#+FEd1*LYn8Ni4XSP??@Xked&2e&?pOu`B&ARp%15MfII4jJbI zJ|MFz!3S1I0eM<%NsPrDdC-g$kWXsdTGAVTBY>wuC;#T5^FnH28@XQqak| zdQL$cQoz}@X06m^PfHgMu7MQ51}VT)I2W~^bs$I~XT%68SRn+m`kxB*Yi3Mt6B5IoRQNQM8o&r*jrz#m&61)a8l6c{JJ(ze42xcJ0( z{``Ib`vrHTXbdQ$HyaH+Y;5c2*yAXY#I((>Ym6_6q1yadDw$Ur(?xCX28t^%78 zFjl~&dP7Ade=EStY|!u&WU$&*V8t1EO~BYv*-*2>3akkEVg+OnFRXwN(HWmTR$xPo zGi7s|YeUi-D5UhaA)CwygAFhlQ zkO#wr0h6CtVFky|Csu$5_r(e@70yLVUgF%{r;SsHml^ORt@ahuliFjipOm)b!D zG;~?*D>&wW6%e_Yu>!^h=(K!61FLrsj#a`6s1S-t+Ug`Vq`TBFk=>2YW_&~M96DaG z;)#Q6^dy4o;8@%-oJ1fQNrM`sM^v(XoBnMCbe!TFg7L^e2mTzE_wv8{@BV@%eh`+c z0z{~ZD@!B+of3C4n69`UOoS5xo4xZ}29bO-g0~vrKQ;oIJw+D+dJ=kdAs{stXYdxD z5^VTdNTE{1p(c|LC|+)#g1>n69e4zP*nrzZg#nC01hh2tibFtZVeQr=*sp3|fR|)p zuI-Uv0TG*rP55af>vsIDN$kepvkf>_&3uGrNV8lfkx>%NR#W*1-AYl^`3p+E(rU7q zFGkR*CQYGhswT~l44YW{od{lI$(sHk1rD=QcWzPZwX`fg2A%jwm9kiM>_UXkTuj&f zG_O^2cAJvoJX?Z7_|8=H%9enU2$4f0wDizrTMNw8BzSWbsU&{(pmkthiOStqCpdJ@ z-;pZzEjVr!!D-%uhzghX6ajqGK7Unxs{%LOiLIgK?QkkF9@%hvwIWN6;;R)XTVHFb zzIE}mM%$p9AIMPIOgw>*!Y1H1gcEF$6b7UAR=+puPT#tdotWI8OvaP;as1OQgVU$` 
zlm2LJ>+P@O3oX@oZ0`{cX>^(he|N+|a^k?!`eJ&jK03IXNq5cGq0$lj zi?Fboo=N@g2ySKV*KIPB-so|1R&h+`vUhzCn#-=$NSKUDF6fwKkEvuAU`&Zvj;Gg~ z@aOR=IV%}xVO$4bX^d_@$!|l^V_4{XQ|_t&<#g;!``I3@mPNY-D=G}~9um@th1$Cj0Zvm0k}hpkO3@>^*T zF1)K6IE)T~x6{z^ovrlBj;+M*<4B=N1#zL`c<o`jYs`y##uW{sCDz_ zUdH?vKknud4`p6vTQhz?g4N1~NwpU*+&UNz6L0-{QNXvH(KvY-z+kl5f3hg9R8INR z4Y#byw;x`peZkuU_?n1>(dGv_n83;-dl{m?DRjgj9H! zlTNI0^xM!1<_vA`y`XWxU$eEaoSS%0{!PNIYuY%YC1Oa$X4F&q%8#Aqp9O&IM{OM> zkvg+fa7EM(PHYD;hj+er0$)j66=9`&g*(JBVFdT-WJ zUPp49{mf5c-6mC^5>jQU#8a6ed`;MxrJSLFE83-^D(f87ysO7WbDaCJ zv?QI{Eq)>eC)^EX7NRxlYZLxjxyn2<>1__sYUY#+*dBrPE$}kkDCv0JbJOmTyoi-* z!*=22{SmxGH(0u`l7~1m>xtdG3pv{duwRJyw{oyL3(=Q(knG;d%&wb0)Gj%cREoBX z#P64aK<={>qHr|_G3qnnUiJtlNmcft`i6hpfD>g+`N|2kL1Z1QnxtxyLt@kbm554Y z@ZJVI98yxVoo%$s@62SpgEn~dxAVPD$#DtL>Z^|Az8Z*jSKwEgveh8>*A-w&tcmmJ8aipot2>&n&X?Az@&%{mi ze^n5dV#bO^2>3w+$HTcs9xYIcp!iTgou4o!bX)(2`tcJA<&Q!qg;lTcv z#bg|Pa7GEOraG!UMR1~0YH`^Hexd;nhSRIq0^`@Xl6(F8Y}z%D?0zCVO$2PbzX_*8 zi=*5qGi`Fvst;S~LjfftDzq@8P1+$p2yco{hUt(+Eh~$mAOI8{P1QJXfJV^dkcXCJ zKDs(hb@ebj7{93=U1`9KUyR@ZZ?DQ0{V<%7#{lvME?$at-{Q!sofvq4dolC=nyhOub}*&0WhMX}~=pjqt=^*hiN=qW@3)Lj%JuF&h8Oz!1eQ zV(MU8Xc6!PD)nXqv_gSiJU!Bc4@7W(5UHiGPFw1@%rdw3wRgp4%k&N3^j~^*ei)gt zQ!Si0xbY-)308Ada}+n0inq9hTJYl`Fz3|hG65b>&qauLXJ71JlP-cym7V`c_1p=L zPQWRhIpf*k4JrVRL?@cbw(PWp46V0fe>#Nf(Od2k)!S|HE}daeFj2)2g5(~aj|*mn zf=)}?J=eEFLC3|e?C%mhK~y(-tx&u(1mZ-egucOVRRoSFmK%wSBROcUw)WxQ3MvQ` zWbnF#KZ?W(H^7<%B`5U*JFG}`OwpkTJ{h8npmWSCo}!EhC{CoJ0NI326=xG_cYLvL zZG~vT&>tqFb4tHKZ&si$mT$9^@<Y=I)6EN}Kh7Ld{ea0OY09&0G{EOe#d)Nfp>{7?77vLc|2afxL7G z(gOzoc}1EC*^WS1Aq! 
zgJK(-KeR)~qp>Cg>PxAXl&1&+g;HxlgXw%Im+9eskN87J;whh!4wHLLXw;uQF`l4< z1UJUp{Wf>baegXlA&DxT<%gpHo6Jhp3BwICyBsXh9zt4W&VQ>3TXktbn%`ATXuWn2 zpr_C)hFDC~rBj7>H^6^IPBB4zb|!g+dc%Jl>c^_+$i9-O-uESY@o(1}e6ax!)n$-d zq;vNdf>zjx|7wm|$h5$-?z{d~jiROZy;O{}vg4GKxfrRf*obFG@cBl4*$SCv+Oi?F zCv}>CeZ;v7UuuOkDLhqMYRQzA%r}3kVJU4>Tv+kfLu`dtgulG9D>~)cY&iwC zYSSx*xonM?idI-F`yVxi3kKn-O$yk$$8{xRX?2LuAKiZNLexrV2)@+-|80nR`)0AA z+Qg5MA^-jP3RVm_k*)E=|BC9X*XR!P`W0GOmNn;SAM;~%IfvM?=K$KnwH2=ER`)m8 zMm^69@Mlffu2*E*os_jXQ_f#9s7->RK@|2np*!~;gVXV5n4Noyppo-w56Au2CQRn7 za1NZStpG_K=6tAoMAf|sr{Xu)s&2%iy{dn%LMd-X;4tcy!*REvjbZUV=7(hNQKay< zD`DSeZ`SLc9#6J<=uB@Ens2}VX5DR8@QDVz+13keMI#1;${E6TAXe;etvSAxqc?n~ zX`f|nSyBO445&YM150|=vaQBuaf(D?!xwbU_-qs2QjfQk&C4xU*eZZ^oq4L)Cr3E& zrj31HXvpq=O1FRFQglov>n!@x|;lb&uKg)Jv4IOT<5A8`E zO}EF>ez%uh>BIdhy(X~OmkYnyu-|p9VZ1C?-Z%0XwYplRk;AkIw#MFcZk z)LNFNa9?6}=_34m1uV=`h>E-{yKF3g6C`g6ku_vS0H0~X8CL(g4bzomQ;xz+-j&=H z7eY+-t!*V%WV!V*TA)7)562HyD$p$l$NI7@&DY@5O&H8cm0Uu#etV%3-`$l8NQEV3 z(Q7sDN38MhN4$lq`_1rZ{BV8hM!S;!euejwc*faqjg#Do-srKcuUaDCS%>?1g#~K^ zr)dHHyu!hQatU#j08_y6{ipStKJr0Qk@ISxETEGOEB2Rb@b{csZ(yiW zsYW^mBiZDQo`lQNO!x0euGO*0B+}WcDncf41-FRA9_*DHnO3KTy=~9{_5j=;-{%p) zi0I1tu=Zs5cvE(fSa8L0Y0COwNeel%c0DHr4yXdV@JRen00nZE!*HZ&^wt2KQR3+X zvGG@B|ym)SXDA^nF-XQ)v7wG5uamo&B7zDWatLh;C-IC4~Ja7BZwvTUCz)?8#&raA?J-c)WWufonxB^6SZjF_=ovvnP{D z)4L}VW~;K$P(7K92hW~N$^ny+XjluKXk@(1uP2kTZAeL8J((~dYuA1~nLt={O6vWs z&^?(jFvC%;J=xKAIk$)D$pnT}&TQ_6VNCxQ1 zc(N+536}!P{fTE!CS%|N%YbE`y?Zhl*M)syk#kv67Y)zQ!!x+jw}Q+rQ_L#}&I zmWOPwo{VQ*?99l6d9`Bw^;98N3{MN~YF_=ovvnP{D)4L}V zW~;K$Pzg!KgJ(}B<$%dZG^~XN|75&aqbuG$nUrlqO7iN-gaKK*_Up+6!lF~^`Xd?{ zg0GvW8aD31eKt zHti3Qvy??Sk}(M=upJ~1dI?0U$->5Z^u?q5;lB8|?~6y0SYn@hKIr$tA8%T!Fv4w` zuO&Lvs<;FWbjLfh?VVY7n%=xNjhA3?bpgH4yjCdn3{~nQ@KF4Kw^BJ}@b=)9~XBcoWM_)m+Y+nj*3JWgvC{{JkHr zDAg?dv!?4YWVobZXQ@(MsGP=*l}qFSubjpb6m@8uW}4KbTA_0vJ3ah8{1dcexO}qvW$1cOhc8bC>+nDt95ocz2-z+gFQ~ zd_&@;8h0Txw{w^L)4|+A<#+I9>D9}u$MZP=muqT(V=wy~1Basb73s(@ 
zbZQ`CTJjJ|DNkNb@+7QM3BflTa0|;^Wp>+wqxuO?c10{ZJ%jAa)>XKKhbZNKZlO*g zE5r99SZCR(E^Q$m_(|JTOok91{BZ=5Tew*%7?j#wDXaAY3mShG!RuI#D(e+>KWvSQ zlJIg$`Qd}SU)8YaGf zKLW`;O;j>B+G{0ggzBkN)Q!-Zy0S%u*&gi?KRDXA}VBp<%qaLIO*MKM75h${(eLE6CJEKzxxq0uLNBZ4&# zBND@bkA#a*;LN{{(=RwR9@Lg}HH7ngLxh1pacVahzMc(%;7^^J42DTD6gZT<3^kV~ zgL%AMjb13>;G0hE1@cGH3{$Jqh^;gjn; z6ew@YOBm%v03?H5@RLo4Hj#>0Rp~9%_i76U-!$95C-Y1YVe-V5OJPF9R5i zHv3O@^R6_?DoD#;>pipx4)25OZ_;rk7^-J5AKo%xF~slz`i1FAKrv>!wc=y)j% z2uaOf_NIgsChft~E>I2Pp7x>Tlt#J}hc&#Ma4Q5@^EDT+hUJV4A|A|?LV$?9^65$g ztVxP!_=XEy!?G!cjp{H0OU(hUVIMDBQ(@;GDv(#v21$$S!QXZPY&hnGAmJzJAcE0| zAO$e0uLZOGqVG%f?gbJYzV6sC)37J30f%(ymWQE>u?D{F*d{ZKiUGo@VPojIwaE;D zvQ;T+3XOGZQDAokq-~;8gUqrggb81HYfwt&1s)Hc-LPGH+M9gcH)LZ>hC0vKE}4b` z|HO?K36NaZqG(~Auw60>$+lJxY?n|~@Rm2l4od(A!lIeDV7sOva6@eB9KEn#GKE$N z`*p;23BgPsTqP^nxaiX1k^xHFgp*y-pHL^*mcNsA*ONm^knMqb;R zthI~~gB!NXGwGkYIC}Lq!DJ{$Y?nNq*_*7^6=M?#?OyKoBC?(7iuD+6oKCkhIW14R zVY_CYy|G;~gd#uj&9WjfI%2zIXr)Hov0Wo+(O6utUGpFTuw61pIaQgw72L30GGMNi zXInCJ!*&Ix#U9K7Ulb_1V7medNlGiHl>(;l3VTjj4&1O^kxe0~`O7|#Obj{R5!>z5 zh3#U(=MmdwIOBqdOY566BjPQn8qVKuB`0KXeVhT15LTWXv0a8uF>E*>jtn&yY?pal z5-w;3?4bgAWH3lN47STMCj<#UNf!aaLogZIjO~(XVDPE5?8RfdE|oADz^g5G+qX-GR`BMI?HUM|T4INNyQU!mG{j^G zhlbc~-!2(2(+8KTxMI`L9!NG5IISm;*_85=6bN?2cF7>v1pTpHj-E@>;s{tLEen+h zm1z;+eS(G~w#zf=pSn1DCbTFJ=WhFUxtYppT`@M1+Sf-*!BUtTwrl3u8`~vAcqK8n zeY<4nEP}{1>W=LiNsGqfg6*0IIlrmQ4cisC;*wdQg-SAnU4G)17Yy;*ao;YP5ivy} zojKr(0!0^WS0EutX`!)QGNNMhj!E`z*e)5A$X-Zl{<05~woJ*<5!*dl7q*KDpGRz$ z;fxC+=N-JsB_jgGj@T~4rU>0QAdU<*7i^b#ToNwMKWu~N2X6azS>}Ww;V0=LK)B$f zcv@P>#%_bDk{vy&L71I@&YU?l2n=r6F2|b?9)$BrFOYcQi0yKWiUGm_Yvky; zwaFJcwu@PG#CAFPhzGRz%DeVJ;kIx0nA@2Q3^b)ZH^B*mB6Biy(2i#^5b79wssY<2 z19-K?ZrCmvTEUw;wre0BLZ0vexM{Ji2 zf=y6ph8=<^N6#f`aRellZRkdrjOW-0yY1U0gJP)*6|S$;i`3QoA z$fl6g{AC}=!3=Kfi0vM;!FIVKGBkXHXP^Or!R<^2hBGdRoOi4yXE+eH%djazHx7s+ zL(K)-WgeG=i}R1|;`upZyDW1;knodq5g=T!T|BKCuw8~{YLbp=sBomcHlbY;MdxDF%Or5Z} z2;SL%JA!CJh&MqrQ3M@-@fhqpeQ!W=cP=X${=9FM_2}vqe=fL}={+OR|E_qBpqgVY 
z2Kh5NmnfDV30~>i4mJB?ZByy)A~o5hHhT=|SMtuv;BAsn2=8fx$qC2-g;*3wA0ee? zkWYH8*$FoGn2!48tcX^*UMmtv<@+QJmfFBEN)CXh(N z8oWJ7WG`1PL39vKRvM|mKz2iY_BQpf-5%|rc<`?L~ESdzjhLXNhGJrGdMHpZ4UaQ*|g36 zO{U5_d$T)thTZgm_H1W+*l%x)N3+Rz*xl@J_t6DTqm6Ujm2!OBRYVYnWrkcAtr+&F zuL_U)(;2#${cPO0dY5Lu6uvI~gS+7kELT`}nanNC7iSTf@p8(Pi7S0H@wv3p#qOD` zr!vWvXP?II%c1zD@a=r6UYNG0#wAk6adE1qQy?af zyOp*Q%sl>_GGVVke9Sr!hpvdRI;2LetBAk88}2J(F?OpO+$Ja;P<)sjfd`5u zG?H7`k5}4lMkbM(^sOPNJf|#sP?m@dZp^8uDtPR_16NkD=!@e>7N(!s4U!Al9Sj`< zva(ox4J%)bnWq$z0GqWu90fCh+~hfB;BR3SV81ZJ8TfkD6+YjsW-|L$M{KEW|I%Ja z%B*L$y|pupn`ouoY5`ZjTj0c_iZP7K-<*JX|4B}Up-Lh@@yp~+XS#T=i7?H(J;{~k z=Sh$*EK!Ev=r=<|F!&q^Xc}ye1iCJ^042V1BzQuwITBPS;2cS*9c+%I2P)>+Qta9) z*1tUC9DFN+7byaWu!~&sF=mTYq6u}uW~wBz zlU$6-1GH1@Y;Q7cSKpBpJmQJF43djc*+ntki%~h-;2JKTDI!;TLHZAAqcVzAi#R?f zinyEyE9Gv|EZj+2vT)4FU%*kZ{L~UWPtJ0@aM$?m?c~L(N-CGbOZ0z7E!D-Kd@(4G zvgLucu{k2UL4=0GH*s7nm9q%yoq)@wGTZ|&?VauO*U~+YTrGde`o5(-@vimVTjS0C zkbN~`kE+OqKUjc68Oh^_?#WJj)*nsBlkTXu)t{1AFW$94M)=F-`d>8B80q!(``4?W zPV3c>*!7pPags;0iY|HGv__FPmd4NJJ?*-2@s6~rW{S82HB%(sVy1{I6Ej892AV12 z?#@h+SR!vyYN%*G{B?LCYgOomcI##Sl9|6v3N`1!

N-HOWE7=n>?N7*FpEr=Uol-?##_LuX- zMev^l29H@YNJ=YFcgG~dc=4PMq~KZYm}~GS0&}mT%-Pq2B73hYcyW0#8X;Mrgr@SjPk zM$-Ke%B4|-d9QX*O!P;zOuDEQkECExqt^6jd|Yl3_KHOdqY_DrnUomRmohF|Bx5O9 zG->gR5nxN3;7+sK8xHd|i@c@9{3i!+gG;25T+07^MEu2y@I*)W`Ka*oG2!P|2|r&a z{CvIe^9{n!H^O@_hlRmtFw5CF$}O?*D2!w`(R4aEGfG}%`w&_7#gDyG^H_m$=0{Q` z*BU>~Q#XD-uO0~fBPROj^HoP6`*1M7<`GEXCv!6MK-Oz*M_;Tj)L6O1V$s%1GUOWq&_YI$&&-1wF)LkmR|6J@1FSnXRdDSO8@)7|MkrNz0bV- z9Y6J*4?KO-kNo=YEy0gp8Gq>tScN7!5i4GV=*i{iMEdm{`kJ7xZ$n>~UjXQr3+d9$ zYPNE7;PR-ku(-RqcPZWj=-B1r2)}qAG|@gj^w8U*DBcUvR`KoG{M*Z5ACt7vFVS*P z_M1FEr@z?)jbTb92H2f_nEAewe!m~~?9i(UFqz!m06in4qKNmJbQX%hA8U zlaOzwN|~MXPvf!DnV#(pclzlh&<{DfP5-`(gpmtz8MQ&^ql4_c#0>tX<3_3pML!os z@PEUeb^K}rJ6g(pnn5u4i+Eu4g9Z18VIP`AuX}nt+3L;W#p!HwD43jX%{Z(cnB z`|)?^|8Ktiel-5ReEi#xwBv)Y9}U($)!TS{dotdxfBbvl%Js>(x4F@q&e9pA#}wW) zIWtYoWqJgSpMA5EP6YjClKv+9CjHIwl~n#o<4ALL5f(a)=3ck~c4te2$)!Lw*< zTt)|TX9|rM@m7xIO91}`y8^7k?$^I{r#CFQ&2Hp2hhX`K=zyzNK-{^k@S6SmF4+U| zdyz&Tg@s!Or~!;&VRNuGU4{WVHu1@B>T0ZtkyVq)Ax;=edNHt1WG>2a_Zrx98#;~> zosg)SrRdw(MfM&zh`a-?1-kf}PG>Me3pS}x71M!jSi~=vrK9!GC)2N|(kGC=A{*J< z*+`#HjT}|)qv`e-`CgA&|1SOyry1E~rvy_d7q8%WsNw*im&9LvDVQ-`V*m$_vpOkHfHjhKHe?76q!8{RTFO9YU9wTS#!e(+HGJdR(; zAaW-`f+Hu|FE5_R{;Dz;6bJK9V2|P~cMQckBofTxHj1Q!ClBpG$zngdwF>>_3WTT# z+hPvwNe;pu{JCi-UnN8_ovataa4?-+ox0g8u8xjGsWgU=?{0A!4aLH0qrDD|)*tpa zX7MGk7j1gj9Lxse(IH4Rkh)0vHA^KbSGxyxA5WR){1O>x9{!io$ei-WMvE5${k25m zx52^VD4tFFo3{*3pYBi4@vVKOVW>PLbt+U%9N#&md{m4$I4mk#i8O2~L9ET*P9>kUR}KFN!PM!akJ z`Dm^EvD9+1?T+l9kY}|PSFT;@TzjPR{ImF*!;_89;iHGo^sd{y;q>WV|HzH;=fAiI z-Ue5(@XPqrv_Co9OO+n8!Is^16SaQ0w2=T6daDQHeM^5u{)^C}RK&PM&k@Sl$0^{f)zxY`4W*1Hcb!F!p9mRtays@Z<>UY5FXKcG6>Hcqf|+y_(U@wvvAr@fQin>?)l; z=Zu_VXsD<5#x_2v7iu}PRPtGZqr{7=>1$$Z@VYWw*VSGW5eHW#mf8Qu8BFL2=H3|rJ-jjA2qWf4X)q$tCS z5?kgSX7Y5z->@wSXlP9`Nc*0)?pgcpvQLi8feqFA1|EfKQ$uIP8PmdZfn4$Om*iTw zr$6$@Q!O8+As8>ZHXcp$wO-R?b)yC(KGA?T8l2fmFWOd~yEWd+rg^5+^Rk5yqxzLm$RNP)i?%5ZhFFNloFL@cBRjq>X)eQw}3auDSOTrNQLkHydzs z0Dr~}ei$=CEq)k6!R*l_#AhNfZc8UAkn1gkP8>%eE>9VTmNgw-_98YO@bLziE`KZn 
zzR+t}3nd9*gKnGfwJ?E?DATlW0!a$V35|%gXfLE`Ka>*0|QEJcLpi z8-RaphTVve1P?U9Zuw({|9SAGF)R48Rc;Xg0V9r)wx_y-R5Ek`v6#1xscZ^PJ-~p} zG6awT{=f{@pqZ2q0*ert!y_i(m9>C^OHa4}{*G0=VQ>Nylu-zwn4~obV}lgX{pT!t zTYaJ53n?ITVB@+!Qb4G-HJmX>L3xqf83Z6Bv;3&LYa671P}K5LAO&3hQZ!a_)WZ!< zAkzaIq=0-_$BQ3QU^4<;_RseE#*HJo1+c(K0hwQQq<}oBlbtPVj7WJA_W$d+UaHhBT`^R zD40E(`Y0QufXrCkh8RYXwIVK^ESU)EJ(#_o>AI#2x3EGA2uZOeF&1;=K{HZ7KB=)m zZGYk=DS3xt(3cfbK-m)d6l=*9MWYvoCSGiig8bD>wix3!32-%wra5Y9b`!X?WBC~{ zHb?>fsM0Um!O}dif=;H081E@CVg*zPHSH>(?_skpJ!h%j`h$Sgm8SOKA^<)y$1q_~G03_wP<jW zdGMt(TN|w4XuikZ9xK2ts(D`l2{Y|fIt8yDz<>mBu-aE})L~x%k&E%|3Uccq&*eO@ zf}@qN0xE<;6+!nE(DTSxm)2ng4F0R_=g|!>`LRq_tjA0AqyeuD2xPKWyY|V4HT6SH zSOFQCkL-%|{cyb0f)!Nb#Re-N6m@#x8m!7#fz1f#6DuI|tBw_rCv~#3#|mtySz!fM zgnY3AGKd#eK#1rBXpa@xP~%M5j1}0BOtAt(`CeoTmxhr{!fIau`LIT$mRNxWB|EIZ zicsBH0hxpqRzNmJhQuc zLhLKZ*)oC(V1pIpDyfPU;K$5ZK~BD;aRuI{(T>)4UAk~QH7l$j=R)v6BUXSP)0zZ* zf!~7z8?4}1Wvl?VsAjBygc*z#a1cSjGYV|>6&!QG3W!|HSOH@L;Z7>Bc?aQGC9Hr7 zp$PldSOGGmyVNg{-Hoq+d_(VCe?s0nVf|VM7^rc82pwC{^1r2!iNRV#0?=`aZwSVt zcALSw|L!k56_zvgUW2e)=|BX;)obU)6*?vEWH4QEJ(vh51U7rFspsnteMuO#q(>Zh z`v?5TMnJQt=t4kGLa#0aq{iY5-ojIY4POf>REjv%Wby&UYsMa!n^wKW4}aKz+e3u` zj6wvoH1vu?Kx$#_)(~sptJ)XfCHbCfd&)>afV@y@Y9zZ8qDm1JKHGp})yzj|hBSu+ zNMw`*vsFcpWYDU%Px5z0bR}Y^nly#3shTv4D%N}I1_UF5--+NgmaJ7iNFmD4Q1rcv zoT+nlSVIKpePYdP)tud?%y6D9>`oB;P2Hns7oUp=hcCxsL`V-^wza@aO@cR9kxJrc z4+;l12vB(Oq3{4tNv7xg9jRj9g5y>ZoCfn?N&{bpzGCk71|> z0*TUWeXXVXN@p?H2HpH6i^^tF7^&axi3ol}IKdWQ9J z8&Q&q0!z4yCGZUo1#6Z#N(AqW_#?TMJoKjM+6;6l7r7@Zz3QX=M1OGRu^By(x4h?n z*RHp}jxT{!+X~(z9Mb4C5&rIogJeg?()uFW(UHDv%H*VG>rm;A4|IvqD*70_JAzwT z`*oWPLYW>XcO}GRE_>Japt9rPr{wYtK_U?vxRXT zV6OW?SDtACXd^w}l)EZGIiA$M^6BdZ+2FOR667OrpSwz@Gjw6hMt8c=8}=sMjmP>M zk9Vg#8|cCybR$r9bJu5ukf&ty#Q4_XxHoH)Yc!IRk}FUpK}0Gb&AeEtDA72{X&tG{ zrmKD2qMeePJITEUj#kBoJ>@V|XoVG8hd2Z{wyZRt-Pe~pY;9VR!^;R+-j%`2T!2zn zUkyNqz}smkz4a=+`YN&eI8ta*L0pAC6n>YYgz1%9=qQwCy2xMLq=jz7OfNOyP6ksN zz-VWyi>_*$qMI4p+3kq!b9HNqQAIWX*|jU}&+Htc#8-?jf2`^k{PJcMF&Z@psE>ZN 
zl>@QNWQ*n9;o$Lp-FlK$9cZD(Mt$%{5v)~aZ9M8vGtSysLam!W_cG?c_;ELvcqr4J zZO!<7bSI#@_Tub{)Wlo=UKBDJ7cO`ickgN%G8JJJe!2nISLQvx8Zy5k(y|g{y*dlM ziBB|0JTz9wNhOuHH=tixDzvBR=KV}tT5FL%BGd?7kD1@@TUYVs?)3jBg3Y=`z!c=Q z6-v$>)4yocCxC$cRBeLORE>oL2|XPAWds{_Sxvc%tBv=v(O7nes7XM#Mh^!Y=ep>g z`Dxp#g0c)8nkEf5k9COyezpns2H>hx@OS|Ub#a@_>>6n{pZRN0=~7x);VY zj%(f+RdoqD!!z!mLM5ylcL3a)xwG&|Wpw_Qrx9Gs2!+}rOBZyB7mTqMi#E&s-jL5k-2 zelFs#(NYqe-x>@P4=S&feaSX7XE;(!z-zlmOVX*`;wMsY!rf42AzB)&Rf>a6_-o}V z^US2TIY6tKQ!ZdjA||_#rHQrh7I>L%lytoAxoP)EUc@R^{@Kx#nuPa9@Dkl%w+@EG zJj9tDO(M4pDh;2#&|TFjEAXE`Xh9~*~Yne z=??UDHkh50A$jiO<Z>&ld&VAX04>#d- zcmpnL3pszGuUdjjQnSWrIe&&~<`H-(exN4JbXQb10zVbO-QGGwl^0I2?3NlfyJ|`w zNznv96Y+P8J7s=;QWXD_jAuDhc%}i}aI#t_o3U@pi^-cNXQuI$(qAW`b(C-yUI&pc z2{+AFr?fTuCGg_aOX7V?e?|U_gdYJVzkpN9+q|ORlWeMR4|l38Qms>@F+f#0{dNQH z3}yT|V87LG_Wdk1y8XW1F2=mDkHORN4s_+Vtp?nguj40M313C**MA8fA+0tTab%n}A zDKy>EG<-O)|79^5M<1L~LaV99YAjRrFI2IQr)%-s_Sls4-Dg_@}<0LV+Ho4KeOXen%yFYJSWymS&G zCKwLnr9+S&H~`2i(nKKd$3lR-l&%Uq3<~nn(THRM0Ed*OsL;ZUY5{ZUxH&J@4NMr+ zmkzH|90~@-Ha34~hmJ>MO$gMNQdKLj7SxxHOSw@k2>^QO@Dcx}9H04l%i#3s{se6a z?MpgL?roP*fA++9f({bg7;pF6+&Rbjsi-<6p)h>`et-{00XCVHsuPAAWOlo8howD) zRG62J8TeaG*s4nd()_M!LhH4I06m3XF~lm|8q{|;z<)(fF+qHGCV7Q=!+#v=->T@y zE~+TEcFAAPSc5M%;GwzqoSaKRuvwMhY6_qeWPEUgX^`lH*wXJP#Ctp@mSL)6FJDC~7YckVp~r{m2qJNFbpBj?jT zI{eorOy;d{4xFp407)I@e5iXw)x8O);y2f-Zp5R#s(-FRDQ`yLFzS`Vakrt3VevlZ zhh*+ir0_Sz?M3R-e z)zX{c(fHx|w1oC-8LsVyzhB|~B%X0}L>5XR$J*(QwK&P8-(`K(68SE%4fpd33-*bU zfSB;-6%HPhONgrkm;#RPKds;Nkq=T<16mH>x7nNZx~IpJt==qNME%_D_us53t%^tj zf6uw~CU(JbEjmNqTHf!NYNTT@l1<*|Nw_S{bpM{@S{<8ABAuZ^|wb3$8dWO<5l-X(4CUuIp8x8gMEiLu){l z+l5EshXN>QS%4b;? 
zWR<4wXjUa8^AI^Nr>rL{0u$G-*|d0T@SX}sm6%1R$b-J=s;r#~2fLbQos(aIk5<^j zZYEoQ3Y5sjeha%T1700l_*7ZuIX#Ni`Y0<~uFEYqmjESKU{%SuWhPLIR;%hrz2EXV z9@i{9;!1|bYz^M$nfq|ab#Kh_kZrx~)Xcid-HY&S3wU_IOCeuK{YxU;L zyC)O4R^%eTo=g~nspdR;GMO~Jdop3RDhmzOlgW7S?8&4YFd2!4wa|%1#>@PAGAY}J zl;qWu2?Mfr?bnkDghi(;0!HY*OBk5pNQV%4_hd)g<=h^oCleS_IkUMZJKAP@GU`F4 z=D};(&ATU)39?F0)(Oy)S&F_#vio}$_A&N;>dAQ4RqkGher`QkXC6J7Oale4=bOXQuX^42N9zo-7a9UOgGly2{;)h&{KSY-JujnM?x(ujipB6S!97BENMqVGO34 z^X$oF()8}hgxRVrG*m*8@!;8$NjaEDMw0PjpOJifGAY}Jl;qWu2?Mfr?bnkDghi(; z0!HY*OBk5pNY@8?_hc)z^<)A=DrYwLWGi#&$v)<}cTpxtrYC!;VTWdbj?7Zvg$Z96 z9T_h;(cyx0Wc(08Ix-o_038`mR^>C{5@5NF@ht0Q49qA0m07XRf4&`=jB7*s^Xka> z@d9;ZJWc)7sRjzJr!&E=;od`~_ z*~b3$*X%VI%U;a-<^em|)9E|BP6mIwVSfdmY&vUS!Mf zE5WZe?GKT<np`F$pNJ9V8EW2}G;O>b}(K(YKK9hx_8=zHcE(f)0Gp?}b0!v{Ye) z+caNGbf{HvbaUJps*gwDq4)uBeQ@?-+jD)&GxL-y6}mIg`nW)6*X6H7myrp)E3sLA zRVsq_`@MD`Xnb+;jZaPcle4|qU_7$F0Y)Yk&Qw%>#`9`=^{a_a_mJ&8q{$F| zya8`wxv84VSyNLaHopwS4uHS+0~V#4g@4v`J%$XIH0&%@stc9V*s*enJm8hnSc0Ms zsig$ZRP3nud)xHcgn#1q{vKxoDs^!g&FA%9ZnT{1m1G_&3;eExlo3rEl|>);Pz2YM zBh0D(tha&=9etudX%9w&*`PPsEH40@-A{ga(EB2ORdP5PVNQE#gsH_%*t zXL%{?O>5%bVAP*v1kuIKXpC({eRY=|4=Av6Jthw9tk`dwP6ua3@gn}3=sD)IdYKrf z!&D?Y=uj1;oyP~GGu-K1-m{L}Tj%a^5BFS8!cX9QfXq(jmd5q;0=is%SL;TaiP~^DzQ&M=*m3y)u0Gic=g)6N@EVpM9DCW{ z7&zp-PvTe}jFj@!-vjMlT%vEN$EjX&5@MKrSvePrjzHD7u_w`VP zJcgL0{k;g*S$3*RTZjjK(l!;7MLcnr6#Q`nl3Tc0DHxR6T`8-T1dS@T-l{Faz@J6% zI+ml#dPUujp~iLBK&ytZ@x2Dz>dCMeID7>tySkR0qJx!PSi$3eMxLP>Ig$`Qd}SUY zK$wCM{C)(Id&o_yP2115zT$X3>3J_(KY)7Pg$%vJ)yDO-+Bgi?#ILZbHp&gJ=3q4V zN(8TFd9Vu^lq)V=TuRD-#Z}A%7Zx?MNDW+Urkzo#nW?WYIy>>WaTANdBU<#tWX65y0)`A0x2*lq%B{m z7f$CTB7-mhp@EX4jVpx>B!?29enF?OKZ;~qJN>AY#poB-*$$QC0j^;mFI!XL2OeUS*S|WOL~0ak z-*3ABHXL(8knodq5W#3fkOCOh*MeDo(f6f#mjs7ee4bCbI^MqS*f7(uC#(Tqu;Jm` zj%_l-s2Cue8a9TWTbs-fC|i}HrqEb`ww`d0G-;dY)F89$31Pxl-n9n`H*A-l_9kEV zRW-(pr)lw5egjDu5SUEa5!)q?EAUU;c#!}Z5Zfi=^5Idn$<__qC1WUf%bQ|{B>)3q z(M(*hUDFV?G{j_3&5jBmY?qMF^r5a~ExImo%YjR?MP^g3X9HrpWDsnE!cIqrAj;8m 
zNm?8MNzy9Um_?}elCr$qHR#$A+vS<`PhA{6lXHZTmm{{z%~V$Fim{1=_6s6~xna9z zp6kMP$;fs|%njQmV`LFTrcrln*GO7478h*SJjnS?Wp3E6z}2%YnYm%R0@Gp-=728> z6kV`gfy9NM(%i6Jkxe0~`O7{`+A<|aaKm;xHrQ@p_y&*3hjvZ#?uhL&oN+-U5M-M( zQCc1nAzOPk-LPGTO)+dZAdU<*7i^b#ToNwig{nsL!C@ObKXAi#S>}Ww;V0=LK)66r zcv_mxX={$yF2k?1I@*oPi}M^D+^}7aH!eId<$9zOs%#^ahx&|h1l+J)j!`i{IJL

55)w#zan1PMP$7XiS-5Ow^z?b~JemDPTvz734+=1R4NSiue3<#^-5gYCef zu-%+rV;phBb~#4H0O5c&a`fEVm0oG64cjH-@Ajyg8&ZiHN>46)zOCDEp=Z@_fNsGqfg6*0IIlrmQ4cisC;*uF>Uc%?sNl%(>f6BBE*_PwY_;2G9kE@GH!eI_ z$2!TbDR+}BR~KSba>RByM#TW()UYwotXrGR5Re23T2pukGen?q+_%fiTL=@r^42+d zh#-=_&~UQKiDKMsaN0w6Nwna)VSEKp`oenD!iVy&(L;8xgJW^WaLBG?cs_9GdxQx7 zqT;z5k^!@_k&7Uu0z(41MV00$D>S?%1@CIYj|Ne;0KQc^$wVd5q$WGmmK5GHb?Rjz zcxMCd2%-rg-UQJ^5m@}iWAaC|wj{YbmlX|v-nYtnboC0C;0+VvkAtZpT*3dYc#fc& zV=e|siW7hBg%51$k>HiC?NGBX);5*yE>e?CYO}`+G7`O~5hf=f2NYsaAbrd;8lh{= zPO!1Zbkr|rMYPH-*SMq>l5|d_4)VPVl?!re#3o9wHLG-8ot#H5_LRbe(8>t4k7jnV zy2n#8@B^P*TPRhic>>5Vsm0a`fAJKYqO>t+Pdp71rMue#xRelE0!UxeT1CNR2w(N- zuc+}^S2sH4)E9ixJq+%ZC&T5lD5|`}r*r}DaFa-$-;hgL3K_Yv( zatWe?U~K9F6%1roZf9>(5Bt5z2;CgtouFIZNdZzmEfSvoiT(@u9-X5MyWgPFN-zG1&dpN1SM!{~`OsZp-?Bm^)WSN~O zE~PU|LCimaE(%Xmy6Q9RP3g%?%XG>Xo@k9z``1olFp1<;c?M@Dz0E;?G@G{BzsXd2 zXK!}r&aj(4(4OsV5Bu$n@n|*~54)TF?LNBwX|!>!yHaircNG!D0U6JA(TZVzT9S?W z(;2#${cPO0dY5KPBEByDgS+7kELT`}nanNC7iSTf@p8(Pi7S0H@wv3p#qOD`r!vWv zXP?IJS?AH0#P?^=LYuh#W%x<{H%VJRxKvpwkR(mx|Tw=BKafhf$ zpUoB)5{K<4#oSS*cuoc0O;+wW3|U(+lB;Ftm06n}CtlNrYnRQeM2GO}zO@75TOEE) zwL|`}Lky+EA{%t`g6C!s?)#arZ5Wo>uqI+^?bo5)$h99J5ErLvIt60#xQhvrXPpBv zN8n;0?zgjMrrWU%#HDwEnHtz7s1kittb*}@`u>5K9O))AWMd%454!|nGOS}DW*&b| znXp$N?zC+KvDbDcEPJWBRtx3<0&#H!hRlAA6)}0-B@mNmodYpP;5lW*UKR1t^A(7R zKBSgyD76Dr#N_>JnIRhkF@D%35R+jY12OaXbIOFh0`XDnKzzO{VywOZfw*{IMP|Ro zKujKY3B=@C=RnL6curZdS0FxS8;EH&;Zpt*;k!g#NrM<55R=W)GD9{7V*Id6ASS~) z24d#%=adP11>$4Rnm~-z2VdU}_m#02yHyQt6O;}pKFp551H}>=$t~>1EA2KTlSoba z)(}*lQ{usIS`C*T}OsU2*Nqz5YI*i!8nqnX?{NPR1U7byaW zu!~&sF=mTYq6u}9#2+lmDzv8hq2cgN92ZOFEP{F` z;Bu)9_W(?LXZ!ro#m|G?XM4k)etgOLzNJ0!uJzqpQ%)AU8^hjo z8ea|v<4dzsm$DDy17ua5{?`}a1=+{$MXwW$CyMyJddp7uw|b;?F`RFc zov;_fd1l&iXPUkoL7sZmX6f7DP3tx-?TiOJ7n%&-Tn6FvnEiG1WasW5D zMA^xu{Le?kU#tjEbcCOe3O^qcetwnk^L4_{*9$-2ApCqIy!Ucg7>ow9oSmcG5(|&Q zNOluVr-L)2)@zXqYOZz1DUF65;Gez_EdR-pm7WHS)yEIJx4j0?e)w)5pk;jNHL~L_$rCl}juZ 
zZM`Hzz99fDWEA<#Jg5<~V&-<2``j36#q4gbM3418W>T;BAyUl6g$%ht!gl54ID(2*%v%HDGtFgJ0xUfcoC=zusA2&h_ zYd$h6!ZbH07a5WG?P*6089x~n`GJ{{iyd)XxZfmTnn#cQyfmATm9asJALa;Z_?lF_9v6^q@68;uO3h6pNr^l16)gw1V@Kgk*UMh>$h#w7*%;r4IjopWHUT!e)lI07Q zleeK0@TM69=<2kV;c;ZKSk#CK(;0vCvn0mZKaex?dfVA>hl@*8^V?=)L$ggqV_|Ih zCka<3V3<#RF0z*s3U} z&T>@Ei%3?oQ4#s9wTkEiwko0zunak>2v-0$Dk8OlrHb;8+o~x4ILlEr6_KQ9qayNI zYZcK4Y*j=bU>R~$5iUS%R76S@OBLlGw^dR8ah9V(Mc<4-6N|WpnkJ-U>d-2AfGDft zQ#p4(Qf(}jNr~Ff5{V{_aFfrIRL64sESKb6Po-yaKDqOb`P!&7EiMaFRW83Ama;Oi zJKl~HIHcNqz5KnqU~f?+e)X>SD|cP266ro_UiKHO#QrclD;t&X7puhK0C`)RdK}mk z@}w@a$#3MdBxGtLb7q*Rp2{0C+x$i!Fa%1QLjGcvh}J@kDxyW7t%~Ra0aSFcO0-cC zE&6O#L>~yCqKj2xe=20czhEviU2_mNIjwaA?b~J&GOjI9g0Jr#LX9wh{$VBAo zfK)_|nU0+p`rbLy33PlMkYPz0&fP>s(Tcosa)>b;2@g^6blgS7(~((~Tdd;fy2dJj z2oaY*WNxvF zEaE%IDuw_Nl|!szskp}~mWoV7Zn27?>RMNEB#4-t9FUDMJ`Tvz2`4IwU%SUDj)aG( zcslN);_1jl^-A11imrTLs}ks~=?%4ilK8qC$E*Ij1fo3NY<3}4%DVT6#q$ZAfp*x%>NvRME3_&Uv n-N^v6l8MdBc`3u0?Ya9KLx+ei3^8x#WoIT4-*18;L9mH}9pex~VjLR_;smh4kaYnY z;^cbS#PO}YYgg^6+V$F1wR;BL5o{DGBy3kx6r%F276 zyEVDw)TB2UUH`hX{n2c#bI*7Fx8dX8h_VmfyZ0M+?3-Ts|7M?C`ON3`!dv00o$+w6 zd9Hi5H<`A_r>6bM+1_k09>x3bANDr;PmYIM{Yh&-?Ae-~+v&%RBT?)7;9z&WJKNcv zb*E4EcKXx!O1OOOP`rQnugQN=+*oe5UJQG(55%&+!Y;(@*uBj#!$#U+BXQVa?^J&{ z<+;_(J93gm1iu@>oh-Yf{_N@T5aC=+uhyK;PiO1-NxeFbK7_{owaU;rS9giH=V|9I2>O$9B;0z+YQlr3G6xB8}9bw zD>mZgX1s{h)mT|>A}d)(|5z5At)!&xWmsCv9)V9p@H)F8bzE%^MzKFnR}!8MY`}69 zFSHi_>5V`AEXzyOdO?YoMQE%ogyN(3OkI53V#mh^8*rb$J|4)7?m=vG@g>}kNhm9B z?S<$BiYHAUJ4SJ7+V5|5;>OjS)kdwCmNb0;U~M^#OX%Nv3t%yQGJOy}+z4UC)7?{X z^Z4$mB!oxwo(V!`FjAR35r5<%e)wb%!ENsj=AyY0k?X4 zZrr?eYbylTNF_nYAp0{pVjc#(?C`S@yxdNBF`Vp82E9|m{>e`3QrNdS9?ej-)Stu+ zlrb(R1q&M~xZ20W%^}#2a`dFX)gAS=QMrVOx~FM)Ujq)?@zuU{Ivt!D#Y^~WVkwTq z-{x9Zz(G_?47Pf+KCTDj#brIG&RFgELxn1-IV1krt7M;kx)0Rz+kwbnAtr z+`4SzAS$=gdT_n9nD!a)?;?1a&Gb2Elz5Kw+)9ppDRFFExUTcnltw=AGYzvrWvUPy z&`JP_qm~OJbQY1^Yc-N+l|2?d8o?`U6x|kl%}O`{nAETX|G^?>+|!$(x+Gr0UlUcY zoKy)p1zVPV^m;1!8EOQlJ7W|vdni^e@V`2h{wihY`%KE%K%R?Agm`cMjXCP&tlO)( 
z;O|P+lYXFmj?ddNN*2|39&1zL=n`0^qp_?ly?F zyIQqIJ~tI&=Q+eBVEh@yc{oA>k|8{YH{JPOsOhK>%(Wg7vQU|M)C^(Y$dmKXEo zPXD!Vv;o>DD9g-k|M4Ojfl160x@l14?Z5jO=c^|`b~C~h->{F@Fpaml%}SqXDiA` zvm~?*|K8kJ<->67pmOZtR02o|8N%(m)2A6+SbW0Nkmp_&`YrHT@51q^XP}4cf zeDGhJVK*Wq!2>OY6ogd`Q}9Kr+#&!1MjRt+Oshqwf>Yb}lm`Qp_`vU(!5XBa5<*}R zB6Ezugo3aZ(DDQasFdLETE!a%Con-7g%Ic<_LzkYQqY+PQa}coD^frPJ>N(HnFq46 ze4hi|6JU)L5E@!u3Z#H5_Clf_WFB%}ZUGylfY8wK;)fL2jF4?!;R^{vJ;bik6K8Y- z2gv+5pu!3%AW!OKXO9%vP}@d}|HjVDijZ$-Mg~#g4OU12A)*tYJyKvpjWgviqVz@z z$h?Cf1%#MJq?Sm51*LYn8Q=o5M?>5I8>E1+&b1+27&nX}YcyP71u_xiegu1@fRGeh z5@Rt(9yB8bD~?Aud^3OD+(H2HUej3OZR=&nd_o zDZn)6h7{o8)Gn%N>16Kd(kJH}8vzO{q###`uvx&^1mnldNI_1%gbisu(L*Se!2_^C z3UV&0A_e%d!v8$@(wG(ep#@UVX$we!QNyckJFI|k4!fIE+(W+PhQ^06f{+;qKnT-Kduv&>Hw7B%7-c{gI zSdk&*t+VB;0XDk|$Us_N3alVJF9CNI*zm$NSe17b*o<&Ku>vx`>R17JQfCMDSb+^S zE3CkZ5W)&nu$9;9E*abkDKW4@Xa`I)YpmI|$@t?x7-Nk5y735q99%yX0@na!osoe}?gB2XD zj1}M(HK`V#QY95{5oE6n6ztpftUQrys{z>TD>&+a6%e_Yu>!^h=DV-pXeF$G3Zc5( zt+0Y)7Zg@NW)lJ{Aha~~Lz{U6!KGv(L)4BHkb$(k_+bTP2stkSu>vxXju)=Ms*Dxb zjDWEM=7oOZUv66k$dFw7UuoNd)xH8Uj81m;Sb+^SE3CkZkng?%GKdOsu)+!m5uE_- zu>u=voGF{J0vnRvSOFQt)GS-Qfk239L~4l@SWvRV3akhPvqw`WWwWn<%$RFLN*%!k zgB6hJf(=$cK3o|qAP<_c0`f_X4Ql%hujDUpeN0wigB4J=gighX6_5u>QghomL8+K{ zv3UdGm;o!W?z&xiLp-8xKqFR=&xx*rcv!8D0~@U1SY@mLx2R^UfCPwa z-%}B5Jb(cnE5K(I*kA?69Iyf+7c*AC*g(PMJoXhFtArI$ArzCeRYkf%m}*$p)4r4HSH;0mrJDkI)QhmdhkE zN`l#{qM5M}fwHsm)){;*f=)GQ3SCn*X$FiVrjP}{6~SvPS<@e+Uez)SKS~k3Ppo;ZnzP%K6zAC`zZ3us!6((dX(aJSU#8PTmu)RDQ05TQ z1Cslb$#~K}j(@s+aQbwA(jRU1<3puiB^3pha2HDqvl||4k8mhTUU+-NAIYucp*Ka> zX3V;$Cga}LW^X!6kC$#g-5;EJaz+p2EyDr}3vYWZ-(jj6ly?aS8#+ydzdPa(IT&Gi zV<|lmADvUpq`PM8>eBJ~OR%_xJ_hfM;11S)-6k{Xjh>R6Roqmq2)AM~;e8u>(S#S) zq6I+5BzsIHy8vTK#Bw~n$%H?TSIJq)KnuS)2+L!1FG_w_iXOv4=bLg@1t`ap+E+gP z0D^4525yR-$b{X>~3!M(ZwW-_m1z|flmn`Ps!+s@g2i) zZ`LN)Xe1|}sAXQPtt&4Dk?fvHT{hhr%q`j}xw(hjYrviIto(S%s`#*{e2xmuA;clT zv1KLU?8cegVQbTh{8k!-3oldyhtVPMb{aaqyPaOyv7Oj`94R!ZAg&@RC<1%hIX~6! 
z?{v2Y+tch=XS9jw#9-^0^jZ`^ml|*~P&ys9&(*CdSvKS0 zd!<{Y{h6IZl=zDGWb9S-3x0XCiWrsIh^z&see|oX9EfEmdkMcg96Z&pTTe_*S1Lpr z=g=wse-Ht8os^Z}#-si;LT&Q1-)M^i|0{y6x<$YgEw7-gAvo5PCcX74x&bf^uNJ!f3M!MGmT|H9V z5^7ZlSOyMb^H`TS;3u2#Kme|2$|MDk7m!dFx5><|k!JImzXpq(RK?x?xDi+}^QhI% zlYkkYu2$b89OL7{wg*SIrVfzTj5Rn_5|*a1uCzt`;cqu!!#{@L%P(CkR({`tuM!##1+Wf>P6Qjc4qPfCv!W1ye;dIotxP)`th;*Q z7X645JJ1dRfryU^cwmYgRuYmH@H6Yg8b`kqtzgd3_TJiYK&#(NBo8`r6JG?g_P)7 z$f~QPGOF2UUSF}q-Rn|GEc~_5%1%nC+z(2QLO4}NojrTs#?+tGsoV8pe%knM%#*RUX z=J|d);;+$C68!F1gZYAOXwGmX3SQeqT9Quf7C(`K6YhpG3(=a_&$i%il&j1$lit<< zt!7TSfNdE?CGi*D4Bx98B^|GOZrVMP7qQY6G24Zg_e5}wZm@J=B@c0C))TvV7k*I+ zR4Ns~&qn-PIar;A=*v7vc5h{7*G(U4mmEqeMcYN<_een?_t^c@62Spi}r8yck;bX z$#DtL>Z^|A!5WBmSK*hMveh8>*<={Iba(?UYYRDlp|4tkOENEwouL|g z93G7yu1RCvRi)1UK?L`C>kL(1IK{GCY9wWjWq|}g5%G76JJ~HiDT@DjOzZOv=!TQk z2HA{#XI@O+Iyp0quay2e39X}qyYM=Qd`TE;wmPM)+1J2J*Di_oFaI_9FA{zPl>7ot zDR1+NeowNg!ab_1vPj@g@i!ZAPdIZ!y(~tK!;3ko+bdMj%MPuEUkYqncn=Z660&cS zuu2N<>clUj)&W>Lf#;5P@2X@9|0oJ+cK7zr#7*>nRS=hA#;T^`I}scY=NfsmKq-Ra zLjiS~GS9uzhNG>dSIx^Sx&=tZ{SP(j;J#CEUx#c1%i#S%gCji?VLo}H2Kjc(PJ{AE z$TFf>iwx*2ND)adoM%gmskK$RyrGPc9|-J!Sxm;!2WOPfYO1l?v&||$+JHyG=~Zlj z@oQYk1O9zB?HWjSKM|fL0yf^$gj1o#QErr(HaTe3hpqI{fRYgvs`9-e%5Blk`4M)8V3&02$~%7(2|^AU7e=7dJG6 zkT-DgQfztGzAdcr=@5CNJiWAT!yY+@kdo9VV6)G24NMr6h!JO|Ub4W(Y*(rH3daW< zaDPZ6JTVyd(PfY5{}cbvz_3e<#y>MKM6rvQI+zw(1U!LCz1aY*P@wlp(YvI-7{Nn9 zq?W=uZK>mODpeo%(NOHlOyBTL|D|W=hmjdO)xwE`8&6V~V2fnTIi}|PqbNOx5IzzD zb54ye6X5amT!eT}_Qn1+Y0afLje;-Xt$;3I!b(yRyGa@C4B!Se}>M zRw&*c0&${KLf_!GDgwt7%ZYZAcNN>{81!UxB=EAC^@Mg5Mzo| zClOs8X<;V#Sco!$&M~ieiZUXgIFX71WD`17A)D~EgS8c+1w((BjLs?j2EAE!*fDv>pCww3Z zu*s}coiN-Wv&+E}*&(DlT#TkvfgkwcCT!QG0cn0$HKFy|L4ck@uNY!6ZVl=?8{oep zr0;ll)s#@&aSm650rq>@404-v?*2m13On&%%`pp^y0mqZ z@vmwWExqrhVx*NFr<}~iNUO{5Gw3ke8cj28*^t_kI?ca6;^~Diww6eOWdz@<(IhE6 zRj<;L`Q}e5J6G`Pt0A_+E5cu1@fDqNZMK{OTeaym49xZ4YYZ0*!c&_RP`2!@CgZvy zp0qj)fc|`=0sh+%_4dtTLA8k=BSZfC^A)TRoQS4D4iB;pb1 zdqTJJJqf4dtuR~p6w!q!{J%C~GH-=*;9PA5Na`?iR5uONBdWzMI2FICRxKhPt?dkc 
zszNDmM&K~&mBVp&qK#qke&&Z{?op(0BgO4SYV<9Fk2c^8ZjVL&JeZ(v0{Hq z&GD@qz2Q%r_F2}JB^7YRfckScu%u@#+iGkUr$|`)a$)S}BWHZ732&~)Tgv9;mMd%( zz`D*H)Egv6IPj)f6p7%o4cYxq>Gn^23KOeo&Di+XIh+-Qi~6jKa1HrfQ<%OgJUCtT zXW34yqr(mMqdlpk>CSlC?<#KiV=j>U^@jbfYYpRNxgwWX%J2Alxmr2s%oQ}v{0hXm zX`l#97B}4T(iE;xJe6LF1V3E?3$qlWA}`A>8w=nB$s0pt4Ve+ZC!27F)xU1TbS2r8 zqcD?qC3nSz5R-jtTZxtGxploT;U{S&{{%c1KT@fZx11$bEPSE~gE^^^OQ_awFH~X& zfs*yB(-dE5u6;Ar;+x=!__6x5h;}9Y!wT;w@gilzHBNFTdZQ<^zG|6#m)M5;d4+|- zRcwC zMzYBpJPDVjneN|{T&rV~Nu;w?RfJ693T_dJJ=iNZGObPvd)uS|>|uB)e$XR;5z&?P zVcU7G!$+F3i^PH}j!RS42TNMWnYHUVDR4j)*oDXAM*}F3vmAyaO{2FC=!_CiA1Ifh zv=fhPKASA&9lzQufm5a>|r;Ptv>}yt-If8bjxBtu zEb|QLC|2vEtZcb1x7=I;lw5&TCF7QvKrLFWs-xYP&#}2?;SpETycWNXUW;FcpYhCn zIOOsjL0q9R%R{#Hwo^0fDt9j;_S|~1qit0^60j$eX#nkG-oMqG^}46WlkFb5z89^r z-SyCIx+;i@$GayJxK`vMzn)AOgQ@g9dor0cy?ZiYwkit^)sxA1@a)N?99&QtNyfBy zPbOvCkdnN5GGRd0uKjv4fw1V5)caeZdop2Qh9g}x^6trww#&IaOiv~-q;h6+Pj!^o4^XSQB z8Yp&WCGWs3*@=TuO}16U@AS&o=hf9@19JUt;#|}B_tUSo;{hA112L`SCllq zg-%^EroDSIDcg1SWWs=~UHkQ90%6f9i+~ZjCldx{II6WL>(thh2@I*6+1!(LxSotU zhP5kkD{HE)klsC+OpsN2vef`RnWe}JBfc=!$#~I;E*GRHZzUyQ%t_=y$Yn_ZAkF+WEs?HQGsA@y^ zWO8P`domny-Fvb;WP9~wJZmatw=!tX=!{WCCH) zDRuo(5llk&Yr>=$ZoSvZR%`3Ygh{KM+1!(@ay=Q>VexBS*7jar2wH#-d+uG736kl_ zUS`;#8K5Jx6gba(_BL5zxu&v=DkYB3r=*nARQS$M39b5MlwK0#*5tmA?ADw&LHL;CaT$oTOBb!0qI(GiOn5xV2z$0f{|R^{L)D_n~s z*)^C2TxCCI7p}d!F`jLes~2Lo#USBNBRIik8~fK^v)5oOdok;q2kc~D@aeER_IDcg zSMbTElNVIj8brg-Gth%RuX-tEn%w6_w*0;l{BqO&5IIX(lp`6FfCAe=@`#r}w3;kz ztVdrwdI%njkNduOB#9;Vx#xX;FZ}VQr3xe528~*xL#>KaA5p}-m41dQ^>KJKe%M>7 zoGr0E*T+3GPq|W|I}@#s3v_l}{z`Ni8Fp79SMR%ZTTQ;<$QH%mJ$|npNSaor*Sd<$ ze&bWq{^V?LHW-iWZ-9}Bg);}0pA{iS-AOgm7ca^}k?lo;`m{eVTOX5pVTU5_EdB@H`EmFhy}GW+Hb=%a{;C;w?+=Xy*W*jcgPI-L&AjN&EyHPLg-XZ12MPKT{4;2=`w0KFmB z@8+UFTj5^-2fO3l+0O0^|8g2HLMkG}{VqB&3#s_2!RQSCwz#htemQ~_tt#7-J~~0O zxUxtuh9b&&PD~{Xjk|Q_>n=|}`-9%@l24g)m+-b8zymezLd0(8F8QZb?m~!t_qhw1 zxt+V@pRV6sz9_wVne})+QQ&ed??7(d>TS30#@T1^R+Nd*zwwl0pqpt1YQy39Izt9x 
z3G;WYNAWT$$Z=7=h^|@}a)P0bhi^pi8kQd%d)eO@IOM#~gQ-%UyPV`XSSvVW?xT-P zwSgq>!PgsbJIh>UcH4rZ`Uy{VMJzi#gY3)JrOkF8+L5PRlW~1Jf(@3P>e3eCfuFRC zc;O)l6VHDbf#eo$Rtg5Cc2~=4B|)Q-`Fk+=64-;w*OV*o6$r6&EfpC6&R_8Xm%$GfC`lW>$Z7_VE7UOfq;!&5Rn1? z*r~~2m=r^SL)ptvb7?Y|$II2|g%S?F?$lnetO@mjpPcPB*SdODx-Ukuyvl|{Ny&!- zffkKuXZUHiH|@^Qy#nJ2X*6C#@lGH>8HUQN`3oTp^{aDkXO{5}LReFne42DD5IXMhxF9R5iw))R>^RL>y}{z?IW)CAfr`n>M}C#NWo&5Z+Ro|-q`;(*wtS^t)Usak zP~!xcbA$#;jyA3oHjo@jfcgcU!u}wVZSC~u`NBwIKi-7nEH73~J#Wi0jm9{Q&BIud zhijOB8>cdZKLql+QmTXh7QwCFTye>atCCD?1x{1tC9h@Nge*- zmU*;Yn@`|}??evhcqt4BNzGsOrkXE|1HyAIPz~aq_MzpJM!FM+b&Z^GOO3(sMW+S; zg0HxMH7sZ9Afg#IwNr7sl7}Efm=?v8eE(FA9>dpM;2M@qF>F+a5m;&ta1Hx-*_sMF z_Yh-cuvQXo;F~Uh4ab}iB>W^DL@*jlP63SSYr!nP==)N=dw~RpuR1o&H02Up2T3RIW=i40)Z;qqm>p$T`ycFBye z2?{$M9fBxF&n0Pb1SCnzYqfw7SEN3Jh-LMy8@5a4fu-)Bx;T18jq7Ei24SR0+X1fQ zW-6<7#n>bX?b;>8mKW39F38e4Cb(g{W}Z20*NXx9+9MxH6p0bsuw5f}?%1x8v}i0Y z*sghy^P9@tuw8+x@3v&-hV2SWi#?bFz9>+1!FB}_l9Yyu3uRa36%6DEjw!IaVY?!m zLQ?aW{hZR88@Ai23){tn&m*?WaApc3F5Pg>pAoXPdV!JGJLXhy2bO@ewu#Y3Q%kV3!cS(Kgu7@DO!42Exc;mtY zhr}3cGsMylo=Lf{E-n4IVY?ioVt{aJlNs#NtxaYKNP>i}rtmPe%CW~0+vVjggb81H z*B&U`uw8n)V*S`I8R|S^yJQ*){FBFaT`FNRfC9GViwad+?1t@zwhIc#_)?4GzFjgSVv532r31bwP;|j|1rm~!rt8DJ!k)YnGX-`xY*%Dc z=*RqJKc}?jhV34;!FIX8uERTAnr}P<4bJ1ZZO}fl?MxrnL1*-EOSDT@RM{AAUvd?KFzoz0%;1?UDf$9WNi0 z6s@-7zFji3f;V?;*Fd<`5<6hKrXd0}#AFDEhS+W2E*UV>2bZcC2QoIMvh3v2qLJCy z_0Ep_cF7>v1cheUA&7GHT#^WoXcumF*f;NyJRp!W@1CUHa$9GyJQ-fk4bsvuw5^$&41snkvn&6*GO7478h*S zJjnS?Wp3E6z!jIwBxsA5&58X}>wICm0@Gp-=728>6kV`gfrKQbg-S+Y0>9?iFC zQuCL+X%Qq4_iotkF&k{xrOn2KHIOZvydv$kL$@;-7|ysL!d1j`z-n?qlwyqtB028c zW!MzMh6CcrP;uw8~=<3#I^;<@aYt;IV^j<~D>wIbyrK zyoE5~EAJ3|X)`T4tmb5u6UDeo;R3rA-fX#d`pb)=}6C7jPN0| zyo>3ffC&Dw;<+1=`FKHY*#)BF?tgy~l>(8ZuZiFtP56Nz$`-)4N++49B%0J@huUc2 zFL` zj-Z-ju8Q$z(kg~jing+%2EAHP>Dmr8`(kZV>Fy#m*`zjmblRq!B@E@e(%xE!cQwM~ z1mu81EDEF#1DFy8j8=fIH9Nt^9@A03oE6b3w_M|rTF4TKh~Ty!V!GOC)XBA6>6RUGE8c*HEq4)s}^~+qmw>LcXtMG zDIvB5kiMoZVq%J?z~IY1{S`GnU3GII#1yoL!M*Zixa6XWHDl)^{J;-x 
z9!{#SQLtM!lj_(d`*?RXSz+giOXf|p23+(Z)?yW&8BVkZ!%Th)0^G1JM5+pv}e0J!+v{nJep0$!|qmpr;jdh8f~8I zu9gGkLJ>h6mMC&vv|>1rc8{a}bcQZwKN~l$UC?w?*$wOPzCG}ImMg5gOy-v6i?fK# zcsXUt#Fajp_*`1)V)sneQ<>!Icc0lx4y&{`+YO|LTer5n?AG-fK&jQLtBiH{$2U9 z=tFA8dPIC3e$}^jKzysiuc>y(A9jeLbXa7AZeH;G48nas6AlrE9mx}vRS_Sx4aB95yPiRn=tFAR^;g8?{cD*a8v`+Z*d-8?VI2c8^Z4`1guMds zQR_ggvtgImjMX8v;Cd_K;(ZmF{Tc%?dE6xslV_a+F-PEeWyM~B_}KXh#6%xbGp;`n zlg-jHLpBCt{IE+PCc`=gV&?JZl?i(V;$zl(&QS$(By0 zy!PMH`T0E{xsct#(A6;Vb*y|fW}Z?=0&Ldua1_i0a+Bwkfxnqmfc?Sf+*{^kVC`%iK*3{?{ONez|U z<4hOtH4&zHpC`G}{5%QLg(b@H8~kR72nL@c0ZoI=k-*T!7NEpejs#B#Hb;W$1e_x& zwS&!(G(5!|TZ&y2SnR>__WD7JYjI-Yb#z3u*#yn4Y6phg1@wBE5u zM)|Ae#$PtkIE(0DkOLc4;HiyjknP6H*nrAIUqw5;VOq?{J5b~2^5%ELxXMR*S~Er5 zs+uVhw=z@2C5)LO=@-ouaT{o+NK6*~CcK!nDs;liPQTk5ZFQ%^!KQ;{T}KRSW#bxZ zRtp=_|H0Q5VLvl^yo7B#VSFc18hxUjcv8iblWL~SzgVL(W$V|COeUY3a$;@D#*xv+ z!ZekbDH2mQQ^clhrbtZLOc9&1nIbV|^t#{D*6iF4GTO`FVCO)@{0DyvuF4Fp{033H zd^ZX|v%%~fXLbjkU<^|ktn)oYu8ykw(*~3; zMS1EHfXPZ}M-WZ90Sg#de9cqYBp4QqSIx)w1WX zvoN0?{3n6IW7Z6k(n{3bvBE+f=9N=b2&tO>;g1C7UPYO+uLni;URAK?@}>vLu?hy& zR-kzCBr0j}2Fj$#L`f+tvUo#iW6K9p@T^g4NrN|1=8PJo^|#30jav3xluCwDFlbO} zNrN|0CXE`*TL+QFn-nwAh*~QY7e~fZD{1g%%A8SywBHfgyIH}WLC{(jUGz>S2>+Rs zY9!qsp+@@kibNqFpq#eoqXOr0`Wfb z#P@P?#ajiKT_vWEksTSigMGh*ni>L^SS;FlNrrrV09wc>@|k&1BWA_S?Oyk}G1Q9L z-ByVn>wV0mUgtxkn2QS;hAATOI~``jknx%idKfPoGSYFF4@;()j~gL|H6o$At4j#Y zf+1AQ$w6m%ZI4%Db0=|OjRa97>S8`_gc#O*WK@J{ZcZ*TBJtbPju)X$J9=Ho^vkR27NlC`6i#`25a|9@TitxsOtSV;f-#sB^D2ll=9 zhyVO5EAM^o*6;t--&uwqxibF36|e?PbRt%~1kp1q(TViyIrKF_U*C$puDl4)FBj9L zo3(7^=HTT~V{vIubKi2j7tpcG#Swn-erTe7eCVOKMNzyDqV3|_v-!7|!G0!bqhF%s zpzJq!eolY07aGHqN(`_k`!Mr;C;k2a?9GogOn=%3i|Jc1=-UKM00GPBJ2ZinYl?|q z372;cMUCd-o~7k|`wv`l@KAjD3$9wZ3ih*yY%HD{kB135N^a$9;dnXv7kCEp%~UC~ zlm2NuRyxycUqCtu^h1tr)4#7EVdO$wLERPl=pZ{UF@wKjLOoJVDEg@=g8u{dZs5bq z*wIq%(+q;SU%~^UA1t^(4Exa}dfn6G$#!oRFHL6?bg*W!gev&E9=dJqARNHoq5r?_ zu7}Y0`}6VdMAD8A!2vW__f&85sh!Dqr~dIDfGam9*UNdHJ9lT zG=BEYYB~}0n@Rec?3?sAD_2taCygV`wIx{WG@AS1CfJiL0VbCM5#63_m2w3g%#AmC 
zEa9yjE0+NN3w8zAfIY8!%WiL2a+^KKZLWru@1X;(T>)|DvchW)?7w6$#P3ELeF7Hm z7@!6)hQ+PH_H+dX=-9+(x~Z$NDn?dKCWkm-Ea|1dK9RX7$K7jS@15v4N_0Y^YL=pJ zV;9+X@DTD2xE|=@YdW352rby8LRCx$c3=s=T$Ya3L!V5)o=TrU{)%j5Yj-n!J~eVw zy^p3lV|0RJk6Ql%|A$k|A6`%hb3g3s>`jH)T#n&2kiHpHVoo2q4rQS$mH+k7ytjXb zJe~%;H1MHm5MNz80MJ>^MnlmKkkzEy3*k1{fB&$z*?)38-0Dxtw!@ps$`!B&`#JuS zET+p`$ek{E0PW-$Z$CI1q#l@xIQv*CFo`)XPW%0>xH%ZjlE&2wE0>{9qhEB2-(1BR z=-M(Ybrvr_7&TT775wCO|4zjGEwJ~2es6gD;4Be9`qd)xWBI|O>GPN`WDvQNAi}-oR0FAtq+k1~9jILGe%Nz7Wt#I#WT1KYUrr-)${!moUJUft5{cglhmNCo zHtBEOJ~(~4KS9U0_LYXA@{rW2P&ILU_muKcG2-C3nA_BkM(M+uE0AatzYq?gNukhz zB-BL#GTryIvUp;!^$ahaD`DSL{r*ljjigNIFc$$=Lq<&^lf&pQz0s3|7aAanWU>aZ z+*wL_K;dBxmeY@C*)eyeG$n0uKBaheGo8ec(`S4Y>_a=RX9IM$F3Jrj==VI|d_E{X zCE;fZaI!ZU^iB=Y78ev_vib#QwX|M3$j2!m&0Y}m#-a)_b>l7`7c5{B%AEchp6>^rELbN43s`D13W20o%-No4o-}_B=wsIFq zHM8BSjg{pl>N(cYzm~(=P7WTh3e{f>|A`1*$12f{tL=g2p!9!w<4->uh?hvUXAJ`J z(R+sIrhITc;^UUu`S@T1?(^3NUm@`YQ^H-q;->Gca%chI;tJJOWa+)>DIi>%r3AUi zpuh_o6+ zgeDvOl?HH!bvt=hoI*+=xJD`oLI&BNf$Hcqg0FZocrSLFBiC8LSEZO$3_Vrkuot zTfi(0D1^P5d;&fe!40M~o-JUprQ%!Bl^pb~C(YgHu6ri=oT}Na0{<@JFA|d3RXTmn z8Aao75lACmT1#KVTDKlw=9R8Y!_PF#29>Eoa6r5Lv5s2S`kX7Piv2b^MSnDcS6JDp z;A>XG(Y)QUUU;D^x9}e5?DutX>cPtQ_-l1dJGiQ6Di)T+|_wSG+ zqEm*x&!mieDIq3n8pbz{@}ngado`mQzxV|XyqsarBW36v7GWeqiZZ+?v1Pun9b2)( za}j^Twj`jTHOV0Dd)m5Z?Yqm?IWh+}RO%ae6sk=PofT)=7XG;pKK^Vg6hG=FJ{T{$ zHXiYE%N%(5XanA0aAqsLXj^&i)_5oj?Atr1SUjMfgYqOqV|v0bl4ftc8+R zVK*Wq!2?aOTmD!ORy9n)7p-!O001Bk`EbxdVb8$R%RX0Qg$q=XPy zgvcBoF#)fv1%yKh{;pNLVQ>Nylu-!50kg0{3h4fG7QNLi^fR$h+s&gBSMu$XS8T+~ zbVq;}q(CN{g1W|Xk$kwV;jA8{fQ-!NDOH0LjF(!Ff@-|jAO(b?PA`5)fz1eb*&i7* zZPj^gnJUO&umPE@6;ePxtdpHRQeZ=E+XgAHBIJt{kU^CDW^9lGLPRG(d!)dI8fVJp zCf9}}w++Ch1t5bMTL2rRfDqG&)DkJMpwvz`1B43wMIBeG5N(hG!n(Q*F$JVR7yy|F zTJdD|dZv2`WH5ylQb0(GEs3$1BM+L90`f_X4QiJuz4R|{S`7M1OBf?kK-m)d6l=-N z$--@rg8bD>wix3!2{3hKq+M;*>?UyOJn_811}VTFRM0b`IP@t~Qwm}MVE(8xWA_e%d!v8$@(sA&I7DxfPq&&{w%`&dD zqo&giE8yZ&!E4w}1z>{}&|9pF-2@gv(iSUVY@p!Mb87+EUh!`jSh4VefN@uW%?K1L$leQ%8%O52tANZel_`ZPtgr&| 
zq)vABSb+^SE3CkZ5W)&nZsoNEP6h`Xtblx2CqR3wz=j%U%H}rLhNL%EKn4kd6%b+? zky>H}7L@F;0xLqk*cBPXC`22qfDq#v5tj~@5VGs_`KWGaZzU_ER#*XOYRQQ(% zUpfwKu!5uc9(#MN0Jo^-eFY@Uv{UJ%YAxO!=w6=3xS0zHq6b?G@{1!N9v<>HSO5UQH`fyD{}!T@As zmLGM0ZL_a{P}K5L3syizw&TSHDb`%~*jA$rLLv^>s9*xikS}5>{^@kPmA_YKawC zP_n}ctO)sH1!NGDD!>LSAjG&vgoCe53YT;(KB_PGEzbK22yd0K0`j04DdY(5V=)0`g#(xf(6pW?w;m6faw5a#%s&?E-i>zG~4mT{im) zFip-yFT8yPc#>9FL9UXjSOI>_j1}bMOBz>rqDDKx(S=+{HdsNx1#gY%?fB2k-oQKl^N0 z&eZrpSgr~Xff1{6aLMKg#QH7%tOGhF?qo1saXpv_Cj>TouBqqi4}Gl}$qzU5_7C`v zjeurP(S?AXgkD_;NX-Uk@E7V=LJE~44mFv4K=E?>6uiY~=q%7(vJJQ^R2aZ0L_kYJ zuQ&vx#^Mq63--&}ZS9hL&$T@gu)%-IrCv&cI7F2qDtxK|$Eul+&g4bBGrawr5tD4j;ZPb$B9Z_o$ zRm2OeTVScXIqXfR@iFMcM~36gwe<>Nu1H31808 zq06=wn5jwd<|$~Wm98Z@OAtA zC7i!t0o`P8RoEIDdkh1Gbdc>k?wg1t`z8Qo>+3DmS2~NqHt6OrSyVO?Pei0R(FcA_ zIKdW4a4>3b_j{x66kmAK?N27-NxOXgN%=;Uq@utQ?qUgi!-MVn84M3^kN6|Gl{}#E z-k+dLxyU_P=~W-?r~895PtNFpyk%fvVc~7Bs0`;)MeAvK5o%Y$<00FUIRy~;=`VD7%H^F(k!9k z2oT`dveJBZUtjL9^U#VG{Z*WElNQ`54xJtXZ>OR3)~odDtHkc(NTEpuait(&dSe!P zd#RbO!50^0rDs0V`a7@qT$zVzY80~I%(N%3zbTeZ+yB)E8ZjPF&=08X~s6VrF zh!S5hzWlMOU+~MDRm5mk66&L0ZRJ2LGudK!cQ|;eU$>rQRR`)ynM9#fAN)ZCT&rp& zxbdhz%{Xgk3AJwi+{>8%;>X=w;-O4?wl(8lpgRF88z$9WoL!Ncc42ZL31SGH_^`G~7JaB@Xz>COi;;E1EJ% z!Q%xa)WvNwvumW;eCDsgA}3XGw?A$KR?IwVweuukp9=Bg!nOxTx26t|*NindRT7q_ zv97d+PyTiTHvD4CPyb{SMTTN;gfuJQ?EcZOnB0_g%vj&A!A%A zfOR-@BG|wceSjS*1^+gJS6Z2NHduGH?xLT8EHtYVXu&N%0gA3`q@37+b_fVWd{n># zQ{=FckW?M3rB1AI^gGdJjTzeBTRRRY%RjdG(y!QBSk6tnH~%K#)-`RM(GoGFVk-(4 zg%hQV0Ct+66CmrOh!5F1NX82RVXG^llEBO=`0a?_5T-N)TA`2QgvXQdqzqWBiJ+F-ti^0avt3 zMHox0D7flzQJIg3K;WO-E~RY^&c=Hm==X-V56)UHl4bdp-;ZRFqB$shI^wUL8KFJGjqneYYM(465&F#)gbA}vX$c8i}#!3lRmnT2T0`r3lOQLZx2OnO@b zw3<2P0=7qBeKUNoZj^Mq?zw6ANM6LU@w%GI(Ds1$L~xC6usa6BVIJbltS5H!F63+< zzNQ@dVXR>!U;IWXBn(gkOU4CaKy#Xq z0Ilw(fYp&aSOd}SD*RGYwi@IE9yQ;Dg_y-Z(=dCkTdEpexZmIF8hJGUA?-qBmPkvGq|MQsE=Nr%sC#wyz8T-z> zn7nmzW*T29{dE#rM+tY~brAWsI;9=i*T75HE{XRq|26q95`HA*LlaQ)3pk~`%`5sn z$)*bTsIJN)fjh?~AZDU&b#xn9;g9o%;c?(2|kU>UqWXmF%wBFraG)F9suH4l~j0LmvJ%ZOqv 
zGN7{{MI^m&o-Hk=)|Az-ZtZ;3uBj6ezXCPgww0o0_QvN z9`Nt8Y1crq`-$*05wP)|CY%Z_j&h^Sw8=rMK5V6r29%7b&?1^`(GK|$cw>ArOouFL zSs62f08n%^RpY<`8bOmo9$J$5=;}1p)no8T{Kk58r2#X3E`o=>y((Mu!*E6(1IQb= zcqz8Lfrni%&}{vIPlw1G<>{q$8}`UKgp{N{0T(`Rmr{bDM2t8y^^yfPcP)Rg0r!VA z!V`mGA6@o{{y*^#4Gg=)X#6t+LlnCRTH`x`D!JJJEkI1OmrH52pz7#;F@lGJNG*kR z+ET}rUp$$z8bai&duJU*d_*jTCg3d9o zc#1M2pg56=0%Q|9)y%yM@$tpJbtXXzhW;=aom2V^db0w3v3#4Qlt+@lj$8=HS|q;} zR3vHk&tEnPE$9FcFV&pXIT6H5)4F0eyU9I+fV^~;xqBjk(q=uNP%~8p0D0+jGZ!rq zX4K-HRMo@<0eR^pL`*Oo$V-PHJ#YY!SEPwR-VcWWc`02Lco-DqrK1ta1ON^xO;MqR zkJbY6(s6TMtlu0Bv+a-$uTmTe2E{fue`tq}M`KM0)R$6KE3X#RmyS!hQDXGyrNhVl zn{s^S=k0^jr~4DMCA2T;FuAu~M*Z2-;|V%QaC5xVZ*vFJ=BJ|Sj_fE*U+6mF15toY zW~J(c;Rcyq4wl5M)bc|}Y1$O#rDFzuxCz@VEvNltiaAU3yQ>*A4>o6bdGVeCyVr zzOw=TD{_hn;{e69hH)@6{}q;vNdf>zjx|7wm| z$kg4fn~Z-|qiE@UFBK!L>^S9QE=F2iexE^y+16;9Y0HMxp44gn^%3VTe6cpkZ`Ej$ z6rQSAX~}%^rz2)Xs}}fbh^_F7@RwJ7MWc%Hm7_QOY12N-+Onhqt{6~%?gp0htYuq`&Egb^ z!iF#Cobjn9yty84DVvvDuCP@A>pJsPuTPF};7uF*KHHGp|CDb3#HTQ^n%0bsZ=J(g zLAa>Tx(L^h&ozbVtHMLlv7Bt+tNzx}!3WGWfv4iewGsS!!+zJbhVimokxMM$-zK(K zAkIw#MFis{(n}>`x+A(Q#0B{23Rsw>5EXe@cG*|}CrI8HB5TNu06y7-GpxPqRt77{ zrW}QtyeqjYE`*ruTiZ&k$a3qGv_O9X9*ZBTRG?c9j`d|*nyO_0C)Nfy`XK!Ep!wT;w@ghY>Wlj6tW9{_DTAbw4@3OvX znS5s*?&lR2ifC_%$*semRXBK1E+MWGU4l5yZsA=Ojw&&WPLT(F(^XkJ6%KYa&pIc+3LmPlhuuuJ{uC&Yi~SaM zTLzep&9`!Ciq-lkD_gG1EjO0{tq;yMk4LPp!UlFTQPB|%jS`dpe2&jG z&xvTUB!e?8#&rD0n>&J(<9@=mPn2 zpI=WVjKNfMo;{gNn%+H`Fk6*{hU&>=Jb3nGQVuStj3i?ke0wq}+lG|n)sqPWvUcs) zlL>@Hrz`?S=)OxBnBhpX__prdlO1iB3zRTDnZS_Bnaw@f(YCTD%NEKCop|(X;Mjdb@EJe=uWT^~`28;ijRlnxllkuVxT`tdqemxmK1U7lko=ip(y;tnplksF# zUK1__mfhd8u$M8wWI*~-PUu#P9~ZM_4cB=0Wc=tVwT0`+`0;WjuGXtM{o+-PJ(-+Y z@16{YT=$+V57}Nl8PA%^ZmjK?=0fkDtTT_EOr`+H#d0a?5D>&XPdqEi+D zBXmzD49sv;YfsjxttS&0QaQ7^C+l!M8LjC&8Zf@`z&jv)dor0ItMp{60eUh^krzf> zlA&qeqin&sCC%Rmao{S$NNKYms8K5WQ$*R02TnY@{lgSvEPX;WrVxIwh*U4mD z8xo+`IvGD+pq`8;Dmr2jHbSq1@#7L^OzU#qJsA$U?mbx^vb}mTo^_SG7owlrI@#(x 
zdNP>?3SQ4cPbP3Jxu&5%BRIik8~fK^v)5oOdok;q2kc~D@afQr{hfyW6@0SkV-iqcJ4hb!5{Oolg^l&-%S8{tgYj|Smy0B^ z#6I`D&+mml-n3L*Jo8r(CJfor%`R1vhH_lrDvRX#w0DjQW$f z5ig{3oMzX*HrBEqwr1zhU9gQK%9|n5q=257^tZaB-ZuJZypRTVuE)fIofZ47)9K*M zC|<%}6FtX#RxcCdbePU(7agjCwDZ(pbS683iW;S_2CsmF$WR98pvitW7ZW<+N_BbH zFGsMVRb_k9M<-|&R~G5Tpc@#>P z%0%ejcuF!5ce8pFFQZx-*VBvWa`lDQ%`_9W;c$GNArrCV@<~Vyd*b~0jR;=D@`Gb9 z`x^s?ocCQ$ato{#3^A9)>-NmiZbHa4KB5qlAAG$5x3kPuX16Uks-N&=SH!Z@GswPd zT@tS=l81*)$%}; z%W_m%uc-TBYh2MyTNhXyfp0h94o`-~z~L)E+10h|6dkOr<>cixg&g=Fk!PqzjwFN+ zUzyJ(fc$O*l6%Nas!iL^w!Y$cA?(3D@MRkZm!o(wL+^04aU-oZ4#RcvE3B%Ga>J`R z7!AG@!K+yw>_P_RiVGK9L7^EmwM?;?aOSiWqb?=2&eB>YO88>KCEHOJ#Q@(Ch%p8LBG1#XAPW zp=_PlS7?HnB$5N>eTTpcGZd`63}7(Y>Oa%XmnWKjMyGDu(-Yp;;ae^(Ew2wQHF1Vz z7+jJ={Fp7JH=&=#EdQBHQ_Hg~CaUTU7EjNispSbsl9g_?$SaP?>sN!W0bSc!9f1^> z6w;Qj)QiOBB@Ywl2o01RZCojAAUTu(^$R+M{Xrz#+Ud_Tm&C3ljs1ZQK#l!E3pe#% z%H-mgA~C{`H{m$Ti&az4+p;as9QI}$?Pv0aF;jJVmk8|V^W8IXyF(y#ki%k??evhcqt4BNzGsOn!Q7r z%@hvkBQto;1*$>Z(>}DE(nxorfodGq^71ixztS}A@D&%ZhUJV4B3wq%3@fXKJ-98! 
zni6jNH5a&sWm60r)nNpdngd+JK3=vaZMg7|tUOp~Gz;H!0c<$tgdpK3X$| zKy9K^gUqrggb81H7nOp0Fwg}XaKm=#X>am%UsYqwc$yY}ZE;)F!hpbJ%8uABd0c^i z;>L>v$bfAz8JDk}RBf>vwoArP@Rm2l4od(A!lIeDV7sOvYQc8Npr$(C5!)r?GktKC zY^kJ_!K=Kfn6Q9A9X6RWxM92GacqLZPDh6z%F%O4S{wmM(xR&cE*%^pu2{~rP?@l# z>4@!;d0?sgr!J0Ok-EG@WDo|pVY}Q+Wwovtn@DJ{F*C7#t4)vKhV7bpt_#~GBeNW; zUu|*2cF7o-Ez30Oj_n#ri^k%D?V1NUFW4@bmLs-H9?!MHC9}YprxiXqf*ZCgFfI0A z4)~%#(FNNTNJvsz=;cZ>qFa^&H*A*-O2UAU)cj>Xr?lpV?RL%swu{H)L%R-&bHjES z&bS~VVyf2j2nF`$?2Ew%+^}7SO)+dZAdU<*7i^b#ToNv51?+-M?SIXDJ^@|hV7DZ`9KmD+a+Ttcyq^g4TMWAvBSPy(-7ySAtsY@#CFN!nLfBwRmwLP z47N+=?1=4>$FT|eW4jzZm!!oJkR+|pjW8L{u@QFLw@U`aQuj|?9K9lSxnR45lyUjQ z4cjH;GtOnTt{9tY#CFMC*brX_woArQ^IR9UOGain42|uQF_hf7W4lJuqOrJOyXHa8 zZz^-!w<~bPB{Q$EFqcf|i0ukYi#?bFz9>+1!FB}_7k)}}!*)eBg{0;$`#GgGH*EK) z4Yq3ww6p@_Ja}@B`*s=5xFBN9jASLr1^B`9D6b=lz>g!g%djbi4F|-Lq2_|^GLKgb z7ao$W4EH)>yDW1;knodq5g^e zF2|@CARMqpj-FeatjBgMd$VNmCWJ$!TYwz)?Q-%F4`}g~cL=_;3ngyY?y(EzOa?L& z)~oc5vD=vpWE>2G%n#coNxfpDoMcEEN`L*Ry(t}%ysQI!m8 z1fJZ^WFX`-eQ>FYt7Hv38f6<3T-s?en{quX1C|}JT`~wZL4RzQqvw*eI0BN&R-Uvh zR3hxSEdsocZXEaR@=OX9u0!hL=$V`&5a({#E;mzItt-YRQv24hZ3rB31WljhZ zev&Q%ggZ}K$i{8oF2k>^_9OLeU~CtU%GZ4w69G4Dm*b5K529OI&!pyf;ka*?V^jeOqB3aPzmx0YT zmI&U_gdYf^Yyo_$v?8LCXi}3MrUF-%6#kNzM;?8M}MifENvu5n2%B9J05VK!u{DX47cS(h7J1mw znHx%XcLs1NA+`jNemJ4%*#ELme?^T?SKV9)F$L{maIZWWF1e_}byXS}%Q`AwJ2!?O z_#vNOieXu-VuHxgqnBc*R@%aJfyI`$1&QqC$|Z;n<`Z8AvK#8Nx2cEy-eiPsj_*#; zE$^fNDW4Vz&;7!y`CO`cDYyqOE(h{#s8jt}Z&f|S>C>lQ8)Ar#dWh_zb}nG3xY&f3 zD!AzHOb5g9NHfT%uj|69%z(Hf`rubsqT z63MCZ49-k?TZ8^+Hf^(ild1Ba-t3;;VK;rCJ=@(G_S>7|(QGmvcDMRFeRTWNX!Bfm zwH#U(iU{I>jOV&&#c&`+zefG(3|-8AHf~(IpjlYP*QLL254@h`3hOSDxuyByEFv>r zPMI=srH>{)msYyiJ(KlRCb|0EXSR~NW$ev%1L@(`tu3S7$_z|ht*vYA-1(BkPws*H zy(|_L0c15pX0_L{#3fcsA9skF^x14-A#vD#Qp_D?isx0}-D2gA!;rNFBe`0JUYQB& zp(4E*yKK`XI)q>KtsM~G>hNo-9rA}AVkjLJ*`S*jJU@eQ-_L|?!?4#XI<^>TtxzqL zgMT0{PStb@#N=@o6C}?%2V#!E#X#I|XU$BvV;hL65xYcCB7CVCR}bd_0x>z#O=if( zK#U)D3B+Vr$3Vs+XiA;F;Ojudi8zC+0r~3AP^TvV94y(SP_%QT>>$A 
z);SP!1fEw`>{SsTol77l!oP3=F?s)5X2`}sj30Ih#AH~>yftY#xd1b<0f%uqpAU@v}F;*XZbq_pP#$xPNHMmVs zI-vM4I|2_BOK2pwvmdXt+l)*iHR(G-P@)N&I?s2Az_nHXP zyw8(dX?~sr>B16a_ziwDLD@TGS1e+rPdIHXol-j}ONE)7E zjxEL3umX1V4N~8T;3bLxBJ3iUe2m#5m1sg;u-Ph(-cSjt+>pXQ-lyCdh+=YgRU4JL zonE@A+(|A*R3Lf#qT?Wa;sO+Mc99AsCleZi%M&{S-iFMvH~d&Av+ ze96ZC<-PI3#-8o*R(}Y;w+L5fB#$GyCp+y~e>5FWx})B9e@b4xc*i1n_^+B9f7wKj zFQOAe4s29Goi?f=u^TUA<0Ox06q_}U`uXGV{gux%%d?<7j2PqY(Hs@Qx|&F1+RYg9IG{koCKH*%X# ztZm*na=zEeOp(~UnIg7%Geu(aW{TM6%@nE4C$O|NJGX<3_A)rwIS?`b!QX0FjpEd7FgwSY-GL_B|fj|ApkMVYg&2SxT?RoJ1+UO1AB6+%+0D?Ptb z@&;f8q6TlEOqxuTl(Hg=Hw4!Q?h_R}>-_Mh((_M`Z72$v_3iE*d05Ico4f zlTwYO`y-S~qYCq0?U0!0k7${6Q7axv!JCyPO+#>81ixx&Dk`^;5G02oME-E90 z75o^SP9j|RTm;zCCb-k=_J+fJ%_47UG5^Uy+~5*vB$x6(9}$1CDm>8LN?v9A5Lxz}P6ua3$tyLF z6(}be^{l+Yv}OvmXJ+2J(3` z55)V(6W`0p6>k+_c9obuMs{T64)*;LYHF-pVzFrJB^mPd0catk$YLkx>z*xjDJWh{SJCJ7UQA$*9N= z%#2*@h$B?Y$wfva12HplQ9nbbn2#HwKz3B5O4g268p|(Q`9H6K%d=eBJ2mX5yX*$( zH`Dl=m&gCl_dBP2@A#6XM9+o_KY&0CloyAS8Q++^YNZ z5q-c`Mf3rdAx9PAEN-JBlJ6~5lz-e-Mft~Bj;g7MBsLork-M92F}1dIXwS#5Kh=cISrwnkx%>WVW3fz1)P|Nh zG--sJe4eB_mg8r+B=1}*J(Kgvomp(c(pWC3Dpy_(%UK!N9q+^mT&>zbz4GmQVP8=t ze)-<`OZQ%^66s!OUiKHO#Qrcl?;4fw7puhK0C`)RdR%&sv9XXRb(u|mBcCN9Qxlmp z!$kE|-jLbmH~Ih@LL`CGrjWl_C8C81ql#$JXR9LmKmZk8tP*WhM2kLK710L*sOVyq z*q@A0lF4hQi&diR6+n`L{i{UyIqA!U%qkm7RNQ_$&%o0IeqvdP4+T zqE~L#j%cQrukNX*CoKOdrbj-dpC0*?IKBV70h|snL+T!Z!!^^xKG5`UXFpz<0C5yg zDmnXa@S6)8|MId3ez5`7Zvr57Oe&)8DiYEiUK2xDU>;Ar>EwcvoJ+*cP}VgNtQ-UO zGEx0h175>Yu`>qTYGPZebV3o^%%I1Kh`eaz;S#(RdO>Ow}DtpyTXl0v(yC-roRj@yW{3c)FQI-BsDlMIuC8 z0Vw4HlN=4m&QcPYN^_El%*g=}WoorfnT{nuL{;4ZIV#Q$$Wf7r$khRl8XqT=bei;AZsvnsb(#nE+*RRR$rE{9kpQgV+~ zA|;u~++r13yTp~0LzH0%5K(zjagSB(Tx240bwI|k-HHWfVqUR|r{m**3`+rH6@~5G zV--h&Sd>G|Kdy! 
z5=2Z6v5KeTrxColJ}$YCxr6!ikZZIO>J&cAg}qLM$)@sa$j?1I$V$HZSL; z3}?3I?r#hoBDyfdyq%YwnM8amYp^sNPp9auj3aP8yt1sM?S606oxWwaH|cM6`;*Cd P($1Ge@BWF0Kl=Xx18;<( literal 0 HcmV?d00001 diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..68d601bc958fce1ea8687d1a8c5132d66b66c719 GIT binary patch literal 267629 zcmeIb3zTF>eIGoxdvv3< z+o-2|=vgN@_OA4;ILo~H{;U4)|NmEYtFE2H|NYz#{=m|nxw*Atula@N zjwY9#8h87{8{c%UH=M4vpZxF5%bxgFlzs5n?r+_>XY^;=+2@u%|Ha+#4!CA}H0W=f z@0{z7C#}(`NpF0vJME8#@!tCf-HqPa(O|PTUVb_3nw;%!_u?y855#*H|BCz<#f`=0 z@?O}rIX#d5+Bg&~e;@4cjCQ8mJJU|~$RxfRWKY2k#O&DJjcUV2PQymxu!HWY-eAIW z>%vc@l~Dx09l>2JyTjh}>CyP9&SYb>-D_?3hLc|Fsor?l8?^2j_NV>sp#P5ExHaw$ zH%D8Yonims)uR(~GZK zj~AQqJW^w0X|ai{WDWgeCEHw1N(xPQ zrv2%8TZx%$F$C}Mi3n~oadE2Oop41JDi=o+Qh#tptFGYImT#s--V(nG2(e9-m+dJG8a^LLfV8m>Z^~gr$u;+0qEz=KnSH{i8gP36f5r`d z7&AgGei%Z*?9n8|=Oeh@R)}10!G&?dC~0XWw(wr2QR+_wlmQ#a#~RSG zlhV}m^n5nf^;Q4D+*jqpsa;er3@(2xan^WYlpYe0 zd$U0Uf+xejF~e>|NP-7i3MmMy8m8dOR=Gt01dKRF)|gg{PLkHXVBP~VfZs8LHAqJ# zguo(1<`{tq1z|0qV966MfWKoEZy21w1Z5OLC?;tucPpfzJqx6O3^G%sfDCGh6mVsk zq8V|@w`3j&i^}&o&^-YuoRF3b#?qJ2(DG6s1ui8F8A3iy%O!ssq<{>hzaV1y(a9TL6qmfekgzl*5S9 z8z~_3HX&J7NC6?H@n=hAptiLhC~*aYLp%t%2_zJv{>O{LJY7k26)y~^N$ z6;hCMA$Xvrkb>}~LkfQ10x4*>1f;+?`IVL(R=~xlf>+xHl}8tpdkJi?g0=%zK;&ZF zG0|acAl!}u8?2z6DI(ufV8jZj5UQ)f3M)8V6)RwRYVFFuQ(sMHV*nMb`fEaqOHW~U z6_6q1{dK@y1!N#CF9lY>=t%iWf}eh*xQE@Y0vleq2CMR}0-F&qR=^3$P!WX#7Rdb4 z%wN1DV6&@$JgKt-`&|V#)V6GP6<87S#R|wEsuvEdumVCvCqVmM1vb<;Q#QA`HYB~V z0y2oHS+>Fo2r-RFEwKU%N_JR*6`{JZ0x}6Jtblyj)QGr%Y=pL5`_H!{hL{L8y9&to zm9YZypcyM5pVZi3ur9wS#C()tg%wb?gighX6_5wR%+(ksY<3kKt^q5+G-rkt;NdtI zb!lz$S%(7#BwH4&b`|6*sfrcg$IMtkPQHGuAQxyoSV7K(;DN?=JD-!!%W0jQhA&od zxH49NThzq3uYiP^?DdNx)?5Ir;IIQ$K;&Y^3K$!hEmm;25>`NkVEy6&)tMDmaHJ|$ 
z;F2K8AO;A4EmO2OgcVjm2G!ILEzdU)$Pl$-1!N#CF9lZMr6V~E!0JYcKUlqiV8aX7 zU{%HnY(}_{SOJ+|b*z9qsj~xntiXnv6;@zH$QLUhgLq*DgosXn_E>=pHO`dHSb+^m zZ>)d}Vr&9z-asJ4G$OUc3M?quVFgx%g4v^~pSIaoKxWJ}B9cW*ugG5NPGnmJ_&{&s z%?2wVyj8{u$b)9AfP7M8gWAz+31$0W>$zZzTdN2`A2yFIM>gPzl|G;1J+$Mzo)Cg$y6kQ1DNoYGlKx(M3Q@nXl zghg)&!`DIzl_Cx`nS4O;a@d2n7!CbN1i#yWyF-Nmj6wvoH1vu?Kx)Wt^*tkVM4q?c z;j7vg;3dgb-S(JfErBt-=f7O)fFxK(1fOldk!t26G((!@GKq|mV7977EBvJ=@4yU- zgfB+Wt|m>PYpN#AfZ^KYS@0VXyup$+{Xq(Eq($rzXvc@D6tSw)6e4^=B41djo5Cnv z_tU&q&Dm`_rrNVLgUA34U*1bTEub1vdg!vP1!igzyt#^05jUxrjDU++UKvTZ&l!?JFzviyd6#@RRT62 zYqSlz`GE|T&7@%3?^K2eepNWZ773<5Y;E!k=#lix)XG5#u*##IAB9`On^(Opzyh_eW z23i=`epno#n@{rFP_!UfdcG-lRe*9lseR?sX9Zc7QG+!wrEuX1c+g#?$lj;@4Rn!7 zchDVoHqQ1o(5)sr8yh`zG0FU~(S6(SSs~;p89gz&b1>>oTjUyz=G}~=uMe~`$sLNIo&TgE^9kw>D$Zw@VaPM4- z`SPZ$E+`u5A@Ft@I=-`&UfHpg*nJ!+G^rr2A}S~Xd)oOv)$46{Hv3zX>{w@X&B=-W z<}>N-CV(zA;7$fp8o+R8tFt}sP0&I1tv8``I%u7*TT@K>Q0^d;IvIlQQGaIV5GB6i zT^V~-{eoZKtRhBbHX{3m(?0t3Ru056lf8uB8T6m()vYI4)xmp`Qho4y5pdT@SqW}5 z>`gMxT3JG^o4;~>!bb^ZWdr}!kGq+~Lz(t$YsNp1V70PgQtid_clHN^#9RMf6fzkb zU2ypdfc|i^_Y5t%;Kv$pb7kI-_XfQUbX-0DYFP=gUX5duPc%q8G-gaT@XiMGDocg- zG@ZPkX-SJw^2cl|?#1i$|0jaYx<$Yg-4+P+frc6@scmWA@ zahpu-8fiA4`D?JqNmbnK4;z6MGmBd70twisLj0t#?ZMHlsRQISV+~G~gr$k=N}38v z^`kY80)Mjs>;5qWUw-LYvGV&Ce3fX;JN51K@7*`5l z9S)s{zH!olPK8$gT?DVSGVN@z&dSAG^dnB}Ksy8kB0ehMfhlrWNk~+@%C1(Y9D5x7 zF0_I#FZ7nS4Cf=QYlW^;rHqK~?7*eqr^_0G~Y^V7b03iDjTL;N_As}pZ zB~%iaSp~lt@f*UFhCnM6Qle)etFBU8vSwC|rV6Dc_1ZYZ-5 zt+LD^4hNg?*UDAqnQ?csk5)4$T)?)BqLTOvZ-ejEjgpSnIX~$f%8OWbB<1>;ba@j6 zy!S_Noo=vnVI>c7X4VtCc^7_BZe~>h?589CtsJb*LiA-GB)hjVv+JY}wMq^p6_D-X z3slSzsKyTawdGJ z38%vwa9LZ(`3rs35?qp+HKqubg(lBXO+EpS#gEjaN#tRFKZ5(bb%rW0oMPE6HEfO2 zTvqOLkyS(gAmZ;9cd}c4QWXF5nAYsKoA+eGvR&VB5lbh!B>LeUpS$QgBx<8c23O z5uPS$yWZb~Q=!FCZj_leIcU{~t@N>gk`WbJn9(Nfgdc^6NqquVIbjhC_>>X^C1S*xsh2FUxoi2u4Y)s~5uWG|dg!u8^#6%} zXkgeSM&q9u7^2uk&>G(fRLM>IXaNGfHj0`}V^8z*5j+$`YALMKk~%K4NkseFyJA;n z`i5_IvLm`@FO1CCsTNKg+<20@1Y2m8b4<&qauLWnb)H zlP>07W#>OqJ$Hhm6L3mr&Ukisg9?Bn(TQfVOo 
zxEVr_+{5!xtWdb;o**j}bXwBxxxN(&Ixco)f0y71c2>g5f}3It1x~1Ax3DO$0}MGz7>?>8ilPpdc?DjYuW{a7by23Z>4r zTnorc$IW@MeseSo>Pv@LDGmjLVjG)3v_r?Eu_grSOR1`rR|~pJ$EDn;f>HX?;S>JQ zk$B3dq{HN16B_oWPmjjvAi<5%cCW>qbDW=wT9|hgh~Yy~fK6tl>V)A2nOzQ+Xb&N+ zGUvb5gsr+XAkFWpCbV8V2+&jL6+twDWH1N>Ly6cfZ}XOdT_H~hz;{;i6R@|QE# z*zKF@9baE;z+-h8Hoan)iv|d;Q?_qK{%4Khf(^Y3ETCGOuLh^HfPHDOZv4*P&5eZG*lS&m~Q1e3#a4FFkAT)(S<1dzcpbz zYlU;*Tx|tN>M%1@H!VhnREwK%Dt>FNT0}fr+Zp^sg;L&(z+u!YhvV)-8^hwg%n!-T zqe$UKirb6ScSc0;@dmur)(dP!BL;-Z8NzlTR_t%DIlh&nH~ewaKFiv&qynxOP=DqI zmUOLUTaC@)6p6xyFF-1=4(#!P?)jd4%)Ibg)059kt(@_YW?;?CB8c=6_5%`%A(g6f#z*goo|IFvljNN2062${qc+#(WtuvczmTAfz+t>q0G2_J!n;s-qrVGO3y^X$oF()8}hgxRVrG*nL}qE?Hn{Q7h6J(X1tR0{yvlMw@#CkY-Ry`RnI??5F zY|^hMG7S{G zR$nj!-*qy9YtaRQtdj|2FqNKXPbQP5cTXnFR%M}~5|WGu&z?-m!7MV8j2HWiSo=hMtI%N?sLic3Czzj#V_GInadNP3_l{1@rvi1w2CzA=XN>8>D zpeM5wdAT>8E9|;@n+Cq?WW4A^mkZL9@k8LM&$B0!kqpq2@nltA6D|do8yU}@OvV6{ z0d0%Q!XiU2sPPlT{ClsH@kCW>3%5?jk4M^=dR0gFh=lIRC{5@7I-OvV6{|E!&sx_S6SS?1Zh zBa?A$NPk|dWc+x6Ix?Q9=!nL!K{Xb-W8>#Vn1vs&a4n8x*I*WKmHn7qxc2JCc(zoo zPUEb7G55z2oM5w!{p+vU+bx#8nDxyAcCs({bm+wXcEkP(KG}5gf(l!MX!v;sdeG-p zFQrVA`@G0XUPJn&ru`vuma-^EGA029wu9tRFM((^S=d;YzIgNyJQyGMeep;VOYGhC z1AZ_3@usB;BODl5OLUlx;?7XTJpqr!k9aGNvlphX()D@pNzcqvE}N-H6j4XL+|@)^6v}ob!Vfp#VV0Y!xtujMMPl>IKaZ}b&?$`_6@PD=(wguKe(&#bHlR`$m(hG)-{nTjxn4+m;0#_+SJ#lq1Zk-n6@d4jp~EH*WQZ{b|2D=$z{gcF>)tw~vOCsnv}L z%WtIF`Ute+L#1rZhgt4AlT-cfL~C>lggI|r{71}AsaI!n@cTWv^C);*2MMqdo z&UVMW&Cam9)r%YPocH7}hdpWG-0cs03K)%nu8nQ1&ZU8!>oIX)XT^SWGU=Zg z#tZmsqUV^;>Sbb_4wtWj{YaU8bZSShlTQhOe5!l5kZg9)iCIW{PxXgqxVH;@g#^D8 z!ID;$t#J>Xpjlj5q!&XG4t-5eAq#!Ih>zsf3*&n>k%Cs?Jh*@cJ7jYTIDW;81F7MIzTm7)VK?oxt+V@pRV6szAU|Zne})+2jFro??8@j zcDGvh;Ox_X2g*d~-)JJuK&I0AR+@=ga4^2Xkcrq?@E5a3@ggeKak)N^?q;8ZZ$s(Q-V)B5;3*8O9Q^yfIC>`Dzn=X9Mw;FvMXZQ z=^12SwysoM%53K@H-(=Y%oe^A!8*%Mb!iLnz)#vnTcdD+yQJXvBaqy}%}T+b)Xqv- ztt4o;&&v=8_*87t>mNmM8_Q8;y`t_%S>qZ84;v8D>!$fKR~E70I}NzglVLG%_zF;V zbuBwZ2P<>)@|r>p{8{7~s*xiJ;lo$vMZw|Whz9$H-;O|X54lOTX~WslR~$FfBH|$2 
z5WmK%h{*cbmy+uJiuL`ddY{kGzN~!U!-nR_MEFVsuV;C%3mKFvb-2(BDhn0PLMR1_ z%Smaou$oB(zT9xhc9caiK=_C&32Q-$z+e!)vJP?80emBZH4h^a!-0>4i%{UojdqRZ z3r>v(wIy8*!ByWn6*@Dfju-F;PVEN67Z(U5y@B(jjs9&P>;BNG$zYfiLxDrt%TRM^ zGMLB9)u=Y(J1anr>S+uVzUkCnu&fF7fuEdf4hd*qjAVJ04TqAF4+R1(8qvz|(@uBN znWB3IMq|=wFhQcJ2~>^c*VCe61zPbzo1%iwiSJ38Sz*ywT3kc8MEVb&k7PG3_;Q>r z01!UCre8)i(X8kAIH3Y!f3)e)CQ`|%D!utT`-6dOo!D1sf|(?e1Ll2)zzZ|91^3d- z6#)I=X78EK^nAKtaj2%wLR29sZ&1zr4bpTT1(GUCzAx3g7fEpVx?{sk!=A7PEWvU;v?4k=TIY3)KJabFHkn~m3=mEY z8$-{nO=bv`ttyBw$jb*!ijh<@X`ATOAhYZVVZvA5L4>0b1Gr(k^t3nmx^KwFm<$!H zF?Pdt$ut!BCvLn*fOM&Z$pDhQ+jV2RWM~C%c~k7L1YjU6nu!axYZ@XzLrjKPo)xxB z2F&!qRkD&)=Z`EuEWGP+^A9LoWc7Dvw|X>kN3NlUle(Mu)rOlFe08@9_c z>7TkddPVB;l81?NH*A-isjSu&V^fgO{tB|4>8kY@?S)P^?=Y*5te@3^?V5S!uw8B) zNwJvdvW8n9|Cz@Db;}+wkvSOB{R;z zOl)0{%;$*h3QUXX%mH5%D7s*~0*Q-1rMY3dBAY@|^OwB|l?~&7j@WLyE^HSQzJS;+ z!x~+qX-GR`BMI?HUM|T4INNyQU!mV7p|9nPIzRz}m1~GJx^@QoI4U zN)rxl*e;n7HbLPnc84I!(Q`>!905sX8yeds<0-PC0r~(pY?lm*r7l#szEUqzmkYK_ zrehrws*+YD=!w(10-29p=5xe$$sokhgmmVBFA5Z0uw8+~ z#h=pLuw9W&A*uPxenx4{ZQt%;8*G>JyU_3ro`D7g2Dg2?3}@;fqGLv~IOGCP<@^~j zBS&nPVN--|91ur_nhUneJYF$ecu2M~-0Qe+mt{@}5`K~{0)&SYG?wg$?K1qz>RnRb zddyi3+4A6pjyE@Km*ee)5gy$3?Q)EY0m7+aW1v~LHkl!CF$W2^eY?E8g)reO@7e=} z8@7AoB0Q6U%*oI}r=igSLnPF*dcADK{@5-V-~wQ~WM~C%?%1w@aH%D(OD}~Nxd0Y~ zi*mzj1fJZ^WFP}(`ruNPq(JrJs8pg}sC~N}%K?%WN6#f`aRkc#F4;d2$jIciZ9Uj7 z&!m6q;^>*2BaFNpv0ZMavRYS+O{Dhqa<>b+Z`aJTH?~WL@JeEC*e)5G0BWT@1n$_b zk+f(mF4(SlkO0e+WDti`=C*H_447+$OJ?DdPbdVK%-Rv#C65zR^v8AuiZ0l$K;q(0 zX>QoA$fl6g9I#!1VwkjLN{){Ec8}O#yDsnzCai&M?cp0YY?t9oA4EcKzIED|hiB@D z?J{hNRj>o%$WU{^cA3W|;lgX*E+(zm$e;oLBz3nVw#zan1PMP$>&{asf`_{kI%2yF zzp{Fl)VJ>Q3KAULuw9NfAv`!9njkABJY*x*I*$8xIYz|*;ea)A^xWEHJ+>?FW~(`P z5T|j&b~*V3*tg5kt36Pl7aC4hIZ=$e|4n=7E{PUv^z7W5;7HsyoSiG7&x5u2a-VcV zVYDnconH@vMDQ0C&)tyBrwqA?p1Ul8$RfV_G0amITD%1b?{2~m1W~rotyDV6L?zLr zCOcFV5Ecqh9_{b1iFY;No*w^>8k~muI*5>FV;4d?k-Z3O=`2pkbWgEnyLpicyA+2 zPCyPQ#G*j@2q`s#e9~*pPO!1Zbkr|rMYPH-H^QVA^1WeHvNc9p1*wbY2+*}=m9DFk 
z^T@>>%bkFctg7)9NvZY{z?}%b>yv8>r3y7q02wB=*qQ|XC|t-_+VZfYGdGm(?h4>i zLTm{j{cu9jvHw+{{)!r(uDY>yK40CdgHL*|gnQ-5aQOm?DnI1YOED~qRZI{$dh}B4 z)Jp547g=n1XOPHVu3UoXU^elkFT0^Wdz*UD>yC%$F8IzE-5^g2kn;Of@Z8V6o`)tB z=Wjt5US1C5*-)o?)9#9Th|{M}Ti$}MAEHf$;6k2ax{~b$>22(=0k2eW(c7N%2csbw zq%5kZKX*ghPE?=R1;-Sl>}+o$eRSBr@*OUFuXXF__V(a>D&u$${Wf{E-NQ-sH41ji zW>Ot1W*_gYBunf(aVecy3S$0gbWwPk(p8^fZ%R*IRi;z6@I-5z+P}6FgGnT($}>1K z?r!#b!|9~O{!OOJd%M$ncLtsGf!1_qd(dlbjE2+kXwcd0ZTHXxPQ#7!ot1KEohu@U z!>U5Ai{%*hr8w5GH<_Y~+0Vs|>*q9}Om=BHd|(&6ndJ)WE|a;X`Qj`hGhR-aGI6Dk zCO(%|y4XFF^;9Og^1?G)$=x#cX1k8`aCCFi%Wj>oNt9Wwy3AgK&+LNxy(|_L0c15p zX0_L|#3fcsA9skF^x14-A#vD#Qp_D?iWgPj-DKsC!;rNFBe@0$y)K(Mi9RX@Uwn1i zulUvuh;McHHPsIJ!wxZ&4vTEi%?nKsSu@jVU#LJ#^dYru{bIlx_y=Ngq?^o;je!_H>=KB{u#SP4 zdHh9X!d`*6-I_@t#&ydjaRi3UevK6|dE6xslV_a+F-PDN#NWcF(e#N=_8Kun%>4#XUR7nK!z1>z&Nfmmn6EIt53(ch zK(T~IatHhIO1sU-BvO;UGX#|vm1XzK60yOJITZx}kNtNZS1-gPczzd1E@XEwbTy28 z11n#RnWq$z0GqWu90fCh+~h@N;BR9UV81ZJ8F*zx!*>Un%$U^?JKHYGzk@LcKf4E# zGV7UbZ|w}?CR%B?TENxs7C7;wVhrQ*Hz#1;f0C17sFKJ}YN+I1XS#T=i7?IkJjs>j z=Sh$*EK!Ev;x|J?F!&q^Xc}ye1iCJ^042V1BzQuwITC~>;2cS*9c+%I;VI_0tlBX~ zm(V=p9DFN+S11CAu!~&sF=mTYq6u}1*VH0`tcK^v9<<3A9lRGQgsLbv3(naNV zaw#hJ(N3{*-SMPVeMeUCh$rqcNG?TX7sceTViBIa<#;J7kG6ZQQX!VUr0_l3sEi`j z0*=p#A};5_O1Yaf3wIY$WPWxLULt2XoO)fOq03m&$NIz@&Sw#~)qHyneA;Sxmo#+&_QW`rgIe@!a~Z ztm#>G3*s+uX{4%AGMc#D}L?ibAzNx@;Jh`T#8 zMPiBU+cvv3XW{IFzYZ^Ft!i?%yWQ(_hnti6`N10**yPZjmqY&UpF%OMsD+owaptxMjH#$RAQz`Y~D-} z+q{_~v3WB^Z1ZM{)aDad*qol~HUjnE&8!z%`knmERzW7w9cGc~@++pxD|ic7mAtdbFzUbdD?e;Qzey(oWU3syZ3=2w zmAHw>Wm~IG=QsT1%&GOb244~wJ4_ifRUUa&USjRAmNk!;Zsp9WHHTlj{m%k}N30nn zrIo0=BZc2NaFbK5>~d=ovBDn+%)O2>XI~GB?7dDhP{Faw_BfJu6+Ejoh~M24HFy(c z(qy8floeULNih?RqiBWVVg|U%E@|*)%A8SywEh;^yIDI~7Y&l36mqMo!CNSkMh)hz zgUI47g+;muA5&FuQ_TuW8oZSl}K95q{N`UlyUJxGM0jCgL%LgBfyq6#+_!T zI~e3^7I{mH`A_!a2A4=9xt#y`kob!g;fc2J^I_rVBf`(G6Mnu)`1xkx=UarIZ-w_= z3G@A7f10y%m|J4uQJB&qR(@Jk@;cjx$g=Nb(myjyUaNVmKsm`EHyS_9Q#XD-FDMB8 zLniv@^HoP6`*1M7<`GD`AagSFfCDo5ymOy_{U}Rsm*L 
ziRoixM@H^o-!Gx2#>yoYi?&{pA>SN;7BY%_W**dtSut}v<~}!uS~0uZE74=UkD1h) ze25ftaUsJnMFf7A!)zEbUh_c@<7GoeIu7$;$rST(BgC*qBy>-8387gqgo-&i=qzs{ z@M>)CBrdFxAc{m?%*Txo! znURb788XFu+z17-qaszZcC^%3eAz3$@auPf=K985`rpt0U(fH`^DCY4vDf^X-wmV>h24{_!7xtJlY)?&d~!GEHZY9#eR9d}flG%j6ImKl^4Soe28P zIQ>obP5PUqtEv2x#-Zlw0?fA?%{_1n?8=q~lgojKZdbNSxrEN!&J-Fi;H?}>mjV6@ zb_H06U2l5(PIpjpn_b9lu7#!Vp#!d71#$a|!fW>Jy=*tc??W1W66WvhqXsaB`OW^; zWC{A{*u-Z#sjIOnMpjKGhd5y@>6O4fk+~?x-5X%{UFbMUbV8zPmZEQC7umD_0P+sF z5$NJ;I-ULyE!d<&RZIuAVFABfmX6j%pG?1=N}oXfifm+aXCr++HF8wF4=39rbb@1- zTK^pXhf~ZSUQ!5iFYIaWPKDWAjNuKCz8O#>9PmV zPL9#mgTsF6fvJeIaF7a2Vvh5ZUT-sQ_J`A?aqZmF73kCG7wzIV*Kh{9x(EyH`78HF zjimzxKe^q%2QmLf*!@7SJGi5NjtC(AY7zOd{NST;ecX9xB14eS2K~wO`qa%{bA5CuN~JM?e0Ph>Xebs|8|^kUT5r(Xn8ugE9<=FUvp?;RhSx%> zfz(CPuURTlx!V1(>v+mE=a%g92o_?`diN zM1S)c-ms*D*&07AvpLk7aHO4V zBn~_1p6U%I{IEK1g{#*rxsBkrg{v!gY!)Lnd0)bwl#entN>-g6oUQP^-(+Vicac;x z+t%7xT5O`8V-5Xtxl1+&4_JjZH9I96@aYKN#46E^tF6A~p!9!s^G`k-h?hvU!8jBj z-Diky$_Kt9KHgY6A0KYOeg693Dwc5yZEcr43 zF742R@XI`kVAw-C@8bVYi zdxT$Z0C!lolV`;#q!fZ{q>>L^JxT7@TFb9EcHU*lRpXkSCh+eg{vsin zU8U1EIivGTP51b%@ouQ&<)<2EgUVDPIH2ABSVt`vM(8XexqE3OA@Otgcm%JpvQxp= ztb`MQNew&jA1!hQ0ao$XMAa+n-m5BwoPsTsepo~{y-dfloy<1H7SEnaFXBlV`aY8~ z@}-2BtZ5kEILeQfOzhQM@OQPa(t+1Cta*fYTZEAeDa!Do#FqJDg0|9z=OX@wZAm~w zYm!0Q_q27-+IN?Ia%2u{c+)rVC{&voIxEhY7M@EJikH767pgt?;U}J5_F+0rJ7IXy zwee`0Z}gfbs~a^S@$m+{#o)|VdeOG>+^z9mHqA4oF4wc2?QHK&oU!RGQ;Gw?vyCuz zKpXl{R%OIatW=6zjYuu>lbXUg;q!q6NE`d`rW{6;TyyUedo=AmewpJ_4Y)mkKjQ{J zj6I?jKMbK@_GpM3@O%WuZRsQha=itYk_@Ayb*_jCc$r428M%@yroieVT0g-C^05Y( zE`KbdgV1YO3nd9*gOWxdP&|_NBnEw@Y2O5r6p|Af5o^f>;?Ts)CnLDU947RwKWFTk z4rp0}5jfyBNXF^B;6IrAs{EsB7uC&L;-2oaErWJsBSpNhnw}H020v@Y;BtQQJ*n0h zT>e<%tntDqJtQD^I)et-{dfpqZ2q0*ert!y_i(m9>C^OJ9`W?^wkf1}88<8HEsv zNd`p<=>BsSy{*2`?}ZeQIk1hJKT<%bwlthENI`j#+!+KQBeVRdyK5VyfKb%(QVUW* zMz-U{1}PvEb-ef?1vVq#Wq)Sjo%okq^G}9M#YCHbUS_6p%LXYR!{}sZj}+KY+d_-~ z#?H)&5bDenJw6e=8-hRvw?YcYhjjw9M+$7Hai$zbl-@`InRgJRfDqG&)DkJMpwvn? 
z0|c{2gG&aoOBFWQ#FwlfZ>S3i2~xY>)!{QO-prUgy&OV#;No z69E9eqR_RFHb_CP5&^(7Ho^EYGg6R~FKJxii5hOSUg$1wE2JRjLhwLKAr=1TK1 z=Qa3!3#5QtQXc2;W*OJnQPXLM6>#yX;PnDv1@so{VmEj zJq1RrfC|BSTLb0BYF7b$51Vyq9ag}^bZtM6PHoL!wY+LQUZigic!3IJvQ{hcxyP{DF$4f0(K{Z}%umVC+rx&iliiH;hjJpbKM!;ABW6Zi8{egECkoj@I zg4M19@}y37_E>=pH7l&ZiV(sI=vHl)E`|)ELL98H0zyP5Kzppfh8kze<~G-cq&HST z1~E1PHoFQ4F^xzqu>uQ9c36QGp9VDexfrVZ?Y4OWmJLBy7s999r` zvj85BcMeTAYPG8X)8t$<5LS@Sw&d{jVglP_sJc;}x>PwtvJrQ7T( z$hi1@t^J)}?h=K@AW9nFAZy zwK#-Te@&=r>W7-J0x~kokKeumLQ%_0Em#2=*^U<*tbkC|@xnD&m9YYw5iTTFK<1aq zlx{+>!V1WfI@#G{1vb>IumUSWzP%Y4#0x7RM05hQ#|mtyai(m>3T#MvV+CZ8AXot* zrV*(nR$xKN4lA%C6wDq?os`YK0y1N+5vhb^O1c&oM2<{kxzx8f?<*iAD`N%ZK{HlB zKB=)mZHM9|DS3}#vI-llfU+fYDn_h;JXjQsUL2ZuvB3)Rqj=devreo4564$6x~9u! zUje4cxv0eJT)M)1wk4md7c0nBQWYz}kD0N8oP0^+T2J(lp4<<0O1HrZaxMf9G-3t# zG2Nbk!Z+P{w%S*4q%u~3TU0YvK*9{h3Mek(0Srh02P>@LhyzwYfA-TuA_H<&fC%nfy)gh+=#;pV!F0v- zU?Q9l*zEa|Lti;X@>30hxI_Ug{HI1h1EJ_bP@78vQe*o%e@Uu#_9?4rSik;mC6{S}qwyQ}~=$fiYbCz-D zHzIh0C2RVF6eL?7qWla+-;2k&f=u1kM(G}5&1==1-KNZNp6xryQe+Sy3?V9L*pNho z^w4En3(V9ccykr0B!2dwa4`eF!iJ)vkMmSZdd}aWD)ucnZWX}=o3sWziQt>|`Aax| z!2-I;-m0)Qw7|zOL^22jW$PO))mJ);!8YjTFIiMJlS1Eq&`1QoDx6@8cAoTyt*u^n z*qPu9PddHvcr}zkSi)T_fp2&yShL)75y876{zz^m58Vm6HUnMCMefN; zuli^`-Rqw@JEaHmmd`qvn|tSNe21xOP~IyXZ0Ix*{_cnaZPhT&{2Cr3>AfJE--Bmi7 zq6=d-I+Km=pgZntob7Eq)tT&UpbLZ0jX>GWU7r;~o|4fMqdNzq?zBa&(MV28u0WLp z5vhPQ^J1k^1?Ed#Hr=qqE!ru$xtH8);AmBR*i#Neg=UKoN7P(Xz;W41^VxlUxx>yv zD_YM{yt{@V=(!Y!26PC#orcm|uhOfp61$Hhg(elml_G-ajahLT2v9LyT$GjWucXL| z$|=#M2HeSDN&^_~Y<198Z4-1eV=KEIv30&~P06wuS^!{Dh|;Y9^=EbtQQ|Acmp@kZ z3x0XCiWtpGLVfh>tsIDDCR;4;4Ej&?E@nObUIc5ESsM*|lZ>-gmQd^F&%KQKuYTOk zBp%AV%(iCyb95)5yY}Mjiqyng|6UX_85b^i1$Xai8dFt~^T!%+b7kK1t0D6%A}uRH z)~mD7oA^Y7#6x3+oK#YIX9Iebr9yj}PTtS7q_r0LW40Cd;&uA}6TxQPB47&g+6pD- zj_F@E>Jvb~eyTRX(W0wLpZ1p#Y}92nN24v^Q3H8@ogmZq_;v_^>kW&_s!V+g+d(zRmc_bvD;v2a;yA)u49tM~J> z@JSYq@WPY6BdoaD2pQu_0j$HJ6Tt?q=mYFfDfo90yw=LJv%xwmbr<~%w4_;`Knrg9 zsaAAdBjv;nv_n83;-dl{m?DRjgrw?NEp=jzqu+(re5Yu8??sIR{<5uw<=n)(^KTMv 
zUDL)HEfGU1Hl?0O|Bx;M*lGR+0LXsC)$T836JxYc{Jmhw81+w5n43hOqh`V>x;#I2X@F~+Y68?%%% z6mUhmRD`j_ih`>i7tL|*U)V0CZT8Q_yC3Lv2Y2+(SuT=g`IdiU#~?*>Q21QLU!$cY z_`+C&`I2pD&TyodfY)}BmZVd=#ZRQ*gu9{4LbPW4+JwJWt}@SzyPJKqnmOSDwxqV> zD}42ymAAq7>PAV&>zto-4&_CxB9^hEDdQl#KZ5IYgWcI54Dt|XW<9Z+ci|WI0qmzE z{;eFW&O-EM9wZwtGPCQX54B1TWhE}DsL>Ms`=ua|`|N}$T+Km@`bTU{H9m#_=5bdnMFE(YXLGH7~BH0)Xr{mEe6-sZ2E;!pbA1~g6zE1no^D-pQeY{*c zPXv{YsMo>=d$7>i7@+mf_%V1SerPb-Sd}c0`?905WcW}MPKP((vbK=(7y7CtxFoaE z*cqy^C*ZO8k(xBtSyAfj??-T-x6V-Ig;OlMrN*Ng`Ueqzx42X0=O;z+KaXiW-+)dy zS*?@J*mvc{PB5Pyx9_rbym%oEzP2LCt;X?DkYXW}OMzbc4JF*8+D@!bfHhjWcQTA&m` z@u7e^O_}FjX;Z=yxLP!G|3i&BxNjHS*CE@$GWcN7;7HFzm`$FjLB4IHn*+)xAkH)AFfA-$g6%Pf=9f)DqHl!a7G>j z$Q!tLDYm?Uhh0a|Z2f`Hg~%J_>7{iW_Q)B8l%zfZ7xuz6Bud{5f)X*}%+yO3*xa@J z;Rf6v(g;uV2R(GzBl`cuKQu7x5~K0Y3=C21B4~~81ghkweY5~E$zCp{`xg|k0)9S% zhk{5gg>_m|$CY0uk-m?coejzD-%}r*(n!zF4ns)Z8=H=d*}!KxfqeY8^)Hx?uK zSP0BHHM&fI$J28W;$7Jn``4t4xmVfwk5qpwoRu=39p0b<;7GiP)2&A;ueRQb{pk>@ zM{l`HRByM%JJ87(L2?hz$3?S3L8m3{p6gqopyOg!_IC-MAX)@-SePJjq6zN`fjH4A zp>Ob86@lZ4>g5f}3It1x~1Ax3DO$0}MGz7>?>8ilPpdc?DjYuW{a7by2 z3N6g57Lb>YoAY9Q`dFB4hje(A;!rRswz2s`J9Io6YeJyDl&V^JwV=CnT*{3S2ShI& zKH=Y#<1;_+=$}5_8>20uJxPbjz3npWO`jf((LsV6qwQXcJLfn*6}2$$DiFhmq5zxB zO4SL&4KlkNERh{TssX-~{J`I8!d6`xkmh$)6I!nw1n4RBit%eu-_rp96*zjx|7wm|$h5$-ZZiILjiROZ zy;O{}vg4GKnHXtx`F*}oU$#P~nYL_5?Ma>HUmx-G!WUahr0EF$q(+mZ@KkN7B~w~5 z+x%&{0ZQ3J!`DM>g;#{Xyy7c5<=SjH1-5F_YZ#d8f7Tc-7=))bDPZd!)s>8;)uHtU zKec^-s{#Jo5cT%WVnMZuA0tEl`|}m75S*xT%og^X|F5XNdX4TtuV0}wC0QpiK>L`_ z*5w>x%bo*hkJnbXrd!?LS{wB|FTfu)VY^)~aqK4ETu(rMww|!>Csd z$K8cChQ)iCACj3zk;07>w->3=w+KGofVbLufvsr7fKWL@*bc;s{p~f!w{rA`KW^G* zSzDG=z!d}P&)mS0uC;8dv00oVQP}VWNCnmj3qIR~x7Fh*!b2toE3zN`mBp^4f$MCn7%4JByG#d2EOWV4IO;2 z7rlBqoNSLKz0Rq)aeV}zYS{0()-Ya{D{_g2{Ek1{D-dU<0bUT@R1n77UrVwpu1A8O ztbm1C3Q>`lWtWWwaDwFF5LrWJ1n`+AoMG)9*_5L&lXoR|#f1=)eQR5Z6$evw@!eUefK*sg7QMD8mEK0x`Br!` ze!M<)qCH#cw=dSRw=e!-h4+(q#?cX3D03WXr8m~%B$s}d^;L`HyTmr!&nhewO0Wbe 
z*Wgbp96TtO5LXE>1svahTEFQdAEc~?g4fFZo4~vLLH8b%(PmA(@593q$3)G@Hgw(iG3Zdn+7OViuhu z5BjF7vUVyQ>}sBMPJRVGQeh9fnQZ+jP$C!mE$p@oFddt1QCS#i_zYO)IX#Ni`Y0<~ zuFEYqmjESKU{%SuWhPLIR;%jh-aVgVbIrmdu4HJ;*5Ic+a~}@5?u}UvzB%{|%S7Tc3?ZA7+ER_KKLTiRmGS%VDQC-LtvBl?8#&#(R;nINn5WGexBGE0${*)KLR@_(~hI`dv9<3%UBT#%lO9|Bi(o;{h2 zWPqNGC#&+Ba4E3dpLq6UG6rUo0n4n|XF%U|G8xx~1nAY1@#B#;rC!zPHxx4aL_+su za%R1IG8}T0YrmdMAS^m%5immc zWWvA1-66aQ7?gL zHAxL-xl3OzdI%njkNdt{B#9;Vx#t6ZFZ}VQr3xe5rukZ;!)z3HhAQp}cr1RzTXCGd z*!EnX^vpcvN`>xBv_3A-*>(9V(Pd=VU5No!z?$WzA%ge&y>=jJTA5z!8aDflPEC5_ zbKPlwG_=0~MkW@{R8)RegqTXrRTI4=FWrLp!wq!R@`zVXV+o2nEbJ|GN@GXG-`l3NCj5fm`+J-XsMN(} zG@sXZxzTd2SCUz%EbzV@DI=OTDhoRB!3b_BN0?K+X?F!3I{I{P-0Bbe(|&i*IoBQR zpgT`*9}Oo{$sGu%PoKVR`HeI`AAxp!sFa`iz{*`zZ$_Sv{R1ZWOv-} zo*MK{w!NoxIqXRb;BJ4|8^?`!E}i2fyZ*JYn*FdjJ&*2!Z5&cx7=dyDdScw$>JQIkCs)z3z*FDwOA#z-RoNQ%&~`*we_G`(gc$EGH99~wSJb!*nYo?26 zdYScjJ|p0AE$={%Zg#g?_u%Z)e+SA$=-+5UG7xvOdK53B5*nA$^XPK*x#e4FCThXK z_y$8JV#no^ka{Bu&Y$0k;0-K4IQFu?F>t8%KEu#yU>3d5gX>bByOQKNSf#>+Z#Li# zmbuF8wggA@6Q1mfSax~_*_W+L;gA-pQ07Q?oxpb@SZCR(E^Q$m_({9yHr#nK7Nox) zf#eo$Rtg5Cc2>%2y~u*bA4PB*%TZ;$qV9*Sak->zVXHF_8{cWbot_Mffx}mTva4&^ zDLPnL5dV-D=N3v80ucOJ_S!aeX6 z>-!g@cs>K|aJ6wWtu_wA4e@KNs*Q5Pt2r1Az7oOfSsv^{2IYzi7tXUS)d-i98ew4- zi-U_q8h#|j3w*iZlIVMJm$@R4u;k!YBQ zFE}+G)RuHLgtdv+5HH{loZ1bBFD?*Rvm$9VoGsZHMBopdnhb_XF%&qIy$m&%CWCpr zT#YK442?Z&K{!`Uz&D-R3zjvZKJb%s%^?Bpi;*m^vf)rt@}WSWMI%}ne%k3yI#YB| z^=K?@G@4nh&{$erL&!w>Z~67KURZ%ve9)#|pfh3diKb?QOaq>eWH&ANa-1yy5I(Oa zHPOCxKsC|aGOmY0@u!3!;YXVeZ6cMSs?wXkvp*Qf)`@+ECYVVgIbhy*2)r;uk?t!1 z`oqoMGo5^SqUmRJ>Z&_E;e8GM#HFR>^`TBphQTGpq#v`T6eVC`nB_lpX=-_v#Y9!T z!Q$yTG_^bdNwU(x-(G$$FKG?B26Sy_bp%piQb=3AQZEvhmpn|IBQ#KQv~i`df#gsE z)Gz83_Ir_RYo|ZY7e^BNu_hd6d9iBhd0UogG{$Lc7RHi1Qp5aP7dDmsj|h%>bHyby zN&Dm#DXj7nTPGUNzk@$-z}?28@H3ztceY|W25`f6>1l8Bb>A5_#$+aJZBYldOU6;)pSbZN0Wu)AOU7mS z3ytlPF%-PzO|iogfPt`RCN9{nX^2`HVlt?y&UeIi3HeMPrb^bO>mmazmy(qXuH{Y7 zgtfV0yJSY#1cjZB4ndTo=aRHI0+OT^8rvn~DN1IO)ZMUMGANe1f9m4s6{*Y1L`|H# 
zVY}Q+Wwovtn@DKqHcy9Ho+NW&{Y;<2+^}6TPNr=s&vjwDWMq~@brN&KcF7n@?%c6m zBWclCT(Dj9AQv{3xna8kS1+_==7#MGOp8631HLFwbisB75*L3;bHjE;Hie|-FMCtX z7bfjFV!Q1NfbHTj`OvQGINh*ahBGdRID@aUtq(4+PtLwrNjqY@44Y!ua6lXxYA)C= z^LWK@;UUP%NRA`6%Q7bf2|q~}0l-5FItOyYb{T$UwI8W(126pGQSF0+8@9{w#)XGK z*lx~-=;Ww>&fjRSx{Dv&uw9N(F+e!A$qaVs)+RFqF6JQNhVAn57Q%$DylW2>ZrCop zUGairyJRM8_|P}TZu@r0I2Z;gEqn3UF1Pbvb@D?%0GLe9ao;X^T)~?=wre0F2ETX;Nrn{$&9cG`eVBsJ(r}#5vZ87yi_8uZA}QL z8@9_c>7TkddPVB;l81?NH*A-isjSu&V-u-;YruBNTgyLeRl;NXVsa=g7T!h;*O%P}ej2nVc@qvzHp>#<#0Q@H!LVz*PZK;yV? zmy?fpK#Q-uYY!A|*zOUxGZ~mfti@lKYv`cKlpW7xAdh44$$;$!gl);Ve0Wsg#g5o6 z8AHLFJGN^eTxy9OuwBy-wKT+J(3-GaLO#<6m#VzC)yhg-msXBUv4jqT+rC{w51XJr zw#(6TNm?8MNo5;)-!2(Xk;zO5ryI6Q2E|erDqM%u#nCHLmzRN=ICsN#xtYppT`@M1 z+PB8c#D;indIUFY*UWQW*e)5FOC+Q+Uco?G2f!y}(GW<%bqx@Q_z}RlCX9oC*w79h!w#)Ixg$LV#Wqq-S z`iybJao;Y-s2Cue8a4)+b!(Fu0vB_TaND=b%UcK&zVg;NLWm%e2F8LZ-Qu|2rT~MF$n^# zElKXqWktiE_pP!XUA@BLaTh!S{qKtB2&y^esu&4U;;%h1f-Fnml4_AIU4E)`ZHJnD zv9_sncafTGQky+ql#%GYjW9U@IiL`W0_kIx(Fk2@c7lyPrlWp2E233yxyB{6kfd`W zb&%&Jt$ewmZVe+q*P2zju1?M)7khl?UJ5e;q#}eE4R?}43cl-;YYU|cHBSH;CbigF z;V-^wk(V|G=?kS6pmcXv0GASCO91JIlaodw@KvAwiW;A;y0L^WqUHL6Px^kEd*#V+ z$wd`w#w4YWujv~j68w-)FU7DdRxv^3=+R5DQ!8y@(pE6OGOrwN;GIDtd%1E6qJ!DQ zm%i+V`s{7$L9aU=qPyTbV|0T&DL~4*&fvM9dp%z~s5Xvu;pOE(o(*-XH|?&dhd6!u z^lc%AXp|gl~ zm%Zh>b#!}sa6XlByoY|9yxQ*Jr1}~KyJa(}j$N{kcUF=mcAmJD&MXBn|1`QNJWc7U z&#*V8C$B2gDO-4=HBRkc+lj#>l2hdwoEdjF`@P|G(qjK6Q{}zg>AgFHPWnJ=y0bm# zwKhh>>3B5gZ1%Q$==P`K#`(@lIke6d5yWAcA=kxn4ExfOY}lJj(Z%fN;>PuJnk|X= zy7Uk1f;Y2VVclgiw=`dzMP$ayDN`n{^wGrU(n=S*XR@BkBv)Q|W-GZ{#@=k#ksgk2 zZW`^DuJO9wa40oVbxpk{@f7&XF1X*zVo?!5Rx@N)dp%2BVzu;fhp0)P%@!6ChwUfD z+)<`@Q3c*jR_-_qSz9oYYk<(}vfYg6Lu%Q2M0^c?#kY1qe5=E+sdmU8c8H;LSY(54 zUhv`!!aY9|whhCwJYOw{dX*cw_5%ds;#5tiKujKYF+uXIb0FpjTnfa!R@TgP+7~Ji z6MbBuKunHwlNqux5aWkk0x=oZF%UD4zo<;uD-gF^wt={`-N3JpL(Y@tMX3dA;(LHV zTpWQRvtMIHOdfX$#N=7$K+F+%QCYE9MSR#c5Ywj4rTitrm)fkM{FOC#{uMEK|5|3q z#z2f8b_v8}SjRxjJpQ6GVXr`Z*g6nj=!zJtFF+tJ-dB;?uQ3pl$6W$3dDb})a|B*g 
zR_qmskIW>fzUtY@~pwKIsDXruP{D(1MX+A&5mxo1#=Z$`Sd zp{`i1^PH{XG%1yk%EdVvzU}0ds!A%C!*%+#t(tB#&qnUGlnV zjUsO>ji1YV+I8dN9cfj~6mbV?rbxWSOc7TmW{RW@G*iUgotYxBME0$r-I}v-_Q79= zm$OziIosXtb-KgN&ScQvC@kv+VpvP-*HN>YTbKUNe(-P1!(L|ecmdmX!uU?2H2Oq4 z@uZ5)C)I48f3Zeo^VY8$nS3L+`NZ1hjU(rKqs$bE&6_D=n>SM=HgBegZQe|g+I#{F zo73~#$Y`&C{q21b^B?>TxF$2S@*70);yox%P5aaHoZ0Prk}*tDhVDj(=Pz_N2HnXd zz7h_^muIIgWgo-`$g1keug}BFvX4E0UMCuj74duNGPLjW9CEwW+wXsH_0aOAaK24; z!d?pJnQ6z3L;7(3qo`LC3{4TzMPp;$4b={@TqPC2??$knpHIAp`T*x}I{!#aJc-QQ z0sA|no$2<@w3D$viQ8~!wQ+4!VWc2%;%Bz!8Z&(Q-X#rEbXx zp|O``y2C6oU4F%Mc?E9)t1AUp{%gPT#=i3EZYXpy^ke$Xf(xdTl zxkcD37A=fQBrRrAVo-AY1XHuQ1XnVaf<=RQz!xLHmNv$nW~VzC|;pZd5HzDfA`X5r^sgr9GP_gx9|{b7HavvZhRV&PF3 z$!?;_qt` zs$}hGsj>L7rBzrMj3yJb>~aWhgx9hu4yVwqUU%4;ynUxT?rnB@y3OKmBGBfBVY#|Gg4^GXhg-o`-L9}SSq<~+=e-G_5tZZYwa za=CU zRZ&iz<*1q$k*s8+BJx>l710N5RYV_P8FEw+t^jOQL}~>~73CkdRZ;$NmZNGaB1zFk zMdY*ADxweAs)#hjKt!nf2(sz!*o}x

aqAM$1YWgbRRV@`%6_~Z;+jpjmq~+RpOwJysbq& z4r~f}QkU7}H}Y8$GBuGoGfY%Z#QEqOfO#I#f)Ld`dq(@+on8|EK|6rH&a=_Xr%W znI05^rhfL(iT29}DQG2m7c+ft>|jo4-eJx)aAMI#r_eI9Jc+F5XO zG%A|Rh8$dkAwb0ANyXjK*ty88$H~!*WNxvFtX<+t$|1@y1c<0Sskp}~b}ljzxjG=@*lxuFGcm7N z#nbU|K!&A&v5LZW?y-s^K`hE4=J9mgMa9#RS(RI?;^?}@DuDYy zgomhjI_{$4>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABD-0fV--Vyh{_>Wu~gh+ z6-z}XBDYw@P<4$}90?*Ohgika@o_*?tolR)D$S2u1>r%fA7Bv<6PTl-LxdKFST4DT zgUn23X}{KhN&!Q~c4lz*hL~ni&xazH3-QrFvy+MOBMqn&OgJ%86Gy$!JmWPn-8#OCF^l;Oc^^Kvdv~_@1nv3< z+o-pD=sHn6UFpB#Ec5F7RekU8`&HelgA4e-pZ~!hIJ9SBVSRJueb3yM zTybjB8;ovv-TD4#w$@o+{@^$bHmt?4KPExB8RTUf8uYyRhAl8!J)k`(S@}yffS0nRTaU zd)xhKd@WqPb|Bun{8!|^C~hn_TQ7uN*#~0TUttGgcI@6}c~~8R$Z1lMIP9=@sz03a z+}1Jd??iAH%kHQ@dvZK^qC4FjZ};0<{n51Fexg4a^@r`dM}yg*HypgJKWR^Tqpk6| z?#^g%dOSJT#^T&{`*<{+wQhl>?&h#JoyKiA9A7^iZ?3J|4bgfL>^k2Y?)2kpHsa-G zyol7*IJDeER+FWqadmDmiv4-ImhiN11D2zBp|$wW zZ~CdHSze;n^GdudLStBpp@BL;X@6$*Iys^XGZq`wz>EU?#CpQ z74wRDaY-@z0M?e%FoOQAw*VHW{r*@$#r;!R^kp-$g~s6NAy2?2$aK`$Q`EcM)7~DX~evw-qnnjY_ZC zV0OV)Vsi-wpNQZ_6Bnljy(w2zp>lCNCH2Qmv^G2p*T=80str?$t>=>}>Z*g1Zf(?XFgB z-(O8b^eVLD!!{wB4x(t_)8;+>$A(IM1CK(rsiC8Ta@j@zCYTnUD|!^2L(7Z#a`XGl zhaP)6O*>(DDcbn()4Y|{jT(^nSOeY|!s58zU2NsU*)-3Ty7pUFH}hVi=GytE8)58# zHuRya%7~rVx|#~dDngr&G~siB1V|hE@TMF_lw5P~BQj09lbJmHY6ET$;Lo_h4`Yw0 z#ScRWHTfowr>&ijkl~9FyxdlZTyG&X=Qs*+dCJ5{FmW7afQkp@U z|Nm(2tMcJE7nL}e2P2|;wVc5O_&GBMm-8dKr3r(}A4{AyUKpi^sO0dSrgN6~Km1!W z>_&tnc%Y?_g0QM#3chTWTLeJBh+|}pX|?EN-5*OZ0RC4zwgA6t25XRxN(g~Ph|Dnp z6AHpwK+6*xp!NWN&nn(9IDrYuD1<-)rHRiF=6NC6p0%S(Y2aMh$@T&I$e%tOxbtzd%`5E?pO{Ez~h5wguIyn+n% zkbK$S%WNtm1y-DqrGH~XWkc;8TKo?~2z6#EQ}pV{2p3jJ0r{}b`0SAa8)}>>hY_VW zQb6WyYL%^!0zyn9QcI-3f>Jx(3=qs7O&%Y{E^Uwk!aCQ6aADjuS4q#}6`M+3iww80 zLJA0PVoPEy=E#F)q=0-_Y@eh0xAS+h=6iqg%uo`2Ub7^nJZR62IbPz zMc7q9<{|H|?RFIq8d_fFfEAE=$a%5D3J47yFIy7{@P|&0cA_*RE$^wd63qEPAdbHiisB+tl&ruSOKOvH>>~;XL3=O&M#-A zFp1-VM9Vg>jyGggq3FJlFAhtV|kTHke-x7DtKoD0DNjqNsmtUjy&)3?G3j#S19 zaEqE4_Z5%;k-dIV#F~qM6&!KE3W!|HSOH@L1()-{3XW953aAi@N!qHy3M)7|53GO; 
zGFPmC463Og+RPgWF69ClLf$$D+*d#b((*C~tboiz&afR;KxpWA;To*USb@z57ZWQW z^Q(>(kSBGrv&RZ-s99kJR)p%>S3m~0!V1WTbpo`<3T&uxrfkLvY)E=z1!RyQSOFoX z5ve6sU_r?aE3hKui(QdHj6$^8S3roF8WFE7Rcb}%gB1|oDq{uYK{HlBKB=+6qJHv{ zp}fyAG}Dq^$7){zWlQK(j93AA&}5rogaS5L!O2E5V@GK0>%dDv``D=cT$1PI|xTB zVFgqO#UyQYk{Z%6^-E;;;*%TS(7S++7p!>V;5t3B;6^wacML}sNJi422I&!%?Ba(0 z+yZo*;v0hT$Up~N5>V}(_7_9><%M_s{hxUn!g5uB2sKUE@+<>%O5Dj{y5f2;5l#qf z_N+PTPP9tjCywOrD)>vD+l26+8Uf9oq6+~%3B9@ykeUt7;4NYJT1cT%#GxjW4=7#^ zd+-*cp+AY>_Zsl}P+z2Xp%8jDBNFW9eYUx1fnVXp1*pI@nikzf(RY0dg3 zJ4(w+p~ZsFG~j48^AVaM&2pJUMoBPREh;#FQM*7EnKZ?d+rAJ%r-5nQlIYc^U$ z@J;*tRrRe3+;k_lhL*R(sl*D_E&J{)%oYbbs<041+4=@c^=*i!HQEN<{6L1vX5tBi zL{w<-E5Zr3NHBv@`&_>_>Q3Lflbv$hpG?M+_Hq2v9fQ-S`;-1?vmYNQ{VJ&_u!Or< z0^jgJr)K-2Mg;GO_#?TMJoKjM+KgHE)MVV-+U!kd>G9I-C;NjlXJ_<4-g3vku<-WR z@*SoEZ-93T2OBy~gugrD069Hjd1EO(VjrDg%|IW`*1^(w{7bO7mYzxdt_bdA?bmHG zliuhFa#nFn=CWsFH=4`BS|m(HB^Pu|vd2`i3oxccEXUKEO!)J7m7J9fR1@ABh2iao zg-s<#SYMg{9f1tQ2r;SxGp%aVB@zd1yt8{wh#p1RyV@C~|rT zyq$)Q@0?4o>^PU$eHq0(et)~WH8?lT4uVG4oSYbJJ(b>W0_ai$ z?qo2f0gQIeb+;$|DLTl${W_FRhwTe<)KoS9LE1t6nVmzF_={ay(etENs7>ybP z)JMPC%7IvBvX}6?!@(2%y7eThI{0N4rTXCaBjB!+vJ%{Q)SqUYwX=j;H-GMB%zyRc zZZ7dqrajx5@y{bzt8AE5d-39(gW)jo*1s2pOvZ%^UIj21ZS|j`1(fnl!yj+JO_g~+ z-XHci(JA%#t7Rp~dNqzsKG7iY&{!cSMY4gnH=tixDzvBR=KV}tT8xrEBG49H5{U0| ztF26Mcl!Sm!B*WOU<&fuB*3+53VpXEZ58~hMtuSZ*iY3aIC{F2(2MCBIQYv5HtVvQ zau-({?`3<&?rfG#U3ZV9z21S|uGv3aaZ9PkrOxIX|_G-Z;4#|ucPi`!&o*GRMZ z%wK~=PO9Q=f7A%9n0eG{7fHZA72+p_Z4ZuaO&uVw8EbH=BrHv1U1<%U{BQ#{{9_2d z{L-~z<@YW4DzR``YayVMv#a;>^YBRnOvHkKTDS?mBdoaD2pQu_0j$HJ6Tt?q1D6Ul zE2>BM_Yu6(%Cxh=x~p{;{anebT%AA*ulcFabX_Cm#16DWKp^6y0v?zmhn0k66uTY~ zw8qizLMxavw7s`>98i{jZ1JU!+ge!8O}sn*CgIjKZJf~(F{ENM>M30Wu+#jr0FeE# zt%GE|5D>Py5-JJItb*T)_zhu7L!cE3Dbcf#RadFarP>bI+NG?zRo^Kx4p60Krz ztM_It<#i;t+0XnG)@@StDV!=PEMSi@eofezrJSLFE83-^D(f87ysO7Wvy=Tx+oiOv z!TEUi{r%qXj=_1$MY1g4@^9=Iq-dV+XCwX^EhWKc#~RF+Y(sN~BT?|$F4B^8YPa}_ z6r6B3lv#+@tgkKjYvn5Q%%rz9K&zQkE?`S)JIh!LZ-MXCjgpSny)f;rtNL+Rg)YNqXx{9dQSr$ z4JoPF&NkZRcV;r)LHjrQ+xcFn 
zyqJ@^y+R8*Q-LXC+%E>UExd;aVF}qcNmwNXcXgFm3c2lrr4yK^bXV^`i9(v)vHqF3 ziTU(N@x{=H(T|`c&NiP@@j+I|cW3 z$TqMHJ`glG(lZg}lP7ABZ^!I3D4&EZBZ{@ifX;#xk@UiOwzQa9TQ$l`n{eQRf&DLw zy*T>dj1pQ+byRzb;8ci~&niFKfQQ5BRcwLrYh20w{(Uy>8c23O5uPRjHs0HWQ=!FC zZj_leIcU{~t@M$Ak`WbZE?Tx|=ln3dDLxsda~8E927sbeRgD7&Xar3Td1y&$)fFvR zPE(yc3J=F`sz)apFym(SDcC|?N`uT?%a9>CxJTVyd(PfY5{}cbvz_3e<#y>MK zM6rvYHNF$5lA8_C0t9+(bfsCSR#7&a$TAid~uMi@Vv&yXc<1 zFfwDOS~ziV<4Nigtme$5C_8YFCI-G1{AdWwIW@XWfXDG0UD+4=*Q5)Jtg`bTsh&H* z(Fr)EGiN+Iyg>!Pk?2G-*%lJB-FV$9#R2$K2-Tyv+$E~F+u~h1!=PZRiXjBaJv<+m z%nAjamb81WZ-s)6i(T2@C3u3UZp`D6+X}@yLLg3bO6VK>Rz=`=V!4sn=SYrU3n~Z{ zWbnF#KZ?W(H^7<%B`5U*Ekr~tEm9p*bTEQXgeW8E9P^5&C?f)j6R9XbHlb6+*~B`D zD2fQQVCWB%(K)5xpf@Yf7t6O!=NB99gRpP0B}fYiV7{vs1`7nj+^shf39MXZHIJtmEuq^ zD7LZrLpyXl8f!wJzLaW7c?u+_P--n`Fr5$OGCjQSF@NYtJmpi;VREktjry}E#}jmr z;O2O{-{#Ia&QC=xBvGZa{9qJdlUb=cVYoqNw+nYz+CxaI%=vFO;apuBkmh$)6I!nw z1n4RBiXj%`)}X$t0sbp;iV5PgGs!E|oBrcaKUPIY7H+Thip#<}e4zo4)Mb$8NayY^ z1g)?W|J59`kZE&e-DLcm8bwR*d#M;{WydKeb1~BD^7~w)zHEg|Gi}+B+LJoXzdqvJ zg)i17`KL9SB!#DHOD&nwlKJLOZR<<2kPf{1dWfy?itv|Lc15RLn=PlnR&9F4Fc%FF zT&HZ`iu{in!v%xz)FuUN-Q&8Fv9vlw=#SmeGD>I&zSRK#ZHRjNX0f2!#E+37|NZ$2 zRtQeyYvunds;^$7JJ9P_Xc0NBbJqtNaJDYz5L@;fKzp>d!ZqFM{^r`K=XnACxCz_! 
zicGtcvNmVR`AY`1Nl-M1!d@qI=iXU39dCu%xu*ykIiL19@V_--GH-=*;9PA5Na`?i zRJUzZD(qrJ?P3c~#c!_FE|4(bCo7clW&{qSUO60h7upyW?`3{S<{m{25qcIm2 zeXId*w)FyA(TD+|a)z)Sh!y)=YmRT_=na3;w9m4(EUADi2GpOsfh9d_*;Zq-I7Om} z4VQGz_)HVtQjfQk&C4xU*eZZ^oq4L)C-gh;ra9m|g3mW(_dliEKk+F{tfn<%<6Gx& zRuC@gvo69l#>;X=F0qu~ z@%J*da?qJ8Xmvtc{Nn; zFV_pM|ABMsP3(f>T6BiIwS3Sq)kw!+B%8d^lWHa;*wK_JLL^@klMaU$s;1-eC zgS~Pi)9SRkZ?!gQBzy=Sj34lbgha*?#eK9XyGSg!;U@sPH4Bfp zk|{dDPkZJ*9CG=NAg<7ujKv;CjB4C8>$%KI!j%w}6jGgBeNKbx*yM&3OcFFMiXa%|GCC*y~}Chysk$w;F2ihX-Bo~+7i z!ll5n`+FAlG6t9oNI|X8JsCePX3M&k_3p{|(N$^-*OT$%&R;MtQ&IhaRAlJR1nk$ihHDcg1SWWs=~UHkQ90%6f9 zsrLt8Cldx{II6WL>(thh2@I*6+1!(Lo)tZrOpsN2vef`RnWe}JBi3&bn%6oRFFMiX zg7jqk5V-2|?8#N3A(S(Vp>OM!U>oUYL1p}?~zlQA%#3|MBxJ_Gu$lgYR?BtWlq zGJZVL#>`1|p?fkpGqv|*IOMwbWO>N;>dAQ4Rqk$W$27Ei_hhT{=*eUnpe#5vXWl)T zz_lV5`K^-)V=$GTXHO=Rrgu*!%vNQgp%Rje2hW~N%E3G`l8hH?Pxf9Xld^3{NnYz@ z!hoz@`}Jf3VbLjd{ZSE2La(6-lVZ5_-n+P3TTdoTTII~_^V{R;6#TD(vk5)1nJ0RBm;D0JXw{`giCCdYpKUvMfMBbvGMaF%)(DpxE4pUYcLDA%6`l)Tzhq6 zJX6kO;FC=!FQ~9Jh=!kM zpa*;g2^hRT$wmXw(uN=A*bXRB?~NBk@DtisS5s>8o^o9(>X>^OP$Ux--%G zxIkyu<*!7Sk@4A;*yOtl%V<8Y?{cH%T(2hcP+8!e zqEbdo0SP`3!S&?`bE-e|BqD13N4B+osdOnNhri zzb1N)`K(?h#_2HSb_bn-g_QQhV00$k>t4hWU$(D-{YbHct=_EP&Dlld75DbQFGX-j ztIGDIk514mt}N1vK{ZY=_B8I&nXkJ%`ScHZyGuUhXSKT!vD>*z{%MuF5Ms|hcOf&k zbC>+n!Q4XSckpHD)yu5M^N9kNYk3E9+g9&f`)-_l25&=|2>lyRNd~%wW}r44j;}Xl zAeJzH*LoB$qskmt=ZomN^#wj-sJE?UkHEJgcn!-Bj=k(}3>7H(Dw2Bmga%WA#Eg2o?5@LHCm%6diJkFv%!2v}C@ zdZ^PX#=v(PaHl83V&L!3SXCTZv-^C~>|V98A2qv+84@U|HPlD)ufta&cs0v|UC5wZ zsl$b4P+6#O7D6ddTuF+NrL{~V@a2X}wxcYH0m4UINmvUi?Alp;@&dx3JfX{nZ$z-} zVMJm$@R4u;k!YBQ&pS09)RuHLgtH1mgn>VFYBw0Zo(+NEkDQtehDk9LIF!8%HJ2uX zdAwYWwn8|wzXQQHo!SeQHK9K6le0fZ+GSg`FGjMw%7#Np$%g`g7L90U7-zRP?at7> z0^`w-FGjMP7JNCreLR}Z z2%lclFE0Y1Squ1!3OW9QxXli)^l4M0sw(${`BIT*E3aT=(hDFzQR!1NOCWW-+EA>E5XAlM;G*EK1aiy?< z4S0@u?9kI77|-)hq%{C*_c+Ud{prIEybya~ryUaXpW-j-z= zjd2>Ahp{9N)iD3|#Z6`ZJ%Zc3x#E&p;LMZWG`-2~E$~i`@aGM9y*JZhI`bD}Qip#8 
zir4Zk&Je{V7%_Y|azMvRVL(V~{<04wCWAoW85gJqaZmfua!MoJiNm@^PPi2UtofP? zSi^G01(A}|ivTK{RU=K)ne9SF4GFj1$z4?N4HvkEWm60r)nNpdngd+JK3=vaMeVAA zA0YI6DNu_tX4of>48Jt0i^%DeVJ z;fC$f)86Fkz9Ac9GL!+bWmCAmF?Pdt$ut!BCvLn*fOG)>$N-WJ-gRTUWM~C%c~k7L z1YjU6nu!axYZ@XzLrjKf%?sNl17`Z*Dp~JtRkks~%UZ?S+7a93SPqc1IC?Hgiz6UO zTA^ClBEb8o0o<@%o=N}I#nB7HnQ{5V4cp~rDywzH*c2qRUtDb0%rl4Wa#g-!G0|xe zmdrzu>A(%!B~-JZDbuJswreCU8jB0IYaZm{rZP8dSKx|EW@74SU2@sY5!)4*7Sovn zz9>+1!FB}_l9Z-gzDLOF|y6J}Pifjr=&0qF&N^5S|Zl^A67ZcW?GHm~)1%<#3 z+hsW8f{0{+$OU!E`74DaXKct3+hy1k!-fOm$WU{^cA3W|;ezgN@Q}B>imrwagB!NX zGA9HHKS>t>!b30`5jbMI48PLqXcsOoJmwr?;l~Zz<#^-5gR}q28f6dJh=Iis+vOM) z1B3(C$kB6alUeYyv|{w`G}kNedS$ypm4)>>FtX3W4mOii;V4( zX(;ede{7cwpy+t{+DXxBgWJAcGPHs>cWl={xYQCm?AtXB5uhO^L(C1^B?FeRT@G(C zG)J=R zwcVRf2yxuEE3zphHGkR9DXqC-yGQE6b}`|Li0v|*aX};ywwv=OLNKW2&5sq#k5S;; zFddlSwr`hVQw$pph$BPI1>0pFw+{a2J*%w#zXp1_-Byje%y}+GK{nr5q&Ouw7o>LYVNC zckO|~4ck5Hb|!;Mtw)9m))>3(+a=S$Fi2^E!XMiu16%}bmkh1o%^lk{5H7XE4%n_~ zhyV>S8DehOE*UV>2bZcOm8_Rfl~SMg)+tA9mt#3V(&Fg3BrT4Bb<&!5BTQ!Li0zWc zvE%!vE{M(gwI@(=FJh?6_^&&nFGEk zP;|j|1rm~!7J9jojOe&r>9%i|3`)X)kktHTZ+ej=Oxklili{chw#%e2?GD?P3v_{J zpaFp$_w6#AaY4jmj-?Rd4CKl}ha0xbuqlQO2gH$~=7Q}qk4wUZ*S=j$S`itl;lqyD zF3X${B>W^@1ON}))QG@w-!8+ito9@It;d`%9=6Nz#)St-!|cIfxQOA1?Q)EY0m1=m zLcR>Ga~d~nmy=I`eY+gJ+5-i8q2Xke6UDeo;a9s*|`$>e5Z?V0!j~_j9QY@`Snmh1b0BdLq!2$p#b4f)4~`1HSvxH+#N&{LRk_-69E}DbV3oJ zwI#{jxvXgT^S)KqqpMfE)fF@X{qKtB2&y^eVvwYY@#i~yU`t;usB~?IntidhsdRUd znru>=J%;ovd394gsKL7%VR8a;Kp_?d(uXO81e7YEYt2ruvBz}OFK0!x$}Km-q!#kM zw^hc)5WAz});SHivWWVqy_ ziln0P<<%H0_$E!K*dOxgr5KjQDkg{=J$fm2YNahqmso6hdyvRpu3UoXK(qHY+)eH< zklj$9y-hvr_a-BBb9{G#Zh0pKNcnv#c;@F{&0D?drQjaCupG#=(2z1 zJ6!e>?DlQj+rtZ~jN?7@+vH_-4=2^vDA+BVNp-B4eZ0Gx9Af8*OXi^9{C zuKEmnQ+o2!GM%!8CtBmw{f|p23+(Z)?yW&8BVkZ!%Th)0^G1Gwh}hv}Zfp z!+v`cT?;W854&6a?LNA|X|#EvyV{CjUs|?}`qLS@LH&H(c=>{+^}$!6mrA{bqN;I# z&U5|2`**<`Sgx?{GMQVNFU}$|ho6Ic3Z;&W-Gi`_F>Pi2y;&pxx2+%02owi`$f zw{2}1?N(=l&bM&NoIzcUt;45x!F^s9i;4iUnjy2=m$SqrR!bjuh??}-Y+)gB*nU#X 
z9c7B=RN&oW<&MLUwFM)&T83Vo3A^k-Bl@Tq)3e*u;Fo=C2gJ8J{F-Wq{9%U}N{2-@ z=;j5_%^=+OGhy2>TrpynJ2}77A=iF@KwO-v=@f{`<1Qvho^=ky9D&P$xZlp2nQq56 z5Yzo)E>V{V-zDlw+#x_9CP%u-4A~fn@xv~Gm<;O}h?&QqQzq;ch&yfDKum4ep#ejl zk|sf^1#8M*nf3Zt#KjR9GW#`F#N=_8Kun%>4#XUR=adzDRm4Z;5{QZLrDm*OqRwu; z<5#a10t906{=KB{u#SP4dHgwL!d`*+h;<;=b#*SW8LLBT)SB2Fpdv2b zSCQGTF%Xl-T>>$A);SP!1fEk?>=lTQ+6H2s5xW%0L?2Q!HpJ!tftYM=mKm}!5aWkk z0x=oZF%UD4Kc`ICD-a*OXn`23Lu$dgK>YPx@IV=hv0K&PHbLot;=}9+JWwp5k=((4 zywYwnGKtir?+iiZIc3>{vP5ifV@^eu;IRyN(+ARQADmF+7VCE2yLRD=yFhXwyMv)) zKvov3uV>|}G4qr{5@55IhofL7kefWG4E!yu0_+z?I0N6XT;ZdLhVS+(nX9NHw#-f7 z=k`ESW<9g*b34PhiB{UJ7I5{u1x`G!7{j>y%?X(IpX6j1swDCgzfA6Nri=HQ2-CdR zlU!+jo&@Q_5@q;}eltV_gU^wGrorY&(ClIhP~s~`f+qx&XJVb!RAOBo??zg zs`ZU$id|vwtq5ME2q3~Pa>>V-EmDal)P>_hzNcHkn+$6Hr+bt;15r%wu4}XrMeuHF9+pO zwmi@_Hb-PPh|qBOCXUOcauz|o6L7gyhWh}fz4Lwk=;G(WuJgU&PCve4WAF0rcwuAL zx$#zi$i79gyQPZ4zq<$rGm^&<-IJa6tUsELC*4u+Tz^Vly?Ey$8R0LR8-LM6V=SWc zKK5-?L7g_LA+Z}TW#c4|Xcb-ZhG~r=Z!C?U%X``lwQf7+8 z=FJqb&6_C_n>SO$HgBd#Z9ajet=WZbWVBbo{?5LL`49dkT$dSI`3<6Y`EC@aW`o%U z&g}L*&KM>sLwBPCO_sWw!`^fnUkwN1E3-qHvJc_|WL3fY>x=M$>|^(%x0J>cMf^T= z1={y{9=YAx?e{&fwqor^t;=zLn{1i=|99MnA49#GU}%bvE*e`s5&rig*w4==-a~zW zb2y!UBqp9jW;CIobqDP4j(29;JF{*knrYmDm9@sfsLDV1N|dKA0hp|mb_CIs8{lv+ zJS$G~QkMo%ipE}%=?=5Vbomw2eTXdk;>TX8d8|M=^CPK}8;qaksT)6^mnnq) ziitk@eAN-iJ{-)ic?1&p$(+nQ;OI*}Zytd}IP!#f1nlYL^X3tVtC1(Zmy;{rD!}Y2 zF@22e$jBY+`z6%WSh>Vv(bh{cwnD`t0lC3>v)F_U_o z50PRnE@T*{h`{f1m<>b5Yd+{q$PdhnT3_fQH@>)U&oz^s&6W2(bKCd- z@^3H0k6asn=^9vrCOQ!-UV`YUL(z%!>jm^RL0{j7z8-o3pkFSgOE+uT%FX_(qsHRW zuI8TQcsHP9my09(;=RyB`}oj9Z;zsQ4@BpRZ_nr7UIlxZq>X-wmV>h202>urA-oURmu%o5irx^ruzk~-y zKUi>o81|w`^tz|VlXJaUyfmFn(7~F?5~|=||KROw`(Ypc4*mb_uYVAYzc(NME+p;v z0PI79bx-v+pV*#^x9cDOez!V1yQBQlToQ1KY5KUoJ~W>!DAkUr(h^Ab&+RvbD3BKA##ns@_M_?J+tSvPZ3d zf&arP<`2&+gt-^?bato0Y%a&}8c5%aDKV#yTnDnymCFAHXddgIA&;j4FAaQX8pK!E z_5pO3v(Zqr17tPn_CmNF_TD$_ZT8QOhg-qSIdl!|!hVjwB#Y@%7ILR6?ngU0 z#^)Xw4N?zGMVx&s6_~^v7pMLHR@@wnW=Z4V!lA3sr_nDu#c!_T40LT7mO6`9?~fXX 
z4ix<4cKK>F1p@?-hIBkA*)FJut8lOVy7gX#}0oyh*GG8YsF z^G{%p;w*Oz#X2Ms%+fZBq=Tmp?ncRCAG@^*{pK2ks0iC)4(?74z;67xX(!)yfMPn? zfB?h6boTPp&0g{HXeCOeF@$_~i_2&z7FHYWwP>{du)jHruYf&h)5F$aHW-f%LaKq( zMbfYR)DBdxb|36Io-)n(B{I-F{4b}GIpvRy7B2+)Yl+0~f&<4_Xs2Fi@T+D6iN2B!N%oRwqiJuP#(4BAKiKEO(Yt9#D8#gXQ$& zS$51_DNRXRoKGp9-ApGj)8OEt&4KQ3Hm+HH=hrRPf7Th0-Wqk2E9{5 zw8aI*n5=%mSuL%X_VaN{$ap9$rlvaU4Mu4`$%}`GUSm`|fJpShJWV3Vl$l)`+8@FygeY)3Qxh4MG7k0zj;5rt58K0W=C+B;q(qlH* zvb%1g*7ub*3!p-8?LfSD`LD=-5!zzeY`u`MCi^SwK+KNa%WTbm*ho8UBn~_5o$3## z^$+_y!qpW#Hj5FPyf0x-%10R+C9CcZ&Q|!|Z?d!XMWk4|W+Ps1X4_gDhnAbD=U7Mo zd{&{Ea6N{KGd@VE*M#tv2>@K$p$Fk3jSyBmMfo>P z!bu2^=sgvL%wR;s?6oUJwT1K3K?H|VEPksqwCRQrA=0Yx?1g7QqT+ZC3Kuns8j7mm_XR-AbD#3gLSLyN~;g;7d zNWuFXpxIW)6%81nx89wQi-SKP-pS^)i~a5nQF*B!OYGl8pxJH8Nldr}%+i2@#8ZuC*lG#-{ea;yr{^dN= zl6Mh!Bwkue-~L+XYXu!IKix1JRHh2S0qyq3I%>HvLT3?G-m)k0bNE;UuduRH!Pl&W z6M#vbVt>;jXAoc&e@#@qvhKaAQphRTLKe>=vgsW?maTPmvTgC~sq}syKn#7KNg4T4 zLQK{)jBgy}M@uI5YA*P@R#@r4>l)TP!aFU(NQM+;cu``@d@(^=X~Q!Sf5WyUprJL% zAnkkFx@YaX%RV_W2R6Lv8+a6|O%0tDXG{yvr3ppnAi3-8nGZeobjye7H0^}pMc2lo zY2NBJO;$H*K;mN!c%#9Yt@NU8<+)qqy=%B^0tt{d_Tf!Aj40FQo;L43u}9NA;)|GmwE?#W@MqlM zhp|W0;)fv=%pMJK1HKr6aa%e`fn0CFr6j{BX`L%J`d+3{YDR=s9~;O=8(_Npv4{>r zuVF2eB!mqH@)&soVxb)Vu-hn2`zDa2ketwnSW7Mt(+2k7lM&o(4inC;zXKeDM3)Z7 zCwN_7^&icBRZf$0QHj@i%(mon?f}5gnK8JWpL|cMH3pYImN;u%(vlZ;>LES3YwDE# zZ_Th95t876CfF^1EC^pZ4!&%aTLeJBh-0MfsqP?^3>`o$=B;BYn}XB8_`vU)!5TD^ z5<*}RB6E1e1iZ2qP;lvs68t@@c*EcXCMcs2LNUppNCDk{&Z4)~7y7-B0x}0SuKObe zglb#E8G{s*7s;JL05USmkGi|IK?(>(EiVO9z?GPa#!8NQxWNfzdSHVTkPqv4@k0u1 zM!?JdNKmv@=e1=hj>5qPWU^LB0r{{_cJ@et4YhM<@!!~)SrPK>%*Y_heKR&l0U@Fj zpgmGxLya@#FrxHE3dp>JAO(b&Mx>TVfd!>@x)~stJ(`@{3yvY&SRn=E!(1D}g>lnd zB|VD^Qa~osDs?T+NC6=!wj{=4jyz~a3dkolHdxe8USg5ABNobWK?*2aLZ4zSxj;-C z*t0?k@>eg}V$61vfJ^t79|ps@kgnyjK?*QU&P63&=P}!o&((_*khfv8BL(b{0{oa6 zDagr}G+r2`hp6O!sL`+lp8y-AAm>8xKqFFsA8Y9*M6-*#7~l^qkOFc^d7QtSWn5=R zO{X1Jz{RIRtrr0+pto2Ty9q3Uq%Bs!*Z^vq_*W(?ZX}>m0voKLlPMy`dkTzL0Tlur 
z#D0H+&8`Ca9yaUJI;?<+zS@2so!XkeYRSDl-~}p>$y%+%lMid^hju+!0U4R^J{3KC zI9@IuRzMiG!V1WTb$a0%tXOzKz__cxW(0~AWbXyXjTPMn-N3sF$ox|KpiqSsRzRNA z$<7`tu%Tv!6<87S#R|wEURVJkq7$G!R$xPoGi7s|YeO={3Jm3Yku6+0BQgoF*;PP3 ztP!atR$xKN4lA%C6wDqCs<6Te$c(v0q|`dd+I2q380Hdc?*W-E*z77GAFhlQkO$3J z0r{lH1|_{h7$>RxA_3lyVY91%vL$pX#xj^ZIHxdSv#TIKf`~0M>%|<<$@ndGJASYkac;R*)49^WK$hi2^v^o z1xG4j1yl%yDuV7Spy!dXF0I1~YJdR99N5UN#UZTvYeH31Kh%U3kdaw_{Pqvx`RHoIi0`jCzcJ^3-4K*vQz>1J> zZ$<|3!U_lxodE5z0vl?aDVwnZ8BG^AjC8xwZsZ6DA{2JR)m7tqoF^y z!V1WYxkkhV<{^YyV)xLh18A_q3docyV+G_vGgd%8sj!H3h;1z)uL;lP_sp>xnM#K`y<@@PXC7f}D%0SV2A~ue}2~4V~HAUs|7V^K%b6NK2+LIgA~2gml!HrNPC%^R>d!i$ zQ{qkr(-qf)iEu(-vnRgS^cMn$53pu#+Sd@VuRXwjY6LWUiY^57B=qV+Kx%AX=O0{D zJAA$tQm7PhsLA95ikHJ4{Kc#9EF$>52E0C07{Dk*KubffI0U4I?A9O+5?Uhv&;no8 zz5p-D_olYT-xf;Uj|8!ZDuqG#OaqQqGasQD(i{>Xkx>%NR<%8nzo_M{lEM7aD-k=@ zq$zYw)ub8F+)bVZzY)P}ELqbZq`*~8>dq}{Ne)|#^1~Kyg{AK1us5B?N1+q149A;m z>lG%Ko;;>`t(vpjlo`&mOHL?25P}2hIFw)`zMP>$mu)RDQfn* zrKd02?)s0?Y<+{J`buXp*aqGFC5y^tQgG;ph(z!!!U?wc;?Q&b-l#jp7oK$clgW6} zE?<9Az7Zv+Rt3~gRVT&yo}aJ&o||+3Q&$GwXb~otRTxWYOn^@R4%ta1`oKabUH&9#%y+{ zo4sLg(%n4U-+ZDw-PuGJ2B8~)vYWd;BZNF9qbJ694#&M&n_Q!joRnOFDhVP|0cqyN zN~H=Qmbz@ZVToI`Q*v_;x!1tas`#*{9EJ+5utH0yI06JXwyZRt-Pe~pY;9VR??*$( z@Z`==<4B=N1#uPnPz5z~5Ndj*7J9v@nJ)4dH)*Bk zIipJrxRb$@1~A$=*F{&gP0`Ja?d*2M_Jz7N#i*j1|12z|{h6IZl=zDA<&RbUf?wXO zB1WSI(?0suRu056lP#8ahl3~jb?Zr1b)bbB8}-5ON5HkJR)QOk`qPZFc9u}<=C53z zu)Jw18~Cq&+|4B(%Cu)&GyXZc6R@&jQtidr6{(50{=Fz;3D(>FZG-N8mD*Si@ zZmP_Cel=u%MWkgV$a-}adJ~^$ka%dUkdsO(Z*M@qvQ%hK)6M&twzSqFf6TVxUb;^I zeMZ0sE=i1gEJQ3kMQ15RUOlVcUbFTT=(fYsMO!DhW%|SXWxZ zCqLYP4gVN|FTZrHSowVmzDg|I(pm`U);RiI zXtTx)ZSSoe2b3Wcw)oP=Z7nS4Cf=QYlW^;rHqK~?7*esBRe=@(>@+_oK-NbQAGURn zj28mJR#!qLfvLb9sv7vMh~E&VGz40qkPQiN+RSa(R z-mIm(j^sA`nV-VCO{zYHQzd=1#~x$+ny@iTIYR+gv`a-8OROlk>T!`elgiuRU)nCE zZ4J)HyYKJ!hIb6kTP~7i`IdiU#~?-Xd_NoU*JvpTK0DT6zGNGkGaM-<;I&<(CF#^| z@e?UH;ch6i5Up8XTkzM)RpyyVZ)<>7GpAg@mPAa;SPO4~@70Zxj@P{~?XKiStmIV~ z`^`GA^!R{Okhk1xIv!2+^yO6Ve0Q;GUe=7&8vk-lm2g$~Z%NQ@dVOX@uh 
zcr>J>W;@$xm*1JmcxSu2)!)wdIwi*?K&!hcV09!9)IhYm3cuKttp>Tz7K>zaJep0$ z!&E5!5xU@P^8$0+fdt8OA1{~AV?m`O>b0<}KHbaz2s{)&SlY|)8$mHU%2MouO*kFi zfXmuK&cA#&(%Wn>yO7ipT#|Wd>s@!A#f-sQg{|3$)&fRbOpDdlZm(eFt%Rk%lWRTin%DJlvkBE|@Qy#e=x zGdI-BV&piyn3KA_LKVI2&}#U_z_x|=5Fsof`z8siq~NZu6duKt_QBE#%oEzP2LB`q zX?Dl@XW}OMzbc4}amMi72#$wyjXYYQ6hZN!fI3Z?=U!>U(N@x{>dUJlF#J%X4(>Yz z_jSm&nhdCIDxxHOAZT!;XCllePt+ja4mA&z_oVqGWEoMcMFw;hq==*!&a8c23O5uPRj zHs0HWQ=!FCZj_leIcU{~t@M$Ak`WbJc<2`GkROIO#V5mb$fA~&W*Y>6qNAxA2M*8( znjG@blFUa}r>U+Ug@@xe)uSs7nDMg_Jml?F*`gnYGx8Wf-oV96vF>m5d^SYhC{Hh~ z+ptH@A*3Yr3ApfiyOa_HC1S*xsh2FUxoi1D4Y)6)5uO+f`{=Sq^#6%}XkgeSM&q9u z7^2ukOx1e}EdriErQU3SRw$mdnF)M z{jqRX%6N8og9?Bn@ggoHX1noP;^vRz=`=V!4sn=SYrU3n~Z{WbnF# zKZ?W(H^7<%B`5U*Ekr~tEmECabTEQXgeW8E9P^5&C?f)j6R9XbHlb6^+`EtvU+mlF zmUO|;A10%7O20vGR-iAIZ?ly0ND|nQ3jtY+04g@GvOIOGhJ;2>={YnxaChpLp1+1r4SX%$W|e?T}JcDGmjL zVjG)3v_r?Eu_grSOR1KWM?s!iP+vMOS?cE8P?bDW=wT13ez5W@$f0GrH8)d|B5GP@isiCL-ThmcaOR+;nP zZo)a1meYPR#hj)2UDbrvYX<>(3I&rS-#YA^BUaXgcQwF&MNTn6e0C;zg?iI}9O~by z=qP_VW1Zc;sXR~uzR-Y2>N3c4q;vNdf>zjx|7wm|$hN%&v9bvMO^u?Z_q|k%w6f!r zleriv4VSipgwHkV%T~xV)0PdXJ*m_D>m#0C_+l%`!X*E+Mw6uQRBfpxQ(7|L{7Gf! 
z*e1i*Lu`dtgulGvD>~)cY&iwCYSU{NnCpMk7%muur#2~I>mJvYjHT6K0QBcu4e;NF zsJCwx3#v{07#Z^4pRZtr)U7lPa(Iw+D2M+Q)mN|49q9Edvd!ZqFM{^r`K6LjW}o3LH4$h12tYjdWYzhqFG1Vw|ecF@AGCv=P7SvVbUh1ue# zh%Q9o|E&p=c`KX)=V~iJQiqwNx@njmQ7vx4srb#cY7y~hZD;V46-s$C0*6tr9FDsS zZ48U|GCw49k0J=u*t^x6^}46WlXJaUyo4h1>mR&bv*jqFo(CUmz?*Hoz*aP3K&YG{ zYzJb+{??k~TRD2epET{WtSw6_;EDnD=Wbw0&sw(C*ep(wpa$8x2&uq6kbI^IZ>h&y z%I4*kD{K|Oy3QQbt3H6dh(#@*{Ncbie7+&O|0&)6iBDl-HLanKZ=J(gLAa>Tx(L^h z&ozbVtHOi-RezPP^IJy;8SF*tF{A1Bc-rr#*8~=NgkNpg@4D77UY09zi6#8o#P$lr zxoMz?V4P@Dv6}BX=N`!}U4);ifQ4BKQIVHrmyHE*g5*shvWCnE;L}Yw!#byKWw4TL z%2AlfyOO)&LWs$}wXMX8EVrJe1^VOgX#8-c0^M?OtS{S)x(=Ue!eCCSQ;l>CMzYBpJqeekneN|{T&rV~Nu;w?RfJ693T_dJJ=iNZGObPv zd)uS|?jd+Ee!wHZ5z&?PVeQHA(WdMovEYj1(voK-R0re~CR`aro1rJcw;oH;qn)<|+)YcFvxx--6WtJd|wzpijRt$dbcK~`z% zj)66)z%ncL84w-CYJHTIE!X9in@fP!2j`l{Bi2`81G|~1=!k|!iOGLH z$LgBrMVN)3_RM`av{Q!{HScc-z|-k^PZvLkI(%@MFClWD-A6{F(u z?#TqMMHdLtlL=!m)tqNfCX=RjPbSP(Wuc*ZG8qq^J(-k)d1NFRFZK@X+mlJzHl!r4 zo=g~!wQIkgOdu>erDit(laoUCWWuBvZoPZ5Bkgh}6Q(BNC*wsYx?G+I{dzKf2yF76J(-Lodau~GC*#Sg zye3=tq7gid^KkP9}`O zRCAs^nM|7AJ()0Dm4${%NHQKgdon2pOh%#`uR|vq887w@?Awz`*{-uE69#1M+OH=Q z2#Zcx1dPyqmoPBHk**K)?#Vi}^<)A=DrYwLWF4+2qaMU=u zY;`U@*+)G0F3JSS^kgqJ?9dF*ky#47FyZR$m!zBX?#Ot-i4GT}Bjbm_HJ|4ynT%wB zj*KU(@|kc6u-wLYF15%Qm{0yIvtpnBd^<83*M{`x)sgYzktSwNstetb$(b>BWIs{i zS{%u)!7Sh^`!Tz4?bVI(Y^z+oh}O0X%oM5w!{p+vUYcQ6*nDxyAcCs({bXXnx zyAAs*_+-<`i!E#oiVXCC&#PWanI`vokuATk1i#d@KSa(_7Uf9BB%r`{kUZ=q5UnN) z8|%@Riynjr;^V$A7fE7?eeQX`-wS`dX{o{pw?U(p=uoTTtlk(qLzVg%JQ6?TtyIpI zn0^dC>6v-Tl?vUNXnkCuv+MF#qRYszyAlJefHliaLj>>jd+k8dv@*Tcb!_$C>-m-AZ%tQRu`gr5wx$R_+3mQ-j{r?xKZX ziQq-rDaUZKHyQL!4f`iMRwUD~Jw4l-^tZaB-no9|BqD13N4B+osdOnNhrizb1N)`K(?h#_2HSb_X4*g0%C*V04B%oy(i; zYhXVzi~%}vvfs_wMJHT65B7J*JG1Sb8UE!oUW8N(zZAhCtt#7-J~~0OxUxtuh9VsL zYELy`wYzk5?!sIHtrUjDr=EQJ2fcj?)b5f`nRAzUdxDjJYTSj0-OgR|PpjO85PSBy z3z@l{yX2p)-(9{ey?UATcs?WGaxL#bZrkdeYu}Bt&){t+6QO_ODak-DA{pqKjTmL1 zWmGR`8LhR5?n_^2-9j@_8xF_U8!{0)E^m?QmjiJA{8j|7Vfn$am;H@_L(cm=W?jlR 
zSCf1LYlU#QKxE)ek`>{b4Y-44t}?rA!BPE$C%Yn+ot{DVW$P+j!b27E9B3hJAuGdo zBG_QrsV;3H9{5Q+h;R&IfIo;patk*r1%pz%t7Wy4pi#;EYl|@O#}T}i<*2e=QTM~v zxRRvpVZPxb34EskcX~1`1`b~V%C4?ur|4j1+QQmHAx`lG{%7PFs*xiJ;lo$va|s~7 z6M^I&a+7M)_Oq?8IG#@`j;l8Iql#lOMd5-f!uYdW-vREDZbZ}HB-a41_R_7$37CW+*LdEX)M!i>7TPETTI z4zsulU@+S1Kh>RGNY^A*nqEc;f9lfG^7>GxCKFSV{@2UHO0}whrDdXoKXYknd6vaQ zRlULD={YpDJON3vqI*ERL@G~t7K#;XSafY?bp%piQb=3AQZEvhmxv6)NYOya(Z-d+ z29iSwP`{*8*zZTOt)2cnUm8j5$D44R<;AM0=WSW0(HN((c^FIbP!02M<5WfiPuNtY z*%E={`hSn$HgB%DWF|`=Y#C=f_dtfkc*Pnq!@-|7;Pu{2i|Nc?j7c5-5hz{-?XrLMgI6fbfcU&C^^)(l;hUJV4BBIN- zL;%%#VzOxBE)V#I3tYppDTa;eFak@>0j^;mFI!XSWu1Jq>L?I=+Xb-Um=l78pQMWb z;ch(|0AoD`FsiQwv;3m(OZDz05*)tn*f7(uSIZhOvCVM72EOgsCNqqR0m7+aW9YfH z$qa$ARR!?{c}1Z~F_O@fwuw#+GRvM2CVb@`L^v8Tr0+aBV7v6RH~G5n92;XY6E=m{ zf$frU6!<4@yhwlyi0zVbS^h#}yJQRnZ+TN(znQpTyN0>v4Y8q;F1##E!%QaSi0zWc zGktKCEVuu_#8e<`m&^uL&BDHXS#$1)?UF&T2?{$M9fBxF&n0Pb1SCmIo^UKwBA~K` z>A9d1WR~ED?UKi_)csQzM=uO#M$E|#+vR2|t938w6y}EQnt84Z+a)90B{4T_myD6w zvP`4y*shVZXe=(+u6dA)vP?;)1#Z|bc|6z3vn`pqVY>p;Vh`qkFA5Z0uw8+KB&DHx zp_b&NfNKo765Oy|kxe0~`OAJzY0VAW?OX(G7mvw@c3sEmhV3$(aX~~<W^@1PBi)==gQRb{T%9 z)lvR>df+1!FB}_ zmwrlf!*)eBg{0;$`#GgGH*ELFMZk9Pm~8TjwA&8duw8~TE{Fuec5@~~2*!OEup_q1 zuqi?}4u~T|%>~#^Ms4j#gx9QW;V@(~Yc z@s+nOO9tWD4ck3>$(+eRX2RAM`o`GpOa?MeRe4KW$i*buv&$w0_w`ruL(mm}xY4wKnfw!>~`GLS*A2@1`yLlEWYxg;%) zK-u3VHD(cxU9e0tp5rDbw|%>0P%L$!!u6GUAbA+9+!5R5IG5GBVr(L{Zw=TknF|}@ z^(o8^+a=><+LrQsQL$YzJx6SpJg(%<9osdM7LCOP+cghzaZ{NawkvSOB{K;u;}tne zz>Ms@64uF#hdwxBy8_c<59WX`3KU(iU4g`2RCe&3^XGD-R=Z@GeFK;1C_{zJe6g-5ZNqK2tJXz&LG43`v?V-CQ zT5#Pkz5*yc!7a)U5&T8Pb2lUdX5FJYf*58XD>D3L zP^vW~eN6=KY{Cx&QMLfSRXWK;CDEiNJCt~O;Z^>Um)9Z&BY<}_;O-!r5aLY`O&Hpj zD4_V#Ghrlm=dz;V&-+$ckFH+1v?9>|u6T~1nqw{oSw(Dmr8`(kZV z>Fy#m*`zjmyd)#hyBlG00&+kh76sBrz-VI%Z0WUTC)n6yI_j6RB3k8^Yg|$bNjfJ| z2l?KGY6)^g>9uB+uB(&t$i*Jpv1Jj(m+w;C6Ht~2zUz}~3#AG*PXHMvwb+_A%J@oK zUhSwGj0jM=yDNZ839%)B^ozLSD-<33U-jv)sPXBl8*AsQFZiVIr@2?243}I~5gnN? 
zUNl0VN0@;h^68}*mc=S2h#Wn7DRye5*HbUC*z)!uk-c2G1ku5K;>$pGLw)u(^|0TY zjL^;T-3hwoofIHku&{p_3!eG;S3?h8SPtabP^bE{-m2#ir%#`Ltzn3c=Md>d?XU?i zR&deZo(_iN5gDX>u;!DWy}rW-A%z6t6T9G;Vw9cjEu@bw`&YihWpBA|-?qIyypYN` z-b24lUS{`jQhkkr-Ljcf$1d5&yQ|3|cAmJD&MXBn|0KF7JWc7U&#*V8Coe72DO-4= zHBRkcJBh(0l2hdwoSF2t2K~`&+GhVIQ{_Fq**!bMZu&rbwzEC#w>QV5*k=7sKRIkYYm5yS!f&2`a=VPE=^@2EeWp^Mqi$BmaSXxborUHbcX!5dhvuFX{C$ZGg(h%lB>@?vz6Q}V{f(_NDsGdZ5i!WW?<^#Y+Y;T zt{1Pur+2}9UKWdr0J54Pv)Y%l#3fcsA9skF^x14-A#vD#Qp_D?isw||-D2gA!;rNF zBe@0$y)N6$h(0O?UwpCAFZ12IS7av<)vvu38-u?@s@D~LI7>Jq2pHn966^J`++dv$$9(L&akh7(EG(aFOj=+%F zudyN~kGlk7@~m?p<_J8etk|m}K4Ke)Wky^*gcbmXOFSn!3=oLP``0oM!1E`gW~>llcc$DdOs>=lTQS_fjC4ZBphSRGOe);Z4k$j%j=%%O5*o=J?8htZHY1ZrP5RCdRGw3oJt#}W1~=wZWC_VCWc-mBs4oS@~+rJf)BX*sSH@D3}T4CeJAYe+#Ps`-KtC zz&9*c_;m5xcki6cd(=TZf}h(1NtyM`w$JSh<0e{Zw_3o}?-n@mxMB?B@;4`7-hYyl zVW^VGPy8~u$C)nPYa&eZUQcqR`FRqg3rm#YH~P&G5ez;@0-6S!BSEu^EkKE{90{Hf zY>ot>2{=bmY6qJmX?Th`7O0eO@KSC6`c?!lQUnlT7rEqP%oeFc6Y9coA>Y%j;7wAx zz(3ui+!=^sa(7i5mARc>x~SYqE=T17+9`IvH<`Ap@5l-s@x)yQ$>pf*qL}XGsGMzZ z4HwT8kt=;k;d``E8AYlk9G??KT+V})ayMxf?!s~69hskAhUdvyju(%O@7+#btg57P zIlM&wchpi{4$7B<@+ey#Xd9a&vKvHbID8YwqaKu z$ZbBcwt3^o`Ccb8MPl=2irD7O6p787DPo&9Q=~SZz|z+2!ZtG6t6+a;U&Q;h+Y`yOWulT@a=TQ8!gE?o@=;w!ULm$DDy17uaf`|FGFg6w1W zqt}VX6Gi+!bOqY?c^*2cbG+{%deO&ui`CWP4doW$g{DW zj`@!OnJUL_hk_bbC2k^e*(ucN`g)#%6SW>C`|%_AlEByz%9yG0$gA=aYey6lQ}1-m z-j6L~;EdWGHnil^ga0frc+{FfQd)_+J6h<@VO}{^g^((0?+*p$UPYO+uLni;UZvH9 z%l0^upA|f-4FkWsCu;CU%B0CeNhvF`c%z>NaZ{+^##4hgQRa*qr1iJR-c1F2F6tt5 z6$~2GC9$5858h0fG-@z!9YhvymJCyH?c$brAO+7FrGn^#w@~Jc8l?S>$lfiAc~qBu zAru!=AQOcDLP|A~?pG+6Miu70+5s`quPB(*2s^me@!S9%jgQMM!d|gxVN@b%F_RL5 zs|p6^twl0J_(BBO(k8gm?DmGke9a>?WE{2WLjft85=4%f9%rS85(B zP|o~Fs^kXar+Mne&*udNp}%6Hk3L^@1hNkY^J^Y~WDqhZGY>fWlFyq*Ac2WIVIBc{ zI{CbL1mZO0iSOm)inj_dyGl$SBRev32m5{rH8oZ)u~@YAk_`EV0JM-%wV0mUgtxkn2QS;hAATOyBuc2knx%idKfPoGSYFF4@;()j~gL| zH6o$At4j#Yf+1AQ$w6m%6MS8`_gc#O*WK@J{ZcZ*TBJtbPju)X$J9=Ho^vkR27NlC`5ljpY{{`a9Ra)mg6Wof`Jj zU3P=?n`!**tKKtb+fUd*`}hgFt+@Yge#M=k&ov* 
zt(kf9R1pd7HYy^YCCnIAL?5tK5q*HQIY$-YG-{(Fl5p+f0ZH?=D$1#|998ool38q2 zL_TY+BKm->is%C@LyjuKS=>fNB;Q-ADF3*vit>-M992^hNo+PMBA>NZ5q-c`Mf3rd zAx9PABEm*Rq};GnQT}mT73CjiIVx22%?LEHh-->zLOQArt&#_bvMN56bNBt##$uV2 zs0}T1XwnEb`8-K=EXU7sN#40sdM4+SJG0n?rLkO6RUUd7EN5k4cf1`Za8R{@`p|cd z!JeW@{OYmzE5|NZiF7YCFZ;_?Vt<&Oca6&T%T?lVfV{0uJubb+*jUJuy38iOk`z80$>g=u}UcVnW#R{0B-Tg%F%ebnMJ+6 zvYCrSh`0h!$^|Al8jzi(Br=ueBompF10u@QYMnA2OMr-~x&v}loE?y(A`_9T15yz= zW;%9a=zHf(C(!Y6K!zo0ICm2jMJw{k$sxvYBs@gL({UFSPe*1|Zn27^>l&*BB1Bvc zu}Y-m9;-x3GLgB(DzbKoD=CL4!w?{%@}%M(tJt~7MC9s#jAOeM3(Um4Viixv#{n6Z z0>&x|+quUojs&qNhnUCHaTgU&M`l%Sv5KSX8mj~%L|hKBN~Gi-t3*mNk-5bxvWV{- zs~7@AR1UFgmiPshgrO|j||4X89fZWV+Ft$u(-I80!UiVhK47-G5P-V|hJGE4il z22=_dDz-C&yC=jni+Vm3xm<`32b!Hsj2~`5rC`E|k(xN_h3_l6FvPr_mz|kJe23OxX*ixv(OVfSa09%utfX`O-l#i$>rQXd V-|F@!lkudTFNwbIdEVac+1c5DS1aw6gqF+0WP&lYSoKct>NOms(MU+hMh~>B6?!(kGrg;p zdb@}2o|Tq7j1vb4u@g?5R^2@A_}OF8kc_XFk6N-U?T3jfaDc^WAg3 z$+SH_HSJH%^=53pAIoC1m6X)I1dFTLBk+j`US~I?j;phSQS8stm4v7L>#!8XyIKqX z{Q94KhUFz{y`aR)0yI{4h2o?4bX|PhWXHz`8*q=mKJLqm?tW}@@g>}kNhmAk8G30+ zG5Y~lm(nnT{;jnD7N-6FX52WmtF;HB6DSZheH;JaB?upGgs|f2_NlmeeEU=q z!XtW52O%>UsZ5?oG4hZid@_jO&TbFm=3U#vB(&*<5FyfP2vM~R1`fa6fLpvhH*Vgt zxfz0Mq>>+V7(B z76hf!1E8qYsQ3jj6Ss#~~8D)bt!M}^(YDNaJV~%<`>-K7HT?zY8(C&59hP5|~ z7vqJyU;n_Zwx~g&T6l*=7&*}!JsvNl|DV`0U)YW%kg*2OMsTM=wB41e?fYwJh+ctq zeAp&L(?JvseA>LH|Jd-RZ{Sg=HZ^o~P%hgDzy#C6d+}9n;-CHCqn1s4FkXr_K0Gg$ zjr>O&@Wv1p$Mx=FD<96LiBC0KUJ_%wa@K|;O&75y;sfwZBaBVchCYmkpq4%iA=Fgm z5J==FZWS8#nLq-hjeU4i4kJpgx%aYMbw!`qC;VCiZVlkixWNx&MySOPLnxR%nuPd; z2wrI`M6S0Gnrj>~xHS#K&}qFTcK=@XB6cY7kp{GFc@vv`YoR0|Y*5>LcnL+`fEZMh zcAF-Uq$pv8M#Nfjfg-MnmybtqgV_qQev@4#;#z+PR0_!_r5U98|1EQ0l@G_cDCdou zo>S(D9vq8)*b*y!asnHX~%4SDf2=;*4$(1DRhse6jFvgA|Y_b+WTZ3T&vI zwLuE32%*kQ(c=?ox*-T;aIirN$cJ?Tv_}eTsBxxjZgOo%rbvOIcn>5}8>GNu#+FEd z1*LYn8Ni4XSP??@I3FR}AO&Q`TpJRo&nL7|JE33auMS06Aq8Z7u_ZAUbL2rYQb0bb zacgaV;(`>AF)MElz#yCrQb2|fmn^L%7bXmt`~+-}f=<@ea|-fC3NX#NAq99i&PC^q z6yzM2JlP2;$W6&w))Poe{TvSC0@;TA95f8p}SjQh& 
zAO)SafD{-tywbMA3b^=G#2VWMl?MjAu!4>QRzT!p+%eH*Y@pzB9#}yqQ$)U}z=#!4 zAvo-Bu-R2`bRJj%8Dy?l0U4A_PwHI-T+~-JH7+#;nTNayeQyUH@K6X50TtP=wX40xQnQYXYIH%G{gTObcz`DdMb` zh!xIhg%w!Ml)Nrr+*M#hjWgviqO2V&AbeTvDj*-$__HNeU_r?aE3hI|H&#F<0XA3x z`LMbXG2t44fCVxUJ7B>ID6Tt(k zT?M&Hs$vEBF*8<>ldm5u$Ymh+LyZosb`|7Y2p(u`xAQr9VFfu29S1g8!O_ZC0d7$f z_OS#0tnjI$pR2t1?z#Gr~^pDD_KWDwQv1S_n75YY+HeqVtNHO`dHSb+`66f2-bh)bJ81~Ih~ zR#*WcrV*(nR$xKN4lA%C6wDq?U7-zDKxWJ}B9eB(OWR^Y(ei%9!3rxNyj8{u$b)9A zfP7M8gTb)^GG>kY3dj(Z_Z5%_=M*Mv_7xm6U@$!2K@BI5eZHXU*<*EP?7`-XaKn$QWDeE_-r^KBMrYo)o6XArwW>0*t89+8j z1@yfGzLe4T91(l;A^fLCK(nXlLO@SKuPy|n#`bmoo~hcH@YRq)rHDgKCLd6|9QNQZ zUV|OO?=|4{p~3)0Ap%+&dc`3iHDtF2X^_wo`G*$xiuMI~Npj`d9)HD`Iv5ESAsm}* zl1{nBf=@NzST*wznjy_{nM6iOFk4kLGZrFH9;@iBGx&T2oodn)x~6K<3~25q&w}5K z;I)>l=?_x4i59WPpc5adQpBoGQ;4b^r5L5_ewx>+IlE2APk zmtul|YDDRw%eEGnsY&qWDpE=O?4dATU#V2x0+Fdkhyv+3e@Cj=x8S%{1Q(8TjrqYh z?DJREw<>Vco!A;$-VUb{v)#|1si?q8RBu#JNv7HQ8cX%9i>Ec(2HpHXhRS9_XNgvh zeGYMq;8%qcY>|WpqxRW;Z`7T>Wji}Yxj&hVC+*|-r`ranPxmMN(MCT$So&2`QD6yo zu>`*1!Cs#P=YzLL{E^&B9(q%BZN{v7YBKI^ZuF+J^mys^Q~kl2$7l3F-g0leYuDRu z<~u=EgYqun6$Gb=@OMWrxuMxQR62Zr5f)a{6Rr=ELl&0S7t@pY<6ZF12ySQX*KIPB z-snkkR&h)Q*}J|64YF%B5)QCb$psyg>@k(>0*omU%klIk6aGA2C1)iA)tpY#D17Pw zERE5fDfvAr&GdXz?y3OgcvAbyr>_@egEg?Ga=HCcxZho+$lhmz4Rn!7Z`hl3Hy-bA zpj%D0H#Yj{Vv>cs$MR(4>O6TBrzqvftn8ZVt{)vvZ!&`%EVWn@?w#myk50bV_uo0e3Q( z(f~%=XS-XI{uCW#-+moRr^EL7Icln!{~+id^=EbtQQ|A!ov~NdFZkunDq>XWhQ{=? 
zkA97n1F_6xFX4BGgD3lS>q!>E_~W6K>Vw~pfV)o0N^s**f0}XD&Jt?f{FUnyHneLh z8~Cq&+|4B(%Cu)&Gkzz6)yjrRwHGhkJ{S%YZ~c2w$Ye}it#ui|V6@qPnifzh?}Q&~ z!1a}RKi(hqH_&nQ_^V|l$a*!7O+L{e@z7WyCsnolwg&VoONI6{-MpV^ON&wR$80NZ z-A?b`yZqk>HtQAvQ;^T6P*Ui-C26bRUp49zK)`;gHo>W3C{3UC7ZGgKWi{n4t~TDw z_F&l^q9y@dR#My&YE=kW1`cEMSeH29$D43p0Iq1tBn6KbkWd%5$;_^iX7ic92Hjwh zKWYS4%sgtf9TKolh4^t{+k>N9QwPXv#u}U|2}{#hS6c40i{NiHVBJ54;L9&vD^`Br zg0B*bGSXTI=;Z9`{ro(9Qn81Sz(jnfsHqI#+ro;QjgT>}6u>$hIuUH(s=Ck$m4bgC z!K_9sN1R_2v;DIS}SV>5#j@1q+w8zo!Kr5Itw7s`>98i{j zZ1JUEwzaUFn|M$DO~S2f+Bl;nVo1eSL_viUrHcS|nx7LO>!XMd**ZwZ3jtxPE1{CW zl&(T>bNKCu-w>uW1X`hx5mITtHQ=CNFYI8oM=ubfaDMApHoNvb9}Bt{Js9-yk&-`#*mLP~13y@hu9 zotccc(FTwHR=(FMIW7TOebtfNUjxzZ3jAVIwi@IN=MXdVOf3pKET89VEjPoeE?s-$Ly$l9zM{7)8P%ctS#jH zg}!PDF3G$!c7|%~QFu6huqKUlSCl&YhY{T4tus`4;S|emsqv_W{!zr=E$(Eu{G=%U z=P|7>G@u(!R_kOl_8oaKdCTO?G`>>$>m;;}67Fc~Ao6W>N}IG_1}|N`B;L35m*l@l z_>qLJCZOaOa7uZbSM+<5O%?7@U6nLGhu0I!&49UTMS8R?@5H!aRS!ojsd?2v@Wic5?ADmG_ ztErA^k47oGtnwobcqp7+#TFR9#+BUX-)GaVfn@g+;b|gZg8%vSGq2VJTFlQ>j zOtxjGE%a->75ft*RFB?r=aw6=>g~38m(DOKm=I3C&{+rs$vr$D7t9I;otCtFu5X2c zj*Io_?-D#gR5#{%sp+y*0dEh1IMFGgZ}3|cf#ZqgMq-~MIetB;AW)FO>k|Gb5-Z#Q zYZ8>4)DN@}5wWyLb#l?62tF2~jG%MOE1sf^2q;dZq5#>1P8DYp+mum=lIsf61w((B zjLs?j2EAEAr&**|~TB($IdK)h6QQs-nLUd3+qLy^{l z!+^YWmbrT(fzoC@pinba1ps;JbTbz%5@uw%bQlEWrIQdb!Ehii9fI`00YF}nCIWds z90KH}bXDMCP>`37MkEseIHWX1g%%;M7Lb>YoAY9Q`dFB4hje(A;!rRswz2s`J9Io6 zYeJyDl&V^JwV=LqT*{3SqeqALJ?ak~iKl!@I!x|0p;3SK)Odmp65JSX_1oMz$N8zK zg?U%$EI$wh*ko3!P8e>G+2vq~>=054-Bp?M-)_R$x-=lo@2V!WUONcTQ|J}r(w7n| zYr;Dl;J+fLm>@nole|K`;Xe-bZ&h@Zznrng!nEp>;XdDhhwC!Pv!rwP7lKyUiT`Sj zS;*Agt(%O0Q=@3Rwt?W4EWG+TpU4EZw)R(Q0X{Ie3QhQRT`PWB0z3|1>5=r2T z;7@BbNeWNZmRd5UCG*XnRCa27O5YuSEyPxMMfl4rzM@mE&6ZPOt2Vucfw}%?jp2ep zcxsaZw(fCV$yiz)20(wl*#Q4-h8ZjVL&JeZ(v0{Hq z&GD@qz2Q%q_F2}JB^7YRfckScu%u@#+iGkUr$`hwd_m`oPc`9*dc37Tx(L^h&ozbVtHOiRRex1s z#x-=x!9KJ(bu`@?Py1cPoqrMhTEl+VwTAJsT#-vG=6C$PLaiKh<_el!eg)#(G*Cn^ ziyLlvAqsaXo=Wdbf}gB_g;@$wk(XtcjRkOmR-2Ex{_?lQJBfQ 
zlDpzUh{?XSt;C8fw?0k_^vB?l_@PP#y5-#LT?cZqGdpI2BY zlwgTQuEC#GICxMlA+8c&3OK(1w0_e^K1eEZUJa^Can)_*8Wj)x1LxM8*agS6=nQ#l z`G8}pk&eMgHhH5b;j%Q-{do555fcS z{T>01h_0*;`&9LjrtBiI;ELnYl=Z=q7IJ3odQJ))Pz843(fHv23gj$@;Yicyt)*v_ zc=|xO45gjOJe)Z>%+^S9U9*?C*VWnm>k7xy%4b;?WR<4wXghV_WF8_nR6LSiSk}hT zSO)K^a8!v|bc#Iao36^*sc^8XdDc1k75GqvJ?v((^`}6ITB=C|hElL=faa*htW$WF!OhWIS1w*Mv)f zK%zt`iq1QSYPgJG0aO-6Jc%+S~S9PXnLEG)nJ)4|a z@16{YT=$+V57}Nl8PA%^ZVG3xbsz7ZY-JujnM?x(uRG9_30y03k>5I*Fa}fUdG=&7 zX?pi$!faI*8Y&^lc<}7Wq#Q6AiH5DviAKiD{CYAe+jaJ2!hoz@`}Jf3VbLjzfDyVU z69#5DsB(Mh*r6Gq zBeN7Z&wTbaSz*Xcf5u9MNjs5Gd*=sPCy_ogQ19q}6_;l#R{%*tm3O?C%@?r~H zgJ}492D;zpRWGGXlY6|#mfu%`UuxPPB4;U!awKCCP+&Vq9`X{1R+EK|_2`R755WEL zao-n@B(cOk_q@;Vg+Jc3RAGbz18a#6^HJOxs<=nt;rKys#c}px+jD)~GxL-y6}mIg z`nW)6*X6H7myz+=mDuFF3*{Lg@AZ4_K+?1_z1CH1_8Xs?_9y3hv%z>|e*=t6ES#yR z{HzEu>Q1VOt|_#9KMa1j0dHctshZ1KQ&S{1zYN3?~EP z3zgH@v2uw#=#|r0f}##>(@e84%+R@yogV%ke!=hkJf$n*&+EI~XgM=0NsTJ2 z3j>xiqG_YDI0WyH;OcUOIn|%_R?wlNPxUA5!DuiW^oHGYz2P>x^Yqs7XgX`%L=*8b z=)^}#iI|U|+)X8?2ED1|qJ`6^Pv2~J(Za7r@DlBmV>sEH40@-A{gWNNClt>!bxgHY-c2?}SOs9i0qj(X2 zP4pb|S-nh*(_!lhIDnKn*zC>v-JBH!qpI#rMzh~WCuSiPKRFnk;SpC)Gk{-;U|Flm z_N0$a&@8Sj(u+aK1NAxI39H?uqjQ&iuzPcM9$gvSIHDG`=iDSd_0%&zcr&QoC7&|q zE@-CS)*amEH10ygZs#ufr&aDki1F@HqXSfPMUA_VncKNb{^|PNBy0nFO;3w^(iE!u1Sdjif1d?00St%Hl+FdED zl>`lsdF$flJlBY>j&XMTnLxgu%BPaJwhNV&L!4+Z-=i(u;yVzVmR=TZ~&2Ln1|0fH6GNK zbTx#t3PXf}KXht07{0hbVC$L^1gqsN24lp4KXPg^7$(I~;86B5)Lfbj=J9eh8o>60 zgoAH5wHGXFLVe&T=Kx~Sg#@%OMzXxhhC@lohXR2Xjc8|JS+_Us&d|LA;|XarUPEhz z6==tYZE6KN6c0HxvwESiytIbkhV&m>Gc+}ugB$RLNOseLFUQ#eKoCB!Bz4jLbwG8| zu9l=GQlQfCqfLi4k&0MV=`Gwo7!GCY#J)lk%p{Q^Xo@FsnRd29(dJau3 zPe7Nfyi_7jSZbgIt4LDAqH8;=Bai}$V9Si}6=JDbY>M+CQc zbHyby3E<*oPT}(#e5V1g_hwp5XZ~VL>hO=sIISuf6&;I@I^nyK13F#`142^smwg!S zOvw?Rb%AOS_p}c!r!>-?aDiRZ8@eo!<}+xiQ62cI3s}Q)#sv{;W+c(ZVcm=HSBf(;`V+;supasg~O=7b>OCut4%z?@|7 z!7lpJ{G#tm_3i}{9KPn*Fw?LntN|PSl<#*9){F28cE6%WfP`;3w#f{mVt{aJ z*cf_lZ8AfkY*muwmJlvqJ@%w+qEmy+vL}QIUwH?SgvMgicOD(EU3%J^eBC!>V@!tH 
zVQiO7LxF$d#)||pD(1Y?tB8R1tgdSBeTE zWNXi+8@9`^DTWOP#F3%qg6%SoR}2>(Dv;N|I)ie;W20RdXBxvMq9eA;$w%D8?67J+h}-LPGHyJG#=E*WZvv0X9^1^(%e?UDg@0NW))D|mCq zb`69}EwRJCUDFV_AvSf6URwys6v_>=?xE*ID}Ad=IzG&}HB9e;O1%k<^U5YBs z*pTDCU4~6DY&al}3^f;Qmw8+gE-Hht!C@ObKgWH$EOSDT@RM{AAUq6FcWl={ zxYQCmV7sOv0yM;A2#1E)?MwzTV5Sc)RdHpxp*@gf6elQ#%*GA~cEoncAlL-`v0aXy zOVZ*9NRpQB{W6!PkBm20Y?lm*rS6})IC@2m>m?6^l{;d)+)QP)t{9t0?d!$M&SJY} zo;hsSi_`h<+ck3Mj_n#ri^k%D?V1PK*;M9+?Fw9R$;`{ZN}ZQ%-z{G>86t@!UpnsF z6_^%#Fb8~5py-0_3M3>cO%B#rNKH88GECriJd;6WQ%Gw5vNtV)1mfNe+da19*e;$y zcx;#9j0+-MMXW)@B;M_WUFZCj!jA|7J7T*Gn_}2-KpYurF4!*fxOKQNs6ZJiTtGNH zKSykrWljhZev&Q%fICk**vApuW%!lVyQIGLnDfqIyBu%Njqu=hCIiQ)7$6+5Mvk6a zo283dkyOP%>)dG#mJ!KLqScq85&U_@b2lXODMN0O#bbZ2OXY*4dx@%aFA=<>2|o}- z*#h`h=_C`CM3b8AFsC@^Nc|Mr{Xz+YL2-oCVaS%wL{*bOqZW3UE85%U#x8^-Cd+6o785H zPPlzkZ~TIHHNxZsI40qN5%ny+hq@ zhz)qDf{XsvbTAx`G=qHl>W}t`KQoD;b}@&eTKa$J$ZSVPT9f}t#NAq z+DQy1k(?^e;LN1AIp~jO(>D7znJVw<&F6zr8UY%_ievceB6MM;ADa zHqLif%7JoM5kVY~@mv?J81|>=*Qh_8p^Mqi#f?|)(kxGBm!`w}cEcN3uCVShnOmAK z<47<&-HCSNdq;b7`fE-7{HFWs)n;J+qbEEn{!C>qrl`Y;GFumM-5o-!&;SQFTeN zCh-*b12IS7Vj%9fvu38-*{MKG^dU84{nF%GfIv)+ zbdwpfF%aX2T>>!~)-ez>k3X+W*eeis+B+49u{xv{TyG#Qj=+%FudyN~kGlk7@~m?p z<_J8mtk|m}K583?Wi6Q@UP{`s0>E&oqKOUz1Y+|3wak!>ffzsR5{SvLj)9nY{CQ=< zUV-@Njs;?@jvWZZ#rrBU`!xn)^0-SNCeJztVvfM`%8I=L@v-MpASOBt5Qxdp)BqVuxjnE|z`Os4FQ(;A^|#{xTM0x2nNyg3dW&*j%^UA=VU=?7$Fv1zQEutZ72G!7mWWzE8MSJd6 zgZJ!(q|AC|+h@0jaTBeyTP@(~cMF_&OfiOW`I{3k??1`OFjPt8CpA=Zmor_w*F>1+ zJ)Y!B^YbJ~7nUf)Z}giXA{cy*1T+mcM*>|JTYwT@ITAb}*c=H$6L5~C)DAXB(!3ON zEKn)WVAN9Wl>24`FHr;#VHdgNW6Tz*L=);H6>d1jNeJ-F61+gpa=dW&_@1rgrK(CQm&42Se@89V#h`pK zD37w`fwr+ZBD+C^hQl{;Tr8Ed2+?q!zW{ch>kYU2@g?i~miENE z)_0#BZ}x}ms}XyYwee>c;7~^LIHG&9)1LK5)A6J`>YeRR$*UL4n_qi0#~kf%y<>q) z|d_}PpwyjY}a4T22>vUD%$CF(_%*6ff_%TH^1w~RX)WxD$_maNTtQ@#;`Y?#+Spv_|oi9rtE|G09jQD{j~*n zQTDO>(Cb9wi6VY4UxN01o)jC+z>(F)p{U9~Z9w^Ql&3BM zn5>j`1ksQ%ipe>@;a63Ul6U+f_@cnrQOcO9^2m$r5^G1bs&Lq?k`Gkb zF7Ru&|5;%0m^Fi>v=Vi9OfeI+2gxK~7=VLF@~Oce3d~(gnX|74MfR@M>d$4*G0D#g 
zx#>N74MYuIN0~I4C@Ez{7O(RYe*W@65dRZ3cs*s#s6kqPi|kz=T!Un&KpMP(GHKLc z-a3dZ-k@dC#iQ^*3I;W$DDN0-vOU6=gZBQ(H zJ_2lM6WnQbd&6PAW|6nFSiQ*s+~5*rCztX+9}$1CB0SL%em*Mvd`$THTH)vGgrBb$ ze!fBY`9^s6<*+aq4Q4qzN4X^y9)*$YCYnwMXGY1jwhxhIU;NmsHIEhGWqu@8a*grR zJayye^ICw=KVqVfK3{bNvJVIIYaW553o<7&4>XXZhTm=!a(yWQu;P%CD4 zYbAQD_c4=toez;>E-qvkrij4raF`84#%n(4VZ3a}NXKD5ESX|HZiE=th=lH}E+I4v zhEOpl2c6|j1YV8Joy3JT5=4=xi}|<_Vp#K$Q4yxOIl0J)#BWbKV#xT(sK^h@j9l!9 zBUH@EMMfk8F*9;eKSQRNj~k&tc2uNF){d4NOE0?l@BG|*KKaVVuJpg3|Lb4azjyuV zJD#}q7oNT4`+wzkmf(l4jK6pVtU?o=h!rnF^z?FcBK>+EeNE8Ux1z7hF9P(-g>>m= zHCwqkaCy{NSlr#*yA{-W$m$9Rz z+@~1?bH9iOMn70^e;D?mN%Xp>$CIjq)LBE-#zsbHyf3tigm4DJW(p+7Hg-)Zn7jA&v+0tNgDG<@^&Q>Xx(ZSq!H^?I1 z%CUS2;J;v3fOXjYy0>iih9$SzjojuCEPoFjaP@yliDXg&1F^y{hg3FNQHMmD!M(&tknN7egi zx-~{8IQFRZ@8bV(iuuC}3SsVpy`4R&Fq=y;ycW_u*d^xlk?UXEUH1I@epXUOAe zz)J%kng;RJ)%^gSrED}5?EqO#y1fu?g?;x9dmH`7$HUG3q-;C9sVrXsyRo0+FUdl> z%!S zg2m3lExa-#k6 z;)(39Dsw?`F#iPhD9&=nP^?2D!7Of}NIH1>&>oa5_On~7&~L6lh>EZ+=FpzxAnd`P zn|AVT2PmeK4G1tCOlPl5-RxDbjE+R9G=`AxZgCk6#lmW%-Hb--5BnRl_!8KQHa%<( zW`ptQ5TqJNT_pY5PwhbEYWKqK<0;deUm^p|!~aqmnN$AQXz^m8zm`b+4mfxm#j{C& z^R~h1)BOoLzO}D343&qZPKBz80AkWpX~Ryx@jb3LWj8sI0P9riA)Zozw}0r6JBV5D3Zx4z*1*1 z_d7 zDZt6zWY9Y`L|a@?jLGU3oYm5L=>Q+6gp7y6Vrr_h-e8pGle}1H#JiSXh}PO4Pc0|g z?#TWLc~)z2<(iewHAgxxJcqwIJlW_RK6>~}@4C$!PM_}ekK7o4=JR{tt#B0!zl={! z`;&9MROvArY}vgQQS19k8wpUMw|X$%xAd3fzX@z)1mA@K!M!d<}PrthqB;A4t<2`Agm&;r1f z7%IZZ@>$bEQ0d^fkV6FF!;KJDJVp68O~OeCkLW!egv?+>q!sx{OSOgblR*TBQY?O} zGqmZ35FyfP5D^*-9Dcb0++p2Lo)xE%QV6b*N`jC<_Ge%`gmB?=5tx?a^VG&&3RJom zOt=*lhaN7Jo4Ao&ALOvepHCLv*8t77Lau1Q2)*_0gnR`4fOsdH3!%Wu>-tzTrDLii z_;(R#cAIh%6K(;sG@!ZI^T#5%&XmTp1uV8yd@H(=gTD17xrl2mzv9?=<0a<^nw=)_ zA0qxDA(>sJ)90MgmU_C6g>8ILFVu2osT2VSjuJ1frf+|(!A~{J29>Eoa6r5Lv5s0U ze9oEkAiMZJ8o{fq>{ReIE8zrSQfH+9(IRIMU=@E&RK2q9y{b~kDcC}W%p$VsWjdDa zWVW%kc=l9!5f31SzR#qLd?_I&YZ}Hkj`E`=6MHom{9P%mbl`OjYaZbp7GWeqiZZ+? 
zv1PuPpslpw*@(YkTN2RFnqYUr#u)3)%> ze(=#}T0Ya{dqq_@5{#FkjW2iTh?kq@z{^J)@J53(Tj@pH%5%5Id)YMcsiwDRXjjhb zTwQl(7u~S<06fzOW7D*u591-Ir4K_0HB|~1AZm!b)Bv9eBtY8Ohd1RgqD-56lV#BZ zXMF+~{8|HU4dBnX!4G3bsKpOMNZaIN_Y5P%FGOJ6mQFGo*ISgze4)|DlVlQdWHbv& z7vg0gA8CN;^2Z|J3%!Q5P?8WfSh-op(k(aZfua>k`zDa2ketwnSW7O+Z15HR8xX?vP{3EJ9=skC=d0)&dGHeNlqHXBBT4oWKNS6hbH_85Aj? z`_EbQw)#T97XU!!z=lRG3Sos55UOnrXZ0WjWMq~f<-&V7UTQ%Ks_|lj6cCC!z4##o zHX~G8_Aj;d#2MWn1~R|YJ}AUsg%pq{b+WTZ3T&vIwLuE32(iomF5L+kM1?n4Aq9kp zPJs4Efekgzl+8`94M}brfR`u@c!L#EU@>D$q`-nwJKYRmL<+13)r}O8Nq`MfKt9a1 zAv=KqkcqTPjf@PY02`!$kQ7@IV=+e_G$RG%lNz_yb{k$!BwxKWxt0x5K-m)d6l=+m zJmrSLlHr3O1^KI&Y%#`d65xzo*EMQ(6Sy>ucwS(G6yT4lT~s$~2}I{U+p=?tD1#MJ zK;DMUjufy*3h-lQq#!3>(zw9oTNNPuW(SV35bfQk-mumXCEb+MbkB1qa|1&j@#ebW3ZlNC1-6l~dG z1)WS0G2T;P#0sbothY53v$5G#K;Oe=U3$(~0ht5aTl-@LgsP@~XxD=kkdaw_{ICK- zQOip$SOFQ?ju#uOfKb%&!Zld2@PdGGSAoq4JBbyL`BldX$dfwR*<%GZ)U2=qD?+|l z0U5*#D=B?9BF0?L-qsTj*(@?apQ4eVL%D#(u@V#`bpD+mk$;NkeHMb~uMUOYRNt-wJT0)n3XbM`?Cr4v+@hNI6_7B4u>!gWFDz(ag%uoi*jGU0Vtl)T+umXA>8SBz>#tO(B*g)%#6%eYL`k^MQfQ-!Yb4MkSvHxS5> zI@#G{1vb>IumUSWzP%Y4#0x7RM05hQ#|mtyai(m>3T#MvV+CZ8AXot*rV*(nR$xKN z4lA%C6wDqCPHwfYfXtX{L|j(82_d^)zgn~^uZ>#mD$;<87s)imo#1&r3+S&Gc5N*jfQ1DX!8a_&V}HCMyw#8ChaUeSk*9P zg%upDj1}M()r=L8FthXf3XVBo1w<}ptbnnB!g%L_6&$OC6;L5KU;+{QE!s z%mv3)0V0&~0W{&8{3p!kWCc0_=#;pV!F0v-U?Q9l*z9@s^p#V@0sthkG{cCPA`IO86+xrT;TzpqD{~FJ5s^cvW7mUcgpBGQMSIuQhlYf7;J-X{*pyyGbxPJ4-tvrSA`R7 zkrW1__St@K)ScoBPrCidWISn?uRkf@h>}zkSi)T_fp2&yShGCKA%eF@{E^&B9#9_W zPtc`Y@Ffp-aqG&)U$zdM4d`kJjnr3*dK zjYX^J!MX>@eOycHi%aOx<#-poGlJV$`*oWPLYbZ*{3qg||AnyMZU z&@ss#Q^_tGn-Z}cPcQaKxbt|GoRw_0Fs=j4bwB9JGfe<(r01J*R|P1?liF84eZ3$X zc7l8q?sr$|bcQaB+2~F;dc)qNyYYB`9wA#ll+9|oYi`;AAXjOdJQw~FgR#>5Rh(j=H%S!Xv zeSNvZ)}|FMD5=Ys5O7RbuyXq|l^-xC(tJ0(%M>rdMjk zX{10Eb8(YadY&`7)POq~Olbh4?Xz8URofKZ%-GIuM{J+3TT`-ZhH!mGflBl|?a%BS zqQqB>FMq7+7yR;O6)_q$2&j*Kjg?&cB?WnN}mGkyo%30T=MsrKURiqyng|6UX_85b^i z8F%k$8Zs4O6@IJ%*H`8}zZx>XBGR%FWW72Iy@^jWNIW!F$VnxYw>6+&St_)r>E``R 
zTUu+8KW1BT>vno3sA)W0*1P=Q2sY~$0aK9IRwy}lO#iA;p8x{(Q?&_>?%}D@r~O3) z8+BPtxr?ifcg}4TL84W*tVViuCAu|wIM_JfMfc24+g62uW#G^>X}Ec;OC0dyO}H-r zS2Sgkg2xL;sEgZVX4gow`OIH~I`pH+SAr|TA2k9iW*)WL4hfj?X=TGDt?%DmDcdd-)g|Re++NgsrZGN&=&zR_OwMJK{HlDGh;ED5ONsLRMX+wgh+J zGmUb!6@I5WM{~)eJ~$*=#o$)&&05OqNN%&A`6;a1r0P>RRZ>{M9%KBfurW(HLjhN` zOGOwf0HBJq2rAdvg)geY9iL5%uLPL`$RKG#B$gv!veLfJZ_~YPP+FcKMx|jJLPCoBgePuTyed0<^lD0#-+I ze+@*tEAWd=*=mscY_Ui-#-rI}JWPesAE67*HqJARyit)qmy@H&WmNf>IjI;Gv(FN2q^UJ~zH`b+X(B>V^{`30O(-sTnko@7&ndsJ6t zk!qbHZRu5&({D83u5jjtdRdGdhZl2Fw^tC;QuZ`j*cyH@ux;TzLVO?w^U9=>Mu9F2#&huJPRnj)!xNJX)X>LGhu0I!&49-UYyT|=8N5GeaHMA<%qLIOAm0u(4`r!`@=3@tqF9Rz=qyMP zNiUpdON*)16{D>5`Q!tE{V$8jIQrm>5?W0)R(ny)F01@V10D*eSFr`ocjDdW-)GaV zfn@g+;b|gZVE&eOk zWAqmPLflwtEH9<+<7P)ba(=DbZ23$GI3bl8JJrI8gBwp$mtZvqszpe+%mFN%L&HZx zV9r#4$J28W;@#O7``4t4U?c4A*S!(|NcDGzvr@*h!#E*pPyuixUc|QNyhdg3rox6# zgit+t%bi~;vMK@j3Bv(=i`D|p`g=}cF*;#P|$I)KK)&SCy44shlL4Zx=na{ z2*im_34Md#st6oUEH@GtM{?M{ML`4+!LJ7u1PU^EUBVwlVuc%EO@fk>`hgZAqQX+0 zTy!XckA)~B=p6Hkrzj%=iW8|QKsKRM&D^_?5LX*mCz*7?&>tqFb4tHKZ&si$mT$9^ z@<%ViWtJ>=~f`37MkEseIHWX1g%;*j3&=~y&3UoUEkRIUI=o78C>Rvm*!-a#Iv$NRAy8jR zRjs^QP+vMO51YnV+`}PM_{i(3a4?q{HOib{X|&PmL$&Ai<6CR=>?1 zOq-vIszVYA(-+_e_&^k3lUb=cVYoqNmxCoSD>d;DQej>?X5eo(;VetbX+N1_&eHs@ zYC`L^g8)5+f=MCYx^1C*X9N6Kq^Gb>X6;w=LQ48Hyhx;4N-63EEZIo_%Sl%zdv8Wicq(T zn*nV@IsEUazIu)BK(Ak+G$mOlF+ls6AFspF+a80+mzqvN*d0v1&Zo*c* zBGc}qtj(En{*pm$5)=)hu-6IQx%W7njyJ>X+*1UNoKJfn`EN~_%v<3cI9FQ%k~+*z zsCz`!y$Pq{H`l6e#G}2cf1*MuZ${uS>XpNBcc6`7@jm8l?>9f%eCTWXGP<>(E6(zMUAwk)ZDD+bh`yMZM=YuQ#~vp7Yfu;B|j zXMCy&Pt@ZrW%F{&6}AdsU1y%^^~n(qylEB~BKTZGcK=hl-4UO{#A;eIHokQZX9eM+ zKI{WlITl$#2wcOu&F}j$?Hr=kluQlv&re?e4+`1 zIjNFMsMc>URN}k4QUR&3q%3-EQ7Sz_)%j+4EPkXub)r36rrW3gafSDjc*fCDS<`;^ zSUbJ37ALv%yR5HTBHtyp;eK9W!P>y7w&2ey96TtO5LXE>1svahTEFQdAEc}Xv}V3< zvp4H?Pmd>Od$V{E^>eR(;8sm(wN`Nb51dJg%TU^h%)^6Vr0Lz0 z3A0sMXsDh{#)D^1Cgorr8A-;AeMa)_$)s!>Qj%9sCJe~hwO>yr5Eh+M*B=$ZBy_(f zOp4*wyC*x^E>|*PdNN_sDrYwLWJlZ2g`P|%$SOTqCqPeTDe}TlOMR_rv2Bg-*|Nnp 
z@1Beoo#=9T9`x(U_#v>#d-h~9lIXo+-=2&otMZy~DX{GRo`t=P0VV_57L|pOhHl>Y z31a@edorG=N^RkKGJd>Vi7WN0j>2J~doDRMrXJ9{C&MAvy(h~jv(v$I`6J0JyPsR@sq$iV+4A7JD zWK~`hE(Hef$z%*L8PK+plfB%qLo+}}W-0J;XS%aH7kR;n z4i}^&T-MPq*u2NgL zRWg1&(xl8ub)h>hIWwjX5PrPEwK$SpgIT~;_G5P8+N&Gm*;d(1QE*syV}BCC2{zl< zzy6xN-D25`S>HThC;Ng=hfeJ8Htet9lT9Ztwy-rQGSK}#uX-tEn%v_>R`MFsFE#BC zk+YOVxsYcPP+&Vq9`X{1R+EK|_2`R755WELao-n@B(cOk_q@;Vg+Jc3RAGcmJ+(xK z`6%uTRotWSaQvXR;y8P;?YTbgnR&`Z7u}g?eO#ck>+)Bk%gC_161hd2>?Nn~@AZ4_K+?1_z1CH1_8Xs?_9y3hv%z>|e*=t6ES#yR{7fSq-EvnGo%bQ#g80J? zcoWM_)m+Y+nj*3JWgvC{{JkHrDAg?di>B)_WVobZXQ@(MsGP=*l}qG7ubjpb6m?kG zTj-R=j*7pxO=(T|1;6+AI2%x@i_2&}ukUiBdp58hhO=pse5>B5!eRJz3nxBtBCq7cj&wOCz zE-*PY=uPb|TKLrnUZS0H3@3Y&LGRSCf3nlM6!xYCY;Q2?PvS|BqD13N4BTc*>&nNhrmzb1N)`K(?h z#_2HSXB!==g0%DGV00!si;5bZCu6NE-~cj|0Xh++-_6C0j=EA^-t|inENfNSp7hZP zn#GkxdNCB?(3cI>gw^iSspc+EJ@bP*?k@S1pVRI_#BS#<`KMLxLWn*0+=a~C&Rz0P z*Y7T0l3u;cdOV*IaJiOuAh&Gx&bIHw*=O)pl!?&4@swmB?q>BUUP2`_&YuhDa`j!U z8)+tL!{PX9LndO!<&%&hTf;XacrD8hj=k(}3>L)ze6|wB}46-jMkkxg9s$IaI;b{D7Cv%R_g^8H2ye(n^}%3>lJlBY>n&f*Rm4VU5BNb8@}Cu+dUZ; z1Bb5wWmng-Q*^MhBAiAVw)keADRiDJa249ZgH7pNyA%k+og^N;M za4D$^7FRJ>Tuq>vmMm_#$^yRBaLIO*MKM75h${(eL51DB3P;gb8&o57`SA4!);x?z z3W@^YA&R#)I0Du7=prFz|;??FPda7YKI3%}d1r`b7@N<ws#aT`gP>`AnO~UsOQsk2W3JL@F9prMGbVU^tYm6Z;BHFq1@b zz`XAecwvSD`Ii9Yx+;a$n3>oRuT78e`;lyGr$5hJ5@V9Ja;Z|X;*IjNXM+B*CLCvZ zv1;mhTb5}w#%XLG#*#c(!~EMjo67!21h;r|#U(RY`e1!Px9&2Z3!-wueyLWEN5I039~kwlQ*LXz*2L7YuLxj)>PQJ zyDbEPeBfIyfDOl-5G4F0tpPTelkDAvmI4^n*MeDo(f6f#mjs7eeBRz`zUJ64)37J3 z0b9qqImZ$BmSdaDFe(NJr-qH8=hh}O1j<&Wtts4HpnBW@)FwJL$Siw8nDCW%kO-e4KW$iROdTlyM%nE53Z86f>(J{v49)JE&47fBbhU}VY}pUY=Xj0 zM~5KF(Q`>!9D#~S%SV2Sv}MquBeu&k>7TkddPVB;l81?NH*A-isjSu&V-pGOHDJ4B zF09|`QfqF+ZEXqlA6El z=akmmu-(pujP2sdIbypEXX+rLX>?@)#06Zy^C*Xp(pEe-Y?onEgl-%VM~0dUw#z&& z2^VrlH>g0lv%y^#j@T~CoDd}ZBwYjucb;_kup_q1@GGnRNPQa^+r^`@m2ITu%?;b- zc;mu@hYCu)w&>#4TuEY&Beu&iDh3FrHkrXL-P&Y^fFwxJn!-a7Ap(sfw#&;~2ot{Y z);U6mAOddKF1=lG$FW^96V}o6jjfbEjGupwTb 
z!rZW3GESy#DbICbyJTdRLv<2!!*4J?TwtLL&Ookd7V=@!A zw$L}mZrCmv2g4xq!*aFtTLC7fpspBHLZ#hV2SWi#?bF zz9>+1!FB}_l9YyEGUc{9FI~wSJcBwMv0af(A*uPxeokr44ck4o1K2JelXo=@I_HS( zGMsTigv(NzVI|(}gw^ESmcow+A~|Ba44Y!ua6lXxYA)C=^SC5j&Ji z3PH*d+hv&(f`p%>ivZyv7>x)Vv0a8=S?x#a+rZdvu4h{>1vhM$?_;xNoOh^54Rz$1ZawAM?AxY;%>LA~{ zP%Yc12+*}=m9DFk^T@>>NeAW2juyv8>r3y7q02wB=*jnK) zzB-VXHX2b&0vw=pcSiu15@JgL>1$f6D3}c4D?a@dH9lQ+b0NeOw1>gH@?^Mt4n>t8 z^68}*mc=S2h#Wn7DRye5Elk1uLb0o%Vr+R^kjP%HT!QEzoUAlbfr0FX`s{7$VZS#S zp_}8o6LiZvDL~2xZNalY`x+itP+jKJgBOI40qN5%nyQrNj zfmB>`m#(%gc1i7M^I0 zQ~TFWVlav1RCxwxCcVu;e>9u6*}utDc~@_C*Y>cRKG2?RZw>qHjqzwU84tUg{jEN_ z{b{suzPnO{3x_44T=1IK@>7$9yrIjvr&tyH7Nv=Hi%vSP3v%T4_BR$-*xoNc9vi%gM?0k`` z%#hV-a1B1W8}9Y8SX2a%)eM={zLF&_v0D1LL)4_tW(y06!}gP6?kH0{uLAETD|Z}* ztSuPH)iU(TtW6JGua#TfU4_}Z?1UmZgkSNk9T4B@@N23a@`oK_C><8rpqm#wKZ9`J z&xCEmu-8@_Y%$XEK@)W)J`f-f7pH1E1!D5JiwTlvodYpP;9?-|x3gxZ+p!JA)QDXo zC=tHYjH`$90D+hs=_WH|V<5&4y98n~tYaW%9)Dh$uvZ}NwC56tvFxQ5Joi9c9DyOT zUt>i~9(M`EzX>3*i7^xwQNId4iJdR``0oLV_liyzkgO%ZRqRe2)?izBp0$f7`hrpzM7S<#>`U+Nr25-9*%;UKyLE9GVmu@ z1=uf)a0Xr((eT|+C^Ket5RYU*{5^XiDYKs0_Sx-W+(awwRtvcL-2x{bQ;cC;{^kVC z`%iK*3{?{ONez|U>^uRM(rx&Duk2Wf!NVSOLbE1gLd9YIMCe6ZKow2rB)Q4x5;01D) z=w2jRX*$pBz9KMO;VyT?HBH*1k%B3>g z3oz}S>+{#ry#RKf>kYU2@g?i~miENE)_0#BZ}x}ms}Xxzif|5}U4TOw$>WIb$xeIL zA5F)T?x=URKP9hTykmik@E6VXKX0Nj7SM?y``4?WPV3c>*!7pQags;0iY|HGv__FP zmd4NJJ?*-2@s6~rW{S82HB%(sVy1}uMKeWGaF{9L?#@h+SR!vyYN%*G{55znYgN<7 zdt3c(Z?xH+4hI{BWihV^?a{mdy1#WbvB2f^mr?uMwJ!Z1e0>4-F{8(e*tQeKcM_%1 zC)$Z8Rct<~X7l`uH7c98e%;9A8@bIV);4b(8Eq^~Q;C@(v3WB^Z1ZM{#OBQuvCW$) zQkzd;adUQl3mNTYaGWxTK+xg_&rCaR9MOhf_)*lW35KQ!>7ub!3EcMS<@X{uz|SY%OMQTIIGukaCZ0rQ zG@+n%8yx74w`W`1vu-AuY21M$tBpfZm4EQ%C{JAiFj*<>2%;fjCTe-M?22wzT?a^( zj3Pj2>?N7*FpEr=Uol-?##_MZO1aZ#PLA+j`<0)+VOsq~rpmF~p`eCUiJORAcC=hN zzu_lmPOaKC_@cnrQOcO9^2n?55^F~l2BsFV;+0uO?GAff^69~U78pEc%^)eQMBN=z zFsKo;R@)BDCQT+vN?DP` z>xy9NBK-V)g38V&5o}3=*Hh+<8l?5N$lmpCf|g9Lkduqv$plLpyn!-l)L`B^h%DZq zove#e@jwd6XpcX(X5OKOYf)u_8Rt5q>@@{CrIK`C8%U>x7@L7k<7$`1wY7 
z_vNrK7!77QJ4d-C79NF>>?WE{2WLjfwYCqDWncW*t2K`mC}(~oRdS8-(>!(K=kr>C z&_80Lk3L^@1hNkY^J^Y~1b#9nGY>fWlFyq*AmNNWVIBc{I{CbL1mb<%b4^e|pFWTfLTAC^opA2&h_YeYhKR+kW(1w*KqlY`Fk zCIYX<=1$_m8VRCE)Wv+<2r;br$fyX@+?-rwMB=xn9WiA5WK`q_W=1Y{#1SgyiBGTdo(ybo}67hazg~qL}2QrvS^L? z(E!P8&coc;eK_ak1`{t?zF;|dD>@W!nlXT`PHPz+M;41kjhHZ<@kc*PVx0X0IWw=b zoeg)mxI{I-VKz22+f+0b#+HAQaAh(!^6{LfRWnbXDk5pYMn&Yagc+lX=mWMYq7Se( z=cpo_f^AeplC@ntASHsWigM~KN7cNDWF;FFk-M992^hNs2ZqBA>NZ5q-c`Mf3rdAx9PA0>nl|q*Sp~QT}mT73CjiIVx22 zjR-Wch-;{6LOP}nt&#_bvMN56bNBt##$uV2s0}T0Y0?Nc`8-K=EXU7sN#6BTdM4+S zJMWmUjY`wvvM^QU@+)8|D+9aZtvG>0s?FES-@Y667FFU`?vB5F_r)ra?xW^qf3ZsJ z53{qfQTcwcN*oT5x3#IqflVP#>N1=BMm|eIrY16HhKcH_ydksAZ}b5}ptLFEFII_Y zEySoITJ+hfh&~WNMHj0?8x_%_&sIhBfdDGHSS9u+Ba~$F+Ua7Ih_>sNE52Y=BK(Z> zMMP$W4kapXuby%V&+w^y%3Qr&rZqro(SzO)!ItQqp4B6o>E$bf>gfqfhl=TuPwA&e zJ|#}?pEQ80)GirGi7N4vfji;Mg)axsoxk!YFD*&ZjV3K16*;z^=Q)x~zkvTaaqD-yU zDbuk8h^VSNAVev0U|0-D(Rw{A|@vX zWMhnv1G03&iHhRa?y-s^;UOxXj=QLMIx5;EEV@y#Zr-p$SqbeR9#~gM}mmSAy)Bpd>qget3K9%O7r7ZL3q&W z2Uvu|1m>vd5TS)3mP_tUL1reiv|n#PrGTMgJ2SYuLQJ!$=R=Xph4@gQ*~!HCp$1e6 zCY%_liKAZVPUlHdD#QXqkjh1OGQg~4V)Jrd%5Y|T?*7KmA)*UI%-ea{nMuU=S3mom GxBoxSeuAX{ literal 0 HcmV?d00001 diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..7c59cdfdcadd549ead313ea8df07aae86ba79bb9 GIT binary patch literal 267629 zcmeIb3zTF>eIGoxdv6z)> zZPeR6boZ>Z%9qbLaexpz;lw#Uew~vqb~v%)*g)Vwz(m0h9AZd}V}rmXfDMM653o&~ z93M;^e|2x&s#{gJUbm`l?@Hf_v&^gSzv}<~|9@4t>fjvy?-zdfhnDut&8EqvuvJc+7`&)PJ`REsqXP;a8%;$H*GjQehXwct0 z*E!oAPg*eE+-|0F2cfU_6U3;g4fv%spINQe;E7obOqsQ-#RQt@!azK zzq;Y4pJjQ8mS0@rWgZ%F4-U1(Jz-;l*&GJMMQk2E7w)ACGtu?AaU*r>M~CjpGJN zHkXkKiH%xZ7BDced;s>M^gixwb%xzDsFsT7;JpnvWXD(Qw#lS_Y8WrzuZg8N5`UXZ z=e;xObx`^7WPf-nZd@G&ahS_tKPplBTit077m^uegAn02Be=#=V&h(SD_*=Cm0r{S 
z^qj54g79YI;$sn9Z{lL3-<@zp6)G1;6KVaSuHe>|Z=glwA-E=fwN(+B4!!&mQf^(k zz8{raX+5~MJfHR%@E;?1rOos?XDpk)``k+2`6A+-xNu$Liw})_;Aa|UgUVDPIG~jP z5=Sj-ea=~BA6Wn<_7HtJ?a{1QH-^?8BRK7*VFpy~(n2v?=nkZ1{}^+#bN6af2Vmj8KaohEOnj zG^hf;5W&^9LgadjQa>Pc>o|s3cI)`Vl6J+%7vaMVXxZ{6Hv85>NkZ5liS>RSu`KU4 z3??9He9~^y1duH#dPq<1hZ+sbeDL3!VK*Wq z!2>OY6ogd`Q}AW0+#&!1MjRt+OshpF>;9O>093F56&(D58LUA%Dj@_GAu`7ZOehFz z0n47?K*82Ow2C(jPGEvE3L)65f)t5XNI`oRNC6pSrbq!9v__1vVpOn^)v3`Cig)y%L}z^VNP!JC&Xmnft_?|U8-SO!8af;+q`+dvmPmmG zrB=EbAdF!LvqzHMMF-r|N*r zL*9fxn-&em;+N3S@={<0=ZP1)T?ID0a1B=FT?IBHV61=>l%eJi2P}~JrT9znl7P*w z0`jEJ4(xXo*if^=3akkE_GV-dFRXwN(FxEVE3l!)nXka-3lu>S`{l`qNDaL0jIv2%*FsJSoPP0mZpAadA@-_hETu& ze5O+158-&30al=A*bXZoG<1658m!7#fz1f#6DuI|tBw_rCv~#3#|mtySz!fMgivp$ zaDgJUxm4d|aIo1|Kt8M!pgmS#Lya?KGge?jGQ|o^Jq|5WyhLh)6A z6_6n+V+G{FFp<@0;Wk*o(HgJqqD#Yj#k17s1S-tT4M!hgUY??m&oqJXEVN`dk!5hSn8$98;&fHjHCer(jzL_n5ac003E0JhG0B0(1AaP<@XZa`Hz0q@&tphTooWfOaSe(cVcU3c{`j+tcX~aeRt+Y#Rd!2c?^KE z^|hAjTNh7jv<oM4Lt(;v3Z^t!{&crl8ORLxQiw54G-1z32%@1Be|74bSLQAjA>_MJnC+3b|=&H zco%EjclabZt2ice*|WYI&1G&i5+#^rbdY`Pbts(-%zxXUT+qi?oyAeNczCH&5y|77n%*3%aw;I5Oh65MFmn`E4| zvV>YUf9_?>fAiyRCh<__WwtfrcOzJ>Y?xGg@%)|r!65P0zZZo}#zj)Q6rexc>OEZ) zSE|VQ6AiebGVjNFgWe`Or5=B^tOQxF#<9sK8YCVXE99h-%G(;yt1K1T({%EFrX?*# z$se<=xEHR||DOo9>J|Z0kk?iyId@F|rcs{&0`^n22~Jm3(t=Fr;oz?$*sRNH%3WM- zyqAr}vO7dg0=jynxFyu85U>m!#^$juallVD;eh~L(UeIF9xouFE^d>lT_errGk*;h zIZ1FS4X)gPKWPM3%q(iP^CV!VV^%g?3a|q{E^K>nbZhDWdCgdZQzc<(;=0lnBPooy zRDil_Gz6D<;7Tns(Cp|g4dOtr4pESTkJT$7Q4B$J$ikpp)F|HKA zIvhF?p>a|)1X_3s{zC+>u`=y!u+GYbTl6DN>_9sN1R_2v;DIS}SV>5QcRA_A8b`ki ztzb^k_TCE`2mGk5h2`ADyYp`nZe7#H87&b*DmJ5@(pP@$H2*vRWIt%@AQ>+NgsrZG zN&+*h;CCZ_LzvPKXoW&b^ekl6RcfQ%BKg7em6uk3uR2FF$)i41CR)YdR`1PP%IipO zv!D4XtlOmOQ$ngNm3XQZ0Qj1)F-ti^0avt3MOD^0sCie9i)JVL72Bn>t^V0~_XEA| z;Ew)T%SEy*-|}zl7^G;P@24aF8Z9Nk*{#7idkuP?5x6cnf^LZj^Mq&bdkFa9+eJI%akP z%5EFp6Twxw!P145Jj9t`5HQnXzp zevcFca-W?Lg{wJ;QJ)F-vPU>c7J60m3-B)+aJ;N3Upb*Rh^&KElT=M|NQ@dVOX}SX 
zcr2u(raRkcm*1)JXa{ZZ=xyhFoswe`pw(9$$%8cz?X18rH)X3q?z6=r*&Ge0cvCv|%TF%xA^qhBfb<-oRu_Yfg0A^Ro?tEAwrUI=d=EF8ykZyau_ss{MyQAo49 zw|6RTqW`OcxD+#1H5K2B;8-}<$fE^H5fmQ^sMC~r?v*wiZ6&=bE~8Y6p;Cb#Y1F}e zyWqYK*#?%u`+^2XdM3hb@a?Vrq58C@XzF zd4FL4%VIK)J~*R@j7u(|J%_&@{h4{3zQ`-2|3>=FHc;vX6q zc8SsWX9k8Sb`iA3cN|r6(>_{&K<|~JW|M{pRY&)W5j+$`YALMKk~;3o$+nlv*JJb+ z|9sq7Y%DFN@8ecG*%95d7e;36R0}5#Zahg{f-SVlJFNQrqX@}7f)9tloKvIA1b8ex z7a`u2eX)N{x|n;Fo&QMn+zF0Oz$u+M=D6vfBOO6CqTO-g1|y z-foL`=?numLkN<4cs`013isR-WQBrGOWHlxw?aY3#jfn{59SM-Zx4Yu z(J7&C@LLsu9ek-UTP>{jv68E8GBU5|o_O53~>wv9w5aOwqvzJ{F>k zpmWSCo}!EhC{CoJ0NI326=xG#5%RTzZIVeB4EBif`GhqmbrT(fzoC@pinba1ps;J zbTb!K11&{(BA#S-A)R#^$XF3e(OR1_9hk`+|jm;n0q2tk569V<6R7=VuX=k|>)R&G+xly9N z=%vHQ{h=fAlut>A$-O2t>`k8(3cX^8gxwm{cQ(L(MNTn6e0C;zg?iI} z9O~by=qP_VV~yRusoV|*pKrjUbs6Lt(z*K!K`ZRUe>KM}WE#Y+`>uaiqiE@UFBK!L z>^S9QCPrFaexGU7m#vU#rY##%ds3(Q*GHVY@Wt9B|GY+%r0`U2sU=fdGTZ#AZGA}= z(t%fB53vCsd$K8cChQ)iCACj3zk;07>*BGhMw+KGcfH&KEfvsr7fKWL@*bc;s{jD{}w{rA` zKWo}&SzDG=z!d}P&)mS0uC;8dv00oVQP}W#NCozR8rwn(^Y@y{!6sg zbU4`_O?sUTbW=}pk)5qetf4m&7$Vv2fAAX(`(4)>#>;X=F0qi`@%JjVa?qJ8h@bol z#F=S;7ep$}xgw~k_b#ucdzblVQz1t1(-p8VOCc)qvh1?408Ws+DMZ$g83BB<38z^7 z>o!bRl1(`ZGkI5XS6m1&*|)ZpSdrz{)3iW;0v?MWsZ^j_4vzI@9X{X_P3X@^m0Uu# zetV%3-<_2TNQEV3(Q8c?&KFVVEmYlah9~03>Qgt`mGsXlyr0A~&W3B8q%?y7Zna3luL-K1egMj??0{I^pOveikw#iS~Hj4j;n4f*Qj{l zA3L|+#4b3lMQ6xc%ZD6OjdTn~vdJ4g374gr?%$JKt7DT%q_b63giPWJZV`z+*ef?O ztxl=&EN{{P_AopYKj;y_i0I1tuuoMVZptna3$8dWO<5l-X(4CUu4kmcK2=}`9*-Xl zpg_)Y7>+cJ-dcJ_iKh>g%TU^h%)*(ILu`#C*EM^IyKPb`Hy(5RulTMI{%wWhY2~vl z3$jX6cchg%a54*#7g0#xyh%D>p`XioL3me%qe{%8Q{+M4bXC?)g@awqv(Cw{zy~Yr zVKYCq$d`*6tRJA$}EW0r?(>uslI)>Q6pUCGhV?%k6eX{qXwfIXQ^ z1C#}a=FGb%6S!97BEOzY7=x+wJbN;kG`)K=VYVs@4b_v$c<}7Wq#RsO8A--8`1WK{ zwhbxCt0xl%WbN9oCld&ZPFVzu&^?(jFvC%;J=u{~Ik$)D$pnT}&TQ_mD4 z?8$@yS-bY@$ppfpQx$pnT}&TQ_<+RuxgOeV-GJ=sctp3G9@ zrecS0S+l*3{IB?}lHTiNyy!%i3(}MEL*S~)w;)A=|4b<5^d^ zdr@}S7LeXO*~%<>GMNSnUe7~MCUC9DMSkmK!Wc}Y=h>6Vr0Lz03A0sMXsCoF`_ABbuua2hLq&BP9_Y<+O=O#CJ+{#vIrQVdop2QhND`0vX$C; 
zGJzqLGn;#|m6`NpAM)J0C=(>plfBZgLo+}}W-0K(gfEO$GG1_^!v*Qc_#uLHWHOQg zIx?QD%4fnQz;buuS=P%Km`(mGvtpnBe0MI&xHhCeua1l#FHlFu6BQk?2pge0GJagb zjIks8$qLuvNOlco0aw|N*@bJbZj5JJ@!dI%njkNLiMB#9;Vx#zupFZ}VQr3xe528~*x!)z3HiYo4Lcr_sl%yN`>xBv_3A-*>(9V(Pd~6aX)Hlehqh^kxePUQ?qjEizlUG) zdw-9!0hPMAjOO$DE;m}v%t}(D%Id;^rHp9Ws4Nb_`y#le9AP$k)9wm7bo8m-xYZx_ zr~U4rbGAF!L3f_sJ{nG@RyQIn-$JwXQE119OWB$av)pwi8~yG?avQ_RlP7P5UytDB z+9}6yqC4()HwL{EZM&JG4dIj1-EnWLGwhz}#f^B*d-5-WJ!#?G?GJn7j3Bzh8;yak zjcu&X0SfF~kBI|2EB4zalm4k;ynw$ZdXD+5UM9xrFipri=nO2Rk0<-XQ~b$ZzB;=c z_9H{-Z*`}=PR=emI81c|^RGs*q*Y~W+(Rd57FQPO#ZaVQUp7<|R=Z1E=Ptg}xq@6H zy>A^BBV?WPPd)p?w_4e@#$EC$bM7*0Pq5=b?Jh*@cJ7jYTIDW;81F8XOuA0z0=2u4 zncKNb{%Mt426H3$vh?a@*5mmcfXlVK1G#Ohd!}_a&OZHTP$okEMiY{OZlcMh1&88m z49SHhyv$w79>t5OAjd`dJi3>Cj<-ld9S`4%;I%A2IQFu?F>q+|J`X-iDd;kif?%~^ z8s(*}>A5s79gd9U!Z#am2g_V#c3Xm@`Uy{VMJzi#gY3)JrFCBqRmfwA$+*4~!8*%M zb!iLnz)#vmT=0;DiRV9xKynK=D+Pm6J1b?iUSL7vPb0XM<*2e=QTM~vxZZx9n!Q*6 zd+1mUJn)?c-08`%7&v?dD7(6rouY%41@RB3NFfPZ@V_F@P>mc(2p_&OH>H;l0)*d> zKynYcNwsOi+0s`WT%yz+Z+HVOO%A~|@vE&$ldR!=2`Te0UEhz&{P_$i%!(5DN(48v zJlKT{$`uzbtQnC+1!vyGn1HSW%lqMl)r?X2a>FItQ5MAj;UlgjtOZHjvoLjMiOM5| z#xUU<5v+L_kr)nqBpiT$9jBjlYCNbd>1qgPeW|^hZ#i^){jpQK!SMBb2n2uP)MPMB zilM-v>}9CAG#Sj}r#OJaI5!pCkKX_n#w)BqhbyI+@+=E^`TBpYRn)TT-LpMv0tiHg$5h5{1+}wEzh!; zsH!(uJUxe|mM0)dR$(kkUaVRuR#>S5bZuvK1X5s9NL#*AFA|rR)ft2V2o01RZCojA zAUTu(byW)U;)R3QOPPtSceUvez8J~2cKY*tVI;AiXu>g;7ptb8w`G||W1PljVJykR zHO#+tepA{1h~PGFuDE38A#9;{|C%ha2tofP?Si^G01rZT1Xz*2L7YuLxj)}#m*28j!GYvC;JqK9w005%+RLXhy2 zbO@d_;K*7EU{qfVX8A?mm+D;-92kfu(#>MoU16Fi0$+D*m}%G()_|?qku2(Xa41HD z^0Lmi9ouAvQ87R`HEaw$w>Fs}P`0Wdz96qlss);~O>}CIS@wi5;VbVT!qJEU+^}7G z+M9gcH)LZ>hB835Y);oV#%|aynT7)Y#ElmTkS<*d89=gkJB4jo{*-vJBeqM1R`8ZL z#STjV2EwA5xL~`cAp$hSWQgTiVY_6&Odnh&E6G4!wpPkPGPssEJrmaEhV61J2S{2R zJ(r}#5s)M;qzPr6>ZKBSCNoLh4cq0J^iN$Jy&`pa$-~6C8@9{MR95SXv5ACsrdZUb z8K}QnL+_2rY z4Yteqou)T*f^YB)bnuNEw##tF1rbR_?1IVU(=J5?XXw}w+hy1k!-fOm$WU{^cA3X3 
zh6@iB$m?H?nL1*-EOSDT@RM{AAUvd?l+cs*OZpVGQWEu+mlgDPgYUI<+Qb7775^5!)qH6}-7)y9UCgme^t6u4#w> z4KW#FX4oznu#D}x;4Oq2T00cZOz{SCC3_#h4cjG?#3m@b#qJP9IeIQhiz6VZY~=~Z z07OcXP7&aJbnCcpmuFI_aDAm7hBMPPDaU=g9OtrHSBy=h_Vv+HuoULDZ`aH-hwYxX zB<6XqqOFR?H)Pr*e;%d4!&{2b{WpNAi`xSO`|I*Rh_WwoWD}65kX)_ zY?onE3>yxJBSXyv+hrb?gbUs71{YK_m#n<7)@ard+hv&(f`p%>wYy#E>wfMMa@@De z@GGl#NquYWPwoupf`c2j%kjpAhd@g|xosM~o<*Uw&^WkZyBwopfN*Nq7--h5O=bv4 zf<$F+*0#v*Hs^@#^70nKgs;4dO2VBlU9fT7w|ms>Oa_GA9HHKS>t>!ks5A zWaGAPm*H1d?~?k~+Mhg_Q=jbOcqRkK+w&tlxM8~-qhf$?z#2JvZf&w2+x1$@z_SZ- z_=_X9%gHCezFm%<&dEarku)%#sB)qhcmJFA&|PV?(0A(5@)|v8_j))Qw+#pFN+!cY z!1&sr^f8K2PqR~66QkPK0V+$42_KO?2`LDH&3d`V@HF1(`&KNLjS!p6!6+LFRwO)ec#1aEJ^-9a=VlqEqlQ3Mu$@t8~!y*D7aJC_v=f8MvsdUW-QKNlP* zR|+cCobZ2FJV#K?vAT*eNIi84@ggCAAz1~i^wok&*LJAc7i*hJcNeM2CbijPNWXFl zJCaZc?`nj}3CID3SQJPfA*E*U3qaSJ9cN>Y>8M}MifENvZiGoKUMmtv<@e#aV#T6q+iugt5*mbV3o?B&WOhz^3q z5M7{xzU+qj>}~2nuR9*1o8voUbjv#_K+0?G@Z2xl%oh)e7mc9{FDnP~Y^aUiw7Wuv z;M`V0OY+|F$&)8{|(MnlaYpT4GTC#sL_f_oLC>}+o# zeRSBr@*OUF33ltY?d`$2RL1ch`fc(myN8qNYZUC3&7?Y3%s$>(NtW1o;!--Z6vX^f z=%Vm6rK>)}-jtravP`FJ;fdBbwSR3V29roO$}>1M?r!ya!|9~O{!OOJd%Dwmb_SjF zf!1_qd(dlbj)v3mXwcc}ZTHXxPQ%S}ot1LgHdjOthlP|}7t1m1OC$EMH<_Y~+0Vv} ztLHTD!e%$D!+UqZ8(6Ne?lPHMnlH{GGUMfxDHB)vXyS8erHkD&Sx;q>E6+c(mE0|3 zZ?@}554UY?8SR!X-#A~nDKk-ZOR*;L6!_#WxZlfSQ4v5^Gh|kKHA`G#we)d^s7asA z78VkR?I*?DQKoo71>P-I?l=rtTQHKVW$2X|pdKoc8DSBwUG_8)9m22q)((hob@(;a z4*A0lF_aFAY|zaMUYJ3+=V!vUVYt+p`IQg3Hl^i*CN>8M#KozaPJx&_?qY)ES?55^ z5x5wLd#$XQ>9n6uftctpKp-YZy2%XL7>M!1E`gW~>llcc$6rt;>=lUHt@9O#u{zE} zATEx;klC-XA|{W!1Y+{6b0Fpjyr8Vut0F#P8;Et)giCcx^dU84L#Z7g5R><>Wrl1F z#Q0&CKum^p48+XiFDMiC3dBdwTOh{jkXmrP6>;&tip+kEftWn*5{SvO&ViUC@Pe{p zuRwg%HV{*E%_Zs*;Y)4S5OrlJn?E!`HaE)**%*lN!!CiC4C@$(na5vHChQf6k6H&} zYQql87#%EosRe7IuFQJBz6%~KV=;EC8r&u*9Z-CT9f1dmB{Y&d*pFA*ZAK=Mn)IC^ zsJx&oyI+=w4Q|Y-$Pzr(mCF8`YO6!j5qx16NG@b|FmyGHd<`pKjhUwuk^q~vJRAix zf!yQ;W#DgN6=1(G!Wp~4c^MeuS(01eg&C@9(yE#%;tteIk$8)lBCbr#6iFLsrii;cGeu&Fyh*7sr+x6Z;bp84d=GLYEvmg8$^XNQ<)J?EW#|!vt!uU?2H2Oq4@uZ5)C)I48 
zf3Zeo^VY8$nS3L+`NZ1hjU%Itg=s1=QzSNTrig9cOp(~UnIg7%Gev6i2`p?)&ut^4 zy%hGh_eIQq@OR+K%+Shj5XFmkqc}C~PtS2?x9ZwB?7T=tHwM5yqG0)t1b86>5ZsJo+H4CYi73~E#* zWA2Xy=B}g6+1G<2d)G+@DmZr8F-P*Vf@if-(=#U5N*cVLGHEhVQp$=fUaw`*!D{n? z6bx#V3Ze#Ypv)OHNb7Hry&DwsFbG;J6c@dd36?Z?BW2R4!Mt@4S-ep@Sr-lBffSOf zqQRReb4Cr)en({QCO?NGbQRoGw*&aEq*Np6{xIdzsKUHgJ0K?d!;0x?gdJS#cy54> z#>eFrVXs)UFe;I>m`RC2rj&7UA3|6mCI>BkJ_2lMW87(Wx`RQ!W|6nFnEzxyZg7b- zk{9tm9~OVHB0SL+em)}nd{p@PI^pN*g`aN_e!fxo`6hVxWia0#_NO^JN4O;x9)*$Y zCYnt8r-sROwhxhIU;Nl>G>;W1XMQAAa;@>xJayye^MZoVKWw6pK3{bNvJVIIYaW3F zeljOB4>ujy%Igv`s0h>CoLpo?;55h~{7A|sN4m>IdKpCMDs$Bj@RJ1SBo zYe!3s#g{(*-CckA$*UW4>3_fYe|=%!o?p4_p=Yl9rRQ$@!C(8mMflMx;xAnetI$Lz zV#Ny(J-rkiPrsf+Ula888T57OrGS1ppDx|3W-B-QFN+%U3%iEr#&#Az6FE6P0$1ou#CP#6Ii;cnD`ZNS^GfLXwL6iSlqLB-zEDG#FxGJ%B3s$ zb3f-dMx#N3j*?qCC>$?G{{l}#zL_dzcHBFO$4Y0KZ5~J`fqux*ZTj~mB#c~$OQ^d- zAMI!7C1&t?x*zu8@6i9>{`!Z|_ zw;Rnpa3k!>mIjj-0TJDmSchG&d+SbjP;#4H$ZZb7 z()ZB;S1*USeQDt}`}SV48{&5(jXnYMclJ>O7{mNl|IB0w`smoir#q>uu_{JZO(usp zVJzttz&??=D97DvVfS6=I7)OvqH30+Z(|qPv;P3{4!9QR;%hpc{tzwLq(W6p2ex4W zzg(7%) zHWy=fEu?S8l$g^;t^---qSS-Ko{VL!)TlKFI*3%S!J51^eKqcaZ<`>6+}BF;XR3QS^- z^OIh0D{l6O)1+~5Zs}6=Y4nSB@tZ3-16^H&h4%bq`=iFvfr6jh?%#=+zXf(b(CZHF z=$|D5NWWS{ek?zDG<_cPg$yEh5+pcsYW~u~@$9cEb3t)1{{;3Z&T>aktV1HfENr7l z+JE}sZj>zcv0JOqZ!SlOim)x_;O^uA?8cv)wDVO$6w}FiAq@JH>D8&5z543taFj}8 z0Qv3~m(frxtTx)MXtds-w>gb3fjwx`!&ZOV9}N#es)5u+(yzVL4pgpoKkPb|GR^rV zGSEEyFQ$<><&TXPF9Z5(iNx=M1IJK28~3*E=$|~<8>8b}drHGlc}VJ1sG2yov!Q%c zj5s(h<~H?XQTlM^3MAUZFM$JSQYdsF33X6_O!qx4%^&Y?JuCaVC8?S+&F6dqP#G5vU!9dlPoQ_>daQ;KId(@6|C zea2V99<=j%+DB*WqTFzte$VsG=Y!%?5`HECC%WT)cVmFIxS$x5)h{@!rS;N&K28Z4 z4~50lRHxnkFwG}RB_nYi&We!JgYBs<$_-pK%+5XqY(qa=eCu`{6vcs}bjxxX}B6uCEL^rO^^yxwA z3{R12+ppxn_kVT6Pd|GB@X>vm=%#$CJeuMy9#iC9P_?J-0}Z&(Umtvh#1~8ncL9r= zzO%|9WeC?&sIcO>Q!nx0?G6CAEK48E&jV?Ch!#H72w}w&lz-DCoP_X*?$bfY^oK-1 
zk%xd(jtHL&A~=*{@mrmtO*e!Hkye9<&|uT>(FSmbbvt=hoI*+=xJD`oLI&BNf$&49EGTZFqBwiKBt|8ek4Kg61QIS}&F@ZVfM#1ES2SRR-gHcqf~yyiBj!vKGV=elr5iZc|QT!YyEy1~e0U{#XRpo6>l;fW?-I&!8(g=vz;c z`?c2at;NgEyDYidOtV`B{$s>nBqXz|bo!h#O8m=trX}wp@JPI{n!f$D#@8DZ#wIWtH&;37JJC_h>Kw#*k3v=uu%7x6c2O9C2NldRajbN4{gt~~9?v##7_ z>l~Q_8{V|gKpLt|4V@Kd{w@4-A9(!PWgn))@lv$$T*^~4-4Zwg@p6kFFCS^Z8x78E zr59~0&)pjDWz(#|MVb;R+u6P>?caS=pAJv41bDU)#-?dQAI3vaOCN?1+thnWO<~aR znLq-hjeU4i4kOC6xmWjn*jPz^_4DW7Xu$0O{24d+Vay1%_+bbIvqys};0qBLx22O5 zXzDGz9H-P@2;Doj94>j7c>8bzOqV|v0bl4ftc8+fb7rS&2{jmj!;%n^aXK&fPv*WV|0w68C6{t;*;D%rfYcFw ze%_41@MWvqA^-wL93yQ{ zbqA?r=m26dZyi(F6zyqPpaA~B4A!8Tln?@o5ShaxCg7E|0Ms<`+aN1~15`@z53S-2 zgAE2lKWEWf-9mp2NCBAx+qn571%zr#!x@7Vlo!dJK>#u`%a3vc zffZ6fC~A2rkOC<$ZXf^|*^U<*q<~P=@#2RR*o;7z{mHQ{Geio={Hh}blt__j&4qnLpu@*=`bW>7?**eAS}CAFPl9 zOp|lbl1rU;qySG6Y>6jI@T z9(?II_#+FXfLu}@=kI12*V$3iX@?bX@hMenu^*tD?FX=3)aIWR_7d1&1@so{VmEIoWHVFm3>5i#CVV8jZj5Q<6KD!^)20eugfb?F&n1!N9vWcSAk2vtq} z(5eS3AS1K<_+bTvqL!ChumUo&9WORm0imelg=?^4;RONXt^%78Fjhb}_q){KWSCSm zbaMbNCqu!=Zd9<^RX~Q+$<7`tu%Tv!6<87S?ajy_D#XDGD?$CFLP$K(rG3oV68&&lgea`Af?Oq4u>$;<87s)imo#1& zrH2IMPN&hZ#Q(ttE6BMJJkW?0;Kzcns$mLju!1A`9(#MN0Jo^-eFY@UWLT!I-{=2U z4M(urS8&8(UjdPe@$Cw7>tSjGg+^zA6&$IA6;L4*lMIR#(DTSxm!2_JK<2=pH7l&ZijZ$_Mh5Z13J4LM0PV2?8)}>>o3R2LlHOPW86*f+K!|BXYKawC zP_n}ctOy0OM^h(dv#)^6Slx&iMoFu&5U>E52lg*gJ!IN zd{SeBZp;6Y4x=c5V^({wwb@eKs>;mZ38$b%*a55$WNR*)aX%a)mSVg-0OzG~4m zT{c(&rm1#O4Skt=x^&7pTP5do!U}SgRK*JLV`i)%CtuQdVRRl6m>E`(b5Rv5z>fuC zRby6GSi#ZCSOIQP%~$~mGv^;GIO>2E5V@GK0>%akjm`oqI9drSph74nX^j;i(CJ?F zOJw)qD;}qWzj7RM@gLnR;pM5qgXKMT) zELS=Z0daN9thhp_#GMGHE3O9<;e^0u&zBtfsy$*i3lOJmh7oZAEc};7K(nXlLO@SK zuPy|n#`bmo!A0>Z1bi)|P$}Y2lgS4ZZ;CybjGCT);4OamTmxPoDhyy0BA}(AR~!OT zLv}ku!G2ZSeoFE^*Y=e94+%IBRf~iY{KDUSKGlGu)yzj|hBPg@L`F$4TP4XL3Xlxs zt&+*skUn&7)f?@t-_eRJE@z(DBUBhd99kW+msp3vwbI7iVOlWAw&fY8xqJQ zJ#^XD0y8xU-dsg0iJv`afkR&*%RO2n%k-ST!xd~z1RSdft_WSeZ0Z>LrhWbr&R;NY z^T17aVryuDk70;X%PHQ^$lkF4l&!C|RA1>V2HT*UzhqI_ObUJbAtDj{x^RLml0tvj 
zI@9Y8I}?22NvAg+kH)R?^(W;UQId)ROSp?A@C^^PXE{Vv2JrTXKayL?1Ih!vF}jqC z+>@1F_0f8&*FSZ7N)O~MpARrM_qJR4M4)O=-X$E;=rj?{-4O>%7kZ!@i&oQxq66eU zuEq6*MRag;JO}TL;7-BI~h(mzmvX$nu`}%T+txYSo z=&#~J8w5emr8qR8L*VT+l-_!kUVW9=eHg;J^8CHxWmO1R1`bV=hMUK_!~s9qga-m}MN=jzc)Wmwy0}fIc8xTf z&-^u5v+zj+OvHDJn#usaBdoaD2pQu_0j$HJ z6VW$LR;U#GhX`I{W!l+bos|o>=trE`fp!Q8M0`}h15@O%l8}mos4CiZ$^mN}{Vudw zV~V!-UeGw;M{O-E=O*5rf0J*W~TK&E19L*$;`c#={6@y#7H)|=cBe~6f z=BKc3ld4bYQzc!aDVKg<6EWyGTpYsomlyQgFiE zP-Y=ov%a?AZ@fkcJnU$xD=>VDuADj__uPfIt$U4d5~5Tibm6J=?+&$Wd<%HTGw@^}(91^1jRAx>k2=8vd zV<9Cq-PuOF{7#KWJKLSD-gds%DLEzqTHQ?nt0Q@^2BMu6_~oW-HOPIoSR|XH;dDG2 zq(bQp(FJFl=ic4?FL zRq%?{OX9tYe?$I@gda)rYo^IB;FR(lA4WP*qO9-GF<-nH%b5 zF>)MU%t_r|p^9F1`&Z$Y1KSqfLxiw|?3*O4l7hRsQh3w=_rbz(%u~8I?Vm>>&FZtbQ*+VI_A8WuP;q)rD!1+$R2mJeN+BJ~uemp!)1Z=#g2^*osQErr( zHaTe3hpqI{fRYgvs)8V3&02$~%7(2~@uD;f$; zQk^^okHl}PM<*IEJzZp=lMn@2uj3=GgB{FU~|{<2O4mHNFzMnAN0^=kLdps|Iom&ON_=pGcZK4 zi=Z{WtKM#lcll%t8kz)U>nsF<tqFb4tHKZ&si$mT$9^@<a=F)LzuTmTe2E{fue`tq}M`KM0)R$5%DNhju3Z>S92GjXaF4LC|ANOy{@tL1@^iQ7b zjnS6So}|O%-gX)GrcaH==pez((RQ!JopYR@idsmbN@w~0D8MGOQgy;`gUl`mOJs+T z>TogIDhT|*-)q8|x-=lo@2V!WUONcTQ|J{#EXJ)teP;vwSL75E#Aj!cSEx7r$Dw|# zijMM^GuGG_YSjCKKHq>x>oUkQq;vNdf>zjx|7wm|$hJ*CvCIhmU5%op_q|k%w6f!r zlbIMP4T-jbgwHhU%T~xV)0PdXJ*m_D>m$xxHft@?Kd;dwDLhqMYRQzA%r<{|gI8Y< zu@zns{_@JM=#*=-T+N6N3dsJ64mR5%d{h@JL3Git50K&H# z;J*z~Z{I8yRGauQGUUHMU%`q2C$f&kP-fZgywxSUOLgfr$I}j`O zx7Hlr%F!GCtZAQRZCO$QR}82>a|27d*0Qa}W^szd^CV|{stIqY$6Lzg<(4aK6~MaA zEY<6iBOG|s1wr_1Lw5gDy4?|)TthzB6sE5V4^CJ8akun! 
z(fz1%wkdTDy@(*(@W9 zZ-yu0$Ldoj+OuW2Ya9Mqh4+(q#?cX3=#p-HV=Yc{$GgMRSzonCzOxSZvkD7^5-c&f zHTa7P2M@|6#8m=J0mt{B)^GaA2T4WFtD)eva-+Eh|Jb?pCU(JbEjmNqT0Z2MYNTT@ zl1<*|Nw_S{bpM{@S{<8ABAu%I#C-cHr^&(E!Tjtcu|l&>1D3K2R=0 zX(uuZXHE{WHIiJ{fiH2FwRrGvD;!TNpJiE)RhqgZt<-^&S%|zaRIW?2Y3wATn}c^% zII6@fIz=AzO;=^@R5;kxJnNkN3Vg7_9(FU?`ct4pF7{j4Z5fbtY~eFtnHBpCh>l{l zKFZ3L>vGG@B|ym)SXDA^nOoGN%|>-T$K#rXM_kF!n61Ijc;-GFa`}!RuF#m}A=`S} zshM?^yB86AWsJy&;GJ$JFF7oTigfW>@j0ex2Ov=G5GLnoJ`;6q0YrmdMAS^m%5gbGJUBbW&N1DaA zHSeD6NUNON!}MeVLn>!B_hd&}&xf8&Cdev1Svx>aW+`%hjcp@av7~!)tG8+1JsB@L z(dBY%(yu4uhrlN9*^|jgqW6k@dorG^%4@=ZCah<;{0S$h^enM?x( zujgT%OyF8{fgtN-!Wc}Y=h>6Vr0Lz03A0sMXsCoFZA`tYqqt4z zo=nb6?L8R|x$Zq#9D`kFvsGDWsDvcr!Luima=>IH8rDK58W}Ivo(#S{nUw81dop1_)~@|} zGJ&w@ltsV@-IECeGaS|0ldaU&lL-u|oY~xyt#CaV^&qY}tJZ+=O-a_+!G}EeF3JSS z^klCz?9dF*ky#47FyWE}U0O5mj*J(a=x{+gGJc349hr<|fR2nOtMZv}2{3p^CSzbW z`LE21eg5<9$Yfj_(x2BV89!d2j*KTNI${wvLU&~RxP%#FNA{ByuEmk;8q5N&vLCYx z*IwNi&$i0dt!>-WMRZ(6D_!hwOcM2A`xM~nVbR3DGSqw&Mu`rz!vw&(h|XXYtaDs*R} z^>KmDuFGGEE+Z3oS7KAvZN10uwF61h%Jf=Sve|F6G3kxZcBlQ((EbJ(nOHbeQTbV! 
z*W%0${%@u?#hT~{kTv+x2E2*orfM!{O-+&5{4x+b0RG+&Sd?lO{&my!7&2Vau(MRD zE>uoq$I2!0uvboF35q%_%BIkniX9byZ<{`w@JoL0?{PMuQWuxed|uz>M$5TgNoJw4 zz!;Yp58thPNt|z--UnP zw+@R@Jhwc5^5n@|mv5o@`6#sG!=?Pp2UhL^lZ}3NVt3KPuSf85?UZ9U(H-}@8-w17 zwiU@VG*3=<$Gxr2uzRK#HxNUw26nE;#DSd^ z`)!j+|I{#Ez+V$R$9z^V6XSGP1PxyvFNgie3i{}H$zCVt4egOpUDAVuyo1hiK}PXp ze|Re0P6NLh!ID;$t#J>Xpjlj5q!&Xt&J;X=+Fjb!+~uiffB3w+OFrf2wYw0p+qp~r zX_dPWV$VN!Av3pgm;BT9yUUlQS1+?3&u0W&uH_xbZCl+lt-Epd=|6)q5&AcpkPP&4 zl7TK?k5L9%MD=o((U#}Yo#}JSH_=SgfFC<4hX+^iG~O6{za)p~&i zjX#axR+gj6dPUt2TjP5BwX6X4Z~!szz;_yOrzgW=;P4fo?CM%}iVjvLap52?;}lQe ze?^|58aa{>K73_v>PACI3w}QW$vxyI)u!!dOJ8wtiBfln*-J=ia_RbhRGQ4EP+ne^ z+(65cLvT&}YOAs&of>>4f}2?$>_P_RiVGK#D#LYN%c85JU*v7>!YXFmR&4O)hD)}i zEQ$fbM_fr*3tE(qEqThi4KHF!7Q za%ju~{@AJAVEEz!fvs!G$^%y;G)D~h6Q?GFVNwhQ4rMPx&85j;9xqp;{`MUtpujhs z+6$I7p+4}Fa{#gETnFuoku0yW;ZRcYp+KNTBU%~k+38L?Q*==EXiORn#RlC*ARq}{#(A8)(b1piVxY;3v|lxg-CYOf-lF}0s!H&ybge7?Kf>c7^;BS zA8$IeiBvSIN^kzo{$LD$IF*l5x>98yOVM*ERQxXli)^x@6@e zEJeaHS!=lp26Sy_bp%piQb=3AQrEP#g46kk5D*$DIoh~V*g$e90qUw0R%2#jL%cRU z!WSdi)=qz(FN`Gi6HPeA@?zE0^R_J0XpGa?EQ}?2xQ6++a4Ms_{=%jzlefbEh~PGF zuDE1&!R4+}69(UH!0Wx47Sox(7?V2uqi8gI;&Hf?_Pxjf9WR9eA*uPx-sC7@#0by1 zKsAVa+J}}?8tG2B@=((oE^9Cyhh>m_a_}`5u!iM~3nHb8*coY+!8smme!~T>Vc8VJ zMs*m0rRD(Fu#cCmsjzc*At{1|&L$@CP!(77!8~Cuc|R-stRrMS_igErtFCAlE)SJCvLn*fDDN3l5yF_ zU%62j+^}6ThJv@eDRx)_Fc22a#0A?m4N*%&Oa?X8`QV1_67rcoxJvedX@|)a%k8im zwoB+?6BKqjIs{RUo=ei=2uPBaJmFMFWs3ms(ovIHI%2!zaV&NJ)Wy*=IY%JQ-LPG5 zrm|XBj7=o8Ul1wG4cj&I%wf9>r^+ZC7=doTxlQK0C8?Fu9$DUGfg zcnLOCVgxsAS7cL2YW}hhleSFB(GlBi+hDtaAzM5q-=F9@PB(0q;fxC+fxw%biGbjZ z8@9`^DTWOP#F3%qg6%SoOTtANWi4F81wYGGAdeq9gL1=mS>}Ww;V0=JQqb}1hV3%^ zN~@#%_4M-`9Ne&7jyEnma5aWTsG$~iaw9gz5l3v7V^je4KW$iSfRUNyM%nE4|S@VQ9Dd#W7!V7VY_4y zY=ZvSE=SKLX>kN3Nz1DdCd3u{7AzDizRT{mZL`+hV2sa z8RxQESBy;r+pS^WE}07(;B~iV!LDvC3o)Fu9388 zEH2otc@Uh+Ou>N$rZD?@$+R5z?UKiHtvuh7ncKcyfoZV^bHEn`iZ0l$K;ptrX>QoA z$fl6g{AE9*wC1*N_sDs`cJY{eXxE@9j{9~Q&bT0AL{Y3K77E_MGX*znmtj*38xDvg 
zL(K)-Wggds3m5bOlUCHf8taijKj4P#vdjrV!cWpgfbbBEMg)%gb{T$U^)9Jzt-;X1 zATXXDxM8~-Z(MlrP(iJc#Sz=(7!?DA1J=mVb8D0J*e$FnA-oauOMmkW>z95&>ZCo(z5eY@mw1#j-yu7Pl=C3e7e zO+(az?UF%_z>^!cOUP&X;8K+fN=B&JVY7TGYyuL>yC%R5+a;64Cg_jta`ar17Du4$ z??qhpQi(imSx7w$w#zdqRJabQi=!7vL#l-#*vyGGKYvAAHn=0VPHDs$VnD{#dnvr_XU+d9jej3F4v z;2EFz?6_}NU|Q_K9PmYfq6@Ywkht(unj5w&vMD4rf7zRAzA$Oeao_II^MLK*F?m1L zK~rw~b{WpNAi}j;nqg(3!3A%~xh=&m@hAvKY?onE3>yxJBSXyv+hrb?gp1Joc5}(f zOJa>?9kE@OIUz{+NxBFC9`>c<*KOY}!>_FNBlT@y$TnB1t(Q7tyBu#^c<@kx(Z#Jp zo+S1-V!IrpVt{aJ*cfQmtxaYKT*yJf?Mwz<-a?r0m3MvncJZ{+z<8p{iDKMsaN0w6 zNwnbnT3?(;{9z3EzM%AQ#wb6Se~lirdp#VD+lGU7C3A(ps(9{(ghm;1D}@0`mlE-z zz$Az=YPRgp{5zWPLqU`+fNzyfGEqr1smTr{OT9(<^_Hn(>Di!OczXlx4x$Mm-UQKv zp?!%0iZ6X{Kyr63D;oa1ZXi#C0{!ob=Lo7f=Bk)F0*2LcK()Y@EDgoM zl_$d`7gcN{S`9IyT=<9)4SvL@mtt5JtC%2i^ysD7sg+(&Ra7uKtJ6WizRvZwAd$UX zxdhQcQMY)qsS8xlm)%gGy-hvnb;m<=g?nd=E`BEkNcm+fc=&*m~J6!gb>(*`C+k+!uN;2cNe^Yj5+t!ws-8x@CDYII2m9Yk&+y(c0Su837$ZCenYOiLAORSbY?hrNUv)RHz z;;{Xsm^;c8FQ~w~#mXIrA!`dpat#oAUG|0%eN+s-*)?SFYreGu;#(blO|?V*utN-` z!y+4W^MV&<5bpVzaELJMP(I|^lv;4Tfw(wT($6ZX2JnI~YIRY00aj%s%Go7|= zAf~VRxkOzee5uXq7i8BY)pvkEOpbJu8L}}D~teDW0u96^H6cR469IFq$z6%~KV=;EC8r&u*9Z-CT9f1dmB{Y&d z*pFA*ZAK=Mn)IC^sJx&oyI+=w4Q|Y-$PzrJWrqJ=a+#y(V{#;M7Wl$0kX*>_VCZTX z`5IQf8Z%ERBmp*Sc{mDY0=dZx%D~^kD!_hWgfnnkM1xwm??x7xeXA3J)V6VnNyv8Pidq;f+F|6-4FXCR8nofU0V=5~7NqH;UA z7?t~Ir`XxE|uYafJygkk3YKj#jxvacd*loFInHaxI3O(-*slR)f=#{ zM(kD<+3*MRa4;iz4ADK&ZcTf`$!OdecF*)C|3t_PpwyjY}a4O22>vUD%$CF(_%*6ff_%TH^1w~RX)3g=GLYE_%b*UzbHFMDf=KkKvq>ke|;Wantkj6^g7XK ztcc%Bm!N&0XOY{j-hTgstA{T}>CraX3H$%=C~Y56e;oB{f}trwx@c_HwL_Iu0H2Fs zKR=&%5A^}g;dK6yn0OMIxdZlhMmy8(ooOdye-gLh@M_~=ROO#GpnN6DQbkgcnELk#&QZ)9GOm~a$|EnfOROEyvgWYWBp*m&w`$Gd z*KYq%VDP9lgQT<)b$7IIMF(zjstRs2s*;Em{#an{I?9}VJt(qwon)YbW0&o5BtI*7 zR%;NyyC-V!ddj59L`f+tvUt6gMHi*!?-Nw_KT(4>Q09ypr1iJR-VIvzTr^0A3Z%gs zDU(JG=B|#P+as*CJ6tPlxig1 zAEsOyRhai`2gF2wSUX)89paG`veW4h9gUC6Ey7;0Xkk<$X)%)$gDZ-e%vy_NhVc0a zu%(T0r`hQa2Kkyr-qK?Jll{2CCDKS<#Q%I){Kbm!L|gdzi171K;pgjwpRX5wzCrl; 
zM&ak1;N6$Oe1F)V=Ik8dmRNWcMzWh|GU=ZhCfC_MM3#N=W3SOXR)ClJkyOdG#!vIq zji1j83PS&|i9Y&#)e*=(9L%qI1d=YuoXkAn=u19t9)ScV@`QN=92>~z%_9(}Ay0fi zCs({xfZ0`I`WV@fkvrHAN~ozJaEZmDt(RoTHw2)Cj3S?z2Q^|=%-rsEpBqE1nBDD_ z=&|0%OzL$$M2fk%kYSi20>8^)HVhfB`JjjKvLPcKhxxE%iut$^Vpt;*y1TlB&@322 z#he^;mNyZ2H8yt=7uHA+MWQa|<3@;K%|}K>nC9l>A|n#NJ?)4g<0qpcKQJ?Lu_KO9 zF((%pkqpGl$VL4OnPNU}gaX-7kt$g`T52r5bm>*Fn5D|jXgf~eplajH(s%BKJ!lVJ zzq>I&OEh1-H~z}K@VgP1itpUXR}aS;)DdR^!Yi&}Y(JhJr?+~zeY-k0bJW?hJ;!LF zbabY(Gwh!njnAwezA=JlBQSMKSv*GkXrN*?t6^^JKAiJ%qluR+Be0x2gU+~{WDGnP z!Ls3TWRqCbhzZjve)O{>irGJqGxIvz*>J~)OH}jQW@AIMO+{m2Z22b%S0-a4AJ2JO zHS^@DA`-my^; z`K+~y=mWMYq7SePIjRWfcpDXwYQR!O`NwTllz*J%sG5pM+Ots+`K+~y=mWMYq7SeP zIjRVk7&a;*#fYVf@{il9DE~OiQK6!5MxcpBT+2)o(ouD2l{`R{Rq?5uyC1AJ7R#hW zZDJpuMnOpkm@KRxm(aeDtx12`REhSb{UD%SKR zPc;2I*pF8xKpe%BO3wZp{pKPuG5Ey>R6hZhM-Y&Z?(~`%s`7F#^QMyvO7cEBJ44iDR5WgmMn%(EadR|=0I?tk zM`NkDI~q$xW<5@hhAfeE=h%X(I|xsLh{>Cdv!e;~k%{Vk4d7q}D@Wt$W)}7O%4RMS zA>s-^DHoXJNEmjOlE_q=lT2hz4u~jIt95cfmH-h|bqD0AI6ELmMJ6Iw2c#m(Y|6=T z7!pKG-gJB%keyFBQBkxaubdoMgZZ0wrxy34GZmzM4BS9?6$&uL@ z#!801=f#tYWF;VseO8EESoE++r0&)iqXeB#4+CViixv#{uh!RTRH=k5wEA4^i=S z+(pIHky({ntm5dp#wvjb5tl=(5-GXIDv^>*WNxvFtW!A0Duw_Nl|!szskp}~mWoV7 zZn27?>Kdy!5=2Z6v5KeTn#3CFfFh@m)2rUe;Tyk#;GBcT_ z{Z<1i1q>D2nZex?Vwwmb$=jY#ZC5@>cZF@CTS;-+T=Zr$xXN$My>d?85XqB{{_ zRx+`9IWJ{6vpsizW9Sgkg(2qcyzI;*;#*pUg~4btLHq3v!?p05vXaj9y2H-otvlUu zZ>!TAk4NKHz9hQz_b-RbvKW_c*h^2(%XjX5`?C1|z3k%7J$ibT1nJJvXuM 0.0, "your penetration rate should be above zero" +inflow.add( + veh_type="human", + edge="119257914", + vehs_per_hour=8378 * pen_rate, + # probability=1.0, + departLane="random", + departSpeed=20) +# on ramp +# inflow.add( +# veh_type="human", +# edge="27414345", +# vehs_per_hour=321 * pen_rate, +# departLane="random", +# departSpeed=20) +# inflow.add( +# veh_type="human", +# edge="27414342#0", +# vehs_per_hour=421 * pen_rate, +# departLane="random", +# departSpeed=20) + +# Now add the AVs +# main highway +inflow.add( + veh_type="av", + edge="119257914", + 
vehs_per_hour=int(8378 * pen_rate), + # probability=1.0, + departLane="random", + departSpeed=20) +# # on ramp +# inflow.add( +# veh_type="av", +# edge="27414345", +# vehs_per_hour=int(321 * pen_rate), +# departLane="random", +# departSpeed=20) +# inflow.add( +# veh_type="av", +# edge="27414342#0", +# vehs_per_hour=int(421 * pen_rate), +# departLane="random", +# departSpeed=20) + +NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") + +flow_params = dict( + # name of the experiment + exp_tag='I_210_subnetwork', + + # name of the flow environment the experiment is running on + env_name=I210MultiEnv, + + # name of the network class the experiment is running on + network=I210SubNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # simulation-related parameters + sim=SumoParams( + sim_step=0.8, + render=True, + color_by_speed=True, + restart_instance=True, + emission_path="/Users/akashvelu/Documents/data3" + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + sims_per_step=1, + additional_params=additional_env_params, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflow, + template=NET_TEMPLATE + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig( + edges_distribution=EDGES_DISTRIBUTION, + ), +) + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = 
test_env.action_space + +POLICY_GRAPHS = {'av': (PPOTFPolicy, obs_space, act_space, {})} + +POLICIES_TO_TRAIN = ['av'] + + +def policy_mapping_fn(_): + """Map a policy in RLlib.""" + return 'av' diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index a3f6864ae..e14ab5850 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -7,7 +7,7 @@ class ImitatingController(BaseController): """ - Controller which learns to imitate another given expert controller. + Controller which uses a given neural net to imitate an expert. Subclasses BaseController """ # Implementation in Tensorflow @@ -16,11 +16,30 @@ def __init__(self, veh_id, action_network, multiagent, car_following_params=None BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) self.action_network = action_network self.multiagent = multiagent + self.veh_id = veh_id def get_accel(self, env): + """ + Get acceleration for vehicle in the env + """ + if self.multiagent: observation = env.get_state()[self.veh_id] else: observation = env.get_state() - return self.action_network.get_accel_from_observation(observation) + action = self.action_network.get_accel_from_observation(observation) + + if not self.multiagent: + if self.action_network.action_dim > 1: + # TODO: fill in + try: + rl_ids = env.get_sorted_rl_ids() + except: + print("Error caught: no get_sorted_rl_ids function, using get_rl_ids instead") + rl_ids = env.get_rl_ids() + + assert self.veh_id in rl_ids, "Vehicle corresponding to controller not in env!" 
+ + ind = list.index(self.veh_id) + return action[ind] diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 8c7d35b27..5098f0314 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -10,9 +10,8 @@ class ImitatingNetwork(): """ Neural network which learns to imitate another given expert controller. """ - # Implementation in Tensorflow - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', load_existing=False, load_path=''): + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, noise_variance=0.5, policy_scope='policy_vars', load_existing=False, load_path=''): self.sess = sess self.action_dim = action_dim @@ -21,7 +20,7 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.size = size self.learning_rate = learning_rate self.training = training - self.inject_noise=inject_noise + self.stochastic=stochastic self.noise_variance = noise_variance if load_existing: @@ -58,12 +57,20 @@ def load_network(self, path): loader = tf.train.import_meta_graph(path + 'model.ckpt.meta') loader.restore(self.sess, path+'model.ckpt') - self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/obs:0') - self.action_predictions = tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') - - if self.inject_noise == 1: - self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) - + # print([n.name for n in tf.get_default_graph().as_graph_def().node]) + self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/observation:0') + network_output = 
tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') + + if self.stochastic: + # determine mean and (diagonal) covariance matrix for action distribution + mean = network_output[:self.action_dim] + cov_diag = network_output[self.action_dim:] + # set up action distribution (parameterized by network output) + dist = tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=cov_diag) + # action is a sample from this distribution + self.action_predictions = dist.sample() + else: + self.action_predictions = network_output def define_placeholders(self): """ @@ -80,53 +87,89 @@ def define_forward_pass(self): """ Build network and initialize proper action prediction op """ - pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.action_predictions = pred_action + self.stochastic = False + if self.stochastic: + output_size = 2 * self.action_dim + else: + output_size = self.action_dim + + network_output = build_neural_net(self.obs_placeholder, output_size=output_size, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.network_output = network_output + + # TODO: add this as a class variable + if self.stochastic: + # determine mean and (diagonal) covariance matrix for action distribution + mean = network_output[:self.action_dim] + cov_diag = network_output[self.action_dim:] + # set up action distribution (parameterized by network output) + self.dist = tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=cov_diag) + # action is a sample from this distribution + self.action_predictions = dist.sample() + + else: + self.dist = None + self.action_predictions = network_output - if self.inject_noise == 1: - self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) def define_train_op(self): """ - Defines training operations for network + Defines 
training operations for network (loss function and optimizer) """ true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions + network_prediction = self.network_output + + + if self.stochastic: + # negative log likelihood loss for stochastic policy + log_likelihood = self.dist.log_prob(true_actions) + self.loss = -tf.reduce_mean(log_likelihood) + else: + # MSE loss for deterministic policy + self.loss = tf.losses.mean_squared_error(true_actions, network_prediction) - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) def train(self, observation_batch, action_batch): """ - Executes one training step for the given batch of observation and action data + Executes one training step for the given batch of observation and action data """ action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) def get_accel_from_observation(self, observation): + """ + Gets the network's acceleration prediction based on given observation/state + """ + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays if len(observation.shape)<=1: observation = observation[None] ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - return ret_val def get_accel(self, env): + """ + Get network's acceleration prediction based on given env + """ # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays observation = env.get_state() return self.get_accel_from_observation(observation) + def add_to_replay_buffer(self, rollout_list): """ Add rollouts to replay buffer """ self.replay_buffer.add_rollouts(rollout_list) + def sample_data(self, batch_size): """ Sample a 
batch of data from replay buffer """ return self.replay_buffer.sample_batch(batch_size) def save_network(self, save_path): + """ Save network to given path and to tensorboard """ + self.saver.save(self.sess, save_path) # tensorboard writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) diff --git a/flow/controllers/imitation_learning/multiagent_ring_env.py b/flow/controllers/imitation_learning/multiagent_ring_env.py index 538679ed0..4fa72addc 100644 --- a/flow/controllers/imitation_learning/multiagent_ring_env.py +++ b/flow/controllers/imitation_learning/multiagent_ring_env.py @@ -63,7 +63,7 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( sim_step=0.1, - render=True, + render=False, restart_instance=False ), diff --git a/flow/controllers/imitation_learning/replay_script.py b/flow/controllers/imitation_learning/replay_script.py index 5e3984e0d..9d41afea8 100644 --- a/flow/controllers/imitation_learning/replay_script.py +++ b/flow/controllers/imitation_learning/replay_script.py @@ -20,7 +20,7 @@ def run_experiment(): action_dim = (1,)[0] sess = create_tf_session() - action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/') + action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models8_vdes14/') def get_rl_actions(state): rl_actions = {} diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 2b7e823cc..5ab94b425 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -29,13 +29,13 @@ def save_controller_network(self): def main(): import argparse parser = argparse.ArgumentParser() - parser.add_argument('--ep_len', type=int, default=3000) + parser.add_argument('--ep_len', type=int, default=5000) 
parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) parser.add_argument('--n_iter', type=int, default=5) - parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=3000) + parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=4000) parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step @@ -47,10 +47,11 @@ def main(): parser.add_argument('--save_path', type=str, default='') parser.add_argument('--save_model', type=int, default=0) parser.add_argument('--num_eval_episodes', type=int, default=30) - parser.add_argument('--inject_noise', type=int, default=0) + parser.add_argument('--stochastic', type=bool, default=False) parser.add_argument('--noise_variance',type=float, default=0.5) parser.add_argument('--vehicle_id', type=str, default='rl_0') parser.add_argument('--multiagent', type=bool, default=False) + parser.add_argument('--v_des', type=float, default=15) args = parser.parse_args() diff --git a/flow/controllers/imitation_learning/singleagent_straight_road.py b/flow/controllers/imitation_learning/singleagent_straight_road.py new file mode 100644 index 000000000..bcebad140 --- /dev/null +++ b/flow/controllers/imitation_learning/singleagent_straight_road.py @@ -0,0 +1,163 @@ +"""Multi-agent highway with ramps example. +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. 
+""" +from flow.controllers import RLController, IDMController +from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ + VehicleParams, SumoParams, SumoLaneChangeParams +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.networks import HighwayNetwork +from flow.envs.straightroad_env import SingleStraightRoad +from flow.networks.highway import ADDITIONAL_NET_PARAMS +from flow.utils.registry import make_create_env +from ray.tune.registry import register_env + + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 2000 + +# inflow rate on the highway in vehicles per hour +HIGHWAY_INFLOW_RATE = 10800 / 5 +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10 + + +# SET UP PARAMETERS FOR THE NETWORK + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2000, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2 +}) + + +# SET UP PARAMETERS FOR THE ENVIRONMENT + +additional_env_params = ADDITIONAL_ENV_PARAMS.copy() +additional_env_params.update({ + 'max_accel': 2.6, + 'max_decel': 4.5, + 'target_velocity': 14.0, + 'local_reward': True, + 'lead_obs': True, + "terminate_on_wave": False, + # the environment is not allowed to terminate below this horizon length + 'wave_termination_horizon': 1000, + # the speed below which we consider a wave to have occured + 'wave_termination_speed': 10.0, + # whether the vehicle continues to acquire reward after it exits the system. 
This causes it to have incentive + # to leave the network in a good state after it leaves + 'reward_after_exit': True +}) + + +# CREATE VEHICLE TYPES AND INFLOWS + +vehicles = VehicleParams() +inflows = InFlows() + +# human vehicles +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) + +# autonomous vehicles +vehicles.add( + veh_id='rl', + acceleration_controller=(RLController, {})) + +# add human vehicles on the highway +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="idm_highway_inflow") + +# add autonomous vehicles on the highway +# they will stay on the highway, i.e. they won't exit through the off-ramps +inflows.add( + veh_type="rl", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="rl_highway_inflow") + +# SET UP FLOW PARAMETERS +done_at_exit = True +if additional_env_params['reward_after_exit']: + done_at_exit = False + +flow_params = dict( + # name of the experiment + exp_tag='singleagent_highway', + + # name of the flow environment the experiment is running on + env_name=SingleStraightRoad, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, # do not put more than one + done_at_exit=done_at_exit, + additional_params=additional_env_params, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + use_ballistic=True, + restart_instance=True + ), + + # network-related parameters (see 
flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = test_env.action_space diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 937ab4793..a390502d4 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -34,33 +34,32 @@ def __init__(self, params): # vehicle setup self.multiagent = params['multiagent'] - # TODO: remove print - print("MULTI: ", self.multiagent) - self.vehicle_ids = self.env.k.vehicle.get_rl_ids() # neural net setup obs_dim = self.env.observation_space.shape[0] - action_dim = (1,)[0] + action_dim = self.env.action_space.shape[0] + self.params['action_dim'] = action_dim self.params['obs_dim'] = obs_dim - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], 
noise_variance=self.params['noise_variance']) tf.global_variables_initializer().run(session=self.sess) # controllers setup + v_des = self.params['v_des'] car_following_params = SumoCarFollowingParams() self.controllers = dict() for vehicle_id in self.vehicle_ids: - expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) imitator = ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params) self.controllers[vehicle_id] = (imitator, expert) def run_training_loop(self, n_iter): """ - Trains imitator for n_iter iterations + Trains imitator for n_iter iterations (each iter runs optimizer once on given batch of dat) Args: param n_iter: number of iterations to execute training @@ -68,14 +67,13 @@ def run_training_loop(self, n_iter): # init vars at beginning of training self.total_envsteps = 0 - self.start_time = time.time() for itr in range(n_iter): print("\n\n********** Iteration %i ************"%itr) # collect trajectories, to be used for training if itr == 0: - # first iteration is standard behavioral cloning + # first iteration is behavioral cloning training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) else: training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) @@ -102,7 +100,7 @@ def collect_training_trajectories(self, itr, batch_size): """ print("\nCollecting data to be used for training...") - trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0) + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0, v_des=self.params['v_des']) return trajectories, envsteps_this_batch @@ -126,9 +124,8 @@ 
def evaluate_controller(self, num_trajs = 10): print("\n\n********** Evaluation ************ \n") - trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False) + trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False, v_des=self.params['v_des']) - average_imitator_reward = 0 total_imitator_steps = 0 average_imitator_reward_per_rollout = 0 @@ -137,9 +134,10 @@ def evaluate_controller(self, num_trajs = 10): average_action_imitator = 0 # compare actions taken in each step of trajectories - for traj_pair in trajectories: - traj = traj_pair[0] - traj_len = traj_pair[1] + for traj_tuple in trajectories: + traj = traj_tuple[0] + traj_len = traj_tuple[1] + imitator_actions = traj['actions'] expert_actions = traj['expert_actions'] @@ -149,45 +147,45 @@ def evaluate_controller(self, num_trajs = 10): action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) action_errors = np.append(action_errors, action_error) - average_imitator_reward += np.sum(traj['rewards']) total_imitator_steps += traj_len average_imitator_reward_per_rollout += np.sum(traj['rewards']) - average_imitator_reward = average_imitator_reward / total_imitator_steps average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) average_action_expert = average_action_expert / total_imitator_steps average_action_imitator = average_action_imitator / total_imitator_steps - expert_trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, True) + expert_trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, True, v_des=self.params['v_des']) average_expert_reward = 0 total_expert_steps = 0 average_expert_reward_per_rollout = 0 # 
compare reward accumulated in trajectories collected via expert vs. via imitator - for traj_pair in expert_trajectories: - traj = traj_pair[0] - traj_len = traj_pair[1] + for traj_tuple in expert_trajectories: + traj = traj_tuple[0] + traj_len = traj_tuple[1] average_expert_reward += np.sum(traj['rewards']) total_expert_steps += traj_len average_expert_reward_per_rollout += np.sum(traj['rewards']) average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) - average_expert_reward = average_expert_reward / total_expert_steps - print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) - print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) - print("AVERAGE REWARD PER STEP DIFFERENCE: ", np.abs(average_expert_reward - average_imitator_reward), "\n") + average_expert_reward = average_expert_reward / total_expert_steps print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + print("MEAN EXPERT ACTION: ", average_action_expert) print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") + def save_controller_network(self): + """ + Saves a tensorflow model to the specified path given in the command line params. 
Path must end with .ckpt + """ print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 499e06f1d..a1334066f 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -7,10 +7,11 @@ from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.rewards import * """ Class agnostic helper functions """ -def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert): +def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des): """ Samples a trajectory for a given vehicle using the actions prescribed by specified controller. Args: @@ -24,8 +25,6 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto """ vehicle_ids = env.k.vehicle.get_rl_ids() - print("VEHICLE IDS: ", vehicle_ids) - assert len(vehicle_ids) <= 1, "Not single-agent" observation = env.reset() if len(vehicle_ids) == 1: @@ -38,7 +37,7 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto while True: - # update vehicle ids and make sure it is single agent + # update vehicle ids vehicle_ids = env.k.vehicle.get_rl_ids() if len(vehicle_ids) == 0: observation, reward, done, _ = env.step(None) @@ -46,43 +45,65 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto break continue - assert len(vehicle_ids) == 1, "Not single agent" + # init controllers if any of vehicle ids are new + # there could be multiple vehicle ids if they all share one state but have different actions + for vehicle_id in vehicle_ids: + if vehicle_id not in set(controllers.get_keys()): + expert = 
FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) + imitator = ImitatingController(vehicle_id, action_network, false, car_following_params=car_following_params) + controllers[vehicle_id] = (imitator, expert) - # init controllers if vehicle id is new - vehicle_id = vehicle_ids[0] - if vehicle_id not in set(controllers.get_keys()): - expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) - imitator = ImitatingController(vehicle_id, action_network, false, car_following_params=car_following_params) - controllers[vehicle_id] = (imitator, expert) + print("CONTROLLING CONTROLLER: ", controller) + print("EXPERT CONTROLLER: ", expert_controller) - # decide which controller to use to collect trajectory - expert_controller = controllers[vehicle_id][1] - if use_expert: - controller = expert_controller - else: - controller = controllers[vehicle_id][0] + # get the actions + action_dim = env.action_space.shape[0] + rl_actions = [] + expert_actions = [] + invalid_expert_action = False + for i in range(len(action_dim)): + # if max number of RL vehicles is not reached, insert dummy values + if i >= len(vehicle_ids): + rl_actions.append(0.0) + expert_actions.append(0.0) + else: + imitator = controllers[vehicle_ids[i]][0] + expert = controllers[vehicle_ids[i]][1] - print("COLLECTING CONTROLLER: ", controller) - print("EXPERT CONTROLLER: ", expert_controller) + expert_action = expert.get_action(env) + # catch invalid expert actions + if (expert_action is None or math.isnan(expert_action)): + invalid_expert_action = True + + expert_actions.append(expert_action) - action = controller.get_action(env) - if type(action) == np.ndarray: - action = action.flatten()[0] + if use_expert: + rl_actions.append(expert_action) + else: + rl_actions.append(imitator.get_action(env)) + + + # don't add invalid expert actions to replay buffer if any are invalid + if invalid_expert_action: + if use_expert: + observation, reward, done, _ = 
env.step(None) + else: + observation, reward, done, _ = env.step(rl_actions) - expert_action = expert_controller.get_action(env) - if (expert_action is None or math.isnan(expert_action)): - observation, reward, done, _ = env.step(action) terminate_rollout = traj_length == max_trajectory_length or done + if terminate_rollout: break + # skip to next step continue + # update collected data observations.append(observation) - actions.append(action) - expert_actions.append(expert_action) - observation, reward, done, _ = env.step(action) + actions.append(rl_actions) + expert_actions.append(expert_actions) + observation, reward, done, _ = env.step(rl_actions) traj_length += 1 next_observations.append(observation) @@ -93,10 +114,10 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto if terminate_rollout: break - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length) -def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert): +def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des): """ Samples a trajectory for a given set of vehicles using the actions prescribed by specified controller. 
@@ -117,9 +138,10 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector traj_length = 0 while True: + vehicle_ids = env.k.vehicle.get_rl_ids() + # add nothing to replay buffer if no vehicles if len(vehicle_ids) == 0: - print("NO RL VEHICLES") observation_dict, reward, done, _ = env.step(None) print(env.k.vehicle.get_rl_ids()) if done['__all__']: @@ -138,7 +160,7 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector if vehicle_id not in set(controllers.keys()): car_following_params = SumoCarFollowingParams() - expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) imitator = ImitatingController(vehicle_id, action_network, True, car_following_params=car_following_params) controllers[vehicle_id] = (imitator, expert) @@ -193,7 +215,7 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length -def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert): +def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert, v_des=15): """ Samples trajectories to collect at least min_batch_timesteps steps in the environment @@ -204,6 +226,7 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m expert_controller: subclass of BaseController, "expert" for imitation learning min_batch_timesteps: minimum number of environment steps to collect max_trajectory_length: maximum steps in a trajectory + v_des: parameter used for follower-stopper (applies if Expert controller is follower-stopper) Returns: List of rollout dictionaries, total steps taken by environment @@ -214,9 +237,9 @@ def sample_trajectories(env, controllers, 
action_network, min_batch_timesteps, m while total_envsteps < min_batch_timesteps: if multiagent: - trajectory, traj_length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert) + trajectory, traj_length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) else: - trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert) + trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) trajectories.append(trajectory) @@ -224,7 +247,7 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m return trajectories, total_envsteps -def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert): +def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert, v_des=15): """ Collects a fixed number of trajectories. 
@@ -235,6 +258,8 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le expert_controller: subclass of BaseController, "expert" for imitation learning n: number of trajectories to collect max_trajectory_length: maximum steps in a trajectory + v_des: parameter used for follower-stopper (applies if Expert controller is follower-stopper) + Returns: List of rollout dictionaries @@ -244,9 +269,9 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le for _ in range(n): if multiagent: - trajectory, length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert) + trajectory, length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) else: - trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert) + trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) trajectories.append((trajectory, length)) diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py index 57000323f..1636da035 100644 --- a/flow/controllers/imitation_learning/utils_tensorflow.py +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -2,7 +2,7 @@ import tensorflow as tf -# Below are tensorflow related functions +""" Class agnostic helper functions related to tensorflow""" def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ From f924d9cbc75de9da430785734f3b080f8fb94167 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 3 May 2020 23:34:33 -0700 Subject: [PATCH 14/57] Bug fixes to stochastic policies and singlagent with multiple RL vehicles --- .../imitation_learning/bottleneck_env.py | 150 -------------- .../imitation_learning/i210_multiagent.py | 193 ------------------ 
.../i210_multiagent_ghost.py | 181 ---------------- .../imitating_controller.py | 50 +++-- .../imitation_learning/imitating_network.py | 93 ++++++--- .../imitation_learning/multiagent_ring_env.py | 99 --------- .../imitation_learning/replay_buffer.py | 2 + .../imitation_learning/ring_env.py | 85 -------- flow/controllers/imitation_learning/run.py | 16 +- .../singleagent_straight_road.py | 163 --------------- .../controllers/imitation_learning/trainer.py | 87 +++++--- flow/controllers/imitation_learning/utils.py | 75 ++++--- 12 files changed, 211 insertions(+), 983 deletions(-) delete mode 100644 flow/controllers/imitation_learning/bottleneck_env.py delete mode 100644 flow/controllers/imitation_learning/i210_multiagent.py delete mode 100644 flow/controllers/imitation_learning/i210_multiagent_ghost.py delete mode 100644 flow/controllers/imitation_learning/multiagent_ring_env.py delete mode 100644 flow/controllers/imitation_learning/ring_env.py delete mode 100644 flow/controllers/imitation_learning/singleagent_straight_road.py diff --git a/flow/controllers/imitation_learning/bottleneck_env.py b/flow/controllers/imitation_learning/bottleneck_env.py deleted file mode 100644 index 820244a87..000000000 --- a/flow/controllers/imitation_learning/bottleneck_env.py +++ /dev/null @@ -1,150 +0,0 @@ -"""Bottleneck example. 
-Bottleneck in which the actions are specifying a desired velocity -in a segment of space -""" -from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ - InFlows, SumoCarFollowingParams, SumoLaneChangeParams -from flow.core.params import TrafficLightParams -from flow.core.params import VehicleParams -from flow.controllers import RLController, ContinuousRouter, \ - SimLaneChangeController -from flow.envs import BottleneckDesiredVelocityEnv -from flow.networks import BottleneckNetwork - -# time horizon of a single rollout -HORIZON = 1000 -# number of parallel workers -N_CPUS = 2 -# number of rollouts per training iteration -N_ROLLOUTS = N_CPUS * 4 - -SCALING = 1 -NUM_LANES = 4 * SCALING # number of lanes in the widest highway -DISABLE_TB = True -DISABLE_RAMP_METER = True -AV_FRAC = 0.10 - -vehicles = VehicleParams() -vehicles.add( - veh_id="human", - lane_change_controller=(SimLaneChangeController, {}), - routing_controller=(ContinuousRouter, {}), - car_following_params=SumoCarFollowingParams( - speed_mode="all_checks", - ), - lane_change_params=SumoLaneChangeParams( - lane_change_mode=0, - ), - num_vehicles=1 * SCALING) -vehicles.add( - veh_id="followerstopper", - acceleration_controller=(RLController, {}), - lane_change_controller=(SimLaneChangeController, {}), - routing_controller=(ContinuousRouter, {}), - car_following_params=SumoCarFollowingParams( - speed_mode=9, - ), - lane_change_params=SumoLaneChangeParams( - lane_change_mode=0, - ), - num_vehicles=1 * SCALING) - -controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), - ("4", 2, True), ("5", 1, False)] -num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] -additional_env_params = { - "target_velocity": 40, - "disable_tb": True, - "disable_ramp_metering": True, - "controlled_segments": controlled_segments, - "symmetric": False, - "observed_segments": num_observed_segments, - "reset_inflow": False, - "lane_change_duration": 5, - "max_accel": 3, - 
"max_decel": 3, - "inflow_range": [1000, 2000] -} - -# flow rate -flow_rate = 2300 * SCALING - -# percentage of flow coming out of each lane -inflow = InFlows() -inflow.add( - veh_type="human", - edge="1", - vehs_per_hour=flow_rate * (1 - AV_FRAC), - departLane="random", - departSpeed=10) -inflow.add( - veh_type="followerstopper", - edge="1", - vehs_per_hour=flow_rate * AV_FRAC, - departLane="random", - departSpeed=10) - -traffic_lights = TrafficLightParams() -if not DISABLE_TB: - traffic_lights.add(node_id="2") -if not DISABLE_RAMP_METER: - traffic_lights.add(node_id="3") - -additional_net_params = {"scaling": SCALING, "speed_limit": 23} -net_params = NetParams( - inflows=inflow, - additional_params=additional_net_params) - -flow_params = dict( - # name of the experiment - exp_tag="DesiredVelocity", - - # name of the flow environment the experiment is running on - env_name=BottleneckDesiredVelocityEnv, - - # name of the network class the experiment is running on - network=BottleneckNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.5, - render=False, - print_warnings=False, - restart_instance=True, - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - warmup_steps=40, - sims_per_step=1, - horizon=HORIZON, - additional_params=additional_env_params, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - additional_params=additional_net_params, - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - spacing="uniform", - min_gap=5, - lanes_distribution=float("inf"), 
- edges_distribution=["2", "3", "4", "5"], - ), - - # traffic lights to be introduced to specific nodes (see - # flow.core.params.TrafficLightParams) - tls=traffic_lights, -) diff --git a/flow/controllers/imitation_learning/i210_multiagent.py b/flow/controllers/imitation_learning/i210_multiagent.py deleted file mode 100644 index dcb1135f3..000000000 --- a/flow/controllers/imitation_learning/i210_multiagent.py +++ /dev/null @@ -1,193 +0,0 @@ -"""Multi-agent I-210 example. -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. -""" -import os -import numpy as np - -from ray.tune.registry import register_env - -from flow.controllers import RLController -from flow.controllers.car_following_models import IDMController -import flow.config as config -from flow.core.params import EnvParams -from flow.core.params import NetParams -from flow.core.params import InitialConfig -from flow.core.params import InFlows -from flow.core.params import VehicleParams -from flow.core.params import SumoParams -from flow.core.params import SumoLaneChangeParams -from flow.core.rewards import energy_consumption -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS -from flow.utils.registry import make_create_env - -# SET UP PARAMETERS FOR THE SIMULATION - -# number of steps per rollout -HORIZON = 4000 - -VEH_PER_HOUR_BASE_119257914 = 10800 -VEH_PER_HOUR_BASE_27414345 = 321 -VEH_PER_HOUR_BASE_27414342 = 421 - -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 - -# SET UP PARAMETERS FOR THE ENVIRONMENT -additional_env_params = ADDITIONAL_ENV_PARAMS.copy() -additional_env_params.update({ - 'max_accel': 2.6, - 'max_decel': 4.5, - # configure the observation space. Look at the I210MultiEnv class for more info. 
- 'lead_obs': True, - # whether to add in a reward for the speed of nearby vehicles - "local_reward": True -}) - -# CREATE VEHICLE TYPES AND INFLOWS -# no vehicles in the network -vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), -) -vehicles.add( - "av", - acceleration_controller=(RLController, {}), - num_vehicles=0, -) - -inflow = InFlows() -# main highway -pen_rate = PENETRATION_RATE / 100 -assert pen_rate < 1.0, "your penetration rate is over 100%" -assert pen_rate > 0.0, "your penetration rate should be above zero" -inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * (1 - pen_rate)), - # probability=1.0, - depart_lane="random", - departSpeed=20) -# # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321 * pen_rate, -# depart_lane="random", -# depart_speed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421 * pen_rate, -# depart_lane="random", -# depart_speed=20) - -# Now add the AVs -# main highway -inflow.add( - veh_type="av", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pen_rate), - # probability=1.0, - depart_lane="random", - depart_speed=20) -# # on ramp -# inflow.add( -# veh_type="av", -# edge="27414345", -# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414345 * pen_rate), -# depart_lane="random", -# depart_speed=20) -# inflow.add( -# veh_type="av", -# edge="27414342#0", -# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414342 * pen_rate), -# depart_lane="random", -# depart_speed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") - -flow_params = dict( - # name of the experiment - exp_tag='I_210_subnetwork', - - # name of the flow environment the experiment is running on - env_name=I210MultiEnv, - - # name 
of the network class the experiment is running on - network=I210SubNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # simulation-related parameters - sim=SumoParams( - sim_step=0.5, - render=False, - color_by_speed=False, - restart_instance=True, - use_ballistic=True, - emission_path="/Users/akashvelu/Documents/data14_2" - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - sims_per_step=1, - warmup_steps=0, - additional_params=additional_env_params, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - template=NET_TEMPLATE - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, - ), -) - -# SET UP RLLIB MULTI-AGENT FEATURES - -create_env, env_name = make_create_env(params=flow_params, version=0) - -# register as rllib env -register_env(env_name, create_env) - -# multiagent configuration -test_env = create_env() -obs_space = test_env.observation_space -act_space = test_env.action_space - -POLICY_GRAPHS = {'av': (None, obs_space, act_space, {})} - -POLICIES_TO_TRAIN = ['av'] - - -def policy_mapping_fn(_): - """Map a policy in RLlib.""" - return 'av' - - -custom_callables = { - "avg_speed": lambda env: np.mean([speed for speed in - env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), - "avg_outflow": lambda env: np.nan_to_num( - env.k.vehicle.get_outflow_rate(120)), - "avg_energy": lambda env: -1*energy_consumption(env, 0.1) -} diff --git a/flow/controllers/imitation_learning/i210_multiagent_ghost.py b/flow/controllers/imitation_learning/i210_multiagent_ghost.py deleted file mode 
100644 index f3357f94b..000000000 --- a/flow/controllers/imitation_learning/i210_multiagent_ghost.py +++ /dev/null @@ -1,181 +0,0 @@ -"""Multi-agent I-210 example. -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. -""" -import os - -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy -from ray.tune.registry import register_env - -import flow.config as config -from flow.controllers.rlcontroller import RLController -from flow.core.params import EnvParams -from flow.core.params import NetParams -from flow.core.params import InitialConfig -from flow.core.params import InFlows -from flow.core.params import VehicleParams -from flow.core.params import SumoParams -from flow.core.params import SumoLaneChangeParams -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS -from flow.utils.registry import make_create_env - -# SET UP PARAMETERS FOR THE SIMULATION - -# number of training iterations -N_TRAINING_ITERATIONS = 200 -# number of rollouts per training iteration -N_ROLLOUTS = 2 -# number of steps per rollout -HORIZON = 4000 -# number of parallel workers -N_CPUS = 1 - -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 - -# SET UP PARAMETERS FOR THE ENVIRONMENT -additional_env_params = ADDITIONAL_ENV_PARAMS.copy() -additional_env_params.update({ - 'max_accel': 1, - 'max_decel': 1, - # configure the observation space. Look at the I210MultiEnv class for more info. 
- 'lead_obs': True, -}) - -# CREATE VEHICLE TYPES AND INFLOWS -# no vehicles in the network -vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ) -) -vehicles.add( - "av", - acceleration_controller=(RLController, {}), - num_vehicles=0, -) - -inflow = InFlows() -# main highway -pen_rate = PENETRATION_RATE / 100 -assert pen_rate < 1.0, "your penetration rate is over 100%" -assert pen_rate > 0.0, "your penetration rate should be above zero" -inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=8378 * pen_rate, - # probability=1.0, - departLane="random", - departSpeed=20) -# on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321 * pen_rate, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421 * pen_rate, -# departLane="random", -# departSpeed=20) - -# Now add the AVs -# main highway -inflow.add( - veh_type="av", - edge="119257914", - vehs_per_hour=int(8378 * pen_rate), - # probability=1.0, - departLane="random", - departSpeed=20) -# # on ramp -# inflow.add( -# veh_type="av", -# edge="27414345", -# vehs_per_hour=int(321 * pen_rate), -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="av", -# edge="27414342#0", -# vehs_per_hour=int(421 * pen_rate), -# departLane="random", -# departSpeed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") - -flow_params = dict( - # name of the experiment - exp_tag='I_210_subnetwork', - - # name of the flow environment the experiment is running on - env_name=I210MultiEnv, - - # name of the network class the experiment is running on - network=I210SubNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # simulation-related parameters - sim=SumoParams( - sim_step=0.8, - render=True, - color_by_speed=True, - 
restart_instance=True, - emission_path="/Users/akashvelu/Documents/data3" - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - sims_per_step=1, - additional_params=additional_env_params, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - template=NET_TEMPLATE - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, - ), -) - -# SET UP RLLIB MULTI-AGENT FEATURES - -create_env, env_name = make_create_env(params=flow_params, version=0) - -# register as rllib env -register_env(env_name, create_env) - -# multiagent configuration -test_env = create_env() -obs_space = test_env.observation_space -act_space = test_env.action_space - -POLICY_GRAPHS = {'av': (PPOTFPolicy, obs_space, act_space, {})} - -POLICIES_TO_TRAIN = ['av'] - - -def policy_mapping_fn(_): - """Map a policy in RLlib.""" - return 'av' diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index e14ab5850..70c483596 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -12,34 +12,50 @@ class ImitatingController(BaseController): # Implementation in Tensorflow def __init__(self, veh_id, action_network, multiagent, car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + """ + Args: + veh_id: ID of vehicle to control + action_network: Instance of imitating_network class; neural net that gives action given state + multiagent: boolean indicating if env is multiagent or singleagent + """ 
BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.action_network = action_network - self.multiagent = multiagent - self.veh_id = veh_id + self.action_network = action_network # neural network which specifies action to take + self.multiagent = multiagent # whether env is multiagent or singleagent + self.veh_id = veh_id # vehicle id that controller is controlling def get_accel(self, env): """ - Get acceleration for vehicle in the env - """ + Args: + env: instance of environment being used + Get acceleration for vehicle in the env, using action_network. Overrides superclass method. + """ + # observation is a dictionary for multiagent envs, list for singleagent envs if self.multiagent: observation = env.get_state()[self.veh_id] else: observation = env.get_state() - action = self.action_network.get_accel_from_observation(observation) + # get action from neural net + action = self.action_network.get_accel_from_observation(observation)[0] + + # handles singleagent case in which there are multiple RL vehicles sharing common state + # if action space is multidimensional, obtain the corresponding action for the vehicle + if not self.multiagent and self.action_network.action_dim > 1: + + # get_sorted_rl_ids used for singleagent_straight_road; use get_rl_ids if method does not exist + try: + rl_ids = env.get_sorted_rl_ids() + except: + print("Error caught: no get_sorted_rl_ids function, using get_rl_ids instead") + rl_ids = env.k.vehicle.get_rl_ids() - if not self.multiagent: - if self.action_network.action_dim > 1: - # TODO: fill in - try: - rl_ids = env.get_sorted_rl_ids() - except: - print("Error caught: no get_sorted_rl_ids function, using get_rl_ids instead") - rl_ids = env.get_rl_ids() + assert self.veh_id in rl_ids, "Vehicle corresponding to controller not in env!" - assert self.veh_id in rl_ids, "Vehicle corresponding to controller not in env!" 
+ # return the action taken by the vehicle + ind = rl_ids.index(self.veh_id) + return action[ind] - ind = list.index(self.veh_id) - return action[ind] + # in other cases, acceleration is the output of the network + return action[0] diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 5098f0314..0ea5c32c8 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -8,10 +8,29 @@ class ImitatingNetwork(): """ - Neural network which learns to imitate another given expert controller. + Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, noise_variance=0.5, policy_scope='policy_vars', load_existing=False, load_path=''): + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, policy_scope='policy_vars', load_existing=False, load_path=''): + + """ + Initializes and constructs neural network + + Args: + sess: Tensorflow session variable + action_dim: dimension of action space (determines size of network output) + obs_dim: dimension of observation space (size of network input) + num_layers: number of hidden layers (for an MLP) + size: size of each layer in network + learning_rate: learning rate used in optimizer + replay_buffer_size: maximum size of replay buffer used to hold data for training + training: boolean, whether the network will be trained (as opposed to loaded) + stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy + policy_scope: variable scope used by Tensorflow for weights/biases + load_existing: boolean, whether to load an existing tensorflow model + load_path: path to directory containing an 
existing tensorflow model + + """ self.sess = sess self.action_dim = action_dim @@ -21,8 +40,8 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.learning_rate = learning_rate self.training = training self.stochastic=stochastic - self.noise_variance = noise_variance + # load network if specified, or construct network if load_existing: self.load_network(load_path) @@ -30,21 +49,25 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): self.build_network() + # init replay buffer if self.training: self.replay_buffer = ReplayBuffer(replay_buffer_size) else: self.replay_buffer = None + # set up policy variables, and saver to save model. Save only non-training variables (weights/biases) if not load_existing: self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) def build_network(self): """ - Defines neural network for choosing actions. + Defines neural network for choosing actions. Defines placeholders and forward pass """ + # setup placeholders for network input and labels for training, and hidden layers/output self.define_placeholders() self.define_forward_pass() + # set up training operation (e.g. 
Adam optimizer) if self.training: with tf.variable_scope('train', reuse=tf.AUTO_REUSE): self.define_train_op() @@ -54,31 +77,39 @@ def load_network(self, path): """ Load tensorflow model from the path specified, set action prediction to proper placeholder """ + # load and restore model loader = tf.train.import_meta_graph(path + 'model.ckpt.meta') loader.restore(self.sess, path+'model.ckpt') - # print([n.name for n in tf.get_default_graph().as_graph_def().node]) + # get observation placeholder (for input into network) self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/observation:0') + # get output tensor (using name of appropriate tensor) network_output = tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') + # for stochastic policies, the network output is twice the action dimension. First half specifies the mean of a multivariate gaussian distribution, second half specifies the diagonal entries for the diagonal covariance matrix. + # for deterministic policies, network output is the action. 
if self.stochastic: - # determine mean and (diagonal) covariance matrix for action distribution - mean = network_output[:self.action_dim] - cov_diag = network_output[self.action_dim:] + # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution + means = network_output[:, :self.action_dim] + cov_diags = network_output[:, self.action_dim:] + # set up action distribution (parameterized by network output) - dist = tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=cov_diag) - # action is a sample from this distribution - self.action_predictions = dist.sample() + # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians + self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=cov_diags) + # action is a sample from this distribution; one sample output per Gaussian contained in self.dist + self.action_predictions = self.dist.sample() else: + self.dist = None self.action_predictions = network_output def define_placeholders(self): """ Defines input, output, and training placeholders for neural net """ + # placeholder for observations (input into network) self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="observation", dtype=tf.float32) - self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + # if training, define placeholder for labels (supervised leearning) if self.training: self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) @@ -87,24 +118,30 @@ def define_forward_pass(self): """ Build network and initialize proper action prediction op """ - self.stochastic = False + # network output is twice action dim if stochastic (1st half mean, 2nd half diagonal elements of covariance) if self.stochastic: output_size = 2 * self.action_dim else: output_size = self.action_dim + # build forward pass and get the 
tensor for output of last layer network_output = build_neural_net(self.obs_placeholder, output_size=output_size, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.network_output = network_output - # TODO: add this as a class variable + # unpack array of array into just array + # if self.stochastic: + # # network_output = network_output[0] + + # parse the mean and covariance from output if stochastic, and set up distribution if self.stochastic: - # determine mean and (diagonal) covariance matrix for action distribution - mean = network_output[:self.action_dim] - cov_diag = network_output[self.action_dim:] + # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution + means = network_output[:, :self.action_dim] + cov_diags = network_output[:, self.action_dim:] + # set up action distribution (parameterized by network output) - self.dist = tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=cov_diag) - # action is a sample from this distribution - self.action_predictions = dist.sample() + # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians + self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=cov_diags) + # action is a sample from this distribution; one sample output per Gaussian contained in self.dist + self.action_predictions = self.dist.sample() else: self.dist = None @@ -115,9 +152,9 @@ def define_train_op(self): """ Defines training operations for network (loss function and optimizer) """ + # labels true_actions = self.action_labels_placeholder - network_prediction = self.network_output - + predicted_actions = self.action_predictions if self.stochastic: # negative log likelihood loss for stochastic policy @@ -125,16 +162,18 @@ def define_train_op(self): self.loss = -tf.reduce_mean(log_likelihood) else: # MSE loss for deterministic policy - self.loss = 
tf.losses.mean_squared_error(true_actions, network_prediction) + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + # Adam optimizer self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) def train(self, observation_batch, action_batch): """ Executes one training step for the given batch of observation and action data """ + # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) def get_accel_from_observation(self, observation): """ @@ -144,14 +183,14 @@ def get_accel_from_observation(self, observation): # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays if len(observation.shape)<=1: observation = observation[None] + # "batch size" is 1, so just get single acceleration/acceleration vector ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] return ret_val def get_accel(self, env): """ - Get network's acceleration prediction based on given env + Get network's acceleration prediction(s) based on given env """ - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays observation = env.get_state() return self.get_accel_from_observation(observation) diff --git a/flow/controllers/imitation_learning/multiagent_ring_env.py b/flow/controllers/imitation_learning/multiagent_ring_env.py deleted file mode 100644 index 4fa72addc..000000000 --- a/flow/controllers/imitation_learning/multiagent_ring_env.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Ring road example. 
-Trains a number of autonomous vehicles to stabilize the flow of 22 vehicles in -a variable length ring road. -""" -from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams -from flow.core.params import VehicleParams, SumoCarFollowingParams -from flow.controllers import RLController, IDMController, ContinuousRouter -from flow.envs.multiagent import MultiAgentWaveAttenuationPOEnv -from flow.networks import RingNetwork -from flow.utils.registry import make_create_env - -# time horizon of a single rollout -HORIZON = 3000 -# number of rollouts per training iteration -N_ROLLOUTS = 20 -# number of parallel workers -N_CPUS = 2 -# number of automated vehicles. Must be less than or equal to 22. -NUM_AUTOMATED = 2 - - -# We evenly distribute the automated vehicles in the network. -num_human = 22 - NUM_AUTOMATED -humans_remaining = num_human - -vehicles = VehicleParams() -for i in range(NUM_AUTOMATED): - # Add one automated vehicle. - vehicles.add( - veh_id="rl_{}".format(i), - acceleration_controller=(RLController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=1) - - # Add a fraction of the remaining human vehicles. 
- vehicles_to_add = round(humans_remaining / (NUM_AUTOMATED - i)) - humans_remaining -= vehicles_to_add - vehicles.add( - veh_id="human_{}".format(i), - acceleration_controller=(IDMController, { - "noise": 0.2 - }), - car_following_params=SumoCarFollowingParams( - min_gap=0 - ), - routing_controller=(ContinuousRouter, {}), - num_vehicles=vehicles_to_add) - - -flow_params = dict( - # name of the experiment - exp_tag="multiagent_ring", - - # name of the flow environment the experiment is running on - env_name=MultiAgentWaveAttenuationPOEnv, - - # name of the network class the experiment is running on - network=RingNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.1, - render=False, - restart_instance=False - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=750, - clip_actions=False, - additional_params={ - "max_accel": 1, - "max_decel": 1, - "ring_length": [220, 270], - }, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - additional_params={ - "length": 260, - "lanes": 1, - "speed_limit": 30, - "resolution": 40, - }, ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py index 77902814c..58bdd2cd7 100644 --- a/flow/controllers/imitation_learning/replay_buffer.py +++ b/flow/controllers/imitation_learning/replay_buffer.py @@ -8,6 +8,7 @@ class ReplayBuffer(object): def __init__(self, max_size=100000): + # max size of buffer 
self.max_size = max_size # store each rollout @@ -34,6 +35,7 @@ def add_rollouts(self, rollouts_list): assert (not np.any(np.isnan(expert_actions))), "Invalid actions added to replay buffer" + # only keep max_size tuples in buffer if self.observations is None: self.observations = observations[-self.max_size:] self.actions = actions[-self.max_size:] diff --git a/flow/controllers/imitation_learning/ring_env.py b/flow/controllers/imitation_learning/ring_env.py deleted file mode 100644 index 20ced1ce9..000000000 --- a/flow/controllers/imitation_learning/ring_env.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Ring road example. -Trains a single autonomous vehicle to stabilize the flow of 21 human-driven -vehicles in a variable length ring road. -""" -from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams -from flow.core.params import VehicleParams, SumoCarFollowingParams -from flow.controllers import RLController, IDMController, ContinuousRouter -from flow.envs import WaveAttenuationPOEnv -from flow.networks import RingNetwork - -# time horizon of a single rollout -HORIZON = 3000 -# number of rollouts per training iteration -N_ROLLOUTS = 20 -# number of parallel workers -N_CPUS = 2 - -# We place one autonomous vehicle and 22 human-driven vehicles in the network -vehicles = VehicleParams() -vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, { - "noise": 0.2 - }), - car_following_params=SumoCarFollowingParams( - min_gap=0 - ), - routing_controller=(ContinuousRouter, {}), - num_vehicles=21) -vehicles.add( - veh_id="rl", - acceleration_controller=(RLController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=1) - -flow_params = dict( - # name of the experiment - exp_tag="stabilizing_the_ring", - - # name of the flow environment the experiment is running on - env_name=WaveAttenuationPOEnv, - - # name of the network class the experiment is running on - network=RingNetwork, - - # simulator that is used by the experiment - 
simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.1, - render=False, - restart_instance=False - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=750, - clip_actions=False, - additional_params={ - "max_accel": 1, - "max_decel": 1, - "ring_length": [220, 270], - }, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - additional_params={ - "length": 260, - "lanes": 1, - "speed_limit": 30, - "resolution": 40, - }, ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 5ab94b425..17434d63e 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -1,7 +1,6 @@ import os import time import numpy as np -#import tensorflow as tf from trainer import Trainer from flow.controllers.car_following_models import IDMController @@ -11,22 +10,33 @@ class Runner(object): def __init__(self, params): - # initialize trainer + # initialize trainer class instance and params self.params = params self.trainer = Trainer(params) def run_training_loop(self): - + """ + Runs training for imitation learning for specified number of iterations + """ self.trainer.run_training_loop(n_iter=self.params['n_iter']) def evaluate(self): + """ + Evaluates a trained controller over a specified number trajectories; compares average action per step and average reward per trajectory between imitator and expert + """ self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) 
def save_controller_network(self): + """ + Saves a tensorflow checkpoint to path specified in params (and writes to tensorboard) + """ self.trainer.save_controller_network() def main(): + """ + Parse args, run training, and evalutation + """ import argparse parser = argparse.ArgumentParser() parser.add_argument('--ep_len', type=int, default=5000) diff --git a/flow/controllers/imitation_learning/singleagent_straight_road.py b/flow/controllers/imitation_learning/singleagent_straight_road.py deleted file mode 100644 index bcebad140..000000000 --- a/flow/controllers/imitation_learning/singleagent_straight_road.py +++ /dev/null @@ -1,163 +0,0 @@ -"""Multi-agent highway with ramps example. -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. -""" -from flow.controllers import RLController, IDMController -from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ - VehicleParams, SumoParams, SumoLaneChangeParams -from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS -from flow.networks import HighwayNetwork -from flow.envs.straightroad_env import SingleStraightRoad -from flow.networks.highway import ADDITIONAL_NET_PARAMS -from flow.utils.registry import make_create_env -from ray.tune.registry import register_env - - -# SET UP PARAMETERS FOR THE SIMULATION - -# number of steps per rollout -HORIZON = 2000 - -# inflow rate on the highway in vehicles per hour -HIGHWAY_INFLOW_RATE = 10800 / 5 -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 - - -# SET UP PARAMETERS FOR THE NETWORK - -additional_net_params = ADDITIONAL_NET_PARAMS.copy() -additional_net_params.update({ - # length of the highway - "length": 2000, - # number of lanes - "lanes": 1, - # speed limit for all edges - "speed_limit": 30, - # number of edges to divide the highway into - "num_edges": 2 -}) - - -# SET UP PARAMETERS FOR THE ENVIRONMENT - -additional_env_params = 
ADDITIONAL_ENV_PARAMS.copy() -additional_env_params.update({ - 'max_accel': 2.6, - 'max_decel': 4.5, - 'target_velocity': 14.0, - 'local_reward': True, - 'lead_obs': True, - "terminate_on_wave": False, - # the environment is not allowed to terminate below this horizon length - 'wave_termination_horizon': 1000, - # the speed below which we consider a wave to have occured - 'wave_termination_speed': 10.0, - # whether the vehicle continues to acquire reward after it exits the system. This causes it to have incentive - # to leave the network in a good state after it leaves - 'reward_after_exit': True -}) - - -# CREATE VEHICLE TYPES AND INFLOWS - -vehicles = VehicleParams() -inflows = InFlows() - -# human vehicles -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), -) - -# autonomous vehicles -vehicles.add( - veh_id='rl', - acceleration_controller=(RLController, {})) - -# add human vehicles on the highway -inflows.add( - veh_type="human", - edge="highway_0", - vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), - depart_lane="free", - depart_speed="23.0", - name="idm_highway_inflow") - -# add autonomous vehicles on the highway -# they will stay on the highway, i.e. 
they won't exit through the off-ramps -inflows.add( - veh_type="rl", - edge="highway_0", - vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), - depart_lane="free", - depart_speed="23.0", - name="rl_highway_inflow") - -# SET UP FLOW PARAMETERS -done_at_exit = True -if additional_env_params['reward_after_exit']: - done_at_exit = False - -flow_params = dict( - # name of the experiment - exp_tag='singleagent_highway', - - # name of the flow environment the experiment is running on - env_name=SingleStraightRoad, - - # name of the network class the experiment is running on - network=HighwayNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=0, - sims_per_step=1, # do not put more than one - done_at_exit=done_at_exit, - additional_params=additional_env_params, - ), - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.5, - render=False, - use_ballistic=True, - restart_instance=True - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflows, - additional_params=additional_net_params - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) - - -# SET UP RLLIB MULTI-AGENT FEATURES - -create_env, env_name = make_create_env(params=flow_params, version=0) - -# register as rllib env -register_env(env_name, create_env) - -# multiagent configuration -test_env = create_env() -obs_space = test_env.observation_space -act_space = test_env.action_space diff --git a/flow/controllers/imitation_learning/trainer.py 
b/flow/controllers/imitation_learning/trainer.py index a390502d4..801c7517f 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -5,7 +5,7 @@ import gym import os from flow.utils.registry import make_create_env -from i210_multiagent import flow_params +from env_configs.singleagent_straight_road import flow_params from imitating_controller import ImitatingController from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController @@ -29,12 +29,19 @@ def __init__(self, params): # environment setup create_env, _ = make_create_env(flow_params) self.env = create_env() - init_state = self.env.reset() # vehicle setup - self.multiagent = params['multiagent'] - - self.vehicle_ids = self.env.k.vehicle.get_rl_ids() + self.multiagent = params['multiagent'] # multiagent or singleagent env + + if not self.multiagent and self.env.action_space.shape[0] > 1: + # use sorted rl ids if the method exists (e.g.. 
singlagent straightroad) + try: + self.vehicle_ids = self.env.get_sorted_rl_ids() + except: + self.vehicle_ids = self.k.vehicle.get_rl_ids() + else: + # use get_rl_ids if sorted_rl_ids doesn't exist + self.vehicle_ids = self.env.k.vehicle.get_rl_ids() # neural net setup obs_dim = self.env.observation_space.shape[0] @@ -43,14 +50,16 @@ def __init__(self, params): self.params['action_dim'] = action_dim self.params['obs_dim'] = obs_dim - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], noise_variance=self.params['noise_variance']) + # initialize neural network class and tf variables + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic']) tf.global_variables_initializer().run(session=self.sess) # controllers setup - v_des = self.params['v_des'] + v_des = self.params['v_des'] # for FollowerStopper car_following_params = SumoCarFollowingParams() self.controllers = dict() + # initialize controllers: save in a dictionary to avoid re-initializing a controller for a vehicle for vehicle_id in self.vehicle_ids: expert = FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) imitator = ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params) @@ -59,13 +68,14 @@ def __init__(self, params): def run_training_loop(self, n_iter): """ - Trains imitator for n_iter iterations (each iter runs optimizer once on given batch of dat) + Trains imitator for n_iter iterations (each iteration collects new trajectories to put in replay buffer) Args: param n_iter: number of iterations to execute training """ # init vars 
at beginning of training + # number of environment steps taken throughout training self.total_envsteps = 0 for itr in range(n_iter): @@ -76,23 +86,24 @@ def run_training_loop(self, n_iter): # first iteration is behavioral cloning training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) else: + # other iterations use DAgger (trajectories collected by running imitator policy) training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) paths, envsteps_this_batch = training_returns self.total_envsteps += envsteps_this_batch - # add collected data to replay buffer + # add collected data to replay buffer in neural network class self.action_network.add_to_replay_buffer(paths) - # train controller (using sampled data from replay buffer) - loss = self.train_controller() + # train controller + self.train_controller() def collect_training_trajectories(self, itr, batch_size): """ Collect (state, action, reward, next_state, terminal) tuples for training Args: - itr: iteration of training during which functino is called + itr: iteration of training during which function is called. Used to determine whether to run behavioral cloning or DAgger batch_size: number of tuples to collect Returns: paths: list of trajectories @@ -106,17 +117,19 @@ def collect_training_trajectories(self, itr, batch_size): def train_controller(self): """ - Trains controller using data sampled from replay buffer + Trains controller for specified number of steps, using data sampled from replay buffer; each step involves running optimizer (i.e. 
Adam) once """ - print('Training controller using sampled data from replay buffer') + print("Training controller using sampled data from replay buffer...") for train_step in range(self.params['num_agent_train_steps_per_iter']): + # sample data from replay buffer ob_batch, ac_batch, expert_ac_batch = self.action_network.sample_data(self.params['train_batch_size']) + # train network on sampled data self.action_network.train(ob_batch, expert_ac_batch) def evaluate_controller(self, num_trajs = 10): """ - Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout + Evaluates a trained imitation controller on similarity with expert with respect to action taken and total reward per rollout Args: num_trajs: number of trajectories to evaluate performance on @@ -124,16 +137,19 @@ def evaluate_controller(self, num_trajs = 10): print("\n\n********** Evaluation ************ \n") + + # collect imitator driven trajectories (along with corresponding expert actions) trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False, v_des=self.params['v_des']) - total_imitator_steps = 0 - average_imitator_reward_per_rollout = 0 + # initialize metrics + total_imitator_steps = 0 # total number of environment steps taken across the n trajectories + average_imitator_reward_per_rollout = 0 # average reward per rollout achieved by imitator - action_errors = np.array([]) - average_action_expert = 0 - average_action_imitator = 0 + action_errors = np.array([]) # difference in action (acceleration) taken between expert and imitator + average_action_expert = 0 # average action taken, across all timesteps, by expert (used to compute % average) + average_action_imitator = 0 # average action taken, across all timesteps, by imitator (used to compute % average) - # compare actions taken in each step of trajectories + # compare actions taken in each step of 
trajectories (trajectories are controlled by imitator) for traj_tuple in trajectories: traj = traj_tuple[0] traj_len = traj_tuple[1] @@ -144,21 +160,22 @@ def evaluate_controller(self, num_trajs = 10): average_action_expert += np.sum(expert_actions) average_action_imitator += np.sum(imitator_actions) - action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) + # use RMSE as action error metric + action_error = (np.linalg.norm(imitator_actions - expert_actions)) / len(imitator_actions) action_errors = np.append(action_errors, action_error) total_imitator_steps += traj_len average_imitator_reward_per_rollout += np.sum(traj['rewards']) + # compute averages for metrics average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) average_action_expert = average_action_expert / total_imitator_steps - average_action_imitator = average_action_imitator / total_imitator_steps - + # collect expert driven trajectories (these trajectories are only used to compare average reward per rollout) expert_trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, True, v_des=self.params['v_des']) - average_expert_reward = 0 + # initialize metrics total_expert_steps = 0 average_expert_reward_per_rollout = 0 @@ -166,21 +183,27 @@ def evaluate_controller(self, num_trajs = 10): for traj_tuple in expert_trajectories: traj = traj_tuple[0] traj_len = traj_tuple[1] - average_expert_reward += np.sum(traj['rewards']) total_expert_steps += traj_len average_expert_reward_per_rollout += np.sum(traj['rewards']) average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) - average_expert_reward = average_expert_reward / total_expert_steps + # compute percent errors (using expert values as 'ground truth') + percent_error_average_reward = (np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout) / 
average_expert_reward_per_rollout) * 100 + + percent_error_average_action = (np.abs(np.mean(action_errors)) / np.abs(average_action_expert)) * 100 - print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + # Print results + print("\nAverage reward per rollout, expert: ", average_expert_reward_per_rollout) + print("Average reward per rollout, imitator: ", average_imitator_reward_per_rollout) + print("% Difference, average reward per rollout: ", percent_error_average_reward, "\n") - print("MEAN EXPERT ACTION: ", average_action_expert) - print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") + print(" Average RMSE action error per rollout: ", np.mean(action_errors)) + print("Average Action Taken by Expert: ", average_action_expert) + print("% Action Error: ", percent_error_average_action, "\n") + print("Total imitator steps: ", total_imitator_steps) + print("Total expert steps: ", total_expert_steps) def save_controller_network(self): diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index a1334066f..198a2a4ad 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -24,21 +24,24 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ - vehicle_ids = env.k.vehicle.get_rl_ids() + # reset and initialize arrays to store trajectory observation = env.reset() - if len(vehicle_ids) == 1: - vehicle_id = vehicle_ids[0] - else: - vehicle_id = None - observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] 
traj_length = 0 while True: - # update vehicle ids - vehicle_ids = env.k.vehicle.get_rl_ids() + # update vehicle ids: if multidimensional action space, check if env has a sorted_rl_ids method + if env.action_space.shape[0] > 1: + try: + vehicle_ids = env.get_sorted_rl_ids() + except: + vehicle_ids = env.k.vehicle.get_rl_ids() + else: + vehicle_ids = env.k.vehicle.get_rl_ids() + + # no RL actions if no RL vehicles if len(vehicle_ids) == 0: observation, reward, done, _ = env.step(None) if done: @@ -47,27 +50,26 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto # init controllers if any of vehicle ids are new # there could be multiple vehicle ids if they all share one state but have different actions + car_following_params = SumoCarFollowingParams() + for vehicle_id in vehicle_ids: - if vehicle_id not in set(controllers.get_keys()): + if vehicle_id not in set(controllers.keys()): expert = FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) - imitator = ImitatingController(vehicle_id, action_network, false, car_following_params=car_following_params) + imitator = ImitatingController(vehicle_id, action_network, False, car_following_params=car_following_params) controllers[vehicle_id] = (imitator, expert) - print("CONTROLLING CONTROLLER: ", controller) - print("EXPERT CONTROLLER: ", expert_controller) - - # get the actions + # get the actions given by controllers action_dim = env.action_space.shape[0] rl_actions = [] - expert_actions = [] + actions_expert = [] invalid_expert_action = False - for i in range(len(action_dim)): + for i in range(action_dim): # if max number of RL vehicles is not reached, insert dummy values if i >= len(vehicle_ids): rl_actions.append(0.0) - expert_actions.append(0.0) + actions_expert.append(0.0) else: imitator = controllers[vehicle_ids[i]][0] expert = controllers[vehicle_ids[i]][1] @@ -77,32 +79,39 @@ def sample_trajectory_singleagent(env, controllers, action_network, 
max_trajecto if (expert_action is None or math.isnan(expert_action)): invalid_expert_action = True - expert_actions.append(expert_action) + actions_expert.append(expert_action) if use_expert: + if traj_length == 0 and i == 0: + print("Controller collecing trajectory: ", type(expert)) rl_actions.append(expert_action) else: - rl_actions.append(imitator.get_action(env)) + if traj_length == 0 and i == 0: + print("Controller collecting trajectory: ", type(imitator)) + imitator_action = imitator.get_action(env) + rl_actions.append(imitator_action) - # don't add invalid expert actions to replay buffer if any are invalid - if invalid_expert_action: - if use_expert: - observation, reward, done, _ = env.step(None) - else: - observation, reward, done, _ = env.step(rl_actions) - + # invalid action in rl_actions; default to Sumo, ignore sample + if None in rl_actions or np.nan in rl_actions: + observation, reward, done, _ = env.step(None) + terminate_rollout = traj_length == max_trajectory_length or done + if terminate_rollout: + break + continue + # invalid expert action (if rl_actions is expert actions then this would have been caught above)) + if not use_expert and invalid_expert_action: + # throw away sample, but step according to rl_actions + observation, reward, done, _ = env.step(rl_actions) terminate_rollout = traj_length == max_trajectory_length or done - if terminate_rollout: break - # skip to next step continue # update collected data observations.append(observation) actions.append(rl_actions) - expert_actions.append(expert_actions) + expert_actions.append(actions_expert) observation, reward, done, _ = env.step(rl_actions) traj_length += 1 @@ -114,7 +123,7 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto if terminate_rollout: break - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length) + return traj_dict(observations, actions, expert_actions, rewards, next_observations, 
terminals), traj_length def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des): @@ -143,7 +152,6 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector # add nothing to replay buffer if no vehicles if len(vehicle_ids) == 0: observation_dict, reward, done, _ = env.step(None) - print(env.k.vehicle.get_rl_ids()) if done['__all__']: break continue @@ -171,10 +179,11 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector controller = controllers[vehicle_id][0] if traj_length == 0 and i == 0: - print("COLLECTOR: ", controller) + print("Controller collecting trajectory: ", controller) action = controller.get_action(env) + # action should be a scalar acceleration if type(action) == np.ndarray: action = action.flatten()[0] @@ -262,7 +271,7 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le Returns: - List of rollout dictionaries + List of rollouts (tuple of rollout dictionary, length of rollout) """ trajectories = [] From 0b08c331f7af272fe17ee4df7c8990311e73f56b Mon Sep 17 00:00:00 2001 From: Akash Velu <31679538+akashvelu@users.noreply.github.com> Date: Sun, 3 May 2020 23:38:12 -0700 Subject: [PATCH 15/57] Delete Untitled.ipynb --- .../imitation_learning/Untitled.ipynb | 856 ------------------ 1 file changed, 856 deletions(-) delete mode 100644 flow/controllers/imitation_learning/Untitled.ipynb diff --git a/flow/controllers/imitation_learning/Untitled.ipynb b/flow/controllers/imitation_learning/Untitled.ipynb deleted file mode 100644 index d412275b8..000000000 --- a/flow/controllers/imitation_learning/Untitled.ipynb +++ /dev/null @@ -1,856 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing 
(type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, 
use depart_speed instead.\n", - " PendingDeprecationWarning\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import gym\n", - "from i210_multiagent import flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.utils.registry import make_create_env\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "len(env.get_state())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.sample()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.step({})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.controllers.velocity_controllers import FollowerStopper\n", - "from flow.core.params import SumoCarFollowingParams\n", - 
"car_following_params = SumoCarFollowingParams()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert = FollowerStopper('followerstopper_0', car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "len(env.k.vehicle.get_ids())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert2 = FollowerStopper('flow_10.1', car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert2.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t = tf.convert_to_tensor(np.array([1,2]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t.get_shape()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t[0:1]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean = tf.convert_to_tensor(np.array([1.0,2.0]))\n", - "cov = tf.convert_to_tensor(np.array([1.0,1.0]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "tf.random_normal(tf.shape(tf.convert_to_tensor(np.array([1, 1]))), np.array([0,0]), np.array([1,1]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": 
[ - "cov" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tf.cast(tf.shape(mean), tf.int64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.diag(np.array([1,1]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow_probability as tfp" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tfd = tfp.distributions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tfd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess = tf.Session()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn = tfd.MultivariateNormalDiag(loc=mean, scale_diag=cov)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cov" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn.prob([-1, 0]).eval(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(mean)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn.prob([1, 2.5]).eval(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(mvn.sample(1))" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "inp = tf.placeholder(shape=[None, 2], name=\"obs\", dtype=tf.float32)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "out = inp\n", - "for _ in range(2):\n", - " out = tf.layers.dense(out, 30, activation=tf.tanh)\n", - "out = tf.layers.dense(out, 2, activation=None, name=\"output\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pred = out" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "type(pred)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tf.global_variables_initializer().run(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs = np.array([1,1])\n", - "obs = obs[None]\n", - "ret = sess.run([pred], feed_dict={inp:obs})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs = np.array([[1,1], [1,1]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ret = sess.run([pred], feed_dict={inp:obs})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ret" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "type(ret)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "batch = np.array([[3,3],[4,4],[1,1]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "log_likelihood = sess.run(mvn.log_prob(batch))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "log_likelihood" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(tf.reduce_mean(log_likelihood))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.mean(log_likelihood)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "labels_batch = tf.placeholder(shape=[None, 2], dtype=tf.float64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ll = mvn.log_prob(labels_batch)\n", - "loss = tf.reduce_mean(ll, axis=-1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "b = batch.reshape(batch.shape[0], 2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run([loss], feed_dict={labels_batch:b})" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from singleagent_straight_road import flow_params\n", - "from flow.utils.registry import make_create_env\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0.])" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": 
{}, - "outputs": [ - { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Box(8,)" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.action_space" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(100):\n", - " env.step(None)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "24" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "len(env.get_state())" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.1', 'rl_highway_inflow_10.2']" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions = {}\n", - "for vehicle_id in env.k.vehicle.get_rl_ids():\n", - " rl_actions[vehicle_id] = 1.0\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.2', 'rl_highway_inflow_10.1']" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.get_sorted_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - 
"metadata": {}, - "outputs": [], - "source": [ - "rl_actions = [1,1,1,0,0,0,0,0]\n", - "rl_actions = np.array(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0\n", - "rl_highway_inflow_10.0\n", - "1\n", - "rl_highway_inflow_10.2\n", - "2\n", - "rl_highway_inflow_10.1\n" - ] - }, - { - "data": { - "text/plain": [ - "(array([0.54393322, 0.06077194, 0.56137638, 0.40959813, 0.0259221 ,\n", - " 0.4041333 , 0.42759098, 0.02818569, 0.42912874, 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ]),\n", - " 0.1718155323023197,\n", - " False,\n", - " {})" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "error\n" - ] - } - ], - "source": [ - "try:\n", - " test(1)\n", - "except:\n", - " print(\"error\")" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "ename": "AssertionError", - "evalue": "blah", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"blah\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m: blah" - ] - } - ], - "source": [ - "assert False, \"blah\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - 
"kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} From 21ee5ce71739a5a6ad1a59009fd7fa5a4751e683 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 3 May 2020 23:43:24 -0700 Subject: [PATCH 16/57] Removed files --- .../imitation_learning/Untitled.ipynb | 856 ------------------ .../imitation_learning/Useless/Untitled.ipynb | 438 --------- .../Useless/Untitled1.ipynb | 96 -- ...ents.1587254017.Akashs-MacBook-Pro-2.local | Bin 265723 -> 0 bytes ...ents.1587339098.Akashs-MacBook-Pro-2.local | Bin 267581 -> 0 bytes ...ents.1587776769.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587779365.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587780241.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587781276.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587789385.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587841939.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587848505.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587855757.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587860905.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587860969.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes 15 files changed, 1390 deletions(-) delete mode 100644 flow/controllers/imitation_learning/Untitled.ipynb delete mode 100644 flow/controllers/imitation_learning/Useless/Untitled.ipynb delete mode 100644 flow/controllers/imitation_learning/Useless/Untitled1.ipynb delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local delete mode 100644 
flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587776769.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587779365.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587780241.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587781276.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587841939.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587855757.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860905.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local diff --git a/flow/controllers/imitation_learning/Untitled.ipynb b/flow/controllers/imitation_learning/Untitled.ipynb deleted file mode 100644 index d412275b8..000000000 --- a/flow/controllers/imitation_learning/Untitled.ipynb +++ /dev/null @@ -1,856 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - 
" _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", - " PendingDeprecationWarning\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import gym\n", - "from 
i210_multiagent import flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.utils.registry import make_create_env\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "len(env.get_state())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.sample()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.step({})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.controllers.velocity_controllers import FollowerStopper\n", - "from flow.core.params import SumoCarFollowingParams\n", - "car_following_params = SumoCarFollowingParams()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - 
"source": [ - "expert = FollowerStopper('followerstopper_0', car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "len(env.k.vehicle.get_ids())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert2 = FollowerStopper('flow_10.1', car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert2.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t = tf.convert_to_tensor(np.array([1,2]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t.get_shape()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t[0:1]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean = tf.convert_to_tensor(np.array([1.0,2.0]))\n", - "cov = tf.convert_to_tensor(np.array([1.0,1.0]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "tf.random_normal(tf.shape(tf.convert_to_tensor(np.array([1, 1]))), np.array([0,0]), np.array([1,1]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cov" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tf.cast(tf.shape(mean), 
tf.int64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.diag(np.array([1,1]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow_probability as tfp" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tfd = tfp.distributions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tfd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess = tf.Session()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn = tfd.MultivariateNormalDiag(loc=mean, scale_diag=cov)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cov" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn.prob([-1, 0]).eval(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(mean)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn.prob([1, 2.5]).eval(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(mvn.sample(1))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "inp = tf.placeholder(shape=[None, 2], name=\"obs\", dtype=tf.float32)" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "out = inp\n", - "for _ in range(2):\n", - " out = tf.layers.dense(out, 30, activation=tf.tanh)\n", - "out = tf.layers.dense(out, 2, activation=None, name=\"output\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pred = out" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "type(pred)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tf.global_variables_initializer().run(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs = np.array([1,1])\n", - "obs = obs[None]\n", - "ret = sess.run([pred], feed_dict={inp:obs})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs = np.array([[1,1], [1,1]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ret = sess.run([pred], feed_dict={inp:obs})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ret" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "type(ret)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "batch = np.array([[3,3],[4,4],[1,1]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "log_likelihood = sess.run(mvn.log_prob(batch))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "log_likelihood" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - 
"outputs": [], - "source": [ - "sess.run(tf.reduce_mean(log_likelihood))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.mean(log_likelihood)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "labels_batch = tf.placeholder(shape=[None, 2], dtype=tf.float64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ll = mvn.log_prob(labels_batch)\n", - "loss = tf.reduce_mean(ll, axis=-1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "b = batch.reshape(batch.shape[0], 2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run([loss], feed_dict={labels_batch:b})" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from singleagent_straight_road import flow_params\n", - "from flow.utils.registry import make_create_env\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0.])" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } 
- ], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Box(8,)" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.action_space" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(100):\n", - " env.step(None)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "24" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "len(env.get_state())" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.1', 'rl_highway_inflow_10.2']" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions = {}\n", - "for vehicle_id in env.k.vehicle.get_rl_ids():\n", - " rl_actions[vehicle_id] = 1.0\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.2', 'rl_highway_inflow_10.1']" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.get_sorted_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions = [1,1,1,0,0,0,0,0]\n", - "rl_actions = np.array(rl_actions)" - ] - }, - { - "cell_type": 
"code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0\n", - "rl_highway_inflow_10.0\n", - "1\n", - "rl_highway_inflow_10.2\n", - "2\n", - "rl_highway_inflow_10.1\n" - ] - }, - { - "data": { - "text/plain": [ - "(array([0.54393322, 0.06077194, 0.56137638, 0.40959813, 0.0259221 ,\n", - " 0.4041333 , 0.42759098, 0.02818569, 0.42912874, 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ]),\n", - " 0.1718155323023197,\n", - " False,\n", - " {})" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "error\n" - ] - } - ], - "source": [ - "try:\n", - " test(1)\n", - "except:\n", - " print(\"error\")" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "ename": "AssertionError", - "evalue": "blah", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"blah\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m: blah" - ] - } - ], - "source": [ - "assert False, \"blah\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - 
"version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/flow/controllers/imitation_learning/Useless/Untitled.ipynb b/flow/controllers/imitation_learning/Useless/Untitled.ipynb deleted file mode 100644 index 982ef03a7..000000000 --- a/flow/controllers/imitation_learning/Useless/Untitled.ipynb +++ /dev/null @@ -1,438 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", - " PendingDeprecationWarning\n" - ] - } - ], - "source": [ - "import time\n", - "import pickle\n", - "import numpy as np\n", - "import gym\n", - "import os\n", - "from flow.utils.registry import make_create_env\n", - "from i210_multiagent import flow_params as flow_params_multi\n", - "from flow.controllers.car_following_models import IDMController\n", - "from flow.core.params import SumoCarFollowingParams\n", - "from utils import *\n", - "from imitating_network import *\n", - "from utils_tensorflow import *" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params_multi)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "obs_dim = env.observation_space.shape[0]\n", - "action_dim = (1,)[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "3" - ] - }, - 
"execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "obs_dim" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "sess = create_tf_session()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Restoring parameters from /Users/akashvelu/Documents/models2/model.ckpt\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Restoring parameters from /Users/akashvelu/Documents/models2/model.ckpt\n" - ] - } - ], - "source": [ - "action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/subprocess.py:786: ResourceWarning: subprocess 11185 is still running\n", - " ResourceWarning, source=self)\n" - ] - }, - { - "data": { - "text/plain": [ - "{}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "init_state = env.reset()\n", - "init_state" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{}" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.get_state()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0\n", - "1\n", - "OBS: [[0.4 1. 0. 
]]\n", - "SHAPE: (1, 3)\n", - "TYPE: float64\n" - ] - }, - { - "ename": "InvalidArgumentError", - "evalue": "You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File 
\"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[{{node policy_vars/obs}} = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", - 
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;31m# print(len(obs.shape))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# print(obs[None].shape)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0maction\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_accel_from_observation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0mrl_actions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mvehicle_id\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0menv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrl_actions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\u001b[0m in \u001b[0;36mget_accel_from_observation\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SHAPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TYPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mret_val\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_predictions\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobs_placeholder\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mret_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m 
\u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in 
\u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File 
\"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", 
line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File 
\"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = 
tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n" - ] - } - ], - "source": [ - "for i in range(100):\n", - " print(i)\n", - " rl_vehicles = env.k.vehicle.get_rl_ids()\n", - " if len(rl_vehicles) == 0:\n", - " env.step(None)\n", - " continue\n", - " \n", - " rl_actions = {}\n", - " observations = env.get_state()\n", - "# print(observations)\n", - " for vehicle_id in rl_vehicles:\n", - " obs = observations[vehicle_id]\n", - "# print(len(obs.shape))\n", - "# print(obs[None].shape)\n", - " action = action_network.get_accel_from_observation(obs)\n", - " rl_actions[vehicle_id] = action\n", - " env.step(rl_actions)\n", - " \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "dtype('float32')" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "t=np.array([[1.0,1.0,1.0]], dtype='float32')\n", - "t.dtype" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OBS: [[1. 1. 
1.]]\n", - "SHAPE: (1, 3)\n", - "TYPE: float32\n" - ] - }, - { - "ename": "InvalidArgumentError", - "evalue": "You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File 
\"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[{{node policy_vars/obs}} = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", - 
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0maction_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_accel_from_observation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\u001b[0m in \u001b[0;36mget_accel_from_observation\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SHAPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TYPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mret_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_predictions\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobs_placeholder\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mreturn\u001b[0m 
\u001b[0mret_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m 
\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 
438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = 
shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n" - ] - } - ], - "source": [ - "action_network.get_accel_from_observation(t)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "for i in range(40):\n", - " env.step(None)\n", - " env.render()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.get_state()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def test(d):\n", - " d['asdf'] = 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t = dict()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test(t)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "set(t.keys())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "b = np.array([1,2,3])\n", - "print(b.dtype)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/obs:0')" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "placeholder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": 
"python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/flow/controllers/imitation_learning/Useless/Untitled1.ipynb b/flow/controllers/imitation_learning/Useless/Untitled1.ipynb deleted file mode 100644 index b93658a05..000000000 --- a/flow/controllers/imitation_learning/Useless/Untitled1.ipynb +++ /dev/null @@ -1,96 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: 
Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" - ] - } - ], - "source": [ - "import tensorflow as tf" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from i210_multiagent import flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from imitating_network import ImitatingNetwork" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.utils.registry import make_create_env" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from utils_tensorflow import *" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local deleted file mode 100644 index 
0e64e0dc20ed4f255f8e9f110f2f9c0bf6a3ad75..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 265723 zcmeIb3y>Vgc^`hWdpI0EfFLm>C1~_8gOX+GC~9#BfR9tjJRB)f{Qu2|2#^#-QLDw> z0^D%1d)nOt38fM%iOW$OJ8`U(lh{txKXECx<0P^yl@u!~TIEEFttfuPaU{i-6gzsQ zWGQ+$~lxH{Cj(00@v=0 zhl8z)-3z_Rv@}~a*8V|Sola>9jXM1*WrysYDMk_x62fO3l+0O2)JAJCR z)1SsS!u9Ki;{D6tWB)~QYq`Dh64;Y{AXfbqb|L1+>}|~kS5i9m#!R3r~O?49Wk z18^G|xsBlWBe<7mchsLfGoC!%oo3}~tMh|V?9bDUjHd&eupGq; zD~tc)wtxH_&r7uOf(kE-&{|&z#YgYirug_lH$L9qf(QNe@o;8zk5HS7uMvJsLRoRj z(6ry*UW;3Y1&fPTURKf40f6=8G^UV$8!G^d>66)mdm%baqEg$#8{mVj5LP_hJrlQ2 z?Vd?Octr2nAY=xkm0UoPhk&$h^P52gcYb#mx9{H_CZSC?gb0aNt@ao|#PH!3ob>kG zxP5YaI|SE6B|*ppvOh~efIo=f6>h?d;nCh?&^t5iKe{G{RLdAEhhRU+zLWlTchoyi z3L%nUJQDD=uy1QTnvnvnKgnoa&uS$u(7}6KaElvXos-k);M^!)qF)n7aU}jWmkMTg z+V7I`y=E3U+-3x3O|tQAFMXH^;AXDk9UNS6;-*t?M=q zl5#7p2RBw0)BXZ}D}tB1OkZ$D^h7GIbS-lwTBUA?er^&*)yLsyT6TlVR3SN_lK>K= zmRHoyVPQ?{rC8C!uSM`G7e#j@Uvm;p8AY)3Gqf%n@NtKnaer?{>XLYgeof>aIjIwB zN_HIk=Z!S7&PW3|-5Hbk+9UCALHyO3^jGlP5&VEn37f=YNof%8&A+inyPRcr^{(6q z`$@3wb<;+*H;b3z#RuN-*j=t@!A2n;_*)L)<8*KIRJ@q}f8xq~F@W=w;8PLYXAy08 zwQk$~+9KkvLMOh(CE^x?Xd3slX;1(0klMrodCQm z!~fL#pL{M&H(_`w+xPI(bZp>%tp#rkVR1swF1PUEY?}F0+fT6=C}2Z|0^qq;7&|B$ z`A}A6#Y|jXjfp$*6Su}W;dcWGuxRMRn{pUYrcHg(w0rrwz8}f#6MmxwcLnfg+SrG& zN7CAdAw-&c3;SV&44;YMm99b*dI}*W+2&d;{{6hH(w9grJ~faJwV>n5o806(3nd9* zgPNwpL9E)w!^ic~v~L4P%D5^uAkLBtW5G5*fsaOTyWI-1K9gS^5?X!)8b#!j(lqq) zd_@@<`>FqA@2B$N^e$>?U<2T9jN^)P%Af)KGduQ{6O-N2hP~yFC1{P8M(LsR5D)_V zx*cv~LXtetQAj~pHJBBA-YK_8K!6p$$Qsjn(W&G#Fh1~K>>v%%Q3WAz2$4BPU_(K8 z3n;ntgbU#BIK>+dC$K>or4Y(Vx~jqnDOj5aQosh8D^kD)bwmmZdu3l2pIg$%f;KVXp`nMtlF4P254kJo$q=3!a+5)&B1&o-% zpB<3`2TGlE6ToF=kCwRMgcLB=g*HUho7W3jf#3r+yA`=Z0T-lzJuSB+)?$u5Xh#az zCk-}unIHvhrcOu!`>?!J=`6XjX!MG>HVbz_3f8i&UQm!XQb1|W4Jn|*2`(ytd4`rQ z&`zCh&av!=#4+<-&H45MC!`=(iL^<;+62?b>_|aQzJd*f-kigPQX4#QLJD#&>LLa7 zvC{uAph!-`U{>(Q4oJaTM?wm$8eZ+VVFf~ba$dm}_Y3H@5EP;V7p!0nffX>hSa(e1 z29`zPa61ZIu!6Nr5&511D^^el!Ce(>!O00LI5rQgfDJNNtbh$Fq$mBZ0!%VxL#X@f 
zfLH+=$jFNyR=|c(^AZp%U;~+W5gM%8y9!)JD6oRsuLe~11F)!u+nc~9>x31s51VA? zzN^56niE#wM93E_V1sC15^%u^7!i{I-FFqZP!mkq-sZZH^u`L!7eYiGOz#g%8axDpvqSV69mx>x~y z%#Ic0n1U}1)K(X8)u z?^NC1J8;1Yjv=rDCKo$az}Y~lwR!9-I93ZQD1=Z>GU&d7<8`qDE;<_D5GeFZMmoUj5XLcUl58^jAMU_?yD=Z+P)P!mkqjup6&^u`L< zAhu@N=?w%%%pg)ntiXYi8&=>%D40E3;)WAez-BBoB9)HdvcU@2be*sQ_Tk!C0ejGn z6|helY_P1Kyh66RA+eZ%VkOlHD=65KIu$Ecz#gEPFMk>WvCx~u>v-PqQ8FE z-~HKuSOFWz$crCVz=lxs5)dn31DSXc8m!t_fy)RKE677RZbtwcCfyc{+MN*b@PpI7 z0yd;ccJBKMT&Ou=1x|!~downO7goTCm;~sK6}V6nOxcbVxRCV53fLf)Rw8o33K%hi zNFA{P2TE>OffJ!%_GoStBPXnY%~)te0=+@NXs=W{372A(XoC zLF9xL{^9)_~6`Vj|1xzk> ztbntDQloyLK?L%duHmB$NoZEYRx?O0FD1od|w363?gBPQN#u_9wIMnaQ}fz15q}(zBL3&-4f9o|>so zr`5b;tY}OZ7T$R${FrpwA8I1vMGA-5kpjz`OX(5w9ro>;j~)LDz<4j zC8$*z-a%L`_%dc)qN zyY*Cmi(K)syS3FPw{$E%Fn(|c{$m8MaiV{Ee9v&)n|0Wo63L@)b$U)IAj7;gj>Zd1 ze{lvmQA1eC(yHx#_NKpZ=MYD(XjyTRaCZGl?y!w%MSVpJK|>c( zNR-!(aSvB|BD$M~PVJsg@2@zYxP6=`G^rsj9gLH9sZaO&JKgQU`Du2{1W5_L83PFC3SpHJs7z`R>l7KP9KScV162>9BLLX-!p6q)D~JYe?@upO8*^cjqhK zld)IVFT^_DeYnm=tp*DveL8~IIyn%pr0mV~?r`vQziB;jIbEv|WiTWw5PukfaJQ0^ z;Krl=G~=w3CDf+*!*)pY(c-aREnOa zoA)yvWv#{jn5{WozE1ytBG_(P1YAMhSfRv)^|n^=ina=Vwbh&e0`^ml36ACM6{#09 zHE{6V2)3HCnrau<81GekaO@6AlR)m{D6h(NDg+z@hoMQM&0|yIfS+!|!vVM|QYI^S zynuwJxJ_nmjWnOn{54oMq;=fwi><(lnMbX5i3H3gf_n8mK@~sRf*%U$0C~;WfKw%5 zY0|o~NClPp(ewP3HvL6NZ-T^DtVvKMmM-gzA(7*x-2g5=xXi;RHCyabq@o_4 z^vwv4)>Sjbl>%4?q7#t}T!Z==VJP?y5xm-o=WMX<>g8MX6HeSfI|2j}K5F2BDRNj# zNZMN64H0z4(eEW|xwHP{^2Px_>}p{-H}T&5o20fgIwFRJ*vcrV@tt&Ui<{=32Y~Dc zTpc9ig@Lg3l~7Ayb`|_i1jn5sRHa1ELRM3yHd)`#PiwEewEBDXIhsoz^}!*@Du&xK zR-vW5jzsA;sr!_aDyy}iE&zN%x&}ns6ic|GTPmuv&QS=ieq6LW*}rmKO4}Y>i1$9+ z?+x!BTyR_@%kr(bHN_%DZT@7%JbW?|@0QXMe15FKe9kpAXE;&;Ub{tFmQLLkKZ$}9 z;o2?-(c0~68~#!?hM$}Cwg+T2bILFk>ZSO=`j_GRO{1jabuUi4NAn_9xgzGe@baDr zZZZv)F0AAs&dz$`Ht$l-?g8v)BY4y_&+6Y48@DpE>!$MSR2)hvMb|~*_b5T2(lF}I zPKv_y9K@>6q#;R`dex<&UuePUsw{lvMA{&>4pvW64ap&~YQUZuezFB`2`Q=B?he`I zcWyG?B^x>VJNaIxtMJQh)oKuawp1ir4W{^7V9VA@%XXIE!H$4`m&?5WO!d2&W1POs>o9cRK}_a z>(|8lm%qpUi=-ccB)@=D%G 
ze{~QSoktX$z+XggDx7QN(SoFi0v`&f)0BDPHdZYxL94}rOPv_tueX}uey!xb3E2jg z!LI}jj>T~U^T`tp$al?dHYA^fEF;Rb$bim56p{48dA72cT3`onBhEQIDKMNJ*L#aOv|x z>d^XT5R`}$XQp1Zz=mJSdw&Za3TcF=2g5$O0g?QF;vX6~cA3%mX9kWabrG`0cbZgk zvjJIvAn%WoX449G{9*);1(8|`>vWWktA2Sm-Dp{Fv*mLk;Dl6W{8URP4sJY2Q-UqE zDmtwGz^W*2El2R75SVizz*FhD2=Simi~DQR<=pG+{3oh^Ae@zQo*l*sd4mdoBZ(q* z-gKV6@2GKq_*e+llegT3eJsPM1rJl%(D<~h2lp;AWp@U$T#$@ioo&2aU*eYj)}WhvlI^hKByo_kfG}m{wNYJ z+yHA51v#Z3Fk^~zClMWvj4%^?Bt#ie%rUQc$}%FLIEjh^WRqg5nR}Pw6Ke|J0rD1fb>Lx8khd6(NhSbrSkROeS~%NEBOq@vZqAEysS{?~VKKZ;aU>X& z+t~b}onpKq)`UQP3#t|6X@Wqh)JD+YVm<|z@bL(q^l!@L13ncUru+Gxr4Z_%3-5~p z>@TYnCLANg;&PnCc>ZXeIseTzoNr3_%G9pwKbtjy0PTcXG0b9cz3IDJ;J+BBnIJtj zlf6B?|3WCY)6&>9O+sh$i3zZ7Bvye9JH^<3E>H)hKzoYk0K!+=|V)?m2caGl1aK&^Y+R326qh5<02 zKW%~k=0mf6^9WF5;-`3!|E_!uD-0c~9kWFk3I054u2d`boj0%0A|*K|G5D1hJk^wQ z$c=gqo4uv6nl;_v{34%_*?{84}Q%y>dB z@Yovf^gHCO?P;G}Ew(x6O;2ubQ>-ih;a;*6EZ)!kkjy=RlpdtHsmOG%)O%|*I1W;g z!$6)F9&?U64rB1be!DYa{R>fQP*#TqO{jb5b>S!5`3twIw(H4o`r1CP=K13J_kie@4}Pu6SYFRgCTK_Z#bzu0`d-; z^Zr)#u=n?71nHuIo_-%M(XWZ(4D{eO(e#CU8{+MIz3lcKa#Mnyv)iC1t-jGY z``L%c&D;Ip9#@iNkHQCP>`}0jb;y{(-M_{vw<`l&LUhD9HcBJiDkAOU@L2pvfJj^Q z)byww=U0_oxvuuVLIP|c+%&5?TeK)vaeR(VvI{rY-YWczXYQjRmu9}xz0p(mrv1$_ z`z~<}+0JW=cGh(6UOI!T`@pwr9QLQ0*m1`e&@23?G(Z-s_iy)Rz3$oZrX$-E?^X$n~()8}hq}l2$G*m)T z@!;8$DLLRWQW4fd_hc$w=GT)c*|wx4ubxaAkhg2Uo=hSvJ7pO#Lic3Sz#K=7_GHHz z>&YaBbk1z=$&LvCC$)Qwg$8Pqr4ICvy~eDX{ttm?*k1>tv$n zWS0xllZivn64tXPQ;`hNlZj+?UK1__R{Imr!d}I|d@^8_755D2yH2L!+L8dh*2%>2 z0`+7fQP~m8uo1c^6USxDG~ScZkc;oh@{sM-lZmYB+}+uZ3mtj)WNY*2$y6FBSU@b8 zdG}-z*Rl%)*$OF*!Bu*mJ()_H-aVN#Tb+f5N=PamJbN-F2lL2CDqfsD*?XN#$+jgW zdG%z{fV^G%^<)xZ*(u9_5xOUn2Ie?wv?p6@tS6Hg(mAueCtDMGGLOEi+D}Ap=VW>sfHa0>dB-5dAs)O$t1$EQb7p%_wkq^wO|6rC(DQvVl_0sE?B$mC$pUm_jsh=D*j8f+qHe4s69p$bT#$}T z93n_ZrXm@jBNNH$d?s81thO-{?1-gCLU&x^xQrPaUIaf~<0=}(-s3FbYWp#_aP8HNiEQg^#<_+o^aKAZg42Aq zFrtP}y*Ghv6?yWe%Ubt9i?72SbnR%*3S8?ow^I?5x*A=fJ zSCK)yDb}v8-k9GM`zL;{f-9O_>mU8iRYqj4(ClwlhFJ!4?h6?-VabzY83uu8{QUH49(>-g+w0r%4Ix3S%-zC 
z1W#rBsPuc+RMv)H@;ggKumPQ?37O32)pcRCoa@zO9x4mm(kdCT1tj>T2yU*r)|vjS zw@Qxse5OC?3`T?5pf~JZ=nZ$t{i1h`N7LELZAAjU1#XUCRY}0A2zrK`O{loi;OyD6 zce-6^@ZktvYMg5fkM<^m-kD+l(X}+LPM_*c`rF-6?|eUQ#S8dZUkm%vh|wF2`jd>( ze%M2ZllxX%>kDp^|84|AH%UV|FX)rg>EPTbUZP(UGpl0$t`ZZpwsHd;B5^Tzo zetNXgedKi4*v(=>MCE^D;ZSfiMegz7 zvyy$TXW0kVOQr?qwz2!w7TnD#bMyc8@-?XFhUdYJ`{uSakv&rxl?k}ifXS-t&Q zRrGr3SPne!%@*9_$*>$ad&>v>M0slRM*YZ5Lg^YqLAzUb` z3^Z@DtCKt1^A2}uoiZxa7bX@z-@;_Of<-w%_=qbBYeAa8U=W5eC}5QZUyfkI!-(W? z;3Hwv3P~9z(477Os_`gn$y7sN=M*Fi{0XYv;P?^(0oqqp^#WQhatMJhqM8hjNjVfC z%3h8drpe$QuU4atICuRR17Ah87d&fHec&f&f44UUm2_W=VtJGghl-Mq1Og)((aBKJ zZg1M1k(+eJ6V_;ONv~~~Rcqz71&px@o%j|P7^9ez2xPRg!lHF#d4qr$=|A{P1ds9j zIn@jWbxk$Y$$)&ur`PmvWOdPjO(1pA!iu6M(qPT-6KzDBXyYwjyk{^Rs@8jXF$OS` zq$$0WKIEmD>i`C$?f$df9QAD*)`QZkBR1e0n3h)5hme{$2a0TitFn<7`*KEFks55u z^0zTft;n*RsOmRZB0WS?D-uv7t72iqOQdxHuuQ~?LiA*MEPM?E*a*x? zLBdbcL8M^fmjD>`*MfO|$@i6d_c93%UqUv_Mc9+pfMvJjYwXAXL1lZ(8;lq5HDsI2 zF)9ZLRKv#6!?npAfvQyn@rBwEK@pnLHWAez^Xy4s!dKowgkuU6IBd5#txdh|8?rHG zLs=kO-f7K^F%H{h({RFe8*7W%EEJo%32eQ9#C91~4R3gC*Fsn}6AZR%8-i}0_kxkw z5G(V-cG-ZLK7>lPQc|kmRo)fmh5)lU0}k6|k5dzrb~+-0s6Y>sv;+c*q-EOeh{6BZ zc$&pXTPujecG;j*>i(%qpjW0YFL~HF$6>p|OjWh69Gh6}%Y}Ahnn9tSY*vk)wvecDctD;lfk_W3b&^ zvg-I@&@2+$<(ZR$grB61h+z?9@e)cmry;Oij$c*n$Ld@Byn+M=9JVX)_WTGBIBZv7 zR1OfRHkrdNacwe3;BpQUIBZvxw-hFP<=uFoz+t<^?TF1|yKE>+zst9c#-NhSq8nkL_AX zR}vXw->!X-0N5@Yq?)R%eul$#*?_rLo^QzvhwVyC%jpb(FG>_K*serEk1dk04JJwB_U-b_NkPI-(q(|~Fhmo-IBb{W zH#D}JYc9k=EI4de;7te*9x4bI3l!^0$bGv4qjG>iV2uJjT$?;!Y&SPGb&+9m8XUGO z$VYw$$XDKt2MQdvd;BsylY!020s5uhPvLm(Pr+?fn)z)T-PsuId_3*M*5D5&)WHXFySPB?6r4MI)Of8VY^ z50kV60#$!6<|V7n5D%Ri;z_U+1SN=eOM_O?ZkFli5P*zWO5j_uMJ zgvWL{&V(Q$RK$knUg6y+>^kSK5`JV57>Vt2Y|3E+fjDy1FxW2lxFTGH#&&bbs!L*n zW|8}LdFG@b;V0?lw&T$$Blqoc{3@%X{CX%17@BMD(qX#-Z_khLfZMk#Fe(QKRKvzW zv$!^yBcKQpwY@ow(^7L=W>ma!BF|(HE?UwMb%%M@(jf1oz_pD<&)K8-LVEkd;P z{-5+Hsz`PIthsEDkozauRJ<^@NL5u}+8Q<+CyXsZz8lX535Br<^f19gAfN~yrcmG| zWpz}t85-{0c#%o}5F*ekLx`6=Y@B~F!km&R%+yqKc+FkF)I3884&#vc6zUIR!t&dK 
zUffpt4h8HH)qy=CB*q~zE!QH5K3k%Q!8jxmmw!mXVH`4>Qc&@iy#?cl!h{_p#&N<0 zZg_4BV43uevBxX^=_RL^J6hRW`O*AH>_>{-W4!OcUgu_1&2rkSo{ z2|m4?9Jb(P8ZP=f)4^~&VuMsg^~@)4UUL)GNA|!2no)Llvb*g0zbdZtDKMy=o#Dk) z#__(B?AzoOZVxB*b6hbj88N9JYxu{ztH}|5p7dsk%u>2L<7dca+G$4CeTKhAJb8JQ zPSwH_qj4Jlx|Uc>A~{o?!MRCqdq8eCpLY1asZ@D?Z+8Fgu$w;6neFZj`<<=vXf_!S zyW9PpKDp#*v~{t&S_OU=yn;uriYKa~V!&lB>@@vsK)`;%>H^L=Pvox2<-oGBAB< zn;rHoE^u&mj=jMVesd2zUS`+qsjT*uJaL897LOyMX7OwWOeYBVqMSR4+gDWJ z-R9*^!;rHD6S)Qmy%@NX>7!=wrMHp%Ti@D&@U0KOw%Q?o7!gB@VR5ws|M(SU!hJsz zz7$~?)3s5f3lNCQlP6Jum_3dO#OzsgAQlK*QC92~h}SMvAZGfwM1hzcd!{mEYapf% zV*)W778!`S$FC?8_6o#n9oIlyfy21^786*hI+PY{D6<0u;_{dXmHk>PV)i&D5VL2| zfmk4LMOm>|AU<}f0x{Ev(u~d9FE+rxB4+O>s|?v1i0Q+aK+J|k24e2BqzdMZEWVPIB5!SrofB#!GW1yC>lLtM?$*YUL z1dH_&=E2Du{booA2A?B?5`)c=0lJ-XH;kuZx~$6%Hb;ha1e_x)K!eSZwE+R=$YOJ_ zIkKc#&avDOgWV{wqS-A5EjlS7_QExfUHTwR~2)@r4eMw+i zqH#FU#NRwNX*YUi(M@x5vL;wI60hL{%kTm4(3?y2}#og!;E?G&jq*(tID#ZHlS zeRhhhnX^-*hH9tClD}w3dYI<`{1tc!Z%g^jP^0bcl!Tzto^EF5bY%0U!k88|mH%@e z{IiSXn1R$es3pfs^lQR-OrkaQ#5i&By=91sFF2!ykOF*!Mu2qSv+oJ5wp5V2MVCU69sct4Hgxj%-#tjdl(I}p)?HE)!?lK zlU5Dp#j4EWtwt6x8l(eh7_8%iw-wA;HCR;GGJCg`@dTqTMpwg)MO_NfR-)+b1(Q|{ z=FNf3;_aHr8APp^SScrr&gs-DQS^?2IjaVXjz(th4h?(tWnZ$0F$Fe3_#;-9u|$8g z;L@tXyc;_tXZoYTH9|+y$WCt>*^y^H zZPL%TOF!QM--=;zFdEEqc3v$kljx9}{D7t8HP%nF%`wyI;M~am`Mf4$^pD!;lh4;3 zfz}M}Bd{dQp0M&;w zCifTJ$Jm~U{9|`1sJUS2bT2_pdiO`a@XtSW^8fkO-&%$rxiS9S4X_Sva+XxQ z1ktlcqSNWui{xuUzP^KeJ@R58zg$e0U)HnLmxI?wt;MB1?S0GfUif+h<>7bnerS{Z z4CJAAMp3*EqVwgq7xHhfgZ`ru%E&5t?YM@zX+PZXluFVTU?4-VYl0{h7%dfl_*$@$(a zUYgD(gMOLXhSk!yhdg}MT4!k*W^eYZEPxXm8oHizNJ z_Z0)K-vII2b*0xF*niDlh<}o3^sTUX&w#Xeq&M0goSz37;yo!lcv&j)o zI7@mN@K0neDscBY*n2NI$dQ~asGFti+tfw&9Xv$516~8g0%|&)!H6ueq(aq92XLCIIrexO@Y29Xronu5{Q$t)ayA-?c0jBq-2@1C z!TyJay{-OJIjvOla$?g7Kg!vzYy$|<$ z!@CC;m;lnR4w0YA51vS$r+g8E$en};jvd5)Wa)JFSDm?#IGBHedX!+fV<=Y@iC~s? 
zNF*IRdw4HN76BPka;>_xy~$fzk~atrxOZ}b%7r3esZGFb;$URz3eAmL#hmeY@C*)eyeG$m~d zKBakfE1kqp&}aNY*he--&j#dFT9O-17vGC~i}|4Zl!Birz@xp%pm%0SwyBU9lhrRY ztEKhQK`~BA8IOd;)Kq7^!6?lqd9l!n7mmD0yhn!n6Y;E-rPbH0uD#~y+KVpGZ*G}v zt=)3$mUF#Zw{Jgtw%0#;NBrgA+Y1x8mWNu#XQnY9WcmGO(aH~0mi$S9w|*$zzx+M+ zUqrS-wpU)lSW^8Jb|L1+>}59QKWtN4vqOM8MZ9geN3I1Qi{SPC zT%8{nju!tHxBcVi0`U^*_M3&`qxUS+P4$R$!p9Fb&d2*(@Swjw#L7r4l#)&TH7Y;EDU=j~Yod}MWCGcrO?ZD0fo-8ZUwja* z$5C9thuG@Cw$)WG#gZ=Iqk)V|t&#nL{BFy-$IOv+jmA zdOyRzi{NHQiAmR9IU3Mhg26{3xYfo*3OiQsC1dr6;*zP2{0d#{epWGI>o<&Sz;8w3 z)jKM?N~bS4qu>|@>Z!dE$bodF+51>A>48?zELD7#;V1FZdipxo2K-FRZcv#jBnNcc zxs24ZW`~PKYT{8z+-4iB_~6$fc$Jf#O1|bKoYK><^E0$A8}M<5oWTH9`ZbYz3$T~s1b&L(2HUCwHCZFgvA+K_;5Cz8eGvw z^VVmoZL?PoR+JsRkFmgWtuQuSH1c6QgtYQu2=UF7kD#Q^eg zg@gytH?Tg`f{wF+NIizLP?8WfsA)PJ#HwvPeB_&UnKp2wEMcSu#94B43KJiV;C8ze zpj&;VpnPP2#r4n&3dm*57f8Hylo2gEC4X z6oa^97A{CZu_xbef!_-$U~}N(x<69Ds0w(rv~{5wq=1dg^W%pUFp5TAN~EB=(2oWI z*vM|YxF7|LqKOwjq`+l_Y=fZ4l&xqC1`e?KrNb9Mg%eW1o;1nM9Vu|3cHRXka3VxH zGaYE<)z-4X!38N`A2tcl9Vu|3CYZ9l$#o$qEdOKL6*h>ik#Ir^7%_uL9gzYDN}Y5Q zfE6ilB1G)b=J8?d(gi7CGZxwq#iF8@wk!RGs`;-K6Tt;3V7$pKiM5zx589Ce_DO>c zYWfqK2@oA9XdiSO zrMn;nITw-#Itt0u<(IyR=NbOk0V!bDdB^#?Sk`rR(sa6E1wwo>_50%A>fs1ZSV3{$ zbGe(qAxOGn1)L4c7b{rH6cN)s1y-z}5P~yAP}rc;u7cv-bl#<HZ9QPqON$0;@ykAJGt! 
zvO#o+gA-Q3h?oTEjup626HM9O=DLvd#tPUV)+WGZR{+4T&!>SC174s4o2*lR%|2|XA9$=FAPm4p7W+mG z{t%9r5-U*hf`b9-@#3dB;Di;hCrz?*#|m7iIbj7( zgnWB5Hi#Eiz=)Ux=#CY*P!mkqjup6&^u`Lab*n@VgfPKP zsaUZB_Mpwd!wAJ`UqOC6F<)k07FYqDmlIY%KPtFrAP9g`uKKJD0f-f)sfl!X10h#Q zU95mUX2%M0@)eC=23P@|niE!#b0K-46)T{RmHvgn3UV4IHM{I9I9?kope?E$D_~)U zVg&+3AihV?_`ZVU2&{m~#f}wlHc)DG9$3NgT3A6LgmRLuFM&AiD=5w?=Uv)_73d*= zXn`Cuh)Kk34tyju;t)>#HKS^%A9$>Q!$7p}3nu!rPFR5x zA>ZDN4dR6rFd`-ax?=?{)C5zuV+AfGy|Dr|h^3W?oZdlT#0(;J#0nfJxnTuPgo4?l z&9Pmu0yblz5fM_7Wt31m2G~TZeTxEXh@7wjMzS_mz#g?k!oK-l%HgXX~^ijoUj7=QN4@mo4QEl zp7_~Tol^o8B5=V9*v(1Vm4)u770}1*SV2y{qH&`qdI&}BhX$oMVFfuCk_TF`0{WQY z9R%*UMLf?=`wC9f#tLYQYR3v#m{DK_wcazx+@suzAmR*8`wC7VumUC*J66EiKp9c} zu!0EWGhGS)gHh*vzc=bm>D3?I{$w(qbgCDDRPP3{p3B%RB$NoZEYRx?TsI<=Mhib0 ziRV*mr{9}S`xA0A6}v1dz3-y)On-3hsaf#?T6J-IVd0&3iv5#rt^LQOefg+~h!-gw zV%r>+HRZ_9oq}r}|q@cc;5sAYjcG*580HrT_T0l-jchgXM^;3HPQ{wh-vRQ$GZ>LxmYMer%L#A1g~{+AYMsXSJ)j6p6*}Hdiui% zHfpmr9`&agXPqpeHq9TtjQOk|cXNq{Dlc=b8NWj=#=~nb&h80Ky!G!zA(L_Gg4YR` zkEXFxSI)e<1-I4aJ-;6@zbDbL66C!)3%!X?G)O!&rV~aXKX_*g`n9D}^fcYPpXn%T zE%wK3&FS)W`u`KbcGDu@3i8GZC3MI1tF7h)5U`(WOmM8YTB*~geK&%wrmUvg#Wlt| zSDa}>=StPGT4|F&u1+2fwk~$bW$V+9Qw8N1I1Ei1Z62Ev2mEv!9uB}&rQq=b5}M*R znYlI6d_MEnpb7mb@{;U|@WobO#mu8tyF>!^sSrQff*%U$0C~;WfKw%5XlM0U-MUR|m;> zVIXXMCDam_T?M}r!EvVuRVmT4kkwSFP4<4|Z;;nsTK&EH9L*(<`c#=@6~k>AtI$$j zN1}9_)O|`ymF^hh7o=-Iv`w*uE4rm3iY3+*T>ZGH%|{F;fq&(?l(s#%5bu4s-y7aN zxZt=*mgQS1CcW(eSt398kLE?JvSM*vczI6*H<<>zXD}S* zAr@;{DMi;s;`b;)pwckv&Q6NL^&G^i z&!izqmU`9Ip})|A(^Xmc%89f=xKPrN91^Pr?2`J)7Q7{-q-MK2WS8H$$#{3CyWQW( z_c|q~6hN!5I+8~kAlhApUv8^bgYdJZBH0>`W|Q$S6-s}!-5+gTjF;~tUuT2aMHP~V zACEe9@?=oy$a*a;s}D=vtsz26hGdA`@@+V>18Q$9A3;x(_Ud* z2w--ng?HhX1KSqeLqxEI?3*O4l7hRsF7%gjI{-_kDNkh28vN5Jq}e^tKNq*j|J6ZU z%9-h!iob~9R5;hjqXkJ31wIr|rz!KoZLC^Yf>w)m?*DqL3GUZQ?wgQpU>W>M(BN1c zH!z<((SUr{>}EsqNysvyT#F3oEJP7WFPvv9i>dWhtE`L(2i_Oh|FYOiqYup}q1Du2 zwHK`j*+Z9X;Ky6==5TtITi|>r-oyTVcF{Gk?0z~tO#*DZrwwO9i=)~ob8T|as*hUf 
z69FY7E41{`?V>||GrTE&G)#vqYgucyK>(;?G+pB$02)D)Lmpa^`RM9wp{uvRo8vb% zqbmcL@pBP8?(J3Cq927b@)*G0z@7{iW^~gDdl%zQUmp<>7Qi7mF zoH#S}vIRE0mfzokhe8_R>A|p1Za^gepZJFcj$LLn{+WRzN?n94&g;VJ3Rwg^O-jAl zfUHnV)3;3F7bAEqh}2S8r=xUSZYvmOvt{~*Z~8AiBrc51_^Fmo9Nc)4rUa{VSp88& zQQTT42Udl^oKvIA1b8Yv7a`u0eQ|$Hx}1BR7j1~@9|&iqoM(qOr~o*UDB@CLuDj0_ zZvL?lswZ!`D^%~c#XHc+7(sH6$Va(CfzLfbRw#;TDZA(9Rw#;bsVn=tgh-IojfjN_ zX7z3O(GZAJF(vX1eXAmHJaODeT%2RV_=}w1tAKwWR1hS{&~*ua6p0sZfHjGNoYD_U z!D-eI4@YJc=_4V^h+>X;#Z#6M0mVsF6d;=vQ_bAF6rWh^yXKa3!O$NjE9R7bL*A?) zUmV|NspOF)up^fOau&%af{G-~{`t!$p#>cP;w?0%bWQ^CI=!%>J4Nhp6b9riW|_Mu z5opn@2NY_issJEwG2P5XNy01|InhaV%W^?L-eM9mCKwLnErwt{Z~%~3rb$5F4~77F z3%WY+Feu1djK(Ar05~jY$_g#bs}Yd57&qs|y}}8C`WD0M6i0$VxsA;q+9}2>VoeCt zx1g$5UL&Y)F>b+)4orfNNARS7Q%;ZjynAr=Y=1(wg!UC3=BxNoM*Z0{;|V!PaBIBN z?~vW2(|&orXKC4W(1rI!0rr|TDVDd&(I^&B>POJg-_y21TzjZx3@0(`v< zJI#vBxPP)SXUh3Y28~HjHi*IuS0=@Ny{F)8yd7r0o+fDId^$S|e5DPOc`ICiK+y9-i9;r+Zt6j;n7{y2AJfSQ*+2Sx1f!jth6wPL@6 zA#sjxI8!+S@(!Bw{#Ny{_xEN5>7s$2ejhK#$oDB(ZlUO6%Q<`SpbJ5V}MY$H*fcYdt6D9JqjPFu}8s9)*)jCcmEoz+^!6$65>`7Kp%(4;zt4m(5eHv zR?HjeQ9I7BD!X!B?SF*?*g&|Mlj?kqMY0PoSL3QJCc)2m<~|y7Y34iK8$ESz+TSd* z?-JLL?Yy>VXIPIX<&{cLr=Eh-IE>9?=}zLlSvHeoY~%!9q&BdpN#rL#U<%t zUnMpUwJuQK0NN*L8h8ZHG^neZ&^RSe8016Ema&w#!?nTqSG+QRi@;&?gdt8-Fa z=$=f?jHM^r@b1ZI$i??$dC2za$wbz5NwSRCbL+{D&7&t%X`tlw67*ye*P2}9*ON(O zaFw2CPo|QlcTXnGR%fB15|WAs&z?-l!8|gOiWm2c&YaBbk1z=$&Lv&S5GFg zu5NLJ@H;Zk6= zk@4)wR1C}~16Ema&w#$`WGb#L3DB!26USq1OtY%fAECTKj!@nx^IzzmP0g(LIvEYQ z_?|2e*c$xF+FNrC-Iqy|6QE;-u1?kAd zA%b*dDv|*@GLfv#XTl}GY8&HO)~gtpPyVa2;-3F}J2Dm5mh|Vc~W*vLlvZ zBXq|lj?0*_u9E$9jjL!Bdylh#tL?|!!nId7CbF$_b!Qtc^aKAZg42AqF`Y=O5=>rR zP}y*`hIo{S^J)L8A<}L7qU_0Oru(;hvtIY?cyhisi{89urS6%B&f7V+iM}0oipL7PJ!EDeQb}#gXyX1b+yT+sGOmXSJ*|TTwT)C~t zy|=*4@vADiR~1;#kW&hsZaRPuNAObPTw{2&HyQNK4Ev9+Igw0(tm#v|Nq@UL>YeY$ zt#|>?!?mz44J*CDs6WXlk&AW-adIhZYkdK}8-dVG(ooI|`s8#vI5&!y=-0%|s+hm4 z#00IC-j7z$4RDYcz<~5-{cg?_IetoW0ahy3U2;|z(f`wf(Yf@bD)@y6ju@4fy_3#$ 
z6lVSdu%wy1!6nw}xx_Qi{qUu8iF~flPnTeVc5{jR(>j-6#6*{7vOF7bKgb;Z0pt0H^eL7d#~o$uU7v&i5bB%_dj<0;D| zFNHlcv)r&5FGuk*DTQesTqJjlFRa{AWRwov65s5UQRuj00Kyh<5F9nNIyQ@{TUS>h#>k-_^b5vWe zq>JH8R+X#P3S$k&68L5d?(t+;4jjG$R9*MVPZ42dit;sx%b;Qs{O`y!R1-&%!iTTS zgGj>?i{bYpP~6>ZQ*GK!cFYyWi&({R-R413aV%!w8?83b?Gb79wr3a9R_|zJTC9Tq z9>HsQ9^67k!Icm$TnAM}1vICX`r=wvUo5R>qJ+=4FxjqPQ4SD3;!46=kTEczC8`b! zhP4j(as(S5MkI#=9|@PC0L{ON(?38p9)&HLY6xfVX&_#}pP3GHFL>6Z`oK@l4Tl7>uSKyu z%7;Tm$wvZ#5sm0%m}a*(?as&z*5e6lG`JwqHstlziU4D@v!bGPWO;)?80o*2*A_6w zDsQUVrS>beG6G-8-utJL_FW!Eljc5~X zyv2+642DD1dM_`=0A`XjrI*r&t^*j1w)@X^^YXIoWlM&(J$s=FzJY0JMSTdV$;UWW zDz&E6Ak%liI3Kkh=A%e>@NG;}E3zyns`?Fmy>vP+5m|(hqJfH|-D^cAB8Li~u1jGJW+uMQG^R)R!$`F?)1PM{ zi8YX)cn-CR5#HT~Q#>z-1g+9&p2p^3EXm^y%%7GRrB}^4m3=FMlipkj$xN|Zz$;RO z&u{RZ7QDfmX*r$wi!o`!Ka6mUmts&^4*nuSpyOgnBR@$=&0qGmny++FADO|YFi;KQ zx9CHwDUEd}1_TDNDwmHbhIT{K4qw24H9TiR5b>CCC0>LCe&EX(xQ1s_4jc7h1fCiK zuHhfAT9YwccxXi(EKCOVH4I=QFee2GKS>*a4Q^jOUP38=QGYF%=a+n6sdq1v;P546 z!(4>%xkTwz3AoJ`=VZvA5 zWu@R@lBU3j!*+|)+SKd5b8L*+O!(Tu+!*7qT{eyrwi~c5X5;ew>Dyu)w#&xQ@P@~B zErew=!CdoTpPC{e^sRuE3}qAW&^Ghh5^@WR8F$NYq}bQ8;)Ahf3Q-1hy;6 zTM84t@@{SsD0e0twp-kec*(I{HWR+KFgD%DeYGomKwkL?Qd zFiA@wph#L?*e)ZksckKYgChMQv0XL~Ds}(VCD1EVvX_Zkgn`6%g_){qT{$+f+P8sy zyKF9eh&QJ&+`e5lPNr=o&jPk9l*O7ONE9bdk@sqFFV?W>A@}XF$F@hOg1gRVY}?{Tr1DFWQN0bC8p&b41q676fxMYMB?&KX*g_GW>ZRP z{<1F}tkJZF!*-9kV7o$KH-Jc?;Tt+R@=TN@1m9Jb3dCj|*VNtXe_Lkb4BL}I%fzpC1g)wh8m zTRJLV*%~2MB(^K?_WTGBxP7|iV2uJjT$^mhc8fOI!xw5t1XY8K!*&Jv$nOC8 z%DeGEfx~u>XS|t)yjR!C<@gK`w17!(qD;S3)wYY`MGavK#VD28n682SebC z5=9KQE0Iv7w4x95ir1+8rRv6T*sjc`l+^rXKc}>Y!*-8f&e$%U91`2*I5P(k8Dtf6 zjsY`MmBtuEB4-na?Q(3&VFQ6Ua?~)`F88=1T&Nx0BH*BCa4u;aw#zdo1qnY%H@6)x z=NES-1IMqjI?Aty3XJX2QJsAdiR}u!JwL(&4%-zNl>-E-VdJ1#T${`hPy~t43qZL& z)s8*LeY>K(r7+rx{bQf&YOzyyb)$+hvWgu&8Qo(j)ODR(e%Xdc0)haSkn; zCK6*~k5@6aT8ssP)7We(aN3HZCyJ058yh4P#wO6i1P_6LB6xT;B#by}|74_UNd6qP z^`FgyO5Hz%2=vMjg6Y{9Da#h+2)-C$PRV5C^@vPfa|oDP*ytcu9+ohL+`lIADbyd& zgJ6Q&za}v)*CL2MTcU`;I3yB^kYcJwyj)0Kk=P6lhjGYkN?j3&x>n 
zV_30)DYodCVldHnx)BoN;5gF<5yP;d5R9DL3O*}AD-rVDWR6WaY#=&sjv5Bz;2y6T zE!3tm6O*|TTw3^Bx- zeu(VCav`qixY&Z1X}IX`Ob5g9$S}w!Z(eib<0E_E0nI48JK0V2{9hH<`4kw`&d%^+ zD&u(HN%n2>3b%)o`Z=x`mW-Ixk2U<`-PPm>KTmqIL}n@7o$)i|GVL@Y>psI@Bc8mx zN~db!iP1QXe_cx~CXt+}&fwgnw>==Yn@>CZ-&Cr+zc;&oci2rI=*)I^hW*agcr=@g zhu!V|PM_S6G}^k@UG<6-x!_k~IFPO|jr!9WxvBg@+}Sba_%HxE6t#b67i*<+A2rbGC*zO@74TOWRHwL|_eB8C>j;%W!}@hi%N z`+g?u8iu{PFlsS%qi(eaTL=({%abQjftWpx3B>GKbRZT8Tv1l+6^Ped194%*mGxSJN8Uv$ksqiAI1b?HY_p_bB|w9ChQf6*E+6&*i;W=5HhL`rBOFq z5tqkAsO;BT5wpiJftWpu4#Wb1E6R$!0`W1|Kx{H%Ol)TQP@1tNHV3GP**nTAL$(HD z`Y~TyWX3wGnu|VL8vSP15 zeB3ntr5Vimk*h?ok? zE1@+^1x?i#0GcQ_o~i8D8i?8Bm_W>)MF(Pmz!hc1UV-?;Tmmr@J|Yk+wqh>?sEFAv z1(hLN12KIV6NuTc$Uw|JenpwES0Fy&9EeR_9VW0+btsKmAKxSR`}-iNg5ufo^7-9i z+$O8#PK&VS761F+su=^dgq=L-Nlso}^d(rVmoN`b-sm?&LNNFo8I%}ojtt}Nl)GU( z71L#1cCa}ztRvtYSpgbsj;u2XI7b$ngUyj8&2o-q8HnCkpxYKkUh#RUCSFLVF)2RA z?P^L4kf)#y7IWwm<8E!|v=zI30X zcNV)EeY5?d-g2C4^!=X^e4jD;lEAb?<8Y#hzj`pbllkwYF}fOjuLfVT>7Q&^9S+Fe;tRdWw4>Uzc(wGUvC_ANm_z9c4*^Vj z7y9BQYA=927kb0petga5{^h;#!sedyMM9$dV>34gh z?e3I>pwgbWS9JCo&Tcudax=5LBbzrBR=2RJ{2zRFk(}zAItR7nc!_>ZIFCuRrk)rl zF1~!C2%?##JHL`+^N8Hi6K6}ej@+e|Zl}mB-A<8Ox}74kbUQ_A>2`|D((M$frQ0bM zmY%@U_Uz&gG1cqf;M#$R`w#vqyf8Db>U%r!@_pp}o!MY^Q82axZ{^I9p%#0|F+fY* ztzmCEjjxA8@wLP8*7`>FL3*^QE>Qo`BD^^J*u&(#p7BHztB+jM+3sH;&bEHnLyxQ< zUAY>%ozu(z-J$y@Nb@C`mnMA6Ce}&HlAc=?uLWi@$PJA zch=3=pT=u&biH*rs`HQJG<_KP&rzPHBt+4vm`Ws>XwG%g=V-+q=ZJ--Xax&D!sjBm zCes#Wifhzuxh{9l#Nl+otr3PohlzlBQgCxnC$&X_-_(}YXuTvV~{0tnZdPE z>Kg{k?R@Q5H)@v|(RjZ=G;{}se4d%s@%;IrB z4N|HaZag)3qF~Od!J^`m**j6Phfx=!t6|WhE`{|}G2r%-0?Arj_!a9HcEQ zlSXo_`14Wu7pu|}Ytqlhq@RyVKcA3(zE%49HtFZvrJwJBZ^f`U7!77QJFga&Np#3f ze!x=l8tbRo=9uYpaBgJ(d|sw8`bTZ_$>-~iK+6dC5m;VkPgr>__Qp`2?L4r&;XVS5 zx$N`y5m-24PuO{&{>?sbAAva-d*b^A%~g-F$^Aw5F}7zS|JYp$YA#qh-Aj-YdA$!2 zVrU*@n2nMRoQ%x9h}m#tyyjDQncsYPGUa>_jFE>q)|iCut1lsuP&p@r>T?8GupkU; zEZh)Lm-E393M8X4pb2xr$cR?&-ZJ9I_{pe@q?~1A>_{L~&IuzU7OS}#Vbsr&Dd&SD z6v&Rsq~dKyEZca>h$G`CqcWRuGs4)BK&Y70k=F8yk9_uexIW8^y)(mpy4zroelv|f 
zc76OC*Te5bU@M^Wn7Mv5zQvXcnZ{{F;`;IQsJZo{Yj-(P=dZV&kVVY#`R?v$aCSU7 zzkc-g2%d|;*70Pkf`p%&Z6nf?dMGz;AI^EX-H9^K$veoAaMO$da`5zu<#D26Dr&-n z?S#AHvn=1aKaex?de_+qXA~<`^KrYek=dp~cQCg6lZ-2yv9XWmJgwV#%9M{+5leqA zDq^2y%ve=aJm9LL;sKr^q>5;%;i4i|l(@wMRtmYQD5uVIRL_f8_H|Ja`>eByiU(X( zR6M{lgj5l&bX-)#Y9dD!k9E33b+-XDME{;SoWX#;Lnh%iIFTK#GFDYIJi zYW24@-szKVUL$hIGpn~;f+G7Yv$d<$AL$0I1^U(MPwSCat3R>^Wc8w=m~$;HiU;^a zj4Um#R(~!kDvCK*6%`KzP|@?D`h%ZSzMIFr@=Owtu!B*%L}vL^t^omNsnQyd72ZK_ zNMJ_t-pl$?!}Ri%H~sXar8&*?*r&|XW1o_z_y4uvP96xN4Dr8FtxPkNlVG-F`gijm zuT6mEd`~Jl`)~A{3-2^}g8{$Tg8FCP^UG}ARU)K&ye3AlyL?g9n@%pMQ@-<2iRvG< z;C1!sQjD3}(x%7bOhjHZa`8OqGY^$$aE?}pCbJ=gi*N*(cs!}#9gUxhN<=6}Bc35f zqxudal3-%;rh|4gX+A1Z{Yndj#UiJuE7Hv@>J7EcTqeTA6@XGMut)soq!Jm*0SUK- z)uJ*TPk@Q4z5@zW&<-e2QHcoafQ5*VnU0?r`5v9=BsxA0$gw1#I$lLVfr(`MI8a4_ zNWw!@CFGJWyyHbB(otCzE>;P2v9U@b!o-D$RWc=ftdc3IM23r1Y^@Pnl@r@4%$|t~ z5vzDA_*lhLQHcl_t2nCISS65PVnP%PA{`$GER;s#UpP?}i?Qj!hqzcJknj+dNCz(} zk&a4KxL767#l|X$2oo0~R>_p`u}Y?-5*aR5u|;=utl|hTQ6XX#PX!;Vcq%Fp;bIj> z6&tGr5==~pSS8Z&almF`Re?+6W0gR{LsTLiyr@JvDyzc9DuFIGR!KydxDc^Qri71G zG9{JBaIuQ*K0(JSjsO!CB3AKK@Ue=gq7o4ZB_j{x6^zFO7 WNq@WBpG?M+PQHZs+kf_@$NztIWgI^M diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local deleted file mode 100644 index d1f73ec681fceb20a96903b3512fd110cce38d52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 267581 zcmeIb3zTF>eIGoxdvN?CE1dG-BQ{onupuj*D^HHZKE`5*d$rG0aAZ~ff$zwpe> z$t9=8-Tv^}+s^fd)7AEmzUa1Z-}cQY``~Z?;Pu^ozjep|oqcZU*FL`&#&GHOXwcs{ z-#OPEPglX(BVgYHJ}>}asr8!zvNJ)6_>+r7APBwGGHIM^BOOt*KYoypnmc5f12 z374-PiuW)6HTf@!8;i~57r~zF1F`I{umdqWW_M%S9}WG7jkLo?;;@76soo#}x29o# zKY}}0c89&`lcVtyoyo>%yVu(64JW`(jMLH{kiackTiZjQD(JH!6z z(Rizc#kuE}(QqaH&e|_AS8QuNZ=Hg4ZACpj4%q!+4 zCB+;7SY1rR2>Q3S3@|_G^)}^Z3rG zB!owFp9(^zKUA4Kkz(W_MfhY8!ENmf;^tjDgCw-+h7cjrY6wv+8w?zNy#Y6Sdv4sk 
zd2=%a*GMHn$RPX6z17FgjKceD1TVJ}UJNI@<9_$lpm(ypdg?$^N;S|+ey>UkCa#H@Vfs9KGCZl+70}k8q)w+2y>7NqFxn@1S%&%SL0YBX^8&swW!2zuVkT_~t z>vOKGF3UYmr|6GF@CqA6w*+6a5>DYs&K&9H{t1hmaaVVW>XLW?e@(<5IjItAY}@9w zRJl{s_f57(D6Do-*qh^jbt?N6$*b`1Bltd(64sGxqtYPWn}1`Db~(%T>MhiDCG1C0 zyW2@S*6uW3i0AKq-2=B+^AB&g2p=c9!?W>x`u~Y7^Th-$0gN?xCW1Q+qV23yZQEZ> zL-Pu>;=?wfnGT|8+|#B#{l|t#edCTow5gG!gK}9$03?{!ovUnAEw2~z<)-(U4?OmC znr_1IQnc^kr+E{r8#NH|kp{dWgvD_^yV$~qvuWZ}&1El%vE4W?Cw0BICgK9{bR&!% z&_+I#RT(i8TURqT^FC^T&jb=6ZRo?Bau`u^O}&rEG~GaEpYWRvxFvu;y-Z3Jr+0x{1Ti&Nbs=;ZZKOx)@QP-LtM+x8M~(QalyEdAk)D$ z=m7jXb3c_+QM;&a))M!00P6@NCFc~;2KYHM_LdV9-O_};<&P!K8rQb}9zv-O0Q_q+ z+(v{Xc%Y?_3jg!qOJi2>C9B*b00Bn)B5O>mMJHX4Z{0idL^A9E{=f{U1fevCVcPpfzJqx6O3^G%sfDCGh6u6{)G6XW9 zeCq;T=1)NYzH3yV4^~J48A!{EA5uVukn<7{DIf#sc=1CDY(~g7t?&vm)I;*+elN3O zAqFd?z=|`nArTLhwLKAr=1TK17At6HipcjA7_kB>gzBoW!U~Sg0xKYc%oHmigL3I<7j_kpdB~e^ zyIlo@hL#sUtbhz5pJqU;fDEMLg=?@X?<%kv0b>PphrdfHMTSWW61vBquTBg_77IUE zVFhGJo$Ty)71&U-!V0Vi`CPHVDc5>{9N`LMbXG0j!djCcjLQad8U8mzDa!dqpmfIMi% z3dkolHmL14yd+g0RzTSjIu#>UKprfLMlTKxwr91g;Ajn40j4=KtN;(kxv0eJT)M)X zk&@4e5CC6YXu4~wT?M&Hs$vEBF*8<>lfMwL0z5S0p|A(Z1xo#b-)UUT+CPjV*`aoJvSIw?JGE12`ivN zuvZ1CaICO`W3#{t$RIPt3do?E`k}?Vf#8x%$q@4XI^eznGLV)RKdgWZA?GC^RzL>Q z@xnD&m9YYw5q5K50hu2MGFV{+sA_felu0tTI-BTU0YvK*9{h3P|5+->D)) zFdtT6wXfiq16DxfV#W#>8z?k73#{N+C9Hr7!CnJIXXyr&yVWm|-GfhTe0}#kI##ga zNrP+j$b#$OSll)oSs)ongBqkqQnCe*{@em|jNU2#2_2qy$Kd)6EyLxUBun*~T@X)gOqp546w|Dh4k>?yhs(38-s z3jwLIID@|=Rc`}*HKb4};!u;x2NW-dJ@|`P-&sWPxdyy0R2aZ0L_kYJuQ&vx7S?V} zg8hp21$apo=GqPYpNzqWAY@$tG|rkcOrO=C2K(rQizssqGjb8J0lo~zPWr=>CpWJm|sng!9GL| zSy)_ONKfF8=inU?+|Jss+hoSw;S=PHVs0)987#+SCi~X+qM6LCMsw-$$R1P4F20x$ zu^dltFyYVPRdQA`(89P5!r}FP^`>KNuw5`uC!MZ#N^ZwtN{tf4JFu zswl1$wIcjP1Fo;k`|;kOw}DQn$6qZgLDs8rZ1RZ)iHF9D5>zFXw>F?xSt_)r>E!)P zOInPQKW1BTFI=bpUlDB9Edr(>udPs$G(&vcQ`x}3Xw)Zwfc;c$f}_EtN}u*u5p2|D zHRUd@Hr~rd)37rd)FhxgNQzrQtqK9lz+r42>kqffX~0T5XpE>{B6rOxX6|=+@K$@|v**r%J-oG}e{2h(G+T2CVzX z5Pa#SYsJd%Tkut4;j-33KqqHc@8@UXlZwrT)$ycn3oC9mLdLjK0PAq*M6iJ?YCuc$ 
z>EA@~N-NXO2J5U`xJ5tW#16DWKp^6y0v?zmhn0k6Cwmp@WRIiYfmSf5XnSw%I3Tlh zqXhhPy z5-JJItb*T-_zhu7L!cE3Dbcf#RadFa_f(aZR)4QLM>ENzK2;`K#o$)&&05OqNN%&A z`6;a1r0P>RRZ>{M9%KBfurW(HLjhN`OGQ=IIjDJ8kBep}`{%YxX`B6X@!tD--NCK> zbC!!_S-$1p*fB_v#$VOM;nNX+jh2$&?ABm>a^x3nLvw~BQSjO>(voy)xA=(^oNzak zS%}uGuTA(HcxVza$tC8-afi!9CtOLzNd!vFw%_%hE?uG{HZN z_`AiO?314q#s4JZSzl@d-z`_YU_r{T?G9&zxD5Tll-8&OE(f?IJ zTvSt|oWOS@I3CV5@@Rol1jUB}>NI7Zd!-FWTS>2q%P2GVKisH;`*y*79kLB9gZBju zj`U1~+2n~DT*-a@eKzeHNOnIFo+bh|-qVCrp~X>dl$ka;Xw`?U z^pSv)5fxf^=qBxuABG3xlVLh!QOnAh83cf$qp2DP4$ugi9P-eT%tlwIsjeP{hvNt9 z(Uk_w_}K^^^7g81(GSBJc?=+L;NqoN_cwYz9U^a(rf2mX6#f8Ck}2rNnL`a+7plk z^=VI0+*pj@!yzzdD!}9Exd`!|?2G+t(#71X?EFWn=T2~R0#5178P5*mgsedYz>(-g zGudjq*ZRGF_;?7_qqp3-nEg1K7lb7X8DUl^=(MEW zbA2lmbX=@Yf0y71qPo#xVS<=P6W$gAaiUW~-{7|@0>=}}jl@1ja{P8sL7*Um*CqT> zBv!Zq)+8u7sUHx-h*XCYT@~q3q>qLuBj_CSil-e$cY=x}&HnkzCZPo#0OF;ZlR760@hUdB zo7^)9$V+FLyC)JTZPo({HB(gpke5z3b5S+WQrIT%lR-dUItdXI3E zB9Qk(AwXVAR|Osh1$pUcL^1(@LrPOrXkkXRfV_0voEQ5pB?#(EhgT^M1%qN6n?JNe z$D^?(1nNtvs+Cs@>PyF^+$b@6ba>xm{?L(l%BQ5mjyh6R z7Ba!#?Q`S5s!_D`zL$!TR(70nG7}@MF2B#9!)$9b&9r4hYESAk|N4k?7rt2cy?I0Vwj5t2(DAM54ZoV#&E$P zJhe#yWy{V=GO8=$Nvp#E=+8GB;J*z~Z{I8yRGauQGUUHMU%?8&iEKkP4IuoAG5n9H zzIu)BK(Ak+1$(vvf%i4wY+cSFw(L28_GoQ|Yr56_jkQtF^8);76SnIWnRX{-ZO)YQ zm-K6splA@*X{a#lG2P;K7EZ^TVYc`wq6<;@e`&&a)(Ypqx!MYl)L~|*Zd!~CsTMck zRQ$$TwTO7Mwlnz23Z=Xmfy1a*4#(YrHipIfnIDpwN0Gvf6xSGOF4}^RG~kW4USKO4 zF(6dV5VixcVt;eZ@vR)a;m?}(S=N>%6>!CX`ZG7Mq-!nPYHSv#NE9}_3#q_9kbJ5M zZ>q;z%I4*kD{K|Oy3Q=s>yslKc+=hF2cK=o?teQui7v6j9FK|{#$o2a_q2#?2))~9Z?E9oCsct433DI2bF zk~`5Ip3VBIMe)PiMciYbH*D4%OE1zXq zkX4$xqpj30lUaz|(9cOErtjz8S>dP>v*;9g&^KL`wNv3>SM#iM@+DX+G$~rS(ndkH*!fIXQ^0|l?un=|j8 zOyF9Pi~M>rVGO3y^X$oF()8}hgxRVrG*nL}So=hMtI%N?sLic3Czzj#V_GCv}<=h^oCleS_IkUMZJKB0S^kgzYR_V#w0eUh^ zkrzg+$G|O{Gw|)nc+rV2mv4LV>&f^bu*rM&WHOQn!T0USc(N+536}!P?(bRH%NSrX zAT8`egN^*Sm@R7z)Vn9+M^~vWTu;W2mn+e?C*z5Vj#z|^&^;MHE@7tjo(zXv_ns^d z*Rs!^7mLlib=QlB$3L*a4Y)_M729oor*ADerG3bA@1jhQOi%VQ!w$^=9hs%TO{I>c>*9aKH*ww_880}|;evEz{18DpG8xGL 
z9T`tnHTh zCwn@5hu6vA?>6kO;FC=!FSf8Xh=!kMp!5E4X!2R)Y-xrT0vBcVM;k|w@{PCux3L_jCSW9%6jpEKw#XSa( z#1DBZj7}eVt@0COdOnvsQgP)tu)bF_m(>j3;Fy=10H0#sG7rB6H_EKzs$o9fWP+x z6r~!4Uun7?KZZ*gc9tsDh00~@Sh++V^2%i_K~aa)QkFxfGImt_y=^LM!awtSZ;!J9 zmAbf0=JVPvH(Jj1N-_(T1#W4jjFApV!wGZ>7NTOS z+Uw-(q9d#nmomezMzEw+Woz6+CubH{73syG8m9=8p6qGdr9E4BdGhHW@^+Ve%AC7s z&33n(N#ia=>~`*we_G`(gxIssUC7Ms+$H~XFte4@bRTHblwyxHAq z-HEeL|1BsJp?{+Z$v`*K4Ag?d@im4F#1dZSE@O}4MO2yN>U=F28 z1g~NF!LgV9je$eG_gzks2CNp-WbSB=ObK3=+T2A4-)O+CEOV9FZ3&L*Cp_8Ju9^#ThT ze;UDSS&k~}6?H$#8rLA;umK^xW|}W^Wf2R$-GJLY85RSFuK;CN*0NJ{u(As)c>MRs zGgKo-62gbC%#F#4wju5T27W&R$^GLd)uvr(OJ8wtX;*iw;q|mMISkjtudpgjvaa|A zum^Y9m#rUMjN_=qbBYeCw;;4FH1O`$PN_<96u9!4aF10Mf zwRiI^hfafk?9^^Bd_5Zi!Jjxa84QzRC~zoy8EP&~2J?8i8of}$!8e@R3zjvZKJb&X zzuW7AO1d9LvaHI6LrKYp0)ZBdXk{2@r#tCP(R~7=F=;ejO-q0kXvK$ZN&q?(k54qS zGN7@vxQ4Ka^dB26ni`H9@P$Zr!-6lz*#bZiKCdL@&VhA6<<8u)1b$3Nb5XcMW3 zRh8cS?ft<(wodFTG{H;~$-(lzL*RuOT0whR;AH^);b!lt&h&h`U~#0Wsoc}!L>y}{z?IW)CA0ZFowmzipm zfyzs{#^j@0K-YCvM<4|zg|y`>^&)ZkaAtHdga%5EG_DjjkQ_>Y`URcB{xFhl>GbFM z!boC2(S+kHFIG)GZ_6@`#yE}5!dQ}rYM6g(cT?Gajo@Z)uDE1YNBUXziZx<}guiIO z>%5s3)0w{*lREsP@@h=YDeb$F13F#`142^sm;H>=+A}Ut4dR~mq2-iDx)TQ!IVT(h ztof=7Si^G01rb{uHP8YN5rh~i(27sKk0}Ru;p;AN4a=q&Hmbu2EHwwXhJCzjO@$wL z*ps}9)b7(m@2H5kU%IR9_2b`9fH+@IDE~qVWweESOYft zDc|pE#2z+)z_%RRWQI{OKsYsQ3_Z6tnITZNDn(6U5VNvHc8@fG+C--YnPpE16Tb4U zJy5t|yY!Sd`MPh&#+VGX%h)cNh64Y@jTZ@!E)515K(fIbLDw91t=<*vhV7D}6};t5 zvBMI8fv{*MF4(SVhyV>S8De=>*e)3`(}%i}wEz+kc+PFj_cF{!905tv(!B}bC1rWJYtXeLw#zf=pSn1DMe6dBhlz7HY?qsyxJBSXyv+hrb?gbTV~#zWrnVz(Of_bD%3n{nUMulnPQ9HM+^}7aH!eI_voqs3;)v~XjEVum0c+&wxwXkG_*q&p zxB#$S%%UT<%gIOF#P2Kb+5?3fwo7kStRLGYL+vuQOQxZ~KmD;?GJscG?1t@zwh01Yu2VrJMb8L*7)%FtB4+%!O8veicy=#5OV^oFV7*GO7478h*SJjm{*GB<2j z;7Z+hSwzkd3zcL(rM$H)ORbLkcF7>b6oqu=fG-LZU9eq&#D$;I+^}7dO(CiI%iffb z!lXTL!*-9>h3#U(yNK;FoT-C|1`(4LzAo@oE(jwC?1=3$Y>Lp01LDX~bHR3*$0gxH zx4XeZvhu=Oqgh96mt{@}5`K~{0)U4Ubo{#Q+hzEb)qbSD^_a67vgKP;9dB;fF2@@e z9s)tOcz|-z;J)K? 
zH4PDoXTr%?t3ng&HJ9A0UA4hCgU|Q_K z9PmYfq6@Ywkht(unj5w&vMD4rf7#C{t+`>l$Lhj%G2varb{WpNAQA|?$@wcq6?gE) z?Mw!SO)+dZAdU<*7i^b#ykfX;K?rjd$m?Gn9_V%^1IwHcB>W_;IS_KK2p;Yda@@De z@GGs3X0dNKSFsudcHFni@%HQp4{q2l$EX+}oZ4gtnssZF83K|ZQQ4a%xNH!oal>|b zc?)5}SKhS;3KSSmRyk3OyChC~=q~Ao*XSX;*TJ#4Z8&6CLZ1g~@l8PKSznRtBw9Tb z5W!zoJaAM=)a;A3O{Kew)MS&|?D2w(MDJ{b$qC2-g;*3w zAG3@`=vuQAZ0s=|^~+fit#ZpXE~$mMLP6cHM&9?@O4_HqTmp2hS*7dh;?rMIa(}02fglih;EMW zjL|LcqyQ;j<^#|C{Hu9KS6$}Qg%_0rc{bFk-n6@-9^&-r)0VfO>xXEohp@kBeFI*i z;G(xZ=?_Lj%^;t?rfnyxkM4oH6{GBIZz6qk*uU}}E_(@f>*nq4!TD6i@jm)(@^ZU} zlj>^}?3T@>I(ErE-dRbO*m>eoICX0`*V-5jr{mF}v)SA3p$nXb8|OPK z%P|~CaTbvo zFQ-hIxY9=xpGzxU?4HSbDwAA!_L;5ZZW(*CT}OJjd2`cfw>leizJ*if4C*wv2A|vm z_j*|@DgwxAhRkZOW{FFzmOkzfHR-e2!b0M({iK*X$`sG3z`M!H9fu)n3r2Fa488R2 z5|`~}MEDg0xZ<{&Irx{pwFBZ?9ez!>!~)-ez>k3XkO*eejXTeg9i785Q}7t6kC)Rp)^fQq;{0z+oM#)_Cc z?h=T}v(AB-Bk-KEVy}w$sBIwDHFYjQmFPoi*@g%oAP|%HuVsd848-_hmq1L0bqvJJ zp-k4CR~bS ztPZJBYvMb6Z4caE#$xPNHMmVsI-vM4I|2_BOK2pwvLCOs+l)*iHR;E8xY}WE{6wCy2ljoFy zzll|V{lW-m;I@c{&cc0npULc79mFH}xqXn7S|A#|X;t5m6+GgJy9|voo5TW7lO&k|XM07H=bMHvo+f64cJ#B_AV>J9sI#OT$PbLj_96j zx2C<}WHjy!yIZ{pdG+G$^JIj-YOeof6OA#C&igp9UIlepuZF~~zl@EOJfc-}$?K*y zioCHjelG86*NuyJq*XOj#2u)aBJmb8Mcgl%DUyQ2Oc8f?W{SiTd6QD(LI>an;YF-f zp*w!Id!6oZvojg=Hww$Th8Wh;`b()<&8C6W-b@kOyqO}k`2-d= zr{}kk(Ow1z+Xo`%Klq#Q{LIkGZxF?cccM5o?N85hW_RFm#xO}Oy%!y+w9wfYbSIPe zayS%UnjOlNeGnfYs|wyAG;5|r8F8V;`h=eXy4~KByo|Si)fGjjZ&;tzipP7!7?7!Q?6xVWVO8QLBA4w#ch0Z#RaKf~SAPUw6c{^7 z88cNLc~xFw?Wk524qH?5fhyZY_BJp4M}fg()(n!;O4Qvk#Z1)hB9nY!01hI_rv`s4 zF!w6ToP9kgviGWDW-i;~NRCwqN$lN*P%U^wT|=A$&dpY-wZMX?D7ULB3{@x3pNj$wA!U5@jcs@;@ID zf3YGw(H4F_D*Sv*`1w`B&({e*UoZT8gYff>@UF{YzCY|wb9Rn$ODsGJBiT(fne@*L zlULb3M3#N=W3SXaR-l~ukyOdG#!vIqji1kJ0Yd+Xi9Y&#)e*=(9L%qI1QPhkoXkAn zfJ{Db9)W~2@`QN=92>~z%_9(}Ay0fSCs({xfZ0`I`WV@fkvrJ;OQ@-_a*4&Ft(RoT z*9V}5j3S?z2Q^|=%-rsFpBqE1nB6Uv=&|0%OzJitBE?)>$S_P1f#2aU8-|S6e9*&q z*^rTr!+cmW#eCcdF{}{@-C12iXci2iVonY^%WHeQ8k;+b3u`2ZB2gFfaU;aA<|Csb 
zOmlN`kr9dCo_55L@sm-JAD9`r*bzsln3Ic)NCskN_S=dS;SXKw!fpZ~o@_>n8)FJ1ww&_pL<#S0KU zwG^F5zn({56ZG{h=w347P^;brV-Dfejx!Q3z4fzb~Z+#iPhXcFDd>Ct$rJB=46 z(=j?&Gg&|t{OcaLW%VE&z~7<&zvXogpz-(T2H>mtsWOHXDeLgjERJ{);+aq)^WS3h19RG(?%paat2y;K|Ywt~k*<6g_ zHITj;Q({gZxejHaE0zDX(7d~MhCH4IyfpBkX%JssJpj;N%tk}e4v^KP+Y8|q*njV! zyU{y48f^B)W!vFRW$6lX8Z$nWIhjwFxsW?uav$2sG1|I+*iSt$6>%00Qh`a#aemV4 zZN|<1aGEr(np?UIeH#6uUHsZujp%%-;li@9T93xAxBw z0i<6oB0rWNJd!?-;}}t&!Tb~0qd3bQL9q^r1hcS>B5D7r ztM;N~ae&=gg?@7dLR5rpF<0$P4#8ghxk)=;B}6futQW$dKbc;gy4fqPj*djBGzO6G zZgCk6#lmW%y%vqu8}v4&@g=YiZF<=3Py3_cRgh{Rb&>RIFSP@etKAEGj;Bm>eu)e; z5C4m4WKQ{Gqs5DW{#qjOJK)f96wk)J&0G7YPxr>?_|~4%FjO9rIu)uWj_;gOJ}O2W z92aw&`mrc|ICBLOZQ>WeAv7rzI*^1qC_twBo|fiM^f#a4rE?|hd!pCd?xc~F2_5Dl z;3~+dNn~;u{iQoROL(CHqDUsI0E_K~lm`?ZR$(#yc$OV=S4vaTCg)R%XE)MG3^;wp z&xd_z=k>IY&elb_;ROAj=bO(5#iu0vOaM-H$Nlc90ovk%VoX-Q;H;L`O9%NlC1gAl z7E@E5cKgFLpX9|tBc5A&0a|N6n_5n`-I4th@~q{Bm1|ep*B)uV;2i$u@OYzr_~_v? z-Rm}QIDNX?J91W4M%sUq+`UF&kvr-8Rwk_mwsfpaO68P`rQfugQNA+FIFM zei30w_E*?}m>sj5*_i*Zk#^Wf9Cpw>)f)uh)->$z3s+X~m@GzW@_vLpAs=OGl%zU4 zI7{LCzR8YO?oOy?ww<-HwAe(w#v1y!?687BmYSI>Wty6sC>rqb2ySDQ=*HDnUvohE zFJ1prPY2>9Qf?p)#7FljqMP!W_=u02YUkqv4Yi0)HC$n=Lq%#n|nRa-Yd8ANa> zwc@uqLz`|05hAS`Pt2$1DY=JS`1J;Ghjcr6R-8gfA-F~=2|^~2{TZAYJ{y5)89q;K zrptibHCVbMY+1NH5yu@Swd$^Q#hbU@+W^gWLau1Q$h`Hggj^i_!SGHtr`T>_P+91` z4>>>ZTM=k>nQ{^nZvV10pdj(oARPE;1lO6;c(!oGmWppdH+;~yoj5boZ2ef4+|;#} z-+}D9GZX%8#9tsJv#WIaCTDb=K-m=uED|rQrf+?%@wI|RKJe2Gvq5F55FF5MU#z2+ z3nO$EQIOrUABo@O zxW9F?zgc$KYw+(Q_&$>o@}+~AtYH}6Fv^dTOzhQLsOw5$WdpBc*un;D@OF#vks(7F zK9txpUrf+iw9iERjoOkxhSng1wC!ojp0({RyBo3 z{)`*@FlK~W`!Ix%J*pd-kV5=I1jcRXBn6s!3LkBj`wF4O$4qm*mq9}C;Rdv<4MgZM ztc8+OiBoWMTpE{5fk9bT0p_2FG}#ER`G_x2~1E%A%tR*L6HKw-<(Bl zt1s|-Aq8X(Y+Uz83JBGfhO>H*0x~koj~`M%C~A4B1t}mS+wo$96cCC!Ui^>(n-TDG zKdOpptIl(1DD;+t!3JcqR!9N)uugXNNP!KtEgPi3iV*6|yxLbXhzf78LJ9~GodE5T z0vl?aDVv*I8+E4M-lEM{zp6j)GdrJDeZNP!ihVD@Mdq770&X3VuAReJWc z$YQr#rhSG>|4Sxf2P;@11%#y7k{F9Q@}LUC{Np?_{cY1k2WF&lr5o8 
zv6kG7!h{V{kiTll7GvDzfWSxr9*)oHnqCxakOEATb5WNDFrQjDSU}?YV1pFoDiHuY zV-t)YGb05#`I5#9qx6uT9G=r?SmMc6NI}kp;DMGx3c{*}DflA`q<~yX9_R038Q0lS z(`knlaPcV>aIu%5+I|84S$-da6;?oRtuA&GSOiI1tbnnBLZh?53fh?>V!Wroh!s#F z6q5{!70~ytS(lzMRzT*!Ms|O!fKb)c53PEz0x~koj~`Y*C~A2rumY~gR4wI}-yCq> zdO)TJHdq1qu#Ok5!HR_!1B|;0Y(}71LG~VS+&D7BT?J%*seMqW!U`)OPwHf6j}_QZ zv%(6j2>JGAWDqZ`fDq9M&>kzWp~jiAxy`j9>5UbTL5$0VHoFQ4F^xzqu>uQ9c36QG zA!LuHM$rq9A(H?btblx&Yecv(ZklT#tbj};AXY#~R>lg*gJ!INd{SeBl3t-Gsa(2# zGNu~{VTBctAzDJGVl0EngMm0S*q+s{g8axKw#?+P0?ybq?UC+90+-$s&&vucz#rvY zwAO~-x6(OrcKKh&6#6;_aQA$Xt>E5MHx z{^!A$jsqL4;Apx9H9Huro3U$1q*79xu`p2D~;PkjbWr*0{JtKCG!9 zYQhT0$luQ9c36QG zpZEM20y1N+5n)oT8OaQU6_AMp#0m(>%2)w;(2Nz3PikyX+o8B%1!PP`k7BY4 zn|%djh{{+2d9WxNy*M=SVzaLxKYEufGdZjv@V){(9ACBQnl77t1(+u1qRy`fq~<8@ z19Isnb3R4P!U`+MRZ?|d0e;Mk73Abg8ZV5}gW;L&E6BO1iWT6;g0QMHTN|w4SY@mL zx2R^UfP|Ud#|n-)U#wi$4Bn*GxJJO19!Jbl4&r2`Srgm3bnFrSkZ z;sSI!+{s|N;(9O)-K8ST-)QX zc(Gv-4?IjyLx`%y(sD-ypK8FdYUU#}Lz=Ds5*a1IY*mX^_)A~jJ(+9Fu2mS5cLsG+7y&wY zO!HbbXSXRcoM&^jtY#1ye&NetDJBT$N|YY9Y-@p;ngnmIB9+9?9<&bZD=4}9YP6M} z^LM0zt%-nR6~Ps5>&vE&p>NpdFX8+J<2Dc6bSJik#va4K?ULYMk4O!|@wJNxP`19- zQhlYf7;J-X{*pyyGb!}#hloV*8^X!8NL>A4Ypd5Cb|(0OlTL3u9*tY&>rToypd=Lq zmT(tJ;F}!^)+}+92;LU)M{z58KzX1yMwf7rd$H21Jz7up`e)8gO@zNQg2@fd<*P~;d7vALR@2v650Q-@i|Y$$;|KZ`yd#3!S^IUH3__Wn zAa^9hWRQL9d(j|ss}XmGe^a?ijE+b4m`ZlB*o27XczTgf!kxpbfBZuJ=J# zoN11nX{6_ya#sW>$CKJuK7G9)8*HVa7UW}azq?8&)9$pt(V1*?2ie=ZUD+|>iU!r@|2987~MV?b*C+IeMWLpa^_~ipr-#4W zX(+wrD!u9|vHLhuXi`C3RLrUXmO_T<6(yE4O?;w3;-RrZPAaLqwE?}#QlULfC+}xk(proBG24oJ;X3{QieR&D5ikXLZH1C^ z$Mi26^$8$gKUJIH=o_O|`n11_V52UpDR*(T@m@9>%kB_03FwySL4V_X2i+?_X;~El zmVralq~YeVE^)xSn{ZzMu4u|61&Mr`rD-pbU zm1`J)II#om5DHI9A<+N?1}+k0!r0cH8e7GL^xTMNs% ziTCE;B;2~DjWb#zhE!}+mV5~1ca@wgh~Q4tKfGdenXhj z5NL%$O7tvb)m3U+h9%k3>hD$OXeN17W?Z&7BwEGbR`1PP%IipOv!D4XtlOmOQ$nh= z#~8mVY|K*5P{0-KQW3@yD+;c9Tr@k`Ket^<+w7l<_ukj*4sPwAvs@(0@-6?yjzNm% 
z`F=X$uhCKxe0HqCe9<;EXE;(!z-zlmOVX*`;wMsY!rf42AzD@XqPAV&>zto-j^stGbmh`E;Jqh;m+A()y+0V_A?14Spu#-^#)2EJR=CL9+28GrLauP^;unQYqRNhwqVsK<={>qHr|_G3qlpS(cXj zTnkAW^$NexfD<9d#&SY!kXtCJNe+oo12m$jL8|UN2JJHu^e|lbq zDqSXPg2`hNr-iXSL#`uB~Xm>s17zrP8m!y9l}Tgdqfebo|NlA1N9 z0=($z-x;dO$Ka9pp_(*_JnSDtaF4gnQ00YFEW4$~qZ;~$5r4P1lil)@qWGU>MJAa& ze4zoIaI#t_o3Zc6i^-eEXD0EL(qAW`b(C-yUI&pc2}8~0c4@cvOW`G}m&E%Q|C;<4 z2|uREFW{8&Hm~USB%3PSqq-`KRO=K?F#tZ%fV;w(8|q~-avWaFN!?zd$~cy&^e+dt zExd;aVF}qcNmwNXcXi^I(eeRUIDzM`+ne@JqL5~HckfKxME_R>aZ#$9q(}H}1joa< zMjkCtilF#VK%J({bFZ}FXe;Se^YV&tsS*SHaH9_H+XeS^$TqMH-WN1D(lZfelP7AB zZ(C(8)cwa?Vrq4T%0wwN?RyXJ5A1(gOvcd%XOz%tszqVG8j$Q!tLDb{_T&!!;x($2e3_?m$pMVRWw`){_phS!~Gxd@M zHuv|oKG1-BLmJ_U{-B30dqn@A_=g6DU1Bu;nSmjST?DP=oj{e`w2u}bCfU2A#-8RE zBX}T))KXZdC3Rf+9TDmKxY?19oW?cp_NMpjg^?LM)xwE`8&6V~U{wyQKL036588qc zhrpauqss(%JUtg7-jjW?e@(iWdzGF4NcDGzvr@*h!y8lp9ElgP{mc7m_lJ*%P(6Cf zU7~usE#BpmF*L5OG!h7sdw4#I6$?$5 zgtvu2oamI$H~6iJ!12U#BeBns9KRh@5GcstbqRkIi4|^uH3>>i>IcLyBGutUS7it8 zs!z}OXoxa`&M~ieiZUXgIFX71WD`2o%)RZi2`wGDt{`17^oPmloYHU5n-%Db<=ZT! 
zJdy-<nc&X;3&WRvi#p^Z(ucT6kARsTDW$vCxptM;J zDAY_<0YF|l-ONQ%!j$`-5`PH-^3q9&m|!@NmkvRC-~b@6NE3m)9|{5TQo1VeFeu1N zMZjrK(n5EvPRY zmvW64=|I!JJ1wB2iQ=N#v!q88>| z1!8!A6kwBCsXAe}L1veOCE7zstIYXtH({$T4M_95stK*v4g&NPdc_cnaoa-ojt2Oz z$SEd>&(0*TP;dB;L;YJ79px`)tg+iS)hEM!z5$QaWsqB>bN3g5R@jOEYK~dRw7|1& zGX7PKqNVq}RE)H;N>%e;cCSzF91&Ht}O*$bWynf)xW!WF5i(M^s ztT{*fn9tVb9Ae9!189%dR=B2H-QQRn^*k@YpEhB;UXf{cQr6~7Ie$sNHVKLbQP}I4 z?%X>Ir{m2qJNFbpBj?i|j{7f77|&Ya95`270g^h*Zm4@m)x8O);y2c+Zp5R#s(-RV zDQ`yLFzS`Vad)7NVex+Ehh*ka1YsKcH@nkr=k#d2)t$x*C?db^fm<|purN1ae53(y zwDkg8(TD+|a)z)Sh!y*rYmRT_=na3?w9m4(EUADi2GpOqfhApQ*;Zq-I7Q-Fk~2Ql zgg4dWEoJj^%N4c?U|naH>h;MH4!r4tAbhqVyZp)b%;Lh7bMT2^mo&h8C{pQ?a`Sqf2+mt~iY1#p7o!4O$PW(4rbCY)iNQ+LutCE1jt zFq3yBcg2MelYMJji4|FHJxdGp$KlcV;YtO%<=|Lfwx#(Re7p($8L5&>sMc>URN}j{ zQUR&3q%3-E5mnwq)%iwvJbtu3b)r36rf~I-E4-h?GmehPn)EvO##)@@PIQN7v%YGP zd}kf*XB8F-C0Jr|Yw(>42M@|6#8m=J0mt{B)^GaA2T4WFtD)eva-+Ehf8V+FCU(Jb zEjmNqT0Y>IYNTT@l1<*=Nw_S{bpM{@S{<8ABAu01h_0*;`&9MertBiI;ELnYl=Z=q7IJ3ox?TmUUw&FEl-sAu?Z9L4BLS4l zSrx-=dPa$-50uML+KJ4)66) zz%ncL84w-CYJHTIE!X9in@fO_E3m3$+%mVQMVpQ4e2&dE3y-*xp)p&7pZ3gsIOOsj zL0q9R%R{#Hwo^0fDt9j;_RM;+qb*g<5wItdX`tYB7kV;*Yeg>d>&b*Mm}<_mCzDCj zyC)N7tFq8gJ(-LL&z?-m0h5tzU8*q5(5XwtH2C&pQnn2#$*U(524wBpuO|}-i%wbS zC3N2<49sw(S$tdb?#Yg}%DFvEPbM&=a%OW+cC__u=*eV)tkRRU1N3B;A}@^i!sy9( z(TOgXW0QV889xLzdC#6qMiRYO?Aw#^WK~`hE(Mm|-?OlnF~DR%im8N8Y-Ltv*OSS( zHY7l=o{S$a=iIj^0YrmdMAS^m%5immcWWvA=WfgzPMn|rc0*OPH= zM7DXc&*M)syk#kv3*Vstetd z$(gCWC&MAvy(h~#x~sFqXZT_00ozvM=~_=*0eR!~P0B*>v(^3tNL|_<07p-{(~?rA(81 zyvUZ{SAt({+8-ikDT{I>V-iqcJ4hb(5{Oolg^hLT%S8{s{qb?%my0B^#2!3(uip!Q zylJVz2)Ajzmgq1W#hsywdkh|lAM#clXD_xr*T+0FPq_f0I}@#s3v_l}{z`NinZUae zn|#B8yRCoZ_u7G^X=Qq?OWEu;U+CKR{8cQTUan>+xf_lwoJ7QeCKA#*URs z4}0T`AiCohje)L+yU>z~x%rdEC6&-D=&5vrqpmC=;Q7qY24C+>Po{yogF@ zTt?5MOV#I=Z={*11&8Bn44H@>m(LjL6RmLm{AL8NVfn$am;H@_L(cn3N#k;oG+?#h zkhv}1s;dxFo4ah_8x6RXWv(*2Ex}R!geSWimYtqK_GRl@mgg^n6-a!w@GTE%e>;M8 
zmYwR-7UF@Qv_tS@EJ*(-0?F;$tP~7N?W~m5dVvLvKaJqEEJu~~inr01!T}BsI~2bwD-I+%m3*Lh+|Yj(@!A&?ZtDsw%zt+xvroY@OIwXo8s} zl7r=ahrkOn6s)`qpg-K~J=K|>PuCodH2sWDU3I4yQLMqAyR@{tKDg9miE(9v%j#4w z3oNBK0fxgYf5)Y%}4RD-yueP}tQk?uqT)i|umEW(bt6>cR;Us7-WgkXiPGFySlj5WzqfY#gy&ddi!8 z-S5TjBD(32Sr1cFBye2?{$M9fBxF&n0Pb1SCnz ztAiuN6}uY@B9s)eDqv*e)4E$(=j4Ya}fiiwm}E9t5W{gFh5r7iJlgObgtw zUGjLYm1kQrbHjE8ro|r20bdj-x?sBk2}w%Rz1-#%!sT#{!H~fX+ZEXqlA6ElO*LQP zfIc#F#CF@efbHTj`OvO|;@q%ZhBGdRaJ8vsSP8!3@}^TK=hg`PfE%{UuqlQO2gH$~ z=7Q}qk5>#AE|6L-S$VL~XcpYCU6wf^Ncc&*2ml^Z(DCbr?K1qzYClrn28L{NrP_L_ zBeu)&_Us4`ZrCozs2Cue+GGa1bZe6t0+Jv>+hh-4s2mYw4YC`y%gb8`6Tb4+;j$ru zh$FU3Z&iHuV7p|>j@T}F9K#@`Wv|+>T{13TJB7t|$ruXW+_7B);ZjTNuy5Bi#I7{N zWKxdUE_poD2bZc!C8ccZEN?1?7D`t0x-=zZ&ftdalE<+L`eVBsJ(r}#5zr+qA7Sk_ zw#zf=pSn1DMSAiQk->f)v0ZMavRYS+O{Dg%0ox^WVg0raY?q9a`IwaFy0Bd`GRt9T zY?q9oBEkaJrq#KjH1 zj@T~4rWiIH5J!fZ3%1KVE(sU9r63Q^_9OLe zU~CtUY9Acj_U&@KJv+jK8@9_aDh3D#tdXPV)+Xz*-OAo9!DWk#>mF9pHql|>y`f z3_gz7F2kl6HXIN~hMEhu%RDX#7jm!GXg)Y>gXian?Xt`XLBdbcMS$=yCyf{#v0a8= zS?wpOZ)bWYL#|6VY?tGW3lAPD5Iq|8zxnl1`El7kp~mq{298lNKsYsQ3^eQ3CNl&i zK|)tkc<82b>~X|)d3g(A!dKq42MY8;!^tWqigCBWX%F2c(SqCQgs%WfU-gdiLul9N zA-mVXvAAtGWLGjg?_&CDJ%Ycih~G;F^n%VqLoYRO#9dHTz<1Q|ay^HQA&#dz1u%kgnzwoZy{} zFgXD^pb(1!>BF#q76lX`pli)eu(8K<)Gud6w8|~lxTF@6?1~|Ekndfn%%X#?DT@GI zYgXyHIysMA?D3s@De?$yRK!Oeb>l0muHd^qxwcTMQ1b+kVN#2&DP9IK2u)ZlES$TNq;aJ`VR8xYufP9J#e>Tl%4HOq>m2!SH8n#Z@F&WyuCd* zpUODiN54&8Zuf9feT{8WLJQ{R1 zd)qy9`_pjad}pN`TIY%g;sAr^x>%0kKw8@kdy^@;nEhPbxOz_0QQ_;--@6B1&vJ!z zm&x4Hd~p_$884?ynYhwN6Q4^fUF@F8dMcA#dG?vD6R5H;zu*}_8Nu>GW%JIWN#sldC*${mLx zYYRqlwG6#p8+D2BD@Iy;+;M5`a}NHcZ|#8iR)=3x?T|n05JTy($OhfK;JF!udwwQt z8-}S3JJbrf>ZBH2EtCfc#KozaPJx&_?qY)ES?55^5x5wLd#$XQ>9lPFv8IgIWcF*Uh{@wFftWn&9Edpr&nYYRs)&!;24bBNy98CD52+a&B7A^AOy0ki8L}}D zL~vgRkv@`^#92-Kqw+2}%bPA7)43fno`bHHGabtv)1Rmkr%$O2P7A=I~Y0!WM#4X8dkm-r5<&O|;T(wScSNEpXy-#Tdrr zZ%)9x|0E~FP$iL{_+@gJGhMvbM40A1p5#jN^CU>`(ZjM*ZUXhNN~ zO7>+;z7x~SYvE=J`(+9`IfJD#+v@5l-s@x)yQ$;GJb 
zqL|LbsGMzZRRz;v(K;@)^}eb3ftvo~O0@z|>h=kN#fa8*X~IHG&9-J14> zlhL>{>~8fYl2AIUGL^q-uK#5dO=cb)EplMJ3Ou!54YFN-85>Y}=&NX_ z*G-EVc?W9zT;BYy8&~;APiv-#TU9ef;#OvgxP&oNB>ke9B5niC6p6{g55kLBtD2nc zZudIf;bvzt=x;b!)-}YimeyZN&1!C4`ak&kJnUyij~B3QCyehTN~2G-6Hls`a#GEd z`4?+crfmJXk;&w9Q%iSnZ%dFq4?75 z)TQi$_yAc&tqNb8hZklayAQoiG#V@7_tGV3-{(2xcB{ACd;jW@i&1*CO?JZm?;WN2 z3PHUDd;oqN^=g8lDMGqvY*hlc{d_(b!9jjL@jmJUoWtq-BQfzLGIJ{&?2LA%+dI=v z#{ML3!;#g-RZ*3H+JN%qC{JAiFj*<>2%;&sw9Fwb^+1zqTZ&RN_L59@m_??`ub3_` z<1Juyg~Zcn*?ZAe0UA_ZNR9xRD#vb{f*MwpYa(*lR!mJTa><^2#sHk(@%y5{*ip)u zsq)B+?GkH8wU+9z)g&KCAu+Ww@oTsLQDE?xHG`zI5_NYhG=up-3I;VM0e>to_bSSq zeLX0$_bM%WF8h>8epWE3)?oHJABY;fjxuR7QBul^EMBK&(ZNLXffNi@(ctxzIim(? z{VlS0eGx)k)J5njxG|_pBHEG$Z=g&XHJG;!B8xW^EV`%*Q&ljiQJ0K4(Fbp&%o#OE z`yG+J8zlo39J?r$45i@Npj7xzq*Np6{s`sLsKUHgJ0vFhBLzn;I>clZ{8ZE-y7m;8 zTZFx0(ZZ-i(qbkh2ANXEMP-Drf<=SMBo+jpj{sZR7!D(=07=z8(bod zujr4l{X`atf=e*ot;w8%$ zEGKV4C*Vyo2GG@M%ZA61#bQw-CQN7i(a(|?Xa7LX%x$)_;SLv=_k``=KL_SNHF{+3@V5=hf0BduOD#9t)Mnxo9+rs)#-iKt&g;#NK#_l1yGZU91w(cHMHt7pzKzpOwCd$gI$zM8)mZQ!e2dK9x_I ztGCOv`e-e>-yI;>61~&2dPFn5d}UBQJz?okF+K7r{q)GE#OeLN4d5zu%#gZA;Bd|K zpb#|uTiK6SCO{m;lSUjw+sCo4ze z>1G!7y2@rQ5+ULWKq(iP%5#bSwcPs_G8NQE_%aj*3h~ zt`0~=h0bF5+r5K%eADwc|StYWFiMC2B$7^<#y6-R=I$;koP7~|uBES+$oqWHCYtl~&` zh>EAdEVUa+1c5DSBv%vq2=;0nS(L3SoKctqQ`KMMkAqzjUH%MEA(u7XL?tS zdb)@1o|Tq7j1vb4u@g@0 zZI4e+`;&9M*d$SY z9@D~qiMhWM1L~s58HQ+2D3qLICx8c(w_82TjTBS z?r3mkJlSqzaqhWgJetm0H^5SNbJ&|s<2D?LuO5y!*VgTZXuSyboa+sD`|)!(;^k(% zh}6|sS#BaLSx5g^$u?U_N!`n^w3a;rpNQbKc0=m8+8&Hzf1a)+JRR78;*w$x z0IV&iVFdkKZviY$`~9uBad@G%7ow9W5H)=q00p#^UMD38A8v%O;_2?`xOrmtbP~cN zdQSx*GZ?8%o=7qBkRp6Ch~Tz&hjH_+-C+{ibVG;`X*GnXS_T7$UunS2-kuvbZ{FGp z!8KAz5HiUAa&PsqGo$c68^O!$gcrl9-ek}_J?x+AREt1|U_Z*flm1qB)Z0dd5K1r} z3HUj%Z*x4Fp+c=c$!J|cY9%J>!TTC;#E!4_&C}`N>?mHsUlU7lB>pzndM+G9wZven zH|yhiFmAj&GO4VE#JM}|cTv&u#9(wbdnACRz`u{+YDDZ1&K&9H{+}#z#$CM`s!QS}{54Va%1M=wW7|Hjr%Ilo z{%^W7MuD=2LgWJftJB%9NYn(x&<~iDv4LD1l?d_P{2O!B%UQQqbL&djk8(k;n>MVy 
zS-cc4-hIadx7f2y8G5@#7&+M+JsvNn|DV`0U)Yr;kg*QWL~y4;wB6OJ?fa`~h+c(u ze8eV1(?JvseA>LH|JYEiZ{Sg=HZ^o~P%hgDzy#C6d+}9n;-C58qn1s4FkXr_K0Gg$ zjr>O&@cIxI$Mx=FD<96LS%cHne(UOH-b>V6JO6Yej7`&qK8%N;mOcz2wyE-xnhFj3 zOdtW$#y-3$hY@Ak+^ZXNY^)?5wR}Vkzutgb0{Am-@WYr9YVpGm3TBT6Rlw&Xc)6_* zx!yw3GWck-RcafA?j2hWw`gMG?IR6n+wvwh`_@89LfByCh965q*YNu&BkeX#AW2ce z2#tuf|Ap{m7GRFu^C3H!&3T#HmHm~q`-B1SSk+ z6xdMPwm}N42>EtqWDwOR0Jx(3=k^x7kR%{A=)4Xgms|}(XY!_@9fj+OkA(@;T>#{0>Ycvk{F9Q@}LJSYZWZP%b@%-Bmz_koVUCcNLI< zw7mFX1!M?0F9ERvGLViJuEDCjtH5Rij1@?C{d-wB+y1lmY}~W}HduibXXG^jV@qX2 z%?c~9BIJt|kU_k#0zyP*eD+v@4K>b`&26p?NpGxx3}RYzw89DqF^xzqu>uQ9c36QG zp`|9RzN*&P646I*mjm{tq@-LC%HXfyQ<_pJoVFHB5mGR&cB` zR)Aa7#JI13gqaM>v~N(6(^Pryz-C{;F$b)G$i<8mFg8#`%z0o1$0}h3R0zc+ZDAX$ zHxQ0j#R?cCRol-y?JFR&F@OqISOKA>sUO;&Zy=B%g(E3lwshZR^63TBU{uFwW6 zAT#C~5iX1yMhS%#kckAu3JA%{SOIy^j1`biYHU!_D-k@+f}DIA zD+tl&7nbcVMk}l!=b|cBfFBDnOWk%AE3DvnWvl?VsAjBygc*z#P+Y{eK~0^)3>sLy zgK*pdDw@vfKnMODmfPuf{DYsf#1F!9Re%UJab>yd z0G$$dDwwXg9!!K20-HT+j;0L)5eonir){?UCC?%f@Sht2&7Qus5YUs*s|x|Cp}tOW zMgfs<N|@F{-6POgbD)~g$QVA=oN>6)L1;Ce!+f4 z`vSZq3#;26gAQe_k3WB<4oGsELIj^`!0~G4BQ!&r|WpqxN>cH|kE`yqg`P+@DOwllBSx)2)LuXZn-=XtN(5 zD*Y;{D6oXPSOVYhV6RVteZkuz{zz^m54|b6He=R3JsJ15Hha@qdc1V|$^PK%<1>07 zZy6R?Sa|DAe21y%b>N-C(F;x!;qQ((L{3jw-dIYH*hi;XGfPy>*5T55{7bO7hCT-G zh~PHXe%&TB>5ZNsXBBe;D`e1$$%OZ9>_rn^Sc?__9h2-amFxnHDG|$w^d=MjJYFSd zB?B$|<{&JO(Y+}7T`77D3!QJuT@|1lPikNJ^aBX80guA{?kYw0J{xSJi%fdM-lV(v zcz+Y!YO=ez*+&ev7hb3a4x>Zh?KE^^ zcRRhZV>_|?I8ta*L0m;tPz5zqB|p*c?{v2Y+tchYXY|6;$-&lB>Fp+fE;Zmz22&cq zXm`82GwDy!LH6y}qI5cJpRZd}vTVk~_sY#`X@6$t5GB6iJsEpd{eoZKtRhAu`k407 zueNd^mYM7&{O)k@M89r5F{xFl5NR-6Sb#r_fV)o0N^s**f0}XD&JyYz`P0-a2mr~? 
zilWsH{5L=D<`NHOUS?Y}{&@szl?{_>FJ8QDFdQb{`uC!MZ#g67cHl8@TXl#~}17grna zWus}>84YR@&>bYjEumJ0fMwt?Hjj0Q1AeLr_XXgJrc6@scmWA@ahuHS8fiA4`D;+K z-K|kHM}jNDA2$LkW*)WL1ro4Nh4^t{+k>N9QwPXv#u}U|2}{#hS6c40i{NiJV8cI# z;L9&vD^`Brg0B(_x3m@lIyt*~KR*wjRBSe^jwgLbSaGuvGRBnxScgL=f(=|z1Ez0- zC_yKxe)z2jUTJ08*IDnLi5+N%fI!4Y1w1fC4l46BxS zqu-8JFlT6cZ|yjs55Dv(wicFi6YtHxNw{@Q8)vjc45`?Z>LL9@`pS=;=D!C3*$>$| zNX82RVXG^llEBO=_??K~5T-N)TA`2&Shq>lr*NtyZe0Om{Hm}qOF2UUSF}q-Rn|GEc~_5%#(V_-%62JjYj7^! zdtbjdymfHSa*-^{xBMGB1}U27`{{_kMoUR>erqr?@3CDg`+{v~&Tu3OUfV@ll1}Xw zKaqkH?uIf8(VBz!7W}nxm3em3+Zv$N%qbVJJp${S;QMu>q~mqZPrFC+B38O0X1nn6 zo(Nu|8!TN|$wQo(^~7%8Mmd)Q8~j|vzm$RiJ z^YQYX=<94SJ1;}>+{eq8Y99?M9Z|1^W%cR%01v~1@dKsz0et-)v!jaT@PQ_r32(q< zZ6W6`^i@l6N#>=ovs7b`!o%@{HEFE7s?^y(ir^k^ouSGLr&xANjYl=~lM#QnxRc%T zlcM;a$Fx4*fNnThZII2_x97#=&6Bg!_)6)olh8U!xZ|dR$hXxg?Z|!!ym;-hc>nTW zk^ds$M-sZ4fRbOpDdlZm(eFt%Rk%lWRTin%DXP$K((1!+HsG#s=7xG%j2wp-b5gfg z*sTaCW85zVwk^De2w@4?H%V9}1$TAgV^Qk>ESQHS|Eq$y zsHR3af$v6eBAjdF(E_CiiVp?UY05nJN*j*0l3rC`UKQN`NTUw!I|cW3$hMjcNDx$+ z0=z$HaHMA<%qLIOAm5HkQia)|d=j#ZDApnaItx-n(hKL=(qd|DmC8gZH0>(q2Lk(F z7L#%G!5JmAn(C#CkMkmy6h4Cf8rk+7lc2^rqBFRO-zJXoUj3SBl;xeQyL01d&<_>$IhgD}Q-6eQ~$i%e&~Fy)ZIkr&>61 zaN|kp60FK$)n{}?abr1xkA%RSQ=`iScp^O)A>NaHv42h4ZRbByJ$Hhm6L3mr&Ukis zg9?Bn(TQfVO;_l7r!Dkvy%qZtAykjva+j#yZi{#631h0@C~Upl-> zaVQuR+t~b}9XcM3H6c)6N>#1AT2Nm)F6Bmv(WArr9`%Qg#8W;c9VYjh(5OFqay&r? 
z32u&e`fcu<{(2$*feJFx()s%fS-aA*8e{t1{=m(}eB1G$76I zswT8vI|$HI=oRDAml7*$!aEw^zapoYAU->jyh6R{KMwV8RdkfUoUzWnsi-_K4nEg_ zhwC!PZPK~>3qdRF#D6u%EM)5L)=kF0tx>e}zL$!TR(70nG8ZGY6&vx=2tI=jv#rrI z)0PdXJ*m_D>m#0C_)=Y@e_EqSQh2Ifr6u#tpWfZ^*FtQCSA@U3;ww7k+H5%mwrbOB z7?|sS)fg@qgr_zsplsP)O~!RaJZW{vZtznAJX#lkZ#KYx8=~I6SuChF@ndAje}BG$ z6$4IW8_MDTMD^8cbO(C<3Z+@zI*FzGm>;jpImDJd2hbj=t#D1Zy1%hD>Umy(KWV~F zy&}`@q^!-Ea{iJ*Z4wj>qOjKq-MRNToQb!>?A%iXjhs(=ANlW1n9N(@95`270g^h* z9Mx?bj|;mPQM=fJ)A1W?wF@K+_~{CzycvPRs8zA#4X?#s21+<6Aj;!?&CES=N>%6>!CX`g1q1q-QPLYHSv# zNKk`pUDP?_Q%!hNJ>F6_FSlG_s{qz@=BZwv(C@&TW>F-9&o*TDKc(A0@hMEKrZr>Z zTjy|A5H9MoF2XhBb4_9Ts_;+*;@QEf2=A=q4p<2JaP>F8@O3qQ8Z=&jaBRm#AQlC1}o-IRAgI}od zeiAQIHeBN*cd|EnJnO5L$#;ouxSv;8ur_d-7U0h+96TtO5LXE>1svahTEFQdA0!ny zuLjCO^+t0Y{-Ja0P3(f>T9LvYa7;DQF&N1vulFQemS(ztPjan}O(v1fR#g!)i7U88 zB=%sh+{m;#E$nTR2Dk^|f%txp07pbu)`ztx!$+F3i^PH}j!RS42TNMWnYHVB6{tp@ z)(hnhsB*jTX#8*h<#JZVaGRca<>>?EGL&{A^KjG z@>!MzS*58v)=nKbnTN;=L)Di)646Qfxp!7Ls>CchMIQ7`S7q%~IM~%Z>zw>5e5k@6 zb~D-fQ=mjH_FLF(8DKg#%Ow>inubr6WuDWcSgnt;vgNwma&rk#as^hEj9X>`wP>}f zj)q>JV{^^IBd%m<%+}#&J#!xpxqL?uS7^-gkZrx~)Xcid-HV8A>ptE+*|D~&<_Or6 z$u!{5ic#@+_hbUsid^K^lL=!m)tqNfCX=RjPbSP(Wuc*ZG8qq^J(-jPCL__X7CO<$ zc$r^MCS}`@lDv8{VL;Zd{dzKiu;`SUg)pXy2;GwjlVZ5_?#Yg|%eg&FPbN%S<;>=u z>{$DIp(m3GvPw_Z3DA>Sikzk}Q`KX+#^&3T@uCx5F5mXz*OT!>V3YUk$z&wad&Ryz z8BbQ_HQ`cV+5J5Wdl>`s$$({6>@%QmPbTBqkN~}UGJd?AbKjnfCn`E(5jH~iWc;{< z8B=uf?#Xb-b??dYknPoz@vN)dy%7D}da}+udNP>?E@)3Ca4ot(o{Ri?GGPp+n)B?* zWYYBR$%NUeEHqR?lJVf#lSw(4M@EwIVxN(G*U6-88&Z;2PbLh=+O=O#CJ+{#vIvf$ z`z~Q%h9g}c=-rcbYU{}ahE&dM?#Vh_Po~+$Xl?K1USq*~olGXkDm~e1fS$}!Jd?nT6&TTiw+kDg4XfeYG` z30y03kzY?HjKNfTo;{gNn%+H`Fk6*{hDt~>9z1(8DF^e&NHSjRGm>vlCS}`@lDv8{ zVL;Zd{dzKiu;`RUa17m(2?H}6)!LJ-*4C2=45^&i+>@w%JM2fbqY|UQdG$ zd+uG736kl_UTWB(8K5Jx6nJ66rW%WDf7AE+YT3ES3r=*nARQS$M39b5MlwK0#*5?DL=RDw&LHL;CaT$oTOBb!0qI(GiQV5xV2z$0f`dSIK^= z!nHV(U4vP`RrX_c;o7Sk|cM)UW2jh#jI~0u#~qii{9gFuO-mIqeTbk3%OuTFTaZnB}fBIX&o2(dx_|Jm`T9SdQX_ z*5a8nXKsRDi{M4tDaUZCHyQL!5BsM&>5PI@D`Mb(7*ANWS|>p 
za%sbn_-aFPVF|amOWC7%8CBo70$)V;vI{xEP_)B0BX|wV501U;ZwwrA-ggDbO|VwX zigQWBd}OZaAt^C=!Z#XlE6ZGEcH4rZ`Uy{VMJzi#gY3)JrOk~Vf|RF4gEHYe5p1yR zRF}385B#KE#0w8en0Wr92qd>~vr;f9wYyqY>qQnc{v?8%SdJ>|6?H#sje8MAh?Xpb z!FL*PnuAMs1g?%>VO4QteeCl|AA9-6LDa`CW@w+J*3cY`249Nc)hrKoA%k+I4i}n1WyOTE z5X4dfRs4A$zOWOCh%p$*C&6#oGqMp=_PlS7?HnB$5N>eTTpcGwSv_yeV0oz)RYfk`24`AXerd34FwM_wr!C^_1=QrJLp zC;{phbqf2#NVc`ppXZAsiTy+qPO!XKHTAqL%QPC}G&T=oNgk|W{%xGff_P1lG#O-?I4o1;goA)JUv&X%SkAa0QZHEJE-LuC3tYppDTa;e zFak@>0j^;mFI$tuWeeAEmzyGfXh1gjmJ49RF((8GKS^tV4W`j_mrx2|R9_2b`9fMVZIDE~qVWweESOaz!bEz08EzS7A!0;`{Hkn~m3=mEY8$-{nO=bv`t?J?l5~xjd zYLHp>gfQVN?;zn)7Z}~JU3%J^eBD>o7`qgWWGDk<%Q~7CH+IB!$ut!BCvLn*faJh6 z1rF2Up2@ zIZ&w#uA{kj#CAEB10*euo=ei=2vkg3URtWU-!Vhz9kE@WN&nQv(JNAympn{Oup73^ z%~V$Fim^!&+O=uMFT8X6tjo)jZrHAwXAaxFU`fmk+ck3Mj_n#ri^k%D?V1O{sm$OH zfxNDS{W@a10#{rz;|$EiR-h$FGM{qiCQF8n*e)3)%ik!t2pjhu@I`^53$`nekfb!P zRRcm^fz4Q`SJ<@Yi0zVj5ZMb!&0qGWMUXHK2yWPJr!H(46TX1hF2k8Rh-eyJ*^XQn z;3pSEsg^G7$#uha88*eR;ea?Y)LgJ#=5a~5(Cu#UkgUA^)o9ic+hv&(f`p%>ivZvu z1s%U`*e=7btllN{t;d{0?9z3^b~)a@H^PG(w#zXp1_%eNk)!9jM zC^B?TDgsOr!cWl={ zxYQCm?AtXB5dhmIL(C1^B?D&q;8Inf4wnpI*Wt3uCdPfhj@T}l5jH`g8FmPw96gt$ z#Sy4j*}7nvWIRRMGW5od*e)3qOI@gNeWhNcE-!hQICsN#InHIZt{9t0?d!#h9Xr?lpV?H;qicDXebO+Van!FliuG$1gz?b~HI z^-X-;|$DA)5w#)JMy%8SVuw9N(F+eyqYz#E()+RFqBte3%7kk)Mh(MFR z-s!M!mzTE?CVb^xd!TT`c8|NA$>7p1lA$gzwo9g=z(09x*Nab|w%Bdou7NjqY}Y`z z)Dk;jyQU!mG{j_xxna9xz%sVWRk8-uOSV(N%P>pztaPiB+nEd;%K?%WN6#f`aRhWp z%S)v4v}K`Kag&qVnG8IW{;7+jSEMd45gCNxcqRikQ(3Jm#wJqxdhv2$v0XFI-qIaOY}ZIyG!_?Z*F4CDO=WJ_uD})7cUk2V-ee5HAau%e z+_x(*Ev7REd{Ln2g6#?d|5zi~^Ib}J=eY+x?LQ?aW{hZR88@7A=f@8aQ2H~+? 
zhBG0E@MqZa@j?TU$zp{I0Fg5+1~bPq85lOju;G9>GSpnKUFPwM;le|Z@?fD+q$9S= zGA9HHKS>t>!b1u=e%(V%pmgX5VD9HU}@ zaKIWldTwp99^2K`6dtBlJxI7=yPSLi?Azt&)gCC&VKt|!oG8ZK|E4{3mvqDH^z7Vg z;dtCJoSiG7&!f8dCZP09UmxmdX8NRIncLvdfP?iMIgyFTrD4_V#^S&i_ z=dz;V&-+$ckFH+f61+h(Eq{yP5WG^jg8y6b96>e5TovO#Z>t!>_f95ZmN(0!$Hr8; zwnNRnSld*(yGTtosm&fQ%1HFiMwpy{98icwf%FkD+L!_xpli)eva!c>)Gud6w8||v z!lV}Ryy4I}Hb#-zcx!6;7bMG5!OTkEZ={e#9k&fZ-`sCU|sY1;Y zK!!;zwx*3Tz6L0-c68E5>F)LbE+xd40Makwimy;~?0?0lzoN#ct8RQ}A0nopv&Y;k zPln6yp{VjBKD`vfvRK6gk)ua1#ZIl-JJ!C2SUtA9HArMHS1v(xpgDJ!>7YD?1_Rj* z_1W9h!+vivLN~{EC+L=UQh<~eWRS9`p8WLH9XnBdY!BS67-e^73+bcF z{*~`=*-Nn7H}C8W&!;ku_t9^Ym)Sj>R9~ZDw`?ZWv10b|?rO5a&J&l?nWZ4+pF|gh zrzu_a8TO|1T+w9+Ds=TW=yK8sYO&@5_ zc6WyT_U3prn~aCut^Q6QUC=bzJl|a{mu(A0h2j8%=elUca3Dp$M*ZmwUCe$iZoGU! z(*|WXti${E!0TA9uFX{C$ZGg(h%lB?f)W-GZ{#@=i< zkREQ{+A`WL-J0)ulcdx{)iw3H#8cptd*EI#i$z5MS604<;J48+TY__nF zIBY*D=8iJOvnudzv2w>@$l8LDTmyt&mt9RnA5zQKW2WoytG=}Z;#(blO|?V*utN-` z!y+4W^MYq*5bpb#ux%KY<@sts)T`XcwI3i57pH1E1!D5JiwTlvodYpP;8Gy&x3gxZ z+qqDInCRmI1!8ifo6L}nffzsR5{SvLj)9nY{8?qfUV*sNwhhFk?FN2*9CDsCFG?*~ z6W;>_;^GJlnf)3oV)D34ASTZ`2V#!Ev&xFSD&k`oDi9NWNX@wZikQ5AEi+_eAjS{7 z1Y$C*V<2W8e^!~WS0Fxi!2&T>htz`W4aCL!Dl+>u24eEKOCToCItOBoz_ZGVy#n!Z z+dxdwH5VL$2w!TmhNvq;+5DjivbkAi$i_g7A9e}EWLU>Q%sl?AGGVVkeEfn1Vyq6S zQR@Qn*Y?2uWh};SRfF3Er2~qOup{t5v4lo)EBo^{vB8Zw z6Wl=laDFJ zFfM;{0_OcEITeN~iTuPble?Vh;=LxqH1F{wSDK$ELAtO+8GgOr3=zTLb0nZ?usIUw zy4V7g_{x#s3Bl$_P@RBtB&Bw+Ig%czm}5(|V~qA%o3xC|?T7qilJgZETLnZV;j2@J$?-O64qq zdMDs=sSNi5Onc}0{L#hFgFWYZ!`*&-*~b3mz45}vp6&5gf5^TXu~$`O!|yM`;f&-7 zME6vuJ?oFA<4Jea+wM=vs~2xyBqRJ~bK@_XXpBX4-p7HBDyY*&H6(W9rEHw!5v`(2 z-Y~6EyHKk+gwkinzNoQzVwio0J;dKLCFn zUdUS2^zq(KzuOyab*IC@W?@-Z6T@2BcnLMDg$?Qd><9n)BJ5{IkC(7*CyehTN~2G- z6Hltxd{WKk`4?+cHgEm9k;yl5n@_B5-Z*l;H_A+r*u0q{ws|u}V)JH-*yha?sm&*_ zv^6`wgN*iaIM_K5G5^8egsU<`E5AV$FW-sc)NC+2&zaqU#~8yTmFZq|q|#D%bJ&|s z<164${G9AirtE|G09i$e313@;7i1s154}z_o+#q?%4KNZ=Q-qdYq#8c|Ju>krEtDO zcEVl?=b358jiY-t@5dg1A4k2KU}%bvE*e`s5&jP%ILOZ@-ba0ab2y!UBqp9jW^RRp 
z-SO^hXLr`k*q_E7IJ(w2998*;1g#DzUyAb7B>U z8hcr$JIo@}YthL@qmpI-TF}tExxI zJAM&-L163{Wz1B0-XI~GB?7d1cHN6AMrdPLm?cOPMs8C@Ez{7OyR4=Ayy; z<$)mnCu;CI%A8SywEh;^yRKl*Meu}fFgqw|@OsLmQG->F;xYFo<4X3 zWzMKU+V6<$-Jp$zE=nasDR?$GH~d#ps*!YmlyYfQVcx495)=JV#ayb%-_H>6+|p}L zak)jt}%X^r*8awUQiJFM@{t6=c|rD_TgZD%_EQuLgr-V z0S9FAdGiP)oRKHYBVbP_pEr*{oQ6E{{hVC!Rsm*LiRoixM@H^oKPaK5#>yoYi?&{p zAzv4O7BY%_W**dtSuu0F+kI{fwPJR+RHDavA2X@f`VcAR;zEXDiU|C6huJV>yyk-* z#><9`bR6cxk}2lnMu=gJNa)V$5<;_J2o-a3&{^I@;MLgNNnBVXK@^F)n2#GFhBY4< z6=9m2lZ%W<{PwgXhK!$#iu}OL$i^V_fd3{G28=pV0? zPpw2J)34{z*93ii3;Me90zki9OqXufvXz^IS454)r9I7k%kf@7$1WE~_{IC7iT3fK zhu#`R@ji&Qi*L{6-(C*;nWT+=iI#)1-{ko@{mouz3{xsGz@F^G%=ex2`vb5yKh`k) zX&)@6Z^58%6Epz?ETiwx1Xf;BO#Di?qH`!}G#B?QE$`cZ;Ie~<;wzqa)yh@;9SV!5 z$Kzpwj*?qBEF3RK{{l}zzL_dzcG5qC$4Y0KZ5~J`fqux*ZTj~WB#c~$E2z6dA01@p zC1&t^9_+3b&kHO+?1JnS-6H+3~u#mK73>~RP9zxy$*8p97O{X&$p#_^%sEX;p4lLo9%hJ($ z=#%N!)9Dk)Uy+S$?QW*er$&yd_tA7`j81UuQR`pe|8Sc5!}AJZ?uUJyy{Ryp%Q3tL z(l=vD%;_W7p)7Qz^1lX}clXbd$J2n920kzA>0D{?;Z9w z`;U)@Tm4Ddc6d`+c`oD^_2DncV!F(Q-08CW&`yr=_Wh$l>Vc_o38MjRX$ zbDR3HD1A6{1rlxI=ffd1DHJ-8gt{m|ru&{&7Ecbgp5mo*CG2~m-{0w`k(3D?<|5!Q zWYi=wIfDMu8$C{Vp#h>uCTjr8ou!lq6du-KIsJH+9dlPoQ_>daQ;KId(@6|Dea2V8 zKD6_CHb7_VqTFzje$VsG=Y!%?5`Lxtr+Sk?@AMFDaX~RAt6y+dOY5bBe4G+89tw-8 zsm^+XQJPQkVxbW)tUMpBwLhL(PPW~V{S)%6*3#-VtDS3(cAkF@e{*EA**S9T$l2bt zTi2gC)9W9-A^yzg_QG4>Di(ejpPu$7=X$BqV>Z~byVs)D50o|%ph9o$P`rQnugHH9 z+G5#ky^ydb`z!21%#Pj5Y>gjQM?ko=!*WuQIP9=@x<8!OKkV-bS6A@ZEJkedzJxs~ zA7yNmth&26Tj6`Z$<9{hQyf}3;0+B*P7CJwt&T!if=(za?rP)I5X4j zERo#UwVvOF?7Bk}{zJrHBqXz|bo!h#mfVLx8u8Lv`o`8eUnA&vl3XF^%*)R<%m$UI zLU2I4{jrW()_RXCs|rWf2pT>b!7HrnRPZ$`;S`?a%#m*H|H&d}5MUL5O;o+I?!Br~ z$SK%DX38S6>18^WU2L{js(AKvdJzvGhJL`LjC?5}CTkkTH;(e7B@=r!7yMl*taRXY z4Qn3Z?G|AqLy9uID6wVUdIFRacqZa+*p>t|v?f`xedn%rrd@g3k!M}G%Wg+92R2k{ zp@B41n;JSR&a^H3Gar2P=~gIyB4sxcj2B%Sk9fIp4!nG{0k1bWvz1=7tvq*Yyq8V0 z2B#})*Rw<}?caUvgJKErbR&#S(}q5bhoF`|3?bB1DR3+rvAir4J`+fQw6PCw%3(yw zHTPZysBU*MlZRh#z%2p%88`T0%m}skVFO>)9lZmvf<%`N$0vAQU-ci&eN|4AbJ3vPz6YP8 
zx5K;57+lUzz9-chgUcUFoHbq;r3b@7Ug!IFX4s7gN$@}u?3OH*0x~koj~`M%C~A4B1t}mS+wo$9 z6cCC!Ui^>(n-TD`KV7hQDf-ATsc2}!&&$bFhce)U6;eQk)XB~sDX^ioZG#k85vmI* zAcI>W1?0mz0oo%4Hq5@}0gOn26(L`w zfDB?%1=t`3gc#R`aOhabpcyG3pVYXu zcA3)0N!%a^E2Mz3CG;uQk}E1(FAfd12R2AS{^})LjB%R;0wV?a`7oRd=~^xuqyW=Y zyQqe~1Z8vC1DC%CTXs$nv9Ll4$lI{lkplKe0e;Mk6y)Sf8rOQF3wVGBQuv`pDPV&X z0k8ski*>P^z#>T6 zVg-y16kN^&E9hj3i1D5RBUV6#P)yQx4~NyR0{R{{>(X<^3dkJTKFc2~AXGK=L%SZV zfQ-!Yjy9#VZz*qs(s}uifI+fUD$W%;j zxPcW`K!(xD&K@hUp=N~@SP}B=&B!3Cy$M!W0U@FjpgmS#Lya?KbDL{J(tB3{8N}G; z+UzPI#55wc#0o4Z*ep;IxI!Q{a}OdHs}(s*#JAeVvM zHFZk2!3uIN1P`6*u`WGl ztbojc4YdAP0imj?A8Nu1$jB@|epmsasO6;=tbmMc$BPYCKq%^X;To*USb@z50rnM; zVN%h!;R7uOtFXcf$dEeO*<%GZ)U2=qD?+}#85zV2D`|9RzNE5MHhVO7Hv*kA?6D`N$?MKxmuB+N*#0>0n(!4a&mg5wTY0g;Ot zD`0G((5NS905({`@k&?$6@oQHK)Fc`>2CE)WcT1JAYa!z-=C28PFTNo0}P_{AVKf@ z2H6@$j~bxk6kivNN9{I)cl?8&dpay@adqRoxI(AIoeHKat_Kt0gurI+ zj0Qw38bBPg=`U~i&y9d)Ptk>do`haq2uO|X>->X@;?M;6YDl3{#GxjW4=7#^d+-;p zzHy4+4;pYss4#$0h=7)cUU3LWEv(&|1p5{33-FSB&$T_0eFMVL%7*bJSb~3~E44-N zsRkUcW}jAgTjFg0#rJ$r|qQY{2i@eYgt1d zFW-rd8kh}c=XD)&$(#ze^_;ZU#=l{ozl8G_ETEh0tqNO13w#Vi=19e!3w%r?%GTFd zs;_hwgKf~wU$UrdCWXHJ5RnLeO*p|8NntQ*Z})qn?i62m((O+s<4L=G{Ym*ol%%4- z67FJ&VRplVEsj#0N(66<_#?TMJfJ+#pP);*$URx&eYozC!a#sZ?$CKJuK7G9)8@yIkf_xP2cUS3jhAxcR>`pg(!``I3`FMZxiSBfF z6I~dDZUoA1?)sDv@|2989N#t^_hxN!jYe`xas{d+h)4ybnU})RSYfF_I&|I`w`iy2 z<}PxtfumLNVNW>>68)4k)mMq#$B{yl3gSvZ!1TtfI1L1-{w*%bO7~vU*Hl$bi7qwZP6ksNz-V{7 zi>_*$q5`j--HzBkU$>?hRVW=r>SPNGX@6$t5GB51eEDNlzu=cQtB6sxKZz(;umA5vaUs)=&r|IVXOj}xOkw0czaqD(^#w|3B3WBBcyM$?r z1OG3At-3|P6y&uPO3oeAzi!kgfPno}ZGxjkSCu~PFC*Bj%WBG9Ty4B_Zleeit+Hh` z(k21j8a*6rp6{Z2=BI6|LclU`Xqq(KJk})+_^Bq`7l138GD*SX1tiqPZ8EcKq}hDt zuR$I9QRFMZ72%H?ffX~4TI~V}nDOaqeHf|`KQ3&0aCB?x0C~+=gHt78X&UQFYxv}E zH(##1+Wf>P6Qjc zD#cr&Qt(?5ywb|Fv%$Knbr=2RYMw!aY%MJ3Cf=KWlW^;rHqK~?7*eqr^^_hDVyF2z0kS?o--m1+B;$pE zu+^1NNnmCb{7%Gg2vZsYtx!mbo`tNsN^J@5U>m7^w>n32$)hskdh1OrSIXiO&?*ME zdT-WJUPp49{mf5c-6mC^!l{zN0`?f=SA~sP${7l{qFpM&SYk!NRga5iC;L~nOKDq! 
zbMfB$`n}<;gL9UPWLduD_ahmks6u=xAMojjzeY<*aDHnr&OI7hEBk_NXwGn?n1I)I zk(Q)WyTwnW;Do!O%tEwOFDk{s7W}nxm3em3+Zv$N%qbVJJp${S;QMu>q~mqZPrFC+ zB37}cVi)k<6TwS#gWWb54)YLaW<9Z+cOhr{0QPec|5grGXCeAB50Z@+nb~#IhuS5F zl1kBb@$o%U5XgOYLKLp%AVz&Ar^?cDpKBq>La&N`0sdJ7PKF#C%L%nXZlRt2%2tEiXNyI$ zIUda><6$b4{s`?l-aOCjK9(SP?&Iasc{HeWM7>ovNkGIZH<%Lr$yQRjX z8v4nIzgyhNZuv=3{Ew}XG@zVh_VD=zbi>JNgKWmWJufD2o}8V=S4w}Kgw|2QU3eWt zzQlN&txjoc_DkT!YnR3Qm;Z|V7YRQCN`3*Sl(%_BzbDyL;U3jhStM|$_?r#5E1bEZ zUKS(A;l-TP?G?ndlpv0FBP;w;VB5lbh!B>LeUpS$QgBxsF&Rf6oKZrnsg7z-5u6I?aoGlbtN{;&)2rA5=R5K4^Y621*FduS$?!B$ z+x4C%oDMCHa-+<&$w8|=Y^4tel#HlQmG2eiwM9GRhu{tIsW2U~sQoYi6s4+a95_HD zXmZFyOH!+@Xu)!Z>f{l4D1JjdI_a*ej01i?f(O05DqHl!a7G>j$Q!tLDb{_T&!!;x($2e970M`pMVSB=2C}J+d)twMx2>?$pRa*CR!0C`d|a@4QYfY2g5$P>=FHc z;vX6qc8SsWX9k8Sb`iA3cM?@{vjJLwm}W1R(h#928{oYWJP<@`DXi0$I|c{E<}U2+ z*S!*uUYl@tI4fm5JB$;u1{DBD;zgXk-l+0w>#f+I2%&oPmOHoHfK_j|#XHc+7(sFm z&quLB;a*_`S)riQl6KGatx(W$u|EA>f+vXT#yktTtx&uz1mZ-egucOVRRoSFmK%wE zj^y}_pn^a_2Cqx_qe!f91FT61PBnAy>Ze_`xUL{wF!YDX=$z7T(3=(Li{;xar96@ZcI3jotdr|+2Ng-0{qvVi zLJK+o#7i|Nbxs8FD$WNpcqNs37?79FGIviTP}-~q6l$ib03a`&Zswvz!i;#|Q+DJ* zKwdft5fcmt^3owl4;%pG6=@=n_roDTUP@O59tH(@>1aeU0f0kFQ&cE*wpJ}*E*&@L z#X9ALL4E1)D#f8-P;6uKhj!?AG}eSbeJNG7@@heS>9~{|Wz<109X{&cl;bl$ZylUD z)1RO%p?yh*$-V6|>d&4WPtZYvo8z5+n>&~`KNVGnBowAEzz^_&D8MGOQgy;`gUl`m zOJY`P`5~kfs1xR;V+Q_C6Si4ePW#CebC%|JRTEmT9R%np6if>F)~!K(M+5v<*C@U;+I;T7R8ulR~ixi(u)fvwu~ieWAqAUO2Oz7_djHHHfY;i*ju*t*Ac zC1YuI7y$kGW&`}UA?od$#e!-RKSqZ9_vb5E5$aYEC$f&<|0k-iUZXqE>sM$IR;@Wl z`81T~- zN_jH^hf%K_j=LRg42$hYGcdAa2ZTLrMLGf(yUgnkF!bU_e4+mPM=lx}y#r!cXa){KpBox@o{xTw#% z2-lF$HHGP`!b8%roNVB${?^fn2K&*%%xJnZp7y)xReurudc%I#wTAJsT#-vG;ol~< zS0K(!14RVmB+^SIV!FGvEW`!)nF?5#r4SW)S$5f204GS^5F%^Hi~v5_gtM%@>sAIU z$)+5InY=5xD=vhX>|5JPtjKcf#rlPbA{YW?;? 
zCBD0>6_5%`%A(g6f#ywAoo|H4;z#OJC)%^6e*0oQd;8)SD!iY>GmehRLYd=uJH4?M zC%N>ytgl)o-&u$Id4&aQ1E<=8KdW%?pj<*+CBPJLeE(_vrjLA(vKlB0)f>%q_=nD| zH?a$jYtb3<*75Pt%33*`=|a=Y+o z{BQu}a#qD~o1RhP=>z35ly)NXaOUI)TO-MJJ&(@r-&8oBRzAzJAgeTW$FeFRnTNRGls@&9e&m` z_u-K1-k9Yf+j`rnnRS)B7oxXZkzDug$&R&EHAldnOs0W?*UF23{ymw%wdevtdNN@Q zrkeBY$z;;>?#YDNsw^~APbTBRvnP{sFprERt zkX3rJPJo`wQsjjZmxO6rW0NdYE_nB3yy!%i%k!XLPsR^{P2RI7laWO475nyNJXw|3 zgiC>C_xCL9Wem(G1D08_4h`VjlgYR?BtWm8j2|!Oe05H$3*D2+nW?=e!y(tbC(A>& zS5L;Xu5$MxV$ZE7>&&AklWD+Vd5nt3d!0<+T9J$V*2#o1m`cyHCzDCjyC)N7tFq8g z2}#C-XHO>OU>+Gs#*2MM^6kl_Y#UONS5GDk$lA4EPbLr+osxQgD|AmL49sv;Yfsjx zttS&0QaQ7^C+l!M8GVnk)K7RdXW-kD$pl%YCtD5BlUa(q+?y^Gc3l#yu}$;t$#~I; zE*GRHC;T=RLZlF3L0=*W1oDxV3L0LyKRXGbPufXROpQ3;>c%B;+< zBa?A$NPk`(89yFrV&fka8DmHGQx&eok?b1G0*k8dXn@(PAVQWxip!1-66aAuoYwHCfnLkG@>=0Nfv+@O`;R5=-oJ&-?sd_~T7W z6-KyC^R+~W`6%uzRotWSaQvXR;y8P;?YTbgnR&{U3f-A#eO#ck>+)Bk%gBV;mDnuz z8O*N4_xQbbAZc2eUh670`;AXe`;&9M*uepY7Mb-AmF-kV>CA8o)J zSZ=E3a@N!oiOnwqu>;`m{eVTOX5n8pU5_EdB@H`EmFhy}GT-lR-JkVV z(V?SH_9yMZXfPY}hTU_$;V!!K^p^2xIztO5d+^T(Hefl57g~#F&YZcabtBEs$DtD+ zE#+rEuyPleoF4S1b{8%DS_CiBPC15Cy~&_=de}eJX*~z_r3GwnFzQd@2AV0lWqF!i z-`ZHqe%P9wM|Zh4jw)|QTIm>g6n>wHDFk>Ir$S)g5*h9qr!!6G~ia2xytOe1xNK0p6rTPc6tWcm#wRC z2@g@q^Q(nAg**%2iC}|er@FL-c;F{(Q!yDrc<@INNN(X~rC?BMceSk6i!5mTNdz~s z997mU>VDW7S5lyQn6FhNs1_x^(}3GN85RSFuK;CN*RoS|urft9ukQFk9O-{Yo}n5! 
zk`O+8WgdbrQxJmRi$HP@xkJ`R7!AG@!K+yw>_P_R$`me~87Kvd=a8~tX$?z=>nLmEiVh65i7>vpvjVZxcy&pI_8)RuHL#03olUv_FY7{0hbVC$N) zR>M^_iHzTwnfu&fF7fuEdf4hd*q zjAVJ04TqAF4+R1(8qv=1({69touPxO#}m?MFrL-a@YQH>Fh(;gDjF-x>j=U~|FvFC z3yW1~$46`m3p!=^d?dSR!I$G~0f6vXUI#$4_M0}}Stw9x`0=Jgn@B~Ys`M6b8w`iC zbz)zk31*T=4w&~H0x!%^X#R45!Dy@hR5xGVXnNU_rs_^lcwdJF%R96ac zR5mJaD(XH9e^}Bbd%4%2xiqyr%VMIc-eB?c9GY65fFxN}MYE`|1vIOu>b#_muI;Rj zKnhF>Y0FpYCg%vmIYI*^M;liP8%Pc%KwXu>YRpWm-)hq%{9z>9+Ud{p#gW8*q6sHh zUaXpW-j-z=jd2>Ahp{9N)-eAzPGzQeUQ?=r{~5u}-du6XjHw2VM;yp{EuVl{?zd!_ z6#l#ccX%@`rZayrCUy8nn0TycxJ*`CUQLGYMh@tBDGUfn&0qF`#AFZ%JmUh@Ans`& zT25)CJJCQj4(lWNm~8TjG`-zAeANZ4VL4L=5d(B6?#}Swwp56h;F_U(XP(j1DTmT!6IUz{+NjivNG$Kd=jOuH_EWhacQoVbT z1c$FVHq12a32VSyjiC{$tiJHDYjb$`mSdaDFe(NJr-qH8=hh}O1j<&W#dJO7TRCo| zn?{@(WR^W4O!&&X_CVo=?b6fUdfF*xEwh7`tJ+WE=(li5o8xAi0R5z+ojD z!JPqMGC4~u3Xw- zG8@Zw*bUnygJ2UBb~-u)QI4KV(&7kIOj=%2mZvR)uE7o4<(c$PT^zk4b$Q9d#JL-` z%gt0)>x!|7g!US+T{0K@6y}EQl5sL0lk!{_wo684IaIzQ0dCkX8AHjPJGN^iEgFjp zwrd{b!lp7eY**llOJ;#q70D2G`H43{Q*d#^cFBy0DGKS#0bdj-x?sBk2}w$$s{&r} z+9`iYdO$~PS7cL2YW}h})qG*no+Gx~xd7NM9+USg4T|E3?J}HkK}5t+;tk6qA) zoP80^0XJ-yVN--|91ur_nhUneJgy5D9#WGB3yt;20)-p4%Q7bf2|q~}0l-5r8WA{R zy9~dw+K<$?9CV?XMJwhJYkUgmCZ>4&{jL^70nKgs;4HE1*GmcEfh*?TQy1+a)t$b9x=vE*S@d zPo-tA3ybZN$vI-XHbXpa|kVx^cvIc_#f+7e~+J9Dz7@ z!*;ou%4%IPHc4t$+ph&CSws_#m#wMVbu-W6_^%#Fb8~5py-0_3M4N6l;*Z? 
zS7cL2YW}hhleSFB(GlA{W`pfo;~jzF8$3BjY?tAT3nGCq8BD%h;)<9IAC5HGmm{{z zuqlQO2gH$~=7Q}qk4wS@T?%r+z;bzqfPNhJ?Xt`XLBdbc^+7gx%8uAB!>_FNBlT@9 zK5y?eZrCoz8y6l*HHNc8%i3vo%Xh?fIYz|*;nXHG(5zdV%n-PkgM{0@U0&WonDCW% zQAxPlkuKPzca`+2zjb!p?M#LmHa(G4QsETvd*aYt;I z41!J2AKT^Vxg;%)fFx;^Um`7x%mq#$WJ-gFh5r z7dF*7p2;9^#U-;+-zCc@yvdlDmmobp!PxOk27zg@2Xnv|1&S`%u0TSP(&Rvkg~)?0 zSCV-;?%O4glQ1A8HGkO`4(4N7#>&y{OornZGq#H-=ZNhxoN+-!;_)t>GA3Uxah;(c zM{Jj2Qw$pph$BPI1>0pFmxK#1Y&Vy;JmhE~^KkoiS>}Ww;U{Sw+)@M&cO!Mgb{T%9 z)lq(}RA6j3*E83*YSinvZ5CrgVP?mEA58y6+r3XlTm&s|2jQ8_gXj} zcMNCeN`|je{YUT@70=y}449RTTm+F682)UP;)A61iK;RG_9pyL5M>MCTcwjsR1!^U zvO_1dERqxvg)e$57v9!@JA-IKC`*E9A|RuN9wtiHmLzxQvZCS7`&Lz-X0TYj%>2J*K06IV+-7Zn+UAwGbDH)IClld*hK5OrDpf0=m|$ z(sgxm9=X_K&6cE4@?^&d$3*a5pIlogRj7Fa$S|qJ*0fQ^SK3v3wJ1$@w+C=3A+`jN zei1i(g`#m}_=-<|MUBt8y3tIf&X|JsFt}Hq43}I~DOEHAKq53hI@ZI-9rzKSUW#E^ ztYU)5(W94Qr&ij+bdklDw+4yq<;o?94(1bI2C^IKv$v^-{oZ7R?t<@5&<*mW04eV} zgJ*v6)jW!zI7AY9@WOH+&xShPpY>MB5L_{!pd~qw^30htHyMWLkRi%kWEZvDj0~Ib zVg(odo#|jW9%%;o^wk|ZQGIL=+^ra8cV`Rfqs#u4?{L{$uG=^7>Q%sl?A zGGVVk+-cheVuUsFo1OjoIOIHOUX)sJy%lkB1cuChjTJF@+$9i`XPpBvN8njy#abfgcPU)uxsm$4YTRSj+vlny99 z!j8ZL#S$9Ht?b7u?KUHmNKN{-5LBL3mOUs-#0EF!R1^R__TDZlwR?TN9Kq-JfaF4U z2SdkztSnYv&B|9}<|&0Fz-BEEN5M=WH+fbW_?uV-*e{H52EGwsh0k~IT(u%Sf_LwO zq|AC|+uOUtxQSNUtrl?gy9G`@rWnJx{LKlN_n+ic7^)=l6TeLEa;A&-nh4Xp$CF%X zex3yB!V+cp^?ox%1cT3!fTqFbNTBOt3sB-KM}j8=nhO#$dduZt0W4!!9U%n+!=^sa(7i5mARc> zx~SYqE=A=5+9`IfH<`Ap@5l-s@x)yQ$)%|5qL}WbsGMzZtv@QGmlVEF80iLe{F#?L9mFZf~^Joel?^g=Jk$3~OcMCDg1IHl+WvAN=c! 
zu%8({Uc$DWFus#0jXu#%JgH*yNj012U#wBty!GowCf~?yKC!lWSOWHlM)K*6jQaGTO`GVCO)@{0Dy%uF4Fp{033Hd?$)iv%%~?Yiuk>98QS-G z4!PahE%)BPcC>XVobQmGu$RJlX4-M%=w8jSWC!5KQLiQ#nj)l&#>Tt^svV+LB^AIQ zL~xLwPrQ%%0OxQz|42+ciOk#z2fO3l+0O2)o3THQJ8*QZaX6~-4+&ZwP`(u9sY?JR zE2SMlG$hPKt$J`sbsZo(wkt(rFUxd?S!BBWis|xl-U8NE6`j6eeYV{3M}@?^SBz6r z<=E{|P{XRkO++p`3NF=JSl4ps`c5ExL163{Wz1B0{pb8k2x83(UQWGG|{8itN2g%bv@QIg+0h45~GV-`x{6 zcr9hpWTK>$6$leXb%v{t(=qiL{P?yAd;J=bm zjimddluM%u^Iq+cnCOodOuC34lU1--%?{F|@o~9D*eez-j7lUeW>R9%t(4IxdKYsb zGlb7YfGur;JI!uyILy~9@|G6!pB%&uE|Es^9RBB{;xAT(CpyB<$Aq7c3qQX~`1xAl z=j()@uNQv40p4{5EDlD4SsDd zZufhm?)1&Oy-9zo+n-FvlXkWYzIJrw?>-l<$Uf9NJ?y7D^aklS)A(Cg#Q)<8_?-w$ zrFk9#){e#+OcLh^A_-jK*nT`cY;Wyo=N6VNzSiWZvnPK}&@$?HySqCYoEcBH*N$Ev z!P60#dZ{d0BYreMGMn=-H+CP+dAZ)iOO`KKPTqn}z?)_apsUkbhR2b`Vo@U|OlSPj z&ypBt|3J>nYi(!49WE|W&2O5G4b3(cjfJu0pCnwFjE#Ie=V{H%lc$PETChI?GWtFCtmVMn&Ya)+(Y8*s6#=z%t~h zB3uF3sEE`GmMY3WZmXjF<19zjR78@Zjf%);tyM%HuvHO#fMv*0MYsU5Q4uLsELD_$ z+*U>T$61aF6@4QDO)TOXYMPLat3#{g0ivvmPvzYGV70MWCM9Y^OC*{!!c9I;QXR|j zvs{vQJ(Zrx`Q*+!=4+$Uw74uxRk`vqSkB78?sz9o;IL}*^~!hdhJ8hq_?5fkFWr5q zN~HU!dD&m868ppKtZY=iU#b#^1LSRO>TzIG$dkIvCclx-l8~v1%$Z@LdMa5)%~)BDE_;3{>@kh(|UaLx3f5H$T;*^gHyKpe%BO3wc4{pP~Pzq~Ah z_coyVEdr#DNk!CMMMApGYhnls%;SkSom^0oGmY38%DM)Em1D49CaRxqz-w44cE*5P zO>9e*PB&tk8T2?2kr$0zJok98A!}#B&C#f6G8=Mm5rzN}k0%v(M`PzAvmPf$L!QCd zrIVxaB#4-*JDNbp+0g_#GEu$10o>w~m80==GmE;TvYCrSh`0h!$^|AlR*;>gBr=ue zBompF10u@QYMnA2OMr-~x&v}loE?y(A`_9T15yz=W;%9a=zHf(C(!Y6K!zo0ICm2j zMJw{k$sxvYBs@gL({UFSPe*1|Zn27^>l&*BB1Bvcu}Y-m9;-x3GLgB(DzbKoD=CL4 z!w?{%@}%M(tJt~7MC9s#jAOeM3(Um4Viixv#{n6Z0>&x|+quUojs&qNhnUCHaTgU& zM`l%Sv5KSX8mj~%L|hKBN~Gi-t3*mNk-5bxvWV{-s~7@AR1UFgmiPshgrO|j}@ z4X89fZWV+Ft$v6_I80!UiVhK47-G5P-VkJFGE4i722=_dDz-C&yDP*ri+Vm3xm<`3 z1)7~qj2~)1rC`E|k(xN_h3<5oB&9+uFa)Vwbf*H$N+vch=cNp1w&(6|3>_l6FvPr_ Pmz|kJe82GJ&o%x(Y2dEVUa+1c5DSBrEdA#r(FnS(KmSoKctqQ`K6(MafFqm|IER&O=EGrhZw zdb)@1o|Tq7j2#Csv13l0<6~o=lh_U?Bu;n)4j7v#*f9<^B*w9^Q0xFUubcq3iIbCK 
z6UV80>sH;Wy7jtMb$eDiE6y^nzJJyK{{OG)R$V=Z|NF%s{DGx?b8~CI^|kjseRFcj zsd2YIyzceqdc)~z`^eA#=>K^1TT%AGV|%}K+rGPQKbC!N>2qJ$3vY$1wnu~h#`(^< z?s(D~otpH<=epDWXc+ImXVBf~ogED}d*kK(uxE36e!CYp4oA!12M0T&o$2<@v@Kcg`9-iN`#>!FE9^kbj@{h|Gi;g^dacKM<6&>mx^vi{_Pc}rTYKZyxI5e&ZFP2r z{nMlIRtt-B&n=_jWV(D4EOa&o-N__w!J+ut!DwT3&2EV07sH-&-N8;TzG6LIY{v6Q zU5%y1CbE(>^p90{b2%xgdl42^vq#`l5xm}RNF7&O{bB6S)0Kp$1M9FD#dFK^|LTUH zd5YyFT7G_smw9Nc&V}Nm`(#~wyw;A74>#a$e|_AW8Qp!@=Hg4ZACpj4%ro?ol41@3 ztS+Wu1pQlE2AH4pdYf_M>bd2;5S>7QsOjSXD4?bE+9^T!Xd{FbPj*hl&Eq?#k`Nxz zeKH7{{*Xv3@{yLxZ9W}Da9cZrxOvykAPH@{Aw-C@8bpK!1BYK}z|G#C8#iy>+zi1r zQb`apf$YyDyw6AQGCSeLaI!n@cTWv^C)?E`&}Fb6W#4gcvoq{&p+X2H7>@+}JlMA} z8ctE6)*ENEE+@4T6ZPN&4LD@SSL^1}@FW|3ZK99H|u5j1jJq@!#WvUPy&`JP_qn5Qk=PV4l$7xgn zACKS_Hi~WuzGfwy!qc2N(#`!pTjY$px>Hn_#0&UqqUx2CDj_G;-4wNTlkE|Tnx{}y zqZO@nYrV!3>83;gG4y>VWvn9?MQ^>Wtj)m*6UO4yI0cej&vuH9+85YHdG zHgmajl7 zK4cTi>1-68e%j@y|I$!xYUn5}F53ve1k=KMO}E^{KmFlHEt~jYycBJGcwQ_U`Hwf? zjUg)pjxKAcUbwE^v?`9eWm>#V6edmp*OQ;je-O&j_!9)eo>Fof8q%12O2H0*PM z1V|hE@TMF_lxcHsvd00fLU zM%I{Ci%tbMw(Y3|4B&s2fei5bX0Qh7sDuz$gvcBtFrgr<1uT1l15`@z53S-2gA5PQtR1}SJ)MG9P!DjCE80I*S2i$Yi-1!Pc5q<}#{luPqYWjC1zva)=i1Kksl z!U^Gc@k0vq(+r3dkb!i1@k0u1M#wg=5T9!LDK-e+V1*P|aYmN@jSZCzwJjT@z>1J> zXGR9`LJ9~Go$=Wt1vb<;Q#LobHYB-i0ABIf)Xvx-1r{^5L<%e@wbIQ1VGJADqk(-A z9^Bv-!i^PDKt9a1A(F*KFKw6F8d>wtCgI2=AuFVS@Fun-#$t{0 zpxaR3qYNvgfU+g@Db|uR=`WCl+aLw)tgGh~#32P-{iW*~UHU3KxCT-H8>9eJF}bM6 z{7Me$j5}B%1-VLu%>u?I7(Zr43Ucx#Y)IjWI-Tph&|ThENI}kp;DMGxst+l^(}F*? 
zKnmI|0Vyy}ex+rH6>#xM(&iW8#RERD!V20BSOJlXamPf5v4MgmPtd>$D`;nm$oCW& zu>vZDVv@FUx7t;3q$*ax#B^;x@6>*g*%&|ttNxnM;?k3PR{_IUSge4|!v`xMG_<_< zVFhFe`7{G!1!N!{FI~|H| zP_x1ctO)sH1!NE}tbh>F3D6!Zu%X78vboK*A?b}3kU>nXgw?JBLQEr4ORT_xk{woH zMW}A9fK0*)DH6mW&rqqhe1}h-ERmKX)gJ!INd{SeBX(_8*ne-B?e38_UO{}m2 z%9hZn7_kEKU{N%BacJVj1}ivXzzSSiK1@>s0r0?=rW>`w3h;28i*f~ohJI>&;!6b{ z9G5&<1Qo2Xf?Oq4u>$;<87s)im$3r68z{J(1y*pR z5>`NkV7;Aya$|)R9Ic8KxFj7ihyem%BfA!du)+$+pql!j<@p8z8A9G)2Yds845a17 z4=W%;$ax8f6_9~+yl@RxWvswvgx$mn$o#5f1>{Mc?Ch}u8){ZqffXTNtbh#Sg%uDY zIsw{a1vb<;Q#NA-HYB~V0y0Pttbh>Hh}04*u%Kjz6<84pW{;*$%H|COGGlckVi-l% zjCiH$Qad687OeIa5Z)?d1>`|9RzNC!&t9G9Ha2`k7|QWYz}kD0N8oO~H8 z2+`*kmhCP^E36>rqAFH^9}6){-QEN%tl(&6tN^#DW~_jO8H^R609(r$1vc*>9Cg47 zh+NEA0b>J&MrVN)9Ib>EP$3kPw1sU_Lpr8@iR^BCa^oAi=h5+k6;B*oqbCtu4@cv+ z;UogdNE*~2J)+VcH9*HHz9ATo40PbnVfmGVcmAWFwZsp?a#er`HF0H$B%o8`P6pEz z*Mo_0LSVC(pE~F-Om<%ZYx1VOzY+YGMnJQt=t4kGLa#0aq{jAj{$f|1V))gNLZyg9 zO(q{uyd3u6FJ66T5y2lc;EqsX0HY8AEe*Zm5Rh6}yEO^+E7}*}C0UqjdrJL>1RRJe zMJZW>|UDas{QMIEKqjcR*^IA1$x9OOiXIpN5$V^S#{NT%b zDXIvlMwA}9Y-@p;ngnmIB9+9?9uy9&hKTge>nYmwoWH|W>|1c$DuOGTPtQqffR6~i zX`jEUzEy#n?!?y6@^(0tSi!nw-<>JF08NzJB~Z4$&Qg8r;%SYxK{r2;p|Y72O#2}s z5&W8Pf-MqEf7sgUb%&kFTXwQzlzZdxXxuuEf4a4Q`gCvH8*cRC%SyjWDhe#&E|$PI zJQS?i>J#1`@keqidFW2iwHedSsqv`0xzU|W)8nOEPxSg{&Q9rpyk%HmZtiWbhfB(#Syv}CBG|0 zzw-s1Z^~U2pd3$XU-|S|L2?>Y2yMA6?FKvw_qnSS+55D=fi5!X4!Yyc#@XHmy47T7 zW21*ICYe7rx_cWwBZNF9qbEkU4MyE*i(I3ToP4a7d9k*xew#?{vgv9cw`iy2<}Pxt z0e8x?^5Z3|;=`WuIV!Zm3avvN0vwmEB%IwilRIo}T9MyMgK*)wYTz(B1l~?V$9J~U zD?7FlyN@GW`Apvo%4)dQ99Azd@{SdgzmCcI4Yn^4Y-rRlm;-| z+3IYMdlPh!ee3looeo;(>(-RaHF)@5=~jUHGdqVU@fGjM*sJOn{PJcMF{<__QBMQZ zN59(2fmmj;m+(7-{^Pw1SxxK3BSv zWRGWBSi)aMuv*zLsrKUe+xmk+;;nx#3YmlOi1kk?iym1}s*j_Kbv>Jvb~eyTRXsWH0-&FRZR4+no8!A4zHQ|{tw z#SV3ML*)i4zxo+AmXC}9+)DBm4sAymjd+DLWcG@`t4{1 zbBeb2UeGw;uh?2x&P}{G|0dzqHEo>H5;3G=GwLZ_1hCWmvjC9&h^>QUybut!x)Lf0 z%&daniTDj+N<*L(3MtXEkX2WyEx{f5HcP#<`n%ORnn@n@sWQ2+aa-03k zPhs6ARiDDCQd0{m0yTV9*qEi9p@1verJ^e99Mrt4$3?S~{Tth*w9Wpxc<;Tv?%>w` 
zIm<<|EZ_2P>=>kIp6_QP{u(VM!P%|BFmPox6dzsB`9<5%oZ(0myta$9B%RtVej)`Y z+zn+GqBZMl6aH4Y$~-gfZuZe?=7bB_mQhp^f8ovWy}D7-@jB-xox^z%t5{QEC!o|M zyf=btbc3Y}D|v`Bv!2+^yO49anNNDY9_6R4*La!<<{)+~jC~L}BPN)qc>tNL+Rg)YNqXx{9dUpdJ z2`Q=R&NkZRcV;}=K^r`J+xcFnlDU;G>LUnKlULRS+|@(Vbnyv-~6J;|mD_o%MQB7r-_-)z8L;mi&7vKToI zFXp6fuh4?d#D>%j&o2eGExd;aVF}qcNmwNXcXd_hFXVOr7EWNE8i$+k^HE5%JJvfB zH_`u9L0pQNshW!KMsPfwYvj=ar3i`-1=MNEJoic)j<%9sH7~Cy)~DkBhZ=Qo-!8bX zL$-ls@WG(Lk)DY#n>P%@%I3p3iJ9r8o)KzuSxhb(GY6+FWLP;@j^NM5WBk)lCKs~zBfEhm*!Gqpjl`Z;VI3te%z zjellfh+-E(YkVhAB{%J(1qk$B>EULfT1D9a?~CC6AW}9T)4<-z9j0sBX-WwWiBb1-v~3;zXx}zQJ!*1db<` z8;O06L>WQnm{&YS84*yNNJRm% z37sm=CKTo7YX|F>deDNQKTJmFlzxNWtUzBZ-)1T0ktDDqlVBBD=Uw>ipdv}LfBv#b zXh8>nc&X;3&WRviYF~=o>;^v#1M<>Y=I)6EN}Kh7Ld{ea0OY09&0Mrdn9(xtq^c$^ z2*^t(A!35zKwdfo>45`)ydq5m@_sY~$V=&}z{8**FCC3YCIE0qX^ILhe6$vjmyVnB zVjoC@Y&)dGs}zTVL9va^AKIbg(O44#^`%rx%2Nb^LaDW&!E`>9%k-tgNByBA@sv+V zhsnJrH0({E7>&_Ef*YglUW+^DI6oCt*FY9h5+Q~UMFBRMm8uhl8)SAlSfV|IRG62J z8Tk86*s4nd()_M!LhH4I06m3XF)n>6v7{!vvjP4qa*7G!vopyn)EoZeP(M~hNBPSc zYwTK!ddJrn8t`yk2DwE#cYh&hg`N1X=9q;{t7PlG>)+KVT6*70#YihVPC1#0kye-A z=g?ucHJWDHvLUr6b((*D#JLM!tWEOIYcxp;Pt~imWVZQ}B#p=s#e%Pg*b1)*e|cqB zbjr2aatdtKrq?hq*Z-<9TrdbvZBjtlva^zm>WX;M>JXtnZlDi!3;0$8{I?#ofm{?6~#>ThK;jAEB)Ms6UYslxC!t_<)!Re|$%SLJq?ML5__M{Fc z+oMUZqqyOZ$tl0yu-|p9VZ1C?-Y%0XwYplRk;AkIt!ypT?1_a0FkSG{+6 zE#14!Kbs0Tf}g2?g;@$wk(XtcjRkOm5IkR?MuL5b^+oyWzz@zcQ0rbLI4#SbA(OXN;DDm`xav4fHky$u%a)_;w zW;Khzf5K!@*=qCOCO2ognlmT1>s#4jw&&WPLT(F z(^XkJ6%KYa&pIc+0w1ZchuuuJ{uC&Yi~SaMTLzep&9XCpwnM?x(uay`7*1UT%fonxB^6SZjF_=ovvnP{D)4L}VW~;K$ zP(7K92hW~N$^ny+XjluKXk@%tcYxsAlS$b&q$IDNOc;>0YrmdMAS^m%5immcWWvA< zN4553M_T3F9;PP~7*aX2xhFf)VtX=f5iRWriWM_!doM2(&3X4^GC@}9$=U&WGE0#c zMtouPWW4A^m&>=k`1NG`5ZL5BdomeG^j@)VPsWo~c}=(!SayHU!d}L}Y%*Y(73+xL z-IK|o=nQYEHaXe z7yFFl+mlJzHl!r4o=g~!wQIkgOdu>eWf3q!_hiDr3`e#0WbN8|GJzqLGn;#|HrJC; zRKqqqNg6QzS6O4{-IK`#S*0gi3DA>SiriG}@Z!n#HuAsXw@P}ilkuVxT`ovZ#t(t3 zKF^*^MlwK8#*gb{2OMZfwfA5}*C#q6gxOFmqJkrM0 
zt2(NY&^?))8B>_@UMIsL*S#mpL$+5>#@B#sYf^=juk^wq0o~+7e!X?1q9hr;)CjZg0K6InSkBix| z$0U7sF7l(R)E2HIHThC;Ng=hfeHA8}?W5$)=MRTi6;z!_PC& zeLk;xDP@}6?L}7d8q)il_J_z>%A#DzGYKfL9V8EV2}G+&>wHicW0$_6bU)k|ANPGj zNfLD61AZ_3@usB;BV6jKB|6lqI9fKGq560f9*!UM)(2-VwmsJ;Ju^?a=%PClt&a^C|!>5b2Ir~T2;{stJCSU6Kr`I)9_ zX`-V%EZ+vB@dtjS0S~a;RL$kAsVNehUj||az~B1;i&D+Pzizr7LxxKlc9tsDh01B{ zSh++V^vY>0K~aa)QkFw!Dt1)-y>0qz!oT!;e~+^PmAbf$=JWb4H(Jj1N-_(T1@3{R zjFOZ`xf!hmJnc8@Kwy{+CLC__YXLtetWUC%fZ*_tc7N0lF6xjcMHdi65v@qD7d+`QS{YTb#mPyekb z6QO^j3CTbxj2Dqxy_A8=?we@3YQdrST0^>GXR*xP;2y<`sEo%n=g}qZbMUPQUc>T( zV=wy~1BaaVRboowsvdln^5o?tPr@o?8ot?pTUq8Rv)d9J)lYb`D`MH{8Dw9!uEHfe zL@5s#CL5Kc$L~b2&azWo+Cn_=leVdt3?V%D;|L_TaI;b{D7CXvRx1e_9!k~60lsoI zs2ct>g4eMeRn{x&e%KmU=BkxuBr9<}WF^KD_)Y_E^JG{I9KHgSU0usg(ZR}6DGC*C z2$xWl*(M|V-;rmiMvf$e4_}!Fk%o!y--|$U54lOTX;<3PR~$FcBH|ES8^6M;h{&4V z7r-9eJYTwga50MKGbAu8A9yEC;9zI8Gu_^q;%;gZFT%oV_Hp=f1g~a!unQTKD=u7c zRfc9zSyXTqf*2D}TafqRgvBp4T(TWyQ4A11;!46=kTx(lOH^KFX!Hr+h+xgbh{SN< zBjF+xIP%QjHWH3yMp}?W+ zWvIC{8O-D5YBYfDSN45L2!d}qwHGXFLVe&T=Kx~Sg#@%OMzXxhhC@lohXR2Xjc8@C zXQw;qOwqjpqcLeTUQKI-6==nWY-$BM6c0HxvwESiw77=ghV&onM@`M<;08Ps$!=Qk z6;7|7O%eT62NNg_F5-ggMR zFhjRWcv;}30R7=+@5xTSHqz9z{^=bRYw+hTEiJDPE;W_XiO}O_qq43LnWiYKW_^SN zf8o;9@+^yqs(ORP({pHQc>6w;Qj)Qy%G#3=!x zfs&(*D}@athZ3NEL8q`kjAUCo{dvAHlGsl+;W*2SRa4K~vP`2fPGhq$mgK=2=HJ@g zRQ5k3xY?U4E}30$xvR7$3xC;wJG_||)0w{*lRErE(hC*6hfi{AR5wI$jC` zLQ?aW{fyGu(=JdA;-2=Q<&;Lc6Ne>?obYl8u;!~SU=7O|7eq=Gu?V2@)v%@&mQ}*; zqJnR@z%?wJV%Vq-Be2vQ;2QSvvNaX4!Cgp-$Y4;Cu$Tnjb^&ZS=7b>OC+Q%9(TE@g zFsiQwv;3m(OZDyr5*)tn*f7(uC#(T`*k-Mf1-|XrCNqqR0m7+aW9YfH$qa$ARRwYI z5Dt~LiB1hN%bpM>eB~X2FAdVUmcH}ofbG)L-sJ1PAsb^d)GlMYWEu+m6E|KYK)O`I zWB|zqZvLBeqKjX8PbN zS#JM;s{8RR%n!#jHk$k23><2wo3-Zj_;qk zIC}Lq!DJ{$Y?nNqaW1QMFX$BJhV7bp=CEBZ)+iPem%Rdtd=(Siuw5f}?%1x8v}i0Y z*sggHjO`jCK~XSzsivID+^}7Nt7ls>bHjE8rp0vTfG-LZU9eq&ge0YfDS|>RdV(9a zE3zphHGkO$axjA%gB!Nnt_$16gm)3!WjN!4NU12@b!->U)Dhcd*c8Kt1LDX~bHR3* z$0gxHR{^^~YIrn7{j0+|-LPGjIUz{+NxBFS9#YWp>xS(z{L1QG0$q5_ImE(`8@9{w 
z#)St-5A4BVxM1Um?Q)EY0m1=mjrMD|q!*-c=QMv1ckTQ9fBxF&n0Pb z1SFNMS0hY_D+*ad-zQ0!j@T}l2bQ`};rdFwNL?-{6PcRP#oYGo67m`6vRYS+O{Dhq z;)Q$5-Q(30H*D9;Gl%VZaVm#x`8`w$yx0-jHFD>U?HWmo#^QqQng`k4ROW{53S4o? zOwxCG*<7jflI17f1Pzhaao?`MwAh0=;EMu97i?D`AxUXEu*OR<@=nYodpB%XWK&3L z{<5D@T64p8kJN?jV#2$K?J}HkL4>P_HO+mfUAeg+O0h((YS1SCO1S5tUcQ03U;xNn!2w-6?LIT92F&!qrK(a%>D|7{YF?K{j?CF{-!6F^o1j0o%h7X5S{#9jNy|rmZg8(7w#zf= zpSn1DMe6dBhY133!*;ou%4%IPHj&!bip|n7=ESIQGTtI$DDU>-!8`+7aq78LnBmKec>SrV;phZx63gq1_%eN zk)!9Ldjswaq6wia38D$ZUdbq+MAI{2BzNbs zqT$c`R#}g(Uh&sE2g-Ryp#NL(96>e5TovVsmUg_*`pKg z5lO;FNC$jZBTP;}4k*N;K>7&CtSPXi*P5MRV~^>mU(SkXm0NCvNiF1im8+zEl2$?L zE+$H^HLG-8ot#H5_LNE$0TL0)6wT~pJ(j1?;JZG#wos~2^8}D#Qj4uAqKrZ4$_;hC z(nsm;_5dy=#FhZkFXD5h!G8b$fuWLSQe|8AaeBRrP!&JwlHaH5nn@;H&>dt1#b%y*~^tn5FN}WzVu}` z)Msx~4|?745MANk8KaBeNdZ!Rp9-G-#aHtvf?^#Xy6~cMAkT(6)th!#)I*#;ecJLC zbo~%*^$_+Kt#80f6kPPSC;h=_s2Sw5*S76M^@%-jOfkyN_9oIthy5$x;j;Hyw{G6v z9-L2Q9PgvwCNHylIH|ry!EV`1s$<3MND(3>B&pW zbjlW(pp!n(n(k~5daaGoa5^3h zI-9-i9=gD3xN*L-vK+&Kv@!=*-7@xOyN>j5^X8_}Zt0c~ z=SwMNmak6xYvm$j4nDmH?(wo%R0NRK44KuwoFy)?TKc#{)TGa53k!+E_LE}nC{sMA z0`Dd(cN~VSEf~qwGW1dtcG<8rpqm#w zH-m7`&xCEmaNtrP*QV5{HL*EBATCbTbPB}eaTgOL&pHQUj=;r0+-qgcOs74QKum-$ zwORcdt+b}_55(k1H<=+D12KNsB@mNg9Ro4*_;bpHy#jH&WgCe7t68re4mn$zM*{@n z;s^|x{TeG`^0-SNCeJztVvfLb%8I=z;v=?!SXWKB6v;#%F0na4MNHnmmKm}!5aWkk z0x=oZF%UD4Kc`ICD-a*C4#c#pb3np6SoTtL)s(;WKoiCLDl+>u24eEKOCToCItOBo zz;nuqy#n#k-3r7+A5zOU6v+VsG1=TKGh|~R#t*v$Vlu2_AZ8waPMNS*AUn8PKI)Z2RfaF4U2SdkztSnYv%gR?{<|&0Fz-BEEN5M=W zH+fDO_?uY;*e{H52EJ~&!Y8vqb)c1OSY}qGJvRx#FYJS)%z9?qTRVfeiB{UJ7I5{u z1x`Gs7{j>y%?X(IpX6j1swDCgzfA6Ori=HQ2-CdVlU!+jo&@Q_5@q;}eltV_gU^wG zrorY&(ClIhP~s~`f+qxBX0q zQA-P*jX`%Zi7$uC;^$?DHDv^{gJt3C^YFs#$9vIRN~5tNelJ~u_I;j1Znt{NJ@>61 zUcMO4x5-Y}i{U&o?YMDxujaM+1MuUhR}&0P5z<9tt0%($K?Dc+`NaFE4{#2r^N+;D zlgP}iaIiDlnQre)I~n_vxDAI_8&^kF{vkoD1Im}9Jaq}cWTmtth(^(*s?)b*$%r{* z$`cO&gvMTy=?=5Vbomw2<)yp@tge(heP-JJYrpar;!Uf+$W%FY+Z5EWDsdB$%eIzF z=a>HE%&Bc;4ZbKac7!r!syynUmQM#`j7gL&&9 
zvUp>`qKgJGRRx2d8oY@zXVf6=cSQDXl1Q=O*hQ&iCrJ zz8vQJ!~Qg9=LomN!lN*f-9(c~|I9FXmF+`h*%v?dO3h;h%9$TYm0V~1G*8|5`MjVY z^becpqt90zf$YP<{F+A~8HCKq%mWU{@|k&1BWA_S?U?)A7;44rZmC3%^*&}& zulFHR%*BNa!xRzt?GCeH$au{MJ&czP8Rx!4N9u ziNLF|xs$lCMuI33buk||LJVs@GAhC}HzyYvk@)RtM+_N185Q|~nURYfafFIFxyXoQ zAZA7`>SxFl^Kl~-$c~Cs$=cCUWATOWdD%^8KmGE?T>9Vp{?0Q8_Wi{B@BXc?z3=Ip zzyDW%cM*Q%%J_>{z$!G+iCFOhL{Bb7C(^Iy(boiheJlF9^g=+toKKf-RX^!o#_H$T=e{b?V}r*FZaZxb{D1T3TP&;*vQDJFg;T;9GcYBcBf zEG+KZf8dgXm&KPq|Ei^{U_X08@cgOKXpo>2>Xxn+POYPVfhQr~OqDV_?w!VCr8CVo z52TYoKji2({reIUMlQr9)ES|V_OtU6Gx$3u)Fah|qMwc;_gfJpO1e# zl6HI<96*D0PIWgP-yV;)>mUDKxN?0w>TYgyC)0EW=`n>jkIzg}bD11Q<7eNjq!U5E z8K=L=zDa+xbS0I4(m32)U4Z#^qqz@m1bq2Z@;o4-+mkKjETNNe@ota>yp?0=62O1K zt^n(>=k;&d=?+S6vj@4&)v)wEbimasAZ}k;c+G+Rm+Xc3-AJR4!TfD~)BwgXzuDiK zEI}U~oA_iWbv0JS$g0WY5GRZ!y#&}NG8g5zdkyTp9UVuB4qjBvQuJ-?BKrv|2k+M>zyHwrvWbwd}tcP zS62@Jv=_6{P_zSNHR<+3xCQp#Gw5#g&W;9~y>Z!gcvD%r0`_1($6u29beRje(%$F~|8yueTXD`@?C{xO#5sQuJx`i+1swt2hH)U4(`9{N)Fu z#?oa4Ke^q%6EXi<*n4lUJGixfjtC(AY7zOd{NUmAdCV6wh}=n#;K-5EOA9BmzpBgy z#lid&*rPbh9YL`Ui3GE-jUs9P$*cFGWO0DqT7`ad1wvGWZ82BxO)i7I_;ZtXzDkH< zI$1A-L4PuRdFp1bczJXXebs|8|`&ywBDe%F^w;QeQ49eW`Eir4X=h& z1F4IoUwf$?s9fzH*mFE(n)6F!pn3RTOe1s39~&)R1oYPuiQf*F9Y^tO+}pghfBJN9 zjE-;ZDGfvAA*oZLYU23LDdnSL#KCbfx2YeC(uXrwAkij%0bGVAg+d3CPzMFbbl=m` z{E7bNle~1Ugnf_qdfS~ek}{#gTm)PV88wMa4xztvhi3^dG(Z%|WEEhsy^!*N!ow;o zrXSC;W9~|6O4{UnO7ZMQI*9?N&-g0Xhjw01`{-<4lp9Xa?|HuYd{BH!!p{WYWOv-} zo*JMnE-1!i^$X5wX}xrik5fX%Lt!yB)oHgsO!G-zEHvV|r5B*J_Oq$wWZNCtKOxUr zURb$qrG4Gu_6yG8Zw`$&+J}xDI@7&=^TyMsyS>9V#h?4aUU(~9#lkP6QtK*>|E12H>xH?uW@L-IcKz< zF*XG zDLl<}ewx;04gRx5&LF@l{+g(IW!-yKrI3?uaVvn>Q`yx#^mTRaZ{6%~mThmw(D#{? 
zkuN30WKF~P#!-H>WMZ%8LTy(HD;;=U!xkV|gLhbjkqjxy@S?<)`C@|BqJ28zZ`hUu zG_)oeqn(-e_=UE4^r2dG6MDFPkPl)$|q(?aEmj))mkA?i&UlfTtQ^Y??OoVLSx2^kE3G zO_i5dsoMI_1ri`_?8BRK7*TS~y_YD}9V&jA&4KQ8)SOk2b*RU2!62b<9zoS7^rs!%+l0#|V z1de{Y7}h>!#iG{J89W4czp@J)A~;Y(Jz zMF0ehI7Zr@>JC!L&;i6^-a4kT;qifL7y$n)k1fFOo5311lM+H;5h8PV#00#u7EtKS z7bWAq8X(Y?tMa6cDN{4QC8ez{Pw;GqPM> zNq2@5$n;Wx+F1IM4{Ld;1t}mSv$|2E9yUk;p{V1<4=J!20WbUKSf?9OK!!<0Gb5yc z45^cyJyKvpZOaBJup)#yGnHFW$l0Y4B7=hsQb0be6QDg(U_*^FWpk5jL(&^5AcJU@ zf$fn3LQEr4OQgVpQY+mI5GGtAdo*-FMauQEOBONxG8_8=E@xp3)D(o6xa#e#Nzy>KGZ^LFs3fLnB_%Sn5kdrTITDI&&u3XE6*6+$t|pjZKY51Vyq9ag~f)Y^U?oz9xSYI((a zyhz_3@B$UcWUcyZ@?lN=(Aq_;fJ`qOFSTF=WSV^9QZ#*FgB1{pI=yfWRxG?AVBA$; zGXlm67-O!q1%NoC8^}QB$AJu1y9&sYI@#G{1vb>IumUSWs5kS%1jryN#K8(HAVhQm zw8si;sBxxjZgXu&dSeA-5MvWyv#Wp*(}>g(E3lwshZR^63TBT6!~txu0y1N+5vdfN z0s#wTBIQ;_1}uQht^z``GFCtyG-CzilNuY;b|@}b0U5JKtbh#B5;_%Q8B88DIe3_e zY=sr%M-Z`PCWjSh-V<=?v+!_y)uL;`NkKnJmZ7sBQZ z1bQAB>(V-`pauwl%z=&US{%Zvza~^Q^#h9)1l%$}MrQd@uf^N!D2E5V@GK z0>%akje3FxR#?H&N>~9ELNQ6}eFeynj;UWFyBl8t`G)TK-k7|1!uquvU=X2W3zq$F zDP+ndJ!*iCQ+z`(9{sl&yz?LZ>{I^iLs+f~5P=b^615A*Y zL^vU^+4CibzH*9|nIi~@(>DF3#~~8%Um5|;o}vo@Jqf+K5Re+%*ZBt*)vlPYh7>AA z9BMN8fa2w_2Y>Mz`~&`=0e6H70~mz}Xldvbhk(?O-5R7pB1GgLA>b?87vLrNo@;x` z4Tt}ftPqE&Qh0^WG~j48^AVaM%^?92870ANRof%^yPdpMGMFErD-qk(q$zYw)ub8F z+)bVZza7DAELqbZq#)Vy5amZBdR`;WTfVv}i~z0sX^5bF^K4;vg5YnOQ6(V6 zvM_9gbt)c^{k^SZ`i7c)!pqE6_GpXMh!ELPlx=jY5OplYh5@IsQzV*Fmkh#@p zj+IMOxk`+VN%ojZcG1{`h~;>Cu}{LC$E)P5WV3~F9b~TiL06t>HkdWi^G&&{0+i!P z?JJ+YUXTqtK|TuixvO+CMHj|wbS4|!L3iBQINRHJyffL^Ko-gmQd^FuUwz7HmIp=;J^8CHlT_errGk*;#T}lg!e60~!F|(-Ec1gfK72+p_Z4ZuaO&uVw8EbH= zBrHv1U1<%U{BQ%-{bLBe{L-~z<@YW4D$(|hL}Y{G?#}+#LLht%c>>#C!8^5^i17#u+UULn=0< zddQ-1`qr|Y=AQ+C>_==JB;$pEu+^1NNnl1-f!~Sv4Pi<{pcTq&FiFNID#8=LTb-ks z{=$eM+AyiMyAvNBF9+F-ti^0avt3MHox0 zD7flz(d=aZ#&#)fvwtq$dvC8hxV3-Ia*-^{xBMGB1}U23@@FId8Z9NkXU7`M7i~jx zh9kuUyta$9B%RtVej)`Y+zn+GqBYysCj6~(m3d~|-Rz^)%n28;C4jE&V$Yl5dv&9v 
z<8{tYI*0QjmL*#`;Jr73YjlI%)*lS=5NBpRv72`xXZuBxpN;spabUMUFVK06@_S91`fK9iGWX}Qm}kYu4(MKcHgq5&sDj*aDn+90=3 zQj;7KqXx{9dUpdJ2`Q=R&NkZRcV;}=+3sxiw)4GC$#DtL>TU{H9m#z)5bdnMFEwSW zLGH7~BH0)Xr{mEe6-sZ2E;!pbA1~gCzE1no^D-pQeY{*cj|P>FsMo@>da%&h7@+mf z_+fZ3zJD;bk0VThHQ_9=CqTiEjs&Eg3 zsw@(?Q~b>a+!fB;P%n#-h=m^TIyCui%S1eVB5lbh!B>LeUpS$QgBxUD_0Ggi^nXNI7Zd!;HYlHjEF+4w$bim*6p?hpdA78e zT3s>9O1lIN9}4V$Sxm;!2WOPfYO15!Q{X*?Xt`_yKh}VU!s%6Pf%Bbs_xktQv}+*Q z{X}@02-tXU6HbK|N4ZgE+T@^BAGXqm14>3zXkkX1v_pOf9*9qd>5xS&D^qO{0E&*L zY8*H~BWQBSLrXFnU7e=7dITPdAE-xH8ZhJMB6!f-tFlEu3}@sqfV_c=mtx)b`Fu7+ z-Y8Emt=q6i&LE^D^$ED}c`kJ*eKQD3#E3IfFIix7*YbxOa8F1hJkcNY&}EP4{}cbv zz_3e<#y>MKM6rvgY6G_|5m}KvilKl(tz6kCQBDECOX-OTI*(9QU?Om}e zGkwE1{g>Xe7e;36R0}5#Zahg{f>k-J`e>&JU11!-$3kGvsnKNuJf5D55bw#p*uN%S z%w2eyM!WMLss30vD`h-8yg>!Pk$4f?Z@kvH`KLmt9=+u*QN7(3??5MG1j#)-AH@oV zd+rIcLP4h`?Vjsfp`hbpSN3-ao*=3l9Tp}?oM^(^Lm*CcO6VK>Rz=`=V!4sn=SYs< z2r38^WbnF#KZ?W(H^7<%B`5U*)tN+BXXorH56vL<6u~D#lo51}dBszd5dp=CR1_ea z(5YtbP35r67yH(4HlYPWf0&HUDg6e$S%JP-zRgm~BS~OKE(ByPlHU$0k~I70FPnrG zbO4B#YEJ5$EX1ogJIvsxVL)Cw%iKMYKxwlcP^g)z0)V`9x|xfjgh|>n!ATYUT^Nv; zPC~>4!-2eX2+{)w0C`242;}`}2#}Z3Re^^=L0&ob)2Dl5 zv?a7B=`gvsU535s6QeOYNN{7c-D`2@9OtK^7Uo?AV)#%LV3S#?I$^j$W|xB{+CxaI z%=zy(VXH0;Nb|d@39Z)-0`wGm#Sn{e+d}uw2KcYYDJF=|&Lpo;Z}^Wx{aY0sNNlQh^H66*jge9mJ$4UjV4LqsoGLYrnF?X`IE}d5ygV9hu8|Q z2!DCSS9Hp?*>VbO)uvYrb5*uxzSV~RRb#kd5T4qkfUSE}S2C7XhXK%^Z#BSw8=~I6 zSuChF@ndAje}BG$6$4IWYy9wkqWbDJx&ys_g*xOew2%30UCtr4>^Xq;NNt5{y4C$n zwNcOW0{m$cw(Av{b|+dA2cC;}p-p~Ay%sh&yZZA?F z`pcYR`0)n3$<_;OMI#1;${E6TAXe;esX4xtqc{9n(>}}EvZMm87*K!a29|WKWm}ES z;uMKzNzV986W&~px0KDxEmznofOVZ2sFxyIKB3=%H_g7w2tMDC-T#zs|HP*-v6|M5 zjc=X9SwXm{&$8rv+(zcu|d)1%&4S#Fs7zXBc!1RVcCa3&*!+zJbhVimo zkxMM(cl_C2fjBb_lqE7(;9;D!(z_HR<8`h-QvnOJ6rv(8%Pt!W-~`D7A+m2WC-ID< zBeEvF&e2wSV=Yc{>33OQwMf28Y{UJm!a|`0OEhu~{-VOcgK`ORl>k$~@%^Xun?CYE z%4#Thy%P3scBkFW>Ct$rJB=4mV|T~>w`gjtHTVhV)|=P`$F=ATd24yUW2%vk!ALfF zqbK3AG}HZil52HrGKqAys)~?FT){0Ou?KtQMyAziVQ(8Wz&!}}$M<;zI3l{TKI~J~ 
z$C|Q>#DXi1OH*_hd&}Y){6uV*H93-)p4nL-uW&cTXl0WR;$*9iS((6nSB&oG@(` z)FdmGbKX4}FFMiXa%|GCC*y~}Chysk$w;F2ihX-Bo~+7i!ll5n`+FAlG6rUo0n4n| zXF%VcOvbe#0ebah{CGL%zC9UFRCL55Y=rK~_;Cp{wfAH=OU=|rk z#*2MM^6kl_Y#UONS5GDk$lA4EPbLr+opN`2GGS5-x86NjySAQ8n6%27%{^KBS<#cp z1X-mgTM5vUS&F>eyPEbk%33j+JH2}{UUZ_%1?kE7A#l~_*^|jg2I$FnvMR3$mjcU; zjAu_KV_-HJu*`~m2K4R8WLz5(pjS`Ek4M^=8L2LGPbO!k_MQxfT=$+V57}Nl8PB@P z-D#|~H)!9ziz~C}$z&QRc-@7bOyF9Pi~QEfgfWksmKmN5&Hs9kI|z=#GpZ zmoQ`O$bPEAwK$SpgIT~;_G5P8+N&Gm*;d(1A$D8Wfj^7j1e<;pZ9XKA%^;lrl~3_99z;UkTpdv_C}7QWoVxo=HG~ z?I3x`OCVZJ7B<$UFCN_w_r=G3Up$h;68qlk1AZ_3@usB;BV6jKB|6MTac8LF9)*YF z2fY=?*^6z@^-0gnQ!cvb&P40u0-ar#zY<+WhTWAI;7ze6-*9AG8{xfvuN_F5R;Jgw zip_qbQ zY(S+hE~ELpzRQi4bG?$(s513g88{x$m>anDl`;w+jNsaGggMolc304$qfhk4t^Tk- z?RN*AbKSuXy7Tmw(Qq=AT$FJ7^y$|vzn13bqtK2Im+~_oSh)*KPW8JJyNecnErJ(o zryRq{?zrDQHRzperz9uYZLN*fIV<{U3`b9ldz+nMcdHjS;yG`=o(KEVD!SVr_Qoke zK!Kg>F>zpL#eVZ-(mykd7x33a&oQ6X%fvVxrVQ?&LsgJ=9`6s&aHn&5b9x0FM26Bw z2Z8iDx!lolR;tUp-XFn|R+X)B51pV{Tv?0D73=Y06fdG$8t2b> zbYJ@1@=Y`owct>EtsxV!K{nE_|~Ax3bJtX166cs-N&=SH!Z@GswPdT@tS=yu?Ela@RK**LNaVXW6MP zZ6O}`Njr!nOg#T_1d?00St%Hl+F2>9^#ThTe;UE-SdJ>|6?H#sjeEg|jqfzzHcy7d zz~L)E+10h|6dkPW!U`V$JMs+G$dQEb;Vbi*1d!j0KynYcNwsPF+0s`WFCZ1irRxV# z#WA0ucevWPfmR!b;M({VR@Fwi;Z+~WzXo59;MFV-b|HguWeOM043uh&=aCv?VHHb= zt1omceyQP-?I??4fbbDl64rvWfx%hy@|r>?;crB+=3zu)IPj5h5el67*KzuJr^bWY zlCFl>)iCfUPVEN67Z(VeeKj>`5hHWfqZmTqYfeoD!=xAr9LipXnoEC|4ZtO@mjpPXwB320x8WO-u!L-!9cc7>?<_EOcKcf^S(pig&7J~UJB43ZuXw+`7d0WTApPwQB`lSczO;^ zElo3MNo3Fz9+>IkI3q>#3JrCua1AI^+BUJ)87Ioh~V*g$e90qPfY3j4!I zwzbor=L;i={bUo4v%FX}^}H?1G#cYHHVb1(9;{*hEu6|sA*QBO2mdpIo4vW>k{Jhq znAqBt%m;tjfIGaI7Sox(7?V2uqqgLvXzUCchwnxX=y)j%2uaOf_A^RrPrE=hhb7(nWyqkb;h11YlHO3ugI6->u^$ZVEN2}{@%>4WW( zL9hu5I~^T@C`ZpFX>kN3NlTt^EL5U=f51mKj@T~Gq<`w-=oP8Uhcly#xnaB9Ol7sM z7@J6F=QdA=S)L?wVMDw=g}Gt7WSq>$q&(M!?UIpM4%JD_4cjGSD7kaTc8#P(V{yTD z&4cW2Ds#hj1+JcL$;=Jg6_^%#Fb8~5py-0_3M3>cjjjrK#p_Us(GlAf*%XqRzwBp} z*4(h&whgu$7_!A<@}XVVak^o<3}@;fq8V0J4ZDChIk%-+ScH_p4cled6vKuC;>b{Q 
z!FHL)CE)_K7s^`JL$dPtq0y`(w#zan1PMP$7XiXU3Oat>uw8~ZwaE;D3pq%*VY|G%g)reO@7e=}8@5YtSFDEZ zGSHN^d0nodgCGSpnKUFLB~xR86bM)PSy5)=)_ z^K-;@S>}Ww;V0=LKzJCUMvN(d;jnL);a6H6Llj2ZoufBXq6V2{!haj{4=Sh*r7f8kf{UT%n-uoFZB0L<%O)`z--oYgXyHIysMA z?2(|fp0g}9L+BUz4Z--vqKGg1G_r*6`sCU|sY1;YK!!;zwx*32zG{&dKsxE8ba#6I zml9%20O=QT#aAdg_P^rOUs2<;u5L6_sWYaa_tV@fPlih_s#pih-3UMMLq5F}!?IY# z1d*dhFU3x+w1w#ci!E;p64}d@OAsAsF8zsu#UAL(Zm7@RrXKXV;~}~^zB5L*ypsZ? zd|D(t{fn>WbE)d3;4ZwV9LTewPW7hU74;COPoI8Wh#}hQA+n3wxqzYKVgp{H;G(xZ z=?_Lj%^;t>wr$79C-%TG#V9-5n@ArW_OE=0%ieO`x_Ntha6XlBypMjHyv*+5r1}~K zyJa(}j$N{kcUF=mcAmJD&MXBn{{*@yJWc7U&#*V8Coe72DO-4=HBRkc+lj#>l2hdw zoEdjF`@P|G(qjK6Q{`RV>0LX6PWnJ=y0bm#wKhh>>3B5gZ1%Q$==P`K#`(@lIke6d zQNm%BDA&bu3OG8Gz-i4{_qd%fj6*RVclgiw=`dzMP$ayDN`n{ z^wGrU(n=S*XR@BkBv+n&W-EE2+1_l|ksfZ|-1M?r=i4S_R;#XU*WlB8;2tlFMMVHv z&5&8`%UR+QtEG=SL{0i^wy=;mY(FXHjxxn_D)4Tya>rrF+Jcc>1B70eT}?zE6@xE6 z)c99@YX`))I{cbyhx}oO7)pmlHt6OB&&?p*^D|-FFkCTWhAua9?FR_N#i^Q3ftWn* zVuIvZ=RnL6xEP3gt*n{pw0A2I6MgJbASOq;$qd;Ti1EWNftU>I7>Jq2pHn966^Pp{ z+dy2}BJJ15A?Hc+qSS)xt%!>wFl6>?tcc0uE`gXl>l}zV0?#Qc_Ns`F*al)*HKE@* zv8JskT&ifIL-4PN$@|wbLpBCt{IE+PCc`=gV&?JZlnHwU;v>5jh_O0$ArKeutH|uv z7>LQ^E`gXl>l}zV0?#Qc_6o#DZ3D5cns5oKL?2Q!HbnRUftYNTmKm}!5aWkk0x=oZ zF%UD4Kc`ICD-a*uwLpy3A+=y#Mf~+Wa9<2nNlOyhv^8O*#nXb*&PfW1G2JMeJv|rjhUwu zk^q~vJRAixf!yRdW#DgS6=1(G!WsB_fE7O9y>qfgQys)3_=SCtlv&SgduwM9H_=MF z)dH@5x4?aim@N;qjLi|*4I(rgzKP>vshmYn?*v>f zmEj(MN%vfjzn1R#u;*NNu+xh#S>M07H=bMHvo+f64cJ#B_AaZ6#owQYt22_v5#5vR z*0eXAjK-Z|cdIucuU@=ko{aF<&Go-(qA}*ti6IBptDsKn)sWcrm$Gq^N3@DAdEK-| zkvEpc&*eSsx^eN2w5n!`xC1p)B;I1Ch$|B_MbZYEDdO(VOp#b3Z&GSp=m7j}coAz= zle69JUZ*?U>`Vszjl#07C5E-MehoFNxpnFP><9nGJnUyij~B3QCyehTN~2G-6Hltx zd{WKk`4?+cHgEm9k;yl5n@_B5-Z*l;H_A+r*u0q{ws|u}V)JH-*yha?sm&*_usJ=y zjg0nEIM_ZAG5^8eg{v|{E5AV$FW!mb)U-c6&zaqU#~8y@1!You??tDdEOa&o-N_`r z94?EWmz~Cx5y%dfg|E-U3$q{ZMXwW$#)|m8bP3w`c@DYV>Mi%&w|aQ_VmRL>J7F(| z^USp4#^Jr1_hS#hkE32qFf>I-7mcl+2>%BW9OUN{@1s7zIh@Wv5))4%Gq=LQ&S+=4 
zy)*4(>`&r09A0f)9aZ^<1g#DzUykzBB>FR^w+%bLeaw{qsx%EYhT{#Sv)qt*HaX~(x}3`SG!D1^oI+MTy%)ZD)_0W zLv%DgF1HAK#iE5#iKN9$N(` zAQ^APnbu*;eveLJOXhV^2GOYa>ZK(m|Z2NkC7c2xr2Sbgqj*F zmsl*?dP#~5( zRLR=WQe*LjOD}`PELC{djtu-s<7@E$ZCNQD@Kg9HWKO(N=7_F@mQeFm+2= zJVyLzpkg+wVQ%a`obz&{iI*%Ru$;UVopCqG72AGcLe{&AM0 zLPg(u5eQQT=oSUc*wcGX@+)uq{=3 zGo5W_n8w(PMlPPaJ=lZrm7UbY)EERW0W2wlj$H~!pVP`3cOr<%=MCRmx zh%&WWCkJE+5K&ckK#q#D19DVkB64*=Dx%D$oE(QCLB!-u$HxKL`GgY{MJw{k$$>?V zgomhjI_{$4>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABB+#WRZeiK?Ch4j+;oUl z%rvqaWe@~jy(;JCA`_8YtYYTo8ml-G#G;%WnT;_%4#?68Cn^ftxyLGwgomhjI_{$4 z>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABJiwptYQcdQ8~mamP#%rhgii@k%`DH zRxwmvV--h&h{+*V@pOC~u%1{&@oV>3#gXt36;H=qR6HG-Rk_6~j;?F05{M9SIm9ZF zl6$NYDal0U7OTiQg>$T82oO;@#447Gd#qxq$VB88s~D=Tv5F%>#N-gGcsf1~Xo^*z zXh5a;aSJUxX!Qdu!eIh)RCI{Y!Vt?P_dt-D$t>+R8c-=mK( zXC@Kf(kd(rMw1EJZ+953gIAW7wAJelJCnEUbjQ8TPH#LOja&JW=+Zy90xr*DT)JT| zJv}eqx%chMl^Z}M3M-}1D$woz_$FfvW{&8Cs1AULa>Q~9pex~VjLR_;)I96BI^P+ z#L3FCiQ`*)*RI-Cwd=L3YWIw^My%nydjD1b`~SbHU3K*W{_humB z_SWQ*6O-Oxbp0F7_D8d|&M*JpTX#JEttk88y}Q44$DXe~b}swe%IChY8{Q6AZI6e8 z&2!zey~(saJ~8c2&h}=5@hIMV|FE~&KRq69^(U>D!mjD*-gZB}eCE?L5-`?tvrv3Jl{mH05Y~MW^%m%&T;O+fMd(sdwAo5Z3SEY!wd@i2R0MCZ8&b#BnZYQ|dE!L>qSDBV&{$jW zVf;$M_`VHTj^c&Z;y=CdXPybgNAGFEhlg&YQ@n-MNz_tE20q+?`~3CsKxT9gVw;OE z;eJd)S#fJOM8{CXX!@uVnai}_-|EDTt2wKUS}!lz&OU&(-BIrhDs@oq@Cfpk!k*3XXod>4{v@MyIjOLiAO#<2z(G5{+P6-pgHxk;34cv2 z#gX{iTGis>JP4tQde;6ts7|(c@VCPUu9K9 zrbD-0Oe(3%Huj@RDy;|CTZ?I*0sk(7SJ+ISb4H1OInS-+U6&H?!iDQP@A?|~z|S_! 
z29>Eoa6l^oB#v5E_?(81G<+%j%~j6u@d#dJqv*EaYgWQ3Jk51}n$~3<{)0u%xTiNm zbxFL0zb2|)IjItI3brge<@Ho|Gt>x9x5p@A_E4-`;D2=@{Z-1)515p(fjk$L2=VUx z8*|jlS+`en!QYjz7bS&WH|<<|vv?_9y!Wn$ZnFgl*5O?iVdPkEbUI#4|39&1zL=n` z0^r#Q?ly?FyIQqk|8{YH{JPOsOhK>%(Wg7vQU|M)D zzRJz-vmbu^8I~V)^BatpqKyyFi)ADK@dmspgvD{ayV%NyvuR#I>hfk??7fLIHd|hz zW^n*`rV+-bX+s~zLr_Z}h7fA16b(6%X5=MJ_*@_X(#Af#DTfiIzPZQ7O1lyt1BKsg zz-MeY<*(&uHLidg>hwH%%5`vF4pl!>W z*z8*iB?)1J+E&6#tW}YOOw4JwX#z=#5=Ll5tR)vFvYPw^J{iGHW-G}0O?H)tYyBNi zDI}kirlzOo6TGI;gnw`DtMcJE7v-EqLqD~?05}}sxa7$qQUJeT#^7>dqFb6Uxcsri zS>swy^e_XtA8M2W|Jn?@5g`d4Xep$^|2+88aqwlU+#&!1MjRt+Oshqwf>Yb}lm`Qp z_`n~S!5XBa5<*}RB6Ezugo3aZ(DDQasFdLETE!a%Con-7g%Ic<_LzkYQqZZ26fm(- z+s!*w{bV)A z>NdpCv&&i$FE1-KBQjXQ3MnAGi7kn-m?ICGkpl8bjSVW6DJ{gRT&nbuuN6{2*%JB` zYst;Y!flX(PS(|P3gVCgrml>1Yo#t5UAlO14WwX&6ksZ97uB?MGWT?89&<)Y&gp~{ zYM?-YJn7V+5%Ew zocwCr4lCf|6W{q~+W-poJ>Ubd!3sJKSOJlXamPd+KC+D?^Ti4}nIiH%1xBoZ3c(s8 zpbE0uRdBc}R^XB#$sh&@fW?5dID{2eKnCT~Q`lVvWC(eG9dK6x8A!`ZffaChSGAPu zrAR3_atqk(DzM>&Yp^QsDzF&=V+EX`Ol2dpqo4Sg8^}QB$AJu1SOIxbX9xCJfeke) ztiXzpFIGSX@xlrS5uE_-u>u=voGF{zTpN-pR^YYZO$ITxxi-5B2r-RFEwKU%N_JR* z6`^4EXowqNgB6e&bB%~(CDJQsmpTkt`_Cp}-7b5)Hg2=4fbdosD$f*jSZ%y ztg_$m603YQ(_|Gky9y{dzH)c&YYK0Zx;W!ud zn03h;#eUy<@)97zYF9z7lB!q%e$0#&1SQgTp26CEox%iS3trH?xH>L03FU?g%uojzzT?5%vb?q1BFICHyBv$D>z&U zE1*IsCTXh^E3Dv1Rjhz%ueANV)4l>S8w03dg%uE5n);#5yn(>*rF>h!k0)t={Y*Gs z3ar4T^daJs;~oL`6_9~+df^(Z%2p&= z0)vpO-at4~16F`(&J8QT!*MR^(gNnQE_vhJv4UJBRj~s6m>Da`$=8n+5A*YL^vU^*%RMu zw)_Q%Fe#@O79#GX!vEL^X!aCc2FrHDgKCLd6|9QNQZ zUVUc~!5=o@u25kBqYwct4ZY$JkQ%aEgEUBJiTpzgd`AZI1+*@VA9hLnMJG zA~xHbwlYNUnFbuGW}Ph#LpgzSnbKP0=F~>Uf30tp7VF8ihT=?TSai;IM?t9 z-?YzPRo|+>O?P5zXn8xFN~~brvhR*8ArO?UueVg+hIm?|ZP3jRWT=@<#WHO$#kK&(hADledpY%tY{rEuXS4l;ICEUdl_=bmq zHCuhcJ0t!`ZY2-BDY`ae);%#9_qH~B(^-1Fbo;6P;MD1v{6JnsCn8y|Us!m@>)}1Z z(F;x!;qQ)Mwn#KvSC`JpUxLN8^n~jJy47T7bF+^w zCRx09eBU;FMhJOIMvsl}7>;|hHn~P4IsQZ~^HMk(UzRBZn6CD5i*`zG?jiRYaHl*g zKVGsbKI|!XaPokPl`{>tN zIS|WC_7Z+~IC!#uA?xXnBH*r*vJ%{Q)SqUYwX=j;H-F{&gpJ{v$_D<6A9r(!ht6Jx 
zYR12aV6C!YQticycMOKZ#9RMf6fzl8S8H7cFc@w1pDv0kMOcNOY`~3`c|Y18_BYWf z_4uo0CCGX;j!iz%Ao0*xAt#ko-qC=5WvS4frknROZD}z|{+MmWy>OlWe@C!Yw+NVm zytYEAT*F&-O#h-$p8x{(Q?&_>?rW*ir~Op~n{`=Dxr?if_p;G6_++UH9H>b^SC168 zgjy8>mVv|AJk})+_^Bp55P&P1GD*SX1tiqPZ8EcKq}hDtufZZGRdKgJX#`fxJZiP` zBw$&K^^VuGZmn$3;FH3(2S>N24v^Q3H8@ogmL{$%Z84Ic z%I{n7Rif=NMiCVYpp&z!_w)1cNyTQv>Uh$3g%vj&A!A%AfOR-@B0}S&$m@Fa>EA~1 zYAe&u2J5a~xJ5tW#16DWKp^6y0v?zmhn0k+#FKPDVBu-jIQpGv1#^bB_g>IA;IG+Q zSk6tnJO3u()-`RM(GoGFVxuBgDJ)$Cu+#jU09jcf!0)G47Xlx#b&!k~0>V~TLM4Hj zRq(qJzadO%2(&^WC3+UJ>MFG*Yo=^Hro$wEuR2F_$)hskvaK@FDh9WDZ`M*?M{=9} z%uiw6CRLxpsZy08DO^x4{k|@2%u>!!z!mLMQI&NLYTni3qS?v*x$RQg*5GWs`+Cb2qbe(>z=!1ZeeDNAh3|M7yi-D^1yIko#=0NH)i#*ufMPCqwew$IF*$9}g-WQLlw%_38TnkHW+8L#6iteElA?qsp1^p(dOR zZ@^`3A?GjjRZDP5=B2SyRAZ0Bqw&KvX{@`d)Y(6X;687ip~?%VSawSdTcc=tN=eZK z|1je37I(5+eo_?wlZraF6P$EK;pgq^-NEa{BEC+!M~+ zP%n#-h=m%^wKU65POW^R|4A>-a~}2gzTFntdfGeI+a6fA1ocib4R;(RWgNt z9ECKyd;6#2Ci=fBh>L1!loR+~1V_WUMjkCtilF#VK%J({bFZ}FXe;Se^YY3V7=E-- z2lt(V`#NM>O$LgXSXjUZg9b->Cc=F3L=E!oP|>Ko2T(opu2Bhs5;5Y;)JqoF+_n7S2HYRg2#*bheRSC)`v1f~G%)NE zqw&uS3{mVNG$5$Tc(hyL7%KH<1GGYc-YZ4#lD~9e$7Nnt)4uku*p->S z;hX+T@7W6@Gj^(l69+e*q%Of~&P|c{E zf=!j3|48-R364&{DV;gv+2IW;0FFc_n#nd@x#yj>kfH5Xb@)^W)uXrEC91dE;$1q! 
zpkShkAq2@iJRcX#3I&~(w0o{^g@TTYUD@9yc!H>IbXb@m=Fxk>gk@LNFzfr1QPm+(iCSm6d(lc40Jen91+I-Kb0NRJ|YB19QM=a^SK zMHvxLoJd6hvI(6k&L*@*RC7}2L=dmy$S{LfQmKaldFd>3_e27v&3ZtgW~vGR^3v&ME{YPS z!bufv9RvY+=_Eu&(0*TP;dH=L;YJ79px`)th0MH zm2bYl7aH(rT?ToEbngB_&8U(GQK*|xW!u?+rIjiROZy;O{}vg4GKxfm%ekZlDC zpF@Y))@Yh(%ZAjR)M@_p5$7&^u@>ofYBWg-Pt~imWWM>+yE}d^#8!Ak_{%H4qEoKT zmQ!G>Hob;{x&C{N;etVUYLf!Wmfh84Tvx=CR)_2cKQ|Z%zSRK#ZHRjNX0f2!#E+37 z|NZ$2Rtz|ibp-#PQGN9q-GN@eLTQy`4>h2D%%|&e4zXp=0kp?zD_qm9?r*7$dY%{H zPn)n^ugJ7JDQk14oWEpHn*>FJDC~7YckZ2rlkrxVoqLL)k@IQqBmcDtlX)wg1LtZh zKvIV}AL<@ab#K9m_${@n8}Vqb>YuJq%9{~5jC$p8+?{A+SiG0{A(?v=L72wgt=_EH zJvpA7>7k>%QAED$q1#%TyQip&!p9r%7F#c{6^$4WDrX4WfmpG>t>*Yvj^6NRP5Ufs z%aRJXVnF@58(7k_mTfgQi&G@1LAEaFobj0^ytN*0DVvvDuCP@A>pJsPuTPF};7u0< z;qwjI{ZHxkPkag!t7*;H_|`d`6@-iWtc!3B`CL<&zA8M-<*L7RbjrbAv^jM&-5yW- z-ClNe5BIAR=vNW^X2X8hwTAJsT#-vG;ol~&R4WIaxq@bwUx7F`4HOZ~a0hK!h{6?$ zsSp?7XDVP}mO@nIW!Yt80h}OtbBL@VGXnT@6Hc-E*KL@tB%5*+X7aA&uDB3lvTtoG zu_DW@r)h!y1Uwc$QmH_<931P*wlrUdPc>mMCslF@)%xv)N_-!!=HF$9kjFSzonGzOxSZ^9l<^w6{bf*Wu4A96TtO5LXE> z1svahTEFQdA0!nyuZDux%8lka{C(%vo7e@%wIYQ*L(kBInz*tS2i1lfD{~NHc=>R5+@{EILIV^i5Y~?Nm6} z)jaE*{3?8;!X9=r+4@tUL@xGQ*lih*b!_1?V3`&B42X_mwLZ$qmg{oM%_Ts|6d9n0c=lvc4w#H& z3v(2P89H^zm&bZ0i7uCKd-3bZ_#v># zd-h~9lIXo+-=2&otMZy~DX{GRo`t=PfeS1HmU&LE$MEXOWLz5(pjS`EkC!v#+mrD` zMMtEuvK6`~=uti$zWRbRB0tn4h5o07=8 zCzA=XN>8>LpeM5wd10v47sbk=Tku{d<3%UBT#%lO9|Bi>o;{h2WPqNGC#&+Ba4E3d zpLq6UG6t9oNc}f7*vOBI*|J_=@b1a@(N$^-w@${7N7|SXI?--IGb#uCpf-24wBpuO|}-i%wbSC3N2<49sw(>jS-e zvenvpGJzqLGn;#|)w%R!ANAb3C=(>plfA;QLo+}}W+`w}sl#_&=>ntd1vKgi-W?e) zIMLyPbY%PxK{_%S$p9T0PgdnK;Syl*j!eb?lmBY#$oO&R-I4L5tJD^*Bjd*l)RFN- zMMtC#AAFUJAD1v=3RCb?6|TjR>>A7huCgDq3)f!V7|*uKW~@;Ksvr2X2#&GY#{Tu! 
z>@^t6Ud;OD0Xx~#={qcw!QW}vU%@AvPF`$bYY+`T&p;3Qyy~TtX>y+z+4B2J@T*Px zL*y)FQ7+_}1Qgf~l1ID*qSa(!V?Fxf(L?ZHeAM^FBS|c=_FMRX-wS`dX{o{pmwIZ6 z4)ammDXO@~;nDbEZ^d!;V%u|l(lhgvi!Qn|(fYVRXV>MgM3<3q*p=8UHx06dFY18s zKEKxvBuy*RYhA@=zwwD_e{!}r8;nQxH^9im!kLQ7&oos_6WvZISs>ImS3lN(H?!PS z&E>4ADH5As24V-m-}?cJQq96YYq}mohD#cDmMYbS%4zIaxkMiJ%4sY?QHQo^rb%6| z6*~8^)5G7xKk<8ikFx=ly10zy^ZG6~TF%UBQlrZ1>Vc(}n~ zjy~0&vFwKq zE5Kj)gATD^y3DakS~Xq z!k)Bn?hQu$Nk$M|1&qc(*Ty#1761iyuE)fIofZ47)9K*UC|<%}6FtX#RxcCdbeM9x zgU-N0N_%oJI+Y=O)F^$~z5@0m#SYMkll^YaE&>o$mv{YY1S?uqwkLgbf@X1LkzNc% zl=EdnHDR^8bmr?WPd)P^-tLl5nR6Fk!D-Fj)fqMJLd0(8F8QZb?m~z?|J;Sl+|FI{ zPpjNAm>a>DrB^Sr9?vHVT(0FE$gNwwGwr)^_8Gh#Wg_%%JS7?EW}1Q8a4^2kkbzjj z{9Wr&yo@SyT%9kXtJW8IBUNu(%N~JmMesV79~^tx-xxULypOpqJ~E|lS<>?%T9G^3-Q2D z+C^OOkc5fnuSOubg`1UvL8;xyq@K#vR+a5qpWca0zNNc-!Y2n0Rp~9>F&GYI>%_i76U-!$95C-Y1YVe-K)#pIF9R5iw)#(ZXXnxd zi$hHf7`2|3v+y04mX_BCmzpFiGF}{&%1;5_%D7-%ONYNK$% z)ba!*$x2>ks!;|kN!=Qgk0JqG+gTlf6qpp!mao){eD1@U(ZvuNC^_1=QrJLpC;{rK z6vl*hZJO~5?=K_0@f8~}LgCX+kjZuMN0Dr6r$5hJ67$lUJU{V~M3ETbC!27T<;AM0 z=WSW0(HN((c^FIba1HZspWjsW-y*oxn=3Avc^O#v%q78f;4d0*mp9X5I`bD}Qip#i zx~KD&(!LitpyQ=5AS5+^+0QAhJ?jG1Ans`&T25)CJ8@X1$_WPnYrgIR*07v$L4?W2 zk*4j-_L!n}gWJsHA&64=DdD!?aDi)BHpQ?}9Y$cOIlwjS<7I17go|omOW{oJx`1!H z05%+RLXhy2bO@ewmYf0@)z^Yqe$n@(diMee4$nC@%rxxj)_@JRIoI&;ZO1m5VN?td zP7NDF&#g^n2$Zc#g0BW~4|(^k(J!Dj(WyaZ*%QKque^guLdUZkwo6ZYldt=#8e^Ag zo(y%Kv0X9^1^$T}FA^ZV`1EOu-LPE)Z+TPfumoTrESiZ6wrd&!H^im}#S8l-Q)rd2 zUq@`05X@|it7N?lt5lDA1G%zx*bUp|SPqc1IC?Hgiz6UOT45|ko}*0GT1JQ+v0a`? 
z|J23ND^i!2)f$A6CT$0}j+?2h))iwD3GGZT86Mjuvohp5M{Ji+&%%n7XAawCI90xe z;)Gz6c}QMm$`&Zyuw6n`$(=j4Ya}fiiwm}E9wY#^O9m;YD&rc88@5XZ%(cQLv$^CW zFAH_Vb_J%zbmo9B3KU(iU4g`fpVHj0U6D;8srk!(PHD{z+wIha?P9{`5!+=rtw#%?7h7AYAk)h^-?J|!`!bQ0XEnLGxvhtExqgh96mt{@} z5`K~nA_X14ZrCovudMbX^{t2ILhRCY!*)5|xbPs^4CTS0(d+rm3pypbVY?ioVt{bK z8aaAyZ88gfmR1acW4n05)z_6Av0Y9+;wFAydDk8&+^}7GyJG#=E*Z+u@3LN{ZMq%z z?UHF^O-eb_@z^d`a45DXIW-+*7OJ+`4cjGD6}-7)y9UCgme^t6u4#w>4KW$Qp&@qL zw@U`h^ueVn&ae!qm#ms~LD|S`TBXb;!-l~P+a-fw6BOQJcL<^!J(r}#5h(k65tqG0 zDlcR$6f2xCRXRFpFeDuOOa?O4dB%3hG!*#foUmOoi?G-(p{n4`9osb!F15rC*sf`a z01Yu2Vs6+j8SuiucFAmN#dgUcp|D+!o=ei=2v{er>ZoiH;9c5hGD}BnmpqQ8?w`6i zdZzX@5a(|DcDb3#YF#llk=oZsOTki@+nEezp1rYMGK3;O@r}1)(bMrv1~PPJ%QB6+ zW4lJuqOrJOyXHa8Zz^-cb_K4uzDokGczKryue4-qkGu&Q;^Ot>?v}H<;j@a%I8*JAFzQKewkS*_5I%vub z+hsW8f`~OxEr~WBf+)or5o315b{RIsu;G9>GSpnKUFPwM;le|*l_6tCY?ozD2oiph zE&_yy6m$;cwr`i=S6UtA*GhTJ`TSwK9B*8BF#4%=+>&jTau*i3VY?ioVt{bK8aaAy zZL%KQ)zuX4(o<9vI;U~Nb~*V3*tg5kt36Pl!)lIKIZ=$eBu;ziF6oBX=^?u}z>&CP zIAm8spKporO+e}Kl~GG_I={YRjNmUTp1UELPgXYSBZ#D{i1;XCn5Qg~cncEV)r21k zqHJMfWu=o$R1!^UvctlKyalL;DHggE_gTKP0e1({giw|Q(L_jz8Ds-!ZAo%>E-M=T zyl<8D=;{@&{x?jhb1rpr>eg8xe5TnzG`cc~7lBZ{R*f>*k>L(RTe+f=%{ zNKH1W%^tNISN!!S?}`le4)1A%$qC2-g;*3wAM=bx=vuR5Z0s=|^~+fit#ZqaFsX%n z@9mVdPtqz#UFAlAt~IN4U7egqF7|K%Rx`G&s__*`DQXEQO9bEZ$+d-2g_FlfkYD4;2l9Cd%1E6qJv;DL>H)F zAiJSHdz*UL?@dPN=J@Ue-SSQfkn;Of@a!+XmWL)(YwkUGX*rN*L!Ic)daLRoPM$nz zc?-IJh>m&)`-|2$;pGZ0`rFgNa6Hls^4aS;cB1;kF1S}Q%Fgx{(npv5E8pR=mtePV z-QFIaOJyAIq2DI2w0k(IzDB`r*-WZq#q8tV)ntX8CoZKkOF_&(g)RzDQ@ZLi>`m#( zE6Q}r7M^I0Q~TFWVlav1M0p0MCcUjee>9u6*}utDc~5V4&(5%$KG2@+Y!CbG&GBe9 z84tT#{p~)wz-hF3uDe=}Zwo~Pae%>dU9@7@mx52D{&a>eW?xOHpGXt%T^ zaK3U=W}@nrBD*?)$5R*J)4Sk)FN;M*09nnDS?#qfaf#K^#~q?3eKuQINF27G6mv(J z;sq6Ww^+I3Fl24PNUoNlS7v~E+)QSKMYwj^qDFKGzwTQ*AimY%*Hk;?4?DzAIxMn5 zH!pZ$2I0P+3EPI@(ki-N`H*W>$A);SP!1YS^9>{SsTK3{>D=;J&EV)FjA%#e+N z7(eV1h{>>yftY#x1!clsf%x$ACJ=KB{u#SP4dHe-s!d`*+h;<;Q2(iO5 zMiv0K&PHbLot;)Co6JWwp5k=)LHywYwnGKtir?+8KV1!dWT 
zvP5ifV@^f2fya?HLVIrng)i;`$%X6=hK>PQS**T}m9NIkQwm9d%~~Fgf|)>W@`5t( zx3UVbUl`#Ge8X~uPlw9+?gNt5H|n?^!7uEAq|AC|+h=x$aTBeyTP@(~cMBYQLNSJM z`I{3k??1`$FjPt8Cw`gS<4hOtH4&zHpC`G}{5%QLg(b@HoBU>o2nL@c0ZoI=kwDkQ z7NEpejs#B#Hb;W$1e_x&wS&!(G%v**i*;Jx;HBCr_pJzCrU)RyE^^7om@QI?Ce(%F zLcSGK!JDLXfnVCA+!=^sa(7i5mARc>x~SYqE=J`6+9`InH<`Ap@5l-s@x)yQ$;GJb zqL}W*sGMzZZK(ndMGriqb$UVi_i3Xtid0KDJ|~K}oChoAZqh8=NkZg#3fAzzsi%e&)+ja_HPTm2#XYQ%0;IEO!2gsU@>M-koQo%XCh znvN&kQSVHDN?yHq*CH9=ubLZw*+gS3qVqoXZB#*>HmV`98?RvFB#&qnUGj!$jUsO> zji1YV+70949cfj~6mbV?rbxWSOc7TmW{RW@G*iUgotYxBMBb#-xX?cMVR$KPRnybG z?S8j6+Uib+gU!OSt|NxEvT+SHtA!2e|Lh0<#v<%xMvs@UZ6}QHBub-Cv=dLN*nCpW z=J^+ER5ow@x{=8@a+^=AZQeL?zSqf2k=VSMBDQ%mMPl=2irD7O6sgT8u(UNhw~dVU zGT7hQ7cu|A--N3&Lo2^Q6ffV6;?!&~JI9&bz9$&NR0U;Hp6*6REiH97hrQ`Ez8nt3 zmu81GWdyQ=W#PF+cuDr-1L$?4@k9~7S1v*OKF=bzTf6Q42iFd@E{5}MvJ>`VIL}Nw zZXDXJd98IH`~>RN1VdAVbkW%AiSU0I!G3-|@gC{}oWtq-BQfzLGIKlZ?~Zq7+dH#v z#{M+!z@fFq)lrpyNYLtl@|7r0T>>y!DeVZNDL2634pdMa!KE$@B0y;DC7JFpi%gea zFYthL@qm8bvnQFCr6#tdR&Ju35*@4 zjF~EryecoTc95*R#U%^)eQMBN?nVlbzwU{Iqf8FPOu zF!vhDoP9kgviBOr)buVQd3TR-TbB)TB&(M+cmrk9WTK>$6r+Mq`{jglSU2Zt%JzoOyjlv|NO|L!0 zkGZ z{LhEPU#tpGbcCM|3qKzbetwPc^9{n!Hwr)BB>a3cy!Ucg9E=9DoSnnm5(|&QNOluV zr-M_Y<6)0zZBvo>~@zXqYOZz1DUF63*;Lz~O>?-pm7WHS)ywb8^L71(;nWrjL;w8M%Y~poE$lE0w_ahMNFrkIZ#A%-;~p}VU~2+e{aRLsdiXL%EWS7UQ0abb-FQ6%bOK5m2<)_i1C zglTS0E;1tV+tZF1GJY~D@&hv?7dzqz6?1Zt5y?Qzj9k>ukSXTlMktUS6{(W7qm{<; zORoEWzw??;U)xwn|9k)c@x^_6e&p~E|I%~sfA-cN{Po{kh9A2!{?Zk&22FG#R=fn! 
z(<{-j^y@kFH9=qBj=rwE1kf)R)1{lWY~^PE8A8VNY zv37P-`meF@;0xQ=P6TcEJ?;MC4&Ba|y%X{|jyJY`?`0^KBwQ?2gWslri zJTV>*6Lgf^%GJX0a`Z3oG~}D9Qf4RplX$FjrrG9!bQ0)?9NngWUqQmig}8z`BlOWh zc3xrzf5(J+q?%Cl(@_NfC+yz9uQsrwrQD|(1arTH2Sz_waDNc?qDl0+C&!aBy;;08 zolVfen#mHX;O~0qwzd7R4}XXL|F*jxLgVkv$G;OvJ3avW&|uvYz0D`LC*$q<$A18> z+?b4eTbsS2I=c(%-CHN#&n34mH=7V6oF^ z?tz;CU;dO_3Pg0fvZb6Ablx`J4YGu{a;#he_%GNMU;}o&;cYv;VaaWFA-B02R=$r8 zxON4^oy!WZ*|+zS-4MSQY4izLykmeGz!(;{24|)#FhIv9KHW`Sja4zSYBD**31dkw z2lk1~MLF(X2fOb?$5Ema5>>MleH**Tp8W@qcfj>P7hluq3`S_dCKak;Iy6o`t?Nm1oBs8BU?M0>GP?Pqw0M$-5#S89DCII7x+J%VE*u;LYRADPiJ>3%;s_o zuY>f>m=bgP$aNqKU8(%9hvvQgQ{?e9;H7~NO@sLA+CG5JayA-@c7UuV-ChW{!QT6a zz0LmV@o=j@DccTjDl1pOF6`&{OR|_Qb0K%SskV|?bp(IEA}RK(fGQh`a#adF!3 zZ^g~QXqGgtURb#deH#6uQ~c&C&Oq0eVX3ot`TnS}a-iTRxBGV^=5K-B5A=J(+XrWf z0Mf4(ksr$s9!;Oed?AC#odgMv98|xubS(R;%3M$!%s+uWinH7?6zh;kFiYDgk`A7} zdN)cI``E2j=r>m&L`B#ZbM@}z0PMz}n|AV5LKM@(OZaVSjTLUjlp3riZP;Y%m^O4XFlF7fHYNQ#(+(+WoNWXv#F_m&ic# z@V}f!=9E7+TD%nKuO$+{6Am0j@oduHx_xl+WPgH=Z|y4$L**fFZW>9M&|xkD zu7-@7L?#E(UwWg{gclkhie$0|u-sWnc|hS|4VKf7XW21#r8FgNaXzJZb~ByCkke;; z73@JfuV({vwl2yI$LRMw-+VqOJ|*F23UItP8T3vJ(H0jJW3u`MXSK9m+Rw)+A>*O2 zn40RWHyEY)Brg^k@xscB(OUcI)N-=zj_jY1XSJ4AuV3w4f2i}~v-q2Xlg-Y-!v|0G zZrHl%YUS@0l!$#U+BXQVa??it%t$*0x7p|`0v004RcgQcn2TGN0}MeqhziEdn-8E6hl z|ED+p%rk*_iBx;mLh;din&_r{Ha_CxmfHFFa0Bl1*9TuA@dZ=DUBKd|@2qm*V~TnS z2k&qIz?B#(!pQPj(^HzbkV6FFqm2+&JVp68O~OeCkLW!egv?+>q~*0MMe#Znd^(8W zP>RKGb%r+G5F$ic4I)B=fy1vgfIF<)$+O}VQVPK}Qb`apf$YyDyw68qT8__garLD@ zhB4CRbGF_>C74hADcv+C+>e;wozMWywnDCGzzDtd?u1+%`~mS!HW$P@Uy))P~~MVbn3U3O#F zdVUwO>vl}|cM*S)kj$>q>2uCl@(2QH#7k@G8(Zsq!C=_wBD$(GFF)Hb8&swW!2#{| z$2w|Ru@A-|6Tw#sOLZ2`$0K-^m7NN{W+fc|`RNQF{)0u%AiygAny7kZ-FsD~kdr17 z7Klyn(6Q{Ov(2W(vnR6qcgQR4DMLSCQbxX%5R)|x;~Pi$(UOV1nhUjEDXethbq#A~ z;awJCBtwcayeP3{zL=n`*x}iTzhPSv(9oJ>#rB;E(b^`Pzp|e8`Du@z_4zIv>d0i% zP;F}HtT@xQ@Xvnu@n>2-)8s?Hsv8N$OVP$NI#PlMt#YrA7{0}eVfc6h-ehoQE4^r2 zdG6MDFPm;j`*UBF(+-{2zq%}3PwNDDrV+-bX+s~zLr_Z}h7j9Sd5Ku%NS_NNK-$=c 
zH{~#*HydzU0Dr~}ei$=CEq)k6!R*l_#4ko*+?Gx<8`oP1&2<(gD#a98 z_wQvdVz&byYk=wU$0Fbhy@s_=k`Oj1d6A$fsl0?z6$QZI9;JN~NK!~nXhf_f7l=cX zg?}=Fo6KRN{?^~+O(MLf7XH1tuR`i5wf|hDWoTr@=H7j_Ezu9rFY;|8IqCqvV8-Bb ze)2u3))-v=SmLblz%YQj7~x-=VK*Wq!2?aOTmG1?l`nkLgM%+yO)X z6PTckLI~DS1?9#HDWLn$S@gF0LcbRPK<2tc(}?8+Q<1^J1}PvP)(OxaDX^i&nXxa0Vu|Wz5F^xzqkpc@! z?Q}DM5h<`Dl4k;-00~ZW*K&ESj6p#;#Es3$1BM+L90`f_X4TcFa ziY~{{N=s0RH0>La0?L-qr&voa5YqMEU%h0DF>aH7{jP!f11qE;=R)v6OCbf}OUJ=iEsz3oNqL;Vn`K;QM@^?4R=~xlg4eK{3cv;{pto2T zy9q3Uq%Bs!*Z^8!> zo7-F)lHOPW8N}G;+F%8Qm`0?QSb+s4JFLKp5VA*uOBN~DW!0HX0&K7X@?oJ7VP4Hl z-v_v0umUn&E3ANgxH48i9yDVGR^F8+VSOIQP&HD;Sm}#fd zDb#u%u!6%5`wEC$jBi(vTMv0IJwXF2tl)4Ztbhu^8X}-Nv%(7Kd1S0h>#za_snz!L zb-aN4j^sDq{sU zBVeo`59Lj#1#L1+s%AHQfPDkPYF_~vQYSlmtiXnv6;@zH$QLUhgLq*DgosXn_E>=p zHO`dHSb+^mZ>)d}VrnJ81}h-MG$OUc3M?quVFgx%>c$GlB&@Ik@?m`=a=~B)WV%*Z z0r_xctbjad#tO(MH8x1gBEMKKU%WJdPN;eoUZl3#S3ubkIu#>UKpr$Xco?Br?JLNS z;$_RsII;1<=46_7A<{;`51 z4p;$^iy13mY@o2zd0+)cDq#gw2*o6=U-UqRbg%j)vitBAkZrcpgC#+xV00a4n zgXxP15&GBizon3g!CFKD&~b`y4925&o58#P-p@S~mNR7^ADF8GL@2+UfVjG4URF!v@?HDhyy0BA}(AR~!OT3v0I~!G2A{Z#bn6NP>+-@R8U$B5~vbQR14UIj9fxKSU&g-w;l)MN$}y+GqN`QFn?jJn8l)lkuco zzW$_qBT7l^o$*B}QxLWAN?> z?qKcLZ88XDdXn6gzzwXBK`SN`-m|eAO?Y7~T1byc_Lxd`(b$xT`r$!(SbEb=~YMY`0ubthF*gjXcrexU+EdVeYQiI&6KeKa)5??XC{IRNE@XMQ3#AsF$ z>Z4z4i_^T!xmv!$$AP#Kv3himSc|X&Z)>`C`*;d>O*XjRv1Y32B zfGNmpE0oHGK9_sdp7Adl^$8$gKUJIHXwg-rPy4F~HtVvQau-({?`5O0><&?rfNqT* z4mQtq(LM9iwpAfu88|de8g3rz5(oTL6CMb_6-}9>;PCf$z;*)`H^KJ(Y04*e+d zmEel-Cyl_0nMbX5o&@YuA%0TW_TcE&)B*CEu?D9~!qPO>mDcdd-)g{ye+}6u>$hIuUH(e(2M` zjo{T*rkxGeUA=INe#D6#XorA6#76}@Fhve43CR-?w8qizM4L5cXnSw%I3R<+C;>ky z*uJm;zh-M;IXCg{{F{VZ*R*j)OT>_hO@%BJK@Tkg*lGTG0LXsC)AK^{Dv^4A3bknNu!cdj!_E!uRV&NyqD+n|2T7MXa=9v0Zq1Uj*0a2D@W0 z9Ofa;%z9!sZz|80eE|Eph<_^wtFsV&nFq5tH^HSqGaOX9uDe@*_2 zgda&#YywJt0jHF=c}2e`*;L^k)m2#}aHsg&4Y((qxuISbBgf&zoYd_Vs_12hR>Q9Z zwk^De2w@4?H%V9}1$TAgV^M1#EFHsh*X>RF$5BYLySINTZleFIg19KvP0}WOFM^}t zTqBPbC`C|wD49O;<|^T`u6 z$hTuQ8#Q^nyPW&0F9u@ArCFdd~|h^>gq9gBz|)}y3&9dKOe!v 
z-d>e0`e8UDj{)QjT)Y%pepSa3Qh?8f$Q$M9rF9$j$T@_Rq&@)`K5y5k1VM=yac1fz z3vBLM{%`~C4{3zQ2E#tO>=FHc;vX6qc8SsWX9k8Sb`iA3cMMf>vjJLwm}W1R(rQ7~ z)4V@|hk{5gg>~9e$Cbalo1P1qoejzC6w~g-OwY~_BQtiYg%bxio}@0pY7Tsk;>L2Z z1DCH6;bS2%=hWyj0Uk}yMTmE0U+iC#cH8-nRDW+cD`h-8yg>!Pk$4fOEB7j|w%(8a zsSv71Z@EiUZ@0xe(8(ALOllp-rhNRd_dvye# z2vJ7RIp!5lQAPw5CsI*>Y(l4+xi|4eaaF=Plb{7df0&HUDg6e$S%JP-zRgm~BS~OK zE(ByPlHUm`k~I70FPnrGbO4B#YEJ5$2;x;bo}J@{0eR^xbN567rOkRkp=PQI0P@o5 zW-eMJOwmbIO&1jB*6bO_P|2LO3Rnh50mXb6y((p7W3 z6jGFr8q|beL_2l&VT`C>Rvm*!-a#Iv$NRAy8jRwWK@>>C}Sy(s3y_N{k-8 zbojV`Q;yI4ynS%;WPgITg!UyJCik|>s6TsZJV6HuZjQJ6ZSG*&{8ZE;g9`@;=&%R) zP!wR3S*bc`d|s^``$g)W22HkzG`&_~!09e4zo4)@6`qNayY^1g)?W|J59` zkZBONZZiH=jiROZy;O{}vg4GKxfrRf*l6s8&o%1HR>(BdmJO*rsnh)HBhFp;Vk=wt zPK_o>;i=kEOQy7BzWLLx2Uhpc@LY(k@QU!4SA0dMT$?SYz*cQ~4Fhxi_Zq_mgYeWQ z1#I2px{|T9Iz;FX&1-Hj5PYiv{@W1s_RV5JwTT}iL;m~o6|4w#tB4a>NAUj{)mN|4 z9q9Edv@kns&e1;R({(wA*s|vU+GDj9uIX0yx70?Rpfi8kgzb7orrk+dn=|G7C4<@| zC>lgzuM@g+?=+l@x5Dh)Qv{8iPiyoK{%aE^^Hw+q&ec|cqz-dF)IFl=-hvbHTWVD| z;?Z8!KV6}eHzRNu^~&M6JJH6lcrWuqGWRG__?zPPA~pIJ!N(i$7F#c{6^$4WDrX4W zfmpG>t>*Yvj^6NRP5Ufs%aRJXVnF@58(7k_mTfgQi&G>D8@`}(#%G%F)_T09Y+i1; z!d3yS>&!vDnh;c|-Y0)J@C~1D$nJkiw}0YOm{?6~=;K@Ga8?j5>a#AwHRN+mVfw1@ zkaR338~Ccfb#$VEaMfP~zuB1gdmE08+q>24CbUtE}>e#y--6QSv##)@@((kgqYMFeO*oOOgg@r;1 zmYCc+{CS0g2jvptDgmZ|cVd>eM=UP%CR~(n$C)JQP3Z5ebQmC5ro4Q+AP9aK&+H%KBhQ3pulPU9SS^I@y5g zr3;V8j|R{SXE_W>hcPj4L6ImZa z^a=m6!tu27S(XJ^rKvldRSC&FL|z0JP9QdoounyVfcI25s>CchMIQ7`S7q%~IM~%Z z>zw>5e5Ar2b~D-fQ=mjH_FLF(8IW~s;S*b#73r>Xl$9;l<(8XEfYt}+n#Uv7 zS78IYnW*T9hDNEVjJ=M}HP4GM3qR|b`*6s0Z_M(LZN2T(%$myGi-B)pKm`cyHCzDCjyC)N7tFq8gJ(-LL&z?-m!8|gOj2HWi zSo=hMtI%N?sLic3Czzj#V_GE|KV3YUk$z&wad&Ryz z8BbQ_HQ`cV+5J5Wdl>`s$$({6>@%QmPbTBqkN~}UGJd?A^VK=2&gaS!Sz{t+#?%9P z_hdNay7y#x$oA^Vc-B?!UPSD<^<%y3j|Pu8ieCleS_IkUMZ>pU-dGMON&^kl06dNNCqm)lv>zDL>e7&awX8xo*bPsWcI zs3+rzijGJfKKMEqKQ3XW_MQxfT=$+V57}Nl8PB@P-3!s%7LeY17gy)elgTu2UiU5v zT#GIcWSvYHgQ@g9dor0cy?ZiYwkit^m5^jSc=lvc4w#H&>r#aihE82FroDSIDcgpW z^TqTo{4A7DBWK}*B 
zE&-O?7|)JO#sHK5YU{}OaWPx->&W=gRcZ^jbCDm9G%<5hUFcOZIWxwN?58SRiz8W_ z%L1;lAF~VBUfmeaw#sITqQbUxgg=Ym7@KYEUw_TsZn5metZyE$lYPOb!|K@IY1m)E zC!0=QP+@COWS|FqUiDJSG`Y`(LjF z9)btsqrNX5Nn(k8?)iY<3xB+6slo`CdTNOd^HJO>s<_AD(fDC+#c}q+^i{e(4?gLc zdCEl>-I-{8T%fb-@>inE$OPV%*yNjv?3FQipWkZ-lBSjEwXR~b-}uC|KRMf*4aOt; z8(?H&;Y>y4XGMsqv<0z=&ijz9d%=%2;LR*IRdYFOYKp|>mx0&;@b`YeqExf+&zi2s zkl~Vsoux{3p>i5KRxXi;y>c2$P}HGqnrRk>89Mi|)5G7xKk<8ikFx=ly10zy^ZG6~ zTF%UBQlrZ1>Vc(*(I;5Q<8nRd!C z9PdpAy%WR!@s1VAG(}BM_a^y>tt#7-J~~0OxUxtuh9b)OYELy`wYzkxxyw_}{K$ECmwd{c zyJ*e!d^wZGU5ME2+$I0C%3TPt=byWfncKNb{^|PN<;&8mmsyYJGXgHx@($$It=^gT z-8lOU-i|U6`Zu1E48+~69>vS3gvMp`BD!3Cp>;FOL~S@2UuVcf?6|ylvjMlW%vEN$EjX&5@MKrS zvePrjzHD9E+~^@)d0I3X*LNe>VA-iIZ6O}`NxO&_9+EKe{M85~w{WvkFetUVT2|`? z7Bv1eg4eSgRn{x&e%KmU=Bky#n5+Qy(6Jax;JXdD!;@h#aQF&Pc6BW~MF%Sj;vY_t zLK3#%e?*?48aa{>K73^!L_nqh^7|1;?jbj+Hf=xK`ikSlr02bCV?XM77cSM6i;VTimmgT`NWKgcSaKTj>nn7hz!I@JrCbTXk^~KU!CQA5n z!zJ5M7R3PJBd#Q@1-0Z(Z4kY(M|af$d?SK&4r01!UQ>i}rhe$(a~7Zniu6HSLUk%~rD z=`G$d7!GCY#J)lk%p{QhmgM0Y=HEWQsqDW+aH}_0Tr%?t3u4}B z)sY(EFB)){H`8J|^A}@Mhkq#G7(@J(V60&RKYTB8K*vjAKuBu-vJbZ z(>}DE(nxorL0}wKH}Wyr(#91eTfuT*E$IwkECPdq`GZ5^F$V__hmR!!aiW2|r2ejK2sT?h;A?jOuH_EWhac zQoVbD1c&Dw8)h2zgf(DyF_${q($b7|$dkk#__kx4%rGhj2&aaPq36~nGX%<3b>RdF z)FwJL$Siw8nDCW%h+v=#HjdaXJ?%}t?mNfEn9PKAG<{?2hV7DZ6!<4@yhwoL3JwJh zE7=I{3;>hKfg83<9#`;|H^mN100zRMnYduPrXgx+h{>SF3f&FcCFC=GaFuKzWR?t2 z&R^agvLm)jW`s>p*y-pHL^*mcNsA*;F=@Gg7GylP4%-piC4*wA`=>6Bp2<0i)aD~E zM{JjysjSu&W0NGbYtu{}%adf%j@T}FJkz$6=en?6GBTT_)vva=VY_4uC3o)Fu9388 zEH2otd64s)%G|JBfh#VVaaEE9geY#=_X!(zna>g16_^%#Fb8~5py-0_3M4N4l;(!* zifjr=&0qF&N^5S|ZpQ}O<@``IDj7icJF(+%5Y z*c8Kt1LDX~bHR3*$0gySG6)+S=*cDRhV8P<2|>b7(nVeCZarGa#tqwL_?6Xuq`s}i z=dFDZ+^}7ax93NAaKm;vM#TW()Fw07rCXcK5Re23-3#*`?om1Rq-~-Dw#&;~2ot{Y zu02q=?c1fdE1q|3m&}9>AKIqdao;W(2g4wxWiKAv4Y+TYjLY(;Zj0TpT{4D(H+O8; zK)BQrJM7yv4N*%&Oa?X8`HuT`3HeMPT&j|kpDrCP8NjZ?K%yOtsVs6+j8AHjPJGN^iEgFjpwrd{b{H8KDY**llOJ*F# zWC(DQtuIb*+>rU$Wj;r2mkdHoQAlSF_@Y431=|%!NKzVoxydIUyRA)1U?0bQyCR!H 
zQuCL6n6za|j^Kvv9=5@D1H(6XOg^-0AXGHA}#o6rNINWWtRBkx#+vOM)1B3(C$kB6aljn==;t4B?3{C8D#CAFPhzGRz%DeVJ z;fC!VaXXWthQ^r8gmqedW9+tXmyCnKr}<&KWL%a%bzAI)?UFGRyt!k$2EwJ5*a6!$ z4N*%&Oa?Vp=x+OV3HeMPT&i;Ex(KyWeyQg*R<|=5$Rx1|3eB)X5asB(BrT4Bq_XvD zgb8s)jZBm({vrY$CPq1(Cwsuw66Hbz!?? zWVZsM|g1Cx63gq1_-Byje%y} z+GK{ng&ZW@_U-cW7Q%$DylW2>C@>zca-tY_8=UsgU1>LruK-Gq{)}4qQ2up#cJ2*u zB<>i_&Xo+$R~7nFG=jgZxHnreU{kmTw5qrsCfd&Fsa4X3V-oci@dZk zNMDREDBax|z@>!P5E#tdMSowv5E;IM~_~Lomy!NleU+dd$@sj1c~hB$|Z;n<`Z8AvK#8Nx2cEy-eiPs zj_*#;E$^fNDZfz+&;H_T`ShwdL=t-N(sCfrhC0!o^;XqGoIH8*^&y7nsE5cdYUcum zii=Hnxq^%S_H-~DkH{c4)FcOMKK0q_I`D~IaIa#No$W28k1qRHzQbj2xo+ROy*)ga z$~fLbzfE3g_i$2uje^~>nN-Iv*~h!9$qGA9TuNt_f|!2_T@;?Cbk%3ro6?h4l5p5E-9onbe9pgr5!9`@UtCfR(-XhZs{&Z=PN>G zCaSKf*Cn0;pWX%cds!?h0?2BH%xbS?iA$`OKJE}T>9g6wLgKLfq?kL(6fda2yT!^K zhaqbVMsl?bz0`zVc9|04SB$ju`a8a1UVvZstsM~G>hNo-9rA}AVkjLJ*`S*jyfA}s z-_L|?!*FS z+N>V=m(~>iftVcWCNpGXAjS{71Y$C*V<2W8e?ghBS0L`RZ38haCLEeJef>c7e`>o?AKTklgC{GF?rTG5OV}xP*&_!5g(pQASS|>n(=uF#N_>JnIRhkF@D%3 z5R+jY12OaX3(ADO0`cMb1Y#_EsRhqH5Et*O$n4h`h{@wFftWn&9EdprFDNVa3dBcj z1F;Mns~$oNB-sVwAUX^HO_0sfGD9{7V*Id6ASS~)24d#%7nBKm1>z&tfjBfYfz?qp zHp6qf;K4E$W4EfoZGzGP#Ru6Dc%WE9Be|XZc%|KDWD==K-w}e!3(B$wWr^6}#+-^Q z!DF3ud+(2y8o56HNASg6Ai0p;!O$@vD~r|FvGUcJc}gJ(uvyE)Q7{w8O^U~?ouPrx~nQajijN%K<7v1o>TgO_TL*|#EinIeD)yT~OUW41^onouXfe8Vv& z-}$X#_m}o4cLt)E++Ed1Wp1aJE-H7Di&1%ic8Zv-BLy2KUjpTGm=LU-Q%71tUsELC*4u+On*vV zy?ECm8R4&*8-LkEV=ST*L-uV{L7g_LA+Z~;VB;i@Xcb-ZhG~r=Z!C?U%X``lR4APKSfd z!m^l`dUk6rN7&c8j#%Kz#x>Ob7B-~+gKsRtUS{-o3EOtU_)elU`b0bNq>9Za)oh-B zu|{R{)~_3xd?UB{#METC-b|6&d;&{bvvb?XXfK2P zoqZAWAN)1Ah!2oe z1@Gq;;U(F}9zd@XjVFruy>bcK_jwk%-P&#UKe%?NbupZ8lbx^^!+B=fapRCS{K8M5 zUQIAGMMxKot)2+~hY{@O=M(RtKEOGg&OZ_pPa-q7!~X7gXSTgF>t^gv;|?5JYg`>w z`G@kHI-qU5vAAd*&n><76qj3sy<*YA zs6^6YCM5=$QpQDPgs?(P29-%f1z(5&TiOJ7n%&-Tn6FvnEiL9h*^e7sB8}uy{^vvD zFII&oI>OI~g`bZIKfgx!`3B+V8-<^55`MlJ-g`ML4n~7n&dyZK(m|Z2NkC7c2xr6Lv~G9vNY(~cN2eljZZ12ZERJK_ixb8?Xp$w17ET-48yDdyuwD3Bc$sgkv$mB#W* 
zR{pOm;PNb2_D&4@=`OoL`pq={_T};acRBoS1g7#jcjL7~u?APfah`C4s}$Rhrw8V( z9qQbs&b}OV_VmsPS_&PX>F$gMC&!aBYlm)%;F$0pSwW{I=QH&}>uD zSQuOWNy3%M*vQ9op4QAfd8&wnb{iFu&k|;gDxweAs)#nzDk7h?RuO%`Rz>symLW$K;Vf>WB9iYdRg{0+Rz>;8S&piyh$J=} z6_L+ctB5{et0MXU%aEgra1mjnB2sQxswn@st%~xGvm6yF`ep>0Sj08OG$9>PhgQi0 zL|GM|%DMZ&YGbiXO4NoHsx)bYn|z+6I+o*Sxg_sgDm|0)$(>nj!qQkSsVY}q3Cmd- z*d1@j30$q(K)v$adtpyeC4TMR_$&8btP<&7XkPXgtHk~=JMS8m?-#4Y;Q)DCn|fS& zkFl|kCv}-kej}eHAyX5XGs8snRNj!;<~RBP8$u+3(x#BVSS6zM45Nx@(Pyh7`al2` zU91vqR78tDTNTj<0;uR>mDrz*P?E`Or;Al0+HzYeL`e$vuM*+sr7sgQt86Gyar^C* z%W#HI<<7*_+htk7Z(9{H4hdgN2$^!}d)a5}&Y zse1$t*Gv!lK-0gS{di>p#8Et{7%gZ8oe*>!D1VHMTR7BlXB&0jMCWf%U zJf3*d$ps}jmx!IAtZN`xIR@-yqWb9uypE+}XAHR2#I{uFgd(mJ$j#BHXfhjea1n+85sxPocSmFABC{SRM?;>$*rk)B@g#_tsymuM$Jx;YIxLIIc7R`V(5G4OefIsaX^M8X*hQi6-6uZ%E=+da3nlL#nW*Y6;DTI zRc^70qw5;01R_LS4zWt4l&*BB1Bvcu}Y-m z9;-x3GLgB(Dzb?09IF@tL{tv3ilyQnt5_;B5xK=GhN^2_#gQOla&ka6#`riOODCMD zD1Pl8t2h!KqT=bei;AZs6O~)6;^?}@DuD<0Zp;$6Ah>|KW-I-2d#dHML0}gj*1Qu zS{P!vdvZ6uDf8j|7^XOpG6CK&4>9iIJK(>V@uh zo+PD0EHDJATy)0+%t|IUFXyEUXSV0=ZwwtGx-i7NotK@NM0_i2urwS`r|7MWLvTI3 hx~!x#{obfMecMiN(%c^^Kvdv7D7_ zZPe2}boZ>ZsHn6S?RyxEc5F7RekU8`&HeltLN~4zx2aDw6qU>`^(>W@3Xfh zmz)}R`@`#Ad#*Q}uC{Oa{Eu%v{;eqc;Jtglb?3f!{sm;8Tl(x5_QG4?s_oIBzj3~E zt~;KzMyDpd@wx7_KN`mS?;mtGdS^$2&E9xy zcDH+z_)55Z^-#Qj@vq5$QQTN;F25M|WFLrSe}x^0*|EDD1>X#8YnvN783wKjXhNw4)}Z#?V`T6Yio(|&i*e`{~t8h3}A zqpi-)uzz|q-fCfS?zw$5oJ^N*f`!h;pgWnwEjS!sI~Z-OuGtN-{1Vu6t~=Q2#aFDy zi_Lf*sjIQH*hE&chW@dVZ7wGzbuYrgYW4_xGJ@CI4XNX5t3Ql$o;X8$L22Z9Xspip zFn%Rr{J=UaM)BP8{J*&2XP*hhNB3#Mhlg&YQ@n-MNwlnx41A~o_xbDNfz0S0#5NaS z!u^4p#?(rOS98VnqM zwE?$zdv4skZF4gO*GMHn$RPVOFdjm<@VN+HVJEy8PIkxr?x{iVWPAA#>_-W8+}rF7 zyIZKXLFvFFZeI%fHb%oKs=a#SjMn9({9ywbm*5Ptf%iAyupM8m+a{C#nPI$uzb2OA zNc?RsmEz8%*FojSll|eDxN%JsLZ{1Dz(G`^^f$ZH9xgL8$|i~Z`v|VJl-Rh}-HaFS zMy1!ZKRs_Nu^_yexcGPk*PFOF)$dNYq6(FZqY0@$ZlJZ{VYoJarB!X1Qe1u^DUdE( zKZpvXw9H#uo=@8cK4Gh?!ZrAh5xm@H`kXU5&$#4wmlD6jRqC4c#3ca%ey(9Qs7w`t z16m0nan!Qb=bQy8_c)!RKNi6&Z4})Se9cNYg{L`lq?`MHw#XUxbf>5;i5KwKMAa)N 
zRYFdxyD4hxCfg$vHBX_aMk`wD)_RR6(oKl~V(14<%2-D(j!J}hZ~l!r>gBB4tGQ6y zm9QU0?`|jUT)WeFA)deYbr0Qc3m>e(J1oM;iSF=hJfHr5V#~bs1Slu)Yy@{3MB7=Z z+P>dRL-Y!?;=?u}nhv69;M3+k{l`W;eFKj|wW*<_gL2tM04A6g-ixnt^ZV?F9)E`A zN8S7eZwO&=T<AY*gCE9>P>UaiP%wKm z3Gs^&Tw^OluD9?~ZK=Ny`uZ&BREkitKI5YeXxZ{6Hv85>NkZ75w)yaqw7dZ^Sp`b_ zCXl2kVT4A+T5_}&T=wG7#LFilxY29{S-;7y5^=4+1B8X-lhO>*{QoC&UzHEXxo8+H zjxbX4S!YZDe$kA<<)eyjX~N+0#}a3aYdz7!4CH>OYo`C58FnK=5?Fo4GgGLQj&-wf6u9hDFQix8P(1SS-OwSa<4UzFhQ zTg4j&Con-7g%FBK+N#0|DQM3EDIkN)6e%EsS|SD9T8N?g$WD6ey1skNmV#bz8fd!>j zx)~r$xYTY3zNz=Zu7N=|peTuc@W)voDkb-vB)pH8+MhY;^nIQ#uIL<}6s!`L@Nl-SI z?LD2G0g;Pw$3$)} z*~Ssn9(ipfpdtYqte~AKBHvSB#0sboYT8wBv?^A>L`Ut)zf*lhW@7*qtomz0i%U;o zcNLH!b`&26p?N$*_+WDrv;VTBbCVj7WJVg(kI?63kW zLc#3O0NYq$1!TrtBO+O{bZJis?Hb?%u-R2W#;=SOkO$3J0r{lH2Gc53*>8A>Rlb^O zvI-llfU+fYDn_h;JV+~3ziQ0Hiw#zAv<9pI)0`PrfQRE;l&OXzZDT0$WtSE)=Qs>j zkgKFBR)8NfV+A?+3lS^8Q?tSfaxMf9G`8FLF|A2ZB--5&tgwQkm9YZcq9#TxlY|-E zMI#fh<-G$Ntl+2vRzT!p#tIl4C?cvSXkdjE9Ib>EP$3kPw8dzw_7xnfiWRt|RWgVH z0$`VUwK#+oRzL>T)DJD@4FoRghul{{<{@vwp9#l{A6B5BW-uR2yhp47?C9xJe+W`z}45%TTL$RJ)=0U@FjpgmS#Lya?KGge?j(i`|9RzNQgRs&XmY0eBQz{7Da8VCZ& z88JctY=x}FCam@qOJw)qlN(>(J&%qTta#!e(^zYp`sFoxw!!sqEN&alHjqq7123d!S*FJ+ zzCIX_40PZ>@$xGN@B9ZpZ@JeXELR1HP!m^{NCG+~?qo1saXpv_Cj>TouBqqqwZ7Jj z63cjNm%3r8o3n^5JIMihF0maK<58h%l^d}K~ zz5%Zb6$UU05zx}mD-HpvA-gpNh=BI{RqeKRNpj`d9tkob9Ib3(MbbF=Ta(y7!KWK= zteW`<&5&lfOd_Ksn5`BSoWE4$t&)X;gsm0ug$UZ!q$zYw)ub6x1_o~7w@jG^N2(ODs?!vrYDXzX>AIihwQA08(=j>EW~y}^z!0U4ns^SXFYluJBDgU> z3mzZJ7^R0U+gf0zCc&GlNG0*J2Q6^uD=4`~t8s!TJ?HO875f$(w~F92`y-e`UOhzs z-?YzPRo|+>O?P5zXn8xFO00-jmVI~TM#Tn;>^%w@1cI{lb(ZQ|7f)-n4Z8V(43*8K zVA>B6iQw0T6Ks)S`oq>%uRH8a-m;UOx!fC%N8{FU{L>x%)2Dmm-f*KAA1eJSsVK07 zyI2C>@KCU3t50})#2?A6#r1{sh<$YQHIpuy%U74q<6nUJRrE1=6;Ju$j-FzQZQg-s<#SYMwy&TSsXK*Hm#rk6-8hpwY;9VR-%5kv-nkU># zAvFLU0&l0G<2zgFl^t7&-N%talM3Q0JYV5==^Ff#z20_bv%fXT4uVGKot)@zKAqlf zl3r@SoeZWlfZ@(oXM5b6po8pNuSMx}&^lkYrkIeeJdaoEWC*%P{h6IZl=zDGWb9S- z3x0XCiWrR=O#A4YtsIDDCVL6LGw46rt6NX9s)P3=rTX9xBH*r*vJ%{A*qdaWwX%d- 
zH-GMB%zyXeZYJ^2*~{K`D||PC)yjrRwHMFd*&hrNZ~c2w$Yfl&;AH^);b!k?T0p7R z2H>X}a6@I@kM{<>4RlI9{%TnXvR;j2lTS2AJTz9wNs(;eZ4KyEmJ01@I(a|Sk`|-n zkJ(n-3)kuYZv>lli-0M}Yb%t>g}zW7schh1H|i5Wz<#PW!D*_-!hys-?XM!(sLN`~ zU0iLvGufo63Z0TRyOA~t=(3XHmQbrgz%p728J|`b^3pyGRfwMuwmmqyHFbczW~{-flCU(5b)_{z z{G$z6_m3g?@=Mo>mEX7EtHdG;S_=W4oL#-2pM_5<_7GOblfEOYxY-C9<4OUn!=V!q z3|vtR>d~kF5W%agOgkH_vr>1_&qO-S>I7PF%P+R3>l%PKu>?dtG?P3k zGcMb;YqW~Nt=^lpl-H5mW&Shq>lr-W2#k1>8t*qEi9p@1verJ^e99Mrt4$3?S~ z{VUt0w9Wpxc<%$f?%=>k|ge}N!TKG)FU!$cY_}o~7`I2pD&Tu3O zUfV@ll1}XwKaqkH?uIf8(VFdR6aGfI$~-gfZuZe?=7bB_9)b1E@cp_`((yXyC!HgC z5v%B!*)Xo`w&6Vyyi7M(y0DUmI5X>s-MkAq+Xt|pkNCH8usRFTmwAxv-pb6blRnfc zIh2*Sq@qTP#P5-UK<={>qHr|_G3qnnUiJtlNprmW^DXHnsHrnNPW<1(K`!{;q`Cg~wxCChRRY&q*4MaOD@GDK(YLNSEu}C&X z!|8Z5NQKfv>$Ss;^YP-{=cvCv|%TF)d|JqlK;ER|4A>-a~}2gzTFntdfGedLg_6uy6v;9qry#n%zH* zLYm#Zy)$tW{a+QtrI@j5o#A^C91rIjd9*+&g5pB~b(%8Iz0!uGt)y4gmseCO@FR^n zxNjHS*CE?#GB5_m4+IU4^h|`=1%;q)rD!1y(;cdw0Xh6w`3N6fNlXl3Dz#HR}VLD_{%gUG;1c0KWsTv0k z&sKZ{Xsk*zyJ*b{#>p z^#?u^B5#zZm)33ABWDm&lKKR!a>61O@F^t-O2mjWQ!iOybJy~R8gPF|BRtU`^w4FG z=>HS{(7>=ujK)7RFhsG7r~(37`#XUuylEdTOrY0EQ4>n)e0XmJ4+W803hT6_jw^q8 zH+^xp+RMAogn$!LnXywXoH)4gBy|avYHyiyOwGw+QQTOJ;G-chXDYzs>A48;p6rYL zYtqHsg_mh`uLJ;6J$Hhm6L3mr&UkhhCu9vO0FFc_n#tDw<$aBte=>yX(Od4^asyVq z-4^fC83t~K5G42Td|WUq6m(kB?zz4d3OX*=}}jl@1ja{OjcL7*Um*CqT>Bv!Zq)+8u7sUN5gAi6rzf=9##BlviTGJ?)AuXu_w zBA_^tiUMR4I#rxaNaZKg4%RRApanyJn2gRT{RX{RfxcM2%~HxENnl5&hGLy};kSc| zB+dT$%O;@(9RT8`nv*&w3-Ky;vm5*rf`GhqmbrT(fzoC@pinba1ps;JbTbz%5@sYg zsiMCN1M<>Ih?rnFke3cYdf)&cuSgTYQ6C8b@>04g@GvOIOGhJ;2>={YnxaCB5LXMx zOUKQ5u@9s{P+vN{N^vL{6x-PRp&dFNjWr=qUrJT2yjsv*Ixgi#1rMe#9X{?49f_xW zN;*vLHKAc|`qXHQ4ielLZTDK-Imh{_sD*h~ffzm*1=wU(s!kYgklE#6iS`iEDs%q3 zP1veS1JeAiYC`L^g8)5+UNOXC+#1w(Ho$*HPBB4zb|!g+dc%Jl>ffs9$S$g=_kGD; z{F_^YFErrMx(srQbngB_&8U(GQK*|yyxmKnjntx>e}zL$!TR(70nG7}@MF2Bz< z>dRKhG}D$1sXeLF{OcpmUHD=vTlljYO_IV>wWXF!X~}H!Cv|5>6brr{Vk^8N{N)v2 z(J9wv%PFu`n_k1fT>q=aaKRuvwMhY6_o%L9EUgX$pg-Sgfd4i`y?wJ-P;KJJ$dLd3 
zd<826C$bIIMcm<5jNyMr_0?;12YUSqwMH1I9N+^DI9r!rTUkTBq9DwOhO1P-HKIUIKv+87q^XMRX#9z_a& zQ`}ypKK(a>k2T;;wq9T>8ZjVL&JeZ(v0{Hq&GD@qz2Q%r_F2}JB^7YRfci5xu%v4( z+iGkUr$`hwd_m`oPdDMs^>|C!yxekytpZrrnWcJtLcarVnthiMe6At8|0&(>h)-c+ zHLV#N-#Ul0f^bovbrG&1pKA)!SA_?stNzgamuRc$aI!s`^g5@|MLfj~Nw#Iw8anuZ zA(Gwx2fxv<-*v5FyewDb5)1hqf3Hw02c5Zs_{pz8oS6oS2xhpbwJb5=3dK|tbMUhj zurNy@D)O@IvatY8ki0QO){q$ie5wg&SpDlZOjnXkISMm*S8`We2r=2Wwv||s<<_&b z?tTIuiyx^}cUumQ^<^DC;FC@0&q$SALbZN-p%UMnl?q6OC1ufTi$L>cs?ImT6Y*p9 zsT1wlG6XgFCl%gL;u&YdHBNFTy2GvljNN2062${qc+#(WtuvczmTAdd5wm}2j!|+i2phtisqATmeK2?3RDZ5B4xZ=1p zWqq)ug`8Qto{<9kRDm6MJbpBQ0y)cJIMOtFYk~mSvYfYn5~iIx@IqN zw@pgrUSy8{72g%Yzo~FMt$dbcK~`z%jra6ax!7-Ew`G9o*sT4RzV8%1RhD^9k7Bhx%F34O za?8ynK*<$YRWfdw3Dlz1syeC>pJQ{)!XvI^Xw25&=R9*C4!L|s5Lal-@{n!4?bOV= z%H4~IZR6Vr0Lz03A0sMXsDh{ z#)D^1Cgp(1NHna4PBb!JX4jKR**2skubxa8khN>So=hMtI%N?sLic3Czzj#V_GCv} z<=h^oCleS_IkUMZJKADnE7lRgyC;)zZAgG# zJsCe<&be<-#uF7C(bOXqVI_3W#?Ol|>)n&#kn7%)?#YDNsw^~ALXz>|*^@~*U^0@eOBIG0x_Of^ z4ZiDSQnn2#$*U(524wBpuO|}-i%wbSC3H_F49sv;Yfsj$ttS&0QaQ7^Cu=_^dNP?H ztMp_m0eUh^k@GESws*0x>*{S9_^y-jq7z*%NKeKOfvY;to=iqEKu^Y#Re4Rg6j*L# zJbN-30~c5ZEc5K$lgYR?BtWm8j31A*DfOz30?MI#E;%!%9?-id!y(tbC(A>&S5L;X zu5$MxJlhtK-s@y5v*^iW8Yp<(g`P~{T9J$V*2#o1m`cyHCzDCjyC)N7tFq8g2}#C- zXHO>OfXPTStc7m1WW3m;guXqQlx;&w^6JTi0a?5D>&XPdqEi+DBXmzD49sv;YfrXP zTTdo1q;h6+PqxDKWYmLfvlHKeWQ$__ud>B9_=xA;MVTO(p6um@9hw0;GE0G*N*!J} zsk~$x7O5k6cVxWaM28E~k?}(W>BwXx19W6OS(VR(OMvC>#IvlIF)*9_S7yaN|M~7* zlyPlHe_kCKKVG1Yj3+8OB6axX&>a~+E@8$Lrr@V5T#F;wHJAllWj|&YuD!Z3o^6$@ z7h<=~ckrhXoM5w!{p+vUYcQ6*nDxyAcCs({bm+u>ykUO@pKLmLv4yQcH2gdRJ?QhQ zmr|z5eO_eC?<>Lkn)ZjtS<0du$(RHb*bb6Myab}vWMN}n`r^?;@L+u0_r)VgEV0i$ z@ArG*k2ft<7~wX}*Ag9Oqqs9vagW2J@x$JVaR%U_8u zBg5`Wv`jk9a?_xHQ|vu{uN_F5R;Jgwip_qbQ;U+CKVVU+S@@St*JH?VNyE-krMgf#jU6kO z$irSajU_1RkXp)e=#<8eiodr_X-)X&e(&#bHlR`$m(hG)-{nTjxn4C>lgwYq-+em#PhXr~;*$?mw{JvHc^Y^RxNa<)6}ZFYv; ztzO)S=e#F>DeOyY;%$v>@f z7eeg0=PqRCcJ7jYI$R)7xIBDWdi65v@qD7d+_u@>YTb>qPyekb6QO^j3CTb= z(G1jr!|}C-48#&%<}PKA;zd-M~9Pl>b>uBl38H2 
zm?la$o73}Yx;YZrbR1&-a~B zKWvS=djTQ6cbcz-Wf2R$(||iY85RSFuK;CN*RoS|urfCK73^! zB$S$h=I=!yxrf}O+O#We=_`&KXvJ|Du8m)5RdHm^?h9cLZk{h&Ke!mh^BEE-sWsFm z@vp&GBDk65!7gM_uGHZ|GpMYRcNRh^P+Uq%n}yX(BJkygOSYpdiUGn$TuE39(z>{_ zMCCPwMxXGF2-ZA|NDK!)5-vi4GyghHKj+kVP+QW~5YF=r5eELqsoh}sdM*TlKXz&| z7$(I~;86B5)Lfbj=J9eh8o>4|`@SRu!8e`S3zjvZKJb%s0I}#o0@@cNSzcwsp`_$P zfk2B!v@+PU)17pt=w5-*m^2zU(^_E#TJd3Wt|4q9{l@~#P0i-u z27ED+-L&A#akc;ugwOIK0Ai2hPYEW&Pc|LeL@Hucr8j?Pe=v}(6Z;BHFq1@bz`XAe zcwvUN;9eGZ89;xy*?YQ^!%R(0>%W$kv+!pwEiJDPE;W_PMWL_EMrB<=hlJq|OICt> zgam)?($w-Si;1dwgT>QxXli)^l4K=MHR>*m3R_t0DZ;y#)X}w_)e%U6Ng-|dN<9o` zMi)b9pyX)dN?`-Zp#-R_QWyg&Y17QIk2(#x4*npLZSC~u`NBwIKh=ceEH73~J#Wi0 zjm9{Q&B9ochijOB3#T$uh^Z;n!T*flHgB%DWai}|Gs%ST-3GkQn`tqf`HL~B!#^aw zP@oX*lq=zTkpntj3Ijq?^Ot=fnHby{o^^q05cjkXEvGcnoj5E*<%E|*fHhxp0c%*! zxF8}aUp@FMMHLZ_(#91eTfuT*E$Iwx+@lJmf8p3>vXYf)sq) z1+d|m6M}@Fq&48fS_q+~07mt-V3uF>eW~8PK!U^99UEpE_JlRya1NbXUVtqlxgOx# zj%_l-s2Cue8a9TWTbs-fC|i|c?YEkP2XPwICOS39EPFzj@RfH6zI4GRedo~u+oh+y z$=7{Vjj>BLPlht|yKK_cHrbmkgL|<++y3+^}7NX|V@$z!wFI zF4(R>;=)g9ZrHBKrjXS9Wj~{|=7#OI>%w+1;a$Xb8P2#Ma^ZnDc&6Zn?J{hNVZ#A& zWT?4dyUgQ~a8Z6KwQvm=xEGIB9Sw5Cc3I|xAmJzJAX3nvUyj%=!>_bD%3n|S&|HXx zA2)25Ws)9FnY}Y`z)Dk=F+cgalpdlth zI5fm=`*z8InLfBw<%0haYIfKxyGG&yA3N^bC6mM^=#TAk^jwk_M?jLayjBYcaYZ3p z9U*qacF8=j)csQzN3Td-URG-m2Dt6phjN`st zfoZV^bHEn`iZ0l$K;ptrX>QoA$fl6g{AE9*wC0BG9<2-8#e{be+hsW8f`|(il`~Tc z0nVFrCDF+d+hy1k!-fOm$WU{^cA3W|;UYA)n`@==R$fPJmt{@}5`K~{0)&TchR1dp zer2^Esc${zynEO##~T+O0%5zkZH9{&j@T~8s2Cs|uttuaTbr!Mb}M_ccKegNai(pe z!@gZkKH>o_zVfa;P`F{c$7XjX0~sndLP;|UJ=BC(2<#;9o z8Ct=cJGN^eTxy9OuwBy-0UBa5#LTc=GGL|;E>-PDJ4|L{hYdSoyJQe-g8tYpN6#f` zaRelltvqR2AQVB*WFWJ2#CFN!Sn5KB>yWxQdPVB;3Wo-fI%2!rOl7sM7@J7#>%|Lq z?6JqOLT=cundjMHyJQ}UOy{_7mr%`YS*B5UY}ZIyG!_?Z*F4DXrZP8dSK#WomdxDt z?FvkbJ(vT&C{T34b_EiWl%@k~yuzMSu5>(;L1a@%YW}jHQCf4uc8}R$yIc{e0g;wn zT4_834G0WwXEHFHaX};yc#}^%Ab8`pZyxJBSXyv+hrb?gbTX6!NW}CkwIsZ zLYVNCcMwVFct(fSoUC%97f)P#()YikWy$IMdMF@*zpRMgOXgFC+$05tKi8!? 
zL$b3Z;-jXC`FAwohk__u0N*ODh^Qo*)MST>KwyxScKyg(x$yP|+#N&{Lc9s0i6ZED zGirFHK1$Dok=&ijiiSV$TV*}Edc~g$4wUnZK>w%WIf81AxhjV1*er}&JUhx?NLB$W zU4E)`ZHJnDv9_sncafTGQky;28&llk9K5R$CMO^V6k<^zeatc%p=-@fu(8K<)Gud6 zw8|~lxTF^Hyx3IMK1r(}b;TC}y4I}Hb#-zc)v-r2wq$=>_-lTJ-FFdu&nMRwN)>9J z05VK!u{A{;FsMbj_cW=%v`Hm9{X2^O>rsZX_w5;B7%7d%1E6qJwa< z(u8j4%dXtc-liV(y5k|b!o4#_7r&DNq>f_4uTijDHk0buCHr`1C0SzUiA(9sQV{b`p^L)Pl&<;= zdsBMy@-m&Wg(q6$)c&=d7)&BLRi43_ad)%d8%`%J_HQy(-qW4lvoq+V545H`+k;+f zV>FzOM}y90Z@Y&sa2jr$@2nKz!eQAb7yNPz2hu`r*qcnzE$rvw#x-*q1O{KHUV`7} z;Qf2x^(O751iH`{fjhubzc zjdokMpTd-#Z{d^~vN{c}!Ke1X{azM}iU6{jA+y?RSmF|^rH?yAP5NxMu#h-xKPl#p zGR5;M@NTkl$6?6Yf{|P;L$A!*^bqP=xg4D<%-&@u6wx94ns4oZ_*REsQ|*vH><~lg zu*e48yx{p6gnND_93l)mR1~>3r4>cJfw(wT($6ZX2JnI~YIRY00aj%s%GoAKx zDG(DK1_;FDNH>`w8v`+Z*d-8?VI2c8^Z4`1guMcByR}<^7^`C!0&#H!hRlAA6)}0- zB@mNmodYpP;CW@mUKR0C+d!;q>RhT@q7SJV8%pf}ftb91Ei+_eAjS{71Y$C*V<2W8 ze_oleS0FyRYk?T6Lu$eGR>Z~oDl+>u24eEKOCToCItOBo!1KzAy#n#E-3r7+A5t@} zKM<46(lSFf24eiMOCTo0ItF6q@#mEZdj;ZSyB3JCI;0k?3&daF0}qz57`s&sZWELa zC_c=Nzyrk+8p$2($1CkNBa=u?`pytko>!LLFH6J*H|A7i2_DmT-28XYR>z?w+al)R zi+ezFA-jX2t6}78S@~+rJf)BX*sSH@D3}T4CeJGae>1B9`-KtCz-t;{5g zr4i5FYVeEuAStt++4k1XAa0_ScB=(k{ceF1PbkJPE`M_Z=KUu*8HOr}{G^6T?s2Az z_nHXPyw8(dX?~sr>B16a_ziwDLD@TGS1e+s4bpp_z!$XSl(?;YK@oxD_4N#$~Qnf~vn zrMeiDF9zjdwmi@>Hb-PPh|qBOCXS1xauz|o6L7gyhWh~~-E%$VwRC%xVfzKJ=UjKN z(~B=z-@mvwo?G9uHQMYA;P>a@>Wt)ZME7L7HSG;2qj6{0-Re!qs~60>?0YqD*Bw}X z$2=M3ubS(B*+k>aqk};XtXF}j)~i9b>n~>mDi3`X?ew~7F(dClji1Y#-*w|EAL(h$ z6mhF+rbyh%Oc9qbW{RX=G*iTFpqU~uS@@gqV%DnA=?2@qPItK3nGE_Hg=Jk!>}6^F zWz<^c)}{YrU-i+NA$(&V_A{f$3)r?3#&;5>(I?u8Csj;2sbm;`h=eXy4~K#B~U{=3LCjw-u z9J_4_YFJgSiO6MJfjOwPAbB~8k&Ej)f$$}Pv7?kRQ{|Bt+a=bHDp*sCSn{BNBS;4c~ zFwz4l*Gd|^o-%1NQBul^EMD)Y!TjX`c_i`F;0=^HqXudHEwXn*a1D~70%`C@%A`?) 
zdFvpuc%zm@7q`R%DHzl!wd8|0QRa*qr2US_-c4HeTzrrWrC`wHga3_`Y9!qsp<1;()DXDDV$s%1GUV$6&_YI$&&-1wF)Li7GL!2OTPP}PhHcPOaFWC-}>T#eZTbRfnWZ{ zd!N1S2Y>B%7vaaQjK6dRtU?o=h!rnD^z>46BK>+EeNE8Ux1z60F9P(-`E==KHCwqk zczM*AU)a;!w;1mQbnJ3*gkQWLnrI&%dgyIY6z_v*tN8X@{_SP3pGn&2muNXC`%Rvo z)8Fic#vr8<1MJB@%zWQYzdrzb^J5LupZ39g`W6iOHbE0Wz%u#{O4{_!7xE7!-P?&d~!GEHZY9#eSR_{=0Vm&p+{e)i2u zIuZ1nar&F=oAftJS5o;WjU&y~1(R#HTx{tFbCZR!t^{IAJX5rNBOsxhTioYhdqP=r~GrXrgMCqHkjt*>~^| z@(#ET=;CWSo&FFl*rY;LOb51M0l!?9j@Ct=OuwE=pFsYKY-Dq1BYi$Ka#Xz!C)*=* zsAHE}{~Z5^Q_LS;PzZBB>}&5$h1pz;;Wd!H8B=0TAGr=?p(~aDbA(GHN+q}vPOcG!Rapu5pKI~r{E#%0^#O=al{*n|BXe@W)kWiI4S zmpp)Wa*Vbf9QIQWOhuf1EESl<9Ooy!-e%nF52s1v>ba%M(5KNa+Qo0K;tX_k5f<9> zmmiE8ONR=6a=U*gV*VD``#`TdxTAlL2q67x5&5zF;L-GX%oj3<+)0q&$U*f>3n#L_ zs>}t&!Tb~0qd3bQL9q^r1hcS>B5D8WtM{U0ae&=gg?@7dLR5rpF<0+R4#8ghxk)?U zc7S3!*?<6p{$zSh>SnLJCOQ(O(ilL#yTxTR6bq}3b}JgKH|T9l<4a&4+Vrs5pY}(? zt0C1u>LTgaUTOy_SGynf98a0%{1O>x9{v~8$ei-WMvE5%{k25mcfq0KD4vabn|Jh2 zpYDy(@vS|jVW>PLbt+U%9N#&md{m4$I4IY&elb_;ROAj=bO(5#iu0vOaM-H z$Nlc90ovk%VoX-Q;H;L`O9%NlC1gAl7E@E5cKgFLpX9|tBc5A&AzEucn_5n`-I4th z@~q{BmFrg8*BxoU@Erc;@OYzr_~_v?-Rn1RJbk*`J91O}*)Qycx58B{{4zQ<>5b2I zQ>Djjux0mJM9V)=+DL#3z12hU{>8s0|3zp8tGWDQ!kX-_umdqWb~m#%|6wETu#q_I zpnIw}nAAV)?+I5|@YpOyZ1TQ@Jt-e$Y?Q1zJ2+e6d%wxfR_-Dxdb@iQ^&D&HpUZ8! 
zIe5UTyQ$eT+kj6-@LE=hZd`5kH3y~tiyMCSnLxZmsy%C=_~<@Obi?dAR*ovUj~+35 ziwCptp$6RNuMfUL;tQsPyMV<_-&y6r8d+pKSR4RXVyFls%V$jwL8XJ^LJkpxk2FG9 z@dV}HGzljmJfiz_5HkHCkyhj*E!8VQp9&&4lw$E)ouN%Pgb0yVgNRVQMwx8`f?sU_ zcUZTRXT>R`6oPA{k|1OP*`G;xpNqh>9G|B)^`$_DG1BF8w%$S|n2$%9?l;6n@%{#A zwiR+k14ihrcPHfH;17s*vbiAMA>UJEp%NBIY+Mt;zmGt(+mw@-a0{5F0R>^#z!-cy zg6mCbJX^qGOU1XMTRP}lPm+tc*77TkUH44#30t$%1pZ^hUnC^6t91IDGZF+NURX`v zyIQjzPUe};Jc@n^Ki4oDRHh2S0qyq3I%>HvLT5(h9;eZ&Z2!>5B6y{hoeI8YC7b|E zYS@AQY>_hvu!_GXs$N<5UR5dN6l|f4!XmQi{W_Mdb+#$Gc=l9!H&4pY515pZFD1le zO~d%cQGT>!Vy|Y@)$j`(csawKHF5?Tyu%`lWJpnl7bUjL7YeYIHar{gH*8A+8d{UA z*uFC@qPEHAudJtie%j+_eZI?vIx^WbRGS()E6%hn{Ief={F!B+Y4W*1)r|z>rD)^J zSKJ|9ZkYiuA8WuH49;w&7i}xg-5T#@)4YPzSfZGH3Gj8z1m=S96!w^FDXkfoc zxjsUMFGgV8mQFGo*IO{DRtGiW=p}{WqHo$cGV-fI$ zUc*`_NeCO1Gy-At>$tlgrF|1fQbHE@Q4X`Wi6m!$rCPszi$<97@WWaWfVdvCTT5X8>E2lKWEX~ z>I?l|005Z-8`rfcgcVXisJ1kmF-QSdmMNMM$s)NkoIs`*j+a`H0y0fjH_A-}V1pD8 ziaNdcAq6%gR9f~Aj1-Xhr82FC6p$x%va?4DY^ZJ7AO%*0e31e&h;rYI4N^de=mcnw z6xdMXOxfJz+K}`{3dkU)R@n+EAjC8xwL}UmD7Dhf07j(1icm0nH1$z7NCBDgt|J9x zA_0*CLQ-r=jKv&z(2Nw2PikyX+n=~#Dr8JWhhnk{8>D~?(GvO;YsnQwqZfxJUTlzp z{MAdg7_;3Z;L=Iu=fiMSovvxLK?*QUwTo)#r`8u>$4>qpY}q+QfWitXAaBEFM+(>@ z1^6*DQjn7`X}mB>55)%AGGK!gvZDVv<3z0{R{{ z>(VpE3dkJTe$*c;AXGK=LyN%*q$}}my9&t2EI;c0+GbY)p{V7hzzVnwSqb-W+j>Bz zXN47z59@g08mw4&LBP1Hz-ENq#0tp#s$&J@NuBKMu>u=vR#<@*A>ZDN4B~|q5F$DO z+G7Pa)HqW%x4AYXQ>?&Hz8BfTrAZ)@u)+$+hczO##0o4Z*bV+FWH zHSa4RVJ5>e3to2tD>&+~uYkzK_;v-k^)R)8g3DQ81xG7k1yl&dB!glF^gJ@wrFB>V z6CJhvd>wBfkjYxD#gh+f>W7-J0x~k6sT4hXI9>{@K#F^~!2s2GvDsHZDC+dWHCUCg z0-F&qRuJr^0x~4m{`2}b6Dg8BDA@*LS%5T*;hb5 ztP`L;R$xPoGi5VYU_;V-UjZ4!gg97X1%#MJq?TBL1tmMIz>1JBRzL!H3h;1z2G%uQHdq0s$+;*~4M*BVWC`x)(iZ1zl_G-^R*0Jo@Stbl}>-Ny=!Iba1u zE@rHNv4KLPv%m_DRl*9W5FE};K!$X$`X#da@D-4+@1F0C$$KZPU+Vw^`H6$+iwF_= zx9opQArphOhyb(YGxzd3Mh^t#>#T7aw?qo1saXpv_ zCj>TozU0t1{gM1sgCH(Zys+?J8Uf9oq6+~%3B9@ykQ&?9`Abp(S-{sq3Y8)bHJN-s z@p9OMzjzJ)0iSQc>q3PAj6wvoH1vu?Kx$#_)+E@kYL}`d`Ci@j7>1Hz4*rHy>VPEJ 
zNCcm5z_DuPBQ!&r<$Q^Zl3=zf=~jx$(_eb>?#ZA?KvyERt4UMnnyN|D0A4Ux9(xM} zemjEKSh806AcZJDL-7_^=xhwSlSzCG+VPRWXk&G)!kBzvp>7JJ^yD$kYt@|HmX1_l zifJ975y`yTm%)-x3#dkv9=dF6fti{FZ>}Ph#LpfIZ}pXm+*dXFm&A_roWCO#Y)yQT zsfysjajxMJzGCk6|Fk5T+_9&SR)_=0fIK#k2UBMwG3uvs7Q{ zEC$=4o4;gH*-Q$3`ynC`{JL<0Es{ci*xKrKhn)$&@TAilk4NKH`TCRcjVMV)fhF9< z68MIPf;G!M7ZJQY;*aE3@__O{Z;USGBKKsaSADde>h;f@ozeq&%j@`ab8owqPXww4 zb{klyCp-fMb zyAonDmwoGd(Ol+MBVjTsxu9c`J*JXfG&Uh(Ii6nZlW^zpDmg3JtY*2OQ5fDq<|-g` z9a?(6DR)(Xay+Sh<?hRVdMHsgupkrTv+mLzMW6@#T+I{eoZKtRhCE2Gc(JW-A9`naLK*JA?j{ zy}I=zt2)p^jg9)?437=mpN~<0C?|$6PBp%AN zXInFV7u^Y1*)Xa0;_Qml#9RMf6fzkXE_fMt?`j${6=4;AssT4t<~_d}GQT3yvJzyy zIt#stPc%q8G*-w-C6%`|pjTNcw5RFh{Y*<*Ymq-@TXE}l`m#5(0ZkJJ;QvOjS+@w7 zg1ojui3@uQeJ@MeD)`rp`UDWLpQ=r86x0!VF;%ei)Y!8;*A!-uP zt_h&8VmJOn2N$)hskvc)0MDh9WDZ`M*?M{=9}%uiw6CRLvj zQl&k{_%&f;mU4yyu4tEvFqT+RaMj}?btaX!!N0OyO55z8i}yaz>kjVdpR-&f%knM% z#*RUX=J|dm;;+$C5`1o~!F^fNx~QY{Cbda5}sJm$ikQztC4L z!6m6#V|1184Asoz@M!#SO`7SfsB8rOQ3UsS>kL(1IK{GCY9#$5n^|?M;Qu({?-qBm zPkvGq|FfKfWcKjI26V#7YMpGxzAG;#ZyTSP#8*mxorKm=!d-YBM83=I(vIwx!An;! 
ziT5x5HTf?Rek38P2`KpmoKoKA75$!MQ-ym}S7njFo#Jmb;GS^ihI(0y9ETTkQny!V zx1kanQuaacD}ikb?;%21LiSA(R!PBKT^0HZxgCIo6PTxT)Bit>LYm#Zy)$tW{a+Qt zrI?vw5dyv!!SQgekw*)ZA}Br-P^T&L+$(K3+Ddv=Tt=C>|B*%=+_wwv>yT|=8GImU zaHMA<%qCCNAm6sxY*0Q4Sw<9VkpZ0rDI)2H^K5A`wYp-Il{VqP2Lt1%;q)rD!1+$R2mJeN+BJ~uej+?g1Z=#g38zAfqueMnZF116 z4_oP@0VN|UwD8bP+95vzZ;Vfd>5xS&E6p|t07XYrH4Yr05i~jEp(UA(u1-^3JqC}& zZ>&dG8ZhG*B6!%_tFlEu3}@sqfV_c=mtx&F_kSit-Y8Emt=q6i&LE^D^$ED}dApPn z1SMj`nW>j7u(@mbLk+k;q!FIz4|?dbNA&-Re`sLXB}U_)85pA2MbKK_2~^2V`)C1T zlD#`hs|8g@_udE|3L>=>)@ex{SN`&DdM;#kHYE4TitgoIbas9inXywXoH)4gBy|Z^ z<*@3bouat07{NzFV9u%0Wdb~&o{JFg$-daXCSAHSnnCjpd^|)MLFbrPJVhB1P@G6b0kR35YUbWl4$FM8Z=Ffdf}uZ5M(31% zgWjw_Uo78dDdmwQup<`&vKGm22Ng-0{qvViLJK+o#7i|Nbxs!IRU8y%@Y66LFP&xX zo=BjySq~`GOjQ9uUOL^(MNz_}wGr>6swOT7$V(?7VuIm7UOEKnfdhcNB25JHek26Q zOX;e>!=NB99gRpP0B}fYiV7{vs}_)#j+^shdr?IcX4@ehUZpq`42o@R{?HB`kH(r1 zs4u0eR$eWrFCCY1qr~XZONWp9H|6-u&pZ03Pxr=XOK4BhVRCP~413e3Mq_l4;KpdX z*W%7O&QC=x%)3fw`N1f_CbLp?!f=DkE(c4rhmcm8^WSa4R$Us9=66*STCW`h=qdDy zAr{kg=~UsJ4e(!)Q%n$_ok?Dy-tZrX`nM`N%3sb{W4CXrcYJ-J0gu*YkXxj4_ZNaz z*opsYj#)gt22 z+Ror-DwOhO1P-HKIUIKv+87q^XMRX#9z|%_N8rZ(&F-|@IXxP0b*J$H3eB&3=yuH^ zzY%<_0dKPP0$b6D0ikk+upNjM`&(*`Z{_F>f7-OqvbHR#fGY;npSgi0U2EA^W3xC# zqOjpzNCozRO}+t|7bsDc$~wPhny;tr;8N zI)}4@a8aLi5w0PhYYNj>g@>eVIa&6qKldB{*3dx)%#>;X= zF0p`ro7i4~I5Q0t5sVW}dU0Z8yatwg1mBnVvlXx~OCc)qvh1?408Ws+F+|po83BB% z31?V)*PS#`NjBvu%;a6kU2!4AWZ&9WVnvo)&(Z?@33x1iq*8%yIXKprZAM*#Pd1@H zBUN$<)%xv)N_=-#Dj*e>ltr&K1{2zky_u@}P4GngSbge7yORD%h4+(q#?cX3lV0ao zE4{H6C%N>ytgl)m-&u$IS%rln+FPQLYw+h44jz!Lt>5&K4^mb`!E5FI zat;2WbL&m)g5z3rhP<_W$T8JO$6zFzyup)jS(@qoJ;}8?Hkm{^TUABKB(C5Vk=TR1 zawF5~w6M1g8o(ZghvEl40vHipSs(VP>Z48BMPk7f$E7LjgC#BG%-Z#g6xgQ%uHLvCHsj|#-dK9bmQC7BGms@Tw0ZOjGs*-WbOrRF6R@Kq&%jbApv+#&3 z8M?tW_&LwqheNJ=W0r?(>uslI)>ZCaID@VGc=u#STdH~_U{5B~K*8%S^kf3pid^K^ zlL=!mm7Zr$CX=RjPbSP(Wuc*ZG8qq^J(-jPCL__X7CO<$c$r;KCS}`@lDv8{VL;Zd z{dzKiu;`S!{-_8hq5CyqQVh4=J=xJ#Ik$)D$%IL(oY~xy9c{5a8TBA;u_tXT=B(|V 
zZ%VRdH}LJrWP+^HleGi%WR@Z?vtKk={NJqlHSeB`7oF&GIX3Cnlkr1fllSb&WF!%S z@7t5{WK~`hE(Mm|-?OlnF)*79SZ2jKB6#;?GOi5?(5olo$IChQ?a6qeq9YdhEp$)D zk4u=Ty(hyV*S#mpL$+5>#&e=)=*eUnsJ{Jjjf%&6olM|bbb%o2WWpFs zHRsur$)xGslL@m`S!k$)B;&!eCzEn8i;N`W#XckXu9HdGHl!r4o=g~!wQIkgOdu>e zj}NrO0_X zWqTKCR^$IRo1Tmpo#=8wdNO_pT=jYOWHOQgdNQ7@%4@=QCV1I zX!w$!Am-n@C*z5#)D~`?j31A*G4-mB!eOC%GC4D~_hdNay7y#x$oA^Vc-B>RQ#gaI z`*^RDt<0h)lWAaAdoqD*MK1DNClkhCsyWY|OeRh5o=ljn%0fdWBpDB$J(-k)3o0YY zmvzB%{|%5OnS19 zcHK3(}GCLj>u_WF!N0WIS1w&xA{W z!8RKQ3X$*pdBo zg==vny9Tp>tL(?@!nId7#3S;cjHK+?1_z1CH1_8Xm=^v379 z)Bb2^e*=t6ES#yR{H%x^oa9VvTTOIDq3r!I_^}4Ok>#dpE@w?mk=Xn)5IX?=-Va!m zY8L)w)AblKT+*JR(Ves|D0 z*B$JjJ5O&P4JT8{MG2=*pT2eZ7Mh=rK|4ND%FldY*TzlV$1@DVsNvq1%xQ9;AEUqlliy<6m3LZ4>(yrz%Pd)R)yY4Rel%Lb? zLd0(8F8QZb?m~$1?ozhfaCTP7gEa0!W^U&$`KQCNp>mfmORrvLJ)X}9xLnISklQx9 zTdlis_UXSBWg_%%G$9#?yIDPo7f}g~%jkJ@x%%AlO*9j=;Bb7cArrCV@)<*YuQ<-1 z--_ThEI&B*vcEBKsP;a?(6XfE!F4HLT~6{9tWx2^HydyV%Uor4TY{td2~T!KEIU1e z?90|ANm7NEct}_7=LQYKcOqD4*{LpVAs+ZiJBTDqJpW+?l3Tc0DHxR6St+ZP1PxS! 
zZBEapHP{i|v70!M)D~gjPa?RL<*2e=QTL;)aSa0Am9X)h2Hfe%uoyUe1t`0^mYt%5 zm0eiD<1Zr5P>mc(2p_&OpGg4uy$B@tkegJSwx2D1#qmPegL~l1)(CmUK15u7-g>a%wjizPLaT(tVd*b+s%;75HPPCWB#834ZQHe=rS%9}PE?C#n;RBfEKX++rd6vaQ zRlULD={YpDJON3vq88;Pe|fQLuqsKS1axg@bp%piQb=3AQZEvhmxv6)NYOya(Z-d+ z29iSwP`{v4*dIi)t)2cnUl>X3rV z%@vo-yuw2G%q0U~zT1G;c{44hGk-BAb@+!Qtpp0;PFW7V7dfEgr7$2QHGkO$l8Gs; zJ?jG1Ans`&T25)CJK>glG{D%h2Gem^^vfp)UvmL#SkAa0;y#fQ!h(L0??noM+kV3Z zu3^~}!$x%&fu-gE*RYS5t!Y{A)jE69Z5eUbBYfKhu;G{!f`p%>ivZ!ylLo+8O971P zYr!nP==)N=dw~RpuRAu(H0LXy&S(2G|Hmv>?&*}Gx8 zBAY@|^OyaM(wZB#+qS`W14FiWOx~|FD2gMt%W%d85f^|UpRo~uvF1rqYJ(fL%djaz zHx7s+L(K)-Wgf2>EI66=7b>OC+Q+Uct}CVuN$_@@GGl#NqxJ^!NCpN z<#^-516Raqgevi1mlq*BcqZjiBoMaCF)9WKr#6|vF5TK>hQNg!B;2rFUfx2O@RfJ% zfx->jrMD|q!**+cY{^X6JgjZH9kE?9PFAm%jhM%Fxrm~`VMDgXWO9!CcFE%k-rTWW z1L0Cj?67awG(;`fE*aDaJh|=LCFC=GaH%R#J4^;B=O7te%es~kQFg?3$&9cG3eB)X z5asB(BrT3W+21AjY7wek8euY?8{F%N?UF&U)csQzM=y{(Ob~z@w#&^_R_ltfiPXMK zXxFBhI+iENq`?i_C68y?mh#MDy8#y{$;f;pQ6xr3Y?q9oXqqOFR z?H=6)Y!{EohjtB$;)v}soN+;f$;Xjyx1%gpxL|KNw_>C~l{CNS*Ad%g*c8Kt1LDX~ zbHR3*$0gySG6<`!nca2axNnzbP6!fyk}d*-J5M@GcHFni@GGnRB=xP{p8|uxczWQ5 z?Q*<5H^PJ4zFm${F+e!A$qY2>)+RFqBtb&AUhLr>m1B?NzFl74LYVNCckO|~4ck3- z!JNrJX2RAM+NRqP+a=>*7^L({1CQ+nY>UabEPu*vRF2p#8AHLFJGN^eTxy9OuwBy- zwKT+J(3-GaLO#<6m#P9`yJUb12iqkx!Y1gC?Q-;7k`_mxV$yQK7sz-s#dgV{SnB?% zi=$VhE-!i1gY9xNmDRdpY?9Qz+B8!Kwo4}Mi0zWcGi^(Gt_#~GBiki0w|%>03?+B& z*shVZXe=(+u6dB%O=WJ_uE5oEEt$FP+ZC7=doTxlQK0C8?Fu9$DUI&<_KMf?w#yL3 zC9sdGSpnKUFLCJxbRScJXjbcEnZ>cwr`hZP6!fyk}d*-hhQ`!aKv^Q zer2^Esc&oXd22^<#CAE}xbPrpm_4*&T+y*-)D7F^7!?DA1J=mVb8D0J*eVtCzU?dT+5-g&j3=v{D8}6er#*C+L<_Ez(ARho-x1^cg3`C!qx{6$HF_fN z^>8e18&2ev%oYB!;<+0V8d>*fiXh%IEtejWeJ&9nObpV6cQoOLf+$-6-zuGCqLOG* zlO0O>hQh1-)g+HRCQZD(0e1({giw|Q(L_k`m;`~=mLzxQvZCS7`&LDQ1WIYc$Hm1_G9cuQ)+NRRoMQXB1ZT1+_ubje;BoxBC z8ewt*azG=2@IshOe?y4LIj8+%Mg{c=`BtK4!UOll!X=R`^y-&4~La9Q{6F`PZEw(0c^1_9DtxO(vGztZj z?(PcUQbKGAApLMc(XszkpZ 
z{D@C4#jq?^F+t?$(Mz#YD{W!Az+%hWf<*RmQryqT~QBl`t<2rLk!VY50PEe&IJq=7aQTNq;aJY6khtwQV~-KE4O;RgAK;y@~YEVgJf^xa=+0t=qP@2j^26$NT8F$t&z0 zPO7g_uv<2h>ewawcxNS9V&{oV>C93P^G~6R!qb$l`V4zhdh+rzow9`|TI1CIwVfDD zA~{u_!I^P)v)>y|CoT4GGF9Hwo!+xE=%f#{raRk%UTb4CoQ_9>&Sr1Bhi-owZk+F| zltb%W5kVY~@mv?nF&s!2!-l=d6kW`IE^b^ir`eK-uSM zoHAwNN*_&pF0FL2dnW6tOmgM9XSR~NW$ev%9qHk=%}t}-$_z|h5wB_O-1T-!_|zV_ z-^*fA5kOWmWLA3(OI%{L^l^u%NuSLY77~Z;C&k=Rrg&Zj-c45SI1E`^Fp_J4(Cf0% ziRhzZ@Wtmi|C(>@fcRF2UsLUnKkN`g>9EKK-MrxW8H9U&CcGPA*rD9WwI3i57pH1E z1!D5JiwTlvodYpP;9?-|wX$ZW)3y!7vRXoq+3V{Gm*7ow2>yYX9O))AWMd%454!|n zGOS}DW*&cDnXp$NZnvH*ff%bVKp-xTz>wLmu_7jqy98qLtaBjd2t2Q>*sCHwY8!}4 z?^`oqgiBB*`q-t4n7n^2Gh|~R#t*v$Vlu2_AZ8waUYW30AU^t>3B*`^0RnOHzKYC# zje(dv?h=T}v(AB-Bk;VkVy{4a%r+2T2o*8WVStL5Y?hW8vM~_jhg||O8P+inGmk&7 zOxPlPCMX?He3%`92Z|*$k~`RsSK4hx zCXt%-ogt_^uPnP?mWT~*%&Eu{Jgy$0BkdZ9_eNg$;vSG($nIe1Y8d%iR=yfDPbnk; zHfwn}3T6Vi$@9v<-^?n&eqn?&a9c!!zM$p1t4ii7>WD3K6Zpk_kd#@^Y~mcl_S9ug3XbjIsxZMO6_2CB+W}P$6{NJZ#+}&G5b~oFHr;# zVHdgNW6Tz*L=);1hd?Q&q^&qS#Q^_opK@m)ipiZ7ZB*uVdg-EaJGmH@`)H@wx$bz< zs=gyDc*GNT86+2@vWsFm7o&2v!L|OVj9yasK5bM+k!k_Q=R^^g^I)aiO`3(fI%93M zs1MI9!VBaq$Mg4&?%PgYs;Z=NIlN5&chpi{49XXS@-SN-Xc?O$vKvHbID8Yw#Zozo zpxz0%Tq?u;0F&;y9)EN(^Pa`tWmUoX0@!n|JJ{*Pm#pt!+#AoW@7Wq{_6G3#^Kf-W z@;IV`Vszjl#07C5E-M{xWJ-bL-Oo*$@7WdDzd49xq_qP8i=w zlt!OuC!SQX`J|f7^Dow@Y~K2HBa?6BHlJAAym4f-u`o>~W{Sk-%@ncCn<)~TH&etm zZ>C6XK7obJ>G^GBw3orb_JN4`5B@e>l^I(34WfAQZWO1c{poqm><&D^7$&K)_o8!_ z7CIY)?qm{Q4u|4PvxAhf58?x46+s=oJ`XR-KK1~5ooF;x#P6j`(7w-e$n92dzyHD2 zBg+@V`8L@Jdoi46rX4qq=)?I>qFzlfG(|`kjjf&t|M>_G^7D!JQ6JzOPUjzqi6@bn zJK$hvv@_k_nRYVvCvh8&tTwKWs{BKFQyoyg66L8&046J?9YHkZ1~?*-C-ObbL(u?h zR*?Xqv6p1J!z?mge#LZo8E*lrl6N*4M!nZ@6|Cy_!$*Kjm1DO}K@F=CHxapP7rJwO zJx{@jT4|D9{SkagVC*Pm%v5>gRe6cEqXlaY`%rSK3g*;m%HFY+_z3)0fx%oip%b1lGRHZyq+>?GEq{> ziY#95r!HI|E5xKxmqbJ*4cgEvv;j2fiz6k3ib?w4z71HLHKW^R3qvB2<6hK!n{{I zBqsVJ3MOZ%Lp-&;pbz* KzFzqG2I1!$g`aPNcV7Li0+am+*we}9%{&mNAy0fiCs({xfZ0`I`WV@fkvrHA zN~o!^a*4&Ft(RoT*9V}5j3S?z2Q^|=%-rsEpBqE1nBDD_=&|0%OzO2hM2fk%kYSi2 
z0>8^)HVhfB`JjjKvLPcKhxxE%iut$^Vpt;*y1TlB&@322#he^;mNyZ2H8yt=7uHA+ zMWQa|<3@;K%|}K>nC9l>A|n#NJ?)4g<0qpcKQJ?Lu_KO9F((%pkqpGl$VL4OnPNU} zgaX-7kt$g`T52r5XlWG|2BXOYExR0n>)=&vio+>%tJfWNCU4p4j(eM(-grD3x3U%P z)gw!P?+Um)`%w4PpqK8@>!;sL;%{Fb|G$^R??hlK&GQhjdL-6hk~l{YN#Od!_T%Yc zd#gvX;L-XvoHU7faUcpOuDSQuOWNy3%M*vQ9oo>t8~d8&w{1sfHS&k|;gDxweAs)#t%~RaEJKbe!WDpxib$>u zMI+ioaLxc(KjQ|#3HVtrU~ho zI9zBVdNi_5}P zl}oRH#jFhMjJD$hu2yZnUi!|xu&<~Rzj|-{m3uE%iF6+|FZ+vCVsDV0m5s{xi&f&F zkG!o#Jq~ONc~Y0zuI z04lmzCEBQn7JarVq7MX6(ZwpUHy)xSlh;lct3@EVqioiX566Wday(~a0>20czhEviU2_mNIjwaA?b~J&GOjI9e z0Jr#LEL|g$V9~uErz5i} zw^+r|b&XX55h5;!SS3<&k5wWinaJE?6<~?c8G(M}kESjEwGja32>A})tmB~o&a zRU##s$lPKTS;TjaRSW?lDu-CbQgM$}EESoE++r0&)wQnTNDwhOIUpNjd>oLa6HZhV zzjlvR90?Cm@pRlp#nX|A$}LuLbX{YWK!k|PAy$c$++&qUNhUJ4SVeZTIL9i601=f# ztYWFS$10YJOhj(6ilOQnt2h!wOb)S%r{m**rdajy22`3Kw+h08RzJid940VFMTZD2 z46$5tZwxXsnWg<^11bd!72BD?-4kM(MLi#iTrR{%0?ke)#t%23QZV7fNKG8|LU%h) vl2RcS7=lzTx|0EBB@>&M^HPR0+jI9fh7J*37-HVe%g#(9zR$kveTV-)ZVZHo diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local deleted file mode 100644 index 19e8b20ce62d6ee2ec51efec446381bb897032c0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 267629 zcmeIb3zTF>eILBHdvXtC;@-bIh$0Hcx6!$uFZs}*`Sy)(V5 zMm^m_ch5>ozI=wn9}r?E9NWjo#y%&B9ZpD`7zi8)Hc_x+9AZd}V`HH>0c)vc;quUl2OccpK|S?1OEU-f_g|G%nRb!Zp<@0Wkpb$UDEr`Dd%kt+-uDiFEc@K@=fAiI-Ue4~kB5Ve zbKSGO$+SH_HSJH%_GW|eDBgF^u(#2FY&_iTPg*a8-P6Z<+x__R)r0ZArN1KoMR8-P z+1dxYH)rS2UmHiF)(^vh?s#Xmy))}(k4)n$LG~2vK+KNa+b9p~LO9Y+HWG&&_D=PO zQ=Z#8hW-5rZe!UU^=D6vCy#fh8{_SMd$T{9_S=v5C!_wbeaC1p8}x>QxAiCONpG|{ z-s>8y1lEOs}Bz3DV=!{PXv;doVne6Isa``o}7?*-A=F2M1h2OnQpeTSU=;iFbS2?w|2iy1@vhdw zKfV6vpJI85TF)!-vH*?MU7`5sJxTcBOh_@Rj{>F4z7{^xfV=(mac^dH_hFlhFX4Vn zLRoRDm}$Si*@+v6IE#x~FD_|mKfvlz8f(zMwHCla`egRt9*9n$xYP6yCVZ?B!iuLm 
zr{d=Eol{8&kLW!agv?;n$^{g82uSHRp9vzkt({@qymM!mgf`s}B1BpZB0_^r!>={q zCU4J;n>TH4hTs~hBnX*6_Gc2_7b1AMo$z8f*_#Y{r-uELoz~@~s$qi*7r6}sw+_NS zl#(a?&F-kTh3X%abv&~2rLcEnJer~Etv^X=!3P^~*p9FEP1EV%%qU*OUlU7lB>pzn zx&jWMT4J!-oAq(U7dNgp$8aRMooT;|ik8O*qchnf0U{Orn+UG4l-Q)-+l-g)K&97g zFgs@}F=KKH!Nn&dxX#4IsX=eb6;-HQ98XF8aXqaK55qO_E39h6lw#}oq>8$1{Q#<> z(lT$YwUD+Cd?lr>3fJJ@MetIa>2uELJY&&oT9*>H##QQ?^~5Fa4DV~04JuQG;DA;F zNF24CPE_eV&VrOXvQE*Th~O1Aif#+OW+j{=?K*R$oBMyT$QgI`W~eTS7xC9b)hj1e zLXK^Nyp}3?hWfwh_80}q9tx4W_+OpMensL9AclU}q>Ods;;2N3_vGK0qh8Lsy_#EB z!akG>dfl{P?akuFc;T+w@4wleZOYKQEW*f%-srJ-A^rcvmifZ2ETM`ucshbR45IC> zRBhjxLPOhRw_Z(S`3kh-!#1&;&PLJcr(J&fFAdeEhK|zWvW);tFfBa8m$KpZ zN;63F|L@IxRX!Z&qR2b~AZdh=lFvC~0`N;_3@#s4bW0Nkmp_&`YrHT@51m);`X%54 z{A)ApMua4Iprw#BtB`9U!~kEm$}IvQV8k)9#U%zYXeP#fu+OK!!k&S-u^EZe2itMnI&145ZVGA5vg5LbiE@_>>z` zK!!=pw({lyH>7|Jsgs>OQeZ=E%LXa1BIMhdkwKIj0Bn!~LPRG(d!)dI8fVJpCf9}} zw++B6ZkbvD8>GNu#+FEd1*LYn8Ni4XSP??@Xked&2e&?pOu`B&ARp%15MfII4jJbI zJ|MFz!3S1I0eM<%NsPrDdC-g$kWXsdTGAVTBY>wuC;#T5^FnH28@XQqak| zdQL$cQoz}@X06m^PfHgMu7MQ51}VT)I2W~^bs$I~XT%68SRn+m`kxB*Yi3Mt6B5IoRQNQM8o&r*jrz#m&61)a8l6c{JJ(ze42xcJ0( z{``Ib`vrHTXbdQ$HyaH+Y;5c2*yAXY#I((>Ym6_6q1yadDw$Ur(?xCX28t^%78 zFjl~&dP7Ade=EStY|!u&WU$&*V8t1EO~BYv*-*2>3akkEVg+OnFRXwN(HWmTR$xPo zGi7s|YeUi-D5UhaA)CwygAFhlQ zkO#wr0h6CtVFky|Csu$5_r(e@70yLVUgF%{r;SsHml^ORt@ahuliFjipOm)b!D zG;~?*D>&wW6%e_Yu>!^h=(K!61FLrsj#a`6s1S-t+Ug`Vq`TBFk=>2YW_&~M96DaG z;)#Q6^dy4o;8@%-oJ1fQNrM`sM^v(XoBnMCbe!TFg7L^e2mTzE_wv8{@BV@%eh`+c z0z{~ZD@!B+of3C4n69`UOoS5xo4xZ}29bO-g0~vrKQ;oIJw+D+dJ=kdAs{stXYdxD z5^VTdNTE{1p(c|LC|+)#g1>n69e4zP*nrzZg#nC01hh2tibFtZVeQr=*sp3|fR|)p zuI-Uv0TG*rP55af>vsIDN$kepvkf>_&3uGrNV8lfkx>%NR#W*1-AYl^`3p+E(rU7q zFGkR*CQYGhswT~l44YW{od{lI$(sHk1rD=QcWzPZwX`fg2A%jwm9kiM>_UXkTuj&f zG_O^2cAJvoJX?Z7_|8=H%9enU2$4f0wDizrTMNw8BzSWbsU&{(pmkthiOStqCpdJ@ z-;pZzEjVr!!D-%uhzghX6ajqGK7Unxs{%LOiLIgK?QkkF9@%hvwIWN6;;R)XTVHFb zzIE}mM%$p9AIMPIOgw>*!Y1H1gcEF$6b7UAR=+puPT#tdotWI8OvaP;as1OQgVU$` 
zlm2LJ>+P@O3oX@oZ0`{cX>^(he|N+|a^k?!`eJ&jK03IXNq5cGq0$lj zi?Fboo=N@g2ySKV*KIPB-so|1R&h+`vUhzCn#-=$NSKUDF6fwKkEvuAU`&Zvj;Gg~ z@aOR=IV%}xVO$4bX^d_@$!|l^V_4{XQ|_t&<#g;!``I3@mPNY-D=G}~9um@th1$Cj0Zvm0k}hpkO3@>^*T zF1)K6IE)T~x6{z^ovrlBj;+M*<4B=N1#zL`c<o`jYs`y##uW{sCDz_ zUdH?vKknud4`p6vTQhz?g4N1~NwpU*+&UNz6L0-{QNXvH(KvY-z+kl5f3hg9R8INR z4Y#byw;x`peZkuU_?n1>(dGv_n83;-dl{m?DRjgj9H! zlTNI0^xM!1<_vA`y`XWxU$eEaoSS%0{!PNIYuY%YC1Oa$X4F&q%8#Aqp9O&IM{OM> zkvg+fa7EM(PHYD;hj+er0$)j66=9`&g*(JBVFdT-WJ zUPp49{mf5c-6mC^5>jQU#8a6ed`;MxrJSLFE83-^D(f87ysO7WbDaCJ zv?QI{Eq)>eC)^EX7NRxlYZLxjxyn2<>1__sYUY#+*dBrPE$}kkDCv0JbJOmTyoi-* z!*=22{SmxGH(0u`l7~1m>xtdG3pv{duwRJyw{oyL3(=Q(knG;d%&wb0)Gj%cREoBX z#P64aK<={>qHr|_G3qnnUiJtlNmcft`i6hpfD>g+`N|2kL1Z1QnxtxyLt@kbm554Y z@ZJVI98yxVoo%$s@62SpgEn~dxAVPD$#DtL>Z^|Az8Z*jSKwEgveh8>*A-w&tcmmJ8aipot2>&n&X?Az@&%{mi ze^n5dV#bO^2>3w+$HTcs9xYIcp!iTgou4o!bX)(2`tcJA<&Q!qg;lTcv z#bg|Pa7GEOraG!UMR1~0YH`^Hexd;nhSRIq0^`@Xl6(F8Y}z%D?0zCVO$2PbzX_*8 zi=*5qGi`Fvst;S~LjfftDzq@8P1+$p2yco{hUt(+Eh~$mAOI8{P1QJXfJV^dkcXCJ zKDs(hb@ebj7{93=U1`9KUyR@ZZ?DQ0{V<%7#{lvME?$at-{Q!sofvq4dolC=nyhOub}*&0WhMX}~=pjqt=^*hiN=qW@3)Lj%JuF&h8Oz!1eQ zV(MU8Xc6!PD)nXqv_gSiJU!Bc4@7W(5UHiGPFw1@%rdw3wRgp4%k&N3^j~^*ei)gt zQ!Si0xbY-)308Ada}+n0inq9hTJYl`Fz3|hG65b>&qauLXJ71JlP-cym7V`c_1p=L zPQWRhIpf*k4JrVRL?@cbw(PWp46V0fe>#Nf(Od2k)!S|HE}daeFj2)2g5(~aj|*mn zf=)}?J=eEFLC3|e?C%mhK~y(-tx&u(1mZ-egucOVRRoSFmK%wSBROcUw)WxQ3MvQ` zWbnF#KZ?W(H^7<%B`5U*JFG}`OwpkTJ{h8npmWSCo}!EhC{CoJ0NI326=xG_cYLvL zZG~vT&>tqFb4tHKZ&si$mT$9^@<Y=I)6EN}Kh7Ld{ea0OY09&0G{EOe#d)Nfp>{7?77vLc|2afxL7G z(gOzoc}1EC*^WS1Aq! 
zgJK(-KeR)~qp>Cg>PxAXl&1&+g;HxlgXw%Im+9eskN87J;whh!4wHLLXw;uQF`l4< z1UJUp{Wf>baegXlA&DxT<%gpHo6Jhp3BwICyBsXh9zt4W&VQ>3TXktbn%`ATXuWn2 zpr_C)hFDC~rBj7>H^6^IPBB4zb|!g+dc%Jl>c^_+$i9-O-uESY@o(1}e6ax!)n$-d zq;vNdf>zjx|7wm|$h5$-?z{d~jiROZy;O{}vg4GKxfrRf*obFG@cBl4*$SCv+Oi?F zCv}>CeZ;v7UuuOkDLhqMYRQzA%r}3kVJU4>Tv+kfLu`dtgulG9D>~)cY&iwC zYSSx*xonM?idI-F`yVxi3kKn-O$yk$$8{xRX?2LuAKiZNLexrV2)@+-|80nR`)0AA z+Qg5MA^-jP3RVm_k*)E=|BC9X*XR!P`W0GOmNn;SAM;~%IfvM?=K$KnwH2=ER`)m8 zMm^69@Mlffu2*E*os_jXQ_f#9s7->RK@|2np*!~;gVXV5n4Noyppo-w56Au2CQRn7 za1NZStpG_K=6tAoMAf|sr{Xu)s&2%iy{dn%LMd-X;4tcy!*REvjbZUV=7(hNQKay< zD`DSeZ`SLc9#6J<=uB@Ens2}VX5DR8@QDVz+13keMI#1;${E6TAXe;etvSAxqc?n~ zX`f|nSyBO445&YM150|=vaQBuaf(D?!xwbU_-qs2QjfQk&C4xU*eZZ^oq4L)Cr3E& zrj31HXvpq=O1FRFQglov>n!@x|;lb&uKg)Jv4IOT<5A8`E zO}EF>ez%uh>BIdhy(X~OmkYnyu-|p9VZ1C?-Z%0XwYplRk;AkIw#MFcZk z)LNFNa9?6}=_34m1uV=`h>E-{yKF3g6C`g6ku_vS0H0~X8CL(g4bzomQ;xz+-j&=H z7eY+-t!*V%WV!V*TA)7)562HyD$p$l$NI7@&DY@5O&H8cm0Uu#etV%3-`$l8NQEV3 z(Q7sDN38MhN4$lq`_1rZ{BV8hM!S;!euejwc*faqjg#Do-srKcuUaDCS%>?1g#~K^ zr)dHHyu!hQatU#j08_y6{ipStKJr0Qk@ISxETEGOEB2Rb@b{csZ(yiW zsYW^mBiZDQo`lQNO!x0euGO*0B+}WcDncf41-FRA9_*DHnO3KTy=~9{_5j=;-{%p) zi0I1tu=Zs5cvE(fSa8L0Y0COwNeel%c0DHr4yXdV@JRen00nZE!*HZ&^wt2KQR3+X zvGG@B|ym)SXDA^nF-XQ)v7wG5uamo&B7zDWatLh;C-IC4~Ja7BZwvTUCz)?8#&raA?J-c)WWufonxB^6SZjF_=ovvnP{D z)4L}VW~;K$P(7K92hW~N$^ny+XjluKXk@(1uP2kTZAeL8J((~dYuA1~nLt={O6vWs z&^?(jFvC%;J=xKAIk$)D$pnT}&TQ_6VNCxQ1 zc(N+536}!P{fTE!CS%|N%YbE`y?Zhl*M)syk#kv67Y)zQ!!x+jw}Q+rQ_L#}&I zmWOPwo{VQ*?99l6d9`Bw^;98N3{MN~YF_=ovvnP{D)4L}V zW~;K$Pzg!KgJ(}B<$%dZG^~XN|75&aqbuG$nUrlqO7iN-gaKK*_Up+6!lF~^`Xd?{ zg0GvW8aD31eKt zHti3Qvy??Sk}(M=upJ~1dI?0U$->5Z^u?q5;lB8|?~6y0SYn@hKIr$tA8%T!Fv4w` zuO&Lvs<;FWbjLfh?VVY7n%=xNjhA3?bpgH4yjCdn3{~nQ@KF4Kw^BJ}@b=)9~XBcoWM_)m+Y+nj*3JWgvC{{JkHr zDAg?dv!?4YWVobZXQ@(MsGP=*l}qFSubjpb6m@8uW}4KbTA_0vJ3ah8{1dcexO}qvW$1cOhc8bC>+nDt95ocz2-z+gFQ~ zd_&@;8h0Txw{w^L)4|+A<#+I9>D9}u$MZP=muqT(V=wy~1Basb73s(@ 
zbZQ`CTJjJ|DNkNb@+7QM3BflTa0|;^Wp>+wqxuO?c10{ZJ%jAa)>XKKhbZNKZlO*g zE5r99SZCR(E^Q$m_(|JTOok91{BZ=5Tew*%7?j#wDXaAY3mShG!RuI#D(e+>KWvSQ zlJIg$`Qd}SU)8YaGf zKLW`;O;j>B+G{0ggzBkN)Q!-Zy0S%u*&gi?KRDXA}VBp<%qaLIO*MKM75h${(eLE6CJEKzxxq0uLNBZ4&# zBND@bkA#a*;LN{{(=RwR9@Lg}HH7ngLxh1pacVahzMc(%;7^^J42DTD6gZT<3^kV~ zgL%AMjb13>;G0hE1@cGH3{$Jqh^;gjn; z6ew@YOBm%v03?H5@RLo4Hj#>0Rp~9%_i76U-!$95C-Y1YVe-V5OJPF9R5i zHv3O@^R6_?DoD#;>pipx4)25OZ_;rk7^-J5AKo%xF~slz`i1FAKrv>!wc=y)j% z2uaOf_NIgsChft~E>I2Pp7x>Tlt#J}hc&#Ma4Q5@^EDT+hUJV4A|A|?LV$?9^65$g ztVxP!_=XEy!?G!cjp{H0OU(hUVIMDBQ(@;GDv(#v21$$S!QXZPY&hnGAmJzJAcE0| zAO$e0uLZOGqVG%f?gbJYzV6sC)37J30f%(ymWQE>u?D{F*d{ZKiUGo@VPojIwaE;D zvQ;T+3XOGZQDAokq-~;8gUqrggb81HYfwt&1s)Hc-LPGH+M9gcH)LZ>hC0vKE}4b` z|HO?K36NaZqG(~Auw60>$+lJxY?n|~@Rm2l4od(A!lIeDV7sOva6@eB9KEn#GKE$N z`*p;23BgPsTqP^nxaiX1k^xHFgp*y-pHL^*mcNsA*ONm^knMqb;R zthI~~gB!NXGwGkYIC}Lq!DJ{$Y?nNq*_*7^6=M?#?OyKoBC?(7iuD+6oKCkhIW14R zVY_CYy|G;~gd#uj&9WjfI%2zIXr)Hov0Wo+(O6utUGpFTuw61pIaQgw72L30GGMNi zXInCJ!*&Ix#U9K7Ulb_1V7medNlGiHl>(;l3VTjj4&1O^kxe0~`O7|#Obj{R5!>z5 zh3#U(=MmdwIOBqdOY566BjPQn8qVKuB`0KXeVhT15LTWXv0a8uF>E*>jtn&yY?pal z5-w;3?4bgAWH3lN47STMCj<#UNf!aaLogZIjO~(XVDPE5?8RfdE|oADz^g5G+qX-GR`BMI?HUM|T4INNyQU!mG{j^G zhlbc~-!2(2(+8KTxMI`L9!NG5IISm;*_85=6bN?2cF7>v1pTpHj-E@>;s{tLEen+h zm1z;+eS(G~w#zf=pSn1DCbTFJ=WhFUxtYppT`@M1+Sf-*!BUtTwrl3u8`~vAcqK8n zeY<4nEP}{1>W=LiNsGqfg6*0IIlrmQ4cisC;*wdQg-SAnU4G)17Yy;*ao;YP5ivy} zojKr(0!0^WS0EutX`!)QGNNMhj!E`z*e)5A$X-Zl{<05~woJ*<5!*dl7q*KDpGRz$ z;fxC+=N-JsB_jgGj@T~4rU>0QAdU<*7i^b#ToNwMKWu~N2X6azS>}Ww;V0=LK)B$f zcv@P>#%_bDk{vy&L71I@&YU?l2n=r6F2|b?9)$BrFOYcQi0yKWiUGm_Yvky; zwaFJcwu@PG#CAFPhzGRz%DeVJ;kIx0nA@2Q3^b)ZH^B*mB6Biy(2i#^5b79wssY<2 z19-K?ZrCmvTEUw;wre0BLZ0vexM{Ji2 zf=y6ph8=<^N6#f`aRellZRkdrjOW-0yY1U0gJP)*6|S$;i`3QoA z$fl6g{AC}=!3=Kfi0vM;!FIVKGBkXHXP^Or!R<^2hBGdRoOi4yXE+eH%djazHx7s+ zL(K)-WgeG=i}R1|;`upZyDW1;knodq5g=T!T|BKCuw8~{YLbp=sBomcHlbY;MdxDF%Or5Z} z2;SL%JA!CJh&MqrQ3M@-@fhqpeQ!W=cP=X${=9FM_2}vqe=fL}={+OR|E_qBpqgVY 
z2Kh5NmnfDV30~>i4mJB?ZByy)A~o5hHhT=|SMtuv;BAsn2=8fx$qC2-g;*3wA0ee? zkWYH8*$FoGn2!48tcX^*UMmtv<@+QJmfFBEN)CXh(N z8oWJ7WG`1PL39vKRvM|mKz2iY_BQpf-5%|rc<`?L~ESdzjhLXNhGJrGdMHpZ4UaQ*|g36 zO{U5_d$T)thTZgm_H1W+*l%x)N3+Rz*xl@J_t6DTqm6Ujm2!OBRYVYnWrkcAtr+&F zuL_U)(;2#${cPO0dY5Lu6uvI~gS+7kELT`}nanNC7iSTf@p8(Pi7S0H@wv3p#qOD` zr!vWvXP?II%c1zD@a=r6UYNG0#wAk6adE1qQy?af zyOp*Q%sl>_GGVVke9Sr!hpvdRI;2LetBAk88}2J(F?OpO+$Ja;P<)sjfd`5u zG?H7`k5}4lMkbM(^sOPNJf|#sP?m@dZp^8uDtPR_16NkD=!@e>7N(!s4U!Al9Sj`< zva(ox4J%)bnWq$z0GqWu90fCh+~hfB;BR3SV81ZJ8TfkD6+YjsW-|L$M{KEW|I%Ja z%B*L$y|pupn`ouoY5`ZjTj0c_iZP7K-<*JX|4B}Up-Lh@@yp~+XS#T=i7?H(J;{~k z=Sh$*EK!Ev=r=<|F!&q^Xc}ye1iCJ^042V1BzQuwITBPS;2cS*9c+%I2P)>+Qta9) z*1tUC9DFN+7byaWu!~&sF=mTYq6u}uW~wBz zlU$6-1GH1@Y;Q7cSKpBpJmQJF43djc*+ntki%~h-;2JKTDI!;TLHZAAqcVzAi#R?f zinyEyE9Gv|EZj+2vT)4FU%*kZ{L~UWPtJ0@aM$?m?c~L(N-CGbOZ0z7E!D-Kd@(4G zvgLucu{k2UL4=0GH*s7nm9q%yoq)@wGTZ|&?VauO*U~+YTrGde`o5(-@vimVTjS0C zkbN~`kE+OqKUjc68Oh^_?#WJj)*nsBlkTXu)t{1AFW$94M)=F-`d>8B80q!(``4?W zPV3c>*!7pPags;0iY|HGv__FPmd4NJJ?*-2@s6~rW{S82HB%(sVy1{I6Ej892AV12 z?#@h+SR!vyYN%*G{B?LCYgOomcI##Sl9|6v3N`1!

N-HOWE7=n>?N7*FpEr=Uol-?##_LuX- zMev^l29H@YNJ=YFcgG~dc=4PMq~KZYm}~GS0&}mT%-Pq2B73hYcyW0#8X;Mrgr@SjPk zM$-Ke%B4|-d9QX*O!P;zOuDEQkECExqt^6jd|Yl3_KHOdqY_DrnUomRmohF|Bx5O9 zG->gR5nxN3;7+sK8xHd|i@c@9{3i!+gG;25T+07^MEu2y@I*)W`Ka*oG2!P|2|r&a z{CvIe^9{n!H^O@_hlRmtFw5CF$}O?*D2!w`(R4aEGfG}%`w&_7#gDyG^H_m$=0{Q` z*BU>~Q#XD-uO0~fBPROj^HoP6`*1M7<`GEXCv!6MK-Oz*M_;Tj)L6O1V$s%1GUOWq&_YI$&&-1wF)LkmR|6J@1FSnXRdDSO8@)7|MkrNz0bV- z9Y6J*4?KO-kNo=YEy0gp8Gq>tScN7!5i4GV=*i{iMEdm{`kJ7xZ$n>~UjXQr3+d9$ zYPNE7;PR-ku(-RqcPZWj=-B1r2)}qAG|@gj^w8U*DBcUvR`KoG{M*Z5ACt7vFVS*P z_M1FEr@z?)jbTb92H2f_nEAewe!m~~?9i(UFqz!m06in4qKNmJbQX%hA8U zlaOzwN|~MXPvf!DnV#(pclzlh&<{DfP5-`(gpmtz8MQ&^ql4_c#0>tX<3_3pML!os z@PEUeb^K}rJ6g(pnn5u4i+Eu4g9Z18VIP`AuX}nt+3L;W#p!HwD43jX%{Z(cnB z`|)?^|8Ktiel-5ReEi#xwBv)Y9}U($)!TS{dotdxfBbvl%Js>(x4F@q&e9pA#}wW) zIWtYoWqJgSpMA5EP6YjClKv+9CjHIwl~n#o<4ALL5f(a)=3ck~c4te2$)!Lw*< zTt)|TX9|rM@m7xIO91}`y8^7k?$^I{r#CFQ&2Hp2hhX`K=zyzNK-{^k@S6SmF4+U| zdyz&Tg@s!Or~!;&VRNuGU4{WVHu1@B>T0ZtkyVq)Ax;=edNHt1WG>2a_Zrx98#;~> zosg)SrRdw(MfM&zh`a-?1-kf}PG>Me3pS}x71M!jSi~=vrK9!GC)2N|(kGC=A{*J< z*+`#HjT}|)qv`e-`CgA&|1SOyry1E~rvy_d7q8%WsNw*im&9LvDVQ-`V*m$_vpOkHfHjhKHe?76q!8{RTFO9YU9wTS#!e(+HGJdR(; zAaW-`f+Hu|FE5_R{;Dz;6bJK9V2|P~cMQckBofTxHj1Q!ClBpG$zngdwF>>_3WTT# z+hPvwNe;pu{JCi-UnN8_ovataa4?-+ox0g8u8xjGsWgU=?{0A!4aLH0qrDD|)*tpa zX7MGk7j1gj9Lxse(IH4Rkh)0vHA^KbSGxyxA5WR){1O>x9{!io$ei-WMvE5${k25m zx52^VD4tFFo3{*3pYBi4@vVKOVW>PLbt+U%9N#&md{m4$I4mk#i8O2~L9ET*P9>kUR}KFN!PM!akJ z`Dm^EvD9+1?T+l9kY}|PSFT;@TzjPR{ImF*!;_89;iHGo^sd{y;q>WV|HzH;=fAiI z-Ue5(@XPqrv_Co9OO+n8!Is^16SaQ0w2=T6daDQHeM^5u{)^C}RK&PM&k@Sl$0^{f)zxY`4W*1Hcb!F!p9mRtays@Z<>UY5FXKcG6>Hcqf|+y_(U@wvvAr@fQin>?)l; z=Zu_VXsD<5#x_2v7iu}PRPtGZqr{7=>1$$Z@VYWw*VSGW5eHW#mf8Qu8BFL2=H3|rJ-jjA2qWf4X)q$tCS z5?kgSX7Y5z->@wSXlP9`Nc*0)?pgcpvQLi8feqFA1|EfKQ$uIP8PmdZfn4$Om*iTw zr$6$@Q!O8+As8>ZHXcp$wO-R?b)yC(KGA?T8l2fmFWOd~yEWd+rg^5+^Rk5yqxzLm$RNP)i?%5ZhFFNloFL@cBRjq>X)eQw}3auDSOTrNQLkHydzs z0Dr~}ei$=CEq)k6!R*l_#AhNfZc8UAkn1gkP8>%eE>9VTmNgw-_98YO@bLziE`KZn 
zzR+t}3nd9*gKnGfwJ?E?DATlW0!a$V35|%gXfLE`Ka>*0|QEJcLpi z8-RaphTVve1P?U9Zuw({|9SAGF)R48Rc;Xg0V9r)wx_y-R5Ek`v6#1xscZ^PJ-~p} zG6awT{=f{@pqZ2q0*ert!y_i(m9>C^OHa4}{*G0=VQ>Nylu-zwn4~obV}lgX{pT!t zTYaJ53n?ITVB@+!Qb4G-HJmX>L3xqf83Z6Bv;3&LYa671P}K5LAO&3hQZ!a_)WZ!< zAkzaIq=0-_$BQ3QU^4<;_RseE#*HJo1+c(K0hwQQq<}oBlbtPVj7WJA_W$d+UaHhBT`^R zD40E(`Y0QufXrCkh8RYXwIVK^ESU)EJ(#_o>AI#2x3EGA2uZOeF&1;=K{HZ7KB=)m zZGYk=DS3xt(3cfbK-m)d6l=*9MWYvoCSGiig8bD>wix3!32-%wra5Y9b`!X?WBC~{ zHb?>fsM0Um!O}dif=;H081E@CVg*zPHSH>(?_skpJ!h%j`h$Sgm8SOKA^<)y$1q_~G03_wP<jW zdGMt(TN|w4XuikZ9xK2ts(D`l2{Y|fIt8yDz<>mBu-aE})L~x%k&E%|3Uccq&*eO@ zf}@qN0xE<;6+!nE(DTSxm)2ng4F0R_=g|!>`LRq_tjA0AqyeuD2xPKWyY|V4HT6SH zSOFQCkL-%|{cyb0f)!Nb#Re-N6m@#x8m!7#fz1f#6DuI|tBw_rCv~#3#|mtySz!fM zgnY3AGKd#eK#1rBXpa@xP~%M5j1}0BOtAt(`CeoTmxhr{!fIau`LIT$mRNxWB|EIZ zicsBH0hxpqRzNmJhQuc zLhLKZ*)oC(V1pIpDyfPU;K$5ZK~BD;aRuI{(T>)4UAk~QH7l$j=R)v6BUXSP)0zZ* zf!~7z8?4}1Wvl?VsAjBygc*z#a1cSjGYV|>6&!QG3W!|HSOH@L;Z7>Bc?aQGC9Hr7 zp$PldSOGGmyVNg{-Hoq+d_(VCe?s0nVf|VM7^rc82pwC{^1r2!iNRV#0?=`aZwSVt zcALSw|L!k56_zvgUW2e)=|BX;)obU)6*?vEWH4QEJ(vh51U7rFspsnteMuO#q(>Zh z`v?5TMnJQt=t4kGLa#0aq{iY5-ojIY4POf>REjv%Wby&UYsMa!n^wKW4}aKz+e3u` zj6wvoH1vu?Kx$#_)(~sptJ)XfCHbCfd&)>afV@y@Y9zZ8qDm1JKHGp})yzj|hBSu+ zNMw`*vsFcpWYDU%Px5z0bR}Y^nly#3shTv4D%N}I1_UF5--+NgmaJ7iNFmD4Q1rcv zoT+nlSVIKpePYdP)tud?%y6D9>`oB;P2Hns7oUp=hcCxsL`V-^wza@aO@cR9kxJrc z4+;l12vB(Oq3{4tNv7xg9jRj9g5y>ZoCfn?N&{bpzGCk71|> z0*TUWeXXVXN@p?H2HpH6i^^tF7^&axi3ol}IKdWQ9J z8&Q&q0!z4yCGZUo1#6Z#N(AqW_#?TMJoKjM+6;6l7r7@Zz3QX=M1OGRu^By(x4h?n z*RHp}jxT{!+X~(z9Mb4C5&rIogJeg?()uFW(UHDv%H*VG>rm;A4|IvqD*70_JAzwT z`*oWPLYW>XcO}GRE_>Japt9rPr{wYtK_U?vxRXT zV6OW?SDtACXd^w}l)EZGIiA$M^6BdZ+2FOR667OrpSwz@Gjw6hMt8c=8}=sMjmP>M zk9Vg#8|cCybR$r9bJu5ukf&ty#Q4_XxHoH)Yc!IRk}FUpK}0Gb&AeEtDA72{X&tG{ zrmKD2qMeePJITEUj#kBoJ>@V|XoVG8hd2Z{wyZRt-Pe~pY;9VR!^;R+-j%`2T!2zn zUkyNqz}smkz4a=+`YN&eI8ta*L0pAC6n>YYgz1%9=qQwCy2xMLq=jz7OfNOyP6ksN zz-VWyi>_*$qMI4p+3kq!b9HNqQAIWX*|jU}&+Htc#8-?jf2`^k{PJcMF&Z@psE>ZN 
zl>@QNWQ*n9;o$Lp-FlK$9cZD(Mt$%{5v)~aZ9M8vGtSysLam!W_cG?c_;ELvcqr4J zZO!<7bSI#@_Tub{)Wlo=UKBDJ7cO`ickgN%G8JJJe!2nISLQvx8Zy5k(y|g{y*dlM ziBB|0JTz9wNhOuHH=tixDzvBR=KV}tT5FL%BGd?7kD1@@TUYVs?)3jBg3Y=`z!c=Q z6-v$>)4yocCxC$cRBeLORE>oL2|XPAWds{_Sxvc%tBv=v(O7nes7XM#Mh^!Y=ep>g z`Dxp#g0c)8nkEf5k9COyezpns2H>hx@OS|Ub#a@_>>6n{pZRN0=~7x);VY zj%(f+RdoqD!!z!mLM5ylcL3a)xwG&|Wpw_Qrx9Gs2!+}rOBZyB7mTqMi#E&s-jL5k-2 zelFs#(NYqe-x>@P4=S&feaSX7XE;(!z-zlmOVX*`;wMsY!rf42AzB)&Rf>a6_-o}V z^US2TIY6tKQ!ZdjA||_#rHQrh7I>L%lytoAxoP)EUc@R^{@Kx#nuPa9@Dkl%w+@EG zJj9tDO(M4pDh;2#&|TFjEAXE`Xh9~*~Yne z=??UDHkh50A$jiO<Z>&ld&VAX04>#d- zcmpnL3pszGuUdjjQnSWrIe&&~<`H-(exN4JbXQb10zVbO-QGGwl^0I2?3NlfyJ|`w zNznv96Y+P8J7s=;QWXD_jAuDhc%}i}aI#t_o3U@pi^-cNXQuI$(qAW`b(C-yUI&pc z2{+AFr?fTuCGg_aOX7V?e?|U_gdYJVzkpN9+q|ORlWeMR4|l38Qms>@F+f#0{dNQH z3}yT|V87LG_Wdk1y8XW1F2=mDkHORN4s_+Vtp?nguj40M313C**MA8fA+0tTab%n}A zDKy>EG<-O)|79^5M<1L~LaV99YAjRrFI2IQr)%-s_Sls4-Dg_@}<0LV+Ho4KeOXen%yFYJSWymS&G zCKwLnr9+S&H~`2i(nKKd$3lR-l&%Uq3<~nn(THRM0Ed*OsL;ZUY5{ZUxH&J@4NMr+ zmkzH|90~@-Ha34~hmJ>MO$gMNQdKLj7SxxHOSw@k2>^QO@Dcx}9H04l%i#3s{se6a z?MpgL?roP*fA++9f({bg7;pF6+&Rbjsi-<6p)h>`et-{00XCVHsuPAAWOlo8howD) zRG62J8TeaG*s4nd()_M!LhH4I06m3XF~lm|8q{|;z<)(fF+qHGCV7Q=!+#v=->T@y zE~+TEcFAAPSc5M%;GwzqoSaKRuvwMhY6_qeWPEUgX^`lH*wXJP#Ctp@mSL)6FJDC~7YckVp~r{m2qJNFbpBj?jT zI{eorOy;d{4xFp407)I@e5iXw)x8O);y2f-Zp5R#s(-FRDQ`yLFzS`Vakrt3VevlZ zhh*+ir0_Sz?M3R-e z)zX{c(fHx|w1oC-8LsVyzhB|~B%X0}L>5XR$J*(QwK&P8-(`K(68SE%4fpd33-*bU zfSB;-6%HPhONgrkm;#RPKds;Nkq=T<16mH>x7nNZx~IpJt==qNME%_D_us53t%^tj zf6uw~CU(JbEjmNqTHf!NYNTT@l1<*|Nw_S{bpM{@S{<8ABAuZ^|wb3$8dWO<5l-X(4CUuIp8x8gMEiLu){l z+l5EshXN>QS%4b;? 
zWR<4wXjUa8^AI^Nr>rL{0u$G-*|d0T@SX}sm6%1R$b-J=s;r#~2fLbQos(aIk5<^j zZYEoQ3Y5sjeha%T1700l_*7ZuIX#Ni`Y0<~uFEYqmjESKU{%SuWhPLIR;%hrz2EXV z9@i{9;!1|bYz^M$nfq|ab#Kh_kZrx~)Xcid-HY&S3wU_IOCeuK{YxU;L zyC)O4R^%eTo=g~nspdR;GMO~Jdop3RDhmzOlgW7S?8&4YFd2!4wa|%1#>@PAGAY}J zl;qWu2?Mfr?bnkDghi(;0!HY*OBk5pNQV%4_hd)g<=h^oCleS_IkUMZJKAP@GU`F4 z=D};(&ATU)39?F0)(Oy)S&F_#vio}$_A&N;>dAQ4RqkGher`QkXC6J7Oale4=bOXQuX^42N9zo-7a9UOgGly2{;)h&{KSY-JujnM?x(ujipB6S!97BENMqVGO34 z^X$oF()8}hgxRVrG*m*8@!;8$NjaEDMw0PjpOJifGAY}Jl;qWu2?Mfr?bnkDghi(; z0!HY*OBk5pNY@8?_hc)z^<)A=DrYwLWGi#&$v)<}cTpxtrYC!;VTWdbj?7Zvg$Z96 z9T_h;(cyx0Wc(08Ix-o_038`mR^>C{5@5NF@ht0Q49qA0m07XRf4&`=jB7*s^Xka> z@d9;ZJWc)7sRjzJr!&E=;od`~_ z*~b3$*X%VI%U;a-<^em|)9E|BP6mIwVSfdmY&vUS!Mf zE5WZe?GKT<np`F$pNJ9V8EW2}G;O>b}(K(YKK9hx_8=zHcE(f)0Gp?}b0!v{Ye) z+caNGbf{HvbaUJps*gwDq4)uBeQ@?-+jD)&GxL-y6}mIg`nW)6*X6H7myrp)E3sLA zRVsq_`@MD`Xnb+;jZaPcle4|qU_7$F0Y)Yk&Qw%>#`9`=^{a_a_mJ&8q{$F| zya8`wxv84VSyNLaHopwS4uHS+0~V#4g@4v`J%$XIH0&%@stc9V*s*enJm8hnSc0Ms zsig$ZRP3nud)xHcgn#1q{vKxoDs^!g&FA%9ZnT{1m1G_&3;eExlo3rEl|>);Pz2YM zBh0D(tha&=9etudX%9w&*`PPsEH40@-A{ga(EB2ORdP5PVNQE#gsH_%*t zXL%{?O>5%bVAP*v1kuIKXpC({eRY=|4=Av6Jthw9tk`dwP6ua3@gn}3=sD)IdYKrf z!&D?Y=uj1;oyP~GGu-K1-m{L}Tj%a^5BFS8!cX9QfXq(jmd5q;0=is%SL;TaiP~^DzQ&M=*m3y)u0Gic=g)6N@EVpM9DCW{ z7&zp-PvTe}jFj@!-vjMlT%vEN$EjX&5@MKrSvePrjzHD7u_w`VP zJcgL0{k;g*S$3*RTZjjK(l!;7MLcnr6#Q`nl3Tc0DHxR6T`8-T1dS@T-l{Faz@J6% zI+ml#dPUujp~iLBK&ytZ@x2Dz>dCMeID7>tySkR0qJx!PSi$3eMxLP>Ig$`Qd}SUY zK$wCM{C)(Id&o_yP2115zT$X3>3J_(KY)7Pg$%vJ)yDO-+Bgi?#ILZbHp&gJ=3q4V zN(8TFd9Vu^lq)V=TuRD-#Z}A%7Zx?MNDW+Urkzo#nW?WYIy>>WaTANdBU<#tWX65y0)`A0x2*lq%B{m z7f$CTB7-mhp@EX4jVpx>B!?29enF?OKZ;~qJN>AY#poB-*$$QC0j^;mFI!XL2OeUS*S|WOL~0ak z-*3ABHXL(8knodq5W#3fkOCOh*MeDo(f6f#mjs7ee4bCbI^MqS*f7(uC#(Tqu;Jm` zj%_l-s2Cue8a9TWTbs-fC|i}HrqEb`ww`d0G-;dY)F89$31Pxl-n9n`H*A-l_9kEV zRW-(pr)lw5egjDu5SUEa5!)q?EAUU;c#!}Z5Zfi=^5Idn$<__qC1WUf%bQ|{B>)3q z(M(*hUDFV?G{j_3&5jBmY?qMF^r5a~ExImo%YjR?MP^g3X9HrpWDsnE!cIqrAj;8m 
zNm?8MNzy9Um_?}elCr$qHR#$A+vS<`PhA{6lXHZTmm{{z%~V$Fim{1=_6s6~xna9z zp6kMP$;fs|%njQmV`LFTrcrln*GO7478h*SJjnS?Wp3E6z}2%YnYm%R0@Gp-=728> z6kV`gfy9NM(%i6Jkxe0~`O7{`+A<|aaKm;xHrQ@p_y&*3hjvZ#?uhL&oN+-U5M-M( zQCc1nAzOPk-LPGTO)+dZAdU<*7i^b#ToNwig{nsL!C@ObKXAi#S>}Ww;V0=LK)66r zcv_mxX={$yF2k?1I@*oPi}M^D+^}7aH!eId<$9zOs%#^ahx&|h1l+J)j!`i{IJL

55)w#zan1PMP$7XiS-5Ow^z?b~JemDPTvz734+=1R4NSiue3<#^-5gYCef zu-%+rV;phBb~#4H0O5c&a`fEVm0oG64cjH-@Ajyg8&ZiHN>46)zOCDEp=Z@_fNsGqfg6*0IIlrmQ4cisC;*uF>Uc%?sNl%(>f6BBE*_PwY_;2G9kE@GH!eI_ z$2!TbDR+}BR~KSba>RByM#TW()UYwotXrGR5Re23T2pukGen?q+_%fiTL=@r^42+d zh#-=_&~UQKiDKMsaN0w6Nwna)VSEKp`oenD!iVy&(L;8xgJW^WaLBG?cs_9GdxQx7 zqT;z5k^!@_k&7Uu0z(41MV00$D>S?%1@CIYj|Ne;0KQc^$wVd5q$WGmmK5GHb?Rjz zcxMCd2%-rg-UQJ^5m@}iWAaC|wj{YbmlX|v-nYtnboC0C;0+VvkAtZpT*3dYc#fc& zV=e|siW7hBg%51$k>HiC?NGBX);5*yE>e?CYO}`+G7`O~5hf=f2NYsaAbrd;8lh{= zPO!1Zbkr|rMYPH-*SMq>l5|d_4)VPVl?!re#3o9wHLG-8ot#H5_LRbe(8>t4k7jnV zy2n#8@B^P*TPRhic>>5Vsm0a`fAJKYqO>t+Pdp71rMue#xRelE0!UxeT1CNR2w(N- zuc+}^S2sH4)E9ixJq+%ZC&T5lD5|`}r*r}DaFa-$-;hgL3K_Yv( zatWe?U~K9F6%1roZf9>(5Bt5z2;CgtouFIZNdZzmEfSvoiT(@u9-X5MyWgPFN-zG1&dpN1SM!{~`OsZp-?Bm^)WSN~O zE~PU|LCimaE(%Xmy6Q9RP3g%?%XG>Xo@k9z``1olFp1<;c?M@Dz0E;?G@G{BzsXd2 zXK!}r&aj(4(4OsV5Bu$n@n|*~54)TF?LNBwX|!>!yHaircNG!D0U6JA(TZVzT9S?W z(;2#${cPO0dY5KPBEByDgS+7kELT`}nanNC7iSTf@p8(Pi7S0H@wv3p#qOD`r!vWv zXP?IJS?AH0#P?^=LYuh#W%x<{H%VJRxKvpwkR(mx|Tw=BKafhf$ zpUoB)5{K<4#oSS*cuoc0O;+wW3|U(+lB;Ftm06n}CtlNrYnRQeM2GO}zO@75TOEE) zwL|`}Lky+EA{%t`g6C!s?)#arZ5Wo>uqI+^?bo5)$h99J5ErLvIt60#xQhvrXPpBv zN8n;0?zgjMrrWU%#HDwEnHtz7s1kittb*}@`u>5K9O))AWMd%454!|nGOS}DW*&b| znXp$N?zC+KvDbDcEPJWBRtx3<0&#H!hRlAA6)}0-B@mNmodYpP;5lW*UKR1t^A(7R zKBSgyD76Dr#N_>JnIRhkF@D%35R+jY12OaXbIOFh0`XDnKzzO{VywOZfw*{IMP|Ro zKujKY3B=@C=RnL6curZdS0FxS8;EH&;Zpt*;k!g#NrM<55R=W)GD9{7V*Id6ASS~) z24d#%=adP11>$4Rnm~-z2VdU}_m#02yHyQt6O;}pKFp551H}>=$t~>1EA2KTlSoba z)(}*lQ{usIS`C*T}OsU2*Nqz5YI*i!8nqnX?{NPR1U7byaW zu!~&sF=mTYq6u}9#2+lmDzv8hq2cgN92ZOFEP{F` z;Bu)9_W(?LXZ!ro#m|G?XM4k)etgOLzNJ0!uJzqpQ%)AU8^hjo z8ea|v<4dzsm$DDy17ua5{?`}a1=+{$MXwW$CyMyJddp7uw|b;?F`RFc zov;_fd1l&iXPUkoL7sZmX6f7DP3tx-?TiOJ7n%&-Tn6FvnEiG1WasW5D zMA^xu{Le?kU#tjEbcCOe3O^qcetwnk^L4_{*9$-2ApCqIy!Ucg7>ow9oSmcG5(|&Q zNOluVr-L)2)@zXqYOZz1DUF65;Gez_EdR-pm7WHS)yEIJx4j0?e)w)5pk;jNHL~L_$rCl}juZ 
zZM`Hzz99fDWEA<#Jg5<~V&-<2``j36#q4gbM3418W>T;BAyUl6g$%ht!gl54ID(2*%v%HDGtFgJ0xUfcoC=zusA2&h_ zYd$h6!ZbH07a5WG?P*6089x~n`GJ{{iyd)XxZfmTnn#cQyfmATm9asJALa;Z_?lF_9v6^q@68;uO3h6pNr^l16)gw1V@Kgk*UMh>$h#w7*%;r4IjopWHUT!e)lI07Q zleeK0@TM69=<2kV;c;ZKSk#CK(;0vCvn0mZKaex?dfVA>hl@*8^V?=)L$ggqV_|Ih zCka<3V3<#RF0z*s3U} z&T>@Ei%3?oQ4#s9wTkEiwko0zunak>2v-0$Dk8OlrHb;8+o~x4ILlEr6_KQ9qayNI zYZcK4Y*j=bU>R~$5iUS%R76S@OBLlGw^dR8ah9V(Mc<4-6N|WpnkJ-U>d-2AfGDft zQ#p4(Qf(}jNr~Ff5{V{_aFfrIRL64sESKb6Po-yaKDqOb`P!&7EiMaFRW83Ama;Oi zJKl~HIHcNqz5KnqU~f?+e)X>SD|cP266ro_UiKHO#QrclD;t&X7puhK0C`)RdK}mk z@}w@a$#3MdBxGtLb7q*Rp2{0C+x$i!Fa%1QLjGcvh}J@kDxyW7t%~Ra0aSFcO0-cC zE&6O#L>~yCqKj2xe=20czhEviU2_mNIjwaA?b~J&GOjI9g0Jr#LX9wh{$VBAo zfK)_|nU0+p`rbLy33PlMkYPz0&fP>s(Tcosa)>b;2@g^6blgS7(~((~Tdd;fy2dJj z2oaY*WNxvF zEaE%IDuw_Nl|!szskp}~mWoV7Zn27?>RMNEB#4-t9FUDMJ`Tvz2`4IwU%SUDj)aG( zcslN);_1jl^-A11imrTLs}ks~=?%4ilK8qC$E*Ij1fo3NY<3}4%DVT6#q$ZAfp*x%>NvRME3_&Uv n-N^v6l8MdBc`3u0?Ya9KLx+ei3^8x#WoIT4-*18;L9mH}9pex~VjLR_;smh4kaYnY z;^cbS#PO}YYgg^6+V$F1wR;BL5o{DGBy3kx6r%F276 zyEVDw)TB2UUH`hX{n2c#bI*7Fx8dX8h_VmfyZ0M+?3-Ts|7M?C`ON3`!dv00o$+w6 zd9Hi5H<`A_r>6bM+1_k09>x3bANDr;PmYIM{Yh&-?Ae-~+v&%RBT?)7;9z&WJKNcv zb*E4EcKXx!O1OOOP`rQnugQN=+*oe5UJQG(55%&+!Y;(@*uBj#!$#U+BXQVa?^J&{ z<+;_(J93gm1iu@>oh-Yf{_N@T5aC=+uhyK;PiO1-NxeFbK7_{owaU;rS9giH=V|9I2>O$9B;0z+YQlr3G6xB8}9bw zD>mZgX1s{h)mT|>A}d)(|5z5At)!&xWmsCv9)V9p@H)F8bzE%^MzKFnR}!8MY`}69 zFSHi_>5V`AEXzyOdO?YoMQE%ogyN(3OkI53V#mh^8*rb$J|4)7?m=vG@g>}kNhm9B z?S<$BiYHAUJ4SJ7+V5|5;>OjS)kdwCmNb0;U~M^#OX%Nv3t%yQGJOy}+z4UC)7?{X z^Z4$mB!oxwo(V!`FjAR35r5<%e)wb%!ENsj=AyY0k?X4 zZrr?eYbylTNF_nYAp0{pVjc#(?C`S@yxdNBF`Vp82E9|m{>e`3QrNdS9?ej-)Stu+ zlrb(R1q&M~xZ20W%^}#2a`dFX)gAS=QMrVOx~FM)Ujq)?@zuU{Ivt!D#Y^~WVkwTq z-{x9Zz(G_?47Pf+KCTDj#brIG&RFgELxn1-IV1krt7M;kx)0Rz+kwbnAtr z+`4SzAS$=gdT_n9nD!a)?;?1a&Gb2Elz5Kw+)9ppDRFFExUTcnltw=AGYzvrWvUPy z&`JP_qm~OJbQY1^Yc-N+l|2?d8o?`U6x|kl%}O`{nAETX|G^?>+|!$(x+Gr0UlUcY zoKy)p1zVPV^m;1!8EOQlJ7W|vdni^e@V`2h{wihY`%KE%K%R?Agm`cMjXCP&tlO)( 
z;O|P+lYXFmj?ddNN*2|39&1zL=n`0^qp_?ly?F zyIQqIJ~tI&=Q+eBVEh@yc{oA>k|8{YH{JPOsOhK>%(Wg7vQU|M)C^(Y$dmKXEo zPXD!Vv;o>DD9g-k|M4Ojfl160x@l14?Z5jO=c^|`b~C~h->{F@Fpaml%}SqXDiA` zvm~?*|K8kJ<->67pmOZtR02o|8N%(m)2A6+SbW0Nkmp_&`YrHT@51q^XP}4cf zeDGhJVK*Wq!2>OY6ogd`Q}9Kr+#&!1MjRt+Oshqwf>Yb}lm`Qp_`vU(!5XBa5<*}R zB6Ezugo3aZ(DDQasFdLETE!a%Con-7g%Ic<_LzkYQqY+PQa}coD^frPJ>N(HnFq46 ze4hi|6JU)L5E@!u3Z#H5_Clf_WFB%}ZUGylfY8wK;)fL2jF4?!;R^{vJ;bik6K8Y- z2gv+5pu!3%AW!OKXO9%vP}@d}|HjVDijZ$-Mg~#g4OU12A)*tYJyKvpjWgviqVz@z z$h?Cf1%#MJq?Sm51*LYn8Q=o5M?>5I8>E1+&b1+27&nX}YcyP71u_xiegu1@fRGeh z5@Rt(9yB8bD~?Aud^3OD+(H2HUej3OZR=&nd_o zDZn)6h7{o8)Gn%N>16Kd(kJH}8vzO{q###`uvx&^1mnldNI_1%gbisu(L*Se!2_^C z3UV&0A_e%d!v8$@(wG(ep#@UVX$we!QNyckJFI|k4!fIE+(W+PhQ^06f{+;qKnT-Kduv&>Hw7B%7-c{gI zSdk&*t+VB;0XDk|$Us_N3alVJF9CNI*zm$NSe17b*o<&Ku>vx`>R17JQfCMDSb+^S zE3CkZ5W)&nu$9;9E*abkDKW4@Xa`I)YpmI|$@t?x7-Nk5y735q99%yX0@na!osoe}?gB2XD zj1}M(HK`V#QY95{5oE6n6ztpftUQrys{z>TD>&+a6%e_Yu>!^h=DV-pXeF$G3Zc5( zt+0Y)7Zg@NW)lJ{Aha~~Lz{U6!KGv(L)4BHkb$(k_+bTP2stkSu>vxXju)=Ms*Dxb zjDWEM=7oOZUv66k$dFw7UuoNd)xH8Uj81m;Sb+^SE3CkZkng?%GKdOsu)+!m5uE_- zu>u=voGF{J0vnRvSOFQt)GS-Qfk239L~4l@SWvRV3akhPvqw`WWwWn<%$RFLN*%!k zgB6hJf(=$cK3o|qAP<_c0`f_X4Ql%hujDUpeN0wigB4J=gighX6_5u>QghomL8+K{ zv3UdGm;o!W?z&xiLp-8xKqFR=&xx*rcv!8D0~@U1SY@mLx2R^UfCPwa z-%}B5Jb(cnE5K(I*kA?69Iyf+7c*AC*g(PMJoXhFtArI$ArzCeRYkf%m}*$p)4r4HSH;0mrJDkI)QhmdhkE zN`l#{qM5M}fwHsm)){;*f=)GQ3SCn*X$FiVrjP}{6~SvPS<@e+Uez)SKS~k3Ppo;ZnzP%K6zAC`zZ3us!6((dX(aJSU#8PTmu)RDQ05TQ z1Cslb$#~K}j(@s+aQbwA(jRU1<3puiB^3pha2HDqvl||4k8mhTUU+-NAIYucp*Ka> zX3V;$Cga}LW^X!6kC$#g-5;EJaz+p2EyDr}3vYWZ-(jj6ly?aS8#+ydzdPa(IT&Gi zV<|lmADvUpq`PM8>eBJ~OR%_xJ_hfM;11S)-6k{Xjh>R6Roqmq2)AM~;e8u>(S#S) zq6I+5BzsIHy8vTK#Bw~n$%H?TSIJq)KnuS)2+L!1FG_w_iXOv4=bLg@1t`ap+E+gP z0D^4525yR-$b{X>~3!M(ZwW-_m1z|flmn`Ps!+s@g2i) zZ`LN)Xe1|}sAXQPtt&4Dk?fvHT{hhr%q`j}xw(hjYrviIto(S%s`#*{e2xmuA;clT zv1KLU?8cegVQbTh{8k!-3oldyhtVPMb{aaqyPaOyv7Oj`94R!ZAg&@RC<1%hIX~6! 
z?{v2Y+tch=XS9jw#9-^0^jZ`^ml|*~P&ys9&(*CdSvKS0 zd!<{Y{h6IZl=zDGWb9S-3x0XCiWrsIh^z&see|oX9EfEmdkMcg96Z&pTTe_*S1Lpr z=g=wse-Ht8os^Z}#-si;LT&Q1-)M^i|0{y6x<$YgEw7-gAvo5PCcX74x&bf^uNJ!f3M!MGmT|H9V z5^7ZlSOyMb^H`TS;3u2#Kme|2$|MDk7m!dFx5><|k!JImzXpq(RK?x?xDi+}^QhI% zlYkkYu2$b89OL7{wg*SIrVfzTj5Rn_5|*a1uCzt`;cqu!!#{@L%P(CkR({`tuM!##1+Wf>P6Qjc4qPfCv!W1ye;dIotxP)`th;*Q z7X645JJ1dRfryU^cwmYgRuYmH@H6Yg8b`kqtzgd3_TJiYK&#(NBo8`r6JG?g_P)7 z$f~QPGOF2UUSF}q-Rn|GEc~_5%1%nC+z(2QLO4}NojrTs#?+tGsoV8pe%knM%#*RUX z=J|d);;+$C68!F1gZYAOXwGmX3SQeqT9Quf7C(`K6YhpG3(=a_&$i%il&j1$lit<< zt!7TSfNdE?CGi*D4Bx98B^|GOZrVMP7qQY6G24Zg_e5}wZm@J=B@c0C))TvV7k*I+ zR4Ns~&qn-PIar;A=*v7vc5h{7*G(U4mmEqeMcYN<_een?_t^c@62Spi}r8yck;bX z$#DtL>Z^|A!5WBmSK*hMveh8>*<={Iba(?UYYRDlp|4tkOENEwouL|g z93G7yu1RCvRi)1UK?L`C>kL(1IK{GCY9wWjWq|}g5%G76JJ~HiDT@DjOzZOv=!TQk z2HA{#XI@O+Iyp0quay2e39X}qyYM=Qd`TE;wmPM)+1J2J*Di_oFaI_9FA{zPl>7ot zDR1+NeowNg!ab_1vPj@g@i!ZAPdIZ!y(~tK!;3ko+bdMj%MPuEUkYqncn=Z660&cS zuu2N<>clUj)&W>Lf#;5P@2X@9|0oJ+cK7zr#7*>nRS=hA#;T^`I}scY=NfsmKq-Ra zLjiS~GS9uzhNG>dSIx^Sx&=tZ{SP(j;J#CEUx#c1%i#S%gCji?VLo}H2Kjc(PJ{AE z$TFf>iwx*2ND)adoM%gmskK$RyrGPc9|-J!Sxm;!2WOPfYO1l?v&||$+JHyG=~Zlj z@oQYk1O9zB?HWjSKM|fL0yf^$gj1o#QErr(HaTe3hpqI{fRYgvs`9-e%5Blk`4M)8V3&02$~%7(2|^AU7e=7dJG6 zkT-DgQfztGzAdcr=@5CNJiWAT!yY+@kdo9VV6)G24NMr6h!JO|Ub4W(Y*(rH3daW< zaDPZ6JTVyd(PfY5{}cbvz_3e<#y>MKM6rvQI+zw(1U!LCz1aY*P@wlp(YvI-7{Nn9 zq?W=uZK>mODpeo%(NOHlOyBTL|D|W=hmjdO)xwE`8&6V~V2fnTIi}|PqbNOx5IzzD zb54ye6X5amT!eT}_Qn1+Y0afLje;-Xt$;3I!b(yRyGa@C4B!Se}>M zRw&*c0&${KLf_!GDgwt7%ZYZAcNN>{81!UxB=EAC^@Mg5Mzo| zClOs8X<;V#Sco!$&M~ieiZUXgIFX71WD`17A)D~EgS8c+1w((BjLs?j2EAE!*fDv>pCww3Z zu*s}coiN-Wv&+E}*&(DlT#TkvfgkwcCT!QG0cn0$HKFy|L4ck@uNY!6ZVl=?8{oep zr0;ll)s#@&aSm650rq>@404-v?*2m13On&%%`pp^y0mqZ z@vmwWExqrhVx*NFr<}~iNUO{5Gw3ke8cj28*^t_kI?ca6;^~Diww6eOWdz@<(IhE6 zRj<;L`Q}e5J6G`Pt0A_+E5cu1@fDqNZMK{OTeaym49xZ4YYZ0*!c&_RP`2!@CgZvy zp0qj)fc|`=0sh+%_4dtTLA8k=BSZfC^A)TRoQS4D4iB;pb1 zdqTJJJqf4dtuR~p6w!q!{J%C~GH-=*;9PA5Na`?iR5uONBdWzMI2FICRxKhPt?dkc 
zszNDmM&K~&mBVp&qK#qke&&Z{?op(0BgO4SYV<9Fk2c^8ZjVL&JeZ(v0{Hq z&GD@qz2Q%r_F2}JB^7YRfckScu%u@#+iGkUr$|`)a$)S}BWHZ732&~)Tgv9;mMd%( zz`D*H)Egv6IPj)f6p7%o4cYxq>Gn^23KOeo&Di+XIh+-Qi~6jKa1HrfQ<%OgJUCtT zXW34yqr(mMqdlpk>CSlC?<#KiV=j>U^@jbfYYpRNxgwWX%J2Alxmr2s%oQ}v{0hXm zX`l#97B}4T(iE;xJe6LF1V3E?3$qlWA}`A>8w=nB$s0pt4Ve+ZC!27F)xU1TbS2r8 zqcD?qC3nSz5R-jtTZxtGxploT;U{S&{{%c1KT@fZx11$bEPSE~gE^^^OQ_awFH~X& zfs*yB(-dE5u6;Ar;+x=!__6x5h;}9Y!wT;w@gilzHBNFTdZQ<^zG|6#m)M5;d4+|- zRcwC zMzYBpJPDVjneN|{T&rV~Nu;w?RfJ693T_dJJ=iNZGObPvd)uS|>|uB)e$XR;5z&?P zVcU7G!$+F3i^PH}j!RS42TNMWnYHUVDR4j)*oDXAM*}F3vmAyaO{2FC=!_CiA1Ifh zv=fhPKASA&9lzQufm5a>|r;Ptv>}yt-If8bjxBtu zEb|QLC|2vEtZcb1x7=I;lw5&TCF7QvKrLFWs-xYP&#}2?;SpETycWNXUW;FcpYhCn zIOOsjL0q9R%R{#Hwo^0fDt9j;_S|~1qit0^60j$eX#nkG-oMqG^}46WlkFb5z89^r z-SyCIx+;i@$GayJxK`vMzn)AOgQ@g9dor0cy?ZiYwkit^)sxA1@a)N?99&QtNyfBy zPbOvCkdnN5GGRd0uKjv4fw1V5)caeZdop2Qh9g}x^6trww#&IaOiv~-q;h6+Pj!^o4^XSQB z8Yp&WCGWs3*@=TuO}16U@AS&o=hf9@19JUt;#|}B_tUSo;{hA112L`SCllq zg-%^EroDSIDcg1SWWs=~UHkQ90%6f9i+~ZjCldx{II6WL>(thh2@I*6+1!(LxSotU zhP5kkD{HE)klsC+OpsN2vef`RnWe}JBfc=!$#~I;E*GRHZzUyQ%t_=y$Yn_ZAkF+WEs?HQGsA@y^ zWO8P`domny-Fvb;WP9~wJZmatw=!tX=!{WCCH) zDRuo(5llk&Yr>=$ZoSvZR%`3Ygh{KM+1!(@ay=Q>VexBS*7jar2wH#-d+uG736kl_ zUS`;#8K5Jx6gba(_BL5zxu&v=DkYB3r=*nARQS$M39b5MlwK0#*5tmA?ADw&LHL;CaT$oTOBb!0qI(GiOn5xV2z$0f{|R^{L)D_n~s z*)^C2TxCCI7p}d!F`jLes~2Lo#USBNBRIik8~fK^v)5oOdok;q2kc~D@aeER_IDcg zSMbTElNVIj8brg-Gth%RuX-tEn%w6_w*0;l{BqO&5IIX(lp`6FfCAe=@`#r}w3;kz ztVdrwdI%njkNduOB#9;Vx#xX;FZ}VQr3xe528~*xL#>KaA5p}-m41dQ^>KJKe%M>7 zoGr0E*T+3GPq|W|I}@#s3v_l}{z`Ni8Fp79SMR%ZTTQ;<$QH%mJ$|npNSaor*Sd<$ ze&bWq{^V?LHW-iWZ-9}Bg);}0pA{iS-AOgm7ca^}k?lo;`m{eVTOX5pVTU5_EdB@H`EmFhy}GW+Hb=%a{;C;w?+=Xy*W*jcgPI-L&AjN&EyHPLg-XZ12MPKT{4;2=`w0KFmB z@8+UFTj5^-2fO3l+0O0^|8g2HLMkG}{VqB&3#s_2!RQSCwz#htemQ~_tt#7-J~~0O zxUxtuh9b&&PD~{Xjk|Q_>n=|}`-9%@l24g)m+-b8zymezLd0(8F8QZb?m~!t_qhw1 zxt+V@pRV6sz9_wVne})+QQ&ed??7(d>TS30#@T1^R+Nd*zwwl0pqpt1YQy39Izt9x 
z3G;WYNAWT$$Z=7=h^|@}a)P0bhi^pi8kQd%d)eO@IOM#~gQ-%UyPV`XSSvVW?xT-P zwSgq>!PgsbJIh>UcH4rZ`Uy{VMJzi#gY3)JrOkF8+L5PRlW~1Jf(@3P>e3eCfuFRC zc;O)l6VHDbf#eo$Rtg5Cc2~=4B|)Q-`Fk+=64-;w*OV*o6$r6&EfpC6&R_8Xm%$GfC`lW>$Z7_VE7UOfq;!&5Rn1? z*r~~2m=r^SL)ptvb7?Y|$II2|g%S?F?$lnetO@mjpPcPB*SdODx-Ukuyvl|{Ny&!- zffkKuXZUHiH|@^Qy#nJ2X*6C#@lGH>8HUQN`3oTp^{aDkXO{5}LReFne42DD5IXMhxF9R5iw))R>^RL>y}{z?IW)CAfr`n>M}C#NWo&5Z+Ro|-q`;(*wtS^t)Usak zP~!xcbA$#;jyA3oHjo@jfcgcU!u}wVZSC~u`NBwIKi-7nEH73~J#Wi0jm9{Q&BIud zhijOB8>cdZKLql+QmTXh7QwCFTye>atCCD?1x{1tC9h@Nge*- zmU*;Yn@`|}??evhcqt4BNzGsOrkXE|1HyAIPz~aq_MzpJM!FM+b&Z^GOO3(sMW+S; zg0HxMH7sZ9Afg#IwNr7sl7}Efm=?v8eE(FA9>dpM;2M@qF>F+a5m;&ta1Hx-*_sMF z_Yh-cuvQXo;F~Uh4ab}iB>W^DL@*jlP63SSYr!nP==)N=dw~RpuR1o&H02Up2T3RIW=i40)Z;qqm>p$T`ycFBye z2?{$M9fBxF&n0Pb1SCnzYqfw7SEN3Jh-LMy8@5a4fu-)Bx;T18jq7Ei24SR0+X1fQ zW-6<7#n>bX?b;>8mKW39F38e4Cb(g{W}Z20*NXx9+9MxH6p0bsuw5f}?%1x8v}i0Y z*sghy^P9@tuw8+x@3v&-hV2SWi#?bFz9>+1!FB}_l9Yyu3uRa36%6DEjw!IaVY?!m zLQ?aW{hZR88@Ai23){tn&m*?WaApc3F5Pg>pAoXPdV!JGJLXhy2bO@ewu#Y3Q%kV3!cS(Kgu7@DO!42Exc;mtY zhr}3cGsMylo=Lf{E-n4IVY?ioVt{aJlNs#NtxaYKNP>i}rtmPe%CW~0+vVjggb81H z*B&U`uw8n)V*S`I8R|S^yJQ*){FBFaT`FNRfC9GViwad+?1t@zwhIc#_)?4GzFjgSVv532r31bwP;|j|1rm~!rt8DJ!k)YnGX-`xY*%Dc z=*RqJKc}?jhV34;!FIX8uERTAnr}P<4bJ1ZZO}fl?MxrnL1*-EOSDT@RM{AAUvd?KFzoz0%;1?UDf$9WNi0 z6s@-7zFji3f;V?;*Fd<`5<6hKrXd0}#AFDEhS+W2E*UV>2bZcC2QoIMvh3v2qLJCy z_0Ep_cF7>v1cheUA&7GHT#^WoXcumF*f;NyJRp!W@1CUHa$9GyJQ-fk4bsvuw5^$&41snkvn&6*GO7478h*S zJjnS?Wp3E6z!jIwBxsA5&58X}>wICm0@Gp-=728>6kV`gfrKQbg-S+Y0>9?iFC zQuCL+X%Qq4_iotkF&k{xrOn2KHIOZvydv$kL$@;-7|ysL!d1j`z-n?qlwyqtB028c zW!MzMh6CcrP;uw8~=<3#I^;<@aYt;IV^j<~D>wIbyrK zyoE5~EAJ3|X)`T4tmb5u6UDeo;R3rA-fX#d`pb)=}6C7jPN0| zyo>3ffC&Dw;<+1=`FKHY*#)BF?tgy~l>(8ZuZiFtP56Nz$`-)4N++49B%0J@huUc2 zFL` zj-Z-ju8Q$z(kg~jing+%2EAHP>Dmr8`(kZV>Fy#m*`zjmblRq!B@E@e(%xE!cQwM~ z1mu81EDEF#1DFy8j8=fIH9Nt^9@A03oE6b3w_M|rTF4TKh~Ty!V!GOC)XBA6>6RUGE8c*HEq4)s}^~+qmw>LcXtMG zDIvB5kiMoZVq%J?z~IY1{S`GnU3GII#1yoL!M*Zixa6XWHDl)^{J;-x 
z9!{#SQLtM!lj_(d`*?RXSz+giOXf|p23+(Z)?yW&8BVkZ!%Th)0^G1JM5+pv}e0J!+v{nJep0$!|qmpr;jdh8f~8I zu9gGkLJ>h6mMC&vv|>1rc8{a}bcQZwKN~l$UC?w?*$wOPzCG}ImMg5gOy-v6i?fK# zcsXUt#Fajp_*`1)V)sneQ<>!Icc0lx4y&{`+YO|LTer5n?AG-fK&jQLtBiH{$2U9 z=tFA8dPIC3e$}^jKzysiuc>y(A9jeLbXa7AZeH;G48nas6AlrE9mx}vRS_Sx4aB95yPiRn=tFAR^;g8?{cD*a8v`+Z*d-8?VI2c8^Z4`1guMds zQR_ggvtgImjMX8v;Cd_K;(ZmF{Tc%?dE6xslV_a+F-PEeWyM~B_}KXh#6%xbGp;`n zlg-jHLpBCt{IE+PCc`=gV&?JZl?i(V;$zl(&QS$(By0 zy!PMH`T0E{xsct#(A6;Vb*y|fW}Z?=0&Ldua1_i0a+Bwkfxnqmfc?Sf+*{^kVC`%iK*3{?{ONez|U z<4hOtH4&zHpC`G}{5%QLg(b@H8~kR72nL@c0ZoI=k-*T!7NEpejs#B#Hb;W$1e_x& zwS&!(G(5!|TZ&y2SnR>__WD7JYjI-Yb#z3u*#yn4Y6phg1@wBE5u zM)|Ae#$PtkIE(0DkOLc4;HiyjknP6H*nrAIUqw5;VOq?{J5b~2^5%ELxXMR*S~Er5 zs+uVhw=z@2C5)LO=@-ouaT{o+NK6*~CcK!nDs;liPQTk5ZFQ%^!KQ;{T}KRSW#bxZ zRtp=_|H0Q5VLvl^yo7B#VSFc18hxUjcv8iblWL~SzgVL(W$V|COeUY3a$;@D#*xv+ z!ZekbDH2mQQ^clhrbtZLOc9&1nIbV|^t#{D*6iF4GTO`FVCO)@{0DyvuF4Fp{033H zd^ZX|v%%~fXLbjkU<^|ktn)oYu8ykw(*~3; zMS1EHfXPZ}M-WZ90Sg#de9cqYBp4QqSIx)w1WX zvoN0?{3n6IW7Z6k(n{3bvBE+f=9N=b2&tO>;g1C7UPYO+uLni;URAK?@}>vLu?hy& zR-kzCBr0j}2Fj$#L`f+tvUo#iW6K9p@T^g4NrN|1=8PJo^|#30jav3xluCwDFlbO} zNrN|0CXE`*TL+QFn-nwAh*~QY7e~fZD{1g%%A8SywBHfgyIH}WLC{(jUGz>S2>+Rs zY9!qsp+@@kibNqFpq#eoqXOr0`Wfb z#P@P?#ajiKT_vWEksTSigMGh*ni>L^SS;FlNrrrV09wc>@|k&1BWA_S?Oyk}G1Q9L z-ByVn>wV0mUgtxkn2QS;hAATOI~``jknx%idKfPoGSYFF4@;()j~gL|H6o$At4j#Y zf+1AQ$w6m%ZI4%Db0=|OjRa97>S8`_gc#O*WK@J{ZcZ*TBJtbPju)X$J9=Ho^vkR27NlC`6i#`25a|9@TitxsOtSV;f-#sB^D2ll=9 zhyVO5EAM^o*6;t--&uwqxibF36|e?PbRt%~1kp1q(TViyIrKF_U*C$puDl4)FBj9L zo3(7^=HTT~V{vIubKi2j7tpcG#Swn-erTe7eCVOKMNzyDqV3|_v-!7|!G0!bqhF%s zpzJq!eolY07aGHqN(`_k`!Mr;C;k2a?9GogOn=%3i|Jc1=-UKM00GPBJ2ZinYl?|q z372;cMUCd-o~7k|`wv`l@KAjD3$9wZ3ih*yY%HD{kB135N^a$9;dnXv7kCEp%~UC~ zlm2NuRyxycUqCtu^h1tr)4#7EVdO$wLERPl=pZ{UF@wKjLOoJVDEg@=g8u{dZs5bq z*wIq%(+q;SU%~^UA1t^(4Exa}dfn6G$#!oRFHL6?bg*W!gev&E9=dJqARNHoq5r?_ zu7}Y0`}6VdMAD8A!2vW__f&85sh!Dqr~dIDfGam9*UNdHJ9lT zG=BEYYB~}0n@Rec?3?sAD_2taCygV`wIx{WG@AS1CfJiL0VbCM5#63_m2w3g%#AmC 
zEa9yjE0+NN3w8zAfIY8!%WiL2a+^KKZLWru@1X;(T>)|DvchW)?7w6$#P3ELeF7Hm z7@!6)hQ+PH_H+dX=-9+(x~Z$NDn?dKCWkm-Ea|1dK9RX7$K7jS@15v4N_0Y^YL=pJ zV;9+X@DTD2xE|=@YdW352rby8LRCx$c3=s=T$Ya3L!V5)o=TrU{)%j5Yj-n!J~eVw zy^p3lV|0RJk6Ql%|A$k|A6`%hb3g3s>`jH)T#n&2kiHpHVoo2q4rQS$mH+k7ytjXb zJe~%;H1MHm5MNz80MJ>^MnlmKkkzEy3*k1{fB&$z*?)38-0Dxtw!@ps$`!B&`#JuS zET+p`$ek{E0PW-$Z$CI1q#l@xIQv*CFo`)XPW%0>xH%ZjlE&2wE0>{9qhEB2-(1BR z=-M(Ybrvr_7&TT775wCO|4zjGEwJ~2es6gD;4Be9`qd)xWBI|O>GPN`WDvQNAi}-oR0FAtq+k1~9jILGe%Nz7Wt#I#WT1KYUrr-)${!moUJUft5{cglhmNCo zHtBEOJ~(~4KS9U0_LYXA@{rW2P&ILU_muKcG2-C3nA_BkM(M+uE0AatzYq?gNukhz zB-BL#GTryIvUp;!^$ahaD`DSL{r*ljjigNIFc$$=Lq<&^lf&pQz0s3|7aAanWU>aZ z+*wL_K;dBxmeY@C*)eyeG$n0uKBaheGo8ec(`S4Y>_a=RX9IM$F3Jrj==VI|d_E{X zCE;fZaI!ZU^iB=Y78ev_vib#QwX|M3$j2!m&0Y}m#-a)_b>l7`7c5{B%AEchp6>^rELbN43s`D13W20o%-No4o-}_B=wsIFq zHM8BSjg{pl>N(cYzm~(=P7WTh3e{f>|A`1*$12f{tL=g2p!9!w<4->uh?hvUXAJ`J z(R+sIrhITc;^UUu`S@T1?(^3NUm@`YQ^H-q;->Gca%chI;tJJOWa+)>DIi>%r3AUi zpuh_o6+ zgeDvOl?HH!bvt=hoI*+=xJD`oLI&BNf$Hcqg0FZocrSLFBiC8LSEZO$3_Vrkuot zTfi(0D1^P5d;&fe!40M~o-JUprQ%!Bl^pb~C(YgHu6ri=oT}Na0{<@JFA|d3RXTmn z8Aao75lACmT1#KVTDKlw=9R8Y!_PF#29>Eoa6r5Lv5s2S`kX7Piv2b^MSnDcS6JDp z;A>XG(Y)QUUU;D^x9}e5?DutX>cPtQ_-l1dJGiQ6Di)T+|_wSG+ zqEm*x&!mieDIq3n8pbz{@}ngado`mQzxV|XyqsarBW36v7GWeqiZZ+?v1Pun9b2)( za}j^Twj`jTHOV0Dd)m5Z?Yqm?IWh+}RO%ae6sk=PofT)=7XG;pKK^Vg6hG=FJ{T{$ zHXiYE%N%(5XanA0aAqsLXj^&i)_5oj?Atr1SUjMfgYqOqV|v0bl4ftc8+R zVK*Wq!2?aOTmD!ORy9n)7p-!O001Bk`EbxdVb8$R%RX0Qg$q=XPy zgvcBoF#)fv1%yKh{;pNLVQ>Nylu-!50kg0{3h4fG7QNLi^fR$h+s&gBSMu$XS8T+~ zbVq;}q(CN{g1W|Xk$kwV;jA8{fQ-!NDOH0LjF(!Ff@-|jAO(b?PA`5)fz1eb*&i7* zZPj^gnJUO&umPE@6;ePxtdpHRQeZ=E+XgAHBIJt{kU^CDW^9lGLPRG(d!)dI8fVJp zCf9}}w++Ch1t5bMTL2rRfDqG&)DkJMpwvz`1B43wMIBeG5N(hG!n(Q*F$JVR7yy|F zTJdD|dZv2`WH5ylQb0(GEs3$1BM+L90`f_X4QiJuz4R|{S`7M1OBf?kK-m)d6l=-N z$--@rg8bD>wix3!2{3hKq+M;*>?UyOJn_811}VTFRM0b`IP@t~Qwm}MVE(8xWA_e%d!v8$@(sA&I7DxfPq&&{w%`&dD zqo&giE8yZ&!E4w}1z>{}&|9pF-2@gv(iSUVY@p!Mb87+EUh!`jSh4VefN@uW%?K1L$leQ%8%O52tANZel_`ZPtgr&| 
zq)vABSb+^SE3CkZ5W)&nZsoNEP6h`Xtblx2CqR3wz=j%U%H}rLhNL%EKn4kd6%b+? zky>H}7L@F;0xLqk*cBPXC`22qfDq#v5tj~@5VGs_`KWGaZzU_ER#*XOYRQQ(% zUpfwKu!5uc9(#MN0Jo^-eFY@Uv{UJ%YAxO!=w6=3xS0zHq6b?G@{1!N9v<>HSO5UQH`fyD{}!T@As zmLGM0ZL_a{P}K5L3syizw&TSHDb`%~*jA$rLLv^>s9*xikS}5>{^@kPmA_YKawC zP_n}ctO)sH1!NGDD!>LSAjG&vgoCe53YT;(KB_PGEzbK22yd0K0`j04DdY(5V=)0`g#(xf(6pW?w;m6faw5a#%s&?E-i>zG~4mT{im) zFip-yFT8yPc#>9FL9UXjSOI>_j1}bMOBz>rqDDKx(S=+{HdsNx1#gY%?fB2k-oQKl^N0 z&eZrpSgr~Xff1{6aLMKg#QH7%tOGhF?qo1saXpv_Cj>TouBqqi4}Gl}$qzU5_7C`v zjeurP(S?AXgkD_;NX-Uk@E7V=LJE~44mFv4K=E?>6uiY~=q%7(vJJQ^R2aZ0L_kYJ zuQ&vx#^Mq63--&}ZS9hL&$T@gu)%-IrCv&cI7F2qDtxK|$Eul+&g4bBGrawr5tD4j;ZPb$B9Z_o$ zRm2OeTVScXIqXfR@iFMcM~36gwe<>Nu1H31808 zq06=wn5jwd<|$~Wm98Z@OAtA zC7i!t0o`P8RoEIDdkh1Gbdc>k?wg1t`z8Qo>+3DmS2~NqHt6OrSyVO?Pei0R(FcA_ zIKdW4a4>3b_j{x66kmAK?N27-NxOXgN%=;Uq@utQ?qUgi!-MVn84M3^kN6|Gl{}#E z-k+dLxyU_P=~W-?r~895PtNFpyk%fvVc~7Bs0`;)MeAvK5o%Y$<00FUIRy~;=`VD7%H^F(k!9k z2oT`dveJBZUtjL9^U#VG{Z*WElNQ`54xJtXZ>OR3)~odDtHkc(NTEpuait(&dSe!P zd#RbO!50^0rDs0V`a7@qT$zVzY80~I%(N%3zbTeZ+yB)E8ZjPF&=08X~s6VrF zh!S5hzWlMOU+~MDRm5mk66&L0ZRJ2LGudK!cQ|;eU$>rQRR`)ynM9#fAN)ZCT&rp& zxbdhz%{Xgk3AJwi+{>8%;>X=w;-O4?wl(8lpgRF88z$9WoL!Ncc42ZL31SGH_^`G~7JaB@Xz>COi;;E1EJ% z!Q%xa)WvNwvumW;eCDsgA}3XGw?A$KR?IwVweuukp9=Bg!nOxTx26t|*NindRT7q_ zv97d+PyTiTHvD4CPyb{SMTTN;gfuJQ?EcZOnB0_g%vj&A!A%A zfOR-@BG|wceSjS*1^+gJS6Z2NHduGH?xLT8EHtYVXu&N%0gA3`q@37+b_fVWd{n># zQ{=FckW?M3rB1AI^gGdJjTzeBTRRRY%RjdG(y!QBSk6tnH~%K#)-`RM(GoGFVk-(4 zg%hQV0Ct+66CmrOh!5F1NX82RVXG^llEBO=`0a?_5T-N)TA`2QgvXQdqzqWBiJ+F-ti^0avt3 zMHox0D7flzQJIg3K;WO-E~RY^&c=Hm==X-V56)UHl4bdp-;ZRFqB$shI^wUL8KFJGjqneYYM(465&F#)gbA}vX$c8i}#!3lRmnT2T0`r3lOQLZx2OnO@b zw3<2P0=7qBeKUNoZj^Mq?zw6ANM6LU@w%GI(Ds1$L~xC6usa6BVIJbltS5H!F63+< zzNQ@dVXR>!U;IWXBn(gkOU4CaKy#Xq z0Ilw(fYp&aSOd}SD*RGYwi@IE9yQ;Dg_y-Z(=dCkTdEpexZmIF8hJGUA?-qBmPkvGq|MQsE=Nr%sC#wyz8T-z> zn7nmzW*T29{dE#rM+tY~brAWsI;9=i*T75HE{XRq|26q95`HA*LlaQ)3pk~`%`5sn z$)*bTsIJN)fjh?~AZDU&b#xn9;g9o%;c?(2|kU>UqWXmF%wBFraG)F9suH4l~j0LmvJ%ZOqv 
zGN7{{MI^m&o-Hk=)|Az-ZtZ;3uBj6ezXCPgww0o0_QvN z9`Nt8Y1crq`-$*05wP)|CY%Z_j&h^Sw8=rMK5V6r29%7b&?1^`(GK|$cw>ArOouFL zSs62f08n%^RpY<`8bOmo9$J$5=;}1p)no8T{Kk58r2#X3E`o=>y((Mu!*E6(1IQb= zcqz8Lfrni%&}{vIPlw1G<>{q$8}`UKgp{N{0T(`Rmr{bDM2t8y^^yfPcP)Rg0r!VA z!V`mGA6@o{{y*^#4Gg=)X#6t+LlnCRTH`x`D!JJJEkI1OmrH52pz7#;F@lGJNG*kR z+ET}rUp$$z8bai&duJU*d_*jTCg3d9o zc#1M2pg56=0%Q|9)y%yM@$tpJbtXXzhW;=aom2V^db0w3v3#4Qlt+@lj$8=HS|q;} zR3vHk&tEnPE$9FcFV&pXIT6H5)4F0eyU9I+fV^~;xqBjk(q=uNP%~8p0D0+jGZ!rq zX4K-HRMo@<0eR^pL`*Oo$V-PHJ#YY!SEPwR-VcWWc`02Lco-DqrK1ta1ON^xO;MqR zkJbY6(s6TMtlu0Bv+a-$uTmTe2E{fue`tq}M`KM0)R$6KE3X#RmyS!hQDXGyrNhVl zn{s^S=k0^jr~4DMCA2T;FuAu~M*Z2-;|V%QaC5xVZ*vFJ=BJ|Sj_fE*U+6mF15toY zW~J(c;Rcyq4wl5M)bc|}Y1$O#rDFzuxCz@VEvNltiaAU3yQ>*A4>o6bdGVeCyVr zzOw=TD{_hn;{e69hH)@6{}q;vNdf>zjx|7wm| z$kg4fn~Z-|qiE@UFBK!L>^S9QE=F2iexE^y+16;9Y0HMxp44gn^%3VTe6cpkZ`Ej$ z6rQSAX~}%^rz2)Xs}}fbh^_F7@RwJ7MWc%Hm7_QOY12N-+Onhqt{6~%?gp0htYuq`&Egb^ z!iF#Cobjn9yty84DVvvDuCP@A>pJsPuTPF};7uF*KHHGp|CDb3#HTQ^n%0bsZ=J(g zLAa>Tx(L^h&ozbVtHMLlv7Bt+tNzx}!3WGWfv4iewGsS!!+zJbhVimokxMM$-zK(K zAkIw#MFis{(n}>`x+A(Q#0B{23Rsw>5EXe@cG*|}CrI8HB5TNu06y7-GpxPqRt77{ zrW}QtyeqjYE`*ruTiZ&k$a3qGv_O9X9*ZBTRG?c9j`d|*nyO_0C)Nfy`XK!Ep!wT;w@ghY>Wlj6tW9{_DTAbw4@3OvX znS5s*?&lR2ifC_%$*semRXBK1E+MWGU4l5yZsA=Ojw&&WPLT(F(^XkJ6%KYa&pIc+3LmPlhuuuJ{uC&Yi~SaM zTLzep&9`!Ciq-lkD_gG1EjO0{tq;yMk4LPp!UlFTQPB|%jS`dpe2&jG z&xvTUB!e?8#&rD0n>&J(<9@=mPn2 zpI=WVjKNfMo;{gNn%+H`Fk6*{hU&>=Jb3nGQVuStj3i?ke0wq}+lG|n)sqPWvUcs) zlL>@Hrz`?S=)OxBnBhpX__prdlO1iB3zRTDnZS_Bnaw@f(YCTD%NEKCop|(X;Mjdb@EJe=uWT^~`28;ijRlnxllkuVxT`tdqemxmK1U7lko=ip(y;tnplksF# zUK1__mfhd8u$M8wWI*~-PUu#P9~ZM_4cB=0Wc=tVwT0`+`0;WjuGXtM{o+-PJ(-+Y z@16{YT=$+V57}Nl8PA%^ZmjK?=0fkDtTT_EOr`+H#d0a?5D>&XPdqEi+D zBXmzD49sv;YfsjxttS&0QaQ7^C+l!M8LjC&8Zf@`z&jv)dor0ItMp{60eUh^krzf> zlA&qeqin&sCC%Rmao{S$NNKYms8K5WQ$*R02TnY@{lgSvEPX;WrVxIwh*U4mD z8xo+`IvGD+pq`8;Dmr2jHbSq1@#7L^OzU#qJsA$U?mbx^vb}mTo^_SG7owlrI@#(x 
zdNP>?3SQ4cPbP3Jxu&5%BRIik8~fK^v)5oOdok;q2kc~D@afQr{hfyW6@0SkV-iqcJ4hb!5{Oolg^l&-%S8{tgYj|Smy0B^ z#6I`D&+mml-n3L*Jo8r(CJfor%`R1vhH_lrDvRX#w0DjQW$f z5ig{3oMzX*HrBEqwr1zhU9gQK%9|n5q=257^tZaB-ZuJZypRTVuE)fIofZ47)9K*M zC|<%}6FtX#RxcCdbePU(7agjCwDZ(pbS683iW;S_2CsmF$WR98pvitW7ZW<+N_BbH zFGsMVRb_k9M<-|&R~G5Tpc@#>P z%0%ejcuF!5ce8pFFQZx-*VBvWa`lDQ%`_9W;c$GNArrCV@<~Vyd*b~0jR;=D@`Gb9 z`x^s?ocCQ$ato{#3^A9)>-NmiZbHa4KB5qlAAG$5x3kPuX16Uks-N&=SH!Z@GswPd zT@tS=l81*)$%}; z%W_m%uc-TBYh2MyTNhXyfp0h94o`-~z~L)E+10h|6dkOr<>cixg&g=Fk!PqzjwFN+ zUzyJ(fc$O*l6%Nas!iL^w!Y$cA?(3D@MRkZm!o(wL+^04aU-oZ4#RcvE3B%Ga>J`R z7!AG@!K+yw>_P_RiVGK9L7^EmwM?;?aOSiWqb?=2&eB>YO88>KCEHOJ#Q@(Ch%p8LBG1#XAPW zp=_PlS7?HnB$5N>eTTpcGZd`63}7(Y>Oa%XmnWKjMyGDu(-Yp;;ae^(Ew2wQHF1Vz z7+jJ={Fp7JH=&=#EdQBHQ_Hg~CaUTU7EjNispSbsl9g_?$SaP?>sN!W0bSc!9f1^> z6w;Qj)QiOBB@Ywl2o01RZCojAAUTu(^$R+M{Xrz#+Ud_Tm&C3ljs1ZQK#l!E3pe#% z%H-mgA~C{`H{m$Ti&az4+p;as9QI}$?Pv0aF;jJVmk8|V^W8IXyF(y#ki%k??evhcqt4BNzGsOn!Q7r z%@hvkBQto;1*$>Z(>}DE(nxorfodGq^71ixztS}A@D&%ZhUJV4B3wq%3@fXKJ-98! 
zni6jNH5a&sWm60r)nNpdngd+JK3=vaZMg7|tUOp~Gz;H!0c<$tgdpK3X$| zKy9K^gUqrggb81H7nOp0Fwg}XaKm=#X>am%UsYqwc$yY}ZE;)F!hpbJ%8uABd0c^i z;>L>v$bfAz8JDk}RBf>vwoArP@Rm2l4od(A!lIeDV7sOvYQc8Npr$(C5!)r?GktKC zY^kJ_!K=Kfn6Q9A9X6RWxM92GacqLZPDh6z%F%O4S{wmM(xR&cE*%^pu2{~rP?@l# z>4@!;d0?sgr!J0Ok-EG@WDo|pVY}Q+Wwovtn@DJ{F*C7#t4)vKhV7bpt_#~GBeNW; zUu|*2cF7o-Ez30Oj_n#ri^k%D?V1NUFW4@bmLs-H9?!MHC9}YprxiXqf*ZCgFfI0A z4)~%#(FNNTNJvsz=;cZ>qFa^&H*A*-O2UAU)cj>Xr?lpV?RL%swu{H)L%R-&bHjES z&bS~VVyf2j2nF`$?2Ew%+^}7SO)+dZAdU<*7i^b#ToNv51?+-M?SIXDJ^@|hV7DZ`9KmD+a+Ttcyq^g4TMWAvBSPy(-7ySAtsY@#CFN!nLfBwRmwLP z47N+=?1=4>$FT|eW4jzZm!!oJkR+|pjW8L{u@QFLw@U`aQuj|?9K9lSxnR45lyUjQ z4cjH;GtOnTt{9tY#CFMC*brX_woArQ^IR9UOGain42|uQF_hf7W4lJuqOrJOyXHa8 zZz^-!w<~bPB{Q$EFqcf|i0ukYi#?bFz9>+1!FB}_7k)}}!*)eBg{0;$`#GgGH*EK) z4Yq3ww6p@_Ja}@B`*s=5xFBN9jASLr1^B`9D6b=lz>g!g%djbi4F|-Lq2_|^GLKgb z7ao$W4EH)>yDW1;knodq5g^e zF2|@CARMqpj-FeatjBgMd$VNmCWJ$!TYwz)?Q-%F4`}g~cL=_;3ngyY?y(EzOa?L& z)~oc5vD=vpWE>2G%n#coNxfpDoMcEEN`L*Ry(t}%ysQI!m8 z1fJZ^WFX`-eQ>FYt7Hv38f6<3T-s?en{quX1C|}JT`~wZL4RzQqvw*eI0BN&R-Uvh zR3hxSEdsocZXEaR@=OX9u0!hL=$V`&5a({#E;mzItt-YRQv24hZ3rB31WljhZ zev&Q%ggZ}K$i{8oF2k>^_9OLeU~CtU%GZ4w69G4Dm*b5K529OI&!pyf;ka*?V^jeOqB3aPzmx0YT zmI&U_gdYf^Yyo_$v?8LCXi}3MrUF-%6#kNzM;?8M}MifENvu5n2%B9J05VK!u{DX47cS(h7J1mw znHx%XcLs1NA+`jNemJ4%*#ELme?^T?SKV9)F$L{maIZWWF1e_}byXS}%Q`AwJ2!?O z_#vNOieXu-VuHxgqnBc*R@%aJfyI`$1&QqC$|Z;n<`Z8AvK#8Nx2cEy-eiPsj_*#; zE$^fNDW4Vz&;7!y`CO`cDYyqOE(h{#s8jt}Z&f|S>C>lQ8)Ar#dWh_zb}nG3xY&f3 zD!AzHOb5g9NHfT%uj|69%z(Hf`rubsqT z63MCZ49-k?TZ8^+Hf^(ild1Ba-t3;;VK;rCJ=@(G_S>7|(QGmvcDMRFeRTWNX!Bfm zwH#U(iU{I>jOV&&#c&`+zefG(3|-8AHf~(IpjlYP*QLL254@h`3hOSDxuyByEFv>r zPMI=srH>{)msYyiJ(KlRCb|0EXSR~NW$ev%1L@(`tu3S7$_z|ht*vYA-1(BkPws*H zy(|_L0c15pX0_L{#3fcsA9skF^x14-A#vD#Qp_D?isx0}-D2gA!;rNFBe`0JUYQB& zp(4E*yKK`XI)q>KtsM~G>hNo-9rA}AVkjLJ*`S*jJU@eQ-_L|?!?4#XI<^>TtxzqL zgMT0{PStb@#N=@o6C}?%2V#!E#X#I|XU$BvV;hL65xYcCB7CVCR}bd_0x>z#O=if( zK#U)D3B+Vr$3Vs+XiA;F;Ojudi8zC+0r~3AP^TvV94y(SP_%QT>>$A 
z);SP!1fEw`>{SsTol77l!oP3=F?s)5X2`}sj30Ih#AH~>yftY#xd1b<0f%uqpAU@v}F;*XZbq_pP#$xPNHMmVs zI-vM4I|2_BOK2pwvmdXt+l)*iHR(G-P@)N&I?s2Az_nHXP zyw8(dX?~sr>B16a_ziwDLD@TGS1e+rPdIHXol-j}ONE)7E zjxEL3umX1V4N~8T;3bLxBJ3iUe2m#5m1sg;u-Ph(-cSjt+>pXQ-lyCdh+=YgRU4JL zonE@A+(|A*R3Lf#qT?Wa;sO+Mc99AsCleZi%M&{S-iFMvH~d&Av+ ze96ZC<-PI3#-8o*R(}Y;w+L5fB#$GyCp+y~e>5FWx})B9e@b4xc*i1n_^+B9f7wKj zFQOAe4s29Goi?f=u^TUA<0Ox06q_}U`uXGV{gux%%d?<7j2PqY(Hs@Qx|&F1+RYg9IG{koCKH*%X# ztZm*na=zEeOp(~UnIg7%Geu(aW{TM6%@nE4C$O|NJGX<3_A)rwIS?`b!QX0FjpEd7FgwSY-GL_B|fj|ApkMVYg&2SxT?RoJ1+UO1AB6+%+0D?Ptb z@&;f8q6TlEOqxuTl(Hg=Hw4!Q?h_R}>-_Mh((_M`Z72$v_3iE*d05Ico4f zlTwYO`y-S~qYCq0?U0!0k7${6Q7axv!JCyPO+#>81ixx&Dk`^;5G02oME-E90 z75o^SP9j|RTm;zCCb-k=_J+fJ%_47UG5^Uy+~5*vB$x6(9}$1CDm>8LN?v9A5Lxz}P6ua3$tyLF z6(}be^{l+Yv}OvmXJ+2J(3` z55)V(6W`0p6>k+_c9obuMs{T64)*;LYHF-pVzFrJB^mPd0catk$YLkx>z*xjDJWh{SJCJ7UQA$*9N= z%#2*@h$B?Y$wfva12HplQ9nbbn2#HwKz3B5O4g268p|(Q`9H6K%d=eBJ2mX5yX*$( zH`Dl=m&gCl_dBP2@A#6XM9+o_KY&0CloyAS8Q++^YNZ z5q-c`Mf3rdAx9PAEN-JBlJ6~5lz-e-Mft~Bj;g7MBsLork-M92F}1dIXwS#5Kh=cISrwnkx%>WVW3fz1)P|Nh zG--sJe4eB_mg8r+B=1}*J(Kgvomp(c(pWC3Dpy_(%UK!N9q+^mT&>zbz4GmQVP8=t ze)-<`OZQ%^66s!OUiKHO#Qrcl?;4fw7puhK0C`)RdR%&sv9XXRb(u|mBcCN9Qxlmp z!$kE|-jLbmH~Ih@LL`CGrjWl_C8C81ql#$JXR9LmKmZk8tP*WhM2kLK710L*sOVyq z*q@A0lF4hQi&diR6+n`L{i{UyIqA!U%qkm7RNQ_$&%o0IeqvdP4+T zqE~L#j%cQrukNX*CoKOdrbj-dpC0*?IKBV70h|snL+T!Z!!^^xKG5`UXFpz<0C5yg zDmnXa@S6)8|MId3ez5`7Zvr57Oe&)8DiYEiUK2xDU>;Ar>EwcvoJ+*cP}VgNtQ-UO zGEx0h175>Yu`>qTYGPZebV3o^%%I1Kh`eaz;S#(RdO>Ow}DtpyTXl0v(yC-roRj@yW{3c)FQI-BsDlMIuC8 z0Vw4HlN=4m&QcPYN^_El%*g=}WoorfnT{nuL{;4ZIV#Q$$Wf7r$khRl8XqT=bei;AZsvnsb(#nE+*RRR$rE{9kpQgV+~ zA|;u~++r13yTp~0LzH0%5K(zjagSB(Tx240bwI|k-HHWfVqUR|r{m**3`+rH6@~5G zV--h&Sd>G|Kdy! 
z5=2Z6v5KeTrxColJ}$YCxr6!ikZZIO>J&cAg}qLM$)@sa$j?1I$V$HZSL; z3}?3I?r#hoBDyfdyq%YwnM8amYp^sNPp9auj3aP8yt1sM?S606oxWwaH|cM6`;*Cd P($1Ge@BWF0Kl=Xx18;<( diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local deleted file mode 100644 index 68d601bc958fce1ea8687d1a8c5132d66b66c719..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 267629 zcmeIb3zTF>eIGoxdvv3< z+o-2|=vgN@_OA4;ILo~H{;U4)|NmEYtFE2H|NYz#{=m|nxw*Atula@N zjwY9#8h87{8{c%UH=M4vpZxF5%bxgFlzs5n?r+_>XY^;=+2@u%|Ha+#4!CA}H0W=f z@0{z7C#}(`NpF0vJME8#@!tCf-HqPa(O|PTUVb_3nw;%!_u?y855#*H|BCz<#f`=0 z@?O}rIX#d5+Bg&~e;@4cjCQ8mJJU|~$RxfRWKY2k#O&DJjcUV2PQymxu!HWY-eAIW z>%vc@l~Dx09l>2JyTjh}>CyP9&SYb>-D_?3hLc|Fsor?l8?^2j_NV>sp#P5ExHaw$ zH%D8Yonims)uR(~GZK zj~AQqJW^w0X|ai{WDWgeCEHw1N(xPQ zrv2%8TZx%$F$C}Mi3n~oadE2Oop41JDi=o+Qh#tptFGYImT#s--V(nG2(e9-m+dJG8a^LLfV8m>Z^~gr$u;+0qEz=KnSH{i8gP36f5r`d z7&AgGei%Z*?9n8|=Oeh@R)}10!G&?dC~0XWw(wr2QR+_wlmQ#a#~RSG zlhV}m^n5nf^;Q4D+*jqpsa;er3@(2xan^WYlpYe0 zd$U0Uf+xejF~e>|NP-7i3MmMy8m8dOR=Gt01dKRF)|gg{PLkHXVBP~VfZs8LHAqJ# zguo(1<`{tq1z|0qV966MfWKoEZy21w1Z5OLC?;tucPpfzJqx6O3^G%sfDCGh6mVsk zq8V|@w`3j&i^}&o&^-YuoRF3b#?qJ2(DG6s1ui8F8A3iy%O!ssq<{>hzaV1y(a9TL6qmfekgzl*5S9 z8z~_3HX&J7NC6?H@n=hAptiLhC~*aYLp%t%2_zJv{>O{LJY7k26)y~^N$ z6;hCMA$Xvrkb>}~LkfQ10x4*>1f;+?`IVL(R=~xlf>+xHl}8tpdkJi?g0=%zK;&ZF zG0|acAl!}u8?2z6DI(ufV8jZj5UQ)f3M)8V6)RwRYVFFuQ(sMHV*nMb`fEaqOHW~U z6_6q1{dK@y1!N#CF9lY>=t%iWf}eh*xQE@Y0vleq2CMR}0-F&qR=^3$P!WX#7Rdb4 z%wN1DV6&@$JgKt-`&|V#)V6GP6<87S#R|wEsuvEdumVCvCqVmM1vb<;Q#QA`HYB~V z0y2oHS+>Fo2r-RFEwKU%N_JR*6`{JZ0x}6Jtblyj)QGr%Y=pL5`_H!{hL{L8y9&to zm9YZypcyM5pVZi3ur9wS#C()tg%wb?gighX6_5wR%+(ksY<3kKt^q5+G-rkt;NdtI zb!lz$S%(7#BwH4&b`|6*sfrcg$IMtkPQHGuAQxyoSV7K(;DN?=JD-!!%W0jQhA&od zxH49NThzq3uYiP^?DdNx)?5Ir;IIQ$K;&Y^3K$!hEmm;25>`NkVEy6&)tMDmaHJ|$ 
z;F2K8AO;A4EmO2OgcVjm2G!ILEzdU)$Pl$-1!N#CF9lZMr6V~E!0JYcKUlqiV8aX7 zU{%HnY(}_{SOJ+|b*z9qsj~xntiXnv6;@zH$QLUhgLq*DgosXn_E>=pHO`dHSb+^m zZ>)d}Vr&9z-asJ4G$OUc3M?quVFgx%g4v^~pSIaoKxWJ}B9cW*ugG5NPGnmJ_&{&s z%?2wVyj8{u$b)9AfP7M8gWAz+31$0W>$zZzTdN2`A2yFIM>gPzl|G;1J+$Mzo)Cg$y6kQ1DNoYGlKx(M3Q@nXl zghg)&!`DIzl_Cx`nS4O;a@d2n7!CbN1i#yWyF-Nmj6wvoH1vu?Kx)Wt^*tkVM4q?c z;j7vg;3dgb-S(JfErBt-=f7O)fFxK(1fOldk!t26G((!@GKq|mV7977EBvJ=@4yU- zgfB+Wt|m>PYpN#AfZ^KYS@0VXyup$+{Xq(Eq($rzXvc@D6tSw)6e4^=B41djo5Cnv z_tU&q&Dm`_rrNVLgUA34U*1bTEub1vdg!vP1!igzyt#^05jUxrjDU++UKvTZ&l!?JFzviyd6#@RRT62 zYqSlz`GE|T&7@%3?^K2eepNWZ773<5Y;E!k=#lix)XG5#u*##IAB9`On^(Opzyh_eW z23i=`epno#n@{rFP_!UfdcG-lRe*9lseR?sX9Zc7QG+!wrEuX1c+g#?$lj;@4Rn!7 zchDVoHqQ1o(5)sr8yh`zG0FU~(S6(SSs~;p89gz&b1>>oTjUyz=G}~=uMe~`$sLNIo&TgE^9kw>D$Zw@VaPM4- z`SPZ$E+`u5A@Ft@I=-`&UfHpg*nJ!+G^rr2A}S~Xd)oOv)$46{Hv3zX>{w@X&B=-W z<}>N-CV(zA;7$fp8o+R8tFt}sP0&I1tv8``I%u7*TT@K>Q0^d;IvIlQQGaIV5GB6i zT^V~-{eoZKtRhBbHX{3m(?0t3Ru056lf8uB8T6m()vYI4)xmp`Qho4y5pdT@SqW}5 z>`gMxT3JG^o4;~>!bb^ZWdr}!kGq+~Lz(t$YsNp1V70PgQtid_clHN^#9RMf6fzkb zU2ypdfc|i^_Y5t%;Kv$pb7kI-_XfQUbX-0DYFP=gUX5duPc%q8G-gaT@XiMGDocg- zG@ZPkX-SJw^2cl|?#1i$|0jaYx<$Yg-4+P+frc6@scmWA@ zahpu-8fiA4`D?JqNmbnK4;z6MGmBd70twisLj0t#?ZMHlsRQISV+~G~gr$k=N}38v z^`kY80)Mjs>;5qWUw-LYvGV&Ce3fX;JN51K@7*`5l z9S)s{zH!olPK8$gT?DVSGVN@z&dSAG^dnB}Ksy8kB0ehMfhlrWNk~+@%C1(Y9D5x7 zF0_I#FZ7nS4Cf=QYlW^;rHqK~?7*eqr^_0G~Y^V7b03iDjTL;N_As}pZ zB~%iaSp~lt@f*UFhCnM6Qle)etFBU8vSwC|rV6Dc_1ZYZ-5 zt+LD^4hNg?*UDAqnQ?csk5)4$T)?)BqLTOvZ-ejEjgpSnIX~$f%8OWbB<1>;ba@j6 zy!S_Noo=vnVI>c7X4VtCc^7_BZe~>h?589CtsJb*LiA-GB)hjVv+JY}wMq^p6_D-X z3slSzsKyTawdGJ z38%vwa9LZ(`3rs35?qp+HKqubg(lBXO+EpS#gEjaN#tRFKZ5(bb%rW0oMPE6HEfO2 zTvqOLkyS(gAmZ;9cd}c4QWXF5nAYsKoA+eGvR&VB5lbh!B>LeUpS$QgBx<8c23O z5uPS$yWZb~Q=!FCZj_leIcU{~t@N>gk`WbJn9(Nfgdc^6NqquVIbjhC_>>X^C1S*xsh2FUxoi2u4Y)s~5uWG|dg!u8^#6%} zXkgeSM&q9u7^2uk&>G(fRLM>IXaNGfHj0`}V^8z*5j+$`YALMKk~%K4NkseFyJA;n z`i5_IvLm`@FO1CCsTNKg+<20@1Y2m8b4<&qauLWnb)H zlP>07W#>OqJ$Hhm6L3mr&Ukisg9?Bn(TQfVOo 
zxEVr_+{5!xtWdb;o**j}bXwBxxxN(&Ixco)f0y71c2>g5f}3It1x~1Ax3DO$0}MGz7>?>8ilPpdc?DjYuW{a7by23Z>4r zTnorc$IW@MeseSo>Pv@LDGmjLVjG)3v_r?Eu_grSOR1`rR|~pJ$EDn;f>HX?;S>JQ zk$B3dq{HN16B_oWPmjjvAi<5%cCW>qbDW=wT9|hgh~Yy~fK6tl>V)A2nOzQ+Xb&N+ zGUvb5gsr+XAkFWpCbV8V2+&jL6+twDWH1N>Ly6cfZ}XOdT_H~hz;{;i6R@|QE# z*zKF@9baE;z+-h8Hoan)iv|d;Q?_qK{%4Khf(^Y3ETCGOuLh^HfPHDOZv4*P&5eZG*lS&m~Q1e3#a4FFkAT)(S<1dzcpbz zYlU;*Tx|tN>M%1@H!VhnREwK%Dt>FNT0}fr+Zp^sg;L&(z+u!YhvV)-8^hwg%n!-T zqe$UKirb6ScSc0;@dmur)(dP!BL;-Z8NzlTR_t%DIlh&nH~ewaKFiv&qynxOP=DqI zmUOLUTaC@)6p6xyFF-1=4(#!P?)jd4%)Ibg)059kt(@_YW?;?CB8c=6_5%`%A(g6f#z*goo|IFvljNN2062${qc+#(WtuvczmTAfz+t>q0G2_J!n;s-qrVGO3y^X$oF()8}hgxRVrG*nL}qE?Hn{Q7h6J(X1tR0{yvlMw@#CkY-Ry`RnI??5F zY|^hMG7S{G zR$nj!-*qy9YtaRQtdj|2FqNKXPbQP5cTXnFR%M}~5|WGu&z?-m!7MV8j2HWiSo=hMtI%N?sLic3Czzj#V_GInadNP3_l{1@rvi1w2CzA=XN>8>D zpeM5wdAT>8E9|;@n+Cq?WW4A^mkZL9@k8LM&$B0!kqpq2@nltA6D|do8yU}@OvV6{ z0d0%Q!XiU2sPPlT{ClsH@kCW>3%5?jk4M^=dR0gFh=lIRC{5@7I-OvV6{|E!&sx_S6SS?1Zh zBa?A$NPk|dWc+x6Ix?Q9=!nL!K{Xb-W8>#Vn1vs&a4n8x*I*WKmHn7qxc2JCc(zoo zPUEb7G55z2oM5w!{p+vU+bx#8nDxyAcCs({bm+wXcEkP(KG}5gf(l!MX!v;sdeG-p zFQrVA`@G0XUPJn&ru`vuma-^EGA029wu9tRFM((^S=d;YzIgNyJQyGMeep;VOYGhC z1AZ_3@usB;BODl5OLUlx;?7XTJpqr!k9aGNvlphX()D@pNzcqvE}N-H6j4XL+|@)^6v}ob!Vfp#VV0Y!xtujMMPl>IKaZ}b&?$`_6@PD=(wguKe(&#bHlR`$m(hG)-{nTjxn4+m;0#_+SJ#lq1Zk-n6@d4jp~EH*WQZ{b|2D=$z{gcF>)tw~vOCsnv}L z%WtIF`Ute+L#1rZhgt4AlT-cfL~C>lggI|r{71}AsaI!n@cTWv^C);*2MMqdo z&UVMW&Cam9)r%YPocH7}hdpWG-0cs03K)%nu8nQ1&ZU8!>oIX)XT^SWGU=Zg z#tZmsqUV^;>Sbb_4wtWj{YaU8bZSShlTQhOe5!l5kZg9)iCIW{PxXgqxVH;@g#^D8 z!ID;$t#J>Xpjlj5q!&XG4t-5eAq#!Ih>zsf3*&n>k%Cs?Jh*@cJ7jYTIDW;81F7MIzTm7)VK?oxt+V@pRV6szAU|Zne})+2jFro??8@j zcDGvh;Ox_X2g*d~-)JJuK&I0AR+@=ga4^2Xkcrq?@E5a3@ggeKak)N^?q;8ZZ$s(Q-V)B5;3*8O9Q^yfIC>`Dzn=X9Mw;FvMXZQ z=^12SwysoM%53K@H-(=Y%oe^A!8*%Mb!iLnz)#vnTcdD+yQJXvBaqy}%}T+b)Xqv- ztt4o;&&v=8_*87t>mNmM8_Q8;y`t_%S>qZ84;v8D>!$fKR~E70I}NzglVLG%_zF;V zbuBwZ2P<>)@|r>p{8{7~s*xiJ;lo$vMZw|Whz9$H-;O|X54lOTX~WslR~$FfBH|$2 
z5WmK%h{*cbmy+uJiuL`ddY{kGzN~!U!-nR_MEFVsuV;C%3mKFvb-2(BDhn0PLMR1_ z%Smaou$oB(zT9xhc9caiK=_C&32Q-$z+e!)vJP?80emBZH4h^a!-0>4i%{UojdqRZ z3r>v(wIy8*!ByWn6*@Dfju-F;PVEN67Z(U5y@B(jjs9&P>;BNG$zYfiLxDrt%TRM^ zGMLB9)u=Y(J1anr>S+uVzUkCnu&fF7fuEdf4hd*qjAVJ04TqAF4+R1(8qvz|(@uBN znWB3IMq|=wFhQcJ2~>^c*VCe61zPbzo1%iwiSJ38Sz*ywT3kc8MEVb&k7PG3_;Q>r z01!UCre8)i(X8kAIH3Y!f3)e)CQ`|%D!utT`-6dOo!D1sf|(?e1Ll2)zzZ|91^3d- z6#)I=X78EK^nAKtaj2%wLR29sZ&1zr4bpTT1(GUCzAx3g7fEpVx?{sk!=A7PEWvU;v?4k=TIY3)KJabFHkn~m3=mEY z8$-{nO=bv`ttyBw$jb*!ijh<@X`ATOAhYZVVZvA5L4>0b1Gr(k^t3nmx^KwFm<$!H zF?Pdt$ut!BCvLn*fOM&Z$pDhQ+jV2RWM~C%c~k7L1YjU6nu!axYZ@XzLrjKPo)xxB z2F&!qRkD&)=Z`EuEWGP+^A9LoWc7Dvw|X>kN3NlUle(Mu)rOlFe08@9_c z>7TkddPVB;l81?NH*A-isjSu&V^fgO{tB|4>8kY@?S)P^?=Y*5te@3^?V5S!uw8B) zNwJvdvW8n9|Cz@Db;}+wkvSOB{R;z zOl)0{%;$*h3QUXX%mH5%D7s*~0*Q-1rMY3dBAY@|^OwB|l?~&7j@WLyE^HSQzJS;+ z!x~+qX-GR`BMI?HUM|T4INNyQU!mV7p|9nPIzRz}m1~GJx^@QoI4U zN)rxl*e;n7HbLPnc84I!(Q`>!905sX8yeds<0-PC0r~(pY?lm*r7l#szEUqzmkYK_ zrehrws*+YD=!w(10-29p=5xe$$sokhgmmVBFA5Z0uw8+~ z#h=pLuw9W&A*uPxenx4{ZQt%;8*G>JyU_3ro`D7g2Dg2?3}@;fqGLv~IOGCP<@^~j zBS&nPVN--|91ur_nhUneJYF$ecu2M~-0Qe+mt{@}5`K~{0)&SYG?wg$?K1qz>RnRb zddyi3+4A6pjyE@Km*ee)5gy$3?Q)EY0m7+aW1v~LHkl!CF$W2^eY?E8g)reO@7e=} z8@7AoB0Q6U%*oI}r=igSLnPF*dcADK{@5-V-~wQ~WM~C%?%1w@aH%D(OD}~Nxd0Y~ zi*mzj1fJZ^WFP}(`ruNPq(JrJs8pg}sC~N}%K?%WN6#f`aRkc#F4;d2$jIciZ9Uj7 z&!m6q;^>*2BaFNpv0ZMavRYS+O{Dhqa<>b+Z`aJTH?~WL@JeEC*e)5G0BWT@1n$_b zk+f(mF4(SlkO0e+WDti`=C*H_447+$OJ?DdPbdVK%-Rv#C65zR^v8AuiZ0l$K;q(0 zX>QoA$fl6g9I#!1VwkjLN{){Ec8}O#yDsnzCai&M?cp0YY?t9oA4EcKzIED|hiB@D z?J{hNRj>o%$WU{^cA3W|;lgX*E+(zm$e;oLBz3nVw#zan1PMP$>&{asf`_{kI%2yF zzp{Fl)VJ>Q3KAULuw9NfAv`!9njkABJY*x*I*$8xIYz|*;ea)A^xWEHJ+>?FW~(`P z5T|j&b~*V3*tg5kt36Pl7aC4hIZ=$e|4n=7E{PUv^z7W5;7HsyoSiG7&x5u2a-VcV zVYDnconH@vMDQ0C&)tyBrwqA?p1Ul8$RfV_G0amITD%1b?{2~m1W~rotyDV6L?zLr zCOcFV5Ecqh9_{b1iFY;No*w^>8k~muI*5>FV;4d?k-Z3O=`2pkbWgEnyLpicyA+2 zPCyPQ#G*j@2q`s#e9~*pPO!1Zbkr|rMYPH-H^QVA^1WeHvNc9p1*wbY2+*}=m9DFk 
z^T@>>%bkFctg7)9NvZY{z?}%b>yv8>r3y7q02wB=*qQ|XC|t-_+VZfYGdGm(?h4>i zLTm{j{cu9jvHw+{{)!r(uDY>yK40CdgHL*|gnQ-5aQOm?DnI1YOED~qRZI{$dh}B4 z)Jp547g=n1XOPHVu3UoXU^elkFT0^Wdz*UD>yC%$F8IzE-5^g2kn;Of@Z8V6o`)tB z=Wjt5US1C5*-)o?)9#9Th|{M}Ti$}MAEHf$;6k2ax{~b$>22(=0k2eW(c7N%2csbw zq%5kZKX*ghPE?=R1;-Sl>}+o$eRSBr@*OUFuXXF__V(a>D&u$${Wf{E-NQ-sH41ji zW>Ot1W*_gYBunf(aVecy3S$0gbWwPk(p8^fZ%R*IRi;z6@I-5z+P}6FgGnT($}>1K z?r!#b!|9~O{!OOJd%M$ncLtsGf!1_qd(dlbjE2+kXwcd0ZTHXxPQ#7!ot1KEohu@U z!>U5Ai{%*hr8w5GH<_Y~+0Vs|>*q9}Om=BHd|(&6ndJ)WE|a;X`Qj`hGhR-aGI6Dk zCO(%|y4XFF^;9Og^1?G)$=x#cX1k8`aCCFi%Wj>oNt9Wwy3AgK&+LNxy(|_L0c15p zX0_L|#3fcsA9skF^x14-A#vD#Qp_D?iWgPj-DKsC!;rNFBe@0$y)K(Mi9RX@Uwn1i zulUvuh;McHHPsIJ!wxZ&4vTEi%?nKsSu@jVU#LJ#^dYru{bIlx_y=Ngq?^o;je!_H>=KB{u#SP4 zdHh9X!d`*6-I_@t#&ydjaRi3UevK6|dE6xslV_a+F-PDN#NWcF(e#N=_8Kun%>4#XUR7nK!z1>z&Nfmmn6EIt53(ch zK(T~IatHhIO1sU-BvO;UGX#|vm1XzK60yOJITZx}kNtNZS1-gPczzd1E@XEwbTy28 z11n#RnWq$z0GqWu90fCh+~h@N;BR9UV81ZJ8F*zx!*>Un%$U^?JKHYGzk@LcKf4E# zGV7UbZ|w}?CR%B?TENxs7C7;wVhrQ*Hz#1;f0C17sFKJ}YN+I1XS#T=i7?IkJjs>j z=Sh$*EK!Ev;x|J?F!&q^Xc}ye1iCJ^042V1BzQuwITC~>;2cS*9c+%I;VI_0tlBX~ zm(V=p9DFN+S11CAu!~&sF=mTYq6u}1*VH0`tcK^v9<<3A9lRGQgsLbv3(naNV zaw#hJ(N3{*-SMPVeMeUCh$rqcNG?TX7sceTViBIa<#;J7kG6ZQQX!VUr0_l3sEi`j z0*=p#A};5_O1Yaf3wIY$WPWxLULt2XoO)fOq03m&$NIz@&Sw#~)qHyneA;Sxmo#+&_QW`rgIe@!a~Z ztm#>G3*s+uX{4%AGMc#D}L?ibAzNx@;Jh`T#8 zMPiBU+cvv3XW{IFzYZ^Ft!i?%yWQ(_hnti6`N10**yPZjmqY&UpF%OMsD+owaptxMjH#$RAQz`Y~D-} z+q{_~v3WB^Z1ZM{)aDad*qol~HUjnE&8!z%`knmERzW7w9cGc~@++pxD|ic7mAtdbFzUbdD?e;Qzey(oWU3syZ3=2w zmAHw>Wm~IG=QsT1%&GOb244~wJ4_ifRUUa&USjRAmNk!;Zsp9WHHTlj{m%k}N30nn zrIo0=BZc2NaFbK5>~d=ovBDn+%)O2>XI~GB?7dDhP{Faw_BfJu6+Ejoh~M24HFy(c z(qy8floeULNih?RqiBWVVg|U%E@|*)%A8SywEh;^yIDI~7Y&l36mqMo!CNSkMh)hz zgUI47g+;muA5&FuQ_TuW8oZSl}K95q{N`UlyUJxGM0jCgL%LgBfyq6#+_!T zI~e3^7I{mH`A_!a2A4=9xt#y`kob!g;fc2J^I_rVBf`(G6Mnu)`1xkx=UarIZ-w_= z3G@A7f10y%m|J4uQJB&qR(@Jk@;cjx$g=Nb(myjyUaNVmKsm`EHyS_9Q#XD-FDMB8 zLniv@^HoP6`*1M7<`GD`AagSFfCDo5ymOy_{U}Rsm*L 
ziRoixM@H^o-!Gx2#>yoYi?&{pA>SN;7BY%_W**dtSut}v<~}!uS~0uZE74=UkD1h) ze25ftaUsJnMFf7A!)zEbUh_c@<7GoeIu7$;$rST(BgC*qBy>-8387gqgo-&i=qzs{ z@M>)CBrdFxAc{m?%*Txo! znURb788XFu+z17-qaszZcC^%3eAz3$@auPf=K985`rpt0U(fH`^DCY4vDf^X-wmV>h24{_!7xtJlY)?&d~!GEHZY9#eR9d}flG%j6ImKl^4Soe28P zIQ>obP5PUqtEv2x#-Zlw0?fA?%{_1n?8=q~lgojKZdbNSxrEN!&J-Fi;H?}>mjV6@ zb_H06U2l5(PIpjpn_b9lu7#!Vp#!d71#$a|!fW>Jy=*tc??W1W66WvhqXsaB`OW^; zWC{A{*u-Z#sjIOnMpjKGhd5y@>6O4fk+~?x-5X%{UFbMUbV8zPmZEQC7umD_0P+sF z5$NJ;I-ULyE!d<&RZIuAVFABfmX6j%pG?1=N}oXfifm+aXCr++HF8wF4=39rbb@1- zTK^pXhf~ZSUQ!5iFYIaWPKDWAjNuKCz8O#>9PmV zPL9#mgTsF6fvJeIaF7a2Vvh5ZUT-sQ_J`A?aqZmF73kCG7wzIV*Kh{9x(EyH`78HF zjimzxKe^q%2QmLf*!@7SJGi5NjtC(AY7zOd{NST;ecX9xB14eS2K~wO`qa%{bA5CuN~JM?e0Ph>Xebs|8|^kUT5r(Xn8ugE9<=FUvp?;RhSx%> zfz(CPuURTlx!V1(>v+mE=a%g92o_?`diN zM1S)c-ms*D*&07AvpLk7aHO4V zBn~_1p6U%I{IEK1g{#*rxsBkrg{v!gY!)Lnd0)bwl#entN>-g6oUQP^-(+Vicac;x z+t%7xT5O`8V-5Xtxl1+&4_JjZH9I96@aYKN#46E^tF6A~p!9!s^G`k-h?hvU!8jBj z-Diky$_Kt9KHgY6A0KYOeg693Dwc5yZEcr43 zF742R@XI`kVAw-C@8bVYi zdxT$Z0C!lolV`;#q!fZ{q>>L^JxT7@TFb9EcHU*lRpXkSCh+eg{vsin zU8U1EIivGTP51b%@ouQ&<)<2EgUVDPIH2ABSVt`vM(8XexqE3OA@Otgcm%JpvQxp= ztb`MQNew&jA1!hQ0ao$XMAa+n-m5BwoPsTsepo~{y-dfloy<1H7SEnaFXBlV`aY8~ z@}-2BtZ5kEILeQfOzhQM@OQPa(t+1Cta*fYTZEAeDa!Do#FqJDg0|9z=OX@wZAm~w zYm!0Q_q27-+IN?Ia%2u{c+)rVC{&voIxEhY7M@EJikH767pgt?;U}J5_F+0rJ7IXy zwee`0Z}gfbs~a^S@$m+{#o)|VdeOG>+^z9mHqA4oF4wc2?QHK&oU!RGQ;Gw?vyCuz zKpXl{R%OIatW=6zjYuu>lbXUg;q!q6NE`d`rW{6;TyyUedo=AmewpJ_4Y)mkKjQ{J zj6I?jKMbK@_GpM3@O%WuZRsQha=itYk_@Ayb*_jCc$r428M%@yroieVT0g-C^05Y( zE`KbdgV1YO3nd9*gOWxdP&|_NBnEw@Y2O5r6p|Af5o^f>;?Ts)CnLDU947RwKWFTk z4rp0}5jfyBNXF^B;6IrAs{EsB7uC&L;-2oaErWJsBSpNhnw}H020v@Y;BtQQJ*n0h zT>e<%tntDqJtQD^I)et-{dfpqZ2q0*ert!y_i(m9>C^OJ9`W?^wkf1}88<8HEsv zNd`p<=>BsSy{*2`?}ZeQIk1hJKT<%bwlthENI`j#+!+KQBeVRdyK5VyfKb%(QVUW* zMz-U{1}PvEb-ef?1vVq#Wq)Sjo%okq^G}9M#YCHbUS_6p%LXYR!{}sZj}+KY+d_-~ z#?H)&5bDenJw6e=8-hRvw?YcYhjjw9M+$7Hai$zbl-@`InRgJRfDqG&)DkJMpwvn? 
z0|c{2gG&aoOBFWQ#FwlfZ>S3i2~xY>)!{QO-prUgy&OV#;No z69E9eqR_RFHb_CP5&^(7Ho^EYGg6R~FKJxii5hOSUg$1wE2JRjLhwLKAr=1TK1 z=Qa3!3#5QtQXc2;W*OJnQPXLM6>#yX;PnDv1@so{VmEj zJq1RrfC|BSTLb0BYF7b$51Vyq9ag}^bZtM6PHoL!wY+LQUZigic!3IJvQ{hcxyP{DF$4f0(K{Z}%umVC+rx&iliiH;hjJpbKM!;ABW6Zi8{egECkoj@I zg4M19@}y37_E>=pH7l&ZiV(sI=vHl)E`|)ELL98H0zyP5Kzppfh8kze<~G-cq&HST z1~E1PHoFQ4F^xzqu>uQ9c36QGp9VDexfrVZ?Y4OWmJLBy7s999r` zvj85BcMeTAYPG8X)8t$<5LS@Sw&d{jVglP_sJc;}x>PwtvJrQ7T( z$hi1@t^J)}?h=K@AW9nFAZy zwK#-Te@&=r>W7-J0x~kokKeumLQ%_0Em#2=*^U<*tbkC|@xnD&m9YYw5iTTFK<1aq zlx{+>!V1WfI@#G{1vb>IumUSWzP%Y4#0x7RM05hQ#|mtyai(m>3T#MvV+CZ8AXot* zrV*(nR$xKN4lA%C6wDq?os`YK0y1N+5vhb^O1c&oM2<{kxzx8f?<*iAD`N%ZK{HlB zKB=)mZHM9|DS3}#vI-llfU+fYDn_h;JXjQsUL2ZuvB3)Rqj=devreo4564$6x~9u! zUje4cxv0eJT)M)1wk4md7c0nBQWYz}kD0N8oP0^+T2J(lp4<<0O1HrZaxMf9G-3t# zG2Nbk!Z+P{w%S*4q%u~3TU0YvK*9{h3Mek(0Srh02P>@LhyzwYfA-TuA_H<&fC%nfy)gh+=#;pV!F0v- zU?Q9l*zEa|Lti;X@>30hxI_Ug{HI1h1EJ_bP@78vQe*o%e@Uu#_9?4rSik;mC6{S}qwyQ}~=$fiYbCz-D zHzIh0C2RVF6eL?7qWla+-;2k&f=u1kM(G}5&1==1-KNZNp6xryQe+Sy3?V9L*pNho z^w4En3(V9ccykr0B!2dwa4`eF!iJ)vkMmSZdd}aWD)ucnZWX}=o3sWziQt>|`Aax| z!2-I;-m0)Qw7|zOL^22jW$PO))mJ);!8YjTFIiMJlS1Eq&`1QoDx6@8cAoTyt*u^n z*qPu9PddHvcr}zkSi)T_fp2&yShL)75y876{zz^m58Vm6HUnMCMefN; zuli^`-Rqw@JEaHmmd`qvn|tSNe21xOP~IyXZ0Ix*{_cnaZPhT&{2Cr3>AfJE--Bmi7 zq6=d-I+Km=pgZntob7Eq)tT&UpbLZ0jX>GWU7r;~o|4fMqdNzq?zBa&(MV28u0WLp z5vhPQ^J1k^1?Ed#Hr=qqE!ru$xtH8);AmBR*i#Neg=UKoN7P(Xz;W41^VxlUxx>yv zD_YM{yt{@V=(!Y!26PC#orcm|uhOfp61$Hhg(elml_G-ajahLT2v9LyT$GjWucXL| z$|=#M2HeSDN&^_~Y<198Z4-1eV=KEIv30&~P06wuS^!{Dh|;Y9^=EbtQQ|Acmp@kZ z3x0XCiWtpGLVfh>tsIDDCR;4;4Ej&?E@nObUIc5ESsM*|lZ>-gmQd^F&%KQKuYTOk zBp%AV%(iCyb95)5yY}Mjiqyng|6UX_85b^i1$Xai8dFt~^T!%+b7kK1t0D6%A}uRH z)~mD7oA^Y7#6x3+oK#YIX9Iebr9yj}PTtS7q_r0LW40Cd;&uA}6TxQPB47&g+6pD- zj_F@E>Jvb~eyTRX(W0wLpZ1p#Y}92nN24v^Q3H8@ogmZq_;v_^>kW&_s!V+g+d(zRmc_bvD;v2a;yA)u49tM~J> z@JSYq@WPY6BdoaD2pQu_0j$HJ6Tt?q=mYFfDfo90yw=LJv%xwmbr<~%w4_;`Knrg9 zsaAAdBjv;nv_n83;-dl{m?DRjgrw?NEp=jzqu+(re5Yu8??sIR{<5uw<=n)(^KTMv 
zUDL)HEfGU1Hl?0O|Bx;M*lGR+0LXsC)$T836JxYc{Jmhw81+w5n43hOqh`V>x;#I2X@F~+Y68?%%% z6mUhmRD`j_ih`>i7tL|*U)V0CZT8Q_yC3Lv2Y2+(SuT=g`IdiU#~?*>Q21QLU!$cY z_`+C&`I2pD&TyodfY)}BmZVd=#ZRQ*gu9{4LbPW4+JwJWt}@SzyPJKqnmOSDwxqV> zD}42ymAAq7>PAV&>zto-4&_CxB9^hEDdQl#KZ5IYgWcI54Dt|XW<9Z+ci|WI0qmzE z{;eFW&O-EM9wZwtGPCQX54B1TWhE}DsL>Ms`=ua|`|N}$T+Km@`bTU{H9m#_=5bdnMFE(YXLGH7~BH0)Xr{mEe6-sZ2E;!pbA1~g6zE1no^D-pQeY{*c zPXv{YsMo>=d$7>i7@+mf_%V1SerPb-Sd}c0`?905WcW}MPKP((vbK=(7y7CtxFoaE z*cqy^C*ZO8k(xBtSyAfj??-T-x6V-Ig;OlMrN*Ng`Ueqzx42X0=O;z+KaXiW-+)dy zS*?@J*mvc{PB5Pyx9_rbym%oEzP2LCt;X?DkYXW}OMzbc4JF*8+D@!bfHhjWcQTA&m` z@u7e^O_}FjX;Z=yxLP!G|3i&BxNjHS*CE@$GWcN7;7HFzm`$FjLB4IHn*+)xAkH)AFfA-$g6%Pf=9f)DqHl!a7G>j z$Q!tLDYm?Uhh0a|Z2f`Hg~%J_>7{iW_Q)B8l%zfZ7xuz6Bud{5f)X*}%+yO3*xa@J z;Rf6v(g;uV2R(GzBl`cuKQu7x5~K0Y3=C21B4~~81ghkweY5~E$zCp{`xg|k0)9S% zhk{5gg>_m|$CY0uk-m?coejzD-%}r*(n!zF4ns)Z8=H=d*}!KxfqeY8^)Hx?uK zSP0BHHM&fI$J28W;$7Jn``4t4xmVfwk5qpwoRu=39p0b<;7GiP)2&A;ueRQb{pk>@ zM{l`HRByM%JJ87(L2?hz$3?S3L8m3{p6gqopyOg!_IC-MAX)@-SePJjq6zN`fjH4A zp>Ob86@lZ4>g5f}3It1x~1Ax3DO$0}MGz7>?>8ilPpdc?DjYuW{a7by2 z3N6g57Lb>YoAY9Q`dFB4hje(A;!rRswz2s`J9Io6YeJyDl&V^JwV=CnT*{3S2ShI& zKH=Y#<1;_+=$}5_8>20uJxPbjz3npWO`jf((LsV6qwQXcJLfn*6}2$$DiFhmq5zxB zO4SL&4KlkNERh{TssX-~{J`I8!d6`xkmh$)6I!nw1n4RBit%eu-_rp96*zjx|7wm|$h5$-ZZiILjiROZ zy;O{}vg4GKnHXtx`F*}oU$#P~nYL_5?Ma>HUmx-G!WUahr0EF$q(+mZ@KkN7B~w~5 z+x%&{0ZQ3J!`DM>g;#{Xyy7c5<=SjH1-5F_YZ#d8f7Tc-7=))bDPZd!)s>8;)uHtU zKec^-s{#Jo5cT%WVnMZuA0tEl`|}m75S*xT%og^X|F5XNdX4TtuV0}wC0QpiK>L`_ z*5w>x%bo*hkJnbXrd!?LS{wB|FTfu)VY^)~aqK4ETu(rMww|!>Csd z$K8cChQ)iCACj3zk;07>w->3=w+KGofVbLufvsr7fKWL@*bc;s{p~f!w{rA`KW^G* zSzDG=z!d}P&)mS0uC;8dv00oVQP}VWNCnmj3qIR~x7Fh*!b2toE3zN`mBp^4f$MCn7%4JByG#d2EOWV4IO;2 z7rlBqoNSLKz0Rq)aeV}zYS{0()-Ya{D{_g2{Ek1{D-dU<0bUT@R1n77UrVwpu1A8O ztbm1C3Q>`lWtWWwaDwFF5LrWJ1n`+AoMG)9*_5L&lXoR|#f1=)eQR5Z6$evw@!eUefK*sg7QMD8mEK0x`Br!` ze!M<)qCH#cw=dSRw=e!-h4+(q#?cX3D03WXr8m~%B$s}d^;L`HyTmr!&nhewO0Wbe 
z*Wgbp96TtO5LXE>1svahTEFQdAEc~?g4fFZo4~vLLH8b%(PmA(@593q$3)G@Hgw(iG3Zdn+7OViuhu z5BjF7vUVyQ>}sBMPJRVGQeh9fnQZ+jP$C!mE$p@oFddt1QCS#i_zYO)IX#Ni`Y0<~ zuFEYqmjESKU{%SuWhPLIR;%jh-aVgVbIrmdu4HJ;*5Ic+a~}@5?u}UvzB%{|%S7Tc3?ZA7+ER_KKLTiRmGS%VDQC-LtvBl?8#&#(R;nINn5WGexBGE0${*)KLR@_(~hI`dv9<3%UBT#%lO9|Bi(o;{h2 zWPqNGC#&+Ba4E3dpLq6UG6rUo0n4n|XF%U|G8xx~1nAY1@#B#;rC!zPHxx4aL_+su za%R1IG8}T0YrmdMAS^m%5immc zWWvA1-66aQ7?gL zHAxL-xl3OzdI%njkNdt{B#9;Vx#t6ZFZ}VQr3xe5rukZ;!)z3HhAQp}cr1RzTXCGd z*!EnX^vpcvN`>xBv_3A-*>(9V(Pd=VU5No!z?$WzA%ge&y>=jJTA5z!8aDflPEC5_ zbKPlwG_=0~MkW@{R8)RegqTXrRTI4=FWrLp!wq!R@`zVXV+o2nEbJ|GN@GXG-`l3NCj5fm`+J-XsMN(} zG@sXZxzTd2SCUz%EbzV@DI=OTDhoRB!3b_BN0?K+X?F!3I{I{P-0Bbe(|&i*IoBQR zpgT`*9}Oo{$sGu%PoKVR`HeI`AAxp!sFa`iz{*`zZ$_Sv{R1ZWOv-} zo*MK{w!NoxIqXRb;BJ4|8^?`!E}i2fyZ*JYn*FdjJ&*2!Z5&cx7=dyDdScw$>JQIkCs)z3z*FDwOA#z-RoNQ%&~`*we_G`(gc$EGH99~wSJb!*nYo?26 zdYScjJ|p0AE$={%Zg#g?_u%Z)e+SA$=-+5UG7xvOdK53B5*nA$^XPK*x#e4FCThXK z_y$8JV#no^ka{Bu&Y$0k;0-K4IQFu?F>t8%KEu#yU>3d5gX>bByOQKNSf#>+Z#Li# zmbuF8wggA@6Q1mfSax~_*_W+L;gA-pQ07Q?oxpb@SZCR(E^Q$m_({9yHr#nK7Nox) zf#eo$Rtg5Cc2>%2y~u*bA4PB*%TZ;$qV9*Sak->zVXHF_8{cWbot_Mffx}mTva4&^ zDLPnL5dV-D=N3v80ucOJ_S!aeX6 z>-!g@cs>K|aJ6wWtu_wA4e@KNs*Q5Pt2r1Az7oOfSsv^{2IYzi7tXUS)d-i98ew4- zi-U_q8h#|j3w*iZlIVMJm$@R4u;k!YBQ zFE}+G)RuHLgtdv+5HH{loZ1bBFD?*Rvm$9VoGsZHMBopdnhb_XF%&qIy$m&%CWCpr zT#YK442?Z&K{!`Uz&D-R3zjvZKJb%s%^?Bpi;*m^vf)rt@}WSWMI%}ne%k3yI#YB| z^=K?@G@4nh&{$erL&!w>Z~67KURZ%ve9)#|pfh3diKb?QOaq>eWH&ANa-1yy5I(Oa zHPOCxKsC|aGOmY0@u!3!;YXVeZ6cMSs?wXkvp*Qf)`@+ECYVVgIbhy*2)r;uk?t!1 z`oqoMGo5^SqUmRJ>Z&_E;e8GM#HFR>^`TBphQTGpq#v`T6eVC`nB_lpX=-_v#Y9!T z!Q$yTG_^bdNwU(x-(G$$FKG?B26Sy_bp%piQb=3AQZEvhmpn|IBQ#KQv~i`df#gsE z)Gz83_Ir_RYo|ZY7e^BNu_hd6d9iBhd0UogG{$Lc7RHi1Qp5aP7dDmsj|h%>bHyby zN&Dm#DXj7nTPGUNzk@$-z}?28@H3ztceY|W25`f6>1l8Bb>A5_#$+aJZBYldOU6;)pSbZN0Wu)AOU7mS z3ytlPF%-PzO|iogfPt`RCN9{nX^2`HVlt?y&UeIi3HeMPrb^bO>mmazmy(qXuH{Y7 zgtfV0yJSY#1cjZB4ndTo=aRHI0+OT^8rvn~DN1IO)ZMUMGANe1f9m4s6{*Y1L`|H# 
zVY}Q+Wwovtn@DKqHcy9Ho+NW&{Y;<2+^}6TPNr=s&vjwDWMq~@brN&KcF7n@?%c6m zBWclCT(Dj9AQv{3xna8kS1+_==7#MGOp8631HLFwbisB75*L3;bHjE;Hie|-FMCtX z7bfjFV!Q1NfbHTj`OvQGINh*ahBGdRID@aUtq(4+PtLwrNjqY@44Y!ua6lXxYA)C= z^LWK@;UUP%NRA`6%Q7bf2|q~}0l-5FItOyYb{T$UwI8W(126pGQSF0+8@9{w#)XGK z*lx~-=;Ww>&fjRSx{Dv&uw9N(F+e!A$qaVs)+RFqF6JQNhVAn57Q%$DylW2>ZrCop zUGairyJRM8_|P}TZu@r0I2Z;gEqn3UF1Pbvb@D?%0GLe9ao;X^T)~?=wre0F2ETX;Nrn{$&9cG`eVBsJ(r}#5vZ87yi_8uZA}QL z8@9_c>7TkddPVB;l81?NH*A-isjSu&V-u-;YruBNTgyLeRl;NXVsa=g7T!h;*O%P}ej2nVc@qvzHp>#<#0Q@H!LVz*PZK;yV? zmy?fpK#Q-uYY!A|*zOUxGZ~mfti@lKYv`cKlpW7xAdh44$$;$!gl);Ve0Wsg#g5o6 z8AHLFJGN^eTxy9OuwBy-wKT+J(3-GaLO#<6m#VzC)yhg-msXBUv4jqT+rC{w51XJr zw#(6TNm?8MNo5;)-!2(Xk;zO5ryI6Q2E|erDqM%u#nCHLmzRN=ICsN#xtYppT`@M1 z+PB8c#D;indIUFY*UWQW*e)5FOC+Q+Uco?G2f!y}(GW<%bqx@Q_z}RlCX9oC*w79h!w#)Ixg$LV#Wqq-S z`iybJao;Y-s2Cue8a4)+b!(Fu0vB_TaND=b%UcK&zVg;NLWm%e2F8LZ-Qu|2rT~MF$n^# zElKXqWktiE_pP!XUA@BLaTh!S{qKtB2&y^esu&4U;;%h1f-Fnml4_AIU4E)`ZHJnD zv9_sncafTGQky+ql#%GYjW9U@IiL`W0_kIx(Fk2@c7lyPrlWp2E233yxyB{6kfd`W zb&%&Jt$ewmZVe+q*P2zju1?M)7khl?UJ5e;q#}eE4R?}43cl-;YYU|cHBSH;CbigF z;V-^wk(V|G=?kS6pmcXv0GASCO91JIlaodw@KvAwiW;A;y0L^WqUHL6Px^kEd*#V+ z$wd`w#w4YWujv~j68w-)FU7DdRxv^3=+R5DQ!8y@(pE6OGOrwN;GIDtd%1E6qJ!DQ zm%i+V`s{7$L9aU=qPyTbV|0T&DL~4*&fvM9dp%z~s5Xvu;pOE(o(*-XH|?&dhd6!u z^lc%AXp|gl~ zm%Zh>b#!}sa6XlByoY|9yxQ*Jr1}~KyJa(}j$N{kcUF=mcAmJD&MXBn|1`QNJWc7U z&#*V8C$B2gDO-4=HBRkc+lj#>l2hdwoEdjF`@P|G(qjK6Q{}zg>AgFHPWnJ=y0bm# zwKhh>>3B5gZ1%Q$==P`K#`(@lIke6d5yWAcA=kxn4ExfOY}lJj(Z%fN;>PuJnk|X= zy7Uk1f;Y2VVclgiw=`dzMP$ayDN`n{^wGrU(n=S*XR@BkBv)Q|W-GZ{#@=k#ksgk2 zZW`^DuJO9wa40oVbxpk{@f7&XF1X*zVo?!5Rx@N)dp%2BVzu;fhp0)P%@!6ChwUfD z+)<`@Q3c*jR_-_qSz9oYYk<(}vfYg6Lu%Q2M0^c?#kY1qe5=E+sdmU8c8H;LSY(54 zUhv`!!aY9|whhCwJYOw{dX*cw_5%ds;#5tiKujKYF+uXIb0FpjTnfa!R@TgP+7~Ji z6MbBuKunHwlNqux5aWkk0x=oZF%UD4zo<;uD-gF^wt={`-N3JpL(Y@tMX3dA;(LHV zTpWQRvtMIHOdfX$#N=7$K+F+%QCYE9MSR#c5Ywj4rTitrm)fkM{FOC#{uMEK|5|3q z#z2f8b_v8}SjRxjJpQ6GVXr`Z*g6nj=!zJtFF+tJ-dB;?uQ3pl$6W$3dDb})a|B*g 
zR_qmskIW>fzUtY@~pwKIsDXruP{D(1MX+A&5mxo1#=Z$`Sd zp{`i1^PH{XG%1yk%EdVvzU}0ds!A%C!*%+#t(tB#&qnUGlnV zjUsO>ji1YV+I8dN9cfj~6mbV?rbxWSOc7TmW{RW@G*iUgotYxBME0$r-I}v-_Q79= zm$OziIosXtb-KgN&ScQvC@kv+VpvP-*HN>YTbKUNe(-P1!(L|ecmdmX!uU?2H2Oq4 z@uZ5)C)I48f3Zeo^VY8$nS3L+`NZ1hjU(rKqs$bE&6_D=n>SM=HgBegZQe|g+I#{F zo73~#$Y`&C{q21b^B?>TxF$2S@*70);yox%P5aaHoZ0Prk}*tDhVDj(=Pz_N2HnXd zz7h_^muIIgWgo-`$g1keug}BFvX4E0UMCuj74duNGPLjW9CEwW+wXsH_0aOAaK24; z!d?pJnQ6z3L;7(3qo`LC3{4TzMPp;$4b={@TqPC2??$knpHIAp`T*x}I{!#aJc-QQ z0sA|no$2<@w3D$viQ8~!wQ+4!VWc2%;%Bz!8Z&(Q-X#rEbXx zp|O``y2C6oU4F%Mc?E9)t1AUp{%gPT#=i3EZYXpy^ke$Xf(xdTl zxkcD37A=fQBrRrAVo-AY1XHuQ1XnVaf<=RQz!xLHmNv$nW~VzC|;pZd5HzDfA`X5r^sgr9GP_gx9|{b7HavvZhRV&PF3 z$!?;_qt` zs$}hGsj>L7rBzrMj3yJb>~aWhgx9hu4yVwqUU%4;ynUxT?rnB@y3OKmBGBfBVY#|Gg4^GXhg-o`-L9}SSq<~+=e-G_5tZZYwa za=CU zRZ&iz<*1q$k*s8+BJx>l710N5RYV_P8FEw+t^jOQL}~>~73CkdRZ;$NmZNGaB1zFk zMdY*ADxweAs)#hjKt!nf2(sz!*o}x

aqAM$1YWgbRRV@`%6_~Z;+jpjmq~+RpOwJysbq& z4r~f}QkU7}H}Y8$GBuGoGfY%Z#QEqOfO#I#f)Ld`dq(@+on8|EK|6rH&a=_Xr%W znI05^rhfL(iT29}DQG2m7c+ft>|jo4-eJx)aAMI#r_eI9Jc+F5XO zG%A|Rh8$dkAwb0ANyXjK*ty88$H~!*WNxvFtX<+t$|1@y1c<0Sskp}~b}ljzxjG=@*lxuFGcm7N z#nbU|K!&A&v5LZW?y-s^K`hE4=J9mgMa9#RS(RI?;^?}@DuDYy zgomhjI_{$4>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABD-0fV--Vyh{_>Wu~gh+ z6-z}XBDYw@P<4$}90?*Ohgika@o_*?tolR)D$S2u1>r%fA7Bv<6PTl-LxdKFST4DT zgUn23X}{KhN&!Q~c4lz*hL~ni&xazH3-QrFvy+MOBMqn&OgJ%86Gy$!JmWPn-8#OCF^l;Oc^^Kvdv~_@1nv3< z+o-pD=sHn6UFpB#Ec5F7RekU8`&HelgA4e-pZ~!hIJ9SBVSRJueb3yM zTybjB8;ovv-TD4#w$@o+{@^$bHmt?4KPExB8RTUf8uYyRhAl8!J)k`(S@}yffS0nRTaU zd)xhKd@WqPb|Bun{8!|^C~hn_TQ7uN*#~0TUttGgcI@6}c~~8R$Z1lMIP9=@sz03a z+}1Jd??iAH%kHQ@dvZK^qC4FjZ};0<{n51Fexg4a^@r`dM}yg*HypgJKWR^Tqpk6| z?#^g%dOSJT#^T&{`*<{+wQhl>?&h#JoyKiA9A7^iZ?3J|4bgfL>^k2Y?)2kpHsa-G zyol7*IJDeER+FWqadmDmiv4-ImhiN11D2zBp|$wW zZ~CdHSze;n^GdudLStBpp@BL;X@6$*Iys^XGZq`wz>EU?#CpQ z74wRDaY-@z0M?e%FoOQAw*VHW{r*@$#r;!R^kp-$g~s6NAy2?2$aK`$Q`EcM)7~DX~evw-qnnjY_ZC zV0OV)Vsi-wpNQZ_6Bnljy(w2zp>lCNCH2Qmv^G2p*T=80str?$t>=>}>Z*g1Zf(?XFgB z-(O8b^eVLD!!{wB4x(t_)8;+>$A(IM1CK(rsiC8Ta@j@zCYTnUD|!^2L(7Z#a`XGl zhaP)6O*>(DDcbn()4Y|{jT(^nSOeY|!s58zU2NsU*)-3Ty7pUFH}hVi=GytE8)58# zHuRya%7~rVx|#~dDngr&G~siB1V|hE@TMF_lw5P~BQj09lbJmHY6ET$;Lo_h4`Yw0 z#ScRWHTfowr>&ijkl~9FyxdlZTyG&X=Qs*+dCJ5{FmW7afQkp@U z|Nm(2tMcJE7nL}e2P2|;wVc5O_&GBMm-8dKr3r(}A4{AyUKpi^sO0dSrgN6~Km1!W z>_&tnc%Y?_g0QM#3chTWTLeJBh+|}pX|?EN-5*OZ0RC4zwgA6t25XRxN(g~Ph|Dnp z6AHpwK+6*xp!NWN&nn(9IDrYuD1<-)rHRiF=6NC6p0%S(Y2aMh$@T&I$e%tOxbtzd%`5E?pO{Ez~h5wguIyn+n% zkbK$S%WNtm1y-DqrGH~XWkc;8TKo?~2z6#EQ}pV{2p3jJ0r{}b`0SAa8)}>>hY_VW zQb6WyYL%^!0zyn9QcI-3f>Jx(3=qs7O&%Y{E^Uwk!aCQ6aADjuS4q#}6`M+3iww80 zLJA0PVoPEy=E#F)q=0-_Y@eh0xAS+h=6iqg%uo`2Ub7^nJZR62IbPz zMc7q9<{|H|?RFIq8d_fFfEAE=$a%5D3J47yFIy7{@P|&0cA_*RE$^wd63qEPAdbHiisB+tl&ruSOKOvH>>~;XL3=O&M#-A zFp1-VM9Vg>jyGggq3FJlFAhtV|kTHke-x7DtKoD0DNjqNsmtUjy&)3?G3j#S19 zaEqE4_Z5%;k-dIV#F~qM6&!KE3W!|HSOH@L1()-{3XW953aAi@N!qHy3M)7|53GO; 
zGFPmC463Og+RPgWF69ClLf$$D+*d#b((*C~tboiz&afR;KxpWA;To*USb@z57ZWQW z^Q(>(kSBGrv&RZ-s99kJR)p%>S3m~0!V1WTbpo`<3T&uxrfkLvY)E=z1!RyQSOFoX z5ve6sU_r?aE3hKui(QdHj6$^8S3roF8WFE7Rcb}%gB1|oDq{uYK{HlBKB=+6qJHv{ zp}fyAG}Dq^$7){zWlQK(j93AA&}5rogaS5L!O2E5V@GK0>%dDv``D=cT$1PI|xTB zVFgqO#UyQYk{Z%6^-E;;;*%TS(7S++7p!>V;5t3B;6^wacML}sNJi422I&!%?Ba(0 z+yZo*;v0hT$Up~N5>V}(_7_9><%M_s{hxUn!g5uB2sKUE@+<>%O5Dj{y5f2;5l#qf z_N+PTPP9tjCywOrD)>vD+l26+8Uf9oq6+~%3B9@ykeUt7;4NYJT1cT%#GxjW4=7#^ zd+-*cp+AY>_Zsl}P+z2Xp%8jDBNFW9eYUx1fnVXp1*pI@nikzf(RY0dg3 zJ4(w+p~ZsFG~j48^AVaM&2pJUMoBPREh;#FQM*7EnKZ?d+rAJ%r-5nQlIYc^U$ z@J;*tRrRe3+;k_lhL*R(sl*D_E&J{)%oYbbs<041+4=@c^=*i!HQEN<{6L1vX5tBi zL{w<-E5Zr3NHBv@`&_>_>Q3Lflbv$hpG?M+_Hq2v9fQ-S`;-1?vmYNQ{VJ&_u!Or< z0^jgJr)K-2Mg;GO_#?TMJoKjM+KgHE)MVV-+U!kd>G9I-C;NjlXJ_<4-g3vku<-WR z@*SoEZ-93T2OBy~gugrD069Hjd1EO(VjrDg%|IW`*1^(w{7bO7mYzxdt_bdA?bmHG zliuhFa#nFn=CWsFH=4`BS|m(HB^Pu|vd2`i3oxccEXUKEO!)J7m7J9fR1@ABh2iao zg-s<#SYMg{9f1tQ2r;SxGp%aVB@zd1yt8{wh#p1RyV@C~|rT zyq$)Q@0?4o>^PU$eHq0(et)~WH8?lT4uVG4oSYbJJ(b>W0_ai$ z?qo2f0gQIeb+;$|DLTl${W_FRhwTe<)KoS9LE1t6nVmzF_={ay(etENs7>ybP z)JMPC%7IvBvX}6?!@(2%y7eThI{0N4rTXCaBjB!+vJ%{Q)SqUYwX=j;H-GMB%zyRc zZZ7dqrajx5@y{bzt8AE5d-39(gW)jo*1s2pOvZ%^UIj21ZS|j`1(fnl!yj+JO_g~+ z-XHci(JA%#t7Rp~dNqzsKG7iY&{!cSMY4gnH=tixDzvBR=KV}tT8xrEBG49H5{U0| ztF26Mcl!Sm!B*WOU<&fuB*3+53VpXEZ58~hMtuSZ*iY3aIC{F2(2MCBIQYv5HtVvQ zau-({?`3<&?rfG#U3ZV9z21S|uGv3aaZ9PkrOxIX|_G-Z;4#|ucPi`!&o*GRMZ z%wK~=PO9Q=f7A%9n0eG{7fHZA72+p_Z4ZuaO&uVw8EbH=BrHv1U1<%U{BQ#{{9_2d z{L-~z<@YW4DzR``YayVMv#a;>^YBRnOvHkKTDS?mBdoaD2pQu_0j$HJ6Tt?q1D6Ul zE2>BM_Yu6(%Cxh=x~p{;{anebT%AA*ulcFabX_Cm#16DWKp^6y0v?zmhn0k66uTY~ zw8qizLMxavw7s`>98i{jZ1JU!+ge!8O}sn*CgIjKZJf~(F{ENM>M30Wu+#jr0FeE# zt%GE|5D>Py5-JJItb*T)_zhu7L!cE3Dbcf#RadFarP>bI+NG?zRo^Kx4p60Krz ztM_It<#i;t+0XnG)@@StDV!=PEMSi@eofezrJSLFE83-^D(f87ysO7Wvy=Tx+oiOv z!TEUi{r%qXj=_1$MY1g4@^9=Iq-dV+XCwX^EhWKc#~RF+Y(sN~BT?|$F4B^8YPa}_ z6r6B3lv#+@tgkKjYvn5Q%%rz9K&zQkE?`S)JIh!LZ-MXCjgpSny)f;rtNL+Rg)YNqXx{9dQSr$ z4JoPF&NkZRcV;r)LHjrQ+xcFn 
zyqJ@^y+R8*Q-LXC+%E>UExd;aVF}qcNmwNXcXgFm3c2lrr4yK^bXV^`i9(v)vHqF3 ziTU(N@x{=H(T|`c&NiP@@j+I|cW3 z$TqMHJ`glG(lZg}lP7ABZ^!I3D4&EZBZ{@ifX;#xk@UiOwzQa9TQ$l`n{eQRf&DLw zy*T>dj1pQ+byRzb;8ci~&niFKfQQ5BRcwLrYh20w{(Uy>8c23O5uPRjHs0HWQ=!FC zZj_leIcU{~t@M$Ak`WbZE?Tx|=ln3dDLxsda~8E927sbeRgD7&Xar3Td1y&$)fFvR zPE(yc3J=F`sz)apFym(SDcC|?N`uT?%a9>CxJTVyd(PfY5{}cbvz_3e<#y>MK zM6rvYHNF$5lA8_C0t9+(bfsCSR#7&a$TAid~uMi@Vv&yXc<1 zFfwDOS~ziV<4Nigtme$5C_8YFCI-G1{AdWwIW@XWfXDG0UD+4=*Q5)Jtg`bTsh&H* z(Fr)EGiN+Iyg>!Pk?2G-*%lJB-FV$9#R2$K2-Tyv+$E~F+u~h1!=PZRiXjBaJv<+m z%nAjamb81WZ-s)6i(T2@C3u3UZp`D6+X}@yLLg3bO6VK>Rz=`=V!4sn=SYrU3n~Z{ zWbnF#KZ?W(H^7<%B`5U*Ekr~tEm9p*bTEQXgeW8E9P^5&C?f)j6R9XbHlb6+*~B`D zD2fQQVCWB%(K)5xpf@Yf7t6O!=NB99gRpP0B}fYiV7{vs1`7nj+^shf39MXZHIJtmEuq^ zD7LZrLpyXl8f!wJzLaW7c?u+_P--n`Fr5$OGCjQSF@NYtJmpi;VREktjry}E#}jmr z;O2O{-{#Ia&QC=xBvGZa{9qJdlUb=cVYoqNw+nYz+CxaI%=vFO;apuBkmh$)6I!nw z1n4RBiXj%`)}X$t0sbp;iV5PgGs!E|oBrcaKUPIY7H+Thip#<}e4zo4)Mb$8NayY^ z1g)?W|J59`kZE&e-DLcm8bwR*d#M;{WydKeb1~BD^7~w)zHEg|Gi}+B+LJoXzdqvJ zg)i17`KL9SB!#DHOD&nwlKJLOZR<<2kPf{1dWfy?itv|Lc15RLn=PlnR&9F4Fc%FF zT&HZ`iu{in!v%xz)FuUN-Q&8Fv9vlw=#SmeGD>I&zSRK#ZHRjNX0f2!#E+37|NZ$2 zRtQeyYvunds;^$7JJ9P_Xc0NBbJqtNaJDYz5L@;fKzp>d!ZqFM{^r`K=XnACxCz_! 
zicGtcvNmVR`AY`1Nl-M1!d@qI=iXU39dCu%xu*ykIiL19@V_--GH-=*;9PA5Na`?i zRJUzZD(qrJ?P3c~#c!_FE|4(bCo7clW&{qSUO60h7upyW?`3{S<{m{25qcIm2 zeXId*w)FyA(TD+|a)z)Sh!y)=YmRT_=na3;w9m4(EUADi2GpOsfh9d_*;Zq-I7Om} z4VQGz_)HVtQjfQk&C4xU*eZZ^oq4L)C-gh;ra9m|g3mW(_dliEKk+F{tfn<%<6Gx& zRuC@gvo69l#>;X=F0qu~ z@%J*da?qJ8Xmvtc{Nn; zFV_pM|ABMsP3(f>T6BiIwS3Sq)kw!+B%8d^lWHa;*wK_JLL^@klMaU$s;1-eC zgS~Pi)9SRkZ?!gQBzy=Sj34lbgha*?#eK9XyGSg!;U@sPH4Bfp zk|{dDPkZJ*9CG=NAg<7ujKv;CjB4C8>$%KI!j%w}6jGgBeNKbx*yM&3OcFFMiXa%|GCC*y~}Chysk$w;F2ihX-Bo~+7i z!ll5n`+FAlG6t9oNI|X8JsCePX3M&k_3p{|(N$^-*OT$%&R;MtQ&IhaRAlJR1nk$ihHDcg1SWWs=~UHkQ90%6f9 zsrLt8Cldx{II6WL>(thh2@I*6+1!(Lo)tZrOpsN2vef`RnWe}JBi3&bn%6oRFFMiX zg7jqk5V-2|?8#N3A(S(Vp>OM!U>oUYL1p}?~zlQA%#3|MBxJ_Gu$lgYR?BtWlq zGJZVL#>`1|p?fkpGqv|*IOMwbWO>N;>dAQ4Rqk$W$27Ei_hhT{=*eUnpe#5vXWl)T zz_lV5`K^-)V=$GTXHO=Rrgu*!%vNQgp%Rje2hW~N%E3G`l8hH?Pxf9Xld^3{NnYz@ z!hoz@`}Jf3VbLjd{ZSE2La(6-lVZ5_-n+P3TTdoTTII~_^V{R;6#TD(vk5)1nJ0RBm;D0JXw{`giCCdYpKUvMfMBbvGMaF%)(DpxE4pUYcLDA%6`l)Tzhq6 zJX6kO;FC=!FQ~9Jh=!kM zpa*;g2^hRT$wmXw(uN=A*bXRB?~NBk@DtisS5s>8o^o9(>X>^OP$Ux--%G zxIkyu<*!7Sk@4A;*yOtl%V<8Y?{cH%T(2hcP+8!e zqEbdo0SP`3!S&?`bE-e|BqD13N4B+osdOnNhri zzb1N)`K(?h#_2HSb_bn-g_QQhV00$k>t4hWU$(D-{YbHct=_EP&Dlld75DbQFGX-j ztIGDIk514mt}N1vK{ZY=_B8I&nXkJ%`ScHZyGuUhXSKT!vD>*z{%MuF5Ms|hcOf&k zbC>+n!Q4XSckpHD)yu5M^N9kNYk3E9+g9&f`)-_l25&=|2>lyRNd~%wW}r44j;}Xl zAeJzH*LoB$qskmt=ZomN^#wj-sJE?UkHEJgcn!-Bj=k(}3>7H(Dw2Bmga%WA#Eg2o?5@LHCm%6diJkFv%!2v}C@ zdZ^PX#=v(PaHl83V&L!3SXCTZv-^C~>|V98A2qv+84@U|HPlD)ufta&cs0v|UC5wZ zsl$b4P+6#O7D6ddTuF+NrL{~V@a2X}wxcYH0m4UINmvUi?Alp;@&dx3JfX{nZ$z-} zVMJm$@R4u;k!YBQ&pS09)RuHLgtH1mgn>VFYBw0Zo(+NEkDQtehDk9LIF!8%HJ2uX zdAwYWwn8|wzXQQHo!SeQHK9K6le0fZ+GSg`FGjMw%7#Np$%g`g7L90U7-zRP?at7> z0^`w-FGjMP7JNCreLR}Z z2%lclFE0Y1Squ1!3OW9QxXli)^l4M0sw(${`BIT*E3aT=(hDFzQR!1NOCWW-+EA>E5XAlM;G*EK1aiy?< z4S0@u?9kI77|-)hq%{C*_c+Ud{prIEybya~ryUaXpW-j-z= zjd2>Ahp{9N)iD3|#Z6`ZJ%Zc3x#E&p;LMZWG`-2~E$~i`@aGM9y*JZhI`bD}Qip#8 
zir4Zk&Je{V7%_Y|azMvRVL(V~{<04wCWAoW85gJqaZmfua!MoJiNm@^PPi2UtofP? zSi^G01(A}|ivTK{RU=K)ne9SF4GFj1$z4?N4HvkEWm60r)nNpdngd+JK3=vaMeVAA zA0YI6DNu_tX4of>48Jt0i^%DeVJ z;fC$f)86Fkz9Ac9GL!+bWmCAmF?Pdt$ut!BCvLn*fOG)>$N-WJ-gRTUWM~C%c~k7L z1YjU6nu!axYZ@XzLrjKf%?sNl17`Z*Dp~JtRkks~%UZ?S+7a93SPqc1IC?Hgiz6UO zTA^ClBEb8o0o<@%o=N}I#nB7HnQ{5V4cp~rDywzH*c2qRUtDb0%rl4Wa#g-!G0|xe zmdrzu>A(%!B~-JZDbuJswreCU8jB0IYaZm{rZP8dSKx|EW@74SU2@sY5!)4*7Sovn zz9>+1!FB}_l9Z-gzDLOF|y6J}Pifjr=&0qF&N^5S|Zl^A67ZcW?GHm~)1%<#3 z+hsW8f{0{+$OU!E`74DaXKct3+hy1k!-fOm$WU{^cA3W|;ezgN@Q}B>imrwagB!NX zGA9HHKS>t>!b30`5jbMI48PLqXcsOoJmwr?;l~Zz<#^-5gR}q28f6dJh=Iis+vOM) z1B3(C$kB6alUeYyv|{w`G}kNedS$ypm4)>>FtX3W4mOii;V4( zX(;ede{7cwpy+t{+DXxBgWJAcGPHs>cWl={xYQCm?AtXB5uhO^L(C1^B?FeRT@G(C zG)J=R zwcVRf2yxuEE3zphHGkR9DXqC-yGQE6b}`|Li0v|*aX};ywwv=OLNKW2&5sq#k5S;; zFddlSwr`hVQw$pph$BPI1>0pFw+{a2J*%w#zXp1_-Byje%y}+GK{nr5q&Ouw7o>LYVNC zckO|~4ck5Hb|!;Mtw)9m))>3(+a=S$Fi2^E!XMiu16%}bmkh1o%^lk{5H7XE4%n_~ zhyV>S8DehOE*UV>2bZcOm8_Rfl~SMg)+tA9mt#3V(&Fg3BrT4Bb<&!5BTQ!Li0zWc zvE%!vE{M(gwI@(=FJh?6_^&&nFGEk zP;|j|1rm~!7J9jojOe&r>9%i|3`)X)kktHTZ+ej=Oxklili{chw#%e2?GD?P3v_{J zpaFp$_w6#AaY4jmj-?Rd4CKl}ha0xbuqlQO2gH$~=7Q}qk4wUZ*S=j$S`itl;lqyD zF3X${B>W^@1ON}))QG@w-!8+ito9@It;d`%9=6Nz#)St-!|cIfxQOA1?Q)EY0m1=m zLcR>Ga~d~nmy=I`eY+gJ+5-i8q2Xke6UDeo;a9s*|`$>e5Z?V0!j~_j9QY@`Snmh1b0BdLq!2$p#b4f)4~`1HSvxH+#N&{LRk_-69E}DbV3oJ zwI#{jxvXgT^S)KqqpMfE)fF@X{qKtB2&y^eVvwYY@#i~yU`t;usB~?IntidhsdRUd znru>=J%;ovd394gsKL7%VR8a;Kp_?d(uXO81e7YEYt2ruvBz}OFK0!x$}Km-q!#kM zw^hc)5WAz});SHivWWVqy_ ziln0P<<%H0_$E!K*dOxgr5KjQDkg{=J$fm2YNahqmso6hdyvRpu3UoXK(qHY+)eH< zklj$9y-hvr_a-BBb9{G#Zh0pKNcnv#c;@F{&0D?drQjaCupG#=(2z1 zJ6!e>?DlQj+rtZ~jN?7@+vH_-4=2^vDA+BVNp-B4eZ0Gx9Af8*OXi^9{C zuKEmnQ+o2!GM%!8CtBmw{f|p23+(Z)?yW&8BVkZ!%Th)0^G1Gwh}hv}Zfp z!+v`cT?;W854&6a?LNA|X|#EvyV{CjUs|?}`qLS@LH&H(c=>{+^}$!6mrA{bqN;I# z&U5|2`**<`Sgx?{GMQVNFU}$|ho6Ic3Z;&W-Gi`_F>Pi2y;&pxx2+%02owi`$f zw{2}1?N(=l&bM&NoIzcUt;45x!F^s9i;4iUnjy2=m$SqrR!bjuh??}-Y+)gB*nU#X 
z9c7B=RN&oW<&MLUwFM)&T83Vo3A^k-Bl@Tq)3e*u;Fo=C2gJ8J{F-Wq{9%U}N{2-@ z=;j5_%^=+OGhy2>TrpynJ2}77A=iF@KwO-v=@f{`<1Qvho^=ky9D&P$xZlp2nQq56 z5Yzo)E>V{V-zDlw+#x_9CP%u-4A~fn@xv~Gm<;O}h?&QqQzq;ch&yfDKum4ep#ejl zk|sf^1#8M*nf3Zt#KjR9GW#`F#N=_8Kun%>4#XUR=adzDRm4Z;5{QZLrDm*OqRwu; z<5#a10t906{=KB{u#SP4dHgwL!d`*+h;<;=b#*SW8LLBT)SB2Fpdv2b zSCQGTF%Xl-T>>$A);SP!1fEk?>=lTQ+6H2s5xW%0L?2Q!HpJ!tftYM=mKm}!5aWkk z0x=oZF%UD4Kc`ICD-a*OXn`23Lu$dgK>YPx@IV=hv0K&PHbLot;=}9+JWwp5k=((4 zywYwnGKtir?+iiZIc3>{vP5ifV@^eu;IRyN(+ARQADmF+7VCE2yLRD=yFhXwyMv)) zKvov3uV>|}G4qr{5@55IhofL7kefWG4E!yu0_+z?I0N6XT;ZdLhVS+(nX9NHw#-f7 z=k`ESW<9g*b34PhiB{UJ7I5{u1x`G!7{j>y%?X(IpX6j1swDCgzfA6Nri=HQ2-CdR zlU!+jo&@Q_5@q;}eltV_gU^wGrorY&(ClIhP~s~`f+qx&XJVb!RAOBo??zg zs`ZU$id|vwtq5ME2q3~Pa>>V-EmDal)P>_hzNcHkn+$6Hr+bt;15r%wu4}XrMeuHF9+pO zwmi@_Hb-PPh|qBOCXUOcauz|o6L7gyhWh}fz4Lwk=;G(WuJgU&PCve4WAF0rcwuAL zx$#zi$i79gyQPZ4zq<$rGm^&<-IJa6tUsELC*4u+Tz^Vly?Ey$8R0LR8-LM6V=SWc zKK5-?L7g_LA+Z}TW#c4|Xcb-ZhG~r=Z!C?U%X``lwQf7+8 z=FJqb&6_C_n>SO$HgBd#Z9ajet=WZbWVBbo{?5LL`49dkT$dSI`3<6Y`EC@aW`o%U z&g}L*&KM>sLwBPCO_sWw!`^fnUkwN1E3-qHvJc_|WL3fY>x=M$>|^(%x0J>cMf^T= z1={y{9=YAx?e{&fwqor^t;=zLn{1i=|99MnA49#GU}%bvE*e`s5&rig*w4==-a~zW zb2y!UBqp9jW;CIobqDP4j(29;JF{*knrYmDm9@sfsLDV1N|dKA0hp|mb_CIs8{lv+ zJS$G~QkMo%ipE}%=?=5Vbomw2eTXdk;>TX8d8|M=^CPK}8;qaksT)6^mnnq) ziitk@eAN-iJ{-)ic?1&p$(+nQ;OI*}Zytd}IP!#f1nlYL^X3tVtC1(Zmy;{rD!}Y2 zF@22e$jBY+`z6%WSh>Vv(bh{cwnD`t0lC3>v)F_U_o z50PRnE@T*{h`{f1m<>b5Yd+{q$PdhnT3_fQH@>)U&oz^s&6W2(bKCd- z@^3H0k6asn=^9vrCOQ!-UV`YUL(z%!>jm^RL0{j7z8-o3pkFSgOE+uT%FX_(qsHRW zuI8TQcsHP9my09(;=RyB`}oj9Z;zsQ4@BpRZ_nr7UIlxZq>X-wmV>h202>urA-oURmu%o5irx^ruzk~-y zKUi>o81|w`^tz|VlXJaUyfmFn(7~F?5~|=||KROw`(Ypc4*mb_uYVAYzc(NME+p;v z0PI79bx-v+pV*#^x9cDOez!V1yQBQlToQ1KY5KUoJ~W>!DAkUr(h^Ab&+RvbD3BKA##ns@_M_?J+tSvPZ3d zf&arP<`2&+gt-^?bato0Y%a&}8c5%aDKV#yTnDnymCFAHXddgIA&;j4FAaQX8pK!E z_5pO3v(Zqr17tPn_CmNF_TD$_ZT8QOhg-qSIdl!|!hVjwB#Y@%7ILR6?ngU0 z#^)Xw4N?zGMVx&s6_~^v7pMLHR@@wnW=Z4V!lA3sr_nDu#c!_T40LT7mO6`9?~fXX 
z4ix<4cKK>F1p@?-hIBkA*)FJut8lOVy7gX#}0oyh*GG8YsF z^G{%p;w*Oz#X2Ms%+fZBq=Tmp?ncRCAG@^*{pK2ks0iC)4(?74z;67xX(!)yfMPn? zfB?h6boTPp&0g{HXeCOeF@$_~i_2&z7FHYWwP>{du)jHruYf&h)5F$aHW-f%LaKq( zMbfYR)DBdxb|36Io-)n(B{I-F{4b}GIpvRy7B2+)Yl+0~f&<4_Xs2Fi@T+D6iN2B!N%oRwqiJuP#(4BAKiKEO(Yt9#D8#gXQ$& zS$51_DNRXRoKGp9-ApGj)8OEt&4KQ3Hm+HH=hrRPf7Th0-Wqk2E9{5 zw8aI*n5=%mSuL%X_VaN{$ap9$rlvaU4Mu4`$%}`GUSm`|fJpShJWV3Vl$l)`+8@FygeY)3Qxh4MG7k0zj;5rt58K0W=C+B;q(qlH* zvb%1g*7ub*3!p-8?LfSD`LD=-5!zzeY`u`MCi^SwK+KNa%WTbm*ho8UBn~_5o$3## z^$+_y!qpW#Hj5FPyf0x-%10R+C9CcZ&Q|!|Z?d!XMWk4|W+Ps1X4_gDhnAbD=U7Mo zd{&{Ea6N{KGd@VE*M#tv2>@K$p$Fk3jSyBmMfo>P z!bu2^=sgvL%wR;s?6oUJwT1K3K?H|VEPksqwCRQrA=0Yx?1g7QqT+ZC3Kuns8j7mm_XR-AbD#3gLSLyN~;g;7d zNWuFXpxIW)6%81nx89wQi-SKP-pS^)i~a5nQF*B!OYGl8pxJH8Nldr}%+i2@#8ZuC*lG#-{ea;yr{^dN= zl6Mh!Bwkue-~L+XYXu!IKix1JRHh2S0qyq3I%>HvLT3?G-m)k0bNE;UuduRH!Pl&W z6M#vbVt>;jXAoc&e@#@qvhKaAQphRTLKe>=vgsW?maTPmvTgC~sq}syKn#7KNg4T4 zLQK{)jBgy}M@uI5YA*P@R#@r4>l)TP!aFU(NQM+;cu``@d@(^=X~Q!Sf5WyUprJL% zAnkkFx@YaX%RV_W2R6Lv8+a6|O%0tDXG{yvr3ppnAi3-8nGZeobjye7H0^}pMc2lo zY2NBJO;$H*K;mN!c%#9Yt@NU8<+)qqy=%B^0tt{d_Tf!Aj40FQo;L43u}9NA;)|GmwE?#W@MqlM zhp|W0;)fv=%pMJK1HKr6aa%e`fn0CFr6j{BX`L%J`d+3{YDR=s9~;O=8(_Npv4{>r zuVF2eB!mqH@)&soVxb)Vu-hn2`zDa2ketwnSW7Mt(+2k7lM&o(4inC;zXKeDM3)Z7 zCwN_7^&icBRZf$0QHj@i%(mon?f}5gnK8JWpL|cMH3pYImN;u%(vlZ;>LES3YwDE# zZ_Th95t876CfF^1EC^pZ4!&%aTLeJBh-0MfsqP?^3>`o$=B;BYn}XB8_`vU)!5TD^ z5<*}RB6E1e1iZ2qP;lvs68t@@c*EcXCMcs2LNUppNCDk{&Z4)~7y7-B0x}0SuKObe zglb#E8G{s*7s;JL05USmkGi|IK?(>(EiVO9z?GPa#!8NQxWNfzdSHVTkPqv4@k0u1 zM!?JdNKmv@=e1=hj>5qPWU^LB0r{{_cJ@et4YhM<@!!~)SrPK>%*Y_heKR&l0U@Fj zpgmGxLya@#FrxHE3dp>JAO(b&Mx>TVfd!>@x)~stJ(`@{3yvY&SRn=E!(1D}g>lnd zB|VD^Qa~osDs?T+NC6=!wj{=4jyz~a3dkolHdxe8USg5ABNobWK?*2aLZ4zSxj;-C z*t0?k@>eg}V$61vfJ^t79|ps@kgnyjK?*QU&P63&=P}!o&((_*khfv8BL(b{0{oa6 zDagr}G+r2`hp6O!sL`+lp8y-AAm>8xKqFFsA8Y9*M6-*#7~l^qkOFc^d7QtSWn5=R zO{X1Jz{RIRtrr0+pto2Ty9q3Uq%Bs!*Z^vq_*W(?ZX}>m0voKLlPMy`dkTzL0Tlur 
z#D0H+&8`Ca9yaUJI;?<+zS@2so!XkeYRSDl-~}p>$y%+%lMid^hju+!0U4R^J{3KC zI9@IuRzMiG!V1WTb$a0%tXOzKz__cxW(0~AWbXyXjTPMn-N3sF$ox|KpiqSsRzRNA z$<7`tu%Tv!6<87S#R|wEURVJkq7$G!R$xPoGi7s|YeO={3Jm3Yku6+0BQgoF*;PP3 ztP!atR$xKN4lA%C6wDqCs<6Te$c(v0q|`dd+I2q380Hdc?*W-E*z77GAFhlQkO$3J z0r{lH1|_{h7$>RxA_3lyVY91%vL$pX#xj^ZIHxdSv#TIKf`~0M>%|<<$@ndGJASYkac;R*)49^WK$hi2^v^o z1xG4j1yl%yDuV7Spy!dXF0I1~YJdR99N5UN#UZTvYeH31Kh%U3kdaw_{Pqvx`RHoIi0`jCzcJ^3-4K*vQz>1J> zZ$<|3!U_lxodE5z0vl?aDVwnZ8BG^AjC8xwZsZ6DA{2JR)m7tqoF^y z!V1WYxkkhV<{^YyV)xLh18A_q3docyV+G_vGgd%8sj!H3h;1z)uL;lP_sp>xnM#K`y<@@PXC7f}D%0SV2A~ue}2~4V~HAUs|7V^K%b6NK2+LIgA~2gml!HrNPC%^R>d!i$ zQ{qkr(-qf)iEu(-vnRgS^cMn$53pu#+Sd@VuRXwjY6LWUiY^57B=qV+Kx%AX=O0{D zJAA$tQm7PhsLA95ikHJ4{Kc#9EF$>52E0C07{Dk*KubffI0U4I?A9O+5?Uhv&;no8 zz5p-D_olYT-xf;Uj|8!ZDuqG#OaqQqGasQD(i{>Xkx>%NR<%8nzo_M{lEM7aD-k=@ zq$zYw)ub8F+)bVZzY)P}ELqbZq`*~8>dq}{Ne)|#^1~Kyg{AK1us5B?N1+q149A;m z>lG%Ko;;>`t(vpjlo`&mOHL?25P}2hIFw)`zMP>$mu)RDQfn* zrKd02?)s0?Y<+{J`buXp*aqGFC5y^tQgG;ph(z!!!U?wc;?Q&b-l#jp7oK$clgW6} zE?<9Az7Zv+Rt3~gRVT&yo}aJ&o||+3Q&$GwXb~otRTxWYOn^@R4%ta1`oKabUH&9#%y+{ zo4sLg(%n4U-+ZDw-PuGJ2B8~)vYWd;BZNF9qbJ694#&M&n_Q!joRnOFDhVP|0cqyN zN~H=Qmbz@ZVToI`Q*v_;x!1tas`#*{9EJ+5utH0yI06JXwyZRt-Pe~pY;9VR??*$( z@Z`==<4B=N1#uPnPz5z~5Ndj*7J9v@nJ)4dH)*Bk zIipJrxRb$@1~A$=*F{&gP0`Ja?d*2M_Jz7N#i*j1|12z|{h6IZl=zDA<&RbUf?wXO zB1WSI(?0suRu056lP#8ahl3~jb?Zr1b)bbB8}-5ON5HkJR)QOk`qPZFc9u}<=C53z zu)Jw18~Cq&+|4B(%Cu)&GyXZc6R@&jQtidr6{(50{=Fz;3D(>FZG-N8mD*Si@ zZmP_Cel=u%MWkgV$a-}adJ~^$ka%dUkdsO(Z*M@qvQ%hK)6M&twzSqFf6TVxUb;^I zeMZ0sE=i1gEJQ3kMQ15RUOlVcUbFTT=(fYsMO!DhW%|SXWxZ zCqLYP4gVN|FTZrHSowVmzDg|I(pm`U);RiI zXtTx)ZSSoe2b3Wcw)oP=Z7nS4Cf=QYlW^;rHqK~?7*esBRe=@(>@+_oK-NbQAGURn zj28mJR#!qLfvLb9sv7vMh~E&VGz40qkPQiN+RSa(R z-mIm(j^sA`nV-VCO{zYHQzd=1#~x$+ny@iTIYR+gv`a-8OROlk>T!`elgiuRU)nCE zZ4J)HyYKJ!hIb6kTP~7i`IdiU#~?-Xd_NoU*JvpTK0DT6zGNGkGaM-<;I&<(CF#^| z@e?UH;ch6i5Up8XTkzM)RpyyVZ)<>7GpAg@mPAa;SPO4~@70Zxj@P{~?XKiStmIV~ z`^`GA^!R{Okhk1xIv!2+^yO6Ve0Q;GUe=7&8vk-lm2g$~Z%NQ@dVOX@uh 
zcr>J>W;@$xm*1JmcxSu2)!)wdIwi*?K&!hcV09!9)IhYm3cuKttp>Tz7K>zaJep0$ z!&E5!5xU@P^8$0+fdt8OA1{~AV?m`O>b0<}KHbaz2s{)&SlY|)8$mHU%2MouO*kFi zfXmuK&cA#&(%Wn>yO7ipT#|Wd>s@!A#f-sQg{|3$)&fRbOpDdlZm(eFt%Rk%lWRTin%DJlvkBE|@Qy#e=x zGdI-BV&piyn3KA_LKVI2&}#U_z_x|=5Fsof`z8siq~NZu6duKt_QBE#%oEzP2LB`q zX?Dl@XW}OMzbc4}amMi72#$wyjXYYQ6hZN!fI3Z?=U!>U(N@x{>dUJlF#J%X4(>Yz z_jSm&nhdCIDxxHOAZT!;XCllePt+ja4mA&z_oVqGWEoMcMFw;hq==*!&a8c23O5uPRj zHs0HWQ=!FCZj_leIcU{~t@M$Ak`WbJc<2`GkROIO#V5mb$fA~&W*Y>6qNAxA2M*8( znjG@blFUa}r>U+Ug@@xe)uSs7nDMg_Jml?F*`gnYGx8Wf-oV96vF>m5d^SYhC{Hh~ z+ptH@A*3Yr3ApfiyOa_HC1S*xsh2FUxoi1D4Y)6)5uO+f`{=Sq^#6%}XkgeSM&q9u z7^2ukOx1e}EdriErQU3SRw$mdnF)M z{jqRX%6N8og9?Bn@ggoHX1noP;^vRz=`=V!4sn=SYrU3n~Z{WbnF# zKZ?W(H^7<%B`5U*Ekr~tEmECabTEQXgeW8E9P^5&C?f)j6R9XbHlb6^+`EtvU+mlF zmUO|;A10%7O20vGR-iAIZ?ly0ND|nQ3jtY+04g@GvOIOGhJ;2>={YnxaChpLp1+1r4SX%$W|e?T}JcDGmjL zVjG)3v_r?Eu_grSOR1KWM?s!iP+vMOS?cE8P?bDW=wT13ez5W@$f0GrH8)d|B5GP@isiCL-ThmcaOR+;nP zZo)a1meYPR#hj)2UDbrvYX<>(3I&rS-#YA^BUaXgcQwF&MNTn6e0C;zg?iI}9O~by z=qP_VW1Zc;sXR~uzR-Y2>N3c4q;vNdf>zjx|7wm|$hN%&v9bvMO^u?Z_q|k%w6f!r zleriv4VSipgwHkV%T~xV)0PdXJ*m_D>m#0C_+l%`!X*E+Mw6uQRBfpxQ(7|L{7Gf! 
z*e1i*Lu`dtgulGvD>~)cY&iwCYSU{NnCpMk7%muur#2~I>mJvYjHT6K0QBcu4e;NF zsJCwx3#v{07#Z^4pRZtr)U7lPa(Iw+D2M+Q)mN|49q9Edvd!ZqFM{^r`K6LjW}o3LH4$h12tYjdWYzhqFG1Vw|ecF@AGCv=P7SvVbUh1ue# zh%Q9o|E&p=c`KX)=V~iJQiqwNx@njmQ7vx4srb#cY7y~hZD;V46-s$C0*6tr9FDsS zZ48U|GCw49k0J=u*t^x6^}46WlXJaUyo4h1>mR&bv*jqFo(CUmz?*Hoz*aP3K&YG{ zYzJb+{??k~TRD2epET{WtSw6_;EDnD=Wbw0&sw(C*ep(wpa$8x2&uq6kbI^IZ>h&y z%I4*kD{K|Oy3QQbt3H6dh(#@*{Ncbie7+&O|0&)6iBDl-HLanKZ=J(gLAa>Tx(L^h z&ozbVtHOi-RezPP^IJy;8SF*tF{A1Bc-rr#*8~=NgkNpg@4D77UY09zi6#8o#P$lr zxoMz?V4P@Dv6}BX=N`!}U4);ifQ4BKQIVHrmyHE*g5*shvWCnE;L}Yw!#byKWw4TL z%2AlfyOO)&LWs$}wXMX8EVrJe1^VOgX#8-c0^M?OtS{S)x(=Ue!eCCSQ;l>CMzYBpJqeekneN|{T&rV~Nu;w?RfJ693T_dJJ=iNZGObPv zd)uS|?jd+Ee!wHZ5z&?PVeQHA(WdMovEYj1(voK-R0re~CR`aro1rJcw;oH;qn)<|+)YcFvxx--6WtJd|wzpijRt$dbcK~`z% zj)66)z%ncL84w-CYJHTIE!X9in@fP!2j`l{Bi2`81G|~1=!k|!iOGLH z$LgBrMVN)3_RM`av{Q!{HScc-z|-k^PZvLkI(%@MFClWD-A6{F(u z?#TqMMHdLtlL=!m)tqNfCX=RjPbSP(Wuc*ZG8qq^J(-k)d1NFRFZK@X+mlJzHl!r4 zo=g~!wQIkgOdu>erDit(laoUCWWuBvZoPZ5Bkgh}6Q(BNC*wsYx?G+I{dzKf2yF76J(-Lodau~GC*#Sg zye3=tq7gid^KkP9}`O zRCAs^nM|7AJ()0Dm4${%NHQKgdon2pOh%#`uR|vq887w@?Awz`*{-uE69#1M+OH=Q z2#Zcx1dPyqmoPBHk**K)?#Vi}^<)A=DrYwLWF4+2qaMU=u zY;`U@*+)G0F3JSS^kgqJ?9dF*ky#47FyZR$m!zBX?#Ot-i4GT}Bjbm_HJ|4ynT%wB zj*KU(@|kc6u-wLYF15%Qm{0yIvtpnBd^<83*M{`x)sgYzktSwNstetb$(b>BWIs{i zS{%u)!7Sh^`!Tz4?bVI(Y^z+oh}O0X%oM5w!{p+vUYcQ6*nDxyAcCs({bXXnx zyAAs*_+-<`i!E#oiVXCC&#PWanI`vokuATk1i#d@KSa(_7Uf9BB%r`{kUZ=q5UnN) z8|%@Riynjr;^V$A7fE7?eeQX`-wS`dX{o{pw?U(p=uoTTtlk(qLzVg%JQ6?TtyIpI zn0^dC>6v-Tl?vUNXnkCuv+MF#qRYszyAlJefHliaLj>>jd+k8dv@*Tcb!_$C>-m-AZ%tQRu`gr5wx$R_+3mQ-j{r?xKZX ziQq-rDaUZKHyQL!4f`iMRwUD~Jw4l-^tZaB-no9|BqD13N4B+osdOnNhrizb1N)`K(?h#_2HSb_X4*g0%C*V04B%oy(i; zYhXVzi~%}vvfs_wMJHT65B7J*JG1Sb8UE!oUW8N(zZAhCtt#7-J~~0OxUxtuh9VsL zYELy`wYzk5?!sIHtrUjDr=EQJ2fcj?)b5f`nRAzUdxDjJYTSj0-OgR|PpjO85PSBy z3z@l{yX2p)-(9{ey?UATcs?WGaxL#bZrkdeYu}Bt&){t+6QO_ODak-DA{pqKjTmL1 zWmGR`8LhR5?n_^2-9j@_8xF_U8!{0)E^m?QmjiJA{8j|7Vfn$am;H@_L(cm=W?jlR 
zSCf1LYlU#QKxE)ek`>{b4Y-44t}?rA!BPE$C%Yn+ot{DVW$P+j!b27E9B3hJAuGdo zBG_QrsV;3H9{5Q+h;R&IfIo;patk*r1%pz%t7Wy4pi#;EYl|@O#}T}i<*2e=QTM~v zxRRvpVZPxb34EskcX~1`1`b~V%C4?ur|4j1+QQmHAx`lG{%7PFs*xiJ;lo$va|s~7 z6M^I&a+7M)_Oq?8IG#@`j;l8Iql#lOMd5-f!uYdW-vREDZbZ}HB-a41_R_7$37CW+*LdEX)M!i>7TPETTI z4zsulU@+S1Kh>RGNY^A*nqEc;f9lfG^7>GxCKFSV{@2UHO0}whrDdXoKXYknd6vaQ zRlULD={YpDJON3vqI*ERL@G~t7K#;XSafY?bp%piQb=3AQZEvhmxv6)NYOya(Z-d+ z29iSwP`{*8*zZTOt)2cnUm8j5$D44R<;AM0=WSW0(HN((c^FIbP!02M<5WfiPuNtY z*%E={`hSn$HgB%DWF|`=Y#C=f_dtfkc*Pnq!@-|7;Pu{2i|Nc?j7c5-5hz{-?XrLMgI6fbfcU&C^^)(l;hUJV4BBIN- zL;%%#VzOxBE)V#I3tYppDTa;eFak@>0j^;mFI!XSWu1Jq>L?I=+Xb-Um=l78pQMWb z;ch(|0AoD`FsiQwv;3m(OZDz05*)tn*f7(uSIZhOvCVM72EOgsCNqqR0m7+aW9YfH z$qa$ARR!?{c}1Z~F_O@fwuw#+GRvM2CVb@`L^v8Tr0+aBV7v6RH~G5n92;XY6E=m{ zf$frU6!<4@yhwlyi0zVbS^h#}yJQRnZ+TN(znQpTyN0>v4Y8q;F1##E!%QaSi0zWc zGktKCEVuu_#8e<`m&^uL&BDHXS#$1)?UF&T2?{$M9fBxF&n0Pb1SCmIo^UKwBA~K` z>A9d1WR~ED?UKi_)csQzM=uO#M$E|#+vR2|t938w6y}EQnt84Z+a)90B{4T_myD6w zvP`4y*shVZXe=(+u6dA)vP?;)1#Z|bc|6z3vn`pqVY>p;Vh`qkFA5Z0uw8+KB&DHx zp_b&NfNKo765Oy|kxe0~`OAJzY0VAW?OX(G7mvw@c3sEmhV3$(aX~~<W^@1PBi)==gQRb{T%9 z)lvR>df+1!FB}_ zmwrlf!*)eBg{0;$`#GgGH*ELFMZk9Pm~8TjwA&8duw8~TE{Fuec5@~~2*!OEup_q1 zuqi?}4u~T|%>~#^Ms4j#gx9QW;V@(~Yc z@s+nOO9tWD4ck3>$(+eRX2RAM`o`GpOa?MeRe4KW$i*buv&$w0_w`ruL(mm}xY4wKnfw!>~`GLS*A2@1`yLlEWYxg;%) zK-u3VHD(cxU9e0tp5rDbw|%>0P%L$!!u6GUAbA+9+!5R5IG5GBVr(L{Zw=TknF|}@ z^(o8^+a=><+LrQsQL$YzJx6SpJg(%<9osdM7LCOP+cghzaZ{NawkvSOB{K;u;}tne zz>Ms@64uF#hdwxBy8_c<59WX`3KU(iU4g`2RCe&3^XGD-R=Z@GeFK;1C_{zJe6g-5ZNqK2tJXz&LG43`v?V-CQ zT5#Pkz5*yc!7a)U5&T8Pb2lUdX5FJYf*58XD>D3L zP^vW~eN6=KY{Cx&QMLfSRXWK;CDEiNJCt~O;Z^>Um)9Z&BY<}_;O-!r5aLY`O&Hpj zD4_V#Ghrlm=dz;V&-+$ckFH+1v?9>|u6T~1nqw{oSw(Dmr8`(kZV z>Fy#m*`zjmyd)#hyBlG00&+kh76sBrz-VI%Z0WUTC)n6yI_j6RB3k8^Yg|$bNjfJ| z2l?KGY6)^g>9uB+uB(&t$i*Jpv1Jj(m+w;C6Ht~2zUz}~3#AG*PXHMvwb+_A%J@oK zUhSwGj0jM=yDNZ839%)B^ozLSD-<33U-jv)sPXBl8*AsQFZiVIr@2?243}I~5gnN? 
zUNl0VN0@;h^68}*mc=S2h#Wn7DRye5*HbUC*z)!uk-c2G1ku5K;>$pGLw)u(^|0TY zjL^;T-3hwoofIHku&{p_3!eG;S3?h8SPtabP^bE{-m2#ir%#`Ltzn3c=Md>d?XU?i zR&deZo(_iN5gDX>u;!DWy}rW-A%z6t6T9G;Vw9cjEu@bw`&YihWpBA|-?qIyypYN` z-b24lUS{`jQhkkr-Ljcf$1d5&yQ|3|cAmJD&MXBn|0KF7JWc7U&#*V8Coe72DO-4= zHBRkcJBh(0l2hdwoSF2t2K~`&+GhVIQ{_Fq**!bMZu&rbwzEC#w>QV5*k=7sKRIkYYm5yS!f&2`a=VPE=^@2EeWp^Mqi$BmaSXxborUHbcX!5dhvuFX{C$ZGg(h%lB>@?vz6Q}V{f(_NDsGdZ5i!WW?<^#Y+Y;T zt{1Pur+2}9UKWdr0J54Pv)Y%l#3fcsA9skF^x14-A#vD#Qp_D?isw||-D2gA!;rNF zBe@0$y)N6$h(0O?UwpCAFZ12IS7av<)vvu38-u?@s@D~LI7>Jq2pHn966^J`++dv$$9(L&akh7(EG(aFOj=+%F zudyN~kGlk7@~m?p<_J8etk|m}K4Ke)Wky^*gcbmXOFSn!3=oLP``0oM!1E`gW~>llcc$DdOs>=lTQS_fjC4ZBphSRGOe);Z4k$j%j=%%O5*o=J?8htZHY1ZrP5RCdRGw3oJt#}W1~=wZWC_VCWc-mBs4oS@~+rJf)BX*sSH@D3}T4CeJAYe+#Ps`-KtC zz&9*c_;m5xcki6cd(=TZf}h(1NtyM`w$JSh<0e{Zw_3o}?-n@mxMB?B@;4`7-hYyl zVW^VGPy8~u$C)nPYa&eZUQcqR`FRqg3rm#YH~P&G5ez;@0-6S!BSEu^EkKE{90{Hf zY>ot>2{=bmY6qJmX?Th`7O0eO@KSC6`c?!lQUnlT7rEqP%oeFc6Y9coA>Y%j;7wAx zz(3ui+!=^sa(7i5mARc>x~SYqE=T17+9`IvH<`Ap@5l-s@x)yQ$>pf*qL}XGsGMzZ z4HwT8kt=;k;d``E8AYlk9G??KT+V})ayMxf?!s~69hskAhUdvyju(%O@7+#btg57P zIlM&wchpi{4$7B<@+ey#Xd9a&vKvHbID8YwqaKu z$ZbBcwt3^o`Ccb8MPl=2irD7O6p787DPo&9Q=~SZz|z+2!ZtG6t6+a;U&Q;h+Y`yOWulT@a=TQ8!gE?o@=;w!ULm$DDy17uaf`|FGFg6w1W zqt}VX6Gi+!bOqY?c^*2cbG+{%deO&ui`CWP4doW$g{DW zj`@!OnJUL_hk_bbC2k^e*(ucN`g)#%6SW>C`|%_AlEByz%9yG0$gA=aYey6lQ}1-m z-j6L~;EdWGHnil^ga0frc+{FfQd)_+J6h<@VO}{^g^((0?+*p$UPYO+uLni;UZvH9 z%l0^upA|f-4FkWsCu;CU%B0CeNhvF`c%z>NaZ{+^##4hgQRa*qr1iJR-c1F2F6tt5 z6$~2GC9$5858h0fG-@z!9YhvymJCyH?c$brAO+7FrGn^#w@~Jc8l?S>$lfiAc~qBu zAru!=AQOcDLP|A~?pG+6Miu70+5s`quPB(*2s^me@!S9%jgQMM!d|gxVN@b%F_RL5 zs|p6^twl0J_(BBO(k8gm?DmGke9a>?WE{2WLjft85=4%f9%rS85(B zP|o~Fs^kXar+Mne&*udNp}%6Hk3L^@1hNkY^J^Y~WDqhZGY>fWlFyq*Ac2WIVIBc{ zI{CbL1mZO0iSOm)inj_dyGl$SBRev32m5{rH8oZ)u~@YAk_`EV0JM-%wV0mUgtxkn2QS;hAATOyBuc2knx%idKfPoGSYFF4@;()j~gL| zH6o$At4j#Yf+1AQ$w6m%6MS8`_gc#O*WK@J{ZcZ*TBJtbPju)X$J9=Ho^vkR27NlC`5ljpY{{`a9Ra)mg6Wof`Jj zU3P=?n`!**tKKtb+fUd*`}hgFt+@Yge#M=k&ov* 
zt(kf9R1pd7HYy^YCCnIAL?5tK5q*HQIY$-YG-{(Fl5p+f0ZH?=D$1#|998ool38q2 zL_TY+BKm->is%C@LyjuKS=>fNB;Q-ADF3*vit>-M992^hNo+PMBA>NZ5q-c`Mf3rd zAx9PABEm*Rq};GnQT}mT73CjiIVx22%?LEHh-->zLOQArt&#_bvMN56bNBt##$uV2 zs0}T1XwnEb`8-K=EXU7sN#40sdM4+SJG0n?rLkO6RUUd7EN5k4cf1`Za8R{@`p|cd z!JeW@{OYmzE5|NZiF7YCFZ;_?Vt<&Oca6&T%T?lVfV{0uJubb+*jUJuy38iOk`z80$>g=u}UcVnW#R{0B-Tg%F%ebnMJ+6 zvYCrSh`0h!$^|Al8jzi(Br=ueBompF10u@QYMnA2OMr-~x&v}loE?y(A`_9T15yz= zW;%9a=zHf(C(!Y6K!zo0ICm2jMJw{k$sxvYBs@gL({UFSPe*1|Zn27^>l&*BB1Bvc zu}Y-m9;-x3GLgB(DzbKoD=CL4!w?{%@}%M(tJt~7MC9s#jAOeM3(Um4Viixv#{n6Z z0>&x|+quUojs&qNhnUCHaTgU&M`l%Sv5KSX8mj~%L|hKBN~Gi-t3*mNk-5bxvWV{- zs~7@AR1UFgmiPshgrO|j||4X89fZWV+Ft$u(-I80!UiVhK47-G5P-V|hJGE4il z22=_dDz-C&yC=jni+Vm3xm<`32b!Hsj2~`5rC`E|k(xN_h3_l6FvPr_mz|kJe23OxX*ixv(OVfSa09%utfX`O-l#i$>rQXd V-|F@!lkudTFNwbIdEVac+1c5DS1aw6gqF+0WP&lYSoKct>NOms(MU+hMh~>B6?!(kGrg;p zdb@}2o|Tq7j1vb4u@g?5R^2@A_}OF8kc_XFk6N-U?T3jfaDc^WAg3 z$+SH_HSJH%^=53pAIoC1m6X)I1dFTLBk+j`US~I?j;phSQS8stm4v7L>#!8XyIKqX z{Q94KhUFz{y`aR)0yI{4h2o?4bX|PhWXHz`8*q=mKJLqm?tW}@@g>}kNhmAk8G30+ zG5Y~lm(nnT{;jnD7N-6FX52WmtF;HB6DSZheH;JaB?upGgs|f2_NlmeeEU=q z!XtW52O%>UsZ5?oG4hZid@_jO&TbFm=3U#vB(&*<5FyfP2vM~R1`fa6fLpvhH*Vgt zxfz0Mq>>+V7(B z76hf!1E8qYsQ3jj6Ss#~~8D)bt!M}^(YDNaJV~%<`>-K7HT?zY8(C&59hP5|~ z7vqJyU;n_Zwx~g&T6l*=7&*}!JsvNl|DV`0U)YW%kg*2OMsTM=wB41e?fYwJh+ctq zeAp&L(?JvseA>LH|Jd-RZ{Sg=HZ^o~P%hgDzy#C6d+}9n;-CHCqn1s4FkXr_K0Gg$ zjr>O&@Wv1p$Mx=FD<96LiBC0KUJ_%wa@K|;O&75y;sfwZBaBVchCYmkpq4%iA=Fgm z5J==FZWS8#nLq-hjeU4i4kJpgx%aYMbw!`qC;VCiZVlkixWNx&MySOPLnxR%nuPd; z2wrI`M6S0Gnrj>~xHS#K&}qFTcK=@XB6cY7kp{GFc@vv`YoR0|Y*5>LcnL+`fEZMh zcAF-Uq$pv8M#Nfjfg-MnmybtqgV_qQev@4#;#z+PR0_!_r5U98|1EQ0l@G_cDCdou zo>S(D9vq8)*b*y!asnHX~%4SDf2=;*4$(1DRhse6jFvgA|Y_b+WTZ3T&vI zwLuE32%*kQ(c=?ox*-T;aIirN$cJ?Tv_}eTsBxxjZgOo%rbvOIcn>5}8>GNu#+FEd z1*LYn8Ni4XSP??@I3FR}AO&Q`TpJRo&nL7|JE33auMS06Aq8Z7u_ZAUbL2rYQb0bb zacgaV;(`>AF)MElz#yCrQb2|fmn^L%7bXmt`~+-}f=<@ea|-fC3NX#NAq99i&PC^q z6yzM2JlP2;$W6&w))Poe{TvSC0@;TA95f8p}SjQh& 
zAO)SafD{-tywbMA3b^=G#2VWMl?MjAu!4>QRzT!p+%eH*Y@pzB9#}yqQ$)U}z=#!4 zAvo-Bu-R2`bRJj%8Dy?l0U4A_PwHI-T+~-JH7+#;nTNayeQyUH@K6X50TtP=wX40xQnQYXYIH%G{gTObcz`DdMb` zh!xIhg%w!Ml)Nrr+*M#hjWgviqO2V&AbeTvDj*-$__HNeU_r?aE3hI|H&#F<0XA3x z`LMbXG2t44fCVxUJ7B>ID6Tt(k zT?M&Hs$vEBF*8<>ldm5u$Ymh+LyZosb`|7Y2p(u`xAQr9VFfu29S1g8!O_ZC0d7$f z_OS#0tnjI$pR2t1?z#Gr~^pDD_KWDwQv1S_n75YY+HeqVtNHO`dHSb+`66f2-bh)bJ81~Ih~ zR#*WcrV*(nR$xKN4lA%C6wDq?U7-zDKxWJ}B9eB(OWR^Y(ei%9!3rxNyj8{u$b)9A zfP7M8gTb)^GG>kY3dj(Z_Z5%_=M*Mv_7xm6U@$!2K@BI5eZHXU*<*EP?7`-XaKn$QWDeE_-r^KBMrYo)o6XArwW>0*t89+8j z1@yfGzLe4T91(l;A^fLCK(nXlLO@SKuPy|n#`bmoo~hcH@YRq)rHDgKCLd6|9QNQZ zUV|OO?=|4{p~3)0Ap%+&dc`3iHDtF2X^_wo`G*$xiuMI~Npj`d9)HD`Iv5ESAsm}* zl1{nBf=@NzST*wznjy_{nM6iOFk4kLGZrFH9;@iBGx&T2oodn)x~6K<3~25q&w}5K z;I)>l=?_x4i59WPpc5adQpBoGQ;4b^r5L5_ewx>+IlE2APk zmtul|YDDRw%eEGnsY&qWDpE=O?4dATU#V2x0+Fdkhyv+3e@Cj=x8S%{1Q(8TjrqYh z?DJREw<>Vco!A;$-VUb{v)#|1si?q8RBu#JNv7HQ8cX%9i>Ec(2HpHXhRS9_XNgvh zeGYMq;8%qcY>|WpqxRW;Z`7T>Wji}Yxj&hVC+*|-r`ranPxmMN(MCT$So&2`QD6yo zu>`*1!Cs#P=YzLL{E^&B9(q%BZN{v7YBKI^ZuF+J^mys^Q~kl2$7l3F-g0leYuDRu z<~u=EgYqun6$Gb=@OMWrxuMxQR62Zr5f)a{6Rr=ELl&0S7t@pY<6ZF12ySQX*KIPB z-snkkR&h)Q*}J|64YF%B5)QCb$psyg>@k(>0*omU%klIk6aGA2C1)iA)tpY#D17Pw zERE5fDfvAr&GdXz?y3OgcvAbyr>_@egEg?Ga=HCcxZho+$lhmz4Rn!7Z`hl3Hy-bA zpj%D0H#Yj{Vv>cs$MR(4>O6TBrzqvftn8ZVt{)vvZ!&`%EVWn@?w#myk50bV_uo0e3Q( z(f~%=XS-XI{uCW#-+moRr^EL7Icln!{~+id^=EbtQQ|A!ov~NdFZkunDq>XWhQ{=? 
zkA97n1F_6xFX4BGgD3lS>q!>E_~W6K>Vw~pfV)o0N^s**f0}XD&Jt?f{FUnyHneLh z8~Cq&+|4B(%Cu)&Gkzz6)yjrRwHGhkJ{S%YZ~c2w$Ye}it#ui|V6@qPnifzh?}Q&~ z!1a}RKi(hqH_&nQ_^V|l$a*!7O+L{e@z7WyCsnolwg&VoONI6{-MpV^ON&wR$80NZ z-A?b`yZqk>HtQAvQ;^T6P*Ui-C26bRUp49zK)`;gHo>W3C{3UC7ZGgKWi{n4t~TDw z_F&l^q9y@dR#My&YE=kW1`cEMSeH29$D43p0Iq1tBn6KbkWd%5$;_^iX7ic92Hjwh zKWYS4%sgtf9TKolh4^t{+k>N9QwPXv#u}U|2}{#hS6c40i{NiHVBJ54;L9&vD^`Br zg0B*bGSXTI=;Z9`{ro(9Qn81Sz(jnfsHqI#+ro;QjgT>}6u>$hIuUH(s=Ck$m4bgC z!K_9sN1R_2v;DIS}SV>5#j@1q+w8zo!Kr5Itw7s`>98i{j zZ1JUEwzaUFn|M$DO~S2f+Bl;nVo1eSL_viUrHcS|nx7LO>!XMd**ZwZ3jtxPE1{CW zl&(T>bNKCu-w>uW1X`hx5mITtHQ=CNFYI8oM=ubfaDMApHoNvb9}Bt{Js9-yk&-`#*mLP~13y@hu9 zotccc(FTwHR=(FMIW7TOebtfNUjxzZ3jAVIwi@IN=MXdVOf3pKET89VEjPoeE?s-$Ly$l9zM{7)8P%ctS#jH zg}!PDF3G$!c7|%~QFu6huqKUlSCl&YhY{T4tus`4;S|emsqv_W{!zr=E$(Eu{G=%U z=P|7>G@u(!R_kOl_8oaKdCTO?G`>>$>m;;}67Fc~Ao6W>N}IG_1}|N`B;L35m*l@l z_>qLJCZOaOa7uZbSM+<5O%?7@U6nLGhu0I!&49UTMS8R?@5H!aRS!ojsd?2v@Wic5?ADmG_ ztErA^k47oGtnwobcqp7+#TFR9#+BUX-)GaVfn@g+;b|gZg8%vSGq2VJTFlQ>j zOtxjGE%a->75ft*RFB?r=aw6=>g~38m(DOKm=I3C&{+rs$vr$D7t9I;otCtFu5X2c zj*Io_?-D#gR5#{%sp+y*0dEh1IMFGgZ}3|cf#ZqgMq-~MIetB;AW)FO>k|Gb5-Z#Q zYZ8>4)DN@}5wWyLb#l?62tF2~jG%MOE1sf^2q;dZq5#>1P8DYp+mum=lIsf61w((B zjLs?j2EAEAr&**|~TB($IdK)h6QQs-nLUd3+qLy^{l z!+^YWmbrT(fzoC@pinba1ps;JbTbz%5@uw%bQlEWrIQdb!Ehii9fI`00YF}nCIWds z90KH}bXDMCP>`37MkEseIHWX1g%%;M7Lb>YoAY9Q`dFB4hje(A;!rRswz2s`J9Io6 zYeJyDl&V^JwV=LqT*{3SqeqALJ?ak~iKl!@I!x|0p;3SK)Odmp65JSX_1oMz$N8zK zg?U%$EI$wh*ko3!P8e>G+2vq~>=054-Bp?M-)_R$x-=lo@2V!WUONcTQ|J}r(w7n| zYr;Dl;J+fLm>@nole|K`;Xe-bZ&h@Zznrng!nEp>;XdDhhwC!Pv!rwP7lKyUiT`Sj zS;*Agt(%O0Q=@3Rwt?W4EWG+TpU4EZw)R(Q0X{Ie3QhQRT`PWB0z3|1>5=r2T z;7@BbNeWNZmRd5UCG*XnRCa27O5YuSEyPxMMfl4rzM@mE&6ZPOt2Vucfw}%?jp2ep zcxsaZw(fCV$yiz)20(wl*#Q4-h8ZjVL&JeZ(v0{Hq z&GD@qz2Q%q_F2}JB^7YRfckScu%u@#+iGkUr$`hwd_m`oPc`9*dc37Tx(L^h&ozbVtHOiRRex1s z#x-=x!9KJ(bu`@?Py1cPoqrMhTEl+VwTAJsT#-vG=6C$PLaiKh<_el!eg)#(G*Cn^ ziyLlvAqsaXo=Wdbf}gB_g;@$wk(XtcjRkOmR-2Ex{_?lQJBfQ 
zlDpzUh{?XSt;C8fw?0k_^vB?l_@PP#y5-#LT?cZqGdpI2BY zlwgTQuEC#GICxMlA+8c&3OK(1w0_e^K1eEZUJa^Can)_*8Wj)x1LxM8*agS6=nQ#l z`G8}pk&eMgHhH5b;j%Q-{do555fcS z{T>01h_0*;`&9LjrtBiI;ELnYl=Z=q7IJ3odQJ))Pz843(fHv23gj$@;Yicyt)*v_ zc=|xO45gjOJe)Z>%+^S9U9*?C*VWnm>k7xy%4b;?WR<4wXghV_WF8_nR6LSiSk}hT zSO)K^a8!v|bc#Iao36^*sc^8XdDc1k75GqvJ?v((^`}6ITB=C|hElL=faa*htW$WF!OhWIS1w*Mv)f zK%zt`iq1QSYPgJG0aO-6Jc%+S~S9PXnLEG)nJ)4|a z@16{YT=$+V57}Nl8PA%^ZVG3xbsz7ZY-JujnM?x(uRG9_30y03k>5I*Fa}fUdG=&7 zX?pi$!faI*8Y&^lc<}7Wq#Q6AiH5DviAKiD{CYAe+jaJ2!hoz@`}Jf3VbLjzfDyVU z69#5DsB(Mh*r6Gq zBeN7Z&wTbaSz*Xcf5u9MNjs5Gd*=sPCy_ogQ19q}6_;l#R{%*tm3O?C%@?r~H zgJ}492D;zpRWGGXlY6|#mfu%`UuxPPB4;U!awKCCP+&Vq9`X{1R+EK|_2`R755WEL zao-n@B(cOk_q@;Vg+Jc3RAGbz18a#6^HJOxs<=nt;rKys#c}px+jD)~GxL-y6}mIg z`nW)6*X6H7myz+=mDuFF3*{Lg@AZ4_K+?1_z1CH1_8Xs?_9y3hv%z>|e*=t6ES#yR z{HzEu>Q1VOt|_#9KMa1j0dHctshZ1KQ&S{1zYN3?~EP z3zgH@v2uw#=#|r0f}##>(@e84%+R@yogV%ke!=hkJf$n*&+EI~XgM=0NsTJ2 z3j>xiqG_YDI0WyH;OcUOIn|%_R?wlNPxUA5!DuiW^oHGYz2P>x^Yqs7XgX`%L=*8b z=)^}#iI|U|+)X8?2ED1|qJ`6^Pv2~J(Za7r@DlBmV>sEH40@-A{gWNNClt>!bxgHY-c2?}SOs9i0qj(X2 zP4pb|S-nh*(_!lhIDnKn*zC>v-JBH!qpI#rMzh~WCuSiPKRFnk;SpC)Gk{-;U|Flm z_N0$a&@8Sj(u+aK1NAxI39H?uqjQ&iuzPcM9$gvSIHDG`=iDSd_0%&zcr&QoC7&|q zE@-CS)*amEH10ygZs#ufr&aDki1F@HqXSfPMUA_VncKNb{^|PNBy0nFO;3w^(iE!u1Sdjif1d?00St%Hl+FdED zl>`lsdF$flJlBY>j&XMTnLxgu%BPaJwhNV&L!4+Z-=i(u;yVzVmR=TZ~&2Ln1|0fH6GNK zbTx#t3PXf}KXht07{0hbVC$L^1gqsN24lp4KXPg^7$(I~;86B5)Lfbj=J9eh8o>60 zgoAH5wHGXFLVe&T=Kx~Sg#@%OMzXxhhC@lohXR2Xjc8|JS+_Us&d|LA;|XarUPEhz z6==tYZE6KN6c0HxvwESiytIbkhV&m>Gc+}ugB$RLNOseLFUQ#eKoCB!Bz4jLbwG8| zu9l=GQlQfCqfLi4k&0MV=`Gwo7!GCY#J)lk%p{Q^Xo@FsnRd29(dJau3 zPe7Nfyi_7jSZbgIt4LDAqH8;=Bai}$V9Si}6=JDbY>M+CQc zbHyby3E<*oPT}(#e5V1g_hwp5XZ~VL>hO=sIISuf6&;I@I^nyK13F#`142^smwg!S zOvw?Rb%AOS_p}c!r!>-?aDiRZ8@eo!<}+xiQ62cI3s}Q)#sv{;W+c(ZVcm=HSBf(;`V+;supasg~O=7b>OCut4%z?@|7 z!7lpJ{G#tm_3i}{9KPn*Fw?LntN|PSl<#*9){F28cE6%WfP`;3w#f{mVt{aJ z*cf_lZ8AfkY*muwmJlvqJ@%w+qEmy+vL}QIUwH?SgvMgicOD(EU3%J^eBC!>V@!tH 
zVQiO7LxF$d#)||pD(1Y?tB8R1tgdSBeTE zWNXi+8@9`^DTWOP#F3%qg6%SoR}2>(Dv;N|I)ie;W20RdXBxvMq9eA;$w%D8?67J+h}-LPGHyJG#=E*WZvv0X9^1^(%e?UDg@0NW))D|mCq zb`69}EwRJCUDFV_AvSf6URwys6v_>=?xE*ID}Ad=IzG&}HB9e;O1%k<^U5YBs z*pTDCU4~6DY&al}3^f;Qmw8+gE-Hht!C@ObKgWH$EOSDT@RM{AAUq6FcWl={ zxYQCmV7sOv0yM;A2#1E)?MwzTV5Sc)RdHpxp*@gf6elQ#%*GA~cEoncAlL-`v0aXy zOVZ*9NRpQB{W6!PkBm20Y?lm*rS6})IC@2m>m?6^l{;d)+)QP)t{9t0?d!$M&SJY} zo;hsSi_`h<+ck3Mj_n#ri^k%D?V1PK*;M9+?Fw9R$;`{ZN}ZQ%-z{G>86t@!UpnsF z6_^%#Fb8~5py-0_3M3>cO%B#rNKH88GECriJd;6WQ%Gw5vNtV)1mfNe+da19*e;$y zcx;#9j0+-MMXW)@B;M_WUFZCj!jA|7J7T*Gn_}2-KpYurF4!*fxOKQNs6ZJiTtGNH zKSykrWljhZev&Q%fICk**vApuW%!lVyQIGLnDfqIyBu%Njqu=hCIiQ)7$6+5Mvk6a zo283dkyOP%>)dG#mJ!KLqScq85&U_@b2lXODMN0O#bbZ2OXY*4dx@%aFA=<>2|o}- z*#h`h=_C`CM3b8AFsC@^Nc|Mr{Xz+YL2-oCVaS%wL{*bOqZW3UE85%U#x8^-Cd+6o785H zPPlzkZ~TIHHNxZsI40qN5%ny+hq@ zhz)qDf{XsvbTAx`G=qHl>W}t`KQoD;b}@&eTKa$J$ZSVPT9f}t#NAq z+DQy1k(?^e;LN1AIp~jO(>D7znJVw<&F6zr8UY%_ievceB6MM;ADa zHqLif%7JoM5kVY~@mv?J81|>=*Qh_8p^Mqi#f?|)(kxGBm!`w}cEcN3uCVShnOmAK z<47<&-HCSNdq;b7`fE-7{HFWs)n;J+qbEEn{!C>qrl`Y;GFumM-5o-!&;SQFTeN zCh-*b12IS7Vj%9fvu38-*{MKG^dU84{nF%GfIv)+ zbdwpfF%aX2T>>!~)-ez>k3X+W*eeis+B+49u{xv{TyG#Qj=+%FudyN~kGlk7@~m?p z<_J8mtk|m}K583?Wi6Q@UP{`s0>E&oqKOUz1Y+|3wak!>ffzsR5{SvLj)9nY{CQ=< zUV-@Njs;?@jvWZZ#rrBU`!xn)^0-SNCeJztVvfM`%8I=L@v-MpASOBt5Qxdp)BqVuxjnE|z`Os4FQ(;A^|#{xTM0x2nNyg3dW&*j%^UA=VU=?7$Fv1zQEutZ72G!7mWWzE8MSJd6 zgZJ!(q|AC|+h@0jaTBeyTP@(~cMF_&OfiOW`I{3k??1`OFjPt8CpA=Zmor_w*F>1+ zJ)Y!B^YbJ~7nUf)Z}giXA{cy*1T+mcM*>|JTYwT@ITAb}*c=H$6L5~C)DAXB(!3ON zEKn)WVAN9Wl>24`FHr;#VHdgNW6Tz*L=);H6>d1jNeJ-F61+gpa=dW&_@1rgrK(CQm&42Se@89V#h`pK zD37w`fwr+ZBD+C^hQl{;Tr8Ed2+?q!zW{ch>kYU2@g?i~miENE z)_0#BZ}x}ms}XyYwee>c;7~^LIHG&9)1LK5)A6J`>YeRR$*UL4n_qi0#~kf%y<>q) z|d_}PpwyjY}a4T22>vUD%$CF(_%*6ff_%TH^1w~RX)WxD$_maNTtQ@#;`Y?#+Spv_|oi9rtE|G09jQD{j~*n zQTDO>(Cb9wi6VY4UxN01o)jC+z>(F)p{U9~Z9w^Ql&3BM zn5>j`1ksQ%ipe>@;a63Ul6U+f_@cnrQOcO9^2m$r5^G1bs&Lq?k`Gkb zF7Ru&|5;%0m^Fi>v=Vi9OfeI+2gxK~7=VLF@~Oce3d~(gnX|74MfR@M>d$4*G0D#g 
zx#>N74MYuIN0~I4C@Ez{7O(RYe*W@65dRZ3cs*s#s6kqPi|kz=T!Un&KpMP(GHKLc z-a3dZ-k@dC#iQ^*3I;W$DDN0-vOU6=gZBQ(H zJ_2lM6WnQbd&6PAW|6nFSiQ*s+~5*rCztX+9}$1CB0SL%em*Mvd`$THTH)vGgrBb$ ze!fBY`9^s6<*+aq4Q4qzN4X^y9)*$YCYnwMXGY1jwhxhIU;NmsHIEhGWqu@8a*grR zJayye^ICw=KVqVfK3{bNvJVIIYaW553o<7&4>XXZhTm=!a(yWQu;P%CD4 zYbAQD_c4=toez;>E-qvkrij4raF`84#%n(4VZ3a}NXKD5ESX|HZiE=th=lH}E+I4v zhEOpl2c6|j1YV8Joy3JT5=4=xi}|<_Vp#K$Q4yxOIl0J)#BWbKV#xT(sK^h@j9l!9 zBUH@EMMfk8F*9;eKSQRNj~k&tc2uNF){d4NOE0?l@BG|*KKaVVuJpg3|Lb4azjyuV zJD#}q7oNT4`+wzkmf(l4jK6pVtU?o=h!rnF^z?FcBK>+EeNE8Ux1z7hF9P(-g>>m= zHCwqkaCy{NSlr#*yA{-W$m$9Rz z+@~1?bH9iOMn70^e;D?mN%Xp>$CIjq)LBE-#zsbHyf3tigm4DJW(p+7Hg-)Zn7jA&v+0tNgDG<@^&Q>Xx(ZSq!H^?I1 z%CUS2;J;v3fOXjYy0>iih9$SzjojuCEPoFjaP@yliDXg&1F^y{hg3FNQHMmD!M(&tknN7egi zx-~{8IQFRZ@8bV(iuuC}3SsVpy`4R&Fq=y;ycW_u*d^xlk?UXEUH1I@epXUOAe zz)J%kng;RJ)%^gSrED}5?EqO#y1fu?g?;x9dmH`7$HUG3q-;C9sVrXsyRo0+FUdl> z%!S zg2m3lExa-#k6 z;)(39Dsw?`F#iPhD9&=nP^?2D!7Of}NIH1>&>oa5_On~7&~L6lh>EZ+=FpzxAnd`P zn|AVT2PmeK4G1tCOlPl5-RxDbjE+R9G=`AxZgCk6#lmW%-Hb--5BnRl_!8KQHa%<( zW`ptQ5TqJNT_pY5PwhbEYWKqK<0;deUm^p|!~aqmnN$AQXz^m8zm`b+4mfxm#j{C& z^R~h1)BOoLzO}D343&qZPKBz80AkWpX~Ryx@jb3LWj8sI0P9riA)Zozw}0r6JBV5D3Zx4z*1*1 z_d7 zDZt6zWY9Y`L|a@?jLGU3oYm5L=>Q+6gp7y6Vrr_h-e8pGle}1H#JiSXh}PO4Pc0|g z?#TWLc~)z2<(iewHAgxxJcqwIJlW_RK6>~}@4C$!PM_}ekK7o4=JR{tt#B0!zl={! z`;&9MROvArY}vgQQS19k8wpUMw|X$%xAd3fzX@z)1mA@K!M!d<}PrthqB;A4t<2`Agm&;r1f z7%IZZ@>$bEQ0d^fkV6FF!;KJDJVp68O~OeCkLW!egv?+>q!sx{OSOgblR*TBQY?O} zGqmZ35FyfP5D^*-9Dcb0++p2Lo)xE%QV6b*N`jC<_Ge%`gmB?=5tx?a^VG&&3RJom zOt=*lhaN7Jo4Ao&ALOvepHCLv*8t77Lau1Q2)*_0gnR`4fOsdH3!%Wu>-tzTrDLii z_;(R#cAIh%6K(;sG@!ZI^T#5%&XmTp1uV8yd@H(=gTD17xrl2mzv9?=<0a<^nw=)_ zA0qxDA(>sJ)90MgmU_C6g>8ILFVu2osT2VSjuJ1frf+|(!A~{J29>Eoa6r5Lv5s0U ze9oEkAiMZJ8o{fq>{ReIE8zrSQfH+9(IRIMU=@E&RK2q9y{b~kDcC}W%p$VsWjdDa zWVW%kc=l9!5f31SzR#qLd?_I&YZ}Hkj`E`=6MHom{9P%mbl`OjYaZbp7GWeqiZZ+? 
zv1PuPpslpw*@(YkTN2RFnqYUr#u)3)%> ze(=#}T0Ya{dqq_@5{#FkjW2iTh?kq@z{^J)@J53(Tj@pH%5%5Id)YMcsiwDRXjjhb zTwQl(7u~S<06fzOW7D*u591-Ir4K_0HB|~1AZm!b)Bv9eBtY8Ohd1RgqD-56lV#BZ zXMF+~{8|HU4dBnX!4G3bsKpOMNZaIN_Y5P%FGOJ6mQFGo*ISgze4)|DlVlQdWHbv& z7vg0gA8CN;^2Z|J3%!Q5P?8WfSh-op(k(aZfua>k`zDa2ketwnSW7O+Z15HR8xX?vP{3EJ9=skC=d0)&dGHeNlqHXBBT4oWKNS6hbH_85Aj? z`_EbQw)#T97XU!!z=lRG3Sos55UOnrXZ0WjWMq~f<-&V7UTQ%Ks_|lj6cCC!z4##o zHX~G8_Aj;d#2MWn1~R|YJ}AUsg%pq{b+WTZ3T&vIwLuE32(iomF5L+kM1?n4Aq9kp zPJs4Efekgzl+8`94M}brfR`u@c!L#EU@>D$q`-nwJKYRmL<+13)r}O8Nq`MfKt9a1 zAv=KqkcqTPjf@PY02`!$kQ7@IV=+e_G$RG%lNz_yb{k$!BwxKWxt0x5K-m)d6l=+m zJmrSLlHr3O1^KI&Y%#`d65xzo*EMQ(6Sy>ucwS(G6yT4lT~s$~2}I{U+p=?tD1#MJ zK;DMUjufy*3h-lQq#!3>(zw9oTNNPuW(SV35bfQk-mumXCEb+MbkB1qa|1&j@#ebW3ZlNC1-6l~dG z1)WS0G2T;P#0sbothY53v$5G#K;Oe=U3$(~0ht5aTl-@LgsP@~XxD=kkdaw_{ICK- zQOip$SOFQ?ju#uOfKb%&!Zld2@PdGGSAoq4JBbyL`BldX$dfwR*<%GZ)U2=qD?+|l z0U5*#D=B?9BF0?L-qsTj*(@?apQ4eVL%D#(u@V#`bpD+mk$;NkeHMb~uMUOYRNt-wJT0)n3XbM`?Cr4v+@hNI6_7B4u>!gWFDz(ag%uoi*jGU0Vtl)T+umXA>8SBz>#tO(B*g)%#6%eYL`k^MQfQ-!Yb4MkSvHxS5> zI@#G{1vb>IumUSWzP%Y4#0x7RM05hQ#|mtyai(m>3T#MvV+CZ8AXot*rV*(nR$xKN z4lA%C6wDqCPHwfYfXtX{L|j(82_d^)zgn~^uZ>#mD$;<87s)imo#1&r3+S&Gc5N*jfQ1DX!8a_&V}HCMyw#8ChaUeSk*9P zg%upDj1}M()r=L8FthXf3XVBo1w<}ptbnnB!g%L_6&$OC6;L5KU;+{QE!s z%mv3)0V0&~0W{&8{3p!kWCc0_=#;pV!F0v-U?Q9l*z9@s^p#V@0sthkG{cCPA`IO86+xrT;TzpqD{~FJ5s^cvW7mUcgpBGQMSIuQhlYf7;J-X{*pyyGbxPJ4-tvrSA`R7 zkrW1__St@K)ScoBPrCidWISn?uRkf@h>}zkSi)T_fp2&yShGCKA%eF@{E^&B9#9_W zPtc`Y@Ffp-aqG&)U$zdM4d`kJjnr3*dK zjYX^J!MX>@eOycHi%aOx<#-poGlJV$`*oWPLYbZ*{3qg||AnyMZU z&@ss#Q^_tGn-Z}cPcQaKxbt|GoRw_0Fs=j4bwB9JGfe<(r01J*R|P1?liF84eZ3$X zc7l8q?sr$|bcQaB+2~F;dc)qNyYYB`9wA#ll+9|oYi`;AAXjOdJQw~FgR#>5Rh(j=H%S!Xv zeSNvZ)}|FMD5=Ys5O7RbuyXq|l^-xC(tJ0(%M>rdMjk zX{10Eb8(YadY&`7)POq~Olbh4?Xz8URofKZ%-GIuM{J+3TT`-ZhH!mGflBl|?a%BS zqQqB>FMq7+7yR;O6)_q$2&j*Kjg?&cB?WnN}mGkyo%30T=MsrKURiqyng|6UX_85b^i z8F%k$8Zs4O6@IJ%*H`8}zZx>XBGR%FWW72Iy@^jWNIW!F$VnxYw>6+&St_)r>E``R 
zTUu+8KW1BT>vno3sA)W0*1P=Q2sY~$0aK9IRwy}lO#iA;p8x{(Q?&_>?%}D@r~O3) z8+BPtxr?ifcg}4TL84W*tVViuCAu|wIM_JfMfc24+g62uW#G^>X}Ec;OC0dyO}H-r zS2Sgkg2xL;sEgZVX4gow`OIH~I`pH+SAr|TA2k9iW*)WL4hfj?X=TGDt?%DmDcdd-)g|Re++NgsrZGN&=&zR_OwMJK{HlDGh;ED5ONsLRMX+wgh+J zGmUb!6@I5WM{~)eJ~$*=#o$)&&05OqNN%&A`6;a1r0P>RRZ>{M9%KBfurW(HLjhN` zOGOwf0HBJq2rAdvg)geY9iL5%uLPL`$RKG#B$gv!veLfJZ_~YPP+FcKMx|jJLPCoBgePuTyed0<^lD0#-+I ze+@*tEAWd=*=mscY_Ui-#-rI}JWPesAE67*HqJARyit)qmy@H&WmNf>IjI;Gv(FN2q^UJ~zH`b+X(B>V^{`30O(-sTnko@7&ndsJ6t zk!qbHZRu5&({D83u5jjtdRdGdhZl2Fw^tC;QuZ`j*cyH@ux;TzLVO?w^U9=>Mu9F2#&huJPRnj)!xNJX)X>LGhu0I!&49-UYyT|=8N5GeaHMA<%qLIOAm0u(4`r!`@=3@tqF9Rz=qyMP zNiUpdON*)16{D>5`Q!tE{V$8jIQrm>5?W0)R(ny)F01@V10D*eSFr`ocjDdW-)GaV zfn@g+;b|gZVE&eOk zWAqmPLflwtEH9<+<7P)ba(=DbZ23$GI3bl8JJrI8gBwp$mtZvqszpe+%mFN%L&HZx zV9r#4$J28W;@#O7``4t4U?c4A*S!(|NcDGzvr@*h!#E*pPyuixUc|QNyhdg3rox6# zgit+t%bi~;vMK@j3Bv(=i`D|p`g=}cF*;#P|$I)KK)&SCy44shlL4Zx=na{ z2*im_34Md#st6oUEH@GtM{?M{ML`4+!LJ7u1PU^EUBVwlVuc%EO@fk>`hgZAqQX+0 zTy!XckA)~B=p6Hkrzj%=iW8|QKsKRM&D^_?5LX*mCz*7?&>tqFb4tHKZ&si$mT$9^ z@<%ViWtJ>=~f`37MkEseIHWX1g%;*j3&=~y&3UoUEkRIUI=o78C>Rvm*!-a#Iv$NRAy8jR zRjs^QP+vMO51YnV+`}PM_{i(3a4?q{HOib{X|&PmL$&Ai<6CR=>?1 zOq-vIszVYA(-+_e_&^k3lUb=cVYoqNmxCoSD>d;DQej>?X5eo(;VetbX+N1_&eHs@ zYC`L^g8)5+f=MCYx^1C*X9N6Kq^Gb>X6;w=LQ48Hyhx;4N-63EEZIo_%Sl%zdv8Wicq(T zn*nV@IsEUazIu)BK(Ak+G$mOlF+ls6AFspF+a80+mzqvN*d0v1&Zo*c* zBGc}qtj(En{*pm$5)=)hu-6IQx%W7njyJ>X+*1UNoKJfn`EN~_%v<3cI9FQ%k~+*z zsCz`!y$Pq{H`l6e#G}2cf1*MuZ${uS>XpNBcc6`7@jm8l?>9f%eCTWXGP<>(E6(zMUAwk)ZDD+bh`yMZM=YuQ#~vp7Yfu;B|j zXMCy&Pt@ZrW%F{&6}AdsU1y%^^~n(qylEB~BKTZGcK=hl-4UO{#A;eIHokQZX9eM+ zKI{WlITl$#2wcOu&F}j$?Hr=kluQlv&re?e4+`1 zIjNFMsMc>URN}k4QUR&3q%3-EQ7Sz_)%j+4EPkXub)r36rrW3gafSDjc*fCDS<`;^ zSUbJ37ALv%yR5HTBHtyp;eK9W!P>y7w&2ey96TtO5LXE>1svahTEFQdAEc}Xv}V3< zvp4H?Pmd>Od$V{E^>eR(;8sm(wN`Nb51dJg%TU^h%)^6Vr0Lz0 z3A0sMXsDh{#)D^1Cgorr8A-;AeMa)_$)s!>Qj%9sCJe~hwO>yr5Eh+M*B=$ZBy_(f zOp4*wyC*x^E>|*PdNN_sDrYwLWJlZ2g`P|%$SOTqCqPeTDe}TlOMR_rv2Bg-*|Nnp 
z@1Beoo#=9T9`x(U_#v>#d-h~9lIXo+-=2&otMZy~DX{GRo`t=P0VV_57L|pOhHl>Y z31a@edorG=N^RkKGJd>Vi7WN0j>2J~doDRMrXJ9{C&MAvy(h~jv(v$I`6J0JyPsR@sq$iV+4A7JD zWK~`hE(Hef$z%*L8PK+plfB%qLo+}}W-0J;XS%aH7kR;n z4i}^&T-MPq*u2NgL zRWg1&(xl8ub)h>hIWwjX5PrPEwK$SpgIT~;_G5P8+N&Gm*;d(1QE*syV}BCC2{zl< zzy6xN-D25`S>HThC;Ng=hfeJ8Htet9lT9Ztwy-rQGSK}#uX-tEn%v_>R`MFsFE#BC zk+YOVxsYcPP+&Vq9`X{1R+EK|_2`R755WELao-n@B(cOk_q@;Vg+Jc3RAGcmJ+(xK z`6%uTRotWSaQvXR;y8P;?YTbgnR&`Z7u}g?eO#ck>+)Bk%gC_161hd2>?Nn~@AZ4_K+?1_z1CH1_8Xs?_9y3hv%z>|e*=t6ES#yR{7fSq-EvnGo%bQ#g80J? zcoWM_)m+Y+nj*3JWgvC{{JkHrDAg?di>B)_WVobZXQ@(MsGP=*l}qG7ubjpb6m?kG zTj-R=j*7pxO=(T|1;6+AI2%x@i_2&}ukUiBdp58hhO=pse5>B5!eRJz3nxBtBCq7cj&wOCz zE-*PY=uPb|TKLrnUZS0H3@3Y&LGRSCf3nlM6!xYCY;Q2?PvS|BqD13N4BTc*>&nNhrmzb1N)`K(?h z#_2HSXB!==g0%DGV00!si;5bZCu6NE-~cj|0Xh++-_6C0j=EA^-t|inENfNSp7hZP zn#GkxdNCB?(3cI>gw^iSspc+EJ@bP*?k@S1pVRI_#BS#<`KMLxLWn*0+=a~C&Rz0P z*Y7T0l3u;cdOV*IaJiOuAh&Gx&bIHw*=O)pl!?&4@swmB?q>BUUP2`_&YuhDa`j!U z8)+tL!{PX9LndO!<&%&hTf;XacrD8hj=k(}3>L)ze6|wB}46-jMkkxg9s$IaI;b{D7Cv%R_g^8H2ye(n^}%3>lJlBY>n&f*Rm4VU5BNb8@}Cu+dUZ; z1Bb5wWmng-Q*^MhBAiAVw)keADRiDJa249ZgH7pNyA%k+og^N;M za4D$^7FRJ>Tuq>vmMm_#$^yRBaLIO*MKM75h${(eL51DB3P;gb8&o57`SA4!);x?z z3W@^YA&R#)I0Du7=prFz|;??FPda7YKI3%}d1r`b7@N<ws#aT`gP>`AnO~UsOQsk2W3JL@F9prMGbVU^tYm6Z;BHFq1@b zz`XAecwvSD`Ii9Yx+;a$n3>oRuT78e`;lyGr$5hJ5@V9Ja;Z|X;*IjNXM+B*CLCvZ zv1;mhTb5}w#%XLG#*#c(!~EMjo67!21h;r|#U(RY`e1!Px9&2Z3!-wueyLWEN5I039~kwlQ*LXz*2L7YuLxj)>PQJ zyDbEPeBfIyfDOl-5G4F0tpPTelkDAvmI4^n*MeDo(f6f#mjs7eeBRz`zUJ64)37J3 z0b9qqImZ$BmSdaDFe(NJr-qH8=hh}O1j<&Wtts4HpnBW@)FwJL$Siw8nDCW%kO-e4KW$iROdTlyM%nE53Z86f>(J{v49)JE&47fBbhU}VY}pUY=Xj0 zM~5KF(Q`>!9D#~S%SV2Sv}MquBeu&k>7TkddPVB;l81?NH*A-isjSu&V-pGOHDJ4B zF09|`QfqF+ZEXqlA6El z=akmmu-(pujP2sdIbypEXX+rLX>?@)#06Zy^C*Xp(pEe-Y?onEgl-%VM~0dUw#z&& z2^VrlH>g0lv%y^#j@T~CoDd}ZBwYjucb;_kup_q1@GGnRNPQa^+r^`@m2ITu%?;b- zc;mu@hYCu)w&>#4TuEY&Beu&iDh3FrHkrXL-P&Y^fFwxJn!-a7Ap(sfw#&;~2ot{Y z);U6mAOddKF1=lG$FW^96V}o6jjfbEjGupwTb 
z!rZW3GESy#DbICbyJTdRLv<2!!*4J?TwtLL&Ookd7V=@!A zw$L}mZrCmv2g4xq!*aFtTLC7fpspBHLZ#hV2SWi#?bF zz9>+1!FB}_l9YyEGUc{9FI~wSJcBwMv0af(A*uPxeokr44ck4o1K2JelXo=@I_HS( zGMsTigv(NzVI|(}gw^ESmcow+A~|Ba44Y!ua6lXxYA)C=^SC5j&Ji z3PH*d+hv&(f`p%>ivZyv7>x)Vv0a8=S?x#a+rZdvu4h{>1vhM$?_;xNoOh^54Rz$1ZawAM?AxY;%>LA~{ zP%Yc12+*}=m9DFk^T@>>NeAW2juyv8>r3y7q02wB=*jnK) zzB-VXHX2b&0vw=pcSiu15@JgL>1$f6D3}c4D?a@dH9lQ+b0NeOw1>gH@?^Mt4n>t8 z^68}*mc=S2h#Wn7DRye5Elk1uLb0o%Vr+R^kjP%HT!QEzoUAlbfr0FX`s{7$VZS#S zp_}8o6LiZvDL~2xZNalY`x+itP+jKJgBOI40qN5%nyQrNj zfmB>`m#(%gc1i7M^I0 zQ~TFWVlav1RCxwxCcVu;e>9u6*}utDc~@_C*Y>cRKG2?RZw>qHjqzwU84tUg{jEN_ z{b{suzPnO{3x_44T=1IK@>7$9yrIjvr&tyH7Nv=Hi%vSP3v%T4_BR$-*xoNc9vi%gM?0k`` z%#hV-a1B1W8}9Y8SX2a%)eM={zLF&_v0D1LL)4_tW(y06!}gP6?kH0{uLAETD|Z}* ztSuPH)iU(TtW6JGua#TfU4_}Z?1UmZgkSNk9T4B@@N23a@`oK_C><8rpqm#wKZ9`J z&xCEmu-8@_Y%$XEK@)W)J`f-f7pH1E1!D5JiwTlvodYpP;9?-|x3gxZ+p!JA)QDXo zC=tHYjH`$90D+hs=_WH|V<5&4y98n~tYaW%9)Dh$uvZ}NwC56tvFxQ5Joi9c9DyOT zUt>i~9(M`EzX>3*i7^xwQNId4iJdR``0oLV_liyzkgO%ZRqRe2)?izBp0$f7`hrpzM7S<#>`U+Nr25-9*%;UKyLE9GVmu@ z1=uf)a0Xr((eT|+C^Ket5RYU*{5^XiDYKs0_Sx-W+(awwRtvcL-2x{bQ;cC;{^kVC z`%iK*3{?{ONez|U>^uRM(rx&Duk2Wf!NVSOLbE1gLd9YIMCe6ZKow2rB)Q4x5;01D) z=w2jRX*$pBz9KMO;VyT?HBH*1k%B3>g z3oz}S>+{#ry#RKf>kYU2@g?i~miENE)_0#BZ}x}ms}Xxzif|5}U4TOw$>WIb$xeIL zA5F)T?x=URKP9hTykmik@E6VXKX0Nj7SM?y``4?WPV3c>*!7pQags;0iY|HGv__FP zmd4NJJ?*-2@s6~rW{S82HB%(sVy1}uMKeWGaF{9L?#@h+SR!vyYN%*G{55znYgN<7 zdt3c(Z?xH+4hI{BWihV^?a{mdy1#WbvB2f^mr?uMwJ!Z1e0>4-F{8(e*tQeKcM_%1 zC)$Z8Rct<~X7l`uH7c98e%;9A8@bIV);4b(8Eq^~Q;C@(v3WB^Z1ZM{#OBQuvCW$) zQkzd;adUQl3mNTYaGWxTK+xg_&rCaR9MOhf_)*lW35KQ!>7ub!3EcMS<@X{uz|SY%OMQTIIGukaCZ0rQ zG@+n%8yx74w`W`1vu-AuY21M$tBpfZm4EQ%C{JAiFj*<>2%;fjCTe-M?22wzT?a^( zj3Pj2>?N7*FpEr=Uol-?##_MZO1aZ#PLA+j`<0)+VOsq~rpmF~p`eCUiJORAcC=hN zzu_lmPOaKC_@cnrQOcO9^2n?55^F~l2BsFV;+0uO?GAff^69~U78pEc%^)eQMBN=z zFsKo;R@)BDCQT+vN?DP` z>xy9NBK-V)g38V&5o}3=*Hh+<8l?5N$lmpCf|g9Lkduqv$plLpyn!-l)L`B^h%DZq zove#e@jwd6XpcX(X5OKOYf)u_8Rt5q>@@{CrIK`C8%U>x7@L7k<7$`1wY7 
z_vNrK7!77QJ4d-C79NF>>?WE{2WLjfwYCqDWncW*t2K`mC}(~oRdS8-(>!(K=kr>C z&_80Lk3L^@1hNkY^J^Y~1b#9nGY>fWlFyq*AmNNWVIBc{I{CbL1mb<%b4^e|pFWTfLTAC^opA2&h_YeYhKR+kW(1w*KqlY`Fk zCIYX<=1$_m8VRCE)Wv+<2r;br$fyX@+?-rwMB=xn9WiA5WK`q_W=1Y{#1SgyiBGTdo(ybo}67hazg~qL}2QrvS^L? z(E!P8&coc;eK_ak1`{t?zF;|dD>@W!nlXT`PHPz+M;41kjhHZ<@kc*PVx0X0IWw=b zoeg)mxI{I-VKz22+f+0b#+HAQaAh(!^6{LfRWnbXDk5pYMn&Yagc+lX=mWMYq7Se( z=cpo_f^AeplC@ntASHsWigM~KN7cNDWF;FFk-M992^hNs2ZqBA>NZ5q-c`Mf3rdAx9PA0>nl|q*Sp~QT}mT73CjiIVx22 zjR-Wch-;{6LOP}nt&#_bvMN56bNBt##$uV2s0}T0Y0?Nc`8-K=EXU7sN#6BTdM4+S zJMWmUjY`wvvM^QU@+)8|D+9aZtvG>0s?FES-@Y667FFU`?vB5F_r)ra?xW^qf3ZsJ z53{qfQTcwcN*oT5x3#IqflVP#>N1=BMm|eIrY16HhKcH_ydksAZ}b5}ptLFEFII_Y zEySoITJ+hfh&~WNMHj0?8x_%_&sIhBfdDGHSS9u+Ba~$F+Ua7Ih_>sNE52Y=BK(Z> zMMP$W4kapXuby%V&+w^y%3Qr&rZqro(SzO)!ItQqp4B6o>E$bf>gfqfhl=TuPwA&e zJ|#}?pEQ80)GirGi7N4vfji;Mg)axsoxk!YFD*&ZjV3K16*;z^=Q)x~zkvTaaqD-yU zDbuk8h^VSNAVev0U|0-D(Rw{A|@vX zWMhnv1G03&iHhRa?y-s^;UOxXj=QLMIx5;EEV@y#Zr-p$SqbeR9#~gM}mmSAy)Bpd>qget3K9%O7r7ZL3q&W z2Uvu|1m>vd5TS)3mP_tUL1reiv|n#PrGTMgJ2SYuLQJ!$=R=Xph4@gQ*~!HCp$1e6 zCY%_liKAZVPUlHdD#QXqkjh1OGQg~4V)Jrd%5Y|T?*7KmA)*UI%-ea{nMuU=S3mom GxBoxSeuAX{ diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local deleted file mode 100644 index 7c59cdfdcadd549ead313ea8df07aae86ba79bb9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 267629 zcmeIb3zTF>eIGoxdv6z)> zZPeR6boZ>Z%9qbLaexpz;lw#Uew~vqb~v%)*g)Vwz(m0h9AZd}V}rmXfDMM653o&~ z93M;^e|2x&s#{gJUbm`l?@Hf_v&^gSzv}<~|9@4t>fjvy?-zdfhnDut&8EqvuvJc+7`&)PJ`REsqXP;a8%;$H*GjQehXwct0 z*E!oAPg*eE+-|0F2cfU_6U3;g4fv%spINQe;E7obOqsQ-#RQt@!azK zzq;Y4pJjQ8mS0@rWgZ%F4-U1(Jz-;l*&GJMMQk2E7w)ACGtu?AaU*r>M~CjpGJN zHkXkKiH%xZ7BDced;s>M^gixwb%xzDsFsT7;JpnvWXD(Qw#lS_Y8WrzuZg8N5`UXZ z=e;xObx`^7WPf-nZd@G&ahS_tKPplBTit077m^uegAn02Be=#=V&h(SD_*=Cm0r{S 
z^qj54g79YI;$sn9Z{lL3-<@zp6)G1;6KVaSuHe>|Z=glwA-E=fwN(+B4!!&mQf^(k zz8{raX+5~MJfHR%@E;?1rOos?XDpk)``k+2`6A+-xNu$Liw})_;Aa|UgUVDPIG~jP z5=Sj-ea=~BA6Wn<_7HtJ?a{1QH-^?8BRK7*VFpy~(n2v?=nkZ1{}^+#bN6af2Vmj8KaohEOnj zG^hf;5W&^9LgadjQa>Pc>o|s3cI)`Vl6J+%7vaMVXxZ{6Hv85>NkZ5liS>RSu`KU4 z3??9He9~^y1duH#dPq<1hZ+sbeDL3!VK*Wq z!2>OY6ogd`Q}AW0+#&!1MjRt+OshpF>;9O>093F56&(D58LUA%Dj@_GAu`7ZOehFz z0n47?K*82Ow2C(jPGEvE3L)65f)t5XNI`oRNC6pSrbq!9v__1vVpOn^)v3`Cig)y%L}z^VNP!JC&Xmnft_?|U8-SO!8af;+q`+dvmPmmG zrB=EbAdF!LvqzHMMF-r|N*r zL*9fxn-&em;+N3S@={<0=ZP1)T?ID0a1B=FT?IBHV61=>l%eJi2P}~JrT9znl7P*w z0`jEJ4(xXo*if^=3akkE_GV-dFRXwN(FxEVE3l!)nXka-3lu>S`{l`qNDaL0jIv2%*FsJSoPP0mZpAadA@-_hETu& ze5O+158-&30al=A*bXZoG<1658m!7#fz1f#6DuI|tBw_rCv~#3#|mtySz!fMgivp$ zaDgJUxm4d|aIo1|Kt8M!pgmS#Lya?KGge?jGQ|o^Jq|5WyhLh)6A z6_6n+V+G{FFp<@0;Wk*o(HgJqqD#Yj#k17s1S-tT4M!hgUY??m&oqJXEVN`dk!5hSn8$98;&fHjHCer(jzL_n5ac003E0JhG0B0(1AaP<@XZa`Hz0q@&tphTooWfOaSe(cVcU3c{`j+tcX~aeRt+Y#Rd!2c?^KE z^|hAjTNh7jv<oM4Lt(;v3Z^t!{&crl8ORLxQiw54G-1z32%@1Be|74bSLQAjA>_MJnC+3b|=&H zco%EjclabZt2ice*|WYI&1G&i5+#^rbdY`Pbts(-%zxXUT+qi?oyAeNczCH&5y|77n%*3%aw;I5Oh65MFmn`E4| zvV>YUf9_?>fAiyRCh<__WwtfrcOzJ>Y?xGg@%)|r!65P0zZZo}#zj)Q6rexc>OEZ) zSE|VQ6AiebGVjNFgWe`Or5=B^tOQxF#<9sK8YCVXE99h-%G(;yt1K1T({%EFrX?*# z$se<=xEHR||DOo9>J|Z0kk?iyId@F|rcs{&0`^n22~Jm3(t=Fr;oz?$*sRNH%3WM- zyqAr}vO7dg0=jynxFyu85U>m!#^$juallVD;eh~L(UeIF9xouFE^d>lT_errGk*;h zIZ1FS4X)gPKWPM3%q(iP^CV!VV^%g?3a|q{E^K>nbZhDWdCgdZQzc<(;=0lnBPooy zRDil_Gz6D<;7Tns(Cp|g4dOtr4pESTkJT$7Q4B$J$ikpp)F|HKA zIvhF?p>a|)1X_3s{zC+>u`=y!u+GYbTl6DN>_9sN1R_2v;DIS}SV>5QcRA_A8b`ki ztzb^k_TCE`2mGk5h2`ADyYp`nZe7#H87&b*DmJ5@(pP@$H2*vRWIt%@AQ>+NgsrZG zN&+*h;CCZ_LzvPKXoW&b^ekl6RcfQ%BKg7em6uk3uR2FF$)i41CR)YdR`1PP%IipO zv!D4XtlOmOQ$ngNm3XQZ0Qj1)F-ti^0avt3MOD^0sCie9i)JVL72Bn>t^V0~_XEA| z;Ew)T%SEy*-|}zl7^G;P@24aF8Z9Nk*{#7idkuP?5x6cnf^LZj^Mq&bdkFa9+eJI%akP z%5EFp6Twxw!P145Jj9t`5HQnXzp zevcFca-W?Lg{wJ;QJ)F-vPU>c7J60m3-B)+aJ;N3Upb*Rh^&KElT=M|NQ@dVOX}SX 
zcr2u(raRkcm*1)JXa{ZZ=xyhFoswe`pw(9$$%8cz?X18rH)X3q?z6=r*&Ge0cvCv|%TF%xA^qhBfb<-oRu_Yfg0A^Ro?tEAwrUI=d=EF8ykZyau_ss{MyQAo49 zw|6RTqW`OcxD+#1H5K2B;8-}<$fE^H5fmQ^sMC~r?v*wiZ6&=bE~8Y6p;Cb#Y1F}e zyWqYK*#?%u`+^2XdM3hb@a?Vrq58C@XzF zd4FL4%VIK)J~*R@j7u(|J%_&@{h4{3zQ`-2|3>=FHc;vX6q zc8SsWX9k8Sb`iA3cN|r6(>_{&K<|~JW|M{pRY&)W5j+$`YALMKk~;3o$+nlv*JJb+ z|9sq7Y%DFN@8ecG*%95d7e;36R0}5#Zahg{f-SVlJFNQrqX@}7f)9tloKvIA1b8ex z7a`u2eX)N{x|n;Fo&QMn+zF0Oz$u+M=D6vfBOO6CqTO-g1|y z-foL`=?numLkN<4cs`013isR-WQBrGOWHlxw?aY3#jfn{59SM-Zx4Yu z(J7&C@LLsu9ek-UTP>{jv68E8GBU5|o_O53~>wv9w5aOwqvzJ{F>k zpmWSCo}!EhC{CoJ0NI326=xG#5%RTzZIVeB4EBif`GhqmbrT(fzoC@pinba1ps;J zbTb!K11&{(BA#S-A)R#^$XF3e(OR1_9hk`+|jm;n0q2tk569V<6R7=VuX=k|>)R&G+xly9N z=%vHQ{h=fAlut>A$-O2t>`k8(3cX^8gxwm{cQ(L(MNTn6e0C;zg?iI} z9O~by=qP_VV~yRusoV|*pKrjUbs6Lt(z*K!K`ZRUe>KM}WE#Y+`>uaiqiE@UFBK!L z>^S9QCPrFaexGU7m#vU#rY##%ds3(Q*GHVY@Wt9B|GY+%r0`U2sU=fdGTZ#AZGA}= z(t%fB53vCsd$K8cChQ)iCACj3zk;07>*BGhMw+KGcfH&KEfvsr7fKWL@*bc;s{jD{}w{rA` zKWo}&SzDG=z!d}P&)mS0uC;8dv00oVQP}W#NCozR8rwn(^Y@y{!6sg zbU4`_O?sUTbW=}pk)5qetf4m&7$Vv2fAAX(`(4)>#>;X=F0qi`@%JjVa?qJ8h@bol z#F=S;7ep$}xgw~k_b#ucdzblVQz1t1(-p8VOCc)qvh1?408Ws+DMZ$g83BB<38z^7 z>o!bRl1(`ZGkI5XS6m1&*|)ZpSdrz{)3iW;0v?MWsZ^j_4vzI@9X{X_P3X@^m0Uu# zetV%3-<_2TNQEV3(Q8c?&KFVVEmYlah9~03>Qgt`mGsXlyr0A~&W3B8q%?y7Zna3luL-K1egMj??0{I^pOveikw#iS~Hj4j;n4f*Qj{l zA3L|+#4b3lMQ6xc%ZD6OjdTn~vdJ4g374gr?%$JKt7DT%q_b63giPWJZV`z+*ef?O ztxl=&EN{{P_AopYKj;y_i0I1tuuoMVZptna3$8dWO<5l-X(4CUu4kmcK2=}`9*-Xl zpg_)Y7>+cJ-dcJ_iKh>g%TU^h%)*(ILu`#C*EM^IyKPb`Hy(5RulTMI{%wWhY2~vl z3$jX6cchg%a54*#7g0#xyh%D>p`XioL3me%qe{%8Q{+M4bXC?)g@awqv(Cw{zy~Yr zVKYCq$d`*6tRJA$}EW0r?(>uslI)>Q6pUCGhV?%k6eX{qXwfIXQ^ z1C#}a=FGb%6S!97BEOzY7=x+wJbN;kG`)K=VYVs@4b_v$c<}7Wq#RsO8A--8`1WK{ zwhbxCt0xl%WbN9oCld&ZPFVzu&^?(jFvC%;J=u{~Ik$)D$pnT}&TQ_mD4 z?8$@yS-bY@$ppfpQx$pnT}&TQ_<+RuxgOeV-GJ=sctp3G9@ zrecS0S+l*3{IB?}lHTiNyy!%i3(}MEL*S~)w;)A=|4b<5^d^ zdr@}S7LeXO*~%<>GMNSnUe7~MCUC9DMSkmK!Wc}Y=h>6Vr0Lz03A0sMXsCoF`_ABbuua2hLq&BP9_Y<+O=O#CJ+{#vIrQVdop2QhND`0vX$C; 
zGJzqLGn;#|m6`NpAM)J0C=(>plfBZgLo+}}W-0K(gfEO$GG1_^!v*Qc_#uLHWHOQg zIx?QD%4fnQz;buuS=P%Km`(mGvtpnBe0MI&xHhCeua1l#FHlFu6BQk?2pge0GJagb zjIks8$qLuvNOlco0aw|N*@bJbZj5JJ@!dI%njkNLiMB#9;Vx#zupFZ}VQr3xe528~*x!)z3HiYo4Lcr_sl%yN`>xBv_3A-*>(9V(Pd~6aX)Hlehqh^kxePUQ?qjEizlUG) zdw-9!0hPMAjOO$DE;m}v%t}(D%Id;^rHp9Ws4Nb_`y#le9AP$k)9wm7bo8m-xYZx_ zr~U4rbGAF!L3f_sJ{nG@RyQIn-$JwXQE119OWB$av)pwi8~yG?avQ_RlP7P5UytDB z+9}6yqC4()HwL{EZM&JG4dIj1-EnWLGwhz}#f^B*d-5-WJ!#?G?GJn7j3Bzh8;yak zjcu&X0SfF~kBI|2EB4zalm4k;ynw$ZdXD+5UM9xrFipri=nO2Rk0<-XQ~b$ZzB;=c z_9H{-Z*`}=PR=emI81c|^RGs*q*Y~W+(Rd57FQPO#ZaVQUp7<|R=Z1E=Ptg}xq@6H zy>A^BBV?WPPd)p?w_4e@#$EC$bM7*0Pq5=b?Jh*@cJ7jYTIDW;81F8XOuA0z0=2u4 zncKNb{%Mt426H3$vh?a@*5mmcfXlVK1G#Ohd!}_a&OZHTP$okEMiY{OZlcMh1&88m z49SHhyv$w79>t5OAjd`dJi3>Cj<-ld9S`4%;I%A2IQFu?F>q+|J`X-iDd;kif?%~^ z8s(*}>A5s79gd9U!Z#am2g_V#c3Xm@`Uy{VMJzi#gY3)JrFCBqRmfwA$+*4~!8*%M zb!iLnz)#vmT=0;DiRV9xKynK=D+Pm6J1b?iUSL7vPb0XM<*2e=QTM~vxZZx9n!Q*6 zd+1mUJn)?c-08`%7&v?dD7(6rouY%41@RB3NFfPZ@V_F@P>mc(2p_&OH>H;l0)*d> zKynYcNwsOi+0s`WT%yz+Z+HVOO%A~|@vE&$ldR!=2`Te0UEhz&{P_$i%!(5DN(48v zJlKT{$`uzbtQnC+1!vyGn1HSW%lqMl)r?X2a>FItQ5MAj;UlgjtOZHjvoLjMiOM5| z#xUU<5v+L_kr)nqBpiT$9jBjlYCNbd>1qgPeW|^hZ#i^){jpQK!SMBb2n2uP)MPMB zilM-v>}9CAG#Sj}r#OJaI5!pCkKX_n#w)BqhbyI+@+=E^`TBpYRn)TT-LpMv0tiHg$5h5{1+}wEzh!; zsH!(uJUxe|mM0)dR$(kkUaVRuR#>S5bZuvK1X5s9NL#*AFA|rR)ft2V2o01RZCojA zAUTu(byW)U;)R3QOPPtSceUvez8J~2cKY*tVI;AiXu>g;7ptb8w`G||W1PljVJykR zHO#+tepA{1h~PGFuDE38A#9;{|C%ha2tofP?Si^G01rZT1Xz*2L7YuLxj)}#m*28j!GYvC;JqK9w005%+RLXhy2 zbO@d_;K*7EU{qfVX8A?mm+D;-92kfu(#>MoU16Fi0$+D*m}%G()_|?qku2(Xa41HD z^0Lmi9ouAvQ87R`HEaw$w>Fs}P`0Wdz96qlss);~O>}CIS@wi5;VbVT!qJEU+^}7G z+M9gcH)LZ>hB835Y);oV#%|aynT7)Y#ElmTkS<*d89=gkJB4jo{*-vJBeqM1R`8ZL z#STjV2EwA5xL~`cAp$hSWQgTiVY_6&Odnh&E6G4!wpPkPGPssEJrmaEhV61J2S{2R zJ(r}#5s)M;qzPr6>ZKBSCNoLh4cq0J^iN$Jy&`pa$-~6C8@9{MR95SXv5ACsrdZUb z8K}QnL+_2rY z4Yteqou)T*f^YB)bnuNEw##tF1rbR_?1IVU(=J5?XXw}w+hy1k!-fOm$WU{^cA3X3 
zh6@iB$m?H?nL1*-EOSDT@RM{AAUvd?l+cs*OZpVGQWEu+mlgDPgYUI<+Qb7775^5!)qH6}-7)y9UCgme^t6u4#w> z4KW#FX4oznu#D}x;4Oq2T00cZOz{SCC3_#h4cjG?#3m@b#qJP9IeIQhiz6VZY~=~Z z07OcXP7&aJbnCcpmuFI_aDAm7hBMPPDaU=g9OtrHSBy=h_Vv+HuoULDZ`aH-hwYxX zB<6XqqOFR?H)Pr*e;%d4!&{2b{WpNAi`xSO`|I*Rh_WwoWD}65kX)_ zY?onE3>yxJBSXyv+hrb?gbUs71{YK_m#n<7)@ard+hv&(f`p%>wYy#E>wfMMa@@De z@GGl#NquYWPwoupf`c2j%kjpAhd@g|xosM~o<*Uw&^WkZyBwopfN*Nq7--h5O=bv4 zf<$F+*0#v*Hs^@#^70nKgs;4dO2VBlU9fT7w|ms>Oa_GA9HHKS>t>!ks5A zWaGAPm*H1d?~?k~+Mhg_Q=jbOcqRkK+w&tlxM8~-qhf$?z#2JvZf&w2+x1$@z_SZ- z_=_X9%gHCezFm%<&dEarku)%#sB)qhcmJFA&|PV?(0A(5@)|v8_j))Qw+#pFN+!cY z!1&sr^f8K2PqR~66QkPK0V+$42_KO?2`LDH&3d`V@HF1(`&KNLjS!p6!6+LFRwO)ec#1aEJ^-9a=VlqEqlQ3Mu$@t8~!y*D7aJC_v=f8MvsdUW-QKNlP* zR|+cCobZ2FJV#K?vAT*eNIi84@ggCAAz1~i^wok&*LJAc7i*hJcNeM2CbijPNWXFl zJCaZc?`nj}3CID3SQJPfA*E*U3qaSJ9cN>Y>8M}MifENvZiGoKUMmtv<@e#aV#T6q+iugt5*mbV3o?B&WOhz^3q z5M7{xzU+qj>}~2nuR9*1o8voUbjv#_K+0?G@Z2xl%oh)e7mc9{FDnP~Y^aUiw7Wuv z;M`V0OY+|F$&)8{|(MnlaYpT4GTC#sL_f_oLC>}+o# zeRSBr@*OUF33ltY?d`$2RL1ch`fc(myN8qNYZUC3&7?Y3%s$>(NtW1o;!--Z6vX^f z=%Vm6rK>)}-jtravP`FJ;fdBbwSR3V29roO$}>1M?r!ya!|9~O{!OOJd%Dwmb_SjF zf!1_qd(dlbj)v3mXwcc}ZTHXxPQ%S}ot1LgHdjOthlP|}7t1m1OC$EMH<_Y~+0Vv} ztLHTD!e%$D!+UqZ8(6Ne?lPHMnlH{GGUMfxDHB)vXyS8erHkD&Sx;q>E6+c(mE0|3 zZ?@}554UY?8SR!X-#A~nDKk-ZOR*;L6!_#WxZlfSQ4v5^Gh|kKHA`G#we)d^s7asA z78VkR?I*?DQKoo71>P-I?l=rtTQHKVW$2X|pdKoc8DSBwUG_8)9m22q)((hob@(;a z4*A0lF_aFAY|zaMUYJ3+=V!vUVYt+p`IQg3Hl^i*CN>8M#KozaPJx&_?qY)ES?55^ z5x5wLd#$XQ>9n6uftctpKp-YZy2%XL7>M!1E`gW~>llcc$6rt;>=lUHt@9O#u{zE} zATEx;klC-XA|{W!1Y+{6b0Fpjyr8Vut0F#P8;Et)giCcx^dU84L#Z7g5R><>Wrl1F z#Q0&CKum^p48+XiFDMiC3dBdwTOh{jkXmrP6>;&tip+kEftWn*5{SvO&ViUC@Pe{p zuRwg%HV{*E%_Zs*;Y)4S5OrlJn?E!`HaE)**%*lN!!CiC4C@$(na5vHChQf6k6H&} zYQql87#%EosRe7IuFQJBz6%~KV=;EC8r&u*9Z-CT9f1dmB{Y&d*pFA*ZAK=Mn)IC^ zsJx&oyI+=w4Q|Y-$Pzr(mCF8`YO6!j5qx16NG@b|FmyGHd<`pKjhUwuk^q~vJRAix zf!yQ;W#DgN6=1(G!Wp~4c^MeuS(01eg&C@9(yE#%;tteIk$8)lBCbr#6iFLsrii;cGeu&Fyh*7sr+x6Z;bp84d=GLYEvmg8$^XNQ<)J?EW#|!vt!uU?2H2Oq4@uZ5)C)I48 
zf3Zeo^VY8$nS3L+`NZ1hjU%Itg=s1=QzSNTrig9cOp(~UnIg7%Gev6i2`p?)&ut^4 zy%hGh_eIQq@OR+K%+Shj5XFmkqc}C~PtS2?x9ZwB?7T=tHwM5yqG0)t1b86>5ZsJo+H4CYi73~E#* zWA2Xy=B}g6+1G<2d)G+@DmZr8F-P*Vf@if-(=#U5N*cVLGHEhVQp$=fUaw`*!D{n? z6bx#V3Ze#Ypv)OHNb7Hry&DwsFbG;J6c@dd36?Z?BW2R4!Mt@4S-ep@Sr-lBffSOf zqQRReb4Cr)en({QCO?NGbQRoGw*&aEq*Np6{xIdzsKUHgJ0K?d!;0x?gdJS#cy54> z#>eFrVXs)UFe;I>m`RC2rj&7UA3|6mCI>BkJ_2lMW87(Wx`RQ!W|6nFnEzxyZg7b- zk{9tm9~OVHB0SL+em)}nd{p@PI^pN*g`aN_e!fxo`6hVxWia0#_NO^JN4O;x9)*$Y zCYnt8r-sROwhxhIU;Nl>G>;W1XMQAAa;@>xJayye^MZoVKWw6pK3{bNvJVIIYaW3F zeljOB4>ujy%Igv`s0h>CoLpo?;55h~{7A|sN4m>IdKpCMDs$Bj@RJ1SBo zYe!3s#g{(*-CckA$*UW4>3_fYe|=%!o?p4_p=Yl9rRQ$@!C(8mMflMx;xAnetI$Lz zV#Ny(J-rkiPrsf+Ula888T57OrGS1ppDx|3W-B-QFN+%U3%iEr#&#Az6FE6P0$1ou#CP#6Ii;cnD`ZNS^GfLXwL6iSlqLB-zEDG#FxGJ%B3s$ zb3f-dMx#N3j*?qCC>$?G{{l}#zL_dzcHBFO$4Y0KZ5~J`fqux*ZTj~mB#c~$OQ^d- zAMI!7C1&t?x*zu8@6i9>{`!Z|_ zw;Rnpa3k!>mIjj-0TJDmSchG&d+SbjP;#4H$ZZb7 z()ZB;S1*USeQDt}`}SV48{&5(jXnYMclJ>O7{mNl|IB0w`smoir#q>uu_{JZO(usp zVJzttz&??=D97DvVfS6=I7)OvqH30+Z(|qPv;P3{4!9QR;%hpc{tzwLq(W6p2ex4W zzg(7%) zHWy=fEu?S8l$g^;t^---qSS-Ko{VL!)TlKFI*3%S!J51^eKqcaZ<`>6+}BF;XR3QS^- z^OIh0D{l6O)1+~5Zs}6=Y4nSB@tZ3-16^H&h4%bq`=iFvfr6jh?%#=+zXf(b(CZHF z=$|D5NWWS{ek?zDG<_cPg$yEh5+pcsYW~u~@$9cEb3t)1{{;3Z&T>aktV1HfENr7l z+JE}sZj>zcv0JOqZ!SlOim)x_;O^uA?8cv)wDVO$6w}FiAq@JH>D8&5z543taFj}8 z0Qv3~m(frxtTx)MXtds-w>gb3fjwx`!&ZOV9}N#es)5u+(yzVL4pgpoKkPb|GR^rV zGSEEyFQ$<><&TXPF9Z5(iNx=M1IJK28~3*E=$|~<8>8b}drHGlc}VJ1sG2yov!Q%c zj5s(h<~H?XQTlM^3MAUZFM$JSQYdsF33X6_O!qx4%^&Y?JuCaVC8?S+&F6dqP#G5vU!9dlPoQ_>daQ;KId(@6|C zea2V99<=j%+DB*WqTFzte$VsG=Y!%?5`HECC%WT)cVmFIxS$x5)h{@!rS;N&K28Z4 z4~50lRHxnkFwG}RB_nYi&We!JgYBs<$_-pK%+5XqY(qa=eCu`{6vcs}bjxxX}B6uCEL^rO^^yxwA z3{R12+ppxn_kVT6Pd|GB@X>vm=%#$CJeuMy9#iC9P_?J-0}Z&(Umtvh#1~8ncL9r= zzO%|9WeC?&sIcO>Q!nx0?G6CAEK48E&jV?Ch!#H72w}w&lz-DCoP_X*?$bfY^oK-1 
zk%xd(jtHL&A~=*{@mrmtO*e!Hkye9<&|uT>(FSmbbvt=hoI*+=xJD`oLI&BNf$&49EGTZFqBwiKBt|8ek4Kg61QIS}&F@ZVfM#1ES2SRR-gHcqf~yyiBj!vKGV=elr5iZc|QT!YyEy1~e0U{#XRpo6>l;fW?-I&!8(g=vz;c z`?c2at;NgEyDYidOtV`B{$s>nBqXz|bo!h#O8m=trX}wp@JPI{n!f$D#@8DZ#wIWtH&;37JJC_h>Kw#*k3v=uu%7x6c2O9C2NldRajbN4{gt~~9?v##7_ z>l~Q_8{V|gKpLt|4V@Kd{w@4-A9(!PWgn))@lv$$T*^~4-4Zwg@p6kFFCS^Z8x78E zr59~0&)pjDWz(#|MVb;R+u6P>?caS=pAJv41bDU)#-?dQAI3vaOCN?1+thnWO<~aR znLq-hjeU4i4kOC6xmWjn*jPz^_4DW7Xu$0O{24d+Vay1%_+bbIvqys};0qBLx22O5 zXzDGz9H-P@2;Doj94>j7c>8bzOqV|v0bl4ftc8+fb7rS&2{jmj!;%n^aXK&fPv*WV|0w68C6{t;*;D%rfYcFw ze%_41@MWvqA^-wL93yQ{ zbqA?r=m26dZyi(F6zyqPpaA~B4A!8Tln?@o5ShaxCg7E|0Ms<`+aN1~15`@z53S-2 zgAE2lKWEWf-9mp2NCBAx+qn571%zr#!x@7Vlo!dJK>#u`%a3vc zffZ6fC~A2rkOC<$ZXf^|*^U<*q<~P=@#2RR*o;7z{mHQ{Geio={Hh}blt__j&4qnLpu@*=`bW>7?**eAS}CAFPl9 zOp|lbl1rU;qySG6Y>6jI@T z9(?II_#+FXfLu}@=kI12*V$3iX@?bX@hMenu^*tD?FX=3)aIWR_7d1&1@so{VmEIoWHVFm3>5i#CVV8jZj5Q<6KD!^)20eugfb?F&n1!N9vWcSAk2vtq} z(5eS3AS1K<_+bTvqL!ChumUo&9WORm0imelg=?^4;RONXt^%78Fjhb}_q){KWSCSm zbaMbNCqu!=Zd9<^RX~Q+$<7`tu%Tv!6<87S?ajy_D#XDGD?$CFLP$K(rG3oV68&&lgea`Af?Oq4u>$;<87s)imo#1& zrH2IMPN&hZ#Q(ttE6BMJJkW?0;Kzcns$mLju!1A`9(#MN0Jo^-eFY@UWLT!I-{=2U z4M(urS8&8(UjdPe@$Cw7>tSjGg+^zA6&$IA6;L4*lMIR#(DTSxm!2_JK<2=pH7l&ZijZ$_Mh5Z13J4LM0PV2?8)}>>o3R2LlHOPW86*f+K!|BXYKawC zP_n}ctOy0OM^h(dv#)^6Slx&iMoFu&5U>E52lg*gJ!IN zd{SeBZp;6Y4x=c5V^({wwb@eKs>;mZ38$b%*a55$WNR*)aX%a)mSVg-0OzG~4m zT{c(&rm1#O4Skt=x^&7pTP5do!U}SgRK*JLV`i)%CtuQdVRRl6m>E`(b5Rv5z>fuC zRby6GSi#ZCSOIQP%~$~mGv^;GIO>2E5V@GK0>%akjm`oqI9drSph74nX^j;i(CJ?F zOJw)qD;}qWzj7RM@gLnR;pM5qgXKMT) zELS=Z0daN9thhp_#GMGHE3O9<;e^0u&zBtfsy$*i3lOJmh7oZAEc};7K(nXlLO@SK zuPy|n#`bmo!A0>Z1bi)|P$}Y2lgS4ZZ;CybjGCT);4OamTmxPoDhyy0BA}(AR~!OT zLv}ku!G2ZSeoFE^*Y=e94+%IBRf~iY{KDUSKGlGu)yzj|hBPg@L`F$4TP4XL3Xlxs zt&+*skUn&7)f?@t-_eRJE@z(DBUBhd99kW+msp3vwbI7iVOlWAw&fY8xqJQ zJ#^XD0y8xU-dsg0iJv`afkR&*%RO2n%k-ST!xd~z1RSdft_WSeZ0Z>LrhWbr&R;NY z^T17aVryuDk70;X%PHQ^$lkF4l&!C|RA1>V2HT*UzhqI_ObUJbAtDj{x^RLml0tvj 
zI@9Y8I}?22NvAg+kH)R?^(W;UQId)ROSp?A@C^^PXE{Vv2JrTXKayL?1Ih!vF}jqC z+>@1F_0f8&*FSZ7N)O~MpARrM_qJR4M4)O=-X$E;=rj?{-4O>%7kZ!@i&oQxq66eU zuEq6*MRag;JO}TL;7-BI~h(mzmvX$nu`}%T+txYSo z=&#~J8w5emr8qR8L*VT+l-_!kUVW9=eHg;J^8CHxWmO1R1`bV=hMUK_!~s9qga-m}MN=jzc)Wmwy0}fIc8xTf z&-^u5v+zj+OvHDJn#usaBdoaD2pQu_0j$HJ z6VW$LR;U#GhX`I{W!l+bos|o>=trE`fp!Q8M0`}h15@O%l8}mos4CiZ$^mN}{Vudw zV~V!-UeGw;M{O-E=O*5rf0J*W~TK&E19L*$;`c#={6@y#7H)|=cBe~6f z=BKc3ld4bYQzc!aDVKg<6EWyGTpYsomlyQgFiE zP-Y=ov%a?AZ@fkcJnU$xD=>VDuADj__uPfIt$U4d5~5Tibm6J=?+&$Wd<%HTGw@^}(91^1jRAx>k2=8vd zV<9Cq-PuOF{7#KWJKLSD-gds%DLEzqTHQ?nt0Q@^2BMu6_~oW-HOPIoSR|XH;dDG2 zq(bQp(FJFl=ic4?FL zRq%?{OX9tYe?$I@gda)rYo^IB;FR(lA4WP*qO9-GF<-nH%b5 zF>)MU%t_r|p^9F1`&Z$Y1KSqfLxiw|?3*O4l7hRsQh3w=_rbz(%u~8I?Vm>>&FZtbQ*+VI_A8WuP;q)rD!1+$R2mJeN+BJ~uemp!)1Z=#g2^*osQErr( zHaTe3hpqI{fRYgvs)8V3&02$~%7(2~@uD;f$; zQk^^okHl}PM<*IEJzZp=lMn@2uj3=GgB{FU~|{<2O4mHNFzMnAN0^=kLdps|Iom&ON_=pGcZK4 zi=Z{WtKM#lcll%t8kz)U>nsF<tqFb4tHKZ&si$mT$9^@<a=F)LzuTmTe2E{fue`tq}M`KM0)R$5%DNhju3Z>S92GjXaF4LC|ANOy{@tL1@^iQ7b zjnS6So}|O%-gX)GrcaH==pez((RQ!JopYR@idsmbN@w~0D8MGOQgy;`gUl`mOJs+T z>TogIDhT|*-)q8|x-=lo@2V!WUONcTQ|J{#EXJ)teP;vwSL75E#Aj!cSEx7r$Dw|# zijMM^GuGG_YSjCKKHq>x>oUkQq;vNdf>zjx|7wm|$hJ*CvCIhmU5%op_q|k%w6f!r zlbIMP4T-jbgwHhU%T~xV)0PdXJ*m_D>m$xxHft@?Kd;dwDLhqMYRQzA%r<{|gI8Y< zu@zns{_@JM=#*=-T+N6N3dsJ64mR5%d{h@JL3Git50K&H# z;J*z~Z{I8yRGauQGUUHMU%`q2C$f&kP-fZgywxSUOLgfr$I}j`O zx7Hlr%F!GCtZAQRZCO$QR}82>a|27d*0Qa}W^szd^CV|{stIqY$6Lzg<(4aK6~MaA zEY<6iBOG|s1wr_1Lw5gDy4?|)TthzB6sE5V4^CJ8akun! 
z(fz1%wkdTDy@(*(@W9 zZ-yu0$Ldoj+OuW2Ya9Mqh4+(q#?cX3=#p-HV=Yc{$GgMRSzonCzOxSZvkD7^5-c&f zHTa7P2M@|6#8m=J0mt{B)^GaA2T4WFtD)eva-+Eh|Jb?pCU(JbEjmNqT0Z2MYNTT@ zl1<*|Nw_S{bpM{@S{<8ABAu%I#C-cHr^&(E!Tjtcu|l&>1D3K2R=0 zX(uuZXHE{WHIiJ{fiH2FwRrGvD;!TNpJiE)RhqgZt<-^&S%|zaRIW?2Y3wATn}c^% zII6@fIz=AzO;=^@R5;kxJnNkN3Vg7_9(FU?`ct4pF7{j4Z5fbtY~eFtnHBpCh>l{l zKFZ3L>vGG@B|ym)SXDA^nOoGN%|>-T$K#rXM_kF!n61Ijc;-GFa`}!RuF#m}A=`S} zshM?^yB86AWsJy&;GJ$JFF7oTigfW>@j0ex2Ov=G5GLnoJ`;6q0YrmdMAS^m%5gbGJUBbW&N1DaA zHSeD6NUNON!}MeVLn>!B_hd&}&xf8&Cdev1Svx>aW+`%hjcp@av7~!)tG8+1JsB@L z(dBY%(yu4uhrlN9*^|jgqW6k@dorG^%4@=ZCah<;{0S$h^enM?x( zujgT%OyF8{fgtN-!Wc}Y=h>6Vr0Lz03A0sMXsCoFZA`tYqqt4z zo=nb6?L8R|x$Zq#9D`kFvsGDWsDvcr!Luima=>IH8rDK58W}Ivo(#S{nUw81dop1_)~@|} zGJ&w@ltsV@-IECeGaS|0ldaU&lL-u|oY~xyt#CaV^&qY}tJZ+=O-a_+!G}EeF3JSS z^klCz?9dF*ky#47FyWE}U0O5mj*J(a=x{+gGJc349hr<|fR2nOtMZv}2{3p^CSzbW z`LE21eg5<9$Yfj_(x2BV89!d2j*KTNI${wvLU&~RxP%#FNA{ByuEmk;8q5N&vLCYx z*IwNi&$i0dt!>-WMRZ(6D_!hwOcM2A`xM~nVbR3DGSqw&Mu`rz!vw&(h|XXYtaDs*R} z^>KmDuFGGEE+Z3oS7KAvZN10uwF61h%Jf=Sve|F6G3kxZcBlQ((EbJ(nOHbeQTbV! 
z*W%0${%@u?#hT~{kTv+x2E2*orfM!{O-+&5{4x+b0RG+&Sd?lO{&my!7&2Vau(MRD zE>uoq$I2!0uvboF35q%_%BIkniX9byZ<{`w@JoL0?{PMuQWuxed|uz>M$5TgNoJw4 zz!;Yp58thPNt|z--UnP zw+@R@Jhwc5^5n@|mv5o@`6#sG!=?Pp2UhL^lZ}3NVt3KPuSf85?UZ9U(H-}@8-w17 zwiU@VG*3=<$Gxr2uzRK#HxNUw26nE;#DSd^ z`)!j+|I{#Ez+V$R$9z^V6XSGP1PxyvFNgie3i{}H$zCVt4egOpUDAVuyo1hiK}PXp ze|Re0P6NLh!ID;$t#J>Xpjlj5q!&Xt&J;X=+Fjb!+~uiffB3w+OFrf2wYw0p+qp~r zX_dPWV$VN!Av3pgm;BT9yUUlQS1+?3&u0W&uH_xbZCl+lt-Epd=|6)q5&AcpkPP&4 zl7TK?k5L9%MD=o((U#}Yo#}JSH_=SgfFC<4hX+^iG~O6{za)p~&i zjX#axR+gj6dPUt2TjP5BwX6X4Z~!szz;_yOrzgW=;P4fo?CM%}iVjvLap52?;}lQe ze?^|58aa{>K73_v>PACI3w}QW$vxyI)u!!dOJ8wtiBfln*-J=ia_RbhRGQ4EP+ne^ z+(65cLvT&}YOAs&of>>4f}2?$>_P_RiVGK#D#LYN%c85JU*v7>!YXFmR&4O)hD)}i zEQ$fbM_fr*3tE(qEqThi4KHF!7Q za%ju~{@AJAVEEz!fvs!G$^%y;G)D~h6Q?GFVNwhQ4rMPx&85j;9xqp;{`MUtpujhs z+6$I7p+4}Fa{#gETnFuoku0yW;ZRcYp+KNTBU%~k+38L?Q*==EXiORn#RlC*ARq}{#(A8)(b1piVxY;3v|lxg-CYOf-lF}0s!H&ybge7?Kf>c7^;BS zA8$IeiBvSIN^kzo{$LD$IF*l5x>98yOVM*ERQxXli)^x@6@e zEJeaHS!=lp26Sy_bp%piQb=3AQrEP#g46kk5D*$DIoh~V*g$e90qUw0R%2#jL%cRU z!WSdi)=qz(FN`Gi6HPeA@?zE0^R_J0XpGa?EQ}?2xQ6++a4Ms_{=%jzlefbEh~PGF zuDE1&!R4+}69(UH!0Wx47Sox(7?V2uqi8gI;&Hf?_Pxjf9WR9eA*uPx-sC7@#0by1 zKsAVa+J}}?8tG2B@=((oE^9Cyhh>m_a_}`5u!iM~3nHb8*coY+!8smme!~T>Vc8VJ zMs*m0rRD(Fu#cCmsjzc*At{1|&L$@CP!(77!8~Cuc|R-stRrMS_igErtFCAlE)SJCvLn*fDDN3l5yF_ zU%62j+^}6ThJv@eDRx)_Fc22a#0A?m4N*%&Oa?X8`QV1_67rcoxJvedX@|)a%k8im zwoB+?6BKqjIs{RUo=ei=2uPBaJmFMFWs3ms(ovIHI%2!zaV&NJ)Wy*=IY%JQ-LPG5 zrm|XBj7=o8Ul1wG4cj&I%wf9>r^+ZC7=doTxlQK0C8?Fu9$DUGfg zcnLOCVgxsAS7cL2YW}hhleSFB(GlBi+hDtaAzM5q-=F9@PB(0q;fxC+fxw%biGbjZ z8@9`^DTWOP#F3%qg6%SoOTtANWi4F81wYGGAdeq9gL1=mS>}Ww;V0=JQqb}1hV3%^ zN~@#%_4M-`9Ne&7jyEnma5aWTsG$~iaw9gz5l3v7V^je4KW$iSfRUNyM%nE4|S@VQ9Dd#W7!V7VY_4y zY=ZvSE=SKLX>kN3Nz1DdCd3u{7AzDizRT{mZL`+hV2sa z8RxQESBy;r+pS^WE}07(;B~iV!LDvC3o)Fu9388 zEH2otc@Uh+Ou>N$rZD?@$+R5z?UKiHtvuh7ncKcyfoZV^bHEn`iZ0l$K;ptrX>QoA z$fl6g{AE9*wC1*N_sDs`cJY{eXxE@9j{9~Q&bT0AL{Y3K77E_MGX*znmtj*38xDvg 
zL(K)-Wggds3m5bOlUCHf8taijKj4P#vdjrV!cWpgfbbBEMg)%gb{T$U^)9Jzt-;X1 zATXXDxM8~-Z(MlrP(iJc#Sz=(7!?DA1J=mVb8D0J*e$FnA-oauOMmkW>z95&>ZCo(z5eY@mw1#j-yu7Pl=C3e7e zO+(az?UF%_z>^!cOUP&X;8K+fN=B&JVY7TGYyuL>yC%R5+a;64Cg_jta`ar17Du4$ z??qhpQi(imSx7w$w#zdqRJabQi=!7vL#l-#*vyGGKYvAAHn=0VPHDs$VnD{#dnvr_XU+d9jej3F4v z;2EFz?6_}NU|Q_K9PmYfq6@Ywkht(unj5w&vMD4rf7zRAzA$Oeao_II^MLK*F?m1L zK~rw~b{WpNAi}j;nqg(3!3A%~xh=&m@hAvKY?onE3>yxJBSXyv+hrb?gp1Joc5}(f zOJa>?9kE@OIUz{+NxBFC9`>c<*KOY}!>_FNBlT@y$TnB1t(Q7tyBu#^c<@kx(Z#Jp zo+S1-V!IrpVt{aJ*cfQmtxaYKT*yJf?Mwz<-a?r0m3MvncJZ{+z<8p{iDKMsaN0w6 zNwnbnT3?(;{9z3EzM%AQ#wb6Se~lirdp#VD+lGU7C3A(ps(9{(ghm;1D}@0`mlE-z zz$Az=YPRgp{5zWPLqU`+fNzyfGEqr1smTr{OT9(<^_Hn(>Di!OczXlx4x$Mm-UQKv zp?!%0iZ6X{Kyr63D;oa1ZXi#C0{!ob=Lo7f=Bk)F0*2LcK()Y@EDgoM zl_$d`7gcN{S`9IyT=<9)4SvL@mtt5JtC%2i^ysD7sg+(&Ra7uKtJ6WizRvZwAd$UX zxdhQcQMY)qsS8xlm)%gGy-hvnb;m<=g?nd=E`BEkNcm+fc=&*m~J6!gb>(*`C+k+!uN;2cNe^Yj5+t!ws-8x@CDYII2m9Yk&+y(c0Su837$ZCenYOiLAORSbY?hrNUv)RHz z;;{Xsm^;c8FQ~w~#mXIrA!`dpat#oAUG|0%eN+s-*)?SFYreGu;#(blO|?V*utN-` z!y+4W^MV&<5bpVzaELJMP(I|^lv;4Tfw(wT($6ZX2JnI~YIRY00aj%s%Go7|= zAf~VRxkOzee5uXq7i8BY)pvkEOpbJu8L}}D~teDW0u96^H6cR469IFq$z6%~KV=;EC8r&u*9Z-CT9f1dmB{Y&d z*pFA*ZAK=Mn)IC^sJx&oyI+=w4Q|Y-$PzrJWrqJ=a+#y(V{#;M7Wl$0kX*>_VCZTX z`5IQf8Z%ERBmp*Sc{mDY0=dZx%D~^kD!_hWgfnnkM1xwm??x7xeXA3J)V6VnNyv8Pidq;f+F|6-4FXCR8nofU0V=5~7NqH;UA z7?t~Ir`XxE|uYafJygkk3YKj#jxvacd*loFInHaxI3O(-*slR)f=#{ zM(kD<+3*MRa4;iz4ADK&ZcTf`$!OdecF*)C|3t_PpwyjY}a4O22>vUD%$CF(_%*6ff_%TH^1w~RX)3g=GLYE_%b*UzbHFMDf=KkKvq>ke|;Wantkj6^g7XK ztcc%Bm!N&0XOY{j-hTgstA{T}>CraX3H$%=C~Y56e;oB{f}trwx@c_HwL_Iu0H2Fs zKR=&%5A^}g;dK6yn0OMIxdZlhMmy8(ooOdye-gLh@M_~=ROO#GpnN6DQbkgcnELk#&QZ)9GOm~a$|EnfOROEyvgWYWBp*m&w`$Gd z*KYq%VDP9lgQT<)b$7IIMF(zjstRs2s*;Em{#an{I?9}VJt(qwon)YbW0&o5BtI*7 zR%;NyyC-V!ddj59L`f+tvUt6gMHi*!?-Nw_KT(4>Q09ypr1iJR-VIvzTr^0A3Z%gs zDU(JG=B|#P+as*CJ6tPlxig1 zAEsOyRhai`2gF2wSUX)89paG`veW4h9gUC6Ey7;0Xkk<$X)%)$gDZ-e%vy_NhVc0a zu%(T0r`hQa2Kkyr-qK?Jll{2CCDKS<#Q%I){Kbm!L|gdzi171K;pgjwpRX5wzCrl; 
zM&ak1;N6$Oe1F)V=Ik8dmRNWcMzWh|GU=ZhCfC_MM3#N=W3SOXR)ClJkyOdG#!vIq zji1j83PS&|i9Y&#)e*=(9L%qI1d=YuoXkAn=u19t9)ScV@`QN=92>~z%_9(}Ay0fi zCs({xfZ0`I`WV@fkvrHAN~ozJaEZmDt(RoTHw2)Cj3S?z2Q^|=%-rsEpBqE1nBDD_ z=&|0%OzL$$M2fk%kYSi20>8^)HVhfB`JjjKvLPcKhxxE%iut$^Vpt;*y1TlB&@322 z#he^;mNyZ2H8yt=7uHA+MWQa|<3@;K%|}K>nC9l>A|n#NJ?)4g<0qpcKQJ?Lu_KO9 zF((%pkqpGl$VL4OnPNU}gaX-7kt$g`T52r5bm>*Fn5D|jXgf~eplajH(s%BKJ!lVJ zzq>I&OEh1-H~z}K@VgP1itpUXR}aS;)DdR^!Yi&}Y(JhJr?+~zeY-k0bJW?hJ;!LF zbabY(Gwh!njnAwezA=JlBQSMKSv*GkXrN*?t6^^JKAiJ%qluR+Be0x2gU+~{WDGnP z!Ls3TWRqCbhzZjve)O{>irGJqGxIvz*>J~)OH}jQW@AIMO+{m2Z22b%S0-a4AJ2JO zHS^@DA`-my^; z`K+~y=mWMYq7SePIjRWfcpDXwYQR!O`NwTllz*J%sG5pM+Ots+`K+~y=mWMYq7SeP zIjRVk7&a;*#fYVf@{il9DE~OiQK6!5MxcpBT+2)o(ouD2l{`R{Rq?5uyC1AJ7R#hW zZDJpuMnOpkm@KRxm(aeDtx12`REhSb{UD%SKR zPc;2I*pF8xKpe%BO3wZp{pKPuG5Ey>R6hZhM-Y&Z?(~`%s`7F#^QMyvO7cEBJ44iDR5WgmMn%(EadR|=0I?tk zM`NkDI~q$xW<5@hhAfeE=h%X(I|xsLh{>Cdv!e;~k%{Vk4d7q}D@Wt$W)}7O%4RMS zA>s-^DHoXJNEmjOlE_q=lT2hz4u~jIt95cfmH-h|bqD0AI6ELmMJ6Iw2c#m(Y|6=T z7!pKG-gJB%keyFBQBkxaubdoMgZZ0wrxy34GZmzM4BS9?6$&uL@ z#!801=f#tYWF;VseO8EESoE++r0&)iqXeB#4+CViixv#{uh!RTRH=k5wEA4^i=S z+(pIHky({ntm5dp#wvjb5tl=(5-GXIDv^>*WNxvFtW!A0Duw_Nl|!szskp}~mWoV7 zZn27?>Kdy!5=2Z6v5KeTn#3CFfFh@m)2rUe;Tyk#;GBcT_ z{Z<1i1q>D2nZex?Vwwmb$=jY#ZC5@>cZF@CTS;-+T=Zr$xXN$My>d?85XqB{{_ zRx+`9IWJ{6vpsizW9Sgkg(2qcyzI;*;#*pUg~4btLHq3v!?p05vXaj9y2H-otvlUu zZ>!TAk4NKHz9hQz_b-RbvKW_c*h^2(%XjX5`?C1|z3k%7J$ibT1nJJvXuM Date: Mon, 4 May 2020 12:50:36 -0700 Subject: [PATCH 17/57] minor cleanup --- flow/controllers/imitation_learning/imitating_network.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 0ea5c32c8..3b1e826da 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -127,10 +127,6 @@ def define_forward_pass(self): # build forward pass and get the tensor for 
output of last layer network_output = build_neural_net(self.obs_placeholder, output_size=output_size, scope='network_scope', n_layers=self.num_layers, size=self.size) - # unpack array of array into just array - # if self.stochastic: - # # network_output = network_output[0] - # parse the mean and covariance from output if stochastic, and set up distribution if self.stochastic: # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution From b4f844fe870220e13e0ced5f7c1709678ad7ee7f Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sat, 23 May 2020 13:12:40 -0700 Subject: [PATCH 18/57] Bug fixes for stochastic policies --- .../imitation_learning/imitating_network.py | 43 +++++++++++++------ flow/controllers/imitation_learning/run.py | 4 +- .../controllers/imitation_learning/trainer.py | 5 ++- flow/controllers/imitation_learning/utils.py | 15 ++++--- .../imitation_learning/utils_tensorflow.py | 2 +- 5 files changed, 44 insertions(+), 25 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 3b1e826da..04a0a4ce4 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -46,8 +46,9 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.load_network(load_path) else: - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() + print("HERE") + self.build_network() + # init replay buffer if self.training: @@ -57,9 +58,14 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r # set up policy variables, and saver to save model. 
Save only non-training variables (weights/biases) if not load_existing: - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.policy_vars = [v for v in tf.all_variables() if 'network_scope' in v.name and 'train' not in v.name] self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + # tensorboard + self.writer = tf.summary.FileWriter('/Users/akashvelu/Documents/Random/tensorboard/', tf.get_default_graph()) + # track number of training steps + self.train_steps = 0 + def build_network(self): """ Defines neural network for choosing actions. Defines placeholders and forward pass @@ -69,10 +75,11 @@ def build_network(self): self.define_forward_pass() # set up training operation (e.g. Adam optimizer) if self.training: - with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + with tf.variable_scope('train'): self.define_train_op() + def load_network(self, path): """ Load tensorflow model from the path specified, set action prediction to proper placeholder @@ -91,11 +98,12 @@ def load_network(self, path): if self.stochastic: # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution means = network_output[:, :self.action_dim] - cov_diags = network_output[:, self.action_dim:] + log_vars = network_output[:, self.action_dim:] + vars = tf.math.exp(log_vars) # set up action distribution (parameterized by network output) # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians - self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=cov_diags) + self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=vars, name='Prediction Distribution') # action is a sample from this distribution; one sample output per Gaussian contained in self.dist self.action_predictions = self.dist.sample() else: @@ -109,7 +117,7 @@ def define_placeholders(self): # placeholder for 
observations (input into network) self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="observation", dtype=tf.float32) - # if training, define placeholder for labels (supervised leearning) + # if training, define placeholder for labels (supervised learning) if self.training: self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) @@ -130,12 +138,14 @@ def define_forward_pass(self): # parse the mean and covariance from output if stochastic, and set up distribution if self.stochastic: # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution - means = network_output[:, :self.action_dim] - cov_diags = network_output[:, self.action_dim:] + + means, log_vars = tf.split(network_output, num_or_size_splits=2, axis=1) + vars = tf.math.exp(log_vars) # set up action distribution (parameterized by network output) # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians - self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=cov_diags) + with tf.variable_scope('Action_Distribution'): + self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=vars) # action is a sample from this distribution; one sample output per Gaussian contained in self.dist self.action_predictions = self.dist.sample() @@ -154,12 +164,17 @@ def define_train_op(self): if self.stochastic: # negative log likelihood loss for stochastic policy - log_likelihood = self.dist.log_prob(true_actions) - self.loss = -tf.reduce_mean(log_likelihood) + self.loss = self.dist.log_prob(true_actions) + self.loss = tf.negative(self.loss) + self.loss = tf.reduce_mean(self.loss) + summary_name = 'Loss_tracking_NLL' else: # MSE loss for deterministic policy self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + summary_name = 'Loss_tracking_MSE' + + self.loss_summary = 
tf.summary.scalar(name=summary_name, tensor=self.loss) # Adam optimizer self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) @@ -169,7 +184,9 @@ def train(self, observation_batch, action_batch): """ # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + _, loss, summary = self.sess.run([self.train_op, self.loss, self.loss_summary], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + self.writer.add_summary(summary, global_step=self.train_steps) + self.train_steps += 1 def get_accel_from_observation(self, observation): """ diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 17434d63e..265991e20 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -41,7 +41,7 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument('--ep_len', type=int, default=5000) - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy parser.add_argument('--n_iter', type=int, default=5) parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration @@ -50,7 +50,7 @@ def main(): parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step - parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned + parser.add_argument('--num_layers', type=int, default=3) # number of hidden layers, of policy to be 
learned parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning parser.add_argument('--replay_buffer_size', type=int, default=1000000) diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 801c7517f..b6d04ed25 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -5,7 +5,7 @@ import gym import os from flow.utils.registry import make_create_env -from env_configs.singleagent_straight_road import flow_params +from examples.exp_configs.rl.multiagent.multiagent_straight_road import flow_params from imitating_controller import ImitatingController from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController @@ -111,7 +111,8 @@ def collect_training_trajectories(self, itr, batch_size): """ print("\nCollecting data to be used for training...") - trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0, v_des=self.params['v_des']) + max_decel = flow_params['env'].additional_params['max_decel'] + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0, v_des=self.params['v_des'], max_decel=max_decel) return trajectories, envsteps_this_batch diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 198a2a4ad..a55f32c97 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -11,7 +11,7 @@ """ Class agnostic helper functions """ -def sample_trajectory_singleagent(env, controllers, action_network, 
max_trajectory_length, use_expert, v_des): +def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des, max_decel): """ Samples a trajectory for a given vehicle using the actions prescribed by specified controller. Args: @@ -68,8 +68,9 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto for i in range(action_dim): # if max number of RL vehicles is not reached, insert dummy values if i >= len(vehicle_ids): - rl_actions.append(0.0) - actions_expert.append(0.0) + ignore_accel = -2 * max_decel + rl_actions.append(ignore_accel) + actions_expert.append(ignore_accel) else: imitator = controllers[vehicle_ids[i]][0] expert = controllers[vehicle_ids[i]][1] @@ -224,7 +225,7 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length -def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert, v_des=15): +def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert, v_des=15, max_decel=4.5): """ Samples trajectories to collect at least min_batch_timesteps steps in the environment @@ -248,7 +249,7 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m if multiagent: trajectory, traj_length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) else: - trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) + trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des, max_decel) trajectories.append(trajectory) @@ -256,7 +257,7 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m return 
trajectories, total_envsteps -def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert, v_des=15): +def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert, v_des=15, max_decel=4.5): """ Collects a fixed number of trajectories. @@ -280,7 +281,7 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le if multiagent: trajectory, length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) else: - trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) + trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des, max_decel) trajectories.append((trajectory, length)) diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py index 1636da035..70df79693 100644 --- a/flow/controllers/imitation_learning/utils_tensorflow.py +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -23,7 +23,7 @@ def build_neural_net(input_placeholder, output_size, scope, n_layers, size, acti output_placeholder: the result of pass through Neural Network """ output_placeholder = input_placeholder - with tf.variable_scope(scope): + with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): for _ in range(n_layers): output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation,name='Output_Layer') From db0442b45d1145cd8097d0fc446d41c89b81b599 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Mon, 25 May 2020 22:01:13 -0700 Subject: [PATCH 19/57] Ported to Keras, initial implementation of loading to RLLib --- .../imitating_controller.py | 2 +- .../imitation_learning/imitating_network2.py | 131 
++++++++++++++++++ .../imitation_learning/keras_utils.py | 47 +++++++ .../imitation_learning/ppo_model.py | 69 +++++++++ .../controllers/imitation_learning/trainer.py | 6 +- 5 files changed, 251 insertions(+), 4 deletions(-) create mode 100644 flow/controllers/imitation_learning/imitating_network2.py create mode 100644 flow/controllers/imitation_learning/keras_utils.py create mode 100644 flow/controllers/imitation_learning/ppo_model.py diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 70c483596..935a66831 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -58,4 +58,4 @@ def get_accel(self, env): return action[ind] # in other cases, acceleration is the output of the network - return action[0] + return action diff --git a/flow/controllers/imitation_learning/imitating_network2.py b/flow/controllers/imitation_learning/imitating_network2.py new file mode 100644 index 000000000..f750fbad6 --- /dev/null +++ b/flow/controllers/imitation_learning/imitating_network2.py @@ -0,0 +1,131 @@ +import numpy as np +import tensorflow as tf +from utils_tensorflow import * +from keras_utils import * +import tensorflow_probability as tfp +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer + + +class ImitatingNetwork2(): + """ + Class containing neural network which learns to imitate a given expert controller. 
+ """ + + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, policy_scope='policy_vars', load_existing=False, load_path=''): + + """ + Initializes and constructs neural network + + Args: + sess: Tensorflow session variable + action_dim: dimension of action space (determines size of network output) + obs_dim: dimension of observation space (size of network input) + num_layers: number of hidden layers (for an MLP) + size: size of each layer in network + learning_rate: learning rate used in optimizer + replay_buffer_size: maximum size of replay buffer used to hold data for training + training: boolean, whether the network will be trained (as opposed to loaded) + stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy + policy_scope: variable scope used by Tensorflow for weights/biases + load_existing: boolean, whether to load an existing tensorflow model + load_path: path to directory containing an existing tensorflow model + + """ + + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + self.stochastic=stochastic + + print("INNNNNITITTTTT") + + # load network if specified, or construct network + if load_existing: + self.load_network(load_path) + + else: + self.build_network() + self.compile_network() + + + # init replay buffer + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + + def build_network(self): + """ + Defines neural network for choosing actions. 
Defines placeholders and forward pass + """ + # setup placeholders for network input and labels for training, and hidden layers/output + if self.stochastic: + self.model = build_neural_net_stochastic(self.obs_dim, self.action_dim, self.num_layers, self.size) + else: + self.model = build_neural_net_deterministic(self.obs_dim, self.action_dim, self.num_layers, self.size) + + + def compile_network(self): + loss = get_loss(self.stochastic) + self.model.compile(loss=loss, optimizer='adam') + + + def train(self, observation_batch, action_batch): + """ + Executes one training step for the given batch of observation and action data + """ + # reshape action_batch to ensure a shape (batch_size, action_dim) + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + batch_size = action_batch.shape[0] + self.model.fit(observation_batch, action_batch, batch_size=batch_size, epochs=1, steps_per_epoch=1) + + def get_accel_from_observation(self, observation): + """ + Gets the network's acceleration prediction based on given observation/state + """ + + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + # "batch size" is 1, so just get single acceleration/acceleration vector + network_output = self.model.predict(observation) + if self.stochastic: + mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] + var = np.exp(2 * log_std) + action = np.random.multivariate_normal(mean[0], var) + return action + else: + return network_output + + def get_accel(self, env): + """ + Get network's acceleration prediction(s) based on given env + """ + observation = env.get_state() + return self.get_accel_from_observation(observation) + + + def add_to_replay_buffer(self, rollout_list): + """ Add rollouts to replay buffer """ + + self.replay_buffer.add_rollouts(rollout_list) + + + def sample_data(self, batch_size): + """ 
Sample a batch of data from replay buffer """ + + return self.replay_buffer.sample_batch(batch_size) + + def save_network(self, save_path): + """ Save network to given path and to tensorboard """ + + self.model.save(save_path) + # tensorboard + + # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py new file mode 100644 index 000000000..429c75bea --- /dev/null +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -0,0 +1,47 @@ +import tensorflow as tf +import tensorflow_probability as tfp +from tensorflow.keras import Input +from tensorflow.keras.layers import Dense + +def build_neural_net_deterministic(input_dim, action_dim, n_layers, size): + input_layer = Input(shape=(input_dim, )) + curr_layer = input_layer + + for _ in range(n_layers): + dense = Dense(size, activation="tanh") + curr_layer = dense(curr_layer) + output_layer = Dense(action_dim, activation=None)(curr_layer) + model = tf.keras.Model(inputs=input_layer, outputs=output_layer, name="policy_network") + + return model + +def build_neural_net_stochastic(input_dim, action_dim, n_layers, size): + input_layer = Input(shape=(input_dim, )) + curr_layer = input_layer + + for _ in range(n_layers): + dense = Dense(size, activation="tanh") + curr_layer = dense(curr_layer) + + out = Dense(2 * action_dim, activation=None)(curr_layer) + model = tf.keras.Model(inputs=input_layer, outputs=out, name="policy_network") + + return model + +def get_loss(stochastic): + if stochastic: + return negative_log_likelihood_loss + else: + return tf.keras.losses.mean_squared_error + +def negative_log_likelihood_loss(y, distribution_params): + assert distribution_params.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" + action_dim = distribution_params.shape[1]//2 + means, log_stds = distribution_params[:, :action_dim], distribution_params[:, action_dim:] + stds = 
tf.math.exp(log_stds) + variances = tf.math.square(stds) + dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) + loss = dist.log_prob(y) + loss = tf.negative(loss) + loss = tf.reduce_mean(loss) + (0.5 * tf.norm(variances)) + return loss diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py new file mode 100644 index 000000000..f5c022cb8 --- /dev/null +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -0,0 +1,69 @@ +import numpy as np + +from ray.rllib.models.tf.misc import normc_initializer +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from ray.rllib.utils.framework import get_activation_fn, try_import_tf + +tf = try_import_tf() + + + +class PPONetwork(TFModelV2): + + def __init__(self, obs_space, action_space, num_outputs, model_config, name): + + super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) + self.setup_model(obs_space, action_space, model_config, num_outputs, None) + + def setup_model(self, obs_space, action_space, model_config, num_outputs, load_path): + if load_path: + try: + loaded_policy_model = tf.keras.load_model(load_path) + inp_layer = loaded_policy_model.input + curr_layer = loaded_policy_model.layers[-2].output + + except Exception as e: + print("Error in loading existing model specified by load_path") + raise e + else: + activation = get_activation_fn(model_config.get("fcnet_activation")) + hiddens = model_config.get("fcnet_hiddens", []) + vf_share_layers = model_config.get("vf_share_layers") + + inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") + curr_layer = inp_layer + + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) + i += 1 + + output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) + + if not vf_share_layers: + 
curr_layer = inp_layer + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), activation=activation)(curr_layer) + i += 1 + + output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) + + self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) + self.register_variables(self.base_model.variables) + + + def forward(self, input_dict, state, seq_lens): + policy_out, value_out = self.base_model(input_dict["obs_flat"]) + self.value_out = value_out + return policy_out, state + + def value_function(self): + return tf.reshape(self.value_out, [-1]) + + def import_from_h5(self, import_file): + self.setup_model(self, self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) + + + + diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index b6d04ed25..940feffb8 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -7,7 +7,7 @@ from flow.utils.registry import make_create_env from examples.exp_configs.rl.multiagent.multiagent_straight_road import flow_params from imitating_controller import ImitatingController -from imitating_network import ImitatingNetwork +from imitating_network2 import ImitatingNetwork2 from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import SumoCarFollowingParams @@ -51,9 +51,9 @@ def __init__(self, params): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic']) + self.action_network = ImitatingNetwork2(self.sess, 
self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic']) - tf.global_variables_initializer().run(session=self.sess) + # tf.global_variables_initializer().run(session=self.sess) # controllers setup v_des = self.params['v_des'] # for FollowerStopper From ed065b3112432ccaae1e4f0d872a578dd4b61773 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 13:20:29 -0700 Subject: [PATCH 20/57] Bug fixes for starting training from imitation model --- .../rl/multiagent/multiagent_straight_road.py | 3 +- examples/train.py | 55 ++++++++++++- .../imitation_learning/ppo_model.py | 80 ++++++++++++------- flow/envs/multiagent/i210.py | 49 ++++++------ 4 files changed, 131 insertions(+), 56 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index ec71a2f42..00b60ff0b 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -58,7 +58,8 @@ 'local_reward': True, 'lead_obs': True, # whether to reroute vehicles once they have exited - "reroute_on_exit": True + "reroute_on_exit": True, + "control_range": [500, 2300] }) diff --git a/examples/train.py b/examples/train.py index 1689d846f..361a1f277 100644 --- a/examples/train.py +++ b/examples/train.py @@ -184,7 +184,7 @@ def setup_exps_rllib(flow_params, config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [32, 32, 32]}) + config["model"].update({"fcnet_hiddens": [12, 12]}) config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.999 # discount rate config["use_gae"] = True @@ -252,6 +252,56 @@ def on_episode_end(info): register_env(gym_name, create_env) return alg_run, gym_name, config +def train_rllib_with_imitation(submodule, flags): + 
"""Train policies using the PPO algorithm in RLlib.""" + import ray + from flow.controllers.imitation_learning.ppo_model import PPONetwork + from ray.rllib.models import ModelCatalog + + flow_params = submodule.flow_params + flow_params['sim'].render = flags.render + policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) + policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) + policies_to_train = getattr(submodule, "policies_to_train", None) + + alg_run, gym_name, config = setup_exps_rllib( + flow_params, flags.num_cpus, flags.num_rollouts, flags, + policy_graphs, policy_mapping_fn, policies_to_train) + + ModelCatalog.register_custom_model("Imitation_Learning", PPONetwork) + + config['num_workers'] = flags.num_cpus + config['env'] = gym_name + config['model']['custom_model'] = "Imitation_Learning" + + # create a custom string that makes looking at the experiment names easier + def trial_str_creator(trial): + return "{}_{}".format(trial.trainable_name, trial.experiment_tag) + + if flags.local_mode: + ray.init(local_mode=True) + else: + ray.init() + + exp_dict = { + "run_or_experiment": alg_run, + "name": gym_name, + "config": config, + "checkpoint_freq": flags.checkpoint_freq, + "checkpoint_at_end": True, + 'trial_name_creator': trial_str_creator, + "max_failures": 0, + "stop": { + "training_iteration": flags.num_iterations, + }, + } + date = datetime.now(tz=pytz.utc) + date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") + s3_string = "s3://i210.experiments/i210/" \ + + date + '/' + flags.exp_title + if flags.use_s3: + exp_dict['upload_dir'] = s3_string + tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" @@ -278,6 +328,7 @@ def trial_str_creator(trial): ray.init(local_mode=True) else: ray.init() + exp_dict = { "run_or_experiment": alg_run, "name": gym_name, @@ -472,7 +523,7 @@ def main(args): # Perform the training 
operation. if flags.rl_trainer.lower() == "rllib": - train_rllib(submodule, flags) + train_rllib_with_imitation(submodule, flags) elif flags.rl_trainer.lower() == "stable-baselines": train_stable_baselines(submodule, flags) elif flags.rl_trainer.lower() == "h-baselines": diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index f5c022cb8..643fd5670 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -1,8 +1,10 @@ import numpy as np - +import json +import h5py from ray.rllib.models.tf.misc import normc_initializer from ray.rllib.models.tf.tf_modelv2 import TFModelV2 from ray.rllib.utils.framework import get_activation_fn, try_import_tf +# from flow.controllers.imitation_learning.keras_utils import * tf = try_import_tf() @@ -13,32 +15,25 @@ class PPONetwork(TFModelV2): def __init__(self, obs_space, action_space, num_outputs, model_config, name): super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) - self.setup_model(obs_space, action_space, model_config, num_outputs, None) - - def setup_model(self, obs_space, action_space, model_config, num_outputs, load_path): - if load_path: - try: - loaded_policy_model = tf.keras.load_model(load_path) - inp_layer = loaded_policy_model.input - curr_layer = loaded_policy_model.layers[-2].output - - except Exception as e: - print("Error in loading existing model specified by load_path") - raise e - else: - activation = get_activation_fn(model_config.get("fcnet_activation")) - hiddens = model_config.get("fcnet_hiddens", []) - vf_share_layers = model_config.get("vf_share_layers") - - inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") - curr_layer = inp_layer + self.setup_model(obs_space, action_space, model_config, num_outputs, '/Users/akashvelu/Desktop/follower_stopper1.h5') + self.register_variables(self.base_model.variables) - i = 1 - for size 
in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) - i += 1 + def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): + + activation = get_activation_fn(model_config.get("fcnet_activation")) + hiddens = model_config.get("fcnet_hiddens", []) + vf_share_layers = model_config.get("vf_share_layers") - output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) + inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") + curr_layer = inp_layer + + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) + i += 1 + + + output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) if not vf_share_layers: curr_layer = inp_layer @@ -50,7 +45,36 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, load_p output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) - self.register_variables(self.base_model.variables) + + + if imitation_h5_path: + # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) + imitation_inp = tf.keras.layers.Input(shape=(3,), name="imitation_inp") + curr_imitation_layer = imitation_inp + i = 1 + for size in hiddens: + curr_imitation_layer = tf.keras.layers.Dense(size, name="imitation_hidden_layer_{}".format(i), activation=activation)(curr_imitation_layer) + i += 1 + + imitation_output_layer = tf.keras.layers.Dense(num_outputs, name="imitation_output_layer", activation=None)(curr_imitation_layer) + imitation_model = tf.keras.Model(imitation_inp, [imitation_output_layer]) + + 
imitation_model.load_weights(imitation_h5_path) + self.register_variables(imitation_model.variables) + + for i in range(len(hiddens)): + imitation_layer = imitation_model.layers[i + 1] + base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) + base_model_layer = self.base_model.get_layer(base_model_layer_name) + base_model_layer.set_weights(imitation_layer.get_weights()) + + imitation_layer = imitation_model.layers[-1] + base_model_layer_name = 'policy_output_layer' + base_model_layer = self.base_model.get_layer(base_model_layer_name) + base_model_layer.set_weights(imitation_layer.get_weights()) + + + def forward(self, input_dict, state, seq_lens): @@ -63,7 +87,3 @@ def value_function(self): def import_from_h5(self, import_file): self.setup_model(self, self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) - - - - diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index a6e39cdec..14e34d927 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -68,6 +68,7 @@ def __init__(self, env_params, sim_params, network, simulator='traci'): self.num_enter_lanes = 5 self.entrance_edge = "119257914" self.exit_edge = "119257908#3" + self.control_range = env_params.additional_params['control_range'] self.leader = [] @property @@ -126,17 +127,18 @@ def get_state(self): if self.lead_obs: veh_info = {} for rl_id in self.k.vehicle.get_rl_ids(): - speed = self.k.vehicle.get_speed(rl_id) - lead_id = self.k.vehicle.get_leader(rl_id) - if lead_id in ["", None]: - # in case leader is not visible - lead_speed = SPEED_SCALE - headway = HEADWAY_SCALE - else: - lead_speed = self.k.vehicle.get_speed(lead_id) - headway = self.k.vehicle.get_headway(rl_id) - veh_info.update({rl_id: np.array([speed / SPEED_SCALE, headway / HEADWAY_SCALE, - lead_speed / SPEED_SCALE])}) + if self.k.vehicle.get_x_by_id(rl_id) < self.control_range[1] and self.k.vehicle.get_x_by_id(rl_id) > self.control_range[0]: + speed = 
self.k.vehicle.get_speed(rl_id) + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id in ["", None]: + # in case leader is not visible + lead_speed = SPEED_SCALE + headway = HEADWAY_SCALE + else: + lead_speed = self.k.vehicle.get_speed(lead_id) + headway = self.k.vehicle.get_headway(rl_id) + veh_info.update({rl_id: np.array([speed / SPEED_SCALE, headway / HEADWAY_SCALE, + lead_speed / SPEED_SCALE])}) else: veh_info = {rl_id: np.concatenate((self.state_util(rl_id), self.veh_statistics(rl_id))) @@ -153,24 +155,25 @@ def compute_reward(self, rl_actions, **kwargs): if self.env_params.additional_params["local_reward"]: des_speed = self.env_params.additional_params["target_velocity"] for rl_id in self.k.vehicle.get_rl_ids(): - rewards[rl_id] = 0 - speeds = [] - follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) - if follow_speed >= 0: - speeds.append(follow_speed) - if self.k.vehicle.get_speed(rl_id) >= 0: - speeds.append(self.k.vehicle.get_speed(rl_id)) - if len(speeds) > 0: - # rescale so the critic can estimate it quickly - rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 - for speed in speeds]) / (des_speed ** 2) + if self.k.vehicle.get_x_by_id(rl_id) < self.control_range[1] and self.k.vehicle.get_x_by_id(rl_id) > self.control_range[0]: + rewards[rl_id] = 0 + speeds = [] + follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) + if follow_speed >= 0: + speeds.append(follow_speed) + if self.k.vehicle.get_speed(rl_id) >= 0: + speeds.append(self.k.vehicle.get_speed(rl_id)) + if len(speeds) > 0: + # rescale so the critic can estimate it quickly + rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2) else: speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids()) des_speed = self.env_params.additional_params["target_velocity"] # rescale so the critic can estimate it quickly reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - 
des_speed)) ** 2 for speed in speeds]) / (des_speed ** 2)) - rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids()} + rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids() if self.k.vehicle.get_x_by_id(rl_id) < self.control_range[1] and self.k.vehicle.get_x_by_id(rl_id) > self.control_range[0]} return rewards def additional_command(self): From cc0aa3276e1d52dc12e4059ff0abd225b5af8858 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 13:35:40 -0700 Subject: [PATCH 21/57] Minor cleanup --- examples/train.py | 5 ++++- .../imitation_learning/ppo_model.py | 19 ++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/examples/train.py b/examples/train.py index 361a1f277..8c65a68c1 100644 --- a/examples/train.py +++ b/examples/train.py @@ -253,7 +253,7 @@ def on_episode_end(info): return alg_run, gym_name, config def train_rllib_with_imitation(submodule, flags): - """Train policies using the PPO algorithm in RLlib.""" + """Train policies using the PPO algorithm in RLlib, with initiale policy weights from imitation learning.""" import ray from flow.controllers.imitation_learning.ppo_model import PPONetwork from ray.rllib.models import ModelCatalog @@ -268,10 +268,13 @@ def train_rllib_with_imitation(submodule, flags): flow_params, flags.num_cpus, flags.num_rollouts, flags, policy_graphs, policy_mapping_fn, policies_to_train) + # Register custom model ModelCatalog.register_custom_model("Imitation_Learning", PPONetwork) config['num_workers'] = flags.num_cpus config['env'] = gym_name + + # set model to the custom model for run config['model']['custom_model'] = "Imitation_Learning" # create a custom string that makes looking at the experiment names easier diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 643fd5670..68e7f13dc 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ 
-15,26 +15,32 @@ class PPONetwork(TFModelV2): def __init__(self, obs_space, action_space, num_outputs, model_config, name): super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) + + # setup model with weights loaded in from model in h5 path self.setup_model(obs_space, action_space, model_config, num_outputs, '/Users/akashvelu/Desktop/follower_stopper1.h5') + # register variables for base model self.register_variables(self.base_model.variables) + def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): activation = get_activation_fn(model_config.get("fcnet_activation")) hiddens = model_config.get("fcnet_hiddens", []) vf_share_layers = model_config.get("vf_share_layers") + # set up model inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") curr_layer = inp_layer + # hidden layers and output for policy i = 1 for size in hiddens: curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) i += 1 - output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) + # set up value function if not vf_share_layers: curr_layer = inp_layer i = 1 @@ -44,12 +50,15 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) + # build model from layers self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) if imitation_h5_path: # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) - imitation_inp = tf.keras.layers.Input(shape=(3,), name="imitation_inp") + + # set up a model to load in weights from imitation network (without the training variables, e.g. 
adam variables) + imitation_inp = tf.keras.layers.Input(shape=obs_space.shape, name="imitation_inp") curr_imitation_layer = imitation_inp i = 1 for size in hiddens: @@ -59,9 +68,12 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat imitation_output_layer = tf.keras.layers.Dense(num_outputs, name="imitation_output_layer", activation=None)(curr_imitation_layer) imitation_model = tf.keras.Model(imitation_inp, [imitation_output_layer]) + # load weights from file into model imitation_model.load_weights(imitation_h5_path) + # register model variables (to prevent error) self.register_variables(imitation_model.variables) + # copy these weights into the base model (only the policy hidden layer and output weights) for i in range(len(hiddens)): imitation_layer = imitation_model.layers[i + 1] base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) @@ -74,9 +86,6 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat base_model_layer.set_weights(imitation_layer.get_weights()) - - - def forward(self, input_dict, state, seq_lens): policy_out, value_out = self.base_model(input_dict["obs_flat"]) self.value_out = value_out From 3a2e1359d1c37194351f6b54bd840ad6722fc279 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 13:38:12 -0700 Subject: [PATCH 22/57] Minor cleanup --- .../imitation_learning/ppo_model.py | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 68e7f13dc..49d354488 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -74,16 +74,20 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat self.register_variables(imitation_model.variables) # copy these weights into the base model (only the policy hidden layer and output weights) - for i in range(len(hiddens)): - 
imitation_layer = imitation_model.layers[i + 1] - base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) + try: + for i in range(len(hiddens)): + imitation_layer = imitation_model.layers[i + 1] + base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) + base_model_layer = self.base_model.get_layer(base_model_layer_name) + base_model_layer.set_weights(imitation_layer.get_weights()) + + imitation_layer = imitation_model.layers[-1] + base_model_layer_name = 'policy_output_layer' base_model_layer = self.base_model.get_layer(base_model_layer_name) base_model_layer.set_weights(imitation_layer.get_weights()) - - imitation_layer = imitation_model.layers[-1] - base_model_layer_name = 'policy_output_layer' - base_model_layer = self.base_model.get_layer(base_model_layer_name) - base_model_layer.set_weights(imitation_layer.get_weights()) + except Exception as e: + print("Error in loading weights from h5 file to this model") + raise e def forward(self, input_dict, state, seq_lens): From 288a1cf87e5216c56f6a8aeb0ea43eb058c108c1 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 16:54:10 -0700 Subject: [PATCH 23/57] Removed usage of rllib.utils.freamwork --- flow/controllers/imitation_learning/ppo_model.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 49d354488..c5a30fb1b 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -3,10 +3,9 @@ import h5py from ray.rllib.models.tf.misc import normc_initializer from ray.rllib.models.tf.tf_modelv2 import TFModelV2 -from ray.rllib.utils.framework import get_activation_fn, try_import_tf +# from ray.rllib.utils.framework import get_activation_fn, try_import_tf # from flow.controllers.imitation_learning.keras_utils import * - -tf = try_import_tf() +import tensorflow as tf @@ -24,7 +23,7 @@ def __init__(self, obs_space, 
action_space, num_outputs, model_config, name): def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): - activation = get_activation_fn(model_config.get("fcnet_activation")) + activation = model_config.get("fcnet_activation") hiddens = model_config.get("fcnet_hiddens", []) vf_share_layers = model_config.get("vf_share_layers") From c1db60af46d9e08751bc263c3de1138afd15d0a7 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 17:03:22 -0700 Subject: [PATCH 24/57] Changed location of h5 file --- .../model_files/follower_stopper1.h5 | Bin 0 -> 35456 bytes .../controllers/imitation_learning/ppo_model.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 flow/controllers/imitation_learning/model_files/follower_stopper1.h5 diff --git a/flow/controllers/imitation_learning/model_files/follower_stopper1.h5 b/flow/controllers/imitation_learning/model_files/follower_stopper1.h5 new file mode 100644 index 0000000000000000000000000000000000000000..45b46d582cb089fed0c240f42bbf47cf14e28397 GIT binary patch literal 35456 zcmeHP30xD$_YVk46%a4P8!1IZg>Y2}$?oRh&?@4MT8;n#QbV95D0otp3iZOHcvPy^ zs#vR{SOiISQ?=ED6*fZV|bCidZUhJXeX$zT)lSWMbUeO8ONE|mDcg8 zifp+iVVWXaqs&y(^*!hbn4QG@2Xi^l-_Un@t1$&o#kM>02soYtJ6@7Jd8$I4VsRXH zrH*V#fh^BxkBqm1)eBmgGuo*C-dWMbUnZiC}v3c@i8`=VfE0E99 zzK-e*7~v9NFDq#+2ypkcH(dBes7D9~r8+A|o8Zq&QK&Tvx{{@+oDcx_7>|eS0_tau zqJ5pnY-0M859f2FEezP#jKVVgl zO%pG`H^4V&h&C0OSr91jcv|{8WM-$TGN*%n(4F&G`$1RD^hj38H5wK<4}rVKNX9OE z40iVbriiYZ>A|4Nv}I+gl*xGsYK3-sX7*HuiA7yQw@qo}g2=LC7!5s)QH>GIb>fVl zB-bXVCxCvMba@st*%Wn-N;TL$Fpi!jMVrUgdC<#{YXbwA?wTxlwuWI$m8&!gx&jG_ zrk_pMDb-1tIqH-IHN7ZS$~d~VQBwADo60F;RF@+n8vSt>ZMIygmM5tg5wzJkj08=I znWI)}nVJ0qXa;$*Ryj?sr5z7lU{IrJUh{1JOQ8O*=;=-B&4BF;08ik*7-Pd|Sryn$e^3=n&4Nm)R>)ZkX*CQj zY#3NwV0ZA7+7L$@M_@7i;-byS=p=eh#qxKNXdRxY(totBU-*D}6~@5|@=^T8)*Xi(_T zpkV(%dT}X=WO*KFXPkvXE2n{C*h50u&(K%>gG2m-0{l^H0NYBpF=P1$gbWROl^@8A zq{z}JRjkYNR}A3~rEBCFnzU>=%OK2`c4Og-V&8 
znVCWt>2|trhC-XIOa{)JE`kH3A|;`*9=aWkJR?h`fTzg>=6FR958If*&^Z&<&W6_k zku~T7pb9IsB49}}i0=w4 z^M*Z#MZ5r0Hs)Rbd_EY&ZZILkSsTcw^32~S;qY6FtqTEeJ{9HF>w6jFfc9;Sadopx zz>YNDJ#^^{hC=g%z;diRs7H3B7A9;FUjr<=1p+?E&MOWBWyDtn%XK19kJeeoBxmUl z;wy*w(OS;>0oRN77v-h7ayQx+=Dv;_4;)Y$6VsjCacIK{UQh>K&~Gpyi}KW_D{b1o zn9Cd2x`Ppl;Xsz|>+oLzjOe<+ZdTGt5a8M&q;0wG#sILH4Y~^L*C^xWKU_llhih*d z@9Q|R_jS1Yhq!AA`h_Z6}f!wix4@ykBSOD!6xYKJYt_`m(SQ}iR4bPip zvatmf=)Q>~>}pzeWBYei^{YTepjR%;q$`epa_9fQ~@LBB(z@fGM<=Pi_ z2tXawAA|?mv8aspEZBWKUI=RVofO15%+n9xlLhq_jmOpF-#KinV?*aoWCsj>Plo{E zo!I~Be66i@`!`Fmv-&T|y=%*;)N)U1`uNwW%j5$5(kJgyx7D+$p8gZ5;Ukw*{7XLg z-1>RcM>igkdCTpobEmqH9k;K+6?@;qqIca8R`_lu1^dpD-xb`Tew=%QJmlhr-BuT3 z?UHv2Ucrwl)^Eqyt8 z|JM7$_>g$q*?l`zkk~@tpT7v&)f))>5_zv}B zfP(6HbqbXeT|dr$`7G5F@GcGUaB#^4{Xn@43k=VR|p8i2ptK|_i3pAx#R zUex)C1hL9C0e?gu)@OuVCu{ObsjtsD;?)#InAee8 zWOQg(VqAQ%A=Y=EesSeetZkhS_HEjF?43=^sk;)5;r77g*x<-T)cT$Uc>N+LvTI%> zaaH&gl{RH8#sBacwr#(c{#M(ESjj|htYE*rL1OnU>FQ^1_<7C{YW5dAyk8qn{FlnJ zjU9FxXUOxPXsEk0gX&#$hn&~W1D`(p09i@AO+55YH&8t~ zQ@hF)*yc?oslyZVv6>4r4Zp6Rjla7@juRaM zsOn#);o0RneayZB!{vH!+*S1fMd~(S>xuWtHXr6-mjvs{jM^TCy!yNNgf%~lH1X^7 z8F#vosu{17{rMNkjq9AShIPHMk@bDCzP8R}rFb$fmOmivl0PH`L*AqM9ZSYDlP_R* zv1012%un!ky9Q9(y*e2d?#ZKy9$djM*lg2}-{(mctz3rxwB#4;;47yo@3xz+Oa$t$7&Md}lPrrrT zpJt#6OFJ4|oX1iB?BGO5*XA1T#=T?+{`WNU^S(O`hxEhn-X}?e;iNwv6zGH5e*Oj( z?^H#l2HH~NwOjClY9~W`+YhK)wpDumu?WNcYxl5^CMgZISTXK;>mw}6tDIUD+|_XU zbDZ+~>=FL;gh_^nqZ_EnFLxu){Ib^2&o>M&esibc!^n9=&EXI6sf#}_w7u)_>E&EE zLsa4w;>g*^Pq$ap8SK{HAXBTCd^#Y-h3NbJ#!n+QJtX@av%?mAya4-h;Y4cMVO3Z= z%^`hJUbHB8ODT5MZ4>sdJAG@0t~^2YIUk9isTx9gR`kL)KB&T??BAmt<5z?!g1*-K zye$>w%>51%tiOj{DIXm=Yd~j%s(cwfMN>(x_x2Ob9))9*2a2hS0d<11JDqh}VvVk@ zOA)sI(l%_*8f6CPgRfs z-}WVsh3^bo>a_$9x$i)g=y#BBXUB`an(vRTIqOB$-trZ=+^o{sy^~&Z(k2D_^5-QO zZ&&vaSKE!$fp^?+`KA@3dwZ|x3P+UH2qz?A7e`bG8nqCBN{Nrsm8W+q3FB5di}EZ?DZKx@7B91>}%9R zys_J(Khsw&9u&5BN4RKM?hO5ZDk}AE`|NAl?j0#AqxHQ~{*AEnT)zHl+Z#2d(R0c6 zx3-D|X9rR0cY9*{_wK=@-L_Eo$9x{PYVvM%f$w)Q1S@d*X5kxoo+o1>)Wrz 
zT|e>Cc@3E_bWr_QR~X%qJn?eYff?H)M7#N=dUZ;YDA)UaVfRIq2Rc-&By;CDVvCm^ z33FdCQh(*tSkW1*NL0MlIqb}l-g->CR#bO=ly2}6k*F}*PPlJrJa(ez>-tB&o%Dk5 zH|fsax+AQ|`jEfgf91dt;cDS+)k9tHRcg`n)T5&4A7_O<$SxuGMRdfzd{}@R4&Bz@ z+}NLrm)yj^9MoCYJ24E?tqLMt^{-Gf>*J|1nNatjcsz09>Q|KPfd_WP(V09@G!o;r z?}$}=CB+MpMq^I>x{#iuDn+XYg;9~+mbfKrFmkXPsPsG}##bR**5 z6?$I0p>x{P4tFg)N**TqU|G_3Ls=JTHB&%yNSy#UF=_UdSFta`?u0aa>+x+7 zr<2}{tV&)Q5w)?Gbj8(sk@jEr3b!eBje4kFDShSgal-U`2_h3K32usx^0=JHp?FrSD{!PmSdgvIU&G3U%Eg! zLZBWYvA~l#{_LJ&>Y`9XGbzf4aP#25Cx6Ari1t0oTOkVNG|pfp`1_&raC7rl$gY^9 z&9^uC(B2Se2`r+f_9nfVNH?#Wx%^dZFc>cv+6_zgB_tH^TkT8puESrw7uUYH!v(E@ z`h(Wj()lYs)LWFl;_5My!%B@2;O4LL1;*{%658kPLEM(sU4C})tHB2#{nw0NV=y57 z?~Px}C4e94UF+7e*0*}De?jqU1AJZrAp+vU#`ouP7%DL!jNT6u&k|0l(G07+v00Q{dVccewIcs6PmZ zmX2TRaWE5#U%7gW1i0}l5pLYhEunpW1;!!V_SRaq6u%-rf@t&oNz5RCF}f0< zWa>|zZ~nFMIYk%t_b1r*7Mb_Lz{_P8e}95~FAXjcyefqv7h?@Gm*>re&!2CE2Emda zyB^&Xs>X9!OMhPkxPA<2n>*({2If6S1_k72qW&Z#C{E_SH{r-jw_#qnu;H11PO&@F z4xdwWf^s0r$myrlDzY0NOk_CNYVgz|@W_4{48s%EOd=RmK3Pp?h#UDExnbg%J4gbG z&zXiNxIviCu%z}2*lD*}}0Dgeki?v3p zG%b$+*WP-=DT|^0APJBOV{H~)3GZJ)+L*7Pjo9z{aG%@shI)(kGgps)*PyMC3E1G5l~C|8#4*;x%|QTt?Yjf1kCAgIn*DtO7{3h z7tl%O&+0GA2O_;In*wG+!JCe7xP=q7GCwKJISB~&@pY?RU z*4Ano9|22_@*e0u9iq+8tJcGKh;Ib3X*`r`)c&js=KKqHUt01n>GT_)%m?WmC|Uou z76i=s7Z<1(2#1#PFUT&gnT%#?nj-z=bALdsY4%oAYibw0*|no{BeI9)X!Gr)`(OaF z`R8$8q;n%TzhKEO5~*NyNIzSRLRlBeB4Exgdc*t_g6UbE8_VFlM-^aZOZr=w2KbQO zMDki`>p;Mq{$7XrgHUKGzmD|oiW1C(&VvqbfU>s=P-I{8p?)JiuD&CATMOl$8_N@( zzH)>&G3V)gt*zBGK5D6RW1=s>mTv^{U*+7$-Itd9%SO0=k=_|C+FJPABVf+IL_xhk z0JM~UL4Kn)9nI7JT5D<-y;&%@=SK9Lq9X(fm79NVEQWS6(Fl4|J89v< z8ukA31#|f{?)_#oLx+W&nuv{Jgal#6*Mo@OH2A|$N>@} zyNTqr($;~1IsJ8q`h!qtDZh^N&JEsPLFYjNmJ??H36OoQg8Gg4xcZLdZ7r00ZoJMu zrEGrn2=Bx~^E|Dw#WX%LJ~yH`2+`)pL-jBoLK+RwHjRhi26#>v!(4pC-Itd9OC8+5 zNbjE0a;(!nCj`v-mwc!fNUWCfFKCxJOh+>{O_6?P6*b-7T5C=1qBjc#Iybg~z@YP@ zX}P(-H`V~{WU&$Srgk#jsJ*2Nrg#|p34{Zp&G!?9@E39UMoThX|MT_}Pyc;$go8QS ze0*ZyPoTe8_oEXA?U8&tOe~(&yFz?^=Qe4>YC;gSvYeRwu|HOF+gEaDjlJZxRpnt5 
z56qF-c}$nKAM7i2=}L+nzxR{$&hnIf;p`Wda&L(E^5G-cgvmQ3KiSWhWbyNaXHNYh z`~27)>D1A)B^7lqiL-p>iqBT6rSd^{1Qq^y#DYWr5OqJ(AU^R(FHZfzUiibTU|D&H zpLFopK*QxYDW+8X;Ohfn)oop6j&{ZTgvc@Cw&E2HBYi~D^JWTI)Ue}n~9y+eH@bwuO{OC&xg(t+!k%o z923q8kl_-ZEAH8?LNLQ^iFoaU1;k#@YH3-&$&z`MI!X4)9kSlNa)qk%m&DW44iQuQ z0;MAb?WJDFCy9Q_n-|{a+DzG&iM8UsE4E-S9sE$dW8OggrwswpUSI!)i&l*n9~xCc zByC(FjURrT_;1M|Y3-Zu;pM`WQnfgfa1C~n$fD;H>)X#Kq?6C#2l^LE?bWAtO!^Uup(eZz$^mKEly~^NV$UK Date: Wed, 27 May 2020 11:40:14 -0700 Subject: [PATCH 25/57] Function for ppo architecture --- .../imitation_learning/imitating_network2.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/flow/controllers/imitation_learning/imitating_network2.py b/flow/controllers/imitation_learning/imitating_network2.py index f750fbad6..12b1a45d4 100644 --- a/flow/controllers/imitation_learning/imitating_network2.py +++ b/flow/controllers/imitation_learning/imitating_network2.py @@ -129,3 +129,42 @@ def save_network(self, save_path): # tensorboard # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + + def save_network_PPO(self, save_path): + input = tf.keras.layers.Input(self.model.input.shape[1].value) + curr_layer = input + + # build layers for policy + for i in range(num_layers): + size = self.model.layers[i + 1].output.shape[1].value + activation = tf.keras.activations.serialize(self.model.layers[i + 1].activation) + curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) + output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, activation=None, name="policy_output_layer") + + # build layers for value function + curr_layer = input + for i in range(num_layers): + curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) + output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer") + + ppo_model = tf.keras.Model(inputs=input, 
outputs=[output_layer_policy, output_layer_vf], name="ppo_model") + + # set the policy weights to those learned from imitation + for i in range(num_layers): + policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) + policy_layer.set_weights(self.model.layers[i + 1].get_weights()) + policy_output = ppo_model.get_layer("policy_output_layer") + policy_output.set_weights(self.model.layers[-1].get_weights) + + # save the model (as a h5 file) + ppo_model.save(save_path) + + + + + + + + + From 5c0923d0d1d170679c68a71e9f33b8834ef232c4 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 27 May 2020 13:47:21 -0700 Subject: [PATCH 26/57] Load weights for training in train.py --- examples/train.py | 119 ++++++++++-------- .../imitation_learning/imitating_network2.py | 15 ++- .../imitation_learning/ppo_model.py | 87 +++++-------- 3 files changed, 108 insertions(+), 113 deletions(-) diff --git a/examples/train.py b/examples/train.py index 8c65a68c1..3764f7318 100644 --- a/examples/train.py +++ b/examples/train.py @@ -64,6 +64,10 @@ def parse_args(args): parser.add_argument( '--rl_trainer', type=str, default="rllib", help='the RL trainer to use. either rllib or Stable-Baselines') + parser.add_argument( + '--load_weights_path', type=str, default=None, + help='Path to h5 file containing a pretrained model. Relevent for PPO with RLLib' + ) parser.add_argument( '--algorithm', type=str, default="PPO", help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' 
@@ -191,6 +195,17 @@ def setup_exps_rllib(flow_params, config["lambda"] = 0.97 config["kl_target"] = 0.02 config["num_sgd_iter"] = 10 + + + if flags.load_weights_path: + from flow.controllers.imitation_learning.ppo_model import PPONetwork + from ray.rllib.models import ModelCatalog + # Register custom model + ModelCatalog.register_custom_model("PPO_loaded_weights", PPONetwork) + # set model to the custom model for run + config['model']['custom_model'] = "PPO_loaded_weights" + config['model']['custom_options'] = {"h5_load_path": flags.load_weights_path} + elif alg_run == "TD3": agent_cls = get_agent_class(alg_run) config = deepcopy(agent_cls._default_config) @@ -252,59 +267,55 @@ def on_episode_end(info): register_env(gym_name, create_env) return alg_run, gym_name, config -def train_rllib_with_imitation(submodule, flags): - """Train policies using the PPO algorithm in RLlib, with initiale policy weights from imitation learning.""" - import ray - from flow.controllers.imitation_learning.ppo_model import PPONetwork - from ray.rllib.models import ModelCatalog - - flow_params = submodule.flow_params - flow_params['sim'].render = flags.render - policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) - policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) - policies_to_train = getattr(submodule, "policies_to_train", None) - - alg_run, gym_name, config = setup_exps_rllib( - flow_params, flags.num_cpus, flags.num_rollouts, flags, - policy_graphs, policy_mapping_fn, policies_to_train) - - # Register custom model - ModelCatalog.register_custom_model("Imitation_Learning", PPONetwork) - - config['num_workers'] = flags.num_cpus - config['env'] = gym_name - - # set model to the custom model for run - config['model']['custom_model'] = "Imitation_Learning" - - # create a custom string that makes looking at the experiment names easier - def trial_str_creator(trial): - return "{}_{}".format(trial.trainable_name, trial.experiment_tag) - - if flags.local_mode: - 
ray.init(local_mode=True) - else: - ray.init() - - exp_dict = { - "run_or_experiment": alg_run, - "name": gym_name, - "config": config, - "checkpoint_freq": flags.checkpoint_freq, - "checkpoint_at_end": True, - 'trial_name_creator': trial_str_creator, - "max_failures": 0, - "stop": { - "training_iteration": flags.num_iterations, - }, - } - date = datetime.now(tz=pytz.utc) - date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") - s3_string = "s3://i210.experiments/i210/" \ - + date + '/' + flags.exp_title - if flags.use_s3: - exp_dict['upload_dir'] = s3_string - tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) +# def train_rllib_with_imitation(submodule, flags): +# """Train policies using the PPO algorithm in RLlib, with initiale policy weights from imitation learning.""" +# import ray +# from flow.controllers.imitation_learning.ppo_model import PPONetwork +# from ray.rllib.models import ModelCatalog +# +# flow_params = submodule.flow_params +# flow_params['sim'].render = flags.render +# policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) +# policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) +# policies_to_train = getattr(submodule, "policies_to_train", None) +# +# alg_run, gym_name, config = setup_exps_rllib( +# flow_params, flags.num_cpus, flags.num_rollouts, flags, +# policy_graphs, policy_mapping_fn, policies_to_train) +# +# +# +# config['num_workers'] = flags.num_cpus +# config['env'] = gym_name +# +# # create a custom string that makes looking at the experiment names easier +# def trial_str_creator(trial): +# return "{}_{}".format(trial.trainable_name, trial.experiment_tag) +# +# if flags.local_mode: +# ray.init(local_mode=True) +# else: +# ray.init() +# +# exp_dict = { +# "run_or_experiment": alg_run, +# "name": gym_name, +# "config": config, +# "checkpoint_freq": flags.checkpoint_freq, +# "checkpoint_at_end": True, +# 'trial_name_creator': trial_str_creator, +# "max_failures": 0, +# "stop": { +# 
"training_iteration": flags.num_iterations, +# }, +# } +# date = datetime.now(tz=pytz.utc) +# date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") +# s3_string = "s3://i210.experiments/i210/" \ +# + date + '/' + flags.exp_title +# if flags.use_s3: +# exp_dict['upload_dir'] = s3_string +# tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" @@ -526,7 +537,7 @@ def main(args): # Perform the training operation. if flags.rl_trainer.lower() == "rllib": - train_rllib_with_imitation(submodule, flags) + train_rllib(submodule, flags) elif flags.rl_trainer.lower() == "stable-baselines": train_stable_baselines(submodule, flags) elif flags.rl_trainer.lower() == "h-baselines": diff --git a/flow/controllers/imitation_learning/imitating_network2.py b/flow/controllers/imitation_learning/imitating_network2.py index 12b1a45d4..a6b502165 100644 --- a/flow/controllers/imitation_learning/imitating_network2.py +++ b/flow/controllers/imitation_learning/imitating_network2.py @@ -130,23 +130,30 @@ def save_network(self, save_path): # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + def load_network(self, load_path): + if self.stochastic: + self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) + def save_network_PPO(self, save_path): input = tf.keras.layers.Input(self.model.input.shape[1].value) curr_layer = input + # number of hidden layers + num_layers = len(self.model.layers) - 2 + # build layers for policy for i in range(num_layers): size = self.model.layers[i + 1].output.shape[1].value activation = tf.keras.activations.serialize(self.model.layers[i + 1].activation) curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) - output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, 
activation=None, name="policy_output_layer") + output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, activation=None, name="policy_output_layer")(curr_layer) # build layers for value function curr_layer = input for i in range(num_layers): curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) - output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer") + output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") @@ -155,9 +162,9 @@ def save_network_PPO(self, save_path): policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) policy_layer.set_weights(self.model.layers[i + 1].get_weights()) policy_output = ppo_model.get_layer("policy_output_layer") - policy_output.set_weights(self.model.layers[-1].get_weights) + policy_output.set_weights(self.model.layers[-1].get_weights()) - # save the model (as a h5 file) + # save the model (as a h5 file) ppo_model.save(save_path) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index ac606f363..f9668c229 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -8,85 +8,62 @@ import tensorflow as tf - class PPONetwork(TFModelV2): def __init__(self, obs_space, action_space, num_outputs, model_config, name): super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) + h5_path = model_config.get("custom_options").get("h5_load_path", "") + # setup model with weights loaded in from model in h5 path - self.setup_model(obs_space, action_space, model_config, num_outputs, 'model_files/follower_stopper1.h5') + self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) + # register variables for 
base model self.register_variables(self.base_model.variables) def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): - activation = model_config.get("fcnet_activation") - hiddens = model_config.get("fcnet_hiddens", []) - vf_share_layers = model_config.get("vf_share_layers") - - # set up model - inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") - curr_layer = inp_layer + if imitation_h5_path: + # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) - # hidden layers and output for policy - i = 1 - for size in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) - i += 1 + # set up a model to load in weights from imitation network (without the training variables, e.g. adam variables) + self.base_model = tf.keras.models.load_model(imitation_h5_path) - output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) + else: + activation = model_config.get("fcnet_activation") + hiddens = model_config.get("fcnet_hiddens", []) + vf_share_layers = model_config.get("vf_share_layers") - # set up value function - if not vf_share_layers: + # set up model + inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") curr_layer = inp_layer + + # hidden layers and output for policy i = 1 for size in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), activation=activation)(curr_layer) + curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), + activation=activation)(curr_layer) i += 1 - output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) + output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)( + curr_layer) - # build model from layers - 
self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) + # set up value function + if not vf_share_layers: + curr_layer = inp_layer + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), + activation=activation)(curr_layer) + i += 1 + output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) - if imitation_h5_path: - # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) - - # set up a model to load in weights from imitation network (without the training variables, e.g. adam variables) - imitation_inp = tf.keras.layers.Input(shape=obs_space.shape, name="imitation_inp") - curr_imitation_layer = imitation_inp - i = 1 - for size in hiddens: - curr_imitation_layer = tf.keras.layers.Dense(size, name="imitation_hidden_layer_{}".format(i), activation=activation)(curr_imitation_layer) - i += 1 - - imitation_output_layer = tf.keras.layers.Dense(num_outputs, name="imitation_output_layer", activation=None)(curr_imitation_layer) - imitation_model = tf.keras.Model(imitation_inp, [imitation_output_layer]) - - # load weights from file into model - imitation_model.load_weights(imitation_h5_path) - # register model variables (to prevent error) - self.register_variables(imitation_model.variables) - - # copy these weights into the base model (only the policy hidden layer and output weights) - try: - for i in range(len(hiddens)): - imitation_layer = imitation_model.layers[i + 1] - base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) - base_model_layer = self.base_model.get_layer(base_model_layer_name) - base_model_layer.set_weights(imitation_layer.get_weights()) - - imitation_layer = imitation_model.layers[-1] - base_model_layer_name = 'policy_output_layer' - base_model_layer = self.base_model.get_layer(base_model_layer_name) - 
base_model_layer.set_weights(imitation_layer.get_weights()) - except Exception as e: - print("Error in loading weights from h5 file to this model") - raise e + # build model from layers + self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) + def forward(self, input_dict, state, seq_lens): From c785944471ebb08a63511350029bb67bad516971 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 27 May 2020 20:52:31 -0700 Subject: [PATCH 27/57] Code structure changes --- examples/train.py | 2 +- .../imitating_controller.py | 3 +- .../imitation_learning/imitating_network.py | 208 +++++++----------- .../imitation_learning/imitating_network2.py | 177 --------------- .../imitation_learning/keras_utils.py | 41 ++-- .../imitation_learning/ppo_model.py | 28 ++- flow/controllers/imitation_learning/run.py | 30 ++- .../controllers/imitation_learning/trainer.py | 18 +- flow/controllers/imitation_learning/utils.py | 21 +- 9 files changed, 169 insertions(+), 359 deletions(-) delete mode 100644 flow/controllers/imitation_learning/imitating_network2.py diff --git a/examples/train.py b/examples/train.py index 3764f7318..bc731e465 100644 --- a/examples/train.py +++ b/examples/train.py @@ -188,7 +188,7 @@ def setup_exps_rllib(flow_params, config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [12, 12]}) + config["model"].update({"fcnet_hiddens": [32, 32, 32]}) config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.999 # discount rate config["use_gae"] = True diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 935a66831..a13ce2083 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -9,7 +9,8 @@ class ImitatingController(BaseController): """ Controller which uses a given neural net to imitate an expert. 
Subclasses BaseController """ - # Implementation in Tensorflow + + # Implementation in Tensorflow Keras def __init__(self, veh_id, action_network, multiagent, car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): """ diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 04a0a4ce4..7d68c076a 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -1,6 +1,7 @@ import numpy as np import tensorflow as tf from utils_tensorflow import * +from keras_utils import * import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer @@ -11,7 +12,7 @@ class ImitatingNetwork(): Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, policy_scope='policy_vars', load_existing=False, load_path=''): + def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_existing=False, load_path=''): """ Initializes and constructs neural network @@ -22,11 +23,8 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r obs_dim: dimension of observation space (size of network input) num_layers: number of hidden layers (for an MLP) size: size of each layer in network - learning_rate: learning rate used in optimizer replay_buffer_size: maximum size of replay buffer used to hold data for training - training: boolean, whether the network will be trained (as opposed to loaded) stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy - policy_scope: variable scope used by Tensorflow for weights/biases load_existing: boolean, whether to load an 
existing tensorflow model load_path: path to directory containing an existing tensorflow model @@ -35,148 +33,37 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.sess = sess self.action_dim = action_dim self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training + self.fcnet_hiddens = fcnet_hiddens self.stochastic=stochastic + self.variance_regularizer = variance_regularizer # load network if specified, or construct network if load_existing: self.load_network(load_path) else: - print("HERE") self.build_network() + self.compile_network() + self.replay_buffer = ReplayBuffer(replay_buffer_size) - # init replay buffer - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - # set up policy variables, and saver to save model. Save only non-training variables (weights/biases) - if not load_existing: - self.policy_vars = [v for v in tf.all_variables() if 'network_scope' in v.name and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) - # tensorboard - self.writer = tf.summary.FileWriter('/Users/akashvelu/Documents/Random/tensorboard/', tf.get_default_graph()) - # track number of training steps - self.train_steps = 0 def build_network(self): """ Defines neural network for choosing actions. Defines placeholders and forward pass """ # setup placeholders for network input and labels for training, and hidden layers/output - self.define_placeholders() - self.define_forward_pass() - # set up training operation (e.g. 
Adam optimizer) - if self.training: - with tf.variable_scope('train'): - self.define_train_op() - - - - def load_network(self, path): - """ - Load tensorflow model from the path specified, set action prediction to proper placeholder - """ - # load and restore model - loader = tf.train.import_meta_graph(path + 'model.ckpt.meta') - loader.restore(self.sess, path+'model.ckpt') - - # get observation placeholder (for input into network) - self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/observation:0') - # get output tensor (using name of appropriate tensor) - network_output = tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') - - # for stochastic policies, the network output is twice the action dimension. First half specifies the mean of a multivariate gaussian distribution, second half specifies the diagonal entries for the diagonal covariance matrix. - # for deterministic policies, network output is the action. - if self.stochastic: - # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution - means = network_output[:, :self.action_dim] - log_vars = network_output[:, self.action_dim:] - vars = tf.math.exp(log_vars) - - # set up action distribution (parameterized by network output) - # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians - self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=vars, name='Prediction Distribution') - # action is a sample from this distribution; one sample output per Gaussian contained in self.dist - self.action_predictions = self.dist.sample() - else: - self.dist = None - self.action_predictions = network_output - - def define_placeholders(self): - """ - Defines input, output, and training placeholders for neural net - """ - # placeholder for observations (input into network) - self.obs_placeholder = tf.placeholder(shape=[None, 
self.obs_dim], name="observation", dtype=tf.float32) - - # if training, define placeholder for labels (supervised learning) - if self.training: - self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) - - - def define_forward_pass(self): - """ - Build network and initialize proper action prediction op - """ - # network output is twice action dim if stochastic (1st half mean, 2nd half diagonal elements of covariance) if self.stochastic: - output_size = 2 * self.action_dim + self.model = build_neural_net_stochastic(self.obs_dim, self.action_dim, self.fcnet_hiddens) else: - output_size = self.action_dim - - # build forward pass and get the tensor for output of last layer - network_output = build_neural_net(self.obs_placeholder, output_size=output_size, scope='network_scope', n_layers=self.num_layers, size=self.size) - - # parse the mean and covariance from output if stochastic, and set up distribution - if self.stochastic: - # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution - - means, log_vars = tf.split(network_output, num_or_size_splits=2, axis=1) - vars = tf.math.exp(log_vars) - - # set up action distribution (parameterized by network output) - # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians - with tf.variable_scope('Action_Distribution'): - self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=vars) - # action is a sample from this distribution; one sample output per Gaussian contained in self.dist - self.action_predictions = self.dist.sample() - - else: - self.dist = None - self.action_predictions = network_output - - - def define_train_op(self): - """ - Defines training operations for network (loss function and optimizer) - """ - # labels - true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions + self.model = 
build_neural_net_deterministic(self.obs_dim, self.action_dim, self.fcnet_hiddens) - if self.stochastic: - # negative log likelihood loss for stochastic policy - self.loss = self.dist.log_prob(true_actions) - self.loss = tf.negative(self.loss) - self.loss = tf.reduce_mean(self.loss) - summary_name = 'Loss_tracking_NLL' - else: - # MSE loss for deterministic policy - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) - summary_name = 'Loss_tracking_MSE' + def compile_network(self): + loss = get_loss(self.stochastic, self.variance_regularizer) + self.model.compile(loss=loss, optimizer='adam') - self.loss_summary = tf.summary.scalar(name=summary_name, tensor=self.loss) - # Adam optimizer - self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) def train(self, observation_batch, action_batch): """ @@ -184,9 +71,8 @@ def train(self, observation_batch, action_batch): """ # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - _, loss, summary = self.sess.run([self.train_op, self.loss, self.loss_summary], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - self.writer.add_summary(summary, global_step=self.train_steps) - self.train_steps += 1 + batch_size = action_batch.shape[0] + self.model.fit(observation_batch, action_batch, batch_size=batch_size, epochs=1, steps_per_epoch=1, verbose=0) def get_accel_from_observation(self, observation): """ @@ -197,8 +83,14 @@ def get_accel_from_observation(self, observation): if len(observation.shape)<=1: observation = observation[None] # "batch size" is 1, so just get single acceleration/acceleration vector - ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - return ret_val + network_output = self.model.predict(observation) + if self.stochastic: + mean, log_std = network_output[:, :self.action_dim], 
network_output[:, self.action_dim:] + var = np.exp(2 * log_std) + action = np.random.multivariate_normal(mean[0], var) + return action + else: + return network_output def get_accel(self, env): """ @@ -222,6 +114,60 @@ def sample_data(self, batch_size): def save_network(self, save_path): """ Save network to given path and to tensorboard """ - self.saver.save(self.sess, save_path) + self.model.save(save_path) # tensorboard - writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + def load_network(self, load_path): + if self.stochastic: + self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) + + + def save_network_PPO(self, save_path): + """ + Builds and saves keras model for training PPO using policy weights learned from imitation. + + Args: + save_path: path (including h5 format filename) where the PPO model should be saved + + """ + input = tf.keras.layers.Input(self.model.input.shape[1].value) + curr_layer = input + + # number of hidden layers + num_layers = len(self.model.layers) - 2 + + # build layers for policy + for i in range(num_layers): + size = self.model.layers[i + 1].output.shape[1].value + activation = tf.keras.activations.serialize(self.model.layers[i + 1].activation) + curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) + output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, activation=None, name="policy_output_layer")(curr_layer) + + # build layers for value function + curr_layer = input + for i in range(num_layers): + curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) + output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) + + ppo_model = tf.keras.Model(inputs=input, 
outputs=[output_layer_policy, output_layer_vf], name="ppo_model") + + # set the policy weights to those learned from imitation + for i in range(num_layers): + policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) + policy_layer.set_weights(self.model.layers[i + 1].get_weights()) + policy_output = ppo_model.get_layer("policy_output_layer") + policy_output.set_weights(self.model.layers[-1].get_weights()) + + # save the model (as a h5 file) + ppo_model.save(save_path) + + + + + + + + + diff --git a/flow/controllers/imitation_learning/imitating_network2.py b/flow/controllers/imitation_learning/imitating_network2.py deleted file mode 100644 index a6b502165..000000000 --- a/flow/controllers/imitation_learning/imitating_network2.py +++ /dev/null @@ -1,177 +0,0 @@ -import numpy as np -import tensorflow as tf -from utils_tensorflow import * -from keras_utils import * -import tensorflow_probability as tfp -from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer - - -class ImitatingNetwork2(): - """ - Class containing neural network which learns to imitate a given expert controller. 
- """ - - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, policy_scope='policy_vars', load_existing=False, load_path=''): - - """ - Initializes and constructs neural network - - Args: - sess: Tensorflow session variable - action_dim: dimension of action space (determines size of network output) - obs_dim: dimension of observation space (size of network input) - num_layers: number of hidden layers (for an MLP) - size: size of each layer in network - learning_rate: learning rate used in optimizer - replay_buffer_size: maximum size of replay buffer used to hold data for training - training: boolean, whether the network will be trained (as opposed to loaded) - stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy - policy_scope: variable scope used by Tensorflow for weights/biases - load_existing: boolean, whether to load an existing tensorflow model - load_path: path to directory containing an existing tensorflow model - - """ - - self.sess = sess - self.action_dim = action_dim - self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training - self.stochastic=stochastic - - print("INNNNNITITTTTT") - - # load network if specified, or construct network - if load_existing: - self.load_network(load_path) - - else: - self.build_network() - self.compile_network() - - - # init replay buffer - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - - def build_network(self): - """ - Defines neural network for choosing actions. 
Defines placeholders and forward pass - """ - # setup placeholders for network input and labels for training, and hidden layers/output - if self.stochastic: - self.model = build_neural_net_stochastic(self.obs_dim, self.action_dim, self.num_layers, self.size) - else: - self.model = build_neural_net_deterministic(self.obs_dim, self.action_dim, self.num_layers, self.size) - - - def compile_network(self): - loss = get_loss(self.stochastic) - self.model.compile(loss=loss, optimizer='adam') - - - def train(self, observation_batch, action_batch): - """ - Executes one training step for the given batch of observation and action data - """ - # reshape action_batch to ensure a shape (batch_size, action_dim) - action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - batch_size = action_batch.shape[0] - self.model.fit(observation_batch, action_batch, batch_size=batch_size, epochs=1, steps_per_epoch=1) - - def get_accel_from_observation(self, observation): - """ - Gets the network's acceleration prediction based on given observation/state - """ - - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - if len(observation.shape)<=1: - observation = observation[None] - # "batch size" is 1, so just get single acceleration/acceleration vector - network_output = self.model.predict(observation) - if self.stochastic: - mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] - var = np.exp(2 * log_std) - action = np.random.multivariate_normal(mean[0], var) - return action - else: - return network_output - - def get_accel(self, env): - """ - Get network's acceleration prediction(s) based on given env - """ - observation = env.get_state() - return self.get_accel_from_observation(observation) - - - def add_to_replay_buffer(self, rollout_list): - """ Add rollouts to replay buffer """ - - self.replay_buffer.add_rollouts(rollout_list) - - - def sample_data(self, batch_size): - """ 
Sample a batch of data from replay buffer """ - - return self.replay_buffer.sample_batch(batch_size) - - def save_network(self, save_path): - """ Save network to given path and to tensorboard """ - - self.model.save(save_path) - # tensorboard - - # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) - - def load_network(self, load_path): - if self.stochastic: - self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) - - - def save_network_PPO(self, save_path): - input = tf.keras.layers.Input(self.model.input.shape[1].value) - curr_layer = input - - # number of hidden layers - num_layers = len(self.model.layers) - 2 - - # build layers for policy - for i in range(num_layers): - size = self.model.layers[i + 1].output.shape[1].value - activation = tf.keras.activations.serialize(self.model.layers[i + 1].activation) - curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) - output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, activation=None, name="policy_output_layer")(curr_layer) - - # build layers for value function - curr_layer = input - for i in range(num_layers): - curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) - output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) - - ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") - - # set the policy weights to those learned from imitation - for i in range(num_layers): - policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) - policy_layer.set_weights(self.model.layers[i + 1].get_weights()) - policy_output = ppo_model.get_layer("policy_output_layer") - policy_output.set_weights(self.model.layers[-1].get_weights()) - - # save the model (as a h5 file) - 
ppo_model.save(save_path) - - - - - - - - - diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 429c75bea..e8dbaf458 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -3,11 +3,12 @@ from tensorflow.keras import Input from tensorflow.keras.layers import Dense -def build_neural_net_deterministic(input_dim, action_dim, n_layers, size): +def build_neural_net_deterministic(input_dim, action_dim, fcnet_hiddens): input_layer = Input(shape=(input_dim, )) curr_layer = input_layer - for _ in range(n_layers): + for i in range(len(fcnet_hiddens)): + size = fcnet_hiddens[i] dense = Dense(size, activation="tanh") curr_layer = dense(curr_layer) output_layer = Dense(action_dim, activation=None)(curr_layer) @@ -15,11 +16,12 @@ def build_neural_net_deterministic(input_dim, action_dim, n_layers, size): return model -def build_neural_net_stochastic(input_dim, action_dim, n_layers, size): +def build_neural_net_stochastic(input_dim, action_dim, fcnet_hiddens): input_layer = Input(shape=(input_dim, )) curr_layer = input_layer - for _ in range(n_layers): + for i in range(len(fcnet_hiddens)): + size = fcnet_hiddens[i] dense = Dense(size, activation="tanh") curr_layer = dense(curr_layer) @@ -28,20 +30,25 @@ def build_neural_net_stochastic(input_dim, action_dim, n_layers, size): return model -def get_loss(stochastic): +def get_loss(stochastic, variance_regularizer): if stochastic: - return negative_log_likelihood_loss + return negative_log_likelihood_loss(variance_regularizer) else: return tf.keras.losses.mean_squared_error -def negative_log_likelihood_loss(y, distribution_params): - assert distribution_params.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" - action_dim = distribution_params.shape[1]//2 - means, log_stds = distribution_params[:, :action_dim], distribution_params[:, action_dim:] - stds = 
tf.math.exp(log_stds) - variances = tf.math.square(stds) - dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) - loss = dist.log_prob(y) - loss = tf.negative(loss) - loss = tf.reduce_mean(loss) + (0.5 * tf.norm(variances)) - return loss +def negative_log_likelihood_loss(variance_regularizer): + + def nll_loss(y, distribution_params): + assert distribution_params.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" + + action_dim = distribution_params.shape[1]//2 + means, log_stds = distribution_params[:, :action_dim], distribution_params[:, action_dim:] + stds = tf.math.exp(log_stds) + variances = tf.math.square(stds) + dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) + loss = dist.log_prob(y) + loss = tf.negative(loss) + loss = tf.reduce_mean(loss) + (variance_regularizer * tf.norm(variances)) + return loss + + return nll_loss diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index f9668c229..5ad97a75d 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -3,12 +3,14 @@ import h5py from ray.rllib.models.tf.misc import normc_initializer from ray.rllib.models.tf.tf_modelv2 import TFModelV2 -# from ray.rllib.utils.framework import get_activation_fn, try_import_tf -# from flow.controllers.imitation_learning.keras_utils import * import tensorflow as tf class PPONetwork(TFModelV2): + """ + Custom RLLib PPOModel (using tensorflow keras) to load weights from a pretained policy model (e.g. from imitation learning) and start RL training with loaded weights. 
+ Subclass of TFModelV2 + """ def __init__(self, obs_space, action_space, num_outputs, model_config, name): @@ -24,11 +26,18 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name): def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): + """ + Loads/builds model for both policy and value function + Args: + obs_space: observation space of env + action_space: action space of env + model_config: configuration parameters for model + num_outputs: number of outputs expected for policy + imitation_h5_path: path to h5 file containing weights of a pretrained network (empty string if no such file) + """ if imitation_h5_path: - # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) - - # set up a model to load in weights from imitation network (without the training variables, e.g. adam variables) + # set base model to be loaded model self.base_model = tf.keras.models.load_model(imitation_h5_path) else: @@ -63,15 +72,22 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat # build model from layers self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) - + def forward(self, input_dict, state, seq_lens): + """ + Overrides parent class's method. Used to pass a input through model and get policy/vf output. + """ + policy_out, value_out = self.base_model(input_dict["obs_flat"]) self.value_out = value_out return policy_out, state def value_function(self): + """ + Overrides parent class's method. Get value function method. 
+ """ return tf.reshape(self.value_out, [-1]) def import_from_h5(self, import_file): diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 265991e20..eba837b9e 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -12,7 +12,13 @@ def __init__(self, params): # initialize trainer class instance and params self.params = params - self.trainer = Trainer(params) + if self.params['multiagent']: + module = __import__("examples.exp_configs.rl.multiagent", fromlist=[self.params['exp_config']]) + else: + module = __import__("examples.exp_configs.rl.singleagent", fromlist=[self.params['exp_config']]) + + submodule = getattr(module, self.params['exp_config']) + self.trainer = Trainer(params, submodule) def run_training_loop(self): """ @@ -39,6 +45,13 @@ def main(): """ import argparse parser = argparse.ArgumentParser() + + # required input parameters + parser.add_argument( + 'exp_config', type=str, + help='Name of the experiment configuration file, as located in ' + 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') + parser.add_argument('--ep_len', type=int, default=5000) parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy @@ -50,23 +63,23 @@ def main(): parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step - parser.add_argument('--num_layers', type=int, default=3) # number of hidden layers, of policy to be learned - parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned - parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning + parser.add_argument('--replay_buffer_size', type=int, default=1000000) parser.add_argument('--save_path', type=str, default='') parser.add_argument('--save_model', type=int, default=0) - 
parser.add_argument('--num_eval_episodes', type=int, default=30) + parser.add_argument('--num_eval_episodes', type=int, default=0) parser.add_argument('--stochastic', type=bool, default=False) - parser.add_argument('--noise_variance',type=float, default=0.5) - parser.add_argument('--vehicle_id', type=str, default='rl_0') parser.add_argument('--multiagent', type=bool, default=False) parser.add_argument('--v_des', type=float, default=15) - + parser.add_argument('--variance_regularizer', type=float, default=0.5) args = parser.parse_args() # convert args to dictionary params = vars(args) + + # change this to determine number and size of hidden layers + params["fcnet_hiddens"] = [32, 32, 32] + assert args.n_iter>1, ('DAgger needs >1 iteration') @@ -78,7 +91,6 @@ def main(): if params['save_model'] == 1: train.save_controller_network() - # evaluate train.evaluate() print("DONE") diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 940feffb8..6bd0e5dd1 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -5,9 +5,8 @@ import gym import os from flow.utils.registry import make_create_env -from examples.exp_configs.rl.multiagent.multiagent_straight_road import flow_params from imitating_controller import ImitatingController -from imitating_network2 import ImitatingNetwork2 +from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import SumoCarFollowingParams @@ -20,18 +19,21 @@ class Trainer(object): Class to initialize and run training for imitation learning (with DAgger) """ - def __init__(self, params): + def __init__(self, params, submodule): - # param setup + # get flow params + self.flow_params = submodule.flow_params + + # setup parameters for training self.params = params self.sess = create_tf_session() # 
environment setup - create_env, _ = make_create_env(flow_params) + create_env, _ = make_create_env(self.flow_params) self.env = create_env() # vehicle setup - self.multiagent = params['multiagent'] # multiagent or singleagent env + self.multiagent = self.params['multiagent'] # multiagent or singleagent env if not self.multiagent and self.env.action_space.shape[0] > 1: # use sorted rl ids if the method exists (e.g.. singlagent straightroad) @@ -51,7 +53,7 @@ def __init__(self, params): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - self.action_network = ImitatingNetwork2(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer']) # tf.global_variables_initializer().run(session=self.sess) @@ -111,7 +113,7 @@ def collect_training_trajectories(self, itr, batch_size): """ print("\nCollecting data to be used for training...") - max_decel = flow_params['env'].additional_params['max_decel'] + max_decel = self.flow_params['env'].additional_params['max_decel'] trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0, v_des=self.params['v_des'], max_decel=max_decel) return trajectories, envsteps_this_batch diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index a55f32c97..3be12f849 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -68,6 +68,7 @@ def sample_trajectory_singleagent(env, controllers, 
action_network, max_trajecto for i in range(action_dim): # if max number of RL vehicles is not reached, insert dummy values if i >= len(vehicle_ids): + # dummy value is -2 * max_decel ignore_accel = -2 * max_decel rl_actions.append(ignore_accel) actions_expert.append(ignore_accel) @@ -149,7 +150,9 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector while True: - vehicle_ids = env.k.vehicle.get_rl_ids() + + # vehicle_ids = env.k.vehicle.get_rl_ids() **this doesn't work now due to control range restriction** + vehicle_ids = list(observation_dict.keys()) # add nothing to replay buffer if no vehicles if len(vehicle_ids) == 0: observation_dict, reward, done, _ = env.step(None) @@ -213,8 +216,8 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector terminate_rollout = done_dict['__all__'] or (traj_length == max_trajectory_length) for vehicle_id in vehicle_ids: - next_observations.append(observation_dict[vehicle_id]) - rewards.append(reward_dict[vehicle_id]) + next_observations.append(observation_dict.get(vehicle_id, None)) + rewards.append(reward_dict.get(vehicle_id, 0)) terminals.append(terminate_rollout) traj_length += 1 @@ -292,9 +295,9 @@ def traj_dict(observations, actions, expert_actions, rewards, next_observations, """ Collects individual observation, action, expert_action, rewards, next observation, terminal arrays into a single rollout dictionary """ - return {"observations" : np.array(observations, dtype=np.float32), - "actions" : np.array(actions, dtype=np.float32), - "expert_actions": np.array(expert_actions, dtype=np.float32), - "rewards" : np.array(rewards, dtype=np.float32), - "next_observations": np.array(next_observations, dtype=np.float32), - "terminals": np.array(terminals, dtype=np.float32)} + return {"observations" : np.array(observations), + "actions" : np.array(actions), + "expert_actions": np.array(expert_actions), + "rewards" : np.array(rewards), + "next_observations": 
np.array(next_observations), + "terminals": np.array(terminals)} From fef3a831c941ff15cbd9b52ae3f48e338c50a87b Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 28 May 2020 10:22:15 -0700 Subject: [PATCH 28/57] Combine imitation and PPO training into one step --- .../imitation_learning/imitating_network.py | 3 +- flow/controllers/imitation_learning/run.py | 3 + .../train_with_imitation.py | 164 ++++++++++++++++++ .../controllers/imitation_learning/trainer.py | 3 + .../imitation_learning/utils_tensorflow.py | 4 +- 5 files changed, 174 insertions(+), 3 deletions(-) create mode 100644 flow/controllers/imitation_learning/train_with_imitation.py diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 7d68c076a..c2ab892cc 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -148,7 +148,8 @@ def save_network_PPO(self, save_path): # build layers for value function curr_layer = input for i in range(num_layers): - curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) + size = self.fcnet_hiddens[i] + curr_layer = tf.keras.layers.Dense(size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index eba837b9e..439b5e5d0 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -38,6 +38,9 @@ def save_controller_network(self): """ self.trainer.save_controller_network() + def save_controller_for_PPO(self): + self.trainer.save_controller_for_PPO() + def main(): """ diff --git 
a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py new file mode 100644 index 000000000..3dfbb1265 --- /dev/null +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -0,0 +1,164 @@ +from run import * +from examples.train import * + +def parse_args(args): + """Parse training options user can specify in command line. + + Returns + ------- + argparse.Namespace + the output parser object + """ + + # train.py args + + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description="Parse argument used when running a Flow simulation.", + epilog="python train.py EXP_CONFIG") + + # required input parameters + parser.add_argument( + 'exp_config', type=str, + help='Name of the experiment configuration file, as located in ' + 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') + + parser.add_argument( + 'exp_title', type=str, + help='Title to give the run.') + + + # optional input parameters + parser.add_argument( + '--rl_trainer', type=str, default="rllib", + help='the RL trainer to use. either rllib or Stable-Baselines') + parser.add_argument( + '--load_weights_path', type=str, default=None, + help='Path to h5 file containing a pretrained model. Relevent for PPO with RLLib' + ) + parser.add_argument( + '--algorithm', type=str, default="PPO", + help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' + ) + parser.add_argument( + '--num_cpus', type=int, default=1, + help='How many CPUs to use') + parser.add_argument( + '--num_steps', type=int, default=5000, + help='How many total steps to perform learning over. 
Relevant for stable-baselines') + parser.add_argument( + '--grid_search', action='store_true', default=False, + help='Whether to grid search over hyperparams') + parser.add_argument( + '--num_iterations', type=int, default=200, + help='How many iterations are in a training run.') + parser.add_argument( + '--checkpoint_freq', type=int, default=20, + help='How often to checkpoint.') + parser.add_argument( + '--num_rollouts', type=int, default=1, + help='How many rollouts are in a training batch') + parser.add_argument( + '--rollout_size', type=int, default=1000, + help='How many steps are in a training batch.') + parser.add_argument('--use_s3', action='store_true', help='If true, upload results to s3') + parser.add_argument('--local_mode', action='store_true', default=False, + help='If true only 1 CPU will be used') + parser.add_argument('--render', action='store_true', default=False, + help='If true, we render the display') + parser.add_argument( + '--checkpoint_path', type=str, default=None, + help='Directory with checkpoint to restore training from.') + + + + parser.add_argument('--ep_len', type=int, default=5000) + + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy + parser.add_argument('--n_iter', type=int, default=5) + + parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=4000) + + parser.add_argument('--train_batch_size', type=int, + default=100) # number of sampled data points to be used per gradient/train step + + + parser.add_argument('--replay_buffer_size', type=int, default=1000000) + parser.add_argument('--num_eval_episodes', type=int, default=0) + parser.add_argument('--stochastic', type=bool, default=False) + parser.add_argument('--multiagent', type=bool, default=False) + parser.add_argument('--v_des', type=float, default=15) + 
parser.add_argument('--variance_regularizer', type=float, default=0.5) + + parsed_args = parser.parse_known_args(args)[0] + dict_args = vars(parsed_args) + dict_args['save_model'] = 1 + dict_args['save_path'] = dict_args['load_weights_path'] + + return parsed_args, dict_args + + + +def main(args): + """ + Parse args, run training, and evalutation + """ + flags, params = parse_args(args) + params["fcnet_hiddens"] = [32, 32, 32] + + # change this to determine number and size of hidden layers + params["fcnet_hiddens"] = [32, 32, 32] + + assert flags.n_iter>1, ('DAgger needs >1 iteration') + + + # run training + imitation_runner = Runner(params) + imitation_runner.run_training_loop() + + # save model after training + imitation_runner.save_controller_for_PPO() + + ### IMITATION DONE + + + + # Import relevant information from the exp_config script. + module = __import__( + "examples.exp_configs.rl.singleagent", fromlist=[flags.exp_config]) + module_ma = __import__( + "examples.exp_configs.rl.multiagent", fromlist=[flags.exp_config]) + + # Import the sub-module containing the specified exp_config and determine + # whether the environment is single agent or multi-agent. + if hasattr(module, flags.exp_config): + submodule = getattr(module, flags.exp_config) + multiagent = False + elif hasattr(module_ma, flags.exp_config): + submodule = getattr(module_ma, flags.exp_config) + assert flags.rl_trainer.lower() in ["rllib", "h-baselines"], \ + "Currently, multiagent experiments are only supported through "\ + "RLlib. Try running this experiment using RLlib: " \ + "'python train.py EXP_CONFIG'" + multiagent = True + else: + raise ValueError("Unable to find experiment config.") + + # Perform the training operation. 
+ if flags.rl_trainer.lower() == "rllib": + train_rllib(submodule, flags) + elif flags.rl_trainer.lower() == "stable-baselines": + train_stable_baselines(submodule, flags) + elif flags.rl_trainer.lower() == "h-baselines": + flow_params = submodule.flow_params + train_h_baselines(flow_params, args, multiagent) + else: + raise ValueError("rl_trainer should be either 'rllib', 'h-baselines', " + "or 'stable-baselines'.") + + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 6bd0e5dd1..fc055ccda 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -215,3 +215,6 @@ def save_controller_network(self): """ print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) + + def save_controller_for_PPO(self): + self.action_network.save_network_PPO(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py index 70df79693..7be44cf60 100644 --- a/flow/controllers/imitation_learning/utils_tensorflow.py +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -30,6 +30,6 @@ def build_neural_net(input_placeholder, output_size, scope, n_layers, size, acti return output_placeholder def create_tf_session(): - config = tf.ConfigProto(device_count={'GPU': 0}) - sess = tf.Session(config=config) + config = tf.compat.v1.ConfigProto(device_count={'GPU': 0}) + sess = tf.compat.v1.Session(config=config) return sess From fd29e0fad7dcc67a1115b43390fc264ce8fc9740 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 28 May 2020 16:06:23 -0700 Subject: [PATCH 29/57] Code cleanup --- .../imitating_controller.py | 22 ++- .../imitation_learning/imitating_network.py | 129 +++++++++++--- .../imitation_learning/keras_utils.py | 63 ++++++- 
.../imitation_learning/ppo_model.py | 67 +++++-- .../imitation_learning/replay_buffer.py | 32 +++- flow/controllers/imitation_learning/run.py | 53 +++--- .../train_with_imitation.py | 50 +++--- .../controllers/imitation_learning/trainer.py | 45 +++-- flow/controllers/imitation_learning/utils.py | 164 ++++++++++++------ .../imitation_learning/utils_tensorflow.py | 44 +++-- 10 files changed, 487 insertions(+), 182 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index a13ce2083..53212f3ab 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -14,10 +14,14 @@ class ImitatingController(BaseController): def __init__(self, veh_id, action_network, multiagent, car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): """ - Args: - veh_id: ID of vehicle to control - action_network: Instance of imitating_network class; neural net that gives action given state - multiagent: boolean indicating if env is multiagent or singleagent + Parameters + __________ + veh_id: String + ID of vehicle to control + action_network: ImitatingNetwork + Instance of imitating_network class; neural net that gives action given state + multiagent: bool + boolean indicating if env is multiagent or singleagent """ BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) @@ -25,12 +29,14 @@ def __init__(self, veh_id, action_network, multiagent, car_following_params=None self.multiagent = multiagent # whether env is multiagent or singleagent self.veh_id = veh_id # vehicle id that controller is controlling + def get_accel(self, env): """ - Args: - env: instance of environment being used - - Get acceleration for vehicle in the env, using action_network. Overrides superclass method. + Get acceleration for vehicle in the environment. 
Overrides superclass method. + Parameters + __________ + env: Gym Env + instance of environment being used """ # observation is a dictionary for multiagent envs, list for singleagent envs if self.multiagent: diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index c2ab892cc..1db349b14 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -5,6 +5,8 @@ import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer +from time import time +from tensorflow.python.keras.callbacks import TensorBoard class ImitatingNetwork(): @@ -12,21 +14,29 @@ class ImitatingNetwork(): Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_existing=False, load_path=''): - - """ - Initializes and constructs neural network - - Args: - sess: Tensorflow session variable - action_dim: dimension of action space (determines size of network output) - obs_dim: dimension of observation space (size of network input) - num_layers: number of hidden layers (for an MLP) - size: size of each layer in network - replay_buffer_size: maximum size of replay buffer used to hold data for training - stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy - load_existing: boolean, whether to load an existing tensorflow model - load_path: path to directory containing an existing tensorflow model + def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_model=False, load_path=''): + + """Initializes and constructs neural network. 
+ Parameters + ---------- + sess : tf.Session + Tensorflow session variable + action_dim : int + action_space dimension + obs_dim : int + dimension of observation space (size of network input) + fcnet_hiddens : list + list of hidden layer sizes for fully connected network (length of list is number of hidden layers) + replay_buffer_size: int + maximum size of replay buffer used to hold data for training + stochastic: bool + indicates if network outputs a stochastic (MV Gaussian) or deterministic policy + variance_regularizer: float + regularization hyperparameter to penalize high variance policies + load_model: bool + if True, load model from path specified in load_path + load_path: String + path to h5 file containing model to load. """ @@ -38,7 +48,7 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.variance_regularizer = variance_regularizer # load network if specified, or construct network - if load_existing: + if load_model: self.load_network(load_path) else: @@ -47,8 +57,6 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.replay_buffer = ReplayBuffer(replay_buffer_size) - - def build_network(self): """ Defines neural network for choosing actions. 
Defines placeholders and forward pass @@ -61,22 +69,43 @@ def build_network(self): def compile_network(self): + """ + Compiles Keras network with appropriate loss and optimizer + """ loss = get_loss(self.stochastic, self.variance_regularizer) self.model.compile(loss=loss, optimizer='adam') def train(self, observation_batch, action_batch): """ - Executes one training step for the given batch of observation and action data + Executes one training (gradient) step for the given batch of observation and action data + + Parameters + ---------- + observation_batch : numpy array + numpy array containing batch of observations (inputs) + action_batch : numpy array + numpy array containing batch of actions (labels) """ + # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - batch_size = action_batch.shape[0] - self.model.fit(observation_batch, action_batch, batch_size=batch_size, epochs=1, steps_per_epoch=1, verbose=0) + # one gradient step on batch + self.model.train_on_batch(observation_batch, action_batch) def get_accel_from_observation(self, observation): """ Gets the network's acceleration prediction based on given observation/state + + Parameters + ---------- + observation : numpy array + numpy array containing a single observation + + Returns + ------- + numpy array + one element numpy array containing accleeration """ # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays @@ -95,24 +124,56 @@ def get_accel_from_observation(self, observation): def get_accel(self, env): """ Get network's acceleration prediction(s) based on given env + + Parameters + ---------- + env : + environment object + + Returns + ------- + numpy array + one element numpy array containing accleeration + """ observation = env.get_state() return self.get_accel_from_observation(observation) def add_to_replay_buffer(self, rollout_list): - """ Add rollouts to 
replay buffer """ + """ + Add data to a replay buffer + + Parameters + ---------- + rollout_list : list + list of rollout dictionaries + """ self.replay_buffer.add_rollouts(rollout_list) def sample_data(self, batch_size): - """ Sample a batch of data from replay buffer """ + """ + Sample a batch of data from replay buffer. + + Parameters + ---------- + batch_size : int + size of batch to sample + """ return self.replay_buffer.sample_batch(batch_size) def save_network(self, save_path): - """ Save network to given path and to tensorboard """ + """ + Save imitation network as a h5 file in save_path + + Parameters + ---------- + save_path : String + path to h5 file to save to + """ self.model.save(save_path) # tensorboard @@ -120,18 +181,30 @@ def save_network(self, save_path): # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) def load_network(self, load_path): + """ + Load imitation network from a h5 file in load_path + + Parameters + ---------- + load_path : String + path to h5 file containing model to load from + """ if self.stochastic: self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) + else: + self.model = tf.keras.models.load_model(load_path) def save_network_PPO(self, save_path): """ - Builds and saves keras model for training PPO using policy weights learned from imitation. - - Args: - save_path: path (including h5 format filename) where the PPO model should be saved + Build a model, with same policy architecture as imitation network, to run PPO, copy weights from imitation, and save this model. 
+ Parameters + ---------- + load_path : save_path + path to h5 file to save to """ + input = tf.keras.layers.Input(self.model.input.shape[1].value) curr_layer = input diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index e8dbaf458..87679a005 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -4,6 +4,21 @@ from tensorflow.keras.layers import Dense def build_neural_net_deterministic(input_dim, action_dim, fcnet_hiddens): + """Build a keras model to output a deterministic policy. + Parameters + ---------- + input_dim : int + dimension of input layer + action_dim : int + action_space dimension + fcnet_hiddens : list + list containing size of each hidden layer (length of list is number of hidden layers) + + Returns + ------- + Keras model (untrained) + """ + input_layer = Input(shape=(input_dim, )) curr_layer = input_layer @@ -17,6 +32,20 @@ def build_neural_net_deterministic(input_dim, action_dim, fcnet_hiddens): return model def build_neural_net_stochastic(input_dim, action_dim, fcnet_hiddens): + """Build a keras model to output a stochastic policy. + Parameters + ---------- + input_dim : int + dimension of input layer + action_dim : int + action_space dimension + fcnet_hiddens : list + list containing size of each hidden layer (length of list is number of hidden layers) + + Returns + ------- + Keras model (untrained) + """ input_layer = Input(shape=(input_dim, )) curr_layer = input_layer @@ -31,20 +60,46 @@ def build_neural_net_stochastic(input_dim, action_dim, fcnet_hiddens): return model def get_loss(stochastic, variance_regularizer): + """Get appropriate loss function for training. 
+ Parameters + ---------- + stochastic : bool + determines if policy to be learned is deterministic or stochastic + variance_regularizer : float + regularization hyperparameter to penalize high variance policies + + Returns + ------- + Keras loss function to use for imitation learning. + """ if stochastic: return negative_log_likelihood_loss(variance_regularizer) else: return tf.keras.losses.mean_squared_error def negative_log_likelihood_loss(variance_regularizer): + """Negative log likelihood loss for learning stochastic policies. - def nll_loss(y, distribution_params): - assert distribution_params.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" + Parameters + ---------- + variance_regularizer : float + regularization hyperparameter to penalize high variance policies + Returns + ------- + Negative log likelihood loss function with variance regularization. + """ - action_dim = distribution_params.shape[1]//2 - means, log_stds = distribution_params[:, :action_dim], distribution_params[:, action_dim:] + def nll_loss(y, network_output): + assert network_output.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" + + action_dim = network_output.shape[1] // 2 + + # first half of network_output is mean, second half is log_std + means, log_stds = network_output[:, :action_dim], network_output[:, action_dim:] stds = tf.math.exp(log_stds) variances = tf.math.square(stds) + + # Multivariate Gaussian distribution dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) loss = dist.log_prob(y) loss = tf.negative(loss) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 5ad97a75d..df8648afa 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -8,11 +8,26 @@ class PPONetwork(TFModelV2): """ - Custom RLLib PPOModel (using tensorflow keras) to load weights from a pretained 
policy model (e.g. from imitation learning) and start RL training with loaded weights. - Subclass of TFModelV2 + Custom RLLib PPOModel (using tensorflow keras) to load weights from a pre-trained policy model (e.g. from imitation learning) and start RL training with loaded weights. + Subclass of TFModelV2. See https://docs.ray.io/en/master/rllib-models.html. """ def __init__(self, obs_space, action_space, num_outputs, model_config, name): + """ + Parameters + __________ + obs_space: gym.Space + observation space of gym environment + action_space: gym.Space + action_space of gym environment + num_outputs: int + number of outputs for policy network. For deterministic policies, this is dimension of the action space. For continuous stochastic policies, this is 2 * dimension of the action space + model_config: dict + configuration of model + name: str + name of model + + """ super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) @@ -28,12 +43,19 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name): def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): """ Loads/builds model for both policy and value function - Args: - obs_space: observation space of env - action_space: action space of env - model_config: configuration parameters for model - num_outputs: number of outputs expected for policy - imitation_h5_path: path to h5 file containing weights of a pretrained network (empty string if no such file) + Parameters + __________ + + obs_space: gym.Space + observation space of env + action_space: gym.Space + action space of env + model_config: dict + configuration parameters for model + num_outputs: int + number of outputs expected for policy + imitation_h5_path: str + path to h5 file containing weights of a pretrained network (empty string if no such file) """ if imitation_h5_path: @@ -77,7 +99,20 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, 
imitat def forward(self, input_dict, state, seq_lens): """ - Overrides parent class's method. Used to pass a input through model and get policy/vf output. + Overrides parent class's method. Used to pass a input through model and get policy/vf output. + Parameters + __________ + input_dict: dict + dictionary of input tensors, including “obs”, “obs_flat”, “prev_action”, “prev_reward”, “is_training” + state: list + list of state tensors with sizes matching those returned by get_initial_state + the batch dimension + seq_lens: tensor + 1d tensor holding input sequence lengths + + Returns + _______ + (outputs, state) + Tuple, first element is policy output, second element state """ policy_out, value_out = self.base_model(input_dict["obs_flat"]) @@ -86,9 +121,21 @@ def forward(self, input_dict, state, seq_lens): def value_function(self): """ - Overrides parent class's method. Get value function method. + Returns the value function output for the most recent forward pass. + + Returns + _______ + tensor + value estimate tensor of shape [BATCH]. """ return tf.reshape(self.value_out, [-1]) def import_from_h5(self, import_file): + """ + Overrides parent class method. Import base_model from h5 import_file. 
+ Parameters: + __________ + import_file: str + filepath to h5 file + """ self.setup_model(self, self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py index 58bdd2cd7..4e02a52c8 100644 --- a/flow/controllers/imitation_learning/replay_buffer.py +++ b/flow/controllers/imitation_learning/replay_buffer.py @@ -26,6 +26,12 @@ def __init__(self, max_size=100000): def add_rollouts(self, rollouts_list): """ Add a list of rollouts to the replay buffer + + Parameters + __________ + rollouts_list: list + list of rollout dictionaries + """ for rollout in rollouts_list: @@ -53,10 +59,18 @@ def add_rollouts(self, rollouts_list): def sample_batch(self, batch_size): """ - Sample a batch of data (with size batch_size) from replay buffer. - Returns data in separate numpy arrays of observations, actions, rewards, next_observations, terminals + Sample a batch of data (with size batch_size) from replay buffer. 
+ + Parameters + ---------- + batch_size: int + size of batch to sample + + Returns + _______ + Data in separate numpy arrays of observations, actions, and expert actionis """ - assert self.observations is not None and self.actions is not None and self.expert_actions is not None and self.rewards is not None and self.next_observations is not None and self.terminals is not None + assert self.observations is not None and self.actions is not None and self.expert_actions is not None size = len(self.observations) rand_inds = np.random.randint(0, size, batch_size) @@ -66,9 +80,15 @@ def sample_batch(self, batch_size): def unpack_rollouts(self, rollouts_list): """ - Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays - rollouts: list of rollout dictionaries, rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" - return separate np arrays of observations, actions, rewards, next_observations, and is_terminals + Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays + Parameters + ---------- + rollouts: list + list of rollout dictionaries + + Returns + ---------- + separate numpy arrays of observations, actions, rewards, next_observations, and is_terminals """ observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 439b5e5d0..40a1b08a3 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -9,9 +9,17 @@ class Runner(object): """ Class to run imitation learning (training and evaluation) """ def __init__(self, params): + """ + Parameters + __________ + params: dict + dictionary of parameters relevent to running imitation learning. 
+ """ # initialize trainer class instance and params self.params = params + + # import appropriate exp_config module if self.params['multiagent']: module = __import__("examples.exp_configs.rl.multiagent", fromlist=[self.params['exp_config']]) else: @@ -22,7 +30,7 @@ def __init__(self, params): def run_training_loop(self): """ - Runs training for imitation learning for specified number of iterations + Runs training for imitation learning for number of iterations specified in params. """ self.trainer.run_training_loop(n_iter=self.params['n_iter']) @@ -34,18 +42,22 @@ def evaluate(self): def save_controller_network(self): """ - Saves a tensorflow checkpoint to path specified in params (and writes to tensorboard) + Saves the tensorflow keras model of the imitation policy to a h5 file, whose path is specified in params """ self.trainer.save_controller_network() def save_controller_for_PPO(self): + """ + Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning + """ self.trainer.save_controller_for_PPO() def main(): """ - Parse args, run training, and evalutation + Parse args, run training, and evaluate. """ + import argparse parser = argparse.ArgumentParser() @@ -55,26 +67,25 @@ def main(): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') - parser.add_argument('--ep_len', type=int, default=5000) + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts. 
') - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy - parser.add_argument('--n_iter', type=int, default=5) + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy + parser.add_argument('--n_iter', type=int, default=5, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') - parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=4000) + parser.add_argument('--batch_size', type=int, default=3000, help='Number of environment steps to collect in iteration of DAgger') + parser.add_argument('--init_batch_size', type=int, default=4000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') - parser.add_argument('--train_batch_size', type=int, - default=100) # number of sampled data points to be used per gradient/train step + parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') - - parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--save_path', type=str, default='') - parser.add_argument('--save_model', type=int, default=0) - parser.add_argument('--num_eval_episodes', type=int, default=0) - parser.add_argument('--stochastic', type=bool, default=False) - parser.add_argument('--multiagent', type=bool, default=False) - parser.add_argument('--v_des', type=float, default=15) - parser.add_argument('--variance_regularizer', type=float, default=0.5) + parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') + 
parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') + parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') + parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') + parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') + parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent. ') + parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') + parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') args = parser.parse_args() # convert args to dictionary @@ -94,10 +105,8 @@ def main(): if params['save_model'] == 1: train.save_controller_network() - # evaluate + # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout train.evaluate() - print("DONE") - if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index 3dfbb1265..416ae7048 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -8,11 +8,12 @@ def parse_args(args): ------- argparse.Namespace the output parser object + dict_args + dictionary version of the argparse """ # train.py args - parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="Parse argument used when running a Flow simulation.", @@ -24,6 +25,7 @@ def parse_args(args): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or 
exp_configs/rl/multiagent.') + parser.add_argument( 'exp_title', type=str, help='Title to give the run.') @@ -71,26 +73,26 @@ def parse_args(args): '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') + # Imitation Learning args + parser.add_argument('--ep_len', type=int, default=5000, help="Maximum length of episode for imitation learning") - parser.add_argument('--ep_len', type=int, default=5000) + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help="Number of gradient steps to take per iteration") # number of gradient steps for training policy + parser.add_argument('--n_iter', type=int, default=5, help="Number of iterations of DAgger to perform (1st iteration is behavioral cloning)") - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy - parser.add_argument('--n_iter', type=int, default=5) - - parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration + parser.add_argument('--batch_size', type=int, default=3000, help="") # training data collected (in the env) during each iteration parser.add_argument('--init_batch_size', type=int, default=4000) - parser.add_argument('--train_batch_size', type=int, - default=100) # number of sampled data points to be used per gradient/train step - + parser.add_argument('--train_batch_size', type=int, default=100, help="Batch size for training") # number of sampled data points to be used per gradient/train step + parser.add_argument('--tensorboard_path', type=str, help='Path to tensorboard log dir for imitation') parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--num_eval_episodes', type=int, default=0) - parser.add_argument('--stochastic', type=bool, default=False) - parser.add_argument('--multiagent', type=bool, default=False) - parser.add_argument('--v_des', 
type=float, default=15) - parser.add_argument('--variance_regularizer', type=float, default=0.5) + parser.add_argument('--num_eval_episodes', type=int, default=0, help="Number of episodes to evaluate imitation controller.") + parser.add_argument('--stochastic', type=bool, default=False, help="If true, controller learns stochastic policy (multivariate gaussian)") + parser.add_argument('--multiagent', type=bool, default=False, help="Whether the env is multiagent") + parser.add_argument('--v_des', type=float, default=15, help="v_des for FollowerStopper") + parser.add_argument('--variance_regularizer', type=float, default=0.5, help="Regularization parameter to penalize high variance in negative log likelihood loss") + parsed_args = parser.parse_known_args(args)[0] dict_args = vars(parsed_args) @@ -102,28 +104,24 @@ def parse_args(args): def main(args): - """ - Parse args, run training, and evalutation - """ - flags, params = parse_args(args) - params["fcnet_hiddens"] = [32, 32, 32] - # change this to determine number and size of hidden layers + # Parse args, train imitation learning + + flags, params = parse_args(args) params["fcnet_hiddens"] = [32, 32, 32] assert flags.n_iter>1, ('DAgger needs >1 iteration') - + print("\n\n********** IMITATION LEARNING ************ \n") # run training imitation_runner = Runner(params) imitation_runner.run_training_loop() - # save model after training + # convert model to work for PPO and save for training imitation_runner.save_controller_for_PPO() - ### IMITATION DONE - - + # Imitation Done, start RL + print("\n\n********** RL ************ \n") # Import relevant information from the exp_config script. 
module = __import__( @@ -158,7 +156,5 @@ def main(args): raise ValueError("rl_trainer should be either 'rllib', 'h-baselines', " "or 'stable-baselines'.") - - if __name__ == "__main__": main(sys.argv[1:]) diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index fc055ccda..84a2ed15d 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -20,6 +20,14 @@ class Trainer(object): """ def __init__(self, params, submodule): + """ + Parameters + __________ + params: dict + Dictionary of parameters used to run imitation learning + submodule: Module + Python module for file containing flow_params + """ # get flow params self.flow_params = submodule.flow_params @@ -72,8 +80,10 @@ def run_training_loop(self, n_iter): """ Trains imitator for n_iter iterations (each iteration collects new trajectories to put in replay buffer) - Args: - param n_iter: number of iterations to execute training + Parameters + __________ + n_iter : + intnumber of iterations to execute training """ # init vars at beginning of training @@ -104,12 +114,18 @@ def collect_training_trajectories(self, itr, batch_size): """ Collect (state, action, reward, next_state, terminal) tuples for training - Args: - itr: iteration of training during which function is called. Used to determine whether to run behavioral cloning or DAgger - batch_size: number of tuples to collect - Returns: - paths: list of trajectories - envsteps_this_batch: the sum over the numbers of environment steps in paths + Parameters + __________ + itr: int + iteration of training during which function is called. 
Used to determine whether to run behavioral cloning or DAgger + batch_size: int + number of tuples to collect + Returns + _______ + paths: list + list of trajectories + envsteps_this_batch: int + the sum over the numbers of environment steps in paths (total number of env transitions in trajectories collected) """ print("\nCollecting data to be used for training...") @@ -132,10 +148,12 @@ def train_controller(self): def evaluate_controller(self, num_trajs = 10): """ - Evaluates a trained imitation controller on similarity with expert with respect to action taken and total reward per rollout + Evaluates a trained imitation controller on similarity with expert with respect to action taken and total reward per rollout. - Args: - num_trajs: number of trajectories to evaluate performance on + Parameters + __________ + num_trajs: int + number of trajectories to evaluate performance on """ print("\n\n********** Evaluation ************ \n") @@ -211,10 +229,13 @@ def evaluate_controller(self, num_trajs = 10): def save_controller_network(self): """ - Saves a tensorflow model to the specified path given in the command line params. Path must end with .ckpt + Saves a keras tensorflow model to the specified path given in the command line params. Path must end with .h5. """ print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) def save_controller_for_PPO(self): + """ + Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. 
+ """ self.action_network.save_network_PPO(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 3be12f849..483b76e7d 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -13,14 +13,26 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des, max_decel): """ - Samples a trajectory for a given vehicle using the actions prescribed by specified controller. - Args: - env: environment - vehicle_id: id of the vehicle that is being controlled/tracked during trajectory - controller: subclass of BaseController, decides actions taken by vehicle - expert_controller: subclass of BaseController, "expert" for imitation learning - max_trajectory_length: maximum steps in a trajectory - Returns: + Samples a single trajectory from a singleagent environment. + Parameters + __________ + env: gym.Env + environment + controllers: dict + Dictionary of 2-tuples (Imitating_Controller, Expert_Controller), with keys of vehicle_ids + action_network: ImitatingNetwork + ImitatingNetwork class containing neural net for action prediction + max_trajectory_length: int + maximum steps in a trajectory + use_expert: bool + if True, trajectory is collected using expert policy (for behavioral cloning) + v_des: float + v_des parameter for follower-stopper + max_decel: float + maximum deceleration of environment. Used to determine dummy values to put as labels when environment has less vehicles than the maximum amount. 
+ Returns + _______ + dict Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ @@ -85,7 +97,7 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto if use_expert: if traj_length == 0 and i == 0: - print("Controller collecing trajectory: ", type(expert)) + print("Controller collecting trajectory: ", type(expert)) rl_actions.append(expert_action) else: if traj_length == 0 and i == 0: @@ -130,16 +142,25 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des): """ - Samples a trajectory for a given set of vehicles using the actions prescribed by specified controller. - - Args: - env: environment - vehicle_ids: id of the vehicle that is being controlled/tracked during trajectory - controllers: subclass of BaseController, decides actions taken by vehicle - expert_controllers: subclass of BaseController, "expert" for imitation learning - max_trajectory_length: maximum steps in a trajectory - - Returns: + Samples a single trajectory from a multiagent environment. 
+ + Parameters + __________ + env: gym.Env + environment + controllers: dict + Dictionary of 2-tuples (Imitating_Controller, Expert_Controller), with keys of vehicle_ids + action_network: ImitatingNetwork + ImitatingNetwork class containing neural net for action prediction + max_trajectory_length: int + maximum steps in a trajectory + use_expert: bool + if True, trajectory is collected using expert policy (for behavioral cloning) + v_des: float + v_des parameter for follower-stopper + Returns + _______ + dict Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ @@ -150,8 +171,6 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector while True: - - # vehicle_ids = env.k.vehicle.get_rl_ids() **this doesn't work now due to control range restriction** vehicle_ids = list(observation_dict.keys()) # add nothing to replay buffer if no vehicles if len(vehicle_ids) == 0: @@ -230,19 +249,34 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert, v_des=15, max_decel=4.5): """ - Samples trajectories to collect at least min_batch_timesteps steps in the environment - - Args: - env: environment - vehicle_id: id of vehicle being tracked/controlled - controller: subclass of BaseController, decides actions taken by vehicle - expert_controller: subclass of BaseController, "expert" for imitation learning - min_batch_timesteps: minimum number of environment steps to collect - max_trajectory_length: maximum steps in a trajectory - v_des: parameter used for follower-stopper (applies if Expert controller is follower-stopper) - - Returns: - List of rollout dictionaries, total steps taken by environment + Samples trajectories from environment. 
+ + Parameters + __________ + env: gym.Env + environment + controllers: dict + Dictionary of 2-tuples (Imitating_Controller, Expert_Controller), with keys of vehicle_ids + action_network: ImitatingNetwork + ImitatingNetwork class containing neural net for action prediction + min_batch_timesteps: int + minimum number of env transitions to collect + max_trajectory_length: int + maximum steps in a trajectory + multiagent: bool + if True, env is a multiagent env + use_expert: bool + if True, trajectory is collected using expert policy (for behavioral cloning) + v_des: float + v_des parameter for follower-stopper + max_decel: float + maximum deceleration of environment. Used to determine dummy values to put as labels when environment has less vehicles than the maximum amount. + + Returns + _______ + dict, int + Dictionary of trajectory numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples + Total number of env transitions seen over trajectories """ total_envsteps = 0 trajectories = [] @@ -262,22 +296,35 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert, v_des=15, max_decel=4.5): """ - Collects a fixed number of trajectories. - - Args: - env: environment - vehicle_id: id of vehicle being tracked/controlled - controller: subclass of BaseController, decides actions taken by vehicle - expert_controller: subclass of BaseController, "expert" for imitation learning - n: number of trajectories to collect - max_trajectory_length: maximum steps in a trajectory - v_des: parameter used for follower-stopper (applies if Expert controller is follower-stopper) - - - Returns: - List of rollouts (tuple of rollout dictionary, length of rollout) - + Samples n trajectories from environment. 
+ + Parameters + __________ + env: gym.Env + environment + controllers: dict + Dictionary of 2-tuples (Imitating_Controller, Expert_Controller), with keys of vehicle_ids + action_network: ImitatingNetwork + ImitatingNetwork class containing neural net for action prediction + n: int + number of trajectories to collect + max_trajectory_length: int + maximum steps in a trajectory + multiagent: bool + if True, env is a multiagent env + use_expert: bool + if True, trajectory is collected using expert policy (for behavioral cloning) + v_des: float + v_des parameter for follower-stopper + max_decel: float + maximum deceleration of environment. Used to determine dummy values to put as labels when environment has less vehicles than the maximum amount. + + Returns + _______ + dict + Dictionary of trajectory numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ + trajectories = [] for _ in range(n): @@ -293,7 +340,24 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le def traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals): """ - Collects individual observation, action, expert_action, rewards, next observation, terminal arrays into a single rollout dictionary + Collects observation, action, expert_action, rewards, next observation, terminal lists (collected over a rollout) into a single rollout dictionary. 
+ Parameters + __________ + observations: list + list of observations; ith entry is ith observation + actions: list + list of actions; ith entry is action taken at ith timestep + rewards: list + list of rewards; ith entry is reward received at ith timestep + next_observations: list + list of next observations; ith entry is the observation transitioned to due to state and action at ith timestep + terminals: list + list of booleans indicating if rollout ended at that timestep + + Returns + _______ + dict + dictionary containing above lists in numpy array form. """ return {"observations" : np.array(observations), "actions" : np.array(actions), diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py index 7be44cf60..cbbfa633d 100644 --- a/flow/controllers/imitation_learning/utils_tensorflow.py +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -6,21 +6,28 @@ def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ - Builds a feedfoward neural network for action prediction - - arguments: - input_placeholder: placeholder variable for the state (batch_size, input_size) - scope: variable scope of the network - - n_layers: number of hidden layers - size: dimension of each hidden layer - activation: activation of each hidden layer - - output_size: size of the output layer - output_activation: activation of the output layer - - returns: - output_placeholder: the result of pass through Neural Network + Builds a feedfoward neural network for action prediction + Parameters + __________ + input_placeholder: tensor + placeholder variable for the state (batch_size, input_size) + scope: str + variable scope of the network + n_layers: int + number of hidden layers + size: int + dimension of each hidden layer + activation: str + activation function of each hidden layer + output_size: int + size of the output layer + output_activation: 
str + activation function of the output layer + + Returns + _______ + output_placeholder: tensor + the result of pass through Neural Network """ output_placeholder = input_placeholder with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): @@ -30,6 +37,13 @@ def build_neural_net(input_placeholder, output_size, scope, n_layers, size, acti return output_placeholder def create_tf_session(): + """ + Creates a tf session + Returns + _______ + tf.Session + new tensorflow session + """ config = tf.compat.v1.ConfigProto(device_count={'GPU': 0}) sess = tf.compat.v1.Session(config=config) return sess From 2b6cc0838758aad2017715dd706d0bc3a98fff82 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 28 May 2020 16:08:19 -0700 Subject: [PATCH 30/57] test with cluster --- scripts/ray_autoscale.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 5cf0eca96..a216a3747 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -1,4 +1,4 @@ -# cluster.yaml ========================================= +# cluster.yaml ========================================= # An unique identifier for the head node and workers of this cluster. cluster_name: test # @@ -67,7 +67,7 @@ worker_nodes: # Additional options in the boto docs. 
setup_commands: - - cd flow && git fetch && git checkout origin/i210_dev + - cd flow && git fetch && git checkout origin/akash-dagger - pip install ray==0.8.0 - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions From 3dfafe157093adecef9640d58425e5c210ecf065 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 29 May 2020 16:39:34 -0700 Subject: [PATCH 31/57] Bug fix --- .../imitation_learning/imitating_network.py | 2 +- .../imitation_learning/keras_utils.py | 35 ++++++++++++++++--- .../imitation_learning/ppo_model.py | 12 +++++-- flow/controllers/imitation_learning/run.py | 7 ++-- .../controllers/imitation_learning/trainer.py | 5 +-- 5 files changed, 48 insertions(+), 13 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 1db349b14..569ec6fd1 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -190,7 +190,7 @@ def load_network(self, load_path): path to h5 file containing model to load from """ if self.stochastic: - self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) + self.model = tf.keras.models.load_model(load_path, custom_objects={'nll_loss': negative_log_likelihood_loss(self.variance_regularizer)}) else: self.model = tf.keras.models.load_model(load_path) diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 87679a005..59928affc 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -95,15 +95,42 @@ def nll_loss(y, network_output): action_dim = network_output.shape[1] // 2 # first half of network_output is mean, second half is log_std - means, log_stds = network_output[:, :action_dim], network_output[:, action_dim:] + means, log_stds = 
tf.split(network_output, 2, axis=1) stds = tf.math.exp(log_stds) - variances = tf.math.square(stds) + # variances = tf.math.square(stds) # Multivariate Gaussian distribution - dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) + dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=stds) loss = dist.log_prob(y) loss = tf.negative(loss) - loss = tf.reduce_mean(loss) + (variance_regularizer * tf.norm(variances)) + loss = tf.reduce_mean(loss) + (variance_regularizer * tf.norm(stds)) return loss return nll_loss + +def compare_weights(ppo_model, imitation_path): + imitation_model = tf.keras.models.load_model(imitation_path, custom_objects={'nll_loss': negative_log_likelihood_loss(0.5)}) + + for i in range(len(imitation_model.layers) - 2): + ppo_name = 'policy_hidden_layer_' + str(i + 1) + ppo_layer = ppo_model.get_layer(ppo_name) + im_layer = imitation_model.layers[i + 1] + + ppo_weights = ppo_layer.get_weights() + im_weights = im_layer.get_weights() + for i in range(len(ppo_weights)): + print("\n\n") + print(type((ppo_weights[i] == im_weights[i]))) + print("\n\n") + assert (ppo_weights[i] == im_weights[i]).all(), "Weights don't match!" + + ppo_layer = ppo_model.get_layer('policy_output_layer') + im_layer = imitation_model.layers[-1] + ppo_weights = ppo_layer.get_weights() + im_weights = im_layer.get_weights() + for i in range(len(ppo_weights)): + assert (ppo_weights[i] == im_weights[i]).all(), "Weights don't match!" 
+ + print("\n\nWeights properly loaded\n\n") + + diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index df8648afa..55f2fafc5 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -4,6 +4,7 @@ from ray.rllib.models.tf.misc import normc_initializer from ray.rllib.models.tf.tf_modelv2 import TFModelV2 import tensorflow as tf +from flow.controllers.imitation_learning.keras_utils import * class PPONetwork(TFModelV2): @@ -12,7 +13,7 @@ class PPONetwork(TFModelV2): Subclass of TFModelV2. See https://docs.ray.io/en/master/rllib-models.html. """ - def __init__(self, obs_space, action_space, num_outputs, model_config, name): + def __init__(self, obs_space, action_space, num_outputs, model_config, name, **kwargs): """ Parameters __________ @@ -31,13 +32,15 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name): super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) - h5_path = model_config.get("custom_options").get("h5_load_path", "") + h5_path = kwargs.get("h5_load_path", "") + # print("\n\nH5 PATH: ", h5_path + "\n\n") # setup model with weights loaded in from model in h5 path self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) # register variables for base model self.register_variables(self.base_model.variables) + # compare_weights(self.base_model, "/Users/akashvelu/Desktop/latest_run/imitation_model.h5") def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): @@ -60,6 +63,8 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat if imitation_h5_path: # set base model to be loaded model + print("\n\nLOAAAADING IMMMMITATIONNNNN MODELLLLLL\n\n") + self.base_model = tf.keras.models.load_model(imitation_h5_path) else: @@ -130,6 +135,7 @@ def value_function(self): """ return 
tf.reshape(self.value_out, [-1]) + def import_from_h5(self, import_file): """ Overrides parent class method. Import base_model from h5 import_file. @@ -138,4 +144,4 @@ def import_from_h5(self, import_file): import_file: str filepath to h5 file """ - self.setup_model(self, self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) + self.setup_model(self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 40a1b08a3..8ebdfb899 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -70,10 +70,10 @@ def main(): parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts. ') parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy - parser.add_argument('--n_iter', type=int, default=5, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') + parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') - parser.add_argument('--batch_size', type=int, default=3000, help='Number of environment steps to collect in iteration of DAgger') - parser.add_argument('--init_batch_size', type=int, default=4000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') + parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') + parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') @@ -104,6 
+104,7 @@ def main(): # save model after training if params['save_model'] == 1: train.save_controller_network() + train.save_controller_for_PPO() # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout train.evaluate() diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 84a2ed15d..d45c30bc7 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -63,7 +63,8 @@ def __init__(self, params, submodule): # initialize neural network class and tf variables self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer']) - # tf.global_variables_initializer().run(session=self.sess) + # tf.global_variab + # les_initializer().run(session=self.sess) # controllers setup v_des = self.params['v_des'] # for FollowerStopper @@ -238,4 +239,4 @@ def save_controller_for_PPO(self): """ Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. 
""" - self.action_network.save_network_PPO(self.params['save_path']) + self.action_network.save_network_PPO(self.params['PPO_save_path']) From ba6796160242464877ee06afe40a0d9054420105 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 29 May 2020 16:50:44 -0700 Subject: [PATCH 32/57] Minor cleanup --- flow/controllers/imitation_learning/ppo_model.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 55f2fafc5..a15eb6cc5 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -33,7 +33,6 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name, **k super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) h5_path = kwargs.get("h5_load_path", "") - # print("\n\nH5 PATH: ", h5_path + "\n\n") # setup model with weights loaded in from model in h5 path self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) @@ -63,8 +62,6 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat if imitation_h5_path: # set base model to be loaded model - print("\n\nLOAAAADING IMMMMITATIONNNNN MODELLLLLL\n\n") - self.base_model = tf.keras.models.load_model(imitation_h5_path) else: From bff9e47d01e0b48e7cb446d267f1ad165476d4f9 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Mon, 1 Jun 2020 20:58:14 -0700 Subject: [PATCH 33/57] Load weights into rllib --- examples/train.py | 27 +++-- .../imitation_learning/imitating_network.py | 5 +- .../imitation_learning/imitation_trainer.py | 49 +++++++++ .../imitation_learning/keras_utils.py | 1 + .../imitation_learning/ppo_model.py | 55 +++++----- flow/controllers/imitation_learning/run.py | 17 +-- .../controllers/imitation_learning/trainer.py | 100 +++++++++++++++++- flow/controllers/imitation_learning/utils.py | 5 +- flow/controllers/velocity_controllers.py | 8 +- 
flow/visualize/visualizer_rllib.py | 17 ++- 10 files changed, 229 insertions(+), 55 deletions(-) create mode 100644 flow/controllers/imitation_learning/imitation_trainer.py diff --git a/examples/train.py b/examples/train.py index d80f5a1e2..083593548 100644 --- a/examples/train.py +++ b/examples/train.py @@ -185,32 +185,46 @@ def setup_exps_rllib(flow_params, alg_run = flags.algorithm.upper() if alg_run == "PPO": - from flow.algorithms.custom_ppo import CustomPPOTrainer - from ray.rllib.agents.ppo import DEFAULT_CONFIG - alg_run = CustomPPOTrainer - config = deepcopy(DEFAULT_CONFIG) + from ray import tune + from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except ImportError: + from ray.rllib.agents.registry import get_agent_class + + horizon = flow_params['env'].horizon + + alg_run = "PPO" + + agent_cls = get_agent_class(alg_run) + config = deepcopy(agent_cls._default_config) config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [32, 32]}) + config["model"].update({"fcnet_hiddens": [32, 32, 32]}) config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.995 # discount rate config["use_gae"] = True config["lambda"] = 0.97 config["kl_target"] = 0.02 - config["num_sgd_iter"] = 10 + # TODO: restore this to 10 + config["num_sgd_iter"] = 1 + # config["num_sgd_iter"] = 10 if flags.grid_search: config["lambda"] = tune.grid_search([0.5, 0.9]) config["lr"] = tune.grid_search([5e-4, 5e-5]) if flags.load_weights_path: from flow.controllers.imitation_learning.ppo_model import PPONetwork + from flow.controllers.imitation_learning.imitation_trainer import Imitation_PPO_Trainable from ray.rllib.models import ModelCatalog # Register custom model ModelCatalog.register_custom_model("PPO_loaded_weights", PPONetwork) # set model to the custom model for run config['model']['custom_model'] = "PPO_loaded_weights" config['model']['custom_options'] = {"h5_load_path": 
flags.load_weights_path} + config['observation_filter'] = 'NoFilter' + alg_run = Imitation_PPO_Trainable elif alg_run == "CENTRALIZEDPPO": from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel @@ -402,6 +416,7 @@ def trial_str_creator(trial): return "{}_{}".format(trial.trainable_name, trial.experiment_tag) if flags.local_mode: + print("LOCAL MODE") ray.init(local_mode=True) else: ray.init() diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 569ec6fd1..47cdf3064 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -105,7 +105,7 @@ def get_accel_from_observation(self, observation): Returns ------- numpy array - one element numpy array containing accleeration + one element numpy array containing acceleration """ # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays @@ -116,7 +116,8 @@ def get_accel_from_observation(self, observation): if self.stochastic: mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] var = np.exp(2 * log_std) - action = np.random.multivariate_normal(mean[0], var) + cov_matrix = np.diag(var[0]) + action = np.random.multivariate_normal(mean[0], cov_matrix) return action else: return network_output diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py new file mode 100644 index 000000000..503bb6d07 --- /dev/null +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -0,0 +1,49 @@ +from ray import tune +from flow.controllers.imitation_learning.ppo_model import * +from ray.rllib.agents import ppo +try: + from ray.rllib.agents.agent import get_agent_class +except ImportError: + from ray.rllib.agents.registry import get_agent_class + + +class Imitation_PPO_Trainable(tune.Trainable): + def 
_setup(self, config): + env_name = config['env'] + # agent_cls = get_agent_class(config['env_config']['run']) + self.trainer = ppo.PPOTrainer(env=env_name, config=config) + print("\n\n\nPOLICY_NAME") + policy_id = list(self.trainer.get_weights().keys())[0] + print(policy_id) + print("\n\n\n") + self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id) + print("here") + + def _train(self): + print("TRAIN CALLED") + # return self.trainer.train() + return self.trainer.train() + + # def train(self): + # print("TRAIN CALLED") + # return self.trainer.train() + + def _save(self, tmp_checkpoint_dir): + return self.trainer._save(tmp_checkpoint_dir) + + def _restore(self, checkpoint): + self.trainer.restore(checkpoint) + + def _log_result(self, result): + self.trainer._log_result(result) + + def _stop(self): + self.trainer.stop() + + def _export_model(self, export_formats, export_dir): + return self.trainer.export_model(export_formats, export_dir=export_dir) + + + + + diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 59928affc..34b32f692 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -108,6 +108,7 @@ def nll_loss(y, network_output): return nll_loss + def compare_weights(ppo_model, imitation_path): imitation_model = tf.keras.models.load_model(imitation_path, custom_objects={'nll_loss': negative_log_likelihood_loss(0.5)}) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index a15eb6cc5..f7490d180 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -36,9 +36,9 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name, **k # setup model with weights loaded in from model in h5 path self.setup_model(obs_space, action_space, model_config, num_outputs, 
h5_path) + self.register_variables(self.base_model.variables) # register variables for base model - self.register_variables(self.base_model.variables) # compare_weights(self.base_model, "/Users/akashvelu/Desktop/latest_run/imitation_model.h5") @@ -60,42 +60,38 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat path to h5 file containing weights of a pretrained network (empty string if no such file) """ - if imitation_h5_path: - # set base model to be loaded model - self.base_model = tf.keras.models.load_model(imitation_h5_path) + activation = model_config.get("fcnet_activation") + hiddens = model_config.get("fcnet_hiddens", []) + vf_share_layers = model_config.get("vf_share_layers") - else: - activation = model_config.get("fcnet_activation") - hiddens = model_config.get("fcnet_hiddens", []) - vf_share_layers = model_config.get("vf_share_layers") + # set up model + inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") + curr_layer = inp_layer - # set up model - inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") - curr_layer = inp_layer + # hidden layers and output for policy + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), + activation=activation)(curr_layer) + i += 1 + + output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)( + curr_layer) - # hidden layers and output for policy + # set up value function + if not vf_share_layers: + curr_layer = inp_layer i = 1 for size in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), + curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), activation=activation)(curr_layer) i += 1 - output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)( - curr_layer) - - # set up value function - if not vf_share_layers: - curr_layer = 
inp_layer - i = 1 - for size in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), - activation=activation)(curr_layer) - i += 1 + output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) - output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) + # build model from layers + self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) - # build model from layers - self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) @@ -116,7 +112,7 @@ def forward(self, input_dict, state, seq_lens): (outputs, state) Tuple, first element is policy output, second element state """ - + # print(self.base_model.get_weights()) policy_out, value_out = self.base_model(input_dict["obs_flat"]) self.value_out = value_out return policy_out, state @@ -141,4 +137,5 @@ def import_from_h5(self, import_file): import_file: str filepath to h5 file """ - self.setup_model(self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) + print("LOADING WEIGHTS FROM H6") + self.base_model.load_weights(import_file) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 8ebdfb899..aea231be7 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -74,9 +74,13 @@ def main(): parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') + parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') + parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run vf 
training') # TODO: better help description for this parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') + parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural net') + parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') @@ -86,6 +90,7 @@ def main(): parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent. ') parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') + args = parser.parse_args() # convert args to dictionary @@ -94,20 +99,18 @@ def main(): # change this to determine number and size of hidden layers params["fcnet_hiddens"] = [32, 32, 32] - assert args.n_iter>1, ('DAgger needs >1 iteration') - # run training - train = Runner(params) - train.run_training_loop() + runner = Runner(params) + runner.run_training_loop() # save model after training if params['save_model'] == 1: - train.save_controller_network() - train.save_controller_for_PPO() + runner.save_controller_network() + runner.save_controller_for_PPO() # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout - train.evaluate() + runner.evaluate() if __name__ == "__main__": main() diff --git 
a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index d45c30bc7..67e00eb25 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -13,6 +13,7 @@ import tensorflow as tf from utils import * from utils_tensorflow import * +from flow.controllers.imitation_learning.keras_utils import * class Trainer(object): """ @@ -227,6 +228,93 @@ def evaluate_controller(self, num_trajs = 10): print("Total imitator steps: ", total_imitator_steps) print("Total expert steps: ", total_expert_steps) + def learn_value_function(self, num_samples, num_iterations, num_grad_steps): + # init value function neural net + vf_net = build_neural_net_deterministic(self.params['obs_dim'], 1, self.params['fcnet_hiddens']) + vf_net.compile(loss='mean_squared_error', optimizer = 'adam') + + max_decel = self.flow_params['env'].additional_params['max_decel'] + # collect trajectory samples to train on + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, + num_samples, self.params['ep_len'], self.multiagent, + use_expert=False, v_des=self.params['v_des'], + max_decel=max_decel) + observations = np.array([]) + rewards = np.array([]) + next_observations = np.array([]) + + # accumulate trajectory data + for traj in trajectories: + observations = np.append(observations, traj['observations']) + rewards = np.append(rewards, traj['rewards']) + next_observations = np.append(next_observations, traj['next_observations']) + + # iterate over data multiple times (labels change every iteration) + for _ in range(num_iterations): + # form labels + next_state_value_preds = vf_net.predict(next_observations).flatten() + next_state_value_preds[np.isnan(next_state_value_preds)] = 0 + labels = rewards + next_state_value_preds + for i in range(num_grad_steps): + vf_net.train_on_batch(observations, labels) + + return vf_net + + + + def save_controller_for_PPO(self): + 
""" + Build a model, with same policy architecture as imitation network, to run PPO, copy weights from imitation, and save this model. + + Parameters + ---------- + load_path : save_path + path to h5 file to save to + """ + + vf_net = self.learn_value_function(self.params['vf_batch_size'], self.params['num_vf_iters'], self.params['num_agent_train_steps_per_iter']) + + input = tf.keras.layers.Input(self.action_network.model.input.shape[1].value) + curr_layer = input + + # number of hidden layers + num_layers = len(self.action_network.model.layers) - 2 + + # build layers for policy + for i in range(num_layers): + size = self.action_network.model.layers[i + 1].output.shape[1].value + activation = tf.keras.activations.serialize(self.action_network.model.layers[i + 1].activation) + curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) + output_layer_policy = tf.keras.layers.Dense(self.action_network.model.output.shape[1].value, activation=None, name="policy_output_layer")(curr_layer) + + # build layers for value function + curr_layer = input + for i in range(num_layers): + size = self.fcnet_hiddens[i] + curr_layer = tf.keras.layers.Dense(size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) + output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) + + ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") + + # set the policy weights to those learned from imitation + for i in range(num_layers): + policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) + policy_layer.set_weights(self.action_network.model.layers[i + 1].get_weights()) + policy_output = ppo_model.get_layer("policy_output_layer") + policy_output.set_weights(self.action_network.model.layers[-1].get_weights()) + + # set value function weights to those learned + num_vf_layers = len(vf_net.layers) - 2 + for i 
in range(num_vf_layers): + vf_layer = ppo_model.get_layer(name-'vf_hidden_layer{}'.format(i + 1)) + vf_layer.set_weights(vf_net.layers[i + 1].get_weights()) + vf_output = ppo_model.get_layer("vf_output_layer") + vf_output.set_weights(vf_net.layers[-1].get_weights()) + + + # save the model (as a h5 file) + ppo_model.save(self.params['PPO_save_path']) + def save_controller_network(self): """ @@ -235,8 +323,10 @@ def save_controller_network(self): print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) - def save_controller_for_PPO(self): - """ - Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. - """ - self.action_network.save_network_PPO(self.params['PPO_save_path']) + + + # def save_controller_for_PPO(self): + # """ + # Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. 
+ # """ + # self.action_network.save_network_PPO(self.params['PPO_save_path']) diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 483b76e7d..5d6134500 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -205,6 +205,8 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector print("Controller collecting trajectory: ", controller) action = controller.get_action(env) + if type(action) == tuple: + mean, log_std = action[1], action[2] # action should be a scalar acceleration if type(action) == np.ndarray: @@ -235,7 +237,8 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector terminate_rollout = done_dict['__all__'] or (traj_length == max_trajectory_length) for vehicle_id in vehicle_ids: - next_observations.append(observation_dict.get(vehicle_id, None)) + # default next observation to nans + next_observations.append(observation_dict.get(vehicle_id, np.empty((env.observation_space.shape[0], )))) rewards.append(reward_dict.get(vehicle_id, 0)) terminals.append(terminate_rollout) diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index 62ce15beb..7ad12bbe8 100644 --- a/flow/controllers/velocity_controllers.py +++ b/flow/controllers/velocity_controllers.py @@ -118,9 +118,11 @@ def get_accel(self, env): if (self.find_intersection_dist(env) <= 10 and \ env.k.vehicle.get_edge(self.veh_id) in self.danger_edges) or \ env.k.vehicle.get_edge(self.veh_id)[0] == ":"\ - or (self.control_length and (env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0] - or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]))\ - or edge in self.no_control_edges: + or (self.control_length and env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0] \ + and env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]) or \ + (self.no_control_edges and 
len(self.no_control_edges) > 0 and env.k.vehicle.get_edge(self.veh_id) not in self.no_control_edges): + + # TODO(@evinitsky) put back # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: return None diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 5c52e196f..d11773e4b 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -80,6 +80,8 @@ def visualizer_rllib(args): sim_params.use_ballistic = False # Determine agent and checkpoint + # TODO(akashvelu): remove this + # print("NEW CONFIGGG: ", config['env_config']['run']) config_run = config['env_config']['run'] if 'run' in config['env_config'] \ else None if args.run and config_run: @@ -91,6 +93,14 @@ def visualizer_rllib(args): sys.exit(1) if args.run: agent_cls = get_agent_class(args.run) + elif config['env_config']['run'] == "": + from flow.controllers.imitation_learning.imitation_trainer import Imitation_PPO_Trainable + from flow.controllers.imitation_learning.ppo_model import PPONetwork + from ray.rllib.models import ModelCatalog + agent_cls = get_agent_class("PPO") + ModelCatalog.register_custom_model("imitation_ppo_trainable", Imitation_PPO_Trainable) + ModelCatalog.register_custom_model("PPO_loaded_weights", PPONetwork) + elif config['env_config']['run'] == "": from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel from ray.rllib.models import ModelCatalog @@ -162,6 +172,8 @@ def visualizer_rllib(args): checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num agent.restore(checkpoint) + # agent.import_model('/Users/akashvelu/Desktop/latest_run3/ppo.h5', 'av') + if hasattr(agent, "local_evaluator") and \ os.environ.get("TEST_FLAG") != 'True': @@ -169,7 +181,7 @@ def visualizer_rllib(args): else: env = gym.make(env_name) - # reroute on exit is a training hack, it should be turned off at test time. 
+ # reroute on exit is a training hack, it should be turned off at test time. if hasattr(env, "reroute_on_exit"): env.reroute_on_exit = False @@ -401,5 +413,6 @@ def create_parser(): if __name__ == '__main__': parser = create_parser() args = parser.parse_args() - ray.init(num_cpus=1) + print("GEN EMISSION: ", args.gen_emission) + ray.init(local_mode=True) visualizer_rllib(args) From d10f8e50f10ea7124afb6da09b1263a04cc62dd7 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 3 Jun 2020 16:34:31 -0700 Subject: [PATCH 34/57] Value function learning --- .../imitating_controller.py | 3 +- .../imitation_learning/imitating_network.py | 18 +++--- .../imitation_learning/imitation_trainer.py | 32 ++++++++-- flow/controllers/imitation_learning/run.py | 9 +-- .../controllers/imitation_learning/trainer.py | 59 ++++++++----------- 5 files changed, 70 insertions(+), 51 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 53212f3ab..4d912179d 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -2,8 +2,7 @@ import tensorflow as tf import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer - +from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer class ImitatingController(BaseController): """ diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 47cdf3064..bf0a190a9 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -1,12 +1,12 @@ import numpy as np import tensorflow as tf -from utils_tensorflow import * -from keras_utils import * -import tensorflow_probability as tfp -from flow.controllers.base_controller import BaseController -from 
replay_buffer import ReplayBuffer from time import time from tensorflow.python.keras.callbacks import TensorBoard +import tensorflow_probability as tfp +from flow.controllers.imitation_learning.utils_tensorflow import * +from flow.controllers.imitation_learning.keras_utils import * +from flow.controllers.base_controller import BaseController +from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer class ImitatingNetwork(): @@ -14,7 +14,7 @@ class ImitatingNetwork(): Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_model=False, load_path=''): + def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_model=False, load_path='', tensorboard_path=''): """Initializes and constructs neural network. Parameters @@ -47,6 +47,9 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.stochastic=stochastic self.variance_regularizer = variance_regularizer + self.train_steps = 0 + self.writer = tf.summary.FileWriter(tensorboard_path, tf.get_default_graph()) + # load network if specified, or construct network if load_model: self.load_network(load_path) @@ -91,7 +94,8 @@ def train(self, observation_batch, action_batch): # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) # one gradient step on batch - self.model.train_on_batch(observation_batch, action_batch) + loss = self.model.train_on_batch(observation_batch, action_batch) + self.writer.add_summary() def get_accel_from_observation(self, observation): """ diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index 503bb6d07..b27b46609 100644 --- 
a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -8,7 +8,15 @@ class Imitation_PPO_Trainable(tune.Trainable): + """ + Class to train PPO with imitation, with Tune. Extends Trainable. + """ + def _setup(self, config): + """ + Sets up trainable. See superclass definition. + """ + env_name = config['env'] # agent_cls = get_agent_class(config['env_config']['run']) self.trainer = ppo.PPOTrainer(env=env_name, config=config) @@ -16,31 +24,45 @@ def _setup(self, config): policy_id = list(self.trainer.get_weights().keys())[0] print(policy_id) print("\n\n\n") - self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id) + self.trainer.import_model(config['model']['custom_options']['h5_load_path']) print("here") def _train(self): + """ + Executes one training iteration on trainer. See superclass definition. + """ print("TRAIN CALLED") # return self.trainer.train() return self.trainer.train() - # def train(self): - # print("TRAIN CALLED") - # return self.trainer.train() - def _save(self, tmp_checkpoint_dir): + """ + Saves trainer. See superclass definition. + """ return self.trainer._save(tmp_checkpoint_dir) def _restore(self, checkpoint): + """ + Restores trainer from checkpoint. See superclass definition. + """ self.trainer.restore(checkpoint) def _log_result(self, result): + """ + Logs results of trainer. See superclass definition. + """ self.trainer._log_result(result) def _stop(self): + """ + Stops trainer. See superclass definition. + """ self.trainer.stop() def _export_model(self, export_formats, export_dir): + """ + Exports trainer model. See superclass definition. 
+ """ return self.trainer.export_model(export_formats, export_dir=export_dir) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index aea231be7..0fbd2006b 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -1,7 +1,7 @@ import os import time import numpy as np -from trainer import Trainer +from flow.controllers.imitation_learning.trainer import Trainer from flow.controllers.car_following_models import IDMController @@ -67,7 +67,7 @@ def main(): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') - parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts. ') + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') @@ -87,7 +87,7 @@ def main(): parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') - parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent. 
') + parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') @@ -110,7 +110,8 @@ def main(): runner.save_controller_for_PPO() # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout - runner.evaluate() + if params['num_eval_episodes'] > 0: + ßrunner.evaluate() if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 67e00eb25..99a091083 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -4,16 +4,16 @@ import numpy as np import gym import os +import tensorflow as tf +from utils import * from flow.utils.registry import make_create_env -from imitating_controller import ImitatingController -from imitating_network import ImitatingNetwork +from flow.controllers.imitation_learning.imitating_controller import ImitatingController +from flow.controllers.imitation_learning.imitating_network import ImitatingNetwork +from flow.controllers.imitation_learning.utils_tensorflow import * +from flow.controllers.imitation_learning.keras_utils import * from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import SumoCarFollowingParams -import tensorflow as tf -from utils import * -from utils_tensorflow import * -from flow.controllers.imitation_learning.keras_utils import * class Trainer(object): """ @@ -62,10 +62,8 @@ def __init__(self, params, submodule): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - 
self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path']) - # tf.global_variab - # les_initializer().run(session=self.sess) # controllers setup v_des = self.params['v_des'] # for FollowerStopper @@ -229,6 +227,18 @@ def evaluate_controller(self, num_trajs = 10): print("Total expert steps: ", total_expert_steps) def learn_value_function(self, num_samples, num_iterations, num_grad_steps): + """ + Learn the value function under imitation policy. 
+ Parameters + __________ + num_samples: number of environment transition samples to collect to learn from + num_iterations: number of iterations to relabel data, and train + num_grad_steps: number of gradient steps per training iteration + + Returns + _______ + Value function neural net + """ # init value function neural net vf_net = build_neural_net_deterministic(self.params['obs_dim'], 1, self.params['fcnet_hiddens']) vf_net.compile(loss='mean_squared_error', optimizer = 'adam') @@ -239,15 +249,11 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): num_samples, self.params['ep_len'], self.multiagent, use_expert=False, v_des=self.params['v_des'], max_decel=max_decel) - observations = np.array([]) - rewards = np.array([]) - next_observations = np.array([]) - # accumulate trajectory data - for traj in trajectories: - observations = np.append(observations, traj['observations']) - rewards = np.append(rewards, traj['rewards']) - next_observations = np.append(next_observations, traj['next_observations']) + # combine trajectories into one + observations = np.concatenate([traj['observations'] for traj in trajectories]) + rewards = np.concatenate([traj['rewards'] for traj in trajectories]) + next_observations = np.concatenate([traj['next_observations'] for traj in trajectories]) # iterate over data multiple times (labels change every iteration) for _ in range(num_iterations): @@ -255,8 +261,7 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): next_state_value_preds = vf_net.predict(next_observations).flatten() next_state_value_preds[np.isnan(next_state_value_preds)] = 0 labels = rewards + next_state_value_preds - for i in range(num_grad_steps): - vf_net.train_on_batch(observations, labels) + vf_net.fit(observations, labels, verbose=0) return vf_net @@ -266,10 +271,6 @@ def save_controller_for_PPO(self): """ Build a model, with same policy architecture as imitation network, to run PPO, copy weights from imitation, 
and save this model. - Parameters - ---------- - load_path : save_path - path to h5 file to save to """ vf_net = self.learn_value_function(self.params['vf_batch_size'], self.params['num_vf_iters'], self.params['num_agent_train_steps_per_iter']) @@ -290,7 +291,7 @@ def save_controller_for_PPO(self): # build layers for value function curr_layer = input for i in range(num_layers): - size = self.fcnet_hiddens[i] + size = self.params['fcnet_hiddens'][i] curr_layer = tf.keras.layers.Dense(size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) @@ -306,7 +307,7 @@ def save_controller_for_PPO(self): # set value function weights to those learned num_vf_layers = len(vf_net.layers) - 2 for i in range(num_vf_layers): - vf_layer = ppo_model.get_layer(name-'vf_hidden_layer{}'.format(i + 1)) + vf_layer = ppo_model.get_layer('vf_hidden_layer_{}'.format(i + 1)) vf_layer.set_weights(vf_net.layers[i + 1].get_weights()) vf_output = ppo_model.get_layer("vf_output_layer") vf_output.set_weights(vf_net.layers[-1].get_weights()) @@ -322,11 +323,3 @@ def save_controller_network(self): """ print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) - - - - # def save_controller_for_PPO(self): - # """ - # Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. 
- # """ - # self.action_network.save_network_PPO(self.params['PPO_save_path']) From 7fa3e3a2a3b8f7353914cb4c50b3aeb2f760e5b0 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 3 Jun 2020 16:50:24 -0700 Subject: [PATCH 35/57] Tensorboard plotting for loss --- flow/controllers/imitation_learning/imitating_network.py | 8 +++++++- flow/controllers/imitation_learning/run.py | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index bf0a190a9..2ee4de3f8 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -48,6 +48,8 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.variance_regularizer = variance_regularizer self.train_steps = 0 + + tensorboard_path = tensorboard_path + 'imitation_tensorboard/' self.writer = tf.summary.FileWriter(tensorboard_path, tf.get_default_graph()) # load network if specified, or construct network @@ -95,7 +97,11 @@ def train(self, observation_batch, action_batch): action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) # one gradient step on batch loss = self.model.train_on_batch(observation_batch, action_batch) - self.writer.add_summary() + + # tensorboard + summary = tf.Summary(value=[tf.Summary.Value(tag="imitation training loss", simple_value=loss), ]) + self.writer.add_summary(summary, global_step=self.train_steps) + self.train_steps += 1 def get_accel_from_observation(self, observation): """ diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 0fbd2006b..59fdeea90 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -81,6 +81,7 @@ def main(): parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural 
net') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') + parser.add_argument('--tensorboad_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') From 91fab74509573cc5e0cfe661319afd4d38e624a7 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 3 Jun 2020 16:51:18 -0700 Subject: [PATCH 36/57] Bug fixes --- flow/controllers/imitation_learning/trainer.py | 2 +- flow/controllers/imitation_learning/utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 99a091083..8ac5a3208 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -62,7 +62,7 @@ def __init__(self, params, submodule): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], 
load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path'], tensorboard_path=self.params['tensorboard_path']) # controllers setup diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 5d6134500..4708e96cb 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -3,8 +3,8 @@ import numpy as np import math from flow.core.params import SumoCarFollowingParams -from imitating_controller import ImitatingController -from imitating_network import ImitatingNetwork +from flow.controllers.imitation_learning.imitating_controller import ImitatingController +from flow.controllers.imitation_learning.imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.rewards import * From d38839f31d87ee97d51c30e3ac2162c9638a99cb Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 4 Jun 2020 11:03:35 -0700 Subject: [PATCH 37/57] Code cleanup --- examples/train.py | 2 + .../imitation_learning/imitating_network.py | 11 +++++- .../imitation_learning/imitation_trainer.py | 6 +-- .../imitation_learning/keras_utils.py | 1 - flow/controllers/imitation_learning/run.py | 4 +- .../train_with_imitation.py | 37 +++++++++++-------- .../controllers/imitation_learning/trainer.py | 2 + flow/controllers/imitation_learning/utils.py | 3 +- 8 files changed, 40 insertions(+), 26 deletions(-) diff --git a/examples/train.py b/examples/train.py index 083593548..6d7b13879 100644 --- a/examples/train.py +++ b/examples/train.py @@ -218,12 +218,14 @@ def setup_exps_rllib(flow_params, from flow.controllers.imitation_learning.ppo_model import PPONetwork from flow.controllers.imitation_learning.imitation_trainer import Imitation_PPO_Trainable from ray.rllib.models import ModelCatalog + # Register custom model 
ModelCatalog.register_custom_model("PPO_loaded_weights", PPONetwork) # set model to the custom model for run config['model']['custom_model'] = "PPO_loaded_weights" config['model']['custom_options'] = {"h5_load_path": flags.load_weights_path} config['observation_filter'] = 'NoFilter' + # alg run is the Trainable class alg_run = Imitation_PPO_Trainable elif alg_run == "CENTRALIZEDPPO": diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 2ee4de3f8..30eec3696 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -48,8 +48,8 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.variance_regularizer = variance_regularizer self.train_steps = 0 + self.action_steps = 0 - tensorboard_path = tensorboard_path + 'imitation_tensorboard/' self.writer = tf.summary.FileWriter(tensorboard_path, tf.get_default_graph()) # load network if specified, or construct network @@ -126,12 +126,21 @@ def get_accel_from_observation(self, observation): if self.stochastic: mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] var = np.exp(2 * log_std) + + variance_norm = np.linalg.norm(var) + summary = tf.Summary(value=[tf.Summary.Value(tag="Variance norm", simple_value=variance_norm), ]) + self.writer.add_summary(summary, global_step=self.action_steps) + cov_matrix = np.diag(var[0]) action = np.random.multivariate_normal(mean[0], cov_matrix) + + self.action_steps += 1 return action else: + self.action_steps += 1 return network_output + def get_accel(self, env): """ Get network's acceleration prediction(s) based on given env diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index b27b46609..a6f75ea45 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ 
b/flow/controllers/imitation_learning/imitation_trainer.py @@ -20,12 +20,8 @@ def _setup(self, config): env_name = config['env'] # agent_cls = get_agent_class(config['env_config']['run']) self.trainer = ppo.PPOTrainer(env=env_name, config=config) - print("\n\n\nPOLICY_NAME") policy_id = list(self.trainer.get_weights().keys())[0] - print(policy_id) - print("\n\n\n") - self.trainer.import_model(config['model']['custom_options']['h5_load_path']) - print("here") + self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id) def _train(self): """ diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 34b32f692..59928affc 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -108,7 +108,6 @@ def nll_loss(y, network_output): return nll_loss - def compare_weights(ppo_model, imitation_path): imitation_model = tf.keras.models.load_model(imitation_path, custom_objects={'nll_loss': negative_log_likelihood_loss(0.5)}) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 59fdeea90..25cb0f230 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -81,7 +81,7 @@ def main(): parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural net') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') - parser.add_argument('--tensorboad_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') + parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') 
parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') @@ -112,7 +112,7 @@ def main(): # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout if params['num_eval_episodes'] > 0: - ßrunner.evaluate() + runner.evaluate() if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index 416ae7048..78053fe2e 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -1,4 +1,4 @@ -from run import * +from flow.controllers.imitation_learning.run import * from examples.train import * def parse_args(args): @@ -75,23 +75,30 @@ def parse_args(args): # Imitation Learning args - parser.add_argument('--ep_len', type=int, default=5000, help="Maximum length of episode for imitation learning") + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help="Number of gradient steps to take per iteration") # number of gradient steps for training policy - parser.add_argument('--n_iter', type=int, default=5, help="Number of iterations of DAgger to perform (1st iteration is behavioral cloning)") + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy + parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') - parser.add_argument('--batch_size', type=int, default=3000, help="") # training data collected 
(in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=4000) + parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') + parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') + parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') + parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run vf training') # TODO: better help description for this - parser.add_argument('--train_batch_size', type=int, default=100, help="Batch size for training") # number of sampled data points to be used per gradient/train step - parser.add_argument('--tensorboard_path', type=str, help='Path to tensorboard log dir for imitation') + parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') + + parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural net') + parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') + parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') + parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') + parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') + parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') + parser.add_argument('--stochastic', 
type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') + parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') + parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') + parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') - parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--num_eval_episodes', type=int, default=0, help="Number of episodes to evaluate imitation controller.") - parser.add_argument('--stochastic', type=bool, default=False, help="If true, controller learns stochastic policy (multivariate gaussian)") - parser.add_argument('--multiagent', type=bool, default=False, help="Whether the env is multiagent") - parser.add_argument('--v_des', type=float, default=15, help="v_des for FollowerStopper") - parser.add_argument('--variance_regularizer', type=float, default=0.5, help="Regularization parameter to penalize high variance in negative log likelihood loss") parsed_args = parser.parse_known_args(args)[0] @@ -109,8 +116,8 @@ def main(args): flags, params = parse_args(args) params["fcnet_hiddens"] = [32, 32, 32] + params['PPO_save_path'] = params['load_weights_path'] - assert flags.n_iter>1, ('DAgger needs >1 iteration') print("\n\n********** IMITATION LEARNING ************ \n") # run training diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 8ac5a3208..c1ff5f981 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -239,6 +239,8 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): _______ Value function neural net """ + + print("\n\n********** Learning value function of imitation policy ************ \n") # init value 
function neural net vf_net = build_neural_net_deterministic(self.params['obs_dim'], 1, self.params['fcnet_hiddens']) vf_net.compile(loss='mean_squared_error', optimizer = 'adam') diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 4708e96cb..36f7844e9 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -205,8 +205,7 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector print("Controller collecting trajectory: ", controller) action = controller.get_action(env) - if type(action) == tuple: - mean, log_std = action[1], action[2] + # action should be a scalar acceleration if type(action) == np.ndarray: From 81e8d6a47e2293cf8877e72f40c231fb4b1097ec Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 11 Jun 2020 16:14:49 -0700 Subject: [PATCH 38/57] Test file changes --- .../multi_agent/checkpoint_1/checkpoint-1 | Bin 10209 -> 19590 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 180 -> 210 bytes tests/data/rllib_data/multi_agent/params.json | 54 ++++++++++-------- tests/data/rllib_data/multi_agent/params.pkl | Bin 17562 -> 17746 bytes .../single_agent/checkpoint_1/checkpoint-1 | Bin 582 -> 27018 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 180 -> 210 bytes .../data/rllib_data/single_agent/params.json | 42 ++++++++------ tests/data/rllib_data/single_agent/params.pkl | Bin 6414 -> 10890 bytes 8 files changed, 55 insertions(+), 41 deletions(-) diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 index 0693ed4b62a9cabcdbecb267201ea862144f212c..bc68b0b99d2171f688662b7b1e4c841a36f8ffe0 100644 GIT binary patch literal 19590 zcmZU)c|29`*Z+@_Sy84)h76I)km>Ae8A?Kfln5ot6s3boG?2NFu`)*ql_r_a+1H|y zl1j5?5e53Iw#|$P3k&g|xj8B% zEOO@mjFMa&KL0<%CAnj#$8$v^Jw*k7h>Y@!3gC|0!4(S@oK{@lQ}#b+nH9L!*LarT 
zni~VcxDsnOZr-qI+sw7$5dkwdYz~VG@rw{#L((@QU~Txu$f$_TYojE&lK(SGa^t%H zIm(rC5lZ4p&lUFP%A{~*-MHiRU4&i6xM(DEC;acwby3?k1xRw`{$qNe8CTv#$VE7X zJJF3hN#A9Rr?9{jhO4l6@nYw{g5Q5OE<#aUMgKJWIKjavrHi0+oU~~AuC{sqaaqew ztyUPT7TfZ52gU1AzBpmUTUh*cxsB*!otB3`RBYBfUT9M|4s3Q?7J`%VdYid{J3!HY zQ_Dx*N*mq+@sSpBB6c>z4w>ZJQ$3q*Eqj~M zs&(jFdlj2*1-3|z*$V5H+@=qIWs@)Cf;VMOOmHj=X z|4+pDhxkQuRe~jh|Kt40!2-{#x^dP1@w`S7cS`W2w7GGmW2-IuJMUSy?s08isH4V- zJ#E3M(S2fdHC%_|QhlAL)9}Q4R<$U9a>jHHUnhdMR^Hcoy^#UGQSt%r`E5;(Scfc! z`$3nJSmtE?eocq<$m=lcC3CuYgCSFRH%`g&n>vnK#=1pXkB?U7_XP!64VMY=XAbVO zo`X%iOk+X1B}vMqUDzvHNi!{&1*3&9EU(`pp1*~-w~k_Bp_k3_NXM21x|>?e zc5!UFSKX&;7o39fcE^_Ow|3dg2w8(aQ9?SJ{gjr^61Dh4C7X1o=AlUU8?r@G3!8(CY^EyS#82MFIH2m#QZqWQqg~!;wt_sa|Lrd)UXE5tU6_qkxY~w91Zu36uC4I2# zG_0K2jq9(xhMlL&*m02?F{o`36jocbXdj<#bEn(g=G3Mro1mICxM^QhOIDwz&Ft%X zEvKu(Ta4OeZQdN!woxkgw$a~LN?w|-Zy|CA=xTV4^L|v~?I#FC*e^2@U$A|w3K)ruGsQ-@#r@L_t{x<*({}X_}-Fckp<2v~E6R&f^H>Pt+ zEwVUwX4djY{buv8TyNx~{$>6#r{(;svx4|G0SOrPc?BawxsjdrdDvCW(jlD*A$-kL)=D&u|`p59uZd~L482(SrH2Ej=geC|H-TKqX8Xc0t zk*Okh@xgN%GEEob{}NO^a}*MkD@cc;Ha;mGAhiqnspD`cIO-2Dp_dz}Mbta8Jb5pF z?34nib+rIa`6GJwUK>droC2la^3g72H_@suh09I5Aio<-f0 zSEzH`5S6Sa(DVjA9TQZ8Hv2x%DF^n_VzougRpA7*UOX2*zSP9a2}j7D=0_xB*cf8{ ztuW_n7TGbYlm^WUfErULd=t`5a<-qRm$sgxO7G_2Q=@HIH2jQp-29AY$ym`WcPZli z#Q_xl>_aE>E9BwPJ8*Wr3*DZ!leqdBAx~xx%A9!(da*KW&dPVpWzR`qMqbh%_A6+| zW|nqOON4tnTk*i}K1N9+1^NRVX~6yc)RNtUhy0DOJV%Qf-<^f+4g!&g{fAw!Y?~QR?&3+@`aYd}v=^gh<{k8IQ4)=AF@elN87QP}Y^PKb zz4FF}47weHS<3?m+g5<#?^>x`ZXPiR^~0v2Q*cKk2|l_{flvhz?2VU%RbTcAW{XK| z!>!NsTzn7?-Pnh@Mu*@)@+b0^GZXV-)}zO`?PT2fVlqE&ANjme7>v*9z!dtF#2W-* zW#UBoXiXjspSGWdC0%9K-r0fUcg>***(Hp=r8fCF=_hHb-UeeU)&r_cA;O}TaC2-X zo!J?T%KB3vs$RShM14gDO$UHSC%Gk19Es7RArzq3uhtYEcl0$s4zhuxK%3va#E z!8tOW2rt$|@7WrlzI`EFz4M+9&!{4Mckd?oZd&;H{0nB)sVn4%Z5-*!^ryUgLO5x0 zGP9{Y27f1qpndvRswy@Q>cuAFomv~U4 zi`ysd#SQCSF?qoOI1)3PGu~|~zBI~!2}&s>)_DiIndo6C^NtyblfW)Z9*pr)2GbRL zY4XMeXzjj{ep0I>QqT%1Wn;lVYydp>AB2|muj#&3N*J4L42rybxP5pz9iHAn^`dhz 
zz2guBT;kDJx5t6ro5w_Lyckq2c|<1u5hmk~3t>=%HE1&|tufdP!m62QKU&W>+AWBs zxN@*F{zeM?#HgjuO1!}_#QtzGc>Lx)`#!}U2K$n6?8g~Q&hth*E1Q8S{&}?4V+Z6a z93#IpUs|t{{Yn138AFVgYtm2G*D?y3ZA56hAbnT;pf&xgQI5?8Wv@JPVT}W7D;|J9 zb*;p_u!Bi0GY5vK(^9(&@N16-R-S32JRu2Uzda39K1KnAEXCYfF*MXL!8e*;=_ijK zqA^AYH@sK|0hconRQDq>9uFPE_cMmT0F_>pgm=sXIr8=^IjQASp`^Q+D$jXM>Ym!-Gu#X>W*?gdiVntYvPGQH6WrtKg|`Avr0e2>sD1s57g9TzJet;GQ@b8>EFo zzq0t5&%tY{JaW-&1DSV1n!L^5MpY)=M?FPzQrxZz0d{p{$u4JNDH{k~ zOgv`YjE1D&6+jy6sa;zsc+L!khVp$-mUIt?r+J`p1dkf~$yg6P2M_a0v~`eH_tD`U^JuFXgUM_k{c@VXp?%)uhIutD>3>3# z*Gl1@l42;^|Bn6~TE(WtX+iTFXBxhu3G&b#i?*$R8;z6T@0{zbcPu*&Ha>D0ZzL`++Xg*x>#$2TAP>tDmsLvrNawJ|^sh*OCT z`^X$vfr|G%n7p9`ocqoRFN_HMA;E=)Ipt*B^mx4W(-%FAQ#q0s2k294MlhewVs=E0 zFu4)4>8mb7a?je?I=p^A`}lV{M0Oa0{kw&Dcbpw)gCy9h$>DEbJ?Jwp;4fo8vI9XM zh{tdY?#g;h6q`-xfp8ZT8F)(VPR#|y>b$9VZ5qE9o##1v&RrlslFP zF}A*V#5E5RRz}0{!!qjo!5WtCRfe7~F(g$@68*?m`to5S-m#lQ-y41-am$Yrm1Eo3 zAA;B$I)9GsV!c5nQyFf0Zf35ZbwrQ(4w(4h2=+M?k`$+(Bx-mZjtc9MOzVR%^YT~f zepU*4M=P1(--z&J9k5nNNVAkk1@k| zjnnMzfJaQB_c>;csW(WynM7x;FJ=2BqhZUS1|%*QgA;XOWO76@4Adlp`zMyZyKDus zeNWIeYSoxlp^gFP9P#=_D-hb`i(`gHNS;F{U8sMB?nO7&$Bf`UsRP(hVh0!htOGga zYUpzChky(E_&Q_*ejIa;&Rx_B!B3wsk*WjKb>0W6c3zHNmY+{{UEIOweh`Q7Q@$`6 z$Kb;mc6jto9I^H^0H$#U9DcA6E|^3wFU+= z(&%I71_u?CAmzRe1gg4n8iS&AS$d%hyZd4e-aaMqkP8y=u_oS47^$@#($QrF5I(W_+SEkxkrEMvRwa(`A8U z@LTf?l-4{1$@lLujcY&B)x(V>F5Q@LmZ+iY?OYtTSqc7~Eb*W7kR2UyW$(+}!Rv1i z!{;=2*r?<{ePg7^B9jETG$9l&i0i;w=S0Z3ZjUt*3#s>MBkYy(g@D3isH7@|k1!c$ zztF>xm`_AKR}?+I?Z@l<2*Smm^tMtRRr;Ml-ug|(@6)YF+$$-ZJh&FM921~Y*%Zs9 z)UY9ZDHKW6pw~%poFcS|`Wqj?U%#5j*Nk>JlVe8Q%6dpw*9Ep~r64!v7{QNO6k>j_ z$KA`P;urtpw5xTHY-!mFFD35KS)X5{k@0ch>RlsFIR{C_+7tAz?LL}Nnulj?R^uf7 zD4&A8GZ+d}9@yYw>`r za=J%KE_|b3#D|E);Z*q6evlp`K9Fd(fNY(XjP8yrNOedOT!{lpqLXM`zc$9*)xyoA zvLO7Cg~V^AgunPRITITKgOb(cXV(B@X!05IMO$FttQwtcYfMBB>?ivbQqj|}hVsug z(Bjo2RKko$t?S|;d%{@KQ`SQduJS^=ldkBjXi9H4>?S5T(+PL(YZ`P5K|16FE&gy0 zbjJ)cnT09fvug=72AqQTlFj_J=Bq(|i7^)TiXx-85#O1HkP+c6^n>VRGU2N*d;gdu z-nlwcEY6in>A4{h6!XElkVII~m5@9$cQ^DYvE7d&un;!gqj+U!= 
zG8!9NnD1q}_(*>Y25(UyKFiKAFJ$`3+j?D+dv6lCe(gAUA?FQ0)^b4f(-p{Cx(fN* zH6T^A6Rez$qju3-QgZbh*>`U;-KcvLFTEHGe#(0II^ZpxZkJ0ka^5k=W7{z5=Ve;@ zZ~@NT|AH)`2`C<(Pwz12$cr2LkYYYSf4jNR_O6Ze-1ezh6O)TVix=bXky2W=;|Vo# zUkqojcQ9uKbFIO1DS_XcNUNkj?%i^T%F0`@FZ81^rEVVXkc)w%wRdTom=qqKT|-if z7UPP|gK%fz0{RX4IMgbN+1qW=NuwIv&rc@jK9<7VIdW)l=?~eK{*3+c*NC{vu7$NS zn|L)Y8)<5-3QXgf;B8@1sH*CQ`$u98$%%n-;*ngRPV&(TI~D&%`Z866kqf z1xAEV(~UE%Kz6MzI$kcot!Cq)ewH*|e7=WY=a|OK+G9w1|7@iz1`Ki1*PCecMh%_+ zY#{w_l)-FDBB#=}%Id0f3N$C*reOxlVfpzz^i{J13_nW&zuFvJUlvX(EV^lTbq-O? zXe3c4+4P#rWd7qPZ|U=ielUjbfkN$de3Z|_hz4cgnnrQW{x5Yine`v&G3Q@)VEzv~ zuyEsA{x9^f`VaIl_^r)XOE=|f43=8EPg}tI?mm`(29!9iPPW!%!Cc;pNL$W?wHiG6 zi4*xhjxOR2@ojj~^nbLR7n zl{j}ZE?K+GHsF`aow442ZqPdFjw=7ci5Q-$m>lOycmt1X9mVDRztru&=!5y!eDaU^ z;KsH2FZ!_k7k%UoIN+=7cjQg`Lqg_0CkgXJz@o~B{Pq&S-by<%+~IHiC(jKtL)*v- zle6Ib;*)DoCZCD(i`9Ng55x ziX)!I59!{=lZe_XaeSybOea+v!AtoSP@b%XrQP~Owd5Yey%EJFclTo2z&vQ{OoUfC z9#mICgseP19#pppp?#bz{P8ve9pVC`Ng>2(*#+7jkwf}9W+1Ywkkx(slS$|ig-cT( zQxTi_C|h`f8Q$5!98F$?8lzSatKCIjui8qr90M`PeLwW4+T((+k7$C>VfyT-3H=%K z7*8cXgq=4;91vBr-VEZ!zHaSZ% zk##(JR?`s;xtT=ox-NdWZBD|r^T>L$P^hfvqzxgu$hJwtPMzCStVa=E(mZP8F3g(s zj8d~Jf0+s6^N8M_E*Sl81fhjjnQ+%>pcY+4S5@Dj5*|im-k%)ch8zHPt~lIz+0Qu_ zWlQxhH`7mFU$Cx4uSt4$KgUyc9i3Gghwkfsa`Iyg>4)8UbkT&jOngN+Ej!pnJ$7>G zp+Xt{%A+}S=vx_`qHcm)O{Qa}-3S{Hs}Gkq%z>*H=i|s%9rzoYj_aH9@y^L1VtXuu ztr3)i<3H=N#hLRd?}#oQ?An1v_bZwF2j58RvMwU|aR`Hs?q!lN1^;}o!~;@U)c=kr zE2O>+Ht$}DYx=?%6ZKH$ZjUwf9#w^Ksh7l$nxNHcam*`o5FoML{8wL|($w~R3TBdw z^eGESSm%e`HE(I#?!ElZ>_vD$PXjOXDu6zxp4J_DOhkTVf&c4ERDH4%=)Vdj2kmbP zN|TTD?*KAkSCnCkq#w1&oetSf*HK+W3q*^J@z&q-xUBCKyj{5l%D;xgqVFB_^tvY=gL=V+59^RoC-ER$} zo_3hgv7dJOE2He2pJd=e0<(c9M^4}I0JX$K*x9>-UO(}X=pD|X8l9Z28rUb;8-$zkxQkYaZz>RB(kGc zmnpbV2<5zCc4Wgab!~KEUKP)#T@sGqerSmPe6K{^zRQvo+J6{##bE4cHDPBwO2cH! 
zAaE-=OZ{KXN8dYZtjjw-(ZUM`q|Rv@jZ8d8a)kBqujyF0N)OL@od2^3WcASlThk4X5d=HdZi-P!Z zQ5-+d8+Q0!Bl%K`aFbFrR5>{k7X=ZBJ9v=Jx$H!wUcKhI_C29LTc@K$r#*9$bBug# zACDC)8_9%yl~niQFr7cTlx*Y{AolLYw@0KI$DJR@<`vq6ygNf8RZf$2MaS5w{&Vnf z^>4O7Y8vhheGmSg&&etEW0B zDW;(gdw2%idt@~37ISv!0g;~GMw8t$a9p?s4eW~`I|eiGrtDZ|yqK8bXx z`gi&uVksQkXoKS_$HVLc1k+x{H4g?*G3_^+M&{y)*S8qyj6lx5 z!!uB2%`B43IRp=Fjqo^0#V_h%oC3dh^yb02*ePh#olQSUl~3nmr@0cCn`eQn;u@l{ zY$;JFmO)MXe4?0OLv~MDLgUxUqHc8$iYcmK@E;!gzG*WC{;8mb`bMPKLj!D&jECEy zZy|J~iy2thMBcTlQ)}6ypeWW!>wfQ~4^jqU{)jxjEzyKcVYM`zI{^-lsUY(X-GbtU z739{vOJq^WdPaPk6YbMvpbb1}_pf7g<<2-9;T?rQ&;3|j5CHjQk@!7tDm+!v!4n@=dUyB>+#oV#Gxk!{o|vg&SsiJKXmotliFMEH30E)TX6hj4z@^xe5@w%)s=^l8*8kQO2R1G0A zwZ;m=I~>5ezJ&Te(8P72SLnxBfLjvPGGZsU%m(l}V9%z+hlbQt@JgW&`=_yegSash7?0X}~o$5Rq{3i!5&OMB~r#z;U z`e)Nce({iQ8H;~7b*xc}F`Qp%OqRbXVjf6$kv8RQR2lk99$7cC)*rQDjpI194c3Lm z(Ob0Zu_?n=6_Ab9R?P1MTWP|IIM!aDCB;WilAzc?+$p{vZkL+lp;Pn0Vx2bZ)W6MV zi-o|sWHKuMY=+lgWMFXZddQ0QA;lxp!GEhSjJ(glrA>3N@atvhoTdSWDZTXDrW!hJ zZagN~s6eD-DqN4=4Z`*woL`?(*sKMSVA)hl`@;>Pcg1tC>)8xvmyCh%4bRAv@8Ohe zP{)Wx%8)R3HB?%p(8Y6g$etiGHs&f|_lP#0oo@l#e&1pfEtWCi8}*6!{(kb&ClW>i z4^W3S`(as}2_Adc2Fi1L={?mU=Jz2Ud+|vW&YgdrIG-ti4xvRbKE zaWdRoe+T9aZe_RC2~Y4P}-PBZH~dnYFHLy(O;0b4zn5&3F)&|I8G zl8c_vyNAta>BLfURCgJC=IeoC_)569wG|8XSvoOjEUxd6Z}IBeK_ynofsdt|0O7|# z7S9X+1h!yr(;ISV!yRg5(o0=(0;%rH0{HIM1^dRGfgclPh?9~CoQx2KWXY*GqePZ) zIty`8TPU~+7B(>#_R|dcbYl8=626PO0iI8@Nygm0P|XcN^micGQ;PWHJ2As8JL|Cj zK_EWjr=!t!Er{#0q$7$O=|Fr4kzJNYUzBm^^ZCB8JTw*Z;uo;DgwBxG^T&zmdqrOKa*_5}vRJrIZzF*wTaPT23@!bW^iX5SCn+a4%s9}oyI9Ox9 z8GZ-!u&2MZlL6nO(8VTV{A>%NV2}i}_Y>4Nt|J*Lx5?^TRv?+}3*%-{R%*jadjHlb z;uOe)^|L8{Y@Ekf1Ni@%Fq( zSnzTQS@e)%m*F%<-9xE_55M9(IMZ-jvRt zTZ>$~D6akgtNQp~05bPqf-vtNg5coBo&Uc8#PL4> zjzv==hr1w(>-2wBAEy7RKAitG+~ps`7rJp5{TF>K{uh17=e@&1t9If##|0;9uw-#% zD&NYm0-nTmk&9O+V)WhpuwefHy^)d(8eX?Z?`;>XZf0oQP!`!2oJ*GVdSk5f82U}8 zi}=JSAV=stbuzNWeOJrjn_@Vc{eDPJx||?smI*wfWlsV>29e2*arAUYB)--8PQuF6 zNzvLe6q^~+u3*UyzXLrd-*7xEYXU`q6yqt9zqKrt)~xf>B82Sw`8jCI+)@9 
zop?4Hp+uUX6W6nhq*wkTn4}BYUvy!*!DKR=WJ$fY`VgavGHhp~pdg;W@@WgGleHgn zjI;eg`l5@d zo#uLSUhfkrm)(OR;b*A!t4p}L{}c&*VM=QMs)1^bpvzWLPs_JTGD3GQk#ELz@H*cd zY9w-qRhAzqDf$J*Ymc(K2KPdrmL*>QJq=$k8l{JHEzxe>De|j45NCHyMa%4yq;6m; z$(KBgmO(Ng=2wqP5=~I7ldxO2Pi*ODitxB}JCv_qh3;;;#Ccm5*x+>%zEtoEGY8R3 zNEE$1ZQ;Jc6FR@e3_1)1-3Z-E;H@ekY3J(bnE9H(-mPF>?0<&2BPS_LNTR(=9%k{5 z;E(&|?Bgey;B9{#Lwn4Lb6+(cvvP*sg_LPYy2!!@9vS>o0mh~$$lQs^ApUabYwP+J4)I`jaMLM9ww#$Eh}q`@liVUSrSZwn#4G!7~!|Im-r1i=p zkk>7x>JppiwMW*lyx)+KG?T*mmmHk%`Y@TEtBuDVtYqhYUkOb|)IoY<8iolvSoord zY|hcamK()XXj(aSa-Rl6d%baQ#USY)pM#}cR!q>1Bap(xLGQcmWc-)Y)H|k}a`w&z z=My}V8Dm85zpBIotDPDBZPCn{y+!omZD;Va`aqOK`C-X&#;bDTF-yae&J7*$2zk_N=^%GG$Hw^OHK-aX7(09q3h@#s-svdv))R_I;${UymCwnvP9Pye@wW=LUXGan25ij`$fILU(t}BXrD&B8~%`4#-cDV z9K`q}tFY$B&)_VA!wY;X5ws|z_V6>Vy|zZhLd9GU3m*MYD&c-K_Bw@!UFto+JxyN zK_q{i4rmwIfyLk5nA~DZ6=Rdgjho`oTLkc-vXY6rC<=$8*O3$xL1+6x9L#<)ft_Md zN*+fSk%jZNk;7xnVSCnUkdg?a4aKQ={^4Wt@YX)qW-UTz%GkpC>@w!f-Z}JTk^ocp zPK9ViOI#v77QaQOKor?a%{GNm-)|pi#;ive8kt7EI3~KXlXSB+BVc zriF`ptW8G>Nn`0nn#WHeUnH#P+Vu)>?S&lGn&Sh@FSOCGHoa8;qZJWzm=8+N*5Upq zmN4`@1}!X3!o#oxY7=yT7SwEKb5r~=Z={*t@ZL^sPI_XE(=xO)&LAS2Hc{3qo7k1k zL#_E+1geDuVl1o6myFs*URm&f_dAhOVZIVK`P`JMzKY zu81AA%_cpy9H^cDnfBcsizI0q_Jzx>zBs75jF5R2 z)%fu9ZH#+;0Ad{ZTl0=Jml3o6QVy6LU`$mzRU8dNW6GhE@a^OI_8_d@bC?y)A zfSp7jY1wC3vAdEczx>MYEMah1RRzvn>|@qlKLoYc%t1-il3Zu*F`LrjP)y1Rf=rK- zh3A|=p>Qr|-)(JTA$*r=?W|_+y{{$fydTq|YpQ(XC+g63S&jIp7}1UCVdSlZ6xn>_ zIe6QirB*+MX}f7Q*~k=NNplJn(NxFJE5w*H;tNPDYeRSyznCjk)9H@jI68k?80ptZ zg&W5g(Iw0$qTkU;8-MFU!DLVNgtP=sS5PGGC6nof)hWF7gOnCTWsv+$BJ|=ELX($$ zCvy&%5wk7N(C^DOFzl!Y>1~p5Lfrx-%Y(_=eSpJ#k{E4qhqSEvLXST6#Cic5T^@Om zxTU(F+0#5++xm&XP#x|6B2eGeWzi0qVzzbBDC@GW25z2?fvQJ>?p>q_M65MIlYMa{ zQQ|Lg-H-(Ok*|mvio)jO0xUUEn74njpln{|K#var+UHZcWUT^ZoOOq<{3B#s%UY_a zeV=si6b3EdUYNJQ4B{3|fJM@JBrq=M^a~5(;D}glO?mD~^j#Qba%^aT zwir?MVzE+2m$qHs3j^H=AYY+|VbA(lwZ=Yrc~vN!>JxM;yJg7XKYG|OV;@ZU8ciy9 ziQ|bvA0XlV8ynshsz#CIK%Y}n^UMluMQ4ZS3BU?qeIqe3+#z&MI26DxE4L5r?F0IHs~GVg4Z^GB+c@n 
zxF=;A?s6zV{TvH=Qz4P%maAa6hXEY3y-%i_&LZpD_mF+J*VDJ{69mii{d8gII5yg} zm=s=qMK7OBhm#uPsOi08T>aP>C!S6w*&chys@PkEQ(28sBQxM#odvF05=09gyR(iF z`nV!D9`11M;bN6E+-%#4)??HJ3aDB*Pys1FvV*(~AEJUD zIa&UCE}h-G7|*r;rU!aNKt$sgtaqpXyyxA`P%~ZecH&phqaJVrwN@7 z&)K+xc{tn=K`p|P;E{ z9PG(E4y!V}k*goYUi2QII}&2)F85M0IBo;>s}$qh5l7hB_<)X9e&kM;hy_MN)RwB!mUOYkPt+~se0e6USN+PMwkwgo-5Y}hfWYX1Wg8qF8 zu;$JedbxT7oENl&!_^MJ@d-#~{{F`9nRb~x2|mK?8_J&9w+|JV)wh@-!N@BoOY&93!%?HsN)pVUlm0LLA%2pv_7pqNXi`RSWiG`Sy151=XOo^8kFj zIUty$w1`Bi78sgtC8{3kXw?(}GcIbO-o-;SWWf(&Z+isZG0`Z&aR5|{grj5UKnYla z(NGCH#cngN!nlMEDcmOZu60c7d`l*Oo*ta_^~OD~oT;kU3fyTcQ1L%pgu>Sf@uqqR zIZ)_AryL8zzr&VTsCcy8d6)hw1;QK9>H=4&44>2g}^J%l{X8xc>)w z$cenQ?$X-8GtI8BmVG13&pzU4-Ky)sJJyqF{VK7{TDV$)Cp|pUY*(#dU0x{9_fzHY zWpDQI+%1pKK_e7JpVO+99m+LvFU@pXE@n#?C_wwPk_0Vnv`_ml1{ z=;Lon9iky(Zqzni4=1q?36=$6*4TGcDgP?{QuBidDWHmrlKD z2#Zf{qFLEmVE?#|gq+Nze9g^d=FcgQAXuQ+(_kC zGh%$X3-s?ca#epLqJ{x-mIb#sx_aVdmhEG`i}8FhmFD zgYU_wi59md{I3(>r3HyTnu;KV1>+LA&1@?$=k9q$hr zC&y8Io(l&#N727?I>6!}8Zyxm-F?fjeS+T(SwSefPL6@MqkQ6G|z8DQ_M6*yqpOl&{AB2FVNFkX-% zHy8b4r%pXX9>`bm#SdnX8QjN=^cNRKE=8a3zt{$49k0OW9W8W{;Pve+J&K9%ud%rg z*5lL%)x||;ZN+qGTb)i({tw+-m_^V;2KFVyLDd9tqTl|P zq^ntg*%n!7v)IQnul7>Wgb>JDHWPa14ABR|wU9D?CS7*;GS9MF2>-r4jD04HiE2YC zIX*>&ZrUY=$0kIQz#Dteec@uFcH}HbXXXaeX0Hu;3sMhm^thc+dc8n-?$0wkZs~UBX$VT_Ft0euV2?Wd5 z;ccT=lp7IB@BLjUSZxZPtT z+Knp`l*`q0_AV_fzH5p{cE+NZc`H#4@Wi0}pARk;l5JLpX~YRLlx#bQ4)!}~Tk&T`zPy2i3??yV9=R-j zoq{i#t}spCF5q0@+w7v2kEGL)L7XRp5f>MMdG*+q$1|71-=+sND!z*h&zpo9J;5Zl zc!*x;cBI|)#lZX&lnv(}k|{N1{M0YIVJ5eoCW=+kpb=gAb=@b%_--Rc%H?B5S_d#2 zib%rmD{RhHO6yD`1msbJTr{L9Z~Laaw+uyt!2Zc~2(8)fqgZeWsq=J-i5S=-(pzw-Zov zS^;LPZe~kUuFy^6BsjaM6wS0(WWVV?p)w1;lb?S5ba2NM*#9OO_2*}UaJ)Ev+p7oj zUhgJlHjM&NC{pb~Tf$Rmrz^^nnfWFc1ZdqD9^c8uqT=%~n01lm77vm&Z*`#hi5#tC z8X?$bCzy|a4$=F5uoA}4*jv&iVBV4r{N7E1`|-wtBeRH;)CAxJ_mH|j@r;^AJFQ-< zjP3g`@ogq1kkF@@I8QkM6OKyY*@+tj&y&HB=tX3XpvzDZ{1d`USCaRJ;pBDeXjGw5ccT3WZl ziKe$X@>lj~z~c+mP-!?sevAkSIwbDk_g5M@bP0KVB9wrNGuTc1j4z^_cts0N(#OM9 
zM0&ypNO(9AZ(I7{N3UGs;_C*wKhrQbRubOia0py|L#ccM)`w-{+&4NXKFp&*VMoxX zNtP@WsM|Kb|3iv^ntxyue-2|g&`e@$j zQGReY7wUWW;HEMSGLf4G)5KQM&(q~_Q~ybFrdggAn+lfom2;`z@yXCIt^yX8t3%Wj zRY>%bhy6YcZ2dMr5HF9wYv9b})p1#8zfEX3;~35Q?!X@z5Cz*MYVgH}2e03y)0Z#Q z;gy&t1WY$Mi(U zL*jU|4YfB{5s!u{Ch0>VjPcoodk1IYHYpRj=DayhJ2nOD3`NoQY_8RG5ei_ioJOha z#p50u;jV%KM*NK7X!lRSbFCBkb2o9B%|`RV)+UQpx)Z^VFx^KJtq)P1J?H67VR0NS zJPUyW-0ZCIn$>aZq%yZV>At39tdSjMS7%#cXjVM7#!p7)1Y6vExtkVSX+njUA~udx zGP$Rl=<@l}=s(FwP_{S06oI&SaXpW*6vX1oyh%_h*MSd4_Tv5Uw`j500zkNrPIx9w zTCEQgdv9Rxd9Q?>DQj{3Wgq;#-U4%uFT`chv&hs9H6&?KHKp%YLT$x8R`0Y5tZ2$+ z#SWar2Tg8xai$Q|$TdTnkOpjD$HlE}sdV+nG-$GRB=eu~aP101Y`ZAHOt%B^%fa28 zOz$vU_U$j(vcrMor|p3PeldM~dn+n)&g1HIhu{{)U?*D-|z3e*0a9rdET|xen0#9ejR|<9z!F6 zOOPZ=gms^H;~`gXu zJy^TdQ`37%veo-{K=NP~xzm*hgKkoE-71?ECFR46kX7(#i#~D=Q;{V}e?r_0GiWNa zLiqbJ`pn=4JEg6Ho^G9oGkd2%Nue9=JFpmL>>sDIw-e??OEaYSYvR250&H@2GaZzk zKq7%RP8!$1zWu)=eY-@MX|e;oIvxqbB30Ov;|?y#YLE~SOp(PR&>8xf?YdGYi>}S4 z`3lRiW4$tvqxEQkt^+>3wG|nRCF2dfFA=*ULslM^PWvxLflurQ$iHL{iJu$=gPo~J z?@uI0FPU&OfPz_*9d;j#!kaZ>>0oFB2DA-x#`dUqO$=VUsGeSGKMeyOLi%{m5#-%0 zkQ+H%arsSGT9@@D8j}0yZB0J|ySfZk*YX~H5m<`Ef=n`hXDW0=DBv@5T~MlfE@YXN zf?Cf4{Bchr;*M3y){b4E^`@4%Ni&No9L_+7Zxqp$dva{ycZ_CV(!w@oA$Zhc9w|0) z!LGw9cv0hQ%%zvGued~6jL#{#G1!7S6d@60@k=@g#A0e zhduhALEp7&v~R!)hs(Ut%e_j#+&n-t3WBM<$QO@xNRWvr1r=89#@iqQ$Mq-U6I~;0 zr=&=BS??pAPIiL(y z;ZsnmMvE-<;v}SFZ-|qOUGP5hr7(Y)2R7R!0YAg*bkI)=-lScps&!&)>3RdC%ha)_ zu^-O)GJ;mQHX@yMJiSLNGi(FurYm!jBP#&k?4 zmwr$lhYiNW=w(A4JG8t5kzg-ma)h9SthxBcp1&y3`4~msTZ+9dyrKLjgcnbp z1=T(luxYvvyzab%n;v#DeYpv;-Fynxs3?$~4_yy2uglT0|D2_Di&oppc53e41!uAN{_lDjey_Q%CD!S{2BzBSy@ZirxH@DXv5qt-%QM z&I-+<^ohIULZQT1g;7jd%yig#FuD=GwFk+t?O4e%+xh?*KT`j=?t!xt@jX>0G(M=t zY_v%t3-gM3hmaor2lRXGL#u8+;hQc#Z`C@cRrFteV0s0E#!rnK3EtXt05#=e+my|flw@|uUqPWP&m3Z&i3}KBmB_w-n#O(Uuvu3br~F_d+JRx7CFUo#>w8ZjPH6yxMP*^qHp5J8HQw zF&FOZru}XJ@1lPGH3RYcDiIS^B3-zscQa5(;o)Cw^}p@G_V?;zCe+8ea62Z{E9~atCbTOX0T<*4$U}cbg)ABo6Zl3h@u~eisU+63eMZSp5z4ypCf4 literal 10209 
zcmXYX2{cvT`@RSv$&g4&6b*)=TK$u^&epqmDkiULJcu??8{r^Kr zJ8if5A6zHi})gh%fPln#~qUq3f44xMZ-Vm~e|RDM;c!d!bX&v9d&VL}z1oSf|b zjg|j3_9Ee-O8yxZ1==DaeSW`D&|)fV4|xp33)XX$o^KU)HV5K9!5r?@#EkBW{ zt;51e?%WmwH{qbqAVG&{VbP0e+{U7Q407?|(vFufct}zBZ^L|;ck~Z#KxyuqIqUJG zOdOoH@(}v*`FN>=5e}@&!^Zv7&8(;B3UjPfg>#zag<#SL>Z*~jxcLe=L|>w@Rt+>f z*_*3?I{ zxqEV=xaVhGCz?zNCdWqz*Iv;PuADSZ=rX#T8}~$!D?8tWdn7!Nd*i@+h#U|H#fO_w zs@hy=+odIJ$aChVl#HNURx6IKc!6EAzd=Vo3qN+0lJ{5rxl&OsIHN5M-Sn4n?Yd*g z4~u@dbI?qQouf$Ng2*hN)xi3zy5WWfI;Ik-W?yhyV&=)g~+p_yCgsN!@ zYYGP-;iDq=m6M(Dskt3jdVeV=wf8VDS@{@e@tI=IjlvSn$fi8r*YaZCiql0L^-t!U zKG8Z(=dN;&s(&TVqNJ49o_?5j@M}HqYep$&^~ExtMOZ0MC$E~9J?$Xp^M_o{zOX!w znq~>-idzDY8dY&(FPHMpR+REKIv(ac-B`veytJOfP0TbozcZ7!Kg> z!CED7kU9>dZiiW~ICs3_xD$SSn~jppCR7=>kjT5#;pp*h{8q1uZ~9hY$1@L*^m;^Q ze$rv(zMW&c%Z<@Q`8>`ZT|)gM7vO_Ksjx@S2nIS^h_X=yJ+vW~OzAiZ4lfl*b(Aq| z@=8Tr=MpyS#SgkhgwpdjQb?SNH<)ZN0mnl(snX8CU#G?y6~2?68x@nn~*E9Nd;hv^@e;OvE`A#P1FtsVa!{goU++AEv% z$f!e>!hFzd3}$~T%!Xxk*O)2xv z<8vu=W!RDBrN&5SEu9b=a#rP{sC2bTSC8>&BNa&;?%R;N$@u`jpmz3WB2T5q{`Nr>>a*M zj`Nm6=m%dMihf7VTQ4KVHH`Mthk4 zk69$w>mGetD~@X)QZ!q$6p}Q{@k-DnFy4L?(^a!^8Ak~gzBos!Zi|6L^mA%`!2*l3 zwIKbaDDZAokP?k#dfHP7KW|ATF7YBz=wnXMrklO>Zwl&3CgI+;0$O@s1-?b`!75%7 zE8PEMX3D0J5WgzC>^wqb$NLh_)KYjUohZ0FO$n=y6_P_ex~Qfuju+mS!=g78aIbzl zU8l@MvT|9`@2g@;Q_S z&zcQ}Z6Z+Sq)Yx?%B0gnSFznwe-Mi~9?n?`v zRhy{Xn{KkeGZ3_!*W&Qgc_1Zn4d(@1zz-uUY1>FC`Mmuc-tJ!nB~zEcl%)W=b~=zz z^N374H5I?ioQiq!hIrFu5e9h=(z8#wRe)tY%-itE! 
z^jtYsJZvkoRWc9VzIM=P+i!IFHF@m4kjg}tPQ(6$QS#X08}nOK1RXuOuycnBof)En zFFdTlA|Qtr{!GFpjmg;EX3Lr$V~KtZLq*G{0vjxb6+OZ1?o&agBU?UV?&l`@`G*{8 zq_i^Z%em~!G6NjJFjAux5c{nxKW3IF$sub4j@6L?-!fjS^Zi$?W_q=7NW|6lVn@z1*)}lki@(A(4&K!!LIxg8zZ?2hwpZh2TP|@)$)82y;qe~ zemP6!t_6^Ou_~N!C=JF%?ZullE6CC`6~-l`3QcvNQa{yx;v!m#WlOwZ{?0sXjo}b$ z;(%#uW$C+JEZR&nWxBR5!gi_Sg8S35$){J_aN{)_YF;&q4BKXzWoY&uryC zd)0|o&nQ1mT_nD7AI1b{k=R%{a&Jx@E*ouS-F$P%cTN=PvD`;H zl(k7tWCL^`SO$_UGO$?lIrIFR5o%?(lXESP$RFcbpsbP%lC`>!-1?bxt<}I^4O-As z^PPHg_)sw36Sh3`#pj)IV0(5hY&Y?Q+8|5p+PfVaKP`dpKev*eNHb={IYTqCpr#Yfz3gSb*~HV!?w#a6xyi77;R&=qXoAAV zGV<#)#nlrUVeLj+^jejU;bt#b_nAnN{QX!5Ra@#+u#ZaXj;&Rzby#ONg_>R8z)W;H z0(NQzn7j8nbK<=*O5_HU?5GYB+oTKqpUyE}m-UH(+*-^?XTkZXD!q2B1bzM)z_4f! zy_N2Pg8Y04&{;&ZziPocjyC=}xCTpR8PGi|CqwMlPUymV4EXjA7gjBXx^-u%Xth6j zZ%IUcjUA4XENszG0?pq>n0&OA8tBM^ET)<92Jca|$qCejxk)svbkTulk0P3mpcQ); zQ%ddd;ss?G*P#iv=9H+}mlJn)W%3}r#q^A0li>Em2zZ{_MMd5|r|jQQ^xT{XVJ3#K zdgTQ0IPO48= z)cHT9ZvzgH#dT?%$`8rZPHZx;oHEub(FgMGH!-_5tpoFO8t}5Rz-&qQRt!J20*fj> zbC|^vwEIa4&W($~)}}jTn~N#7x#qE(d)4uCmk%8Kk^;RYE(Et{!iSh45-jk**^_Ru zp8xJro-G5eJ;P?3zb8XjTmn6IVIp#ze~^v>NxI8H7qmk2apg%3QX=Jre_W=Z{G0}i z+q#q*-ndSEzeHf;ZwqRq-AUG|)G+TXkC4sbktDq4GaZ*P8IKtmQ5)%bAZ+CkY%PTT zoEdOCX%_|(1u{uT0Tg~7!IjDa@}wl1HQKq4xqNpeabH_Ovt{H#WobRFNQ^;u6<@Y* zUL9&nDkDz#Kmx9d;O6VM=+%c7WXH(K)}_aclR1*#$ee9`cv={`18-A|13# z?*ty#CrFIlw9s4>f!I+qyP z%8)>P26m@kVg5dIrLraoRKHPpCUTX3kn|R-!;c{W3F+gIL%pW`C&Nw6FFadjH(#Te}A%f*n?4BBF*3Av+l z;klavgt;hSZ0|LCZ^t_}#kGvdcpymydNLR}s|jWMt*GAV8K9v%7b`_;NR?(VtG=m; zEIt{Iu8~)0rjj3=2$4r~;e6bud=H}NPqV1zjWGCikS^S`37R&Fv;4>@Xr;Ih-3^?O zDG-HEuQ!leKA}+1R13!wPBYfOl(Bc@BqUF`O4`RakchUKkeqCeCTr&~i`gT1iob`h zJ6ejT9Q{~hjkoN|HTAr-h=1hu$%$l9mJe-pK1R+in~dNb0e^mqVEgA0vh>zNG8&o) zDfefA#@CzlbKN&JwVpb97L2fbU&(u;5bEA?IAy>qkI#|ldeYnfiTQkG7 zQ{2ULma382sPr#>qjQ+4-`RbFQ7OhOK>dWNCog`{KV!a9i7t=ayE_ ze{$)i>C(Mog8cO&rj71SW|HzneDzBKrYAI|1rpV#`ALWVaJtv&nf+I#ZT5+C%Jh7L zoFF6e5x=_kCa*-iiJ$a#h1o(w`{n>K%8wBL%(HrH#`~{iaJt75NT(yiJRDHkdYL|K%TyIml0qlN3aIzcsDb 
zkTpGZ_ybQt!19e=z2SsJOc1y^Z!%Rd@a272=wKT8uGn;`hpFjX{Q&+h%W_ULM%Q`JNeEsh=j?e!D8NF#=#!U$n4%f*Lo4w}pHA)l6>IzR!5& z@Co3_Z$rf!j4;d73c4C&xH~5~awmUZhkHJS3p3`+2yI$!!=(-hs8Vqj-Zg9k_xg9V zYlZ~(rukIv6~pzyRQo{g!ryV!Li;(WW$AL={(5ky<_Iy^lPi2tsK-^?q|L4K+%3e{ zKIp1yEBx~J7;>Y>3Fmg~#bjO$)TUSovn>}0%VtFj8x<|FUBwt@$?xEbpUl9|Z4SaB z%lX1RKmTG=iX`{XZB_2KW!5e+5vJd~3V)Wr0>4^u zu6glJjD0MDTRqIV$_{^-&h@~2+c6GDZGADyOojWs-$VHJus_$MQ$^U_be5*|iNYQ3 zL0aBZ#_QZy$}9G&;B}AYbDU}lIllA|C*tr?jyPM+@sX+Gz5I~L5wEG_B>y_ZvllPs zlzk}Y`N@}YN<6AKX%>0B2ajENnro_f@6<|pG0_z~Wz{N9Xw%!~rt-1%L(6#U50>%P zDJF5$>Z&+@eD-nl=T~xK6-s$CjVd`&$J=N_-U~tNuq9pg-+snM(*j-7Z*f%3WoUF2 zLmXGw!C;Ub%4yd@-n|TgOw}=PU6F>|Gq$YNrTwU{`+?eo>?e|;j#NqeG}FO3$`r>M z;==R=G;qRZ#$QStb06E|z$FDx)LIL6nwI2(n#mVi^FpK`d=j`C;CE0Lm+Z`x1xAvJ<(A9|hJ9p(0ho*L_b9foE`r#G4 zw=j^IwX2qX9Mk+hE?5lq-i~BOff?#_Wuu^aJ9sgZX_re98kgvSs8s>|xu=sV=xM?^ zvvfH2^cxWy+sjiO{78I=A7o7*`#xHCj~;nBM6J#&L&MRd{LrF6h&(WlEPA|;COI6y z^z~J+5fu?3xB4e>YFLBuRsm% z>Pn;Stt~i9*PYxj8`H=72s7c9Jn|rvi5kBeIy1MBkOPNl@Uwj+&1(m-t@=$*bo8)w z?th4n@)G#FkBb9)$05jNv*E7K$;F)WwEV(Tl72-FvPB;=lCNDDJ)d-BWX0jKWhy*Q zSAl`qh7i5Pn610YMRS`qQ52ogp6H>10rMkQxVS&{xaSalwn- z)PI#bsynqIZ&?s&zLP{IZ2d_MpKIU}DRo@E8KaraWN!Om;#0+EIOPk;os1cTDTspUHa}Sp zyF#LokWP*y9>kTo!$j=%ezWhl%m}~uBo$sa20>3S>=RqgPCPgOh08>#-)b9B4thj0 zpUJXQXH%HBVT8W?84nJX8{vxDT8unhOCs-2B_XQ1aO(I}`oU~7$|tyzoH2b?EOQ$y zNONQYgSsH$?MAXSCy~B;f0iv!eM0=a=i-t<9XxH8M`sj2qZ$V1iEXzrr6 z@EZdxVD!K{Z!@))%47BRF>rih1OEJY5N_Q+2t~%vSj(rkDChfq`oJNSO11=(m7_{D z_0w!vIO7mwq}GPyLx*5Ae+t|>8c(G+J*UQdIc$%fB3z`2RA?1U>(^J3xQL06*=CQK z4S6JA_B(%G`!G4;t4MWkzM!Q$jL4#q7|fG%fj`CPsdfbmi|xy>>7X?+h&}+?^M=W} zQA6nF4Zyt^eR!HAiun~DG}h0A%9t-8K3|=oqB5WT_a%c&YST6Q^yG}-y81=pk(?_K z(|u1wy6vf;znKY?I>ZiZ??cz!vrvI+0Rdkd$>2%_nEL294c?N1^PRnE&z{RP*D@cR zCYF;XpGRbQ>UsLIH^eOJ+A0E4b{MtuIPG1`AoH%5F0W94t>MKWFIS5PE9>d5%GIR# ziUAm3wx(@?3t-ZM3~YMcL9B|C+2jX~Oq{3({0UiWX6IQ+5A`&W4yDa>!`>}q>dJWX zeS6(lZe58nqMM0P*E72LS~03VRKb^bZ_}->7trSxxwPi+M5H7W!y@`=ZvSjp)T9B^ zuU%nZoaiRG4{yTMVOgA8_<-)3yB{xfCW23WIb6A{hYO8%jIbk((J68oPCSWQo% 
z-N6J+la~|aE=d?{K0*?&w+kL^J4DwlSB9F&8}U-@GsdJx3{zKRgZth>rX(T5w7u3D ze7`l(<2TZv|Lr7HoS%SqKi0tUj)`cx;|A0E^%MC$Qbn#jdW^jTj>yh*f}5|$oo`P_IL= zKQil4a+rZ?(NA#@V20b!90QvhHu|oDaRE+n<*}1;Brpq3eB8ziRPNjjz z$MW*13VfHEfvk!s_qby=)*lvynvA)yu3;INzOG?iTZic3sXrj+#Sst&HjxR{18kqo z7e*!X6qGEkg<$Z-JKg(LeV2gY%y}GcAj=o(D0ly9L{ApuMK6ei8*|pHWOT=itQx_3$vtsOCWnfsOEKW7L zBEYxf@kv2C8g1=|Z&OY}OGrLj`1J%{{CdN5Z>9)7d}0avYZSO63Q1(|7*{qHXo7{D z6#n)bhyFq0f{Vjtg1qww=HD#k=r7)ab55$G#cmTQoqhyk6ypWg+NRP@ zjwVEU$iUa0CgwqGAFXh1z(=Ac?1_bU;bwrrm^K*-Tl3_omMX!^(h_KS*_ZBXjDxuj zJ4v{GDt62|2#UVHsdSMMEU1%Ya;uGTO~eK~<|bf5KbEsiI-#bqK50 z2KGk~$$v2e71o`mrV){tE|zcBAC-w07Hy;#^V}d)ejSdRT|&-mQ^8@a9n>kV38pz| zU{kyq)c$)wQw;~{lSL}fV%N#Ud$zIXU-#1a-rf8!OJ&&4zieQ-`W$?xv7dCgx(k~3 zY=n-kC(J5vg=St3qxxPRwmZyX7mw>>VkT&jymDXGbj2gS=ujJxx%rL^Jn+XGzk2B= z5odaGx;^Fv9b%I@>&e|TK12=c!(DfGqViK61|p?k>gF`OA(KdJglf#xd5?(+2jF9s zJN(>x2&PE}vrz|c(&G~=Nr(O>JaTP>k>YA##79rGvO7dRU(yEpqyols`=+tS|)j*bc-~O z4F9I-XGLM@EH!A%&Z4Q2LV8^+5a#p)evD26`^jBIw!j-T?)Ss}i`8Vmv=^*Um4lWL0{5V?f0E}IT$eq>BqSW7>K3P&(`)#& zxMe;`2~WnH_MgPEHk<5xQx3ePd#T=ue?+7C3(f!Aj+67J5Uns@6*WXNr5p76g>$5@{|J45(VoZzY{9SH2^e6~i$bx@@Ig(9!u?1XI2;b0 zYfWi|$9IxaI}1l9%fpj2E&8E;2Xxm|!rCQYnV`#*e*6&+&Nli#tG;QZ4D|B z;DgUcTG*fiYvIKERRFbnLFE24FgSC8lpe_--@`R%X4EvS_*#t#i|-KK;-{>k&s`KzY(HgD(Sz!Had03yZ9HLBqkP+J}g5UGx zvHRV6`p~@>-%DI1&Fyb_9_iv;ik!Titub%qnD#PcBPxMZuGqdh#A$+&G zNW)un@Z>gEIC;MjzHVL(!VAjW$Es=6J@yxK&1;ww!i@PC%Fjd8v=?M$!?-a`{~FaW zlVc(V`Jf&AnAYvcr0YKg(qBdSu%dY<2D(iE{*G^KSr}mR!vip*Zoo`pLn8k3t0&(w zHc}uxozv~FUrB$bj^@05tn+SQPGazANMjK$QB@ANG#X1|Y) z!@MXD)c%x4e{Vg=>UbrBLCsh8@C_AU`C%wObO;1RpRmhv5}USJftj&O9*3&a;L6yZ zv!<%lOnO=t%0IAzSie{}_9`EG43ofhOC<{9lbN1d4-#<|Q6?KRNo zvw~^3918XJtLcMnRqU&h7iK;76}(MP4)AUcRdLkJOL<;trM%ihMLd7$N}hRh2`_PJ zDQ|W`C8yrHg5&?Aio^UZ<$WfVoQ=AroVT47oHuv#dDq7LJ;}zUysxe$JoC;X-o~?a zye$30oR}?zyv>=1c};2sycWqy&c(87o>$Kij)iS0Peb7lFX>-3&(JbSu=|}7uX|d& z>FTgI{F&|(1iJSY^Cv%>CeXfgo_~G83x0jlC(|mLX7=mH9@ExYjNsb^8MD*fPfT%@ zky+V?eZ1yl0#obrgZ!$?C44O};IC+!#BW!*YN{Ifm^VJ^6#r*V9?yJ0&1{d-27cay 
z`KH#zL;SLte|avM?tDGla?XveE;43U6A7IX9;)oWGeUpNeJ3?`ynRP_(6*qMF{hrG kcj)-Jj_ZfUYM`g=*s1U^zo3x7FyH_4+llW9m59*)f66*xxBvhE diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata index 7eef2ef15bba26f49eb7e79079714b5c7015bddd..7c2f8bfeeb78c7ffcd7131dd9a8cca69c635a3bb 100644 GIT binary patch delta 147 zcmdnOc!_a>yn>0jd2(W^rA1mwN|K3%d9sCJvZaY}nq`Wqp@~tdN#aC%?Ti=95nK!m z3?-SlsqrQGC5btOtnLmud;ft1Blv(sYC&dkeoAUFRF=(~F@hH;T%1^zni8K_9G{Yz nTvEv18Nm${EK015FH0>d&dkp%.on_episode_end at 0x14c429f28>", + "on_episode_start": ".on_episode_start at 0x14c3f5d90>", + "on_episode_step": ".on_episode_step at 0x14c429ea0>", + "on_train_result": ".on_train_result at 0x14c44a048>" }, - "clip_actions": false, + "clip_actions": true, "clip_param": 0.3, "clip_rewards": null, "collect_metrics_timeout": 180, "compress_observations": false, "custom_resources_per_worker": {}, + "eager": false, + "eager_tracing": false, "entropy_coeff": 0.0, "entropy_coeff_schedule": null, - "env": "MultiWaveAttenuationPOEnv-v0", + "env": "MultiStraightRoad-v1", "env_config": { - "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"max_accel\": 1,\n \"max_decel\": 1,\n \"ring_length\": [\n 230,\n 230\n ],\n \"target_velocity\": 4\n },\n \"clip_actions\": true,\n \"evaluate\": false,\n \"horizon\": 3000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 750\n },\n \"env_name\": \"MultiWaveAttenuationPOEnv\",\n \"exp_tag\": \"lord_of_numrings1\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 20.0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"custom\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"lanes\": 1,\n \"length\": 230,\n \"num_rings\": 1,\n \"resolution\": 40,\n \"speed_limit\": 30\n },\n \"inflows\": {\n \"_InFlows__flows\": []\n },\n \"osm_path\": null,\n 
\"template\": null\n },\n \"network\": \"MultiRingNetwork\",\n \"sim\": {\n \"color_vehicles\": true,\n \"emission_path\": null,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": false,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.1,\n \"teleport_time\": -1\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"noise\": 0.2\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 21,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"human_0\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": 
\"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 1,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"rl_0\"\n }\n ]\n}", + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"control_range\": [\n 500,\n 2300\n ],\n \"headway_curriculum\": false,\n \"headway_curriculum_iters\": 100,\n \"headway_reward_gain\": 2.0,\n \"lead_obs\": true,\n \"local_reward\": true,\n \"look_back_length\": 3,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"max_num_agents\": 10,\n \"min_time_headway\": 2.0,\n \"mpg_reward\": false,\n \"mpj_reward\": false,\n \"penalize_accel\": true,\n \"penalize_stops\": true,\n \"reroute_on_exit\": true,\n \"sort_vehicles\": false,\n \"speed_curriculum\": true,\n \"speed_curriculum_iters\": 20,\n \"speed_reward_gain\": 1.0,\n \"target_velocity\": 6.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": true,\n \"evaluate\": false,\n \"horizon\": 1000,\n \"sims_per_step\": 3,\n \"warmup_steps\": 500\n },\n \"env_name\": \"flow.envs.multiagent.i210.MultiStraightRoad\",\n \"exp_tag\": \"multiagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 300,\n \"ghost_speed_limit\": 6.0,\n \"lanes\": 1,\n \"length\": 2500,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": true\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1993,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 
221,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.4,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 1.3,\n \"b\": 2.0,\n \"noise\": 0.3\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"SL2015\",\n \"lcAccelLat\": \"1.0\",\n \"lcAssertive\": \"1\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcLookaheadLeft\": \"2.0\",\n \"lcPushy\": \"0\",\n \"lcPushyGap\": \"0.6\",\n \"lcSpeedGain\": \"1.0\",\n \"lcSpeedGainRight\": \"1.0\",\n \"lcStrategic\": \"1.0\",\n \"lcSublane\": \"2.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n 
\"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", "run": "PPO" }, "evaluation_config": {}, "evaluation_interval": null, "evaluation_num_episodes": 10, - "gamma": 0.999, + "gamma": 0.995, "grad_clip": null, - "horizon": 3000, + "horizon": 1000, "ignore_worker_failures": false, "input": "sampler", "input_evaluation": [ @@ -34,27 +34,31 @@ "wis" ], "kl_coeff": 0.2, - "kl_target": 0.01, - "lambda": 1.0, + "kl_target": 0.02, + "lambda": 0.97, "local_tf_session_args": { "inter_op_parallelism_threads": 8, "intra_op_parallelism_threads": 8 }, - "log_level": "INFO", + "log_level": "WARN", "log_sys_usage": true, - "lr": 1e-05, + "lr": 5e-05, "lr_schedule": null, + "memory": 0, + "memory_per_worker": 0, "metrics_smoothing_episodes": 100, "min_iter_time_s": 0, "model": { "conv_activation": "relu", "conv_filters": null, + "custom_action_dist": null, "custom_model": null, "custom_options": {}, "custom_preprocessor": null, "dim": 84, "fcnet_activation": "tanh", "fcnet_hiddens": [ + 32, 32, 32 ], @@ -74,24 +78,26 @@ "multiagent": { "policies": { "av": [ - "", + null, "Box(3,)", "Box(1,)", {} ] }, - "policies_to_train": [ - "av" - ], - "policy_mapping_fn": "tune.function(.policy_mapping_fn at 0x7fda132e6c80>)" + "policies_to_train": null, + "policy_mapping_fn": "" }, + "no_done_at_end": false, + "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 30, - "num_workers": 2, + "num_sgd_iter": 1, + 
"num_workers": 1, + "object_store_memory": 0, + "object_store_memory_per_worker": 0, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -110,7 +116,7 @@ "sgd_minibatch_size": 128, "shuffle_buffer_size": 0, "shuffle_sequences": true, - "simple_optimizer": true, + "simple_optimizer": false, "soft_horizon": false, "synchronize_filters": true, "tf_session_args": { @@ -126,7 +132,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 60000, + "train_batch_size": 1000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl index cd832aa1c3eb1713e608fef452dbe168746e4cfa..83774e73e7f850e929d5da88d68d077099b99676 100644 GIT binary patch literal 17746 zcmeHv2UJs8*Jwg-Qmv?z*Z?II1r;gML5hkL8(c$j!%avt1q4BHP;6HPjgH{h3yLEs zj&-ni$KJ4EFNoO2hLyL^O#%UB=KI(GzxCF8>)|rod(J-l?6c2q=j1WP zDCXcw(n>B>h}bMSj^PrX44Pz0(g>kTlPCZ;jkJ_YSv-+8ikMnmQXh>LC=oGcY_TW< za;iMT?isP6-oQBEd9OZ1%LL`Q%L@WU&;E8aS6g6dVF&V*<0vIa`kuZxiQpn&< zCKp1tsgN}Zli?FF0WMNTwI|I{JI2NZ0u@7IXmqkG)rJO1q<98Jp;&}T@l2K!O=>EW zBL=}F5*!w2!j-awIJB|TVJ=+`PpFKFCympjtSlLuCBR{XaW*a$V?vxI0tg$nLM9gr z5pIc8%*JIhu@umlshd@tj3uqr?}$GzJBKHe1C+VCRV+cRGT^v^G)6B%*i9+ClKN>Z zp^&8v4|r8=;wM>KO&StnDK8rsk2cCoCg#d9P0MW392m$Ei*Sr32Z=$eGXcTDV9&U` zG-^LhxdL$}CSk!MWy;Z(ooTemCJyk!062z^gX)E9U~xD+L>26Y3cN)(1p;{GQn3J& zvP5Y(bdRG@p)Spjm%Dqv9~wRTySuk&Ol}FoAaE8Zla+@LfSk(`$nbv! 
z#CS+h0o1hi>oDLerPDetS zCMN)@e=E9?)UwzhrW66*Kk4P*s@?%iub=cH6=tR3BDrd%tsy|8L4iDP(~0 z6DlNNcX4W4Ku;-+RFjLW0VV=F2%{LtVla{aVG0!jIggS%CePEuof(7PMr^M|4sixh>S9t_g}{OF8Y4nf1v4z_bh$vS@9f28zO@f*gg+5MsgrW?LxL%VelE0&{N*BRFxuG{}2E>y6aR z%g|^kk0aD57^dP<+gQPl%0}SQ2(dy6*gXah*0Ky5#MFW^Ti9!>LrGlUEw*|+Oo8Gke&JfEW_+(LGhDae0G;1D1 zj>Gvtf_FerhW2!}x8KE-Pv1&LKTI5n-Nb?>u*kXZLilt_$ zObYb1a?A{fbXlozN&!FtPllr0p8(-Po(%Q1u@EqtYzhNRrI2ub?gr6HrBrAp6ALsR zpcO|TJVqW59XWuCmcapKz`#XoqAD#$iZj6Uv(j-)iX7cf=$Alr0?n;vlSp|YIR>6q z1orcz=F+F>lXv`3EG;Cdb12tmWqm|VgHeHM&})LE)h_iY#yR9 zQH;otnBZoRX5uvfEpcmF87v^cgV~m?OoiSb4iimdMPi-|ZJIl?f0Gt#I&)jJB)6o8 z!DdM@E-Xfz2~yP7Q4FC1UDPc5pTH>4HV+>fn$Wai)T}Y+E&wP;Tx-kNwRu`;LdO$= zjRM`GB-g$5p{5B84n`XH**Z#ShT)0ARl!-CXh3p>Xt&y)h>A!;aCGK%Z_`avKw)rX zr{wX6(7%ZyIZFX}pufdNqHZli-2_cx#n8n74D$49F{TwfLmeYGS=ZK)6N+dtHi6FH z|0?yPctRA!1XIxeEdT#3k=m2CaW#Mz`(Keqgi=LVlemqF_H_5~YD?k->_BvWj%Fdd z%K+B@1E5R>=9`y61u|{=YXLi0EQSytq*M+0PdKA+ToR{t2LC*Jv{;M<`?jIzlr34MwE_40on?+sG(Q3Rhio{4?;AbtEIL1|H~XfbTBxA z{%xDN|2=lQ$t3=}jcaWUTbL>&uUh6qBfivKl*V)apR#h=M^2T?Pd0&^gDxbE;B&-? 
z;gF8dg_}Pm+5U&CKGzHnC1g3!m8R;36lI50@2VUJO4V3tXGKtogbVp64){(`U5=8} z5lA7kVdDMZdADF!(oiUddpO9RK!ye7gakO;sX~4!DKIWpNgBhoy8zv)qb!LNX^C!& zRXHsxD!J6(WlKSLG@OI1J?3tf%ForZFek~E2C2~#<&c=S3$ z8K-n2{79o<2+NvVqUceyD4i(Xs8PZGIW!Kv3oVzPM$4h|X&fD2n)*q%=}E8YNx$jI zz?a6MjiM=?Vo5_a!LS%s2IRcZJrNMr^o+4GAU|h<-fYJYUN?55e zoTH=|oP$|FIH?EMJz^yhS^&I7cd;U-Buh#7#8}gV&1iHZx{3Oq(%D%K%cN3_7@QE; z>2Q^cVX$pR7i{$onA(Exd}@*jR*xbod8uV_X)bw@7P@3zxOjiA;ve zVVC5H@5H!VL<5mX_>g88Hc`P6pgSxyjSGOmr8L|g%P=XN5>uJJRJujx&-`(_EijBF z5{UsTx>SJSAi3rkmY&JMxh%MtMol1*7}nz51lA|!D5zx-Nu(iki`f)pCrnFrFqtA% z1&0!rq$!4_3B+)6Rn4O)P!`0wlNK$A!X-*#s1p(FLqE1HNHCErga& z&()!N47GZU)IGYQ0aTxnR$t3e##%j1qvXbX6OIY=(9|=T=IG?p(OfW_9`u^!=;hO8 zS3u(1aZG`FbM#K9QRB>7jMHrzr%#R3&!-pCW#-h#cGO5{p(EXC$1z9ma*J8C9374Y z$8sKwRrgp?J(ewd)cQk&5=@k`hD#i^hr_5_f2D@!I#GI2Ho@MYmb7GMF0hwRqM_sP)tcJ);Sg zgam|Jhv}{)4O9`MvOr0UAWeMHy#`w>l=v%S9O=OZ2DJ8cGrE#s&QMaCNBaGT@{LQH z1z!v4|KK_a86hVWLYF_!X@iBs ziA54fC3ur45Gk+?QvWO&OD>0J*get^q2O_l$3RbTHBG(fh$P?=sNw`dLF)5lA}CWJ zqDccZ6LPwiLNMy8ykHu*R8+!Y)7&J)(^z~l*hla)PDCsbN@;@%wSjsLqJ%VJWvKN6 ze#`vCEX76z>;oD2WEjXApb$cs~riELhRQ# z1QEd^o&RGUQ4;K2C4t8h+*l%wNFsn*Ub2!%N6w{;WFY^fav~7HFw+o6)j4xa#dhFh z8MveEq!pFnWQ(OZQxnP%+o7kDI6+jx?{Z=YX$%el?y%ru6NL`?kn92{i=qaF+8LN@ zIRnIJEnlEV1rWbz9Dy9H5|}|9JA7B%&{2c%gJ zG9)YxVlLI9j_Ue3utXdOo`@~Qg^)vV5KA54T2%c4_v{XkLCbPraUhoMkH+lKDSdr@@Wh?}NR=_f<~73qRo3ZVzn)Z8ay{*lYt4Ab^TDoj7NC}%>L1c@4)1;uFS%m75`;OH0F zhgUJdU>Wr^8&3}TZ%nO(_>(l$)W?zLnmJW1$Q8#DwUj-691{TkjHY7*7U@Tk;~PM5 zrjfeG|7v-(`SFA0A)abXk4oeeWJg|_2$-QR*MoqVCs2U&!P%xARqvK1!xS=zzu}Oi zkHc?t>S;$kU@#LZ7=Q*F2z69#U|cjShGum!2ugK8h6$HKZ3u+AowS;%WW1*2FUo|p z(&~fYNmUI7HWWq3(Ma9k5eZN+2_sY} zsH(Z<5wjFz2US_uPZq-O4q_5z#J8h<0VE3WU?*j$L`=ghw6C2|h7)JYp)ioLg;W9z zE*_Ope~`ACHknW?mZS0&&EXYT!6B`|N{FEXO0~sQttTMttZoHmQWt1uvUq@=D%`X{ zH7cmqO;zbdwRJ*8UsOd8WvNjwwUwyQQx7EoK}e`#QZ`h7QXp!bfnrjs9aV+4Xs3}~ z)fI}cVyG5?@<#1`U~Mx#&_j$Ph0Aj(Do!;J!jhy}XlfgADj)F$O29xNn%k{ZkJ<^RAox)@E|zS&4f>!w3a~Ki@HD)lm^D5J+o1l z?ZDjtmkYd9717d21K2k-1t^v32~Hg9U58y(2vudQq%Md6kRie|kubn4?bXnzzK=RP 
zQwWDa2&hMzG*?2Q^Dk*65es;1@B>IfbyykbhQK^6OUOh8L%58YDxL(?2f^nerSmju zh(3q9yCBYx>L&5_C`B}7)k8sUffh@#1rsSxc_<*iY=V3l=2 ziW~ynPX%L-W&=MpL&=W^ z&1g*qe>61yNXKlrk)Y)@YW_pGkV@*IiUTDdg8_UNQZk6C`D}R5o`VPPs=-rXF!i4r z+>R=If~vn%D8I}6H!LbjQ$1QTEaxW-n;onRo7Qp)Rw~p1{M!_hDMFhD8;jt24B`T@ z5bn@a99O+EG1Vm%x+PIVP!}W^N;aYp(=a9;73K`2f=nbY?NUMc;Yfg#8bJ7SVJ6sD z>Q)8y>!#yb|3F=gM5k%56fH!it-T65XiS}5^7N$lKkQ#^J-YO-fdv^kUVCdEm3v;_ zv*P=rz%Cs^_I-Zeso=NRo%L&u?{Z+4`sK)7ua(-#%Kk1Xx6ArPeBhI<_}zSShsdBu z?P?VEg$C*C%WhV8JicVD@-oMw($W5L#l!%s?ooaXtb$c3lM@r%#uiG~2^E1h>A(F| zIL+njs#sH_X(bct8b2i;dV0-$QTI|;sao;Io0=+^Lh`)#r4lePMZEOsL!22XY?A^E#Ainl<99bakjOv9_Ll`EqPkM z$~FF+_c+#AIpIKY7sJVE_B$?4uz0cZ*471sd0P~9PuyQe$XB-8-<~B2c)xee=ggN% z;e|loNVoHSbhCI^?cO)H8Yi~jemro}+^W!xTcWCq_AY*4>ilcZ;v$=nQ67%fpB5J_ zyHEq#01`4VeJUL7-eN8qE;>t5Uo{DkeHb9(98e{jp#`*?Feb>F#> zA?8W%r}93+W%yn#us(_B2hTdtf=9kL(B^k5=|k;=X~&30^AurRsIs(tEME4NzTn;;3$A^gQII}!)!*gP zdTFXJSy09}omNT@9q9M&)3)S&HMGCJ@80=2M(4wlfbwhk+`s2{?tZoKspI1W^83ZW z*LNnae(Yx%@%kox(j^-)<4-eIc|b8w2DwRiEOvwLd0-O&A0(r3k6*{Q9P_>XIUGfL02%g9+Z(AZ+;;}4HK@T;@C z?@us&nAHB-=0PjEo;P>29GtSg=iv=kuHE6`F3cY3fA`Wo-IuYhIO6J~&dF*0x9{q_ zhV9@Yj;tBx^P69Q%jPM)8V;CG-^M(->iE}tbC1`b`kZ!ka9@9KhtorRwv~v0eVDh+H`;Ed8!52?sCtQa(6pf6ZG|y)Ka)!tLj4wCVE$lJ=SG%zX zM-9l^H~C@N#Nr7N<@}QERlY)(&fLGpNBtHeeq1l~YEW*7ntA2hbBjH0n?7~AUYd4j z!;!xBJ0Do?u$V`fo}6^=y~F4l{<|-mm)yPiEviF@!bi`|Ht))1ys1CB!Pe1yZ)kOl z{?3(!JNI0$b*wQu>+oUosM2?T?s2INIiIwFwxPze)6&x!=RO{;PwC*-cqiL1B>k5e zVI6~BtcgGTw9ADhgnw|J%-8a2VgFeAwxPF`FR$V^%Gim|hwf@Gx6g7kqmAtT`Pbc3 zE^<1t1Ja$A)m({{*I)B|nLEiK(tosI-rUa>5tX~FI`*2n;k$bk_p3+$r5)`T+vmqL z{t?pPv;B*nL*(cUH7f_{rznRqJ*|_-1qmO!B|k0wbJA(fo$1BLZd}h>aATi-e3nbK z(N+5Hhue+#Z#o%W6pKv!Bfs7~BD5R0E8Tbh>hHMqbCap_54>M$;IMh{j0YWG>v$O* zCfpwDUV1sz$<6p^;^>O@lSUmn@Y0C2v)x|PWZ$63*fTYsvcKoDQ+qng?{KHa_`ZB; zdpbTPW=i#Nr)ux3R<-7Ktn1G@ZjsqJ1@*A8`Zi|Qw6eYHeEV)y-uW_y)$st$UjI&= zoPYWkFimc^@{%F~cyt?YuWbxsVyEnHnc;&c&Xk^}9ZcEm9r(PP!}BEL6?=OeU>hq@#b~5Q@F&xZ05+{+)S?Btk|b&EpJG#3dw_OC2nVG*C%)Q-F4Qd)MUHbEt~zm 
zJmPg)a&sts&Yg2IaSvJA;Sc>RUT@mqJz3uj56zjl!c=fKFRV-VbouNxuU9nm3A2Bn zA5na7gm0$#oIv}drn}~>@VZlZ<(&QUy1jl69~_z#N0)y-ak2lbkIRyC^e2oOeelp- zTjAG({@Xe@`(Avv@pQxMOS_+3>ih0a#+mNh_{RlfT!!`Su>I3to2(Yc8N9h)@;;q& z;n+aexwnQNJ$G$x7rQCF2mRTJJzFSz?E1ef4nKlZLnAHJ<$ ze)53Q>Is+y=f-vCsOYfN+Q-9PSPl1=#l+sJ@Gp{)bP z<$Zsc^w;6aFk12K9CMqYPWi6mdz+7)aOTdY>&oW?Mlw22O!(S+*2Pf!&QmrO+y1%p zh3=~4iy>F1#7&;(+Hi_BTDJB8(_DYoviZrodw=`5=Ger`^=1}t9kOO@d30CcrOf>j zz2!s9^WuZ6h?G(In?IGocQ$B` z*QWU0bN3JIakkf*7jJ6!uj;jrQzwkBJIZl&tuNr(Z~n3=KX)Co!$f}j7oiKN%g{e~v$M?}$D&2SN=#xHmgN~nimSIzV{CH+> zhi=^l)o;Pj*%A7Cz z@+$iExT9zP+MOL&erN32CqzPzf=dbG74KF^X6ny%ym$Y>##ry7yr8`6u`j(2_DQJx zc;A)^VRhl~ql5quzFY=Z$nez$g^W5d!k5d?Q@HAinnqKkDpL8wA+A)NH~4Yz$WKBa zW8ou;4gutN^wxH)jzCngx*m;Ico5Cb7yidXxZK`DsUh-zAEFAh>2wM{?>~g+ZwEj` z(@ZpA4t)F&XY&`JuD$4^5I!KX<}XAqZLV1rSQi7#?52o$n{3RtvTjqBF5=#>4#b34 zdrGILT?})2T6NTJtupyVzE8;QoWhC`quEntk2BqR?HKRm^Ze+u&x~)a7BOXy3!S!YewBzwf5|tdIWv2zo68sO z+vQ{4+f+1uo@i6F$K`{Kl~v5G!H*)UEpB9+uyyB*4fUGgXz4a+MmG`b#Mo+Pu*;wo zMSEunGS=^L=*Taeb9iOl!;2@9Z!Yzic;Nl$z`M-Yv5_I)%s#wac1c_{d#2cjm)Ed( z?ye>A_Vqp--(HE(*?ps`+ zw2O_sI%v%xb`w+!Je_*L@q3_N(ht zW`RjW{lMTAmcMOXJkRf=xiQAvDIaoeQMZb@QPz=x3ir}8t34W)-!qWu?5@ALI$&k( z#?R(?7KwS+j&^-MIdU1TxUg!`HRCRq$uns~cJqXMMsZAkv%dLtwjuvCTri*ZiA+A{Q@hdKcb@gg1uLR# zmSjgvcQER3`oy#TH(qu%NL#JH&uxd(vX9m4@2$VJbWvVDPw#u_%O%s=s#&I;m+y@7o-kwVge#7NULA9b*>rh8(e3kJ z4Ufb~yU;fB7raO|))#cx-yO{I$M81=&l5 zCZ#J3B87jhsrQ|h`z(N$7CUFmxIf<}&Mhgs>Ez_ddCdvG9q)K^mW@-yyR1VdByrjUAkd zddLq-JXaiB$s2q9t^CC>-5q7#Jq;z{;ZqL;n)f-rbVy`g;l+36DSpW&bu+g{FW&QX z-My_xSN5o0eC)`Pxn2pE#%)U~y<@qja_5;%vcM4mtLLuu^zPqLV$SgOch4&|W-jzM z8#$Ge*Z5W0{^ElQ{eymhC()=me zYxerQICe5FC9wX8+7Q$NnzXE><7W>e~!!;2~+lq>r^ zJ|e_VFFx6QOVp>CVR6S5I-wr-?hdVR74@hOa#@_c;{_f!mZZ&2TzBfx1OuHxa_h;v z#DnB|Z;U4`5)51Fo|4!jZeP)KQPuK?Rxv@tC1J{uca#Y^BcAz$-;Fr&YT|pZ)rUO) zG}g_YQyG}}c28*ZwDZRF--EwpmB#%cOY|)`w))-r+}R_JOF9iWv}EDBQXh|LRsEMO zV4GPfbvw^7S~YdRgxZq}miKe}t=2$q%!wP)S@&O@ul!bRd~k-IbZ}8OuM)j{Ztjk7 
z>}LML0hcVNFy}2A9)3I|i0R?hSH8BWFMXU-O5J{je)78Raw2HEc2`SCDL<9X8o0_?36cyPh#EoO|i!&4euAYdPS_o>*ZpnQiiV_r z4u7V&8}>c4?sMFu+6wZ{MZ0f-(;MsA8>6X@)y0&|!^l^PKQS>>O(fe5b zT*CSr>-yG;or@p7FEVR*|996^?!GgOy-uZ5J+B;IXF6r*#4BTnAuFCOoL=K3IRN$X=H$Ce~T4|2j5&$oAQOJ)W-dW9l7F^xwJX>(#GQPtF>^%gZ;v;Cka2 zLpS;Rv~&C(U3&!SH5l#~v*bp~e$Rvs-RqsVy?Nig;%NDwQ(P|ZG^-g|y47h?)vGCa zYqKmY>;|8#iN1jKcfBKl5{9I?oU-j}`xow7wbt!{o#N=noVYiMPp+qwI&JhQIIcf& zpX=8rOD24c8o#eU_xF7xO4uuP16;c2mHRP5{ruAdNzT&Y<=;xzb_UUTlS zVRie>hl7@zr~j&JXX!Y8qM1`aJ;Oks_K^uW%O`f8GR5(iGm{6E7(K5ybX+kqtYZ1& z!v*(m*`EI-T3meNh5c-+W3!h%d0kznoAHaUBJ|YMw1E~uUX>{qLi%j8y6M|}e%;s4 zjQir^QQvzmN?5z6y7sp5xf7R9H9mYC`giWN$D`v@KQVnhQxh#Vw0}L|_uD2TU3{YM zuUNbP`+bkQgk$C5cizbhgw?Jo&#!DKtCGD-=OmAR*ZASf+}nqrUr)-|_+{rIvmx?5^=kH?+tZk$}rXt;bU>8ifZlXV3yW(!OO~xQbNt4Pk)qd z+M>T8m;GQQdtR@zennX?WL;n69kHL9R^He1m15<#Y<+LD3VY$Rk_pZiUI){JZo`Um zsDN@ggmWlh=C6Qnny{Av*<=K`1v>KoA=eioF>NhBz#!-Z_FC;IZUD#s>+0tD?Z zO*dx%AQ__i)-)!CuT3lYCjdWO`l|3jP5_gOQ63N~Mq(ADA^O@>8VltL4OE$2^!FgB zKL=q#8l?+hHk`|awo}FEr-ZbYrExGw5b?DCG)Nxgh*e)wMt?9vfxe1^N;J(-su01W zbP>wjm*Z)M^n?%4)WYmu>`(#tRg^jjvza>=+Ht( znot#^)VD@};LH7~A8H2ej-fqOsHnfY0bl8dDpSbC7{gLk-?oRL%Iy2$10rjvd6VsC;XmPy(Yf@7(wPai8aY;UVPgw$}cwve#aFlX*JJGCXP2YmUl} z)EA0HJh@n^N+Ase;s{KDN8th$sWXn{HBm(xa@Ybv7@HF*Q=!IYViAT*crq~;$7FK0 zRIXx?rY*{F2^5)V6yYK+l(dBs8Cxh3;F!7q^^!~3JP{_vWeNcrs2vnZ#4@==D(2ua z8LH;1SPHTOFG3`i;#jm;8i`A>a5hh%fKI@0Tg0#|R)#5L>;4O|P@v4{aBX`qn7lT0pz zY9k>#29x2l0Bwm$Se^dK8&21SR0Z-B(Tp2WyOBJ|AEgr=d zC_v6o6V4XMG^-jwg&KDk7Yzm(GM-R|Nx)KMa$Ew{8c;Amd4viHRfDJv8sI`P)xZ!! z9TR~bK^4Ontdci{9fgmU%W)Af0pR;h91C($^T)trB$%8Xf!YehQZ6PA$3zMt0#oMN z*oFtbg~vt$`6p3oViKkhaR?X(y1BVHY0eGjM&L4x%ah5ayf6h~36TdM)j$wQh9O{! 
z{@Q39=o~N;s{yVMyiWuhL8M6nkdrHaOM<)o4nJCqf2{RH=C;RBvpY$P3kASYv}3Ax)Ff96>AtC&Z>KPoxkCG{hsv zVZBi6KB{Sfo{WQw*ixPt2&)x!f>4en zuI|yw9vVv-A~8>fcy@7eYchw%$~i4cLRyZ=;IJX~0fxlUAnRYPfoLdD^G4SHt{K&| zak+GJ8rYItBTtB|7e&<ndUW{r}8W{=eWUrGgf2 zl5&_KZ6JZ7Emc4=O54wGBIH)IVF#`vt$(@qc8qdSL6QdL4Ahw*l-g21qXZU8(WH>( znn@j$;R#of23%PBzzuL!q?SOc@*Yw3`Jl3rs3qGqq!>`l3!4dC3K<6&v!g~a+O{-g z{bVX;8`4~(4i+1AE{8Ni&D1$MXr)COpyvpQ0v>2&J8J6#4^(18-AcuztIJWN7UeXu zOGDlXZM76qNC#@x18B|Tg~N`HD+P^~HjM`uwrCG@%o~!Au&w71>Kq&M0%@ksKx!2F z&U`_(Q9q+qSw)(|7EZ*01-=;pNNTAwnQU3Chy!?Ah{I$EH)`LY&Nu?#EU_1rd<14u zJ30VOD-P8<3MUrz~Vj%1AtH^{LEBj{1YMp;0z8rWYN1OZVN8`^ z%|?JS!=x5Z28cz&J5E|?rm?Bkh`n|V$s;Tba!4pWhcdlDNe||XNPy9)KS2c7fEXa8 z5UNwpq&C0@Y>4n^1SC+gY-o*Ao1k?tCp6%A$n5KoZD0!wiMI$OS(Lapkcb654iB>C zAXhDR6clxV)UXprAXacCJWiwl*Ziy^JA;2AV!0@^#blAN;g+L4v4Aa;sWQ=cVG2-g zNEJXDT^mg;A;!bQ|Js7=-aI@tYnhWoB1XSW zG>n-LQ$lB7UJOs9F?J$p9t25NoQ2DgIyF|4jOGy=80N-8Zj{GC>fXq>si(TEY2KNt zMF?Vj4C9J97^Y&$l2!hwqz#6FM}cWUA!*t?HpJ`#nMx-2NBxNO#`eS#V(GssOlr~H zj#xtWY$nngajY@727U?yw+~i=sfZ}j1SygL3kR2n{09;>l$v5fl$t_nKvf~|qR6(O z$CM6WjS67k0WLETX+sjs*^a5lq%rlGX3TaT5~aS>LQYfaC(^>{@!G%B=V&XnrM;Be zN?JTUUMpTZQHM@T)=Ab)riE*B>0GUa26Ob}biP(&z0y!=tTc$%Q?^mk<8|V7m3m4e zu6DW!ou<^13Y1Ol&^TOL08M4bBKF!bb$|mMrY>;cqNGVJU^ESSN;;PoX&`0rU<`~V zf{~S4iSUc5M_?tX$o3c>gWR!tZNq`vXI=l%~s$vj9LX`BN_}F+=R@jQLCCOTNA_@ zSp#o^&az|_skU%p#f7wA!!vOjTp3b6v>L;)qS22y}fSi!S zfs@?`fjA5fjgVtVAyQ>12BZdpn31(eMO+21WC9C-3{F!qbj}2t4J#RoMI=IqgUjKZ zsDUEbdsC4RxlgnRKv5SsLyqeU)q0W|7|&h+pwk<<9#Qux~NqXRAi z4BQT)Z#LMCE@fOQQm=tNsg3B6CZ1!xMo*p)hNLwH88Q3S)*go>i3iPmrb30j&1 zYv2Re5w=Yr(Lv4WOl?@{y1`0kMgc_Q(-O4;XlNacrzHYx)XE5IPaGo+nj#j$HkooC z;KXd18$9&TDq##OgmKd`Kk~9aM}hB(6d-~6lc}MGAu3jji4(S(vv&MXX5B=cQWA8L zflD2nHS2{oOj#S2_jVqPrYS=(k*Ez*21{2j_2{W_+7z4o(ZDizzkI@$)S)yTrhkGe zWp2}$eG|cdm_2_ujMr>{lrDf*g$;Vs2!Bxn#KOM@q|Yg z9k>0(S^_*Qn>SMQ=mbbj~gOgSu}^ z-C(uw1!y46DUbS3ru?_lLjZL9MGH8}S4S|7URCWdw7`0-zoz>fI$z_tMGCpOnm-w1mvSpQ>%TLqjpDI9uW7BwqeyOwzU*ZPLW*M#N&z!qCg 
z8v$b+&65+DLc)chN~R)WAwB^~@i>s%BE(!+6`HU?K`I^gsa-r^mmCfXr$qZ?+VAv6 zZ$z555)_2*iy)Xr_eP{VF105G!^}oFUH8zUflisiHM|JwR$?Sv5P|gKFP8A#&<8>} z^rz(#j`k3G4dFiQ3y68d0wRe>#??_@JW;^!h+wlpZ;t3~0KBn?VkL2bMcm@IM@2s9 zEdbt#LKbn8-v)iU%;KlRUlxA@dWUV!EBDC>O#I6ZKxZb zVc((^-XD^6Ogf4I?U>pmeHiSbSX5L_y)nVU!$@8AeN3h{BoDM2p4FY_ zLTBoi>f9DG7;piIi-p0>#^`?OVg6K20GJ`%TtK&;LGmC^Qr|bwb;uC+L4QNcSEAJfy`qIK z{0jOpd4|+A>v_K!+Xc?sPak)>{vk8$>8AKg0}kEj6gzL*&_yr$bxe7|Y4>g66^C;d zYEI5{C^TC!NK0yHzN4&Qifd_Ym!erU82#%|t-HZV9G!DjOApk1xP_lt%+ftVPI~<& zrB_}+ZSuhS*)h`lvl2pr(vq&U5B_1;%Q50g-$#3Iu3G%^@zy!DQFDxkjJndL-LP!) zk&Bmh?ehKdp-j;Qo6(XkJ+JrfI&M>r*@F2F^_KKK%imrtd~UJ-^W(<_@sG~z`Ih}= z#htH%a*x06H!SzOo3Kv^XP;M1_ejVim~9C?Q@m|qq)J*mV_=8-Zw4-%X{}n|ZWs0E zC)~3C50ALB$4uu&1`XTWe{e~LnP1JU=%lHxLr)nv#&~RB@y=#P@QyxvHy%Ghzte8P zoyE&^A8wj4ere2t&VvdDn|+wvahmj9m*?$wV-r7_YcWhgxm+kxT7gVVN# z&HUn+TD)i!eOh!EJy!C)!obe6mb1@~zp<&*bjrGO+srDY#;4bg?Ge{?w%hfh+&hOW zh4Vw_KYQ&p;zZi;g5-rxzGZt1vB;iV3#vW#bY5`ZHmuvYqP_A{CE-?+hfLmf{qUBV zGG6Z9jGUkkf3lX9)OyMo)$d(4mE_(KT+vUtC(Fq54f9(VcgO9LYr)>otKs&A{EMsZ zRhM=w*t>Yuq1fM_TzuSpZNd6yW6paX`rgezR8{+Zyl;=7n32(CXT%dTy48I(J#{@{ z@;lYLi0}J!tq*jv7?eR`HdzPfo+;mAy(RGdoB&&cPugR>AG3Vj11AgCd|WWzNULB} z&ci!y*&S%aQ^Ar!1D1Dp+R4iEHLGd9a`F=gg+(IqYO4054l_2)>mg8z39WT zN=J@KmTJSb4PAOM7GNi`zt+%r)#YPsA8DzkY#6-x*!*hiXxF{wH%>p#tely8d%th@ zYVW>U0q$=)UCnufd)4b~H%>Q5WYE25^|#1fQnd2zsXf#8tv56ZJrVd=cDd)};a&Xh zr*(Dzv_iQ}f4Za3w_X18ZPq^?DnI1>#AWVk?A-aziGDX844n7jOr**7<>u!W=eWja z1vt&DjeGnmow3B*iF}(+kBIZA{KC{-k9pqV%*YZ<==I1b>XzyDNY3n`>%#b%cf6j| zU(9?I6L?}(*QCVW9_iOI_Z&GB@+cxh@N|{J`_PIj=@)x>zACx5v+LNESAt{v4d>Y7 zSHB)wwe)aUX5y~omshu3e&^ABW-0C*OvZ~l4DRv1c&mN7-MmvdzV7jFv~sYQiFMz0 zx*m|MPv?Cd>A$%Rt=r+bTOPJQ(j_QIw*yBj&O76JnM3WPyVFAMJJdM`LN zFfoMTv%xViZm3yQhz>SmN!zZf{Ua)dznJmpdmjDaFx^7|mC+0LmB!vQ8GPrY;F-O@ zi>FTUIvt6{myYC`zQZQX4PEnI=sR>-e*P8HwS^x`-qf~NIgGK=+YmPQ`&32x*XPBq zRc=Eo>s%Qf56_!X`K-&XP_9+_*O&;)5oau}ru~p!nA3OQsH4-R=Tw}U&h@O_-5wme zwN1OjuKVjIY_6zpKP3cLhAAALO)lH!ku-;V$9S-1z$&dNs`r)$@cg+OeXF#cJkxHB 
z<~_k6cabrhnmnQt)t(6$?Cu5Xrmr*Lgnd((|a$s=ZO} zs1^2aOSW1q*m$7BvwTMP5O4Q)pJ&x2WS=i8)4LzK`DDo|uGNF0E7!DlYcKyp-sP5S zk$>K|Yroa;+u!$B<;~I`yxX)4AC-UD@ociW!8*?X`=0bg0}n2fcQaVHtNuY&Z1tX9 zyXTx(QNMlodL2)uU-h}L+O_=oGv_Yxix0BxTXsH2=YFF5f)TR1xB&0Z>Ej09ypu{B zd3)%Bq!;HN1Rd#L-1AHI4!(Yzv2}4pd@0T0bWKPtUg?%-TAEh&up*3OEpd&d8eCqz#zWaUphun}sor90A zH491IGJnRtkE)Z;$G(&9p_kphv5B@m)vtDIwmA9l!DW@gqUqmis`pf{b$YXF)ald9 z+f6>7#qM+Ywa>|$hK{?}_s%Wb^R?^SAK^thW0vN-7nUsPbLn%!tQ+(pR)&vQ`$aax zH;72#nQrATBTqUmx!N)6@T0WUO{+uNbt@h{OZ0YQJ#9qFy;Z7=GVM-JlKFAfmtH*( z{l4kb)y(~A&K|F~(8g82oYc|!VGpAM#jN*No`pR$>6KWzTy`^R;>hABj}|+`TOPae zBjIp$@6v^57Ykl1H~A`eA3v=4;B8hqVEWNMHQrtcQ@QVXTJs8Om@A&tRM@>+2X2NW_ejrvZ%HcK>@Gj{8{-Z{EQEZkEL$f$zAJuyWo#{g8^5NM z)*Rjwu)5eK)cLTTdu+k!s?VuMqpFs+t+6?}Y>uN|U+LCR?=5daD(jEEFFHB{&p23e zIoh>%S6$AKK|yJ^et+|(yMNTqrJTiHcOpjKWhf1QbF!@{Ev`eNX%ESUmLUD64N7s+hB#98g3Gp&ygxO~rEX1_g>Rej$XN>P-Xx z5`5%eMQ^3>#-u}-cmqBCI&MWx)B&~*1lOk#4gR11rprjG%jxjCo)ofJB``B@m!7|7u#mP$X@nLI^c6+;rRX8_G?dU8tVOF z#e%GP(K}8bmM6xCj(xs=&kT9r>4zS5Egc)YYxQZJm0rsPj>$Hr@l|W8^gkZYU-doH zq~7gf`Ix~|yz9nKaUL+9HQnI-7A;nSYCLXM|7t&jwX0(9iq7p8ZrnLVcl*V_yi3P+ z^mlkrPIkO@boKamv$CDqb;}72R;FNqr?fE}HnUETXX))K zpI2$uJ$`1c-PwZFqF(!SQazU~9YwPL)s*Ml9GY}@`bX0b^@;TFm$ib= zUVY@cAa8AqnKovZzo#rScSrD|_4&tlzT0N&&N$G=WLo*kD?X`buMB@zwD{V~ee=uM zJ7%3`3t)@kUh)J;bA7YAd8o%=8Ab9;J%)XOV>fvj_I)tTGFyAK*t zU%T!Jan$a+{Z+s2UuJZnkDX@OA@lx1!(i+DPDZ@lI}Z zBkvf;Cj9W|=5VaN3v1zv2PRuDCx=#_3Sdkh-y`RTVK`@aFJg}A?p~%lXCE1Q zprmB<7++Nanf~l@-3_IA@6Xd7dQIyiFg|*;Lb1>+a=>)2cZ1%@(ydp2UW#!-;@yh- z{W0NdZPbFuQKp**RxPP75pS;z_2?B(9(%W+6T}T4=RCu=*z&7n-3Q|r$Lt5^A0 z4prTAEXj(m&EsvWy}5*b{r$S}B}48?hK%I-=Z_kv@A9OBYhu~GwugpioZ0eZ?U_U4 zxn;{l`-HzAdAfybeD#=b`IV^3vbnFP)EDa7mKcm(>SR}bOv*_cbM(&d=5e%dqlEH9 zRvp^yGUJS0XuV&*G9{+i^|D3Ria32+y-7XC*6YR87EC!5zrffkN5Avj2;E5wMs`^> z#h+gOqwcphc&|R0oqEmt(_>v!+ruM$bk?^yELkWR8WB|zlWQF?(Wd|EgW~q#bkk3N z?$|OtcX4kkNy3GW-Mxl=jzM|PTCX7Rag)v5*U z*Z$D?JvGyEm`J{KM_T{;PqS{*74ruk8WoC}=l-F(A+CP-(sLXA!yvz?%YQm7%p0($ z>V9Z~>B-O`@_~k`8U#CnSF|lh_PWRypwd5;#b|w~M_Mgh~ 
zy|3h5x%G!nl4Iw&p<#iv1v1Z#i=7r3*qWa2TskI9xy8OHxaOXOh+n5y@_pB_^`{<| zEw!oBNhwf%TH+k_M{Js?$Bl(iQwz?;y!jxzy)HcV>*AduTdQ37wB4U09*lMjmp|=u z!sI|(htN%fZhm&l{p@<6KQr~ynTmw@#u2X4p2eB>g893PvyNsx`G6fBKgXhEOdWgs z0i*U$W@p@d`f>J^YqAALtj{qRYu-N^+|KP=Ilnq-`NMFBq(wfDOnvXI)=Et%ht>6pcDWTsk6!iNO)GK8gtlVF+(Fra*w(zb^5S`UiN_wi-RZ0ww|G|J zlMmneFAX*`zczE{q2)V-DA@zI^v)!sGT8-D`*kg`=iUT9mQSYd#P6J$K$u zuiM#?%xt~$IyW~BCfIw^wvYH?G}4XvjF=l8HY{Xq&S}#U+G2b-ndj?MU~C=Iw*HcB z;y~`eEy*93b+;KcX*IJzvOMkk_?`5-`jy9KjAouU=pOsfwb;{Qc;U41i{zWS2Mg>b zzZ|}%`0$Tw(~rM8`fT~tlt*2I9LC+@T`#;wn=Ui&P3@**7pV8 z)yv%Whve1E`RyXs9L_&&yTK*X?0Q*1WQPi;mtHs1p7+OQzQqOf2r3*d+Y2eQN;p>^V|HrX zja#k@wQkx~?Ob)*VuYcfWKD-;iw*R|38lj)FQ00YTX2nCRnya^=uY382lsz`ZuolE z1>et$Kh`eISZ)3Bv(K$5%LYB{=e_h+(%}fcQ=cP)R~o)_C>~_lHpc#hI4ZzvL%M6y zur5|sxjL2iP6T~;_^@{UQhdME|~b$7J@1yd=427OYH)IR3HU*t@e?T{5&8 zp8B`Q+%zb-?4jG z^^nY)_;H+xXAHX^%l=E7E04{S-Eywy?yYyjR(){=dOfe z5!|t%V{lkOGMewOq9ZPJWh)NRf=eCLzci!EGW-D$Z!$N+iARg0*p~NXnl8ss=db9- zqY!=Z1g4@+GN>V194e^qKEc1Y%bL#<{yJYcO^}5(009?kF}Ya%g%)s!icqi>q(1uU z26PETN$ROD^uU)Syiso=NCUW5!$G%Eq3q%+^jb|?$RfBH+*0PLFRH1p2hW46>+1hG zN8cBrK>s+2zDxpTwlUNm-C0&KyOBl$I0tOJILjhB!gQ~9+fWx8sQ>W;hD+V47ezs{VpOk&*-^JO(0@9kF9R`zsj9yW0lF@X Yd*r>#ayDrO|J;kd45ay%hf?A6Kk00n`}zll`-E)^<=-7Duu_6Iu*t`FV?i1HZ#`2Ze-C$^SldQ~1uUK4PI# zD+T^jqhYAD1D^wbVyKK;=u}k)!Ik{HQ6^M&>C&b4e|fKe-W>SCL#KHqTl0zY@oC1L zWTlseFj{}Jpm#6eNJ9rzoGS~L)kaD7`%F6FPZ{Us!*SrgZYEyqZqjpNndG9{0InKY z1$I9d5Ls6v65QQI7JRItcz-IWy%3_`gJbDZrGxO-GzvDjd4idL0yX`y7VJ+KQ$2QRG_4);`NltVx7Pl{Lg=yad8;GH9-{6Eay{`p$7OcsgjpZW{?G6dtEjBsbvK z4|yd2wGcT}H%99u^e`&qBS||lL^3-MkpPhdqZ%SJ}4T?)4v(=t&R69P&i>7Hp|viPpji>nvycU(N=`~`PXPM-zc*)GYBK{ zC&W4G}*RX=}?TOAuo7EWk~@|Jl-@Zk$oo699%4}YScj!&oOKkg#?Z6~7m zr`hOdTSa3`hN#e+LDp)^9qKD>4SE|i=&#;Mj8FYCY&tg!3N8tuZVqJ>d^%a(*Gtf~ z#+s=Y&Y%*ZUaWFP7*W31$p}OxV6u2JN!e6`pNrzb@xC*WZ{LTOb%L!gbk!j>B?m7r zj%S#r0&FTT2VHk-e75oioj$Gx+>drJ^0AZr*y+L5xn;`SQqN`jb}ppLm#}1oum)&( z1NF52P9nxF>E94PQYUo^I=5T{`REZUv!E0vPs=5T7TltC+Tu*~@FLu3e~-%i^rt=I 
z*`&3qm`-zVB>iT^n6pL|r{5Pqwbo3W(PvCHR+p1%yCFJrVH|n=%nKqXWsx6qrxG(0 zDI7EIB32m}$m{NQP%qg>TIRNsv-heXe(E#$T$@U_Ef%69t@|-(Ml-yfJ4QFS<)A^+ zKKNnynAv#Z9z7u~hn2$CpyPHNOa9)ZO_Br5E&o>HlBkb$G4AkW;zaD9;fTp|DruR| zZH`8vGWDpR1tC@1^!vW^pj5@d&WFSNTNVvAY@bmIR{g?2jWx6D*V@>&euBS}^yp4@Nia!}mH@NcY+Ooaz&u z)bg1p621zou5Ccoa6qqX>+qtgAiis*Fwn7&+*X&P7j`~`Cn7rFCi9u3NZz5jZ7ZN{ zStxd%8KNVaI#@0jj`pT&@#ow)+*`C3@}F*JkLa|M@~4MjV6PZl=}TlHyXHgt#!ULK z;~o`UoP?F8jZ|A~3#<&&Cb1V@g5zX0{4`q!TE+uFwXF>8MTbF28OXBCJ9KuXGo}SC z1=umy;^Vm`aA)dsy5@)w`iu#}%LzBY@MjwG`D`WBFOlAoM^e@x1#?Qk3{&z)1DsFHAe?p4_(i&z`DL&N z&IPIB$2Wm!r5cHLk0-XW9eK>2*HZM-*F)4Sv4OC3bhpzF?#!vgD zVAMJvMQt*$x#10SZGI28qw_i}dbo`1I#Cl{_J!fq=~+bg+&+3NdmFevi^BHnPSko; zH`;AXhDWl0sl3B$63kIVdAqMT^raZA0#D%g2nmk&&J4_}{ElxX#zIk|BK8ZI6Q_P5 zxVK9lUg%80yiOrT{Lxj`;rS*6K&-Bmp)2QSHbsfrl4%=11%jdNT5*x z2E>1YNv9*QN-Bf~Eb+q1tYW$#%7;C@*^@ZGDaO{GOgO1D55`(mP{Fd7p>6Jvc3A=s z>5t#in3kc8(e^E{MC2^2Q#gZ_npLVro~i5_Ic1tkQ*jWYYd# zdj9t$^!4mz|6mIHeSsEt!4pF|bXFN_=cG`_l7rY8Z-y5hXpkAL>p&oHK4@Cc$0(<7 zbhO|Eed9vNilSle=KHGV9PK3PvHmn|k$yv7Uz&ttipfCiR%6_&_po349QEAvhwSUp zgsj#kQh59r8ToU75|i1`u+t9C8>B=1QGS$?JVTD8rL$)bUZb0I+8MqrE)ZdSkvUN! zYO!4N4wxOSVc#i^v*vl7=oLIUn46sk==BnV8eeRH^H;L%U8gR;#w) zwCf&Nmhg$OjUDH7L|;dbqg;~LkiZ`2Q=xTXqr}+tDE77cK>uDn;-@?lcaRYHzPJK) zCv79%R}QiD&Fz@CP84OfM+0A(5Vbs#kKM~O(Q4u#owp|i3kHJWaaI;-_q$EqIWK8r zYc)M`P5`og>%z661{z;l1vk$u04B-@o_3`(GBSU$Vmut?4E_d_MP;OkD*)D)E3xT= z5H-JO2z)#8@FinMp3Hv7Ja9>*(gU`*GAj=Gb}hz}vyNhD>{Kf2G>=-3_`&OdnfO*e z2S1rt(#ENWITw9uh+JtZowp<%(v5WK?$goWl`lt^1E=hfVoMeY~lUdR^jMNP`@J{6*xg>^8*}Q%D-~-^Sh{30^xv*hS7mH2n z7+HnAke6M}jD1eS^n}Hz_gNi|{PKd8Y1T}Xj#)OfHQl6T0F8SF1kX8aU3 zU}zN>r+=h}73}E6oX7P1pA&)4f(s|6kOLm;c6Xp)>r%{QeOlGyQlf zWR_c~{C`x4LVW0Kzp2SKF=D3!OdV&4H0#vuH07$bm>j!#*UVyk#B_>cu9-u>p4rmt z*PF%HWtmkNUTPY**=@FFW29-5?o=~{POe$zOogU~X$8#>M7A`i#Q2(d?%6^4d)Cr(H0%k&f5#kcFc}5-tb+fsqkd7S?HYb(79eK z75;xq*uF6^!sq{$VG942GVed7%y$b_{2xK3#4F|>N%U>WYtH5cG1TRp6BP8T;+&sm zbkZ+BaG-oRcF_awALyb-{hUx+KaIY%u)*_f{jh6ib@7VC_mNXjpCuUY}Hee|9MqS+IZ(Ok_#3>?3+KWt2=C{->85? 
z-UHn3nlwDHMjJ*37Z7=q9(D_V5Uie1hfZzcRA`n4Duqaq$c=80GvixJb=)P|{dzHN zoYVjnZKA|iEgAQ0mV?5uGMvdDh~K&)?!~)!L zosy$WJy<_o%IdB!q8+XYv}DmCQX+JY)b8Ac8S=s~X|xMI2Aw7|?R9wPI>!W_HiQJh zL^`(MG--%jif&a$K=Yw11g9Ke_L^#uk+IX9ooz=XzKbn$j8JUcfX$FvG);q?8iu}M6;XvSsyMHzJY zzz01(SIOjsSJ}G}L1->J3+gJjLGgsF7PabC?AGKT#FiZeyXW7DM#yp^FfE=Kf^lJ?BPNyDprUa!K9iaV1+K<4Wp)Sk|4`3ST9Aa- z4&I^BJh^bSU55A+*@2O2A__RP;)MG%aN)WTBrh`PgzqwRvbR1eg_bbAKc3PbS`5xs zkcT|+N%W1zW+E>Xi=$hnw!W53qpKsW>4zh~n5>td$xpjR?A%-cXXn)6_ABF{I;09I zlWwqiQm>gQXQ~*Ls~lXYdJW=Uo1w^41xS(`p$V&gaGPT$GSU8kAh{F13*E!4V|Qs) zNj6^DeG07vPm*;VK#h7NuvfneRi7Fn zQER1CIyeX~iQlS`HYzd^Dti-m?^GJNG?Rj}X9K=U5nNQN%|Qz>|4lROrQ9TDeLa=D#|GKI3MDD4Wq1 zFAm5{@lz)iE2iV??3UkK^H3(t7w?@^M?F>wqP9rF&DPDhYuj>QVz%O?`_E{6$vq6n zkcSYNB2a2IgGZYWFx{cQ$fH99%sabL;m=Bj-i8+0+j=y5Mjtt4Xo-CTI_U5JhVDL? zPsDq!Q*)bM;u@p|*~xu)Q9PR2dAk|%)Jt%2!cF!ou?59J145sApeTjePaavuRB?)JQ?rl`=ES>HMJ77#q)}}cpyd!biPJoR>%vg7MO=g(H{7{Up`|FIU0j5Jh=_G8g;}G{@WAylQk-S)-4_jM) zg4tVZJi1sO&fbXzHH~XzPj(6Id;gSJGpgi)V?50e)`r{tCD=S=6I1B75k9Y#B!&AH z(~39^uvN6cNf)QVTN7XM)TNL5tMS&D=@+nDMUEK0Rs{NRguYzQ2dmPjL5}q|x;c5A zSz2a@A2;k{&eu*MXWG}J-uGPaS>%F(UN_*%)hd`Tn?wo**1^n*9%4Gz9T%Hruy&76 zfaZeZc=X!=$aCKZ6pBJ!0i%2`!Q$WoB4+<+!{rPPbgq?wkYbEZAaS&gv#m(6BGW8L`)}wnw1`- z4-Psqc8SN(QehU9WmV!r2M0V`A4NqZ*W#WXz96(s2+!O|f_aO0GOq0aQFl?G5;6kt zc8WdJ44hz!Tqd^`dj^uQje9|S*Br2MnuAHkA4!vWFzMR%lPG_T1OB~Zw05@$Jp0Ly z8<)otkNj^`-|GkaX}><4(sIR->6##Xzl@^i8FZNzfa`?^sg~aZ8s~a|c`(>O=NbiJ zfQ>wTwc!#DPK(7~&(6Tb&!<`a>=+c!O{Xd`^4M0L3$NWWV9(MR$Q8K8{+;-Vc9q-1 zqVpG^J}U&KXXoPKi!X`))Jih8<}6t!6^TvzVuAZq6=Y?Wf=*u?I1+7Ge=8Q=U(X<0 z($g`zEQeKUR0dVH8FuabffoK2e{}I7wl8jx=avHEn!j zi9@wAt)mU@7>L${W2FV+~6M-?TwJFo~^e|cePst)utO~9|dHKb&o0P}0L z7EIKijcJ3Ul-UqX`e*y3cGrd>Tgr%gKa@H1MW|FuI_Q zW-pb7fQjeu_mRce-B1a*dM0_`FN*4mh4I0s=Ww#EoaFJ4--%y?+)ke}ELuFk(KHES z<|IN4F3ZEwnabqLQ*}mCFB0r}VqwLNPLj6%E@xh6dm{U0vSR})gPc+)C&S)M3N507R9%BRuMXBWtG zBS$jZtdGuo;|mM6ouGyy{-ozu1zq=^Ql;*XxT)zaBh^eAjzs+M;~9`z>^RC@FDp%SS|D<_AaJOX0a)`px9&nUX?)8`8=2rtpX0k$}qNW 
zI_Bo7!H(2rXeKia;&$hNNsAAhO*%~rFG!IUIZJ7lPAOb&P=nUiVZt|jfGC_UV1HkT z#gzp=sPGqY$aITht+a1I-&H_gqnp$;HkpQqO(RbXC&Ba+0*t3aBF$9ShCm%x+$h^g z9}9@Wpr#}dbhN_!$_mcq^94}l)4_V|QpRbj3M6PwAtp?)$Q0+TbzC5;+FDy1UWW@f5kZX$7qQd!Hz8If?I? z58s}I1TT9!D=!fx3zX?O?W-uQ!v~kfMaXi_dUATlCe)(c5GC`RuJGA_D|4;LwH#Zb za!Zk%R9}QEzg)oR*ca^b^+sr36a!C{`tepo7k&|*iI)5uz_>{nO;u`O!;~^$Z#{$; zRwXd%X3Cxj}VD!!T#CUG4LO#$ zO8*e;B`c{*NG6#zG6(Iy$%4z-P3+#gV?^R;HP@N*o_YqXVGrMKWsILJMWyf}vYIFP zGEbyXXTwDDs@{-BVIGkm973lTtKsg-DYSL07-0QX2)$@c(sQ+-(!CM+0&mg9aXO&m z;EkH>7N}xol3TyGlNHh#aQIOo|Y1*jn25uOc>fT z6WP1Zk;=bnLiImtLd`8y{lA5g#y`U7{1v;FKdCw`Z&q8iK)qc{ z*SAIH-i}O5q^_;Ga=3=M_3p#X!=4r`QG*9+5F@OBl)|?!-Huu+C)Y6-2&@y;b zw`IC8r)9H-wRxy!c&L`w|Kd#P|LaU?|67F4e~Qp`3)TCli2ry@`u|8GNj|;_m*zs- zS0@@ZaXYlhSCYs_{7@$y%Vry_r`vK@z&##8&OIbgw2QXEW4#D8USfa`mF0M3)eEzl zGwF-NiqPNBYlTJkl4ngksvo)sWDexQlxH(QID9c;`g8JeJs*zm8^)zWYw!#|Li(y1 z?A1SUbXT+{^*>m_Q`_q(XSM`}{_RDstOG3Rd%!k5^rMdQFS+S_4~T5`TjHd$9cmVu z;MJ$b@KBSFjNaMIhW)4pkH1aK8liLagS7|5@|N)Xm##97Q;gv7r6k<@cmepWdPgQ4 zorOh`6Lg?NTG+2CqXcT$l!8hN@$`@H+o9jhQT*m01mb&@T zJqvD;$8r~2Jl^idSvMuY`|D~lk11z6n$$?_tF8E8k_>p-$3^ZbH$Pxp2j>eWd4&)TSF1=xI zKnC~n$0+`<_gK7(vu=`RI zUGX*%^86COv;P^LXykzNR~*2FvZ;`!SHzmU7QxU20MIkQ)V_IK%% zopG9AeETaY*}H=Ak}bim#WI`;*=wONN(4)7LvXdyHniO4j4@_<_&&%Erp`W31I3q8 zr#ornV$ubAHL-y>6iAYeQ>#(kBLObEtViEB`=Ks%AMtsjLpM4If{vUkv6;+=m3c*s zeZ>KE8<-194_1K8{oVA|(nJVL7AI$77eIfWGz1O(XxZJ6Oqt!ij7fnUoc&o23U9Na z`dTg6mA&Thg-48(l`9AsZ~!Lzz6T>dNB?5dq{DC&naAny#b z?YuU!*3(h=emZ#B=8&8E49e^16T|j6n3vr_eapS6@9iq)+mdax_vHyz$Ycm?^_yvf zojMiQ+dz@oZ zY57Fy{Z(+k=*jwp?}IB5_9VfXps9B>XMFoCQr(;mR!=Vy3HpkL-akzzC_JHG&E7J7 z_Qz=dzUSmqRTBH;)>W`VXB}V#sn4Nq-{B*X`u$X z2~Wod8q?7IyDBXC9YqG-SCP)rW=61Hh=_;WpifNls9AX*p@t=_>kA9gdeV@dbZf@; z@!2F|G!DG~u3)-{@}cK&F8U-^z)a;@Qv7Vl+_yO$1z)67L-$^GujM@|AUi7_*Nj!7On?UYc}kz|pfPx*i)B@@0XFQ8rBPWfIGJOP`aTUb?8i|$*?K9u zrYMqa(IT)y<1rbX(Zf2PN~iBF@~BFx4<7qfiz1su@zTxd(0O_jqfs{r&z@RF7D&$k zC*f!?C6_t7hc05w^8tD?z=D14UIVVjZxV%DF2H=b%BX40ZjDzy$Cf@mO%|wa#_i)G 
z)Y8KO4m!uvosLCJ#)v6Vs-6cQ3fwU7*A`H;IYLGqcQEo@Hn{ryeRApiO1fK8f!=wO zNhbL2K)c3pSTi8+FYjTXB_b1L19Rivu0`hQCP0dhn53- zn9&30$<3b!2(vB+(l$7gO!sWYx?Ke=BkhQ!i6bo@pb&NZFlDTcvRQX+8Ih^Q?8mK= zP`q4eyxF0=cwV z@WeL?X6&8Ew)l#-rrvI)WSTQ-COP&tkf!Ivkcwhh+-s_}YGmWGS`t z3^Ns^GUas7tt%uh(vSSnQ>B(Y!tld+2Qi-zM^>pg@f?4^_6vn{d*gFZa8#!Q>656f znmfE>B!EiEgPbbBXyQIhN;iR<+Y<0^NDLLN{l+=_(wUj_`6OvcPQroFOxQA+=akZ! z_e*2R zWp#e$t^&{1d*KU*SMicCo8GeGj4d7zO@LO(LTbdfn_Mup#Y)a}te<;~%KW_ty1adV zZKM;hr&Gvgoo{&mkr!0YkU}GWM|@>=iZqoN!!H|aQrIH_EyJIwkWn6Odmsn{(r?M7 zwUpHMXF}rbIEWGbOMXoZhOD?|Ht4SmlF$!Ra7zM557-Ax*eIzrhZtlriUDrp27Lh zBn^FAo^YftRI?{12jIswrg%(474D^cW|nlEgM}Fxs1JH@{oO;N))7R5bIUMDvjb0E z7ef_YZE7;}JU#e#5?n1DBv!6dut#_igw%gydpaiJq?@9+@lX}h71hTcK9`KolmJ$R zJz;ON*6b#~Bk0oKO{z?u;j%RwP+Mye+>^5aI|&Pro{&gFrYAu4E(hvSmc`2lwq1-|LFSPN=2Rg;WjJdPH2(=Rg&_lu*mz4*S zh_(B`@u49~ta(M9b84Z;ISq9#y3yaiF4N^P*|bi)fnMO+Vvzbo4DGne4GSuvK~CFX zuG?W~lq`iKjs9@(@-%2xo{Qf$?I#?UWGY)F2oHGw?OLUF)SS8+K1-EB-=`$HQtJkt zoIL}iizYzplqgIM`%H>8UZGym6DHheKOOxWLsK@Vky8zBBx_?9)-7JoOHFv-Hl`BY zbyz0RrJlU=iAS~ei{#}{2G+}{!SL@0Mlv83bgmvJ6Zyiad6g!|_n`)z+nq!etG_|) zcs|>_c#sW!a-WPok7k$IjnF+Zu~fa-i=_5?;?lO`#PM?{5k8Yme0tW?)Y0|Lo9j(c ziY~#KZL>-F?G$*ob_SXlf1(u&Lz(ej{YP#bhcrCvIVWGt+?Jj}MBNZ(B zSrL|64AS^R7R*=2^;Epj8Z>Xl&`#MQCd;RbdQXjHle#98U(ar$_Y_59XTJz%L)a*z&@F*8Hj_RQI)yY^yrOCN54$ieKAae#YInM?+ zQ3J1XHnlMx1pBv6MyiGIWPh&-88omDeBxFjef<;XhbefKy3@*3laZn*|px&CG!aVyCop@YP&(*QRG zm7&k`d)$vSk30QC>_b6$tx!)|`3E}y2LHM%D7q2mEAVOCIrS|X<8z}=)!SJK^^N;v8x0i~vS>oM851F>( zQ%T+nUC=($Nk7LKVLnVFjY{2^T2M^8R~uq$+9mR3mI#dGEJp2zf}Bu;@KD45%_Y{? 
z`3Hs={mT)I|HBbX+(J$N8-|$u14GW8*KB^gRI6EFy^q<ZTWW`zn&Y9lIM=&b9m<=&7aeIr zmf!U4)DDwkg_+IGg`Uklf3upGEVpSgUsGwCIX}8NC|R%BdiiIQ<38%m2453RL(RiO zIsdmy{69#9`L`7EpHjdr)Z%}Th~>XX58jR* zMka9U<3~m;G=~;@H*#*;hOr)Xn!MJ1I-ZiQVcr|1g7WHlH0zNjo$HN6PjoieHOxo- z@{h#wG6&sO>}LKP35I3v89-yqQ1zfP`{l7CeryWIv4_TV-`n+c$r>d(y|EMpSDScg2(zMLuFbn?)6}q8LL!5CuTXue|6+7p~+NNb%66Exq>r&#aXa;=t4C& z9wI0A?kC=XvRE(u3cQ>qQ8HZyI}gS%eCyX>nXNpTe8rA^(Eo;xH+13TUOAMkTfr{) z5KH+#Eulv|t%*(SMUZWojz7k-@O8xkxNh~FC^bi+kVIjNsFxt_+}j0CS2&>7@s2KY zJji`{=r?^aF$HEB0$IL24jImBk{@x9#%>9P4=xe(>Fp=<5_15KCqH6Bo@K$9fB;Ae z76QL=DeBDUIl{rCNj`cDkxa!ulDJJd&sMg%+D=vq_dB;I?`#vOAM(|P^lHAMI%+15P&Dt>TvlfOU zC!o;#!(DWJn)nJx8QOQ3YL zAH4OC;Lep9kYH6xcNMy$zg`hu$qXg0TI0}TR|5C@D-E_`rZm1vc*v$6n@CqLn~ZZl z3(z%j*Vs6TPn;;{Se)T2Mmn`9GnAh~2Q@~C^_!av-}S@Tn|PWUT@#1Eq^UforhyLK zyGars{Uo}<>)FR?Sr`+Tj+q~d@KkXzb8+E*PP218InvY0sw?<$W-95zjRFZgmuQE> zg$HnnTRk~rR8BY3EFu-7ib*p9aL|5$Q&DUVKhj=Pjk_sC)pZG3#`#5`_>__OV<}kI zw*s!n>cQMgftb9!1_WIXv!(Uh8OvxLoF6JfF4@h2vo`Xub0dfLstMw+Gd{5JOEKwj z)+fSeH!&emmzc7G_Yf6OO_rbAh>zCn;Lhcn3j>aiv9DQ9wSF5cAWw=Y-2Y2J3 zelp#_X<}mjju4ToZE&>l9jnoFhW<=ki;5Z#asMQ7nEvAsIb0V5f)6&rikd8RFBd@H z=YeoBy`4T1@5cML^wIaRIP-@S!Ad11`9ecpr*}8L8%mEdg$RMBTrO}Gl6+`oJr395VMm+ z7%zveEdXTCq1lM3gl@U zS=?d_Wrs@XzPe>3%%=hNtcZcLvnGMxaSKq2okugSv~tvHG~wqH1Nt+1GoG)#&z31p z!=o<+nUV|6B<-0fp4YTw2O~BZRvSn?dD`hqw>uNjDpeG&Zgn9i>N43c^^N4lUU|~Kg{mSOb!zdzoA7&xe%k0gtBBWyQljv>-+DpjU>{zCGt~wfLZ@}>Jd@5IW0DJfMkvn?f8bZ{T@nQs@2EgI`in&&11yt6&F(+&(KXtLuB>4!>BJ^O;hTpL-P|C z+VrxQY`z)>5_fMgN3`Oxy6qIbq2@p**@(eTRcU-AB8J~(Zqo~;y5LghKrct?;@Q4D zxaOWuFT0xHpYZ*tp{t0|T7C3@);D@D@DQQ#ZruGHzNGY1F~&s?(u29lU>bITT=V{YGx)Tj2Us9-@*?#E4x&uz-68?u`6q zx0=aAdI^QA-A8atH5>!(m4RFJE6OLi$T6wBsp9<3#=u#ukWY(Z(oX&1a zgSK2r^vOvkI+r%FzRY2oV>X%j*iZ*)hGvj+`W=1o$dtOSI!`B?%c1P6^*Dji<_dGl zNLjuhp6W9PT_Znos4*F>-}{jzA*y8DQIDo=aWfZm9OI13%i#9!sq9zXRI+o=RXQM5 zN+QLz;D}jmbGAb+QeA}@(}b5%9B12jNdx`1Ba%@6NljNyo6Dk~K& ziO;57r>f>2=p~j;Bae5}p0n$*V$L2?=HmngHTC3|q$;geUk|ssw297c738}vO15Nt 
zWpfMWp|ZIGTss@b5!IUkmN$z?zgQw{yrF^t%zmm=_?mbIABMkM8;N+?b6OCw7-S_5 zqeA{69`xs^r?eDddXy%4ua`j`+(huWR3YvXa>7SrcBtn$Y;Jav4{!O~!o*XSP_!q6 zSgW^@ijcq5{X;i)bUI?`?ihCDb2V76euz`AwUA%wK_qj%2##Ih^$Xu$QCAEHn+LV9 zaK%iJZ|;DtM^9q$3Lf(EQ=#=%p|r*=9(+UW@NAY6cqQi1X#-Z+a6Xa5+qpqyx)nV( zT?}j{WTJ|uChE_+K%$5IKy$nj4m~=Eq1pQAuT@OfPe>x}A(R{|I0&N?wnF~Ry=2w5 z#Fhb%MNCo33_LV^7BzZnn2Tl4=&Lb+b&@hDYZyqmrHzmm+zCHptfc1(>S}_$jiF{QR7N?-$pQt>V(~aEb6SVBeBT~joU98B^!&ioCbVcfA`ug&mmNw=a@SPTf388c8 zDX}Wrys85fy99xIe=bJunvBn7mBIdDEokg(rE9FpiD7yK+*Dfu(S2*d)3zL1!en5q zpSM?r3=sqKIyTg~mYj-dW90JM(dThITr8Hv)ciKC-%CCwYW-3spL3X17d=Y6-&}`z z4Zh5N#}Vd!dKsHA^CxlIACD`YD$qGHnaHfT#Oxa9B`?;Fk`VP+Tr%8GVr2VhrsQpE zG7yQS{;F_bOCr{b0;;C@J7 z23931(4PI3Op-r{vtPcZaz(N*EZo*ovn~r3eg?K7`9Af(;((#OT-xfnj?NbRLiKO% zr?sXl&?8!!xm6>GgH2(~y?Oh2R^0^VPqidQcvzBp`9ZRjJA?jS!%I8N-GUoxdvIP? zD)oQGL+nnx{;_B)>b_HiavrLTfAEu#z>TCb`2uX%=uAHzC?+?jG!kidE&h! zfZWKq!M!2+lME#A(eK;i$VlcFjLuyL?o%#7Sy49`PW*@sPLVL}mMq%ZbHGbaj?6zG zk9V@4lNkX6WM)q~_iMvAs~zk_e$7+GcY=pWUH~6aP0|HNuTX_AAmDFj}_WMn4M6RHt8$Z&ew)4?4+5ryg$ikbD2x|F22sHTa5z%el zyuQQ^={+qn?h=CmN7Fz$;0+1oE8sBBUzx*q?l99VH<9qM`Q%sCek@sALaJ|;(hV;B zgfD`F-uvA!<7frF@OU+bT7`#N|KBN*{{|yA|I!58|ImbmZlR0*8;scf14dR`TbS$p zS!CY%H@_)solT2t=FAqw4yzWuyH@7s0v5IWjtBFV3&H%FrK$Pz(-tjnTo<=|tFdmm zsczTu{O6(;-y_B?*1KfPmY?&D%?u4YJDz8fzz=k>bgt9tEP z4)W$MNtJblS94~@ScNmc}hFBJ*B-X^5D;32H%`4Al^debY7-5+|nth*`I9aBj*a* zJB0%@vxGit&;y;13NS(1jC}jP1%IqQO|!J^ak+UPMVLdmopr2a{t~$IeIxCiTuZi2 zoCe)dJBhyiGEj8VfgsL(bahOk+sw+T=ck8M=r2E+>iUv>C}Tv2lAP!XjYf7zN)&@{ zWZ}Bif~?N&H%vlvIIb^org|$EgZkV_c-!_O>odn2<=R!q+1m%%XQGa*?}{E;roDw8 z+B!(it?x&*mJWI-CmDw7!l7kNINdIsiHxr%tdw3x$J8#6k0*e1Jc@>aErrCxSqL?X zgyGgU0v-PPU>y*EclW4a&6|93;zk8MvO^#9@1I6aD1(cPWRQuG#SMSY;RP=NDtwKI1!Igd&#`zZMK6+v+&`a6@zg6d_`LHcyENYk_THFGEDR5U z5+ejV28+o{?oH~fx)pj$^$(MwE%cTRHfc~q2HnRA>b z9`3@2s?yAHKXDNFCJ2Lz!*KH{4lPTMqZ-SWqUZPZa3yXZREeC%&k8SS)!z~JmLG>~ z{gcRkdwGfevN(uU%pqJ^7YWtNY^dH?J>y*%23953?>-m-E_`X*JDY@??)+GAn5D*ZU?H=DMSMeeaSYW00PEz_wbr!^FCmdJjb 
z8KQ(DU;T-TOCMagGY1ZE8z8{qJGrFtjr0$?V_QxYF6y32_8x9wZ|zWprq9<%^q z$GOMJsfopOO;rHMuha#h75lgq%l*jjWmECtlrG|bJr<^_?IG)*l|hN16t2tD!BUTV z+;M|%Y!tr<*laLEXNQBJ!5yaWZ)AgGxima3XlG?v?##t(z%;c?I%+=Yk zQ0?PHGM-hDXN77Yx@;b1tueNj=r)8j%KSB=OJ%N}U$nYkh$fdLO4d`6l2? z`3&x~v?{iVPQ%G z5t>JmsUr1W_wzjWZ@p{1@9+8hTI*W-*n920zsL9Z9N*nW_J90|;#d2F^(6+cZL)^p z@u7I8VH{*`Ou&X0HWQ5-ExPCuzi9XrY14J_6SKH z-vB(XX)JDbyhrTxq#4nwe1X*;Ig*$=O4i%i!s5$v__0km4)}RmFu&s_!iH(&_LgDp zzTp{&o(N#IDV32w7lPdS5^?C8vvi*KdzA0rL>76L3Qh-|A~IoGT<<$ounQX|+2{Mo zOU)zb@T%2NrsOYB(f7e(!rjR8Gy5@PHxV19M!{Z@)3D}zDlC}HlD@Kf(l9U%6$oX7 zqNFGmE1HE((#r6)pm)TmPo2pdX}~% zrtUN1dEh*flDUCR*BRi3(dnc^uZ%dn6T>&#PBUs1aX9|OH%2B=1s)g&A`Qu>g!a(D z%AbA`-|csi7vD}$UwV+(-ZsWEC#>LU=_omAkRq_yX^o4%%)+~LRm?Z4w~@BXN3b}Z zfg8P_kusTiPiRTH0FNofcI}R^tkD~<&`~6Xt#J?- zd5(#>V@rkx!|+;NQCwoBgl~pFgni|e&~@6K8Fso4A4@|3s1(R+655ZQ@@UgHTiEEG zhFyHkg@lqezOrpSaamVN@@7nhH~B(Ru1Xa|ZfhZtsJYm-qk{X8H-tVO+KI;>e8E8Z zZ{lq>7St?$Y&=N7_-QX$mwSic}rhrmc?W|=0yX^_t-_&R^1g;9Hx+=Wjj$k z{F=BX24YjYeK4)%I!PLT4jT;{!tMjl1ZO|5M+YQcp`^#NF}vm+(MWi~%1x1l*bVWd zL3Smc)US-k-Pi;P6ONL;H?oi$ae>@B^$@T0yi9%w+xTOP=itIU4j^M62hCpd@%uGu zMEAx~s(TVEn196vo7YmrVKzcJri-`}aekoor$KNbKpai-vcgS2l^MsovA`ea1KU5Z zA&u#Qc>a;MD0OrSc43^cY`hiqaF|2Av!(Dl>kgvL6G4(kmytU{ns3II75J3Gc36@f zfKP9-06i%OaNBzfsa8gUR%9AlGqZ~BKInqvBOST(UqqM_rzP;Ut_ZfXeHC-z%~cX? 
z@D{IXjm7JPc^%G1^+dWTg&Zyx$I0y*iC=FQ<0>RX>r_+mo5s=p@V z)g|%wb;7KYJHQ1I z$VlfH8u7reE;R>-gqJY~n&&}l%Xe~r${V8EF3U~+k&n+ODPvttVCJm|!fR|xu=hG; z)N=F@iQkfm+YS2AyFePuTuhlBPvQ7v$^^W7M<9_6Loigc9N*cq0Jb%pXOst0(C({h zxb9K8px4+P=Qnqg)r=l!y_$~itPv{lcV9Sn%|uuqu@8E#?1bJ`RgfbV&Fsqb5M2Iw zg}FJ9h10jLz|MH9;5}8%H4BNvvp+h}LBDtiy5$5zM#WImKM!nm+l2UC07Ph;!>L4; zS^Yc{8x*k2i>iBM$f^KLlSD~tWf1gAPb8z3dysv$9Ia6fD7$1Kc{$k<4p?`SwH~v{ z^N-HZ8ZsM?JGKNX*QCL-VkgEz=CZ(8HH#dnsX$wUClRr?r&fSLoI`@kQK+)l){>rKedba^JmV-;#@biyP3Pf*i-Ei%w-gYT~J zBc<*q!TyW{n6*D4fpRRlVk(Oz7LJhSjDsK+d>aM>vLH}>AM_Quq0q3?sC$W9$Y!ZN`{>Ay8E#C+?R%6V`b-U8+z;gQz)HCBM|eHhqxkoF zYgpYMiJDy};8a-=*dn(Fr5-#e=s(kiH$B+`Q(s;pnOUh!;*3k^mEjrG^-dJf+zd3< zrW}8|qy=|9r@|7Ae)8p~BivNJ2OkJ&ZDi5L2{?*Phc**6s1YQAGJQyJcVYpU2r`KE3r`3xSRfSEe*`IyGw_O? zubBm@Ld*Ai0(S2SA@(^_7_n$6n0Jqdk|vZ3uTKZEa(yO9+M`TPobtk3pZnmaW8#Ty z)?plPItj1rJAj{xe&%Kx>?JqKl3}OYMZq0fmXur2Ohx-l;F)}6x@JgY*AqelSuc+p z6k1U8o#Np9f-EAFFoeW5X2PG{Nu+qII(m~gfxO(83|_+V=G5D+MA5sPv2R@uC@7!Q z?wU_jQ?B6hHNiO9$Qd%F=0nBBB!Pj|Inu)_kUKXdk*t#$=El#)W}(n73bHt?ikx?p#`aO^usW|vA`__+DOgvG(g5g@S0IgoVP9@YcH>6MjTk? 
z`1~fcFRemgXd#MM1zs>L@?v zgL^!VKmCz0t@cIkHiPRFdoW&P4^HSN_?o(+x*M0!9+m5Kc#Q+-pXk8- zUDDuT{vJJPI#29IqS4}<@h~IR433Elm!Os23fLnm(9CZj^CCYproD}V`(LkN!Sg{P zUD1H=eRGAMPqMJUVJw=M9Sbv(4A7Qv9!!>fOgAF;6}` zmzYERo|Z8>=RXkf_jYS$)>dD*#Pw~y7 zEzGxx9Oh-ZV!H$tygw}ob#$D7>wETsiQH^dC$Ud(uLz+j{u9YHn|o-#vH@&-T!ih0 zJxGc}FVniKN4OjsKy)^*KsmBg@lgLR@Qy14jdAzz=YeGK@Y_dbb}hh_!v5&l`=y|C z{S<^chljfSZcH`sST=uNo40Th6)HtUL!x=a+nr;5F_n<+c8i;&-gkK_m zgdeP1Wj6JC{DM%A@KDeHPKo@FC9?dVQoQ~y#oH~^=YK2_-+wHTp8Kt&T{i;{MLuH= z8|$JybKA&A+YJ0fdNm|{b0I;uCquUTX=3fMj66ErgJvC%#y!TnNr}4$nzwc;?j0Wj z*X~p?$2Bxy%ra|cLg`;PX|gubOGHS$e;qm@?hk`O*GQSLP3Ye=1uh(o2g#0$Wc;az zB*|(O-XdN{5+o{MmzNQ7{uo2DiY}3W1sZs~n+?n=`%LM#&PL}hCF91T{rIA2Da3yO zCO|kL9l7lZo;jv?&aAo6$nsEemm8euPlK0d<}!M6(d6=XS@4K8g7ox>xF}j2{GOhK zm!*L)Rk%CdcXl5ge4`0nGBM%8Qy!zY9@B+xS{+=VM9@*EK_a*`KxChmAme@R@P1`K zvj1R0-v2s;ax1qXq>}_^R;~l3FHUH^<1k5lAq|$Bza!boQW9-{4KLLQ1-Zs2q$b=P zs2$FDkMAi6U_4R$jUHmVSstu}vee+B10Eb$0g>gqkc%*j;j;T)P#l= z5d)-dd<+K$Ux8WmQACBYg@)UU@qYU#VB~2~Eu9Ij^=T+%O9dIWZ70=^RbXkF3mvOY zqn`@z$;0Am_!b;NR{qJxZdJWFw|aa(x)t%&=b3_7x|9@ecSrSuL4vC9G5B%*NfiHf z2btR(3k!c&1JRBW%(WJW#|u<(tzj+gXEzRlk9I&yRvzqET@JQY`tW5+BYCQ|j~p9J zhS^H`kZ;8>xNsUC49f)c@(;1vstO986nLzE05)oO87R)gzsCGTsWzmUrj+2bwR6;0<6Uw!)-k*X> z5kBniKY=hdh0`}v;bGbfGUw!LQf5hkk-9gGIiv=e39h(vh7Xw?H33^++=Ub0=Rw}u zuZ%O7Lv~AlC9qK(ZyjkT&WrsZPsakPhm`PQ&jj*H7~y5jp1)=57PLa(@id@c4qx=3y2-liof1!CEs^eBiGs({N=ML))Ka^yA|#5 zsqO@_oHc;GCvUUbz44^nio%CSov`W53^E{Eg6ns$#G_BA6LI0Ac+YilA$^fWG>iI( zn)O05DbO6<^UFd{>ca4M^VLw$KN}Y;+=$M7-3SHlE)c%>s^Du}3K-g+#u^W-n0lR5 zG|tfzSsa>1R1c4Vt&`(X!kYx}$?(B`t1;|VC?l#blku|XRAz6MA`aeKgxf7uVR^d) zURGa?-kIi*ORM5xqEHB;?KJs~K9-kaTfQz?yO<-(Y`O%2;(EJ zhXi1zkPZb>Z2=IsahqNPZ>RGP?N6k!Y{6>;td76n|@#sR-<83mtc9A6_##CRgOzv>(- z`7H!jOgu<39FmYX9znNLgt4HP_u+3n+GO*u1n3==2HA7&`0Ah`7UuKe_&wtAqcwyp zIaEPzmLFrd3Dr0Xj2 zlqwM%^71cO`$QK-CMUz3^zY2Knm0I;Ck_`H!Vr0|4(bWR}d+fosExq|a+3W1-qc8b2pMX{4dx z{IC}x#t8B|&w_UFLo#@{8fk_-CG%hJ2jy-lTvTxti5R;>(qOu9p~D?dUULZaKKV0B zO}9{_FbiVF<}?(& 
z3c`5DH0=A=Gm`TomsI?vhBC#P$)S=-VA1c3b-Op?6(*Wk;ZmePa&;~a4gP{gu3MvL zdmrGbj}}3?Y%wXj9f0jP5maP)0Mbn~@Wb-;cthk^(oxDGe`ztutet~z8p>m{YWm&je;o`XvorEui+`3S1p>3bE^gt>8A*lR*RBQsqOitNteif6Src9@U# z?w1R}@`=c-#~HsJaU%B`oXFh`StMjYoAkZvLosLlNm{=@>Ab6j^~~=GN?#mltSj6U@~6vLwy@~I^Ui#HYH zt3$&=!cG^e52$jU<7Js03)2L{VW~tVVF|8zx)oijY$87EhRAV6J*=BK8}c6VvNQ1+`wxmNW%dk zjVY$BiS>vV>~$(bo~~hJ%9mwAOHUHEoIlP;4NqknzrRJr!*OKM=R)W`t3+OPD`C?| z`dIFc4BAiaAdS~lNUdKiz0KhlT9f?_f7N(TUgySO^RIWwa;BTG_R_E+L{!+OpToXB zr63r4n@E&Ifo<(ch`Xr?`pQp`%AX{B@m)8{Z_q)~zn7p&;|W;VU^md z&7|q+Mx+;bSkO1*2x${G;a$^zQ%F`B*Q$DwtBK=~&5Tsy^dkqJ&y;}iJasTo-vFzw z?1E;!deqt{2RYR*kne`CWT^QU>ZI#IL2f+$Lw=LjdFPp~fWN@uWCrp0Ycft+ZVJU{ zAxz+>K~-BG7O7If$Jb{Fr|w6Q`=eq(ps72~>G6f@pKcJjj-y0vygn{7SPBJ~vygAs z8a$;oUoZ<1=6FaPin(BhZ&o#NLpBL0Pje!^KYcUKFVzs_)f|L*Kej{H#ZCBeb{9H5 zeFkJZ&BEpOWuWg-j(Ek(U}0@ObnVn2P7nz%-x=bF)tV4xsfEYfipJKGHeh$OT!2mG z@qDv&V0&vH3AD(=ExA^ZYunA2WbYyC_1)>$1Ko>L3CU!I`Isd2E&C6!4@X~Ck=JeV0(i%xt> zB^RVJi0V&&G};~koL(G`>lnf-O@r`QJwP?A8`gE%hF+c5CT-ugf&0uCNK|qj&aCrA zADx!Mz^Wo-vP=!)rx+9G!6Vpzbt5jGN~7xEu{g0<9W0HVgmIvbe@4RpZ+k}k4%a6PJMh}D3K&;p5UzVDj@37IK0~26d%n{#qOt5 z$o>7#Q0CSRc+Q5kMD$iJtkar?+5I`B)zuVYkL|=Whn@*Kdlo}<_HrPim(f-kPdIOx zhhzINr2?p)z?SI&(f$-v72uZH%EfeOV$@uT;!fmY`AK=`qQ)vxvdI}m8NMa!UmYZ^ zu?!wsc7@38uSHkw{~*z`*9GQ@k>rq;1gtz{2r2bNNIb|DyuMzC2_I9?^SU0Ax|oM% z-zp{zUo6mD$3nENEFW)|c|~I8jsacwX}FLvgJ1hRpgKufxUV^f)M=K$j#@p~#x|fy zpLZ~u6m)QxyaeVg%7EmUX*i|W2eZ!&@R~#^Y=31oD4Bjo39fZyUNr;3KDO|yTMwkP z$0BtlUv9&o0!Y<8MrTLUVC_-?mUEmB0d3o1Xwn~|ka`DADmsIX&Q*bef;r^ZB1b{G zwFtiHQ;R-HO~UUtMUn#kWnsMKKGMIY8!4}~#D04oA;;J1I3-vD!lLG3nY)=tHg7c! 
z^$QPO@!u(t{}?0w|F{V&|8^4s+(HBYV~hm-%NRM{xsYAyN>eM_=CjmuimHvmY|JcM z3ePZP!>Sl+=_Je^^0r_dV{ItiAJ(i?mnLPcVM84|BT2cPTf{m^o3oE5Qk2(sEB4S# zD{8>sf)Z5`21IYetdx%l<=kgOjWM@mOUG)nE;=@pMYa{IKi`g3e_=`48>+COtHMKr z1DC7+e?w6=M}+^wiLCyoj5U9kvDPhg-M>u{xunqb|1w3Sgu(lk?Np?b9J~9P3_n!g zo{Ri&kJTR1;fsZzrp|p0G}Ab>k>|hfCEMIC!L?8s?5#bRR_|HKPQ75yc?`O zvQ4(s)fb)N<$2WB_!=+dLR?K~tfk0JFOi^Uj;r9sZHk~Idt%wiY#pob@oYi9W3rj5 zG{cR4pTaro+tI4-t7v`ye(IywY+B6v7<)upfjhJMA?siDnm_n>8SST%#_rAQ;%%Fz z#Qo){Mjw)~qk8-0IaNJ#dcf14a`)+FZ*?5wU(7aO)5ngmPJ1afYgsNYdagOYEc$lc zygT>n%oWd2oh`X^_1o9%!a)P>(>4YAuC6mzv9X78UZT#;kD1T?KCqZ>`8=~OzIH6f z*AL{9D+Fwfrvx{rZV{J!t%Tz27SZG1lv7VT*6;?qr*Jc>zVYp8N$xk5LzgJNr~KIh ze)8Nf_R2g3dd9aBN`6H$wSI0Otq>wh53J>QLkgC3+OOSILdG@LGeevuNjGJDt%{mo6~fl;`bt@8n^9haiu97E zN=oAOCiY#QHrE{?LWMQiQ?+3Zw4$FJ9TtC(O%b2VEiIzCC)4kg`7Qq8m*Jm%Qnv%p)}e&&APXJ zo*bsnt+ljd>npGDJl6TL(z3d>47 zxWqrPAaR>Yl@r|nuJ<%kcJ7bq?v29yN(tYA!F#q188gn5Rwq1}@>B(wsuwShj#w zEPl$)2$QB)d1`R?+7#%%b;l|F)yAB3Ya{>2z3J4^`a{&wi3R-3CHvS5lY97;0}0e| zZwKmrv_GpRcAtH!eTKR)DnYF}_LJ=&m8WKJzee3iA4~7IxJ*5ok;?w;t6spQ>r-_F z5&Vyo2|G=A&s5Qp^a>LZ?#H9^tijZkY#H~By;u8){UE&Wo>`8t%iOi;PG?0vFCm%o zmmK7M5NoAc47PJKU*@q!UmsHK2ikbXa>}gXsc5QKR-XPIw8!k-$PTu?^(2)s?J>`H zlL=R7yq)qM;CL>o32bpxF*|&(jm^~E$5+%8P>1hspqf5b^2EJXQq$VJ>k1_|voQrW zl+9gWZC`9-_upGocXo3s|LZhEUTWfV>S5DfcCG1Rdd=lAT%w8%bx`RTb;_ieHoKI{ z{t_Igo%F4_kvF%giF+mK?1DUMiNXl49x(0lOOKT_YNkF2#?cjt>HMX)<+tMD-MD&}lJIv}s8>Rdmst?Nn6eHag1FHuo@?G*iD3cTp7TIcbuhe6{hgEP7>o5dYjP` zebwkV^_05KJNn$Ev+msSASJeRdna|NbPPA;T0gbmaxho9=q@!{)WVy2b2ZNa0;$!r z!&&d#W%Oj12);O9l-s1wvQK9yv%gf*D5Z~*tjcvBT~|Awp1RAJI|FL0D={I1>-3vI2}W_rtt__MKx4sa80mQd@)mhzNZzOo9&&-l-sfABIKwy-;e zVcek`!b3y;H{$efE%C2?;*I|(iA{eiiOp`ITmDuOa*3g#e+wdu6C*+V_j9lD)x<>k z7p!x5qFWK4E_hzIWl{5vxG|I#$!{~Srg-$$~|Ep+?8HH}D8=#GDhGhU!4_aQEgVl`Is z{W@k*O5XFhgERf8Q%w?lzbB$}{9Z$@;KLVo+jM7Y;=nKVSCKZ=yXzGdGs}Yi_+TYF zc4jbVrsGUqysgez??dzol?ckpIFX*FN3k6lg}kQS0xIE%2-obX%ln}6nKG(T=dz~7 zuw%XwO4VZ>FElbdbZ6l5ssF8(f1T4^|5Ot7cO|>sLihY#iAZ8-^xt)ey*p+>g~o)3 z#s)@gjnEfec)0b+3r 
zhj9#2aSbR-0ZTrGaSmf{b!~8Y3S@M7aAgW{4_E>K003q*aS%%YO96**5mIpyC`$ua z0RR91JaH3FPEJby|Ns9=|NsC0O8|6n6k>@(_{ZcBKeeY9zJ%;=KRa$1zbFTPCqKrB z1V7Sw48QI>K|d>19zNh*c)h))kv+dxwmq*L2fr~SguTqOc)#~2AiwU&c0X>hPra@X z7e72M{yxh`x4!Q%sy)VSs6DN2guO0tLp+Qbr@c9F;yVn2h`mK-!8><_Y(0BeH9Jqj za=qKSB)=CLMLt}5hrjrfNxyz=(g;6=Wyn;oVxp}H(a*Cm;p}9$-p;3~dxn*jqfw_fIVv>P{f#F1Z?TicT5nK!m z3?-SlsqrQGC5btOtnLnOO26I#1ta)?L~221aehi_F;te#n=yhHC|sOamYNcuSR9{{ onOsuH-WkCS6f8=tj4w+qD$dN$E98h^1&SExS?C!Qa+c}=0CRRKh5!Hn delta 117 zcmcb_xP@_oyh2K{ak8bMiII7tiLptlxw)lBw7Ff diff --git a/tests/data/rllib_data/single_agent/params.json b/tests/data/rllib_data/single_agent/params.json index c5e605ef4..0e508b4e9 100644 --- a/tests/data/rllib_data/single_agent/params.json +++ b/tests/data/rllib_data/single_agent/params.json @@ -1,32 +1,32 @@ { "batch_mode": "truncate_episodes", "callbacks": { - "on_episode_end": null, - "on_episode_start": null, - "on_episode_step": null, - "on_postprocess_traj": null, - "on_sample_end": null, - "on_train_result": null + "on_episode_end": ".on_episode_end at 0x147eb0400>", + "on_episode_start": ".on_episode_start at 0x147e97e18>", + "on_episode_step": ".on_episode_step at 0x10be8fea0>", + "on_train_result": ".on_train_result at 0x147eb0510>" }, - "clip_actions": false, + "clip_actions": true, "clip_param": 0.3, "clip_rewards": null, "collect_metrics_timeout": 180, "compress_observations": false, "custom_resources_per_worker": {}, + "eager": false, + "eager_tracing": false, "entropy_coeff": 0.0, "entropy_coeff_schedule": null, - "env": "WaveAttenuationPOEnv-v0", + "env": "SingleStraightRoad-v1", "env_config": { - "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"max_accel\": 1,\n \"max_decel\": 1,\n \"ring_length\": [\n 220,\n 270\n ]\n },\n \"clip_actions\": false,\n \"evaluate\": false,\n \"horizon\": 3000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 750\n },\n \"env_name\": \"WaveAttenuationPOEnv\",\n \"exp_tag\": \"stabilizing_the_ring\",\n \"initial\": {\n 
\"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"lanes\": 1,\n \"length\": 260,\n \"resolution\": 40,\n \"speed_limit\": 30\n },\n \"inflows\": {\n \"_InFlows__flows\": []\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"RingNetwork\",\n \"sim\": {\n \"color_vehicles\": true,\n \"emission_path\": null,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": false,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.1,\n \"teleport_time\": -1\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"noise\": 0.2\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 21,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 
0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 1,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"rl\"\n }\n ]\n}", + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"lead_obs\": true,\n \"local_reward\": true,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"reward_after_exit\": true,\n \"sort_vehicles\": false,\n \"target_velocity\": 18.0,\n \"terminate_on_wave\": false,\n \"wave_termination_horizon\": 1000,\n \"wave_termination_speed\": 10.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": false,\n \"evaluate\": false,\n \"horizon\": 2000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 0\n },\n \"env_name\": \"flow.envs.straightroad_env.SingleStraightRoad\",\n \"exp_tag\": \"singleagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 500,\n \"ghost_speed_limit\": 25,\n \"lanes\": 1,\n \"length\": 2000,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": false\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": \"23.0\",\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1944,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 
\"23.0\",\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 216,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.5,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 0.3,\n \"b\": 2.0,\n \"noise\": 0.5\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 1621\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n 
\"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", "run": "PPO" }, "evaluation_config": {}, "evaluation_interval": null, "evaluation_num_episodes": 10, - "gamma": 0.999, + "gamma": 0.995, "grad_clip": null, - "horizon": 3000, + "horizon": 2000, "ignore_worker_failures": false, "input": "sampler", "input_evaluation": [ @@ -40,23 +40,27 @@ "inter_op_parallelism_threads": 8, "intra_op_parallelism_threads": 8 }, - "log_level": "INFO", + "log_level": "WARN", "log_sys_usage": true, "lr": 5e-05, "lr_schedule": null, + "memory": 0, + "memory_per_worker": 0, "metrics_smoothing_episodes": 100, "min_iter_time_s": 0, "model": { "conv_activation": "relu", "conv_filters": null, + "custom_action_dist": null, "custom_model": null, "custom_options": {}, "custom_preprocessor": null, "dim": 84, "fcnet_activation": "tanh", "fcnet_hiddens": [ - 3, - 3 + 32, + 32, + 32 ], "framestack": true, "free_log_std": false, @@ -76,13 +80,17 @@ "policies_to_train": null, "policy_mapping_fn": null }, + "no_done_at_end": false, + "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 10, - "num_workers": 2, + "num_sgd_iter": 1, + "num_workers": 1, + "object_store_memory": 0, + "object_store_memory_per_worker": 0, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -117,7 +125,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 60000, + "train_batch_size": 2000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git 
a/tests/data/rllib_data/single_agent/params.pkl b/tests/data/rllib_data/single_agent/params.pkl index 511d3434300e6270d326503ee9bcfce6f9127b1c..60cfcb075c7ed028b5ef23a05739e2fb931e1abd 100644 GIT binary patch literal 10890 zcmeHN&2J>fb?+Vy$>EU8kL60TR*JwheBe6PfaIR$fbFx-aZUmfz^5E+z(@ce>^~vDSJge!GbDEv z*zh3`7t(Z9y?XWPy;twOdiB2F{HMSDhN1o*7VC08%ma29CHp){i=HW0sm;TKl*K$L z2dIA~wS(B_Y}d{l!BX!5FM9tb+qxM46;tc5&m4-~OWg zTKq4+kh3C6ya!-B32GsY`WdTwK9!4Lpc{pp*%>s3cGrgDFvzU$4U|0R`+juDVjG)G zi@*N%GlubDdkudU;H+2IbVGI6uImw1kcI{y8&)7bw_O(PrWk>u@oy?jG(vxtAAw&b zpHGJ%;n@zPh2gEv)rt8u>T*4LrSrMi?$j*l&S}e!+;!=lC^4%H=Uo&K4w-!IJ{TzEZNxL;7b8C!DEA;|xdD4u>L|Kg( z9HAhOae8@7#Au33YDuG5h_i$Ni?psoHmsA9#Tke!iJ&{^>D=|gfzR)j-FMJ!=@l$~ zKW3Rdp!ul+vIjhbx?}pGeN;htA=K0+6?&TT*4elVyC{H^sIF~@y91u8)0ud?IjKLb zZu@?@npSK(JPC9YM+}r4)sR77*uW;9)t7(~&+-ITS|i`6Ynqjcyx;dvlTRAkj;7bj zL$4ntL7k-g8?=g2n2P3bn5B&{*xkHW!jhSzC?u?pb*Ap!!Rzws%$59V%{4?NPAiQNvzT@Evv-2zVlWRd5q z39y2qUrR&YEiHkyb-A-~7HGLst2`6zY`~DL%D1jwnG|KUvQ-^b&kd?0#x$SK#tOrl z-sZ{MQJ&DwzkTJ(xX6ac53-|J?OWu59a<-NS`$t+=l?mI6MvG+n`a4moXcCA6OOWC zm^EZ_;-A9dF`nNWwrZTWq7-qAt&V({`~FClR>l!(YR5q%YIe=gP_(MFn#?>554F=y zsg#|!wZY!e{pBvnuVBhZ%9a!PQNnhQv{}K}r1wJ5WbY#21)%SxFrRdQCxLk2r8IWN zMqoG{g#k3gJgS!pk0|@8Dg7N*BxLLE9R)PSK)3Kc7P(slyq9QNEZ|Kmm zoWPb+40GoER7WTi6s7`kWn#NtewtKL+U;n#?kY$T6e~f;Mm@}f()pa&!4MALwH#uFO zHaayGtpGF{^5)lZ`&0V3d8TkPI|m+&j|q}c)C_fnSuf4#j)_TIxRp`hAq@ zD>GLa2>tt&nH%xLs-K!MH9y6aTwDV)tb^8$?JoAU*M66p9~F0s zb@7^PbrE%pdU|Hh=rwv%y}RAlj}6y+-gsyZjAL`pa2wZ*@@s1N)f|3J55H!v8Ln~H zDAsr6Y)LRSV;|skfK&{G4ZoRvfR~ImwRy;s!BHV+sS(73;n!Y)ozCH!kT6{HY!EJ+ z$Y(@_cEmhz2ZCq62~tJ~+z?t)>q%y#*&xO6GQHq~PcL%`weBlE}@#MWXerHOC) z4Cw7df8vFNAZmE?ot+)RBG;b4ZPvH(5FpR9KJt<3!jbX}Qp$+$;{At2gZL})s$5{~ zgWUEhNkhwg17JwJB14+81ec)Q&NY>8Phanx6!#Kib{Iy$N_S>Vhw3gewtwjIzMcCS zbwMK;vhB5Y8~cmgTy0CdCucDnISTSQ+---t!_s_L!_m`@oM&v{N4S{jb@GpkkMC^A zr7@y-T!``cjgBzj7;y%?9Dzh-ZUPW4;yM^wl>l#N2bCnYOGZo8=o~-8yU{Z z=Jwid8zV-!()@Abkx6S|wI)Uvj+>9n^gp5T=iGTvzesJ;sk9bRT=y4A@iqgd~mDnyS 
zo;P~U-fH(Mj-_$0^APOaGxp5A#xXYI8vD};7_54xs0ZK8{n?~>Z2n{8gC-7W&-e&- z#oWhHD`QCC2?O)Yq6Fa|1*t3TS#%ePTJk> z*A#|j!l3nNh>3It$i!zQn$8%S8{+1TH-4v|z-4m)vkG%9-pQb5=SZ3I@G!CCn4CZx znZGh|RmfF#16P*#d-N`pzNbJEunmu-c4}uCz9Bt1ODH^-JO+Is^Qsz+FosJ|&$uF& z(_R|lL08<8Gqe&p-Q@sAUB7(|;8N+adv(+m@PWM-!9K#ztcxA-mO2d@^-Q^j_)NC! zgYsNJe{PF#`vtRWk+WHbvzftv7pGoqUNkQ@Uc&dX`4Y~-vU$-2vv3&5e~5QWM+b)MD-%X z81~K#duMCCtzqxnFbIJWy)JHux8?a!n4u~#V95$`Q!bSO$Efd}s&8-DN2>9;_*ZCE z_0%8mEb*MQQW{!ADaIGuubAECN-;<|p2fd15~=IJe^om01$1EjOX+~(@>wbX`hK3) z{Im*;5UHk!4C=Dc!QG3MY`|Ga$8ES%KLDRiIAjXlW;*clC*uxl+Eeng?=4uZfHu#9<>;8*b$l$~r14jUX(>zc13UAUz; z&8|^$Uv58<%XNm;y^+RrvZwe2VG`aUxQ((Mx-@{O2B#m`hu>~@VHum1;N^FIkEIL5 zpK`XUB$A8On!1Nv@s9W(WsiTo{RaG*(P+UUom9EK83bpQ*8TdamZ#2-6P8E(cV&9C z%H{G|ZxDhRWnmKnV$aW^eYo44QfB)) z3uT{!cmIrLPIYhpC2GqsL!(gjMWY#ybNP%e>p5j3dw&8o>9zJ=Qa^yC01uK(sYIw^ z7RmKFdcfmOh7u}e3)TA$GLJ>?r}ARemj+RkQH4fLI<7nqD-odvM~j$F4K)r1vc-i7LBuP$#EA$}@wqJJnD1 zb7TF6e4(u6!jADAjzX~dI9NNvhjYl>1j*)v%J^miY$-XSrB|cfJ>m-r%5We?{VV#j zju6lJb0u@_dZajuhX(VB#j9my z3UUMeK6t6i4GlR1xzQ3hRQd}yj#BE7avtb{KRE>z02v`aBxS%V7fNU<@=|wah43AO zfKA!P!yQU`D5#_RuWZGU?>X=Tq@gRSafG{f9FunRL#(Yk0l~6}tODZ4om|eUiV&MD z$Bee?R7a=E=-RTi?}Jp{@1yVM|4e_M$@A&JWl$HdUQ+pecnn>YLxqMpy*p9DW`PU` z!ISck9Or#-#SxsY%~uu`a9^dsnhwN4e`p2Npd^ZIwS# zPb47tKQ603wL*R1yI(J3XsTCq=?OXpcjmBFUE_wS#e-ko`250$avATVR6RPXHvBa2 F{4W_Wq{{#R delta 778 zcmZ`%OKTHR6z)vY2a`u*YfS=5+i9znz9z8`T5wTBA+$}455$U0Zzs2%sgrw$xpz_t z7Tk$4i0PSv8*$N{U>C-Pd-rYx(Z3)lxGIVZ?=)SsMepwWzVm(O+{1Z#>sdbeD7q8L zHXXdJU=XxSHd-iNTZDy7A1eCol<3&87%gF)aqI*|KY4`0q$HSxU>GVpHr5+N=;AK2 zi0)$M*<93g3d*63J1#XaW1+ke6yZ<*Xl;8X5}AwC_P@h#X}EX)l8v zVBvktk2AW)^+N{k$i4Dz3LeUr;g=jQ<%X4>BTp@(2YLw`*j8tHPHL`2)^!_`b#4mv z?lEJSkTySa&$tv1ka<>+SG)0VBcyOR7@&5L1ip*)!V3oXF12j zL)@ZdY4IwlK&tt4Cr7K6ZB@f5xQX>}ocd(*O$olVs6CS>daGi2)b_#z5jw6xwzV7X zwax-l<}=K|1i2Os+s#wz+&IMWL+MUduxS}KWo42i$dF5q$@v_hbU(aGbtbP+!Y$%ak6ou8hrO=x`rD4Z zsEGN01-wX)PpAL2hmndF_;JIw9B~|*$SpB(9Sw|(@UJX*0+GxpEDsdU#)wzeH>rCc zyDXTK{k9OVJDvbduyetxb&qzX8ke?;xMAwds^VZ_OD*Q&T${NBpU!`% 
bFAr`=9f}{bU7Z;wE_pT%ie?r*Uwro$+&}(6 From 240cc05f8f37f7cbbfcd0a010febc8a4a56bf342 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 14 Jun 2020 15:42:36 -0700 Subject: [PATCH 39/57] Code cleanup --- flow/controllers/imitation_learning/ppo_model.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index f7490d180..cbc51c6c4 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -38,8 +38,6 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name, **k self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) self.register_variables(self.base_model.variables) - # register variables for base model - # compare_weights(self.base_model, "/Users/akashvelu/Desktop/latest_run/imitation_model.h5") def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): @@ -137,5 +135,4 @@ def import_from_h5(self, import_file): import_file: str filepath to h5 file """ - print("LOADING WEIGHTS FROM H6") self.base_model.load_weights(import_file) From 10bf24fde3b34d0f612ddbd26d3e7e9306eb722b Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 15:35:33 -0700 Subject: [PATCH 40/57] Flag reconfig --- .../imitation_learning/imitation_trainer.py | 2 +- flow/controllers/imitation_learning/run.py | 4 +-- .../train_with_imitation.py | 27 +++++++++---------- .../controllers/imitation_learning/trainer.py | 4 +-- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index a6f75ea45..18c41a795 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -21,7 +21,7 @@ def _setup(self, config): # agent_cls = get_agent_class(config['env_config']['run']) 
self.trainer = ppo.PPOTrainer(env=env_name, config=config) policy_id = list(self.trainer.get_weights().keys())[0] - self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id) + self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id=policy_id) def _train(self): """ diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 25cb0f230..eee7d7b3f 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -48,7 +48,7 @@ def save_controller_network(self): def save_controller_for_PPO(self): """ - Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning + Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning. This model encapsulates both a policy network and a value function network. """ self.trainer.save_controller_for_PPO() @@ -83,7 +83,7 @@ def main(): parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') - parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') + parser.add_argument('--imitation_save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') parser.add_argument('--num_eval_episodes', type=int, default=0, 
help='Number of episodes on which to evaluate imitation model') diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index 78053fe2e..cd80131cb 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -12,7 +12,7 @@ def parse_args(args): dictionary version of the argparse """ - # train.py args + # **** TRAIN.PY ARGS **** parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, @@ -35,10 +35,6 @@ def parse_args(args): parser.add_argument( '--rl_trainer', type=str, default="rllib", help='the RL trainer to use. either rllib or Stable-Baselines') - parser.add_argument( - '--load_weights_path', type=str, default=None, - help='Path to h5 file containing a pretrained model. Relevent for PPO with RLLib' - ) parser.add_argument( '--algorithm', type=str, default="PPO", help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' @@ -73,26 +69,31 @@ def parse_args(args): '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') + + parser.add_argument( + '--load_weights_path', type=str, default=None, + help='Path to h5 file containing a pretrained model. 
Relevent for PPO with RLLib' + ) # Imitation Learning args parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training imitation policy.') parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') - parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run vf training') # TODO: better help description for this + parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run value function learning, after imitation') - parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') + parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to run SGD on during imitation learning.') parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural net') - parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') + parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net. 
load_imitation_model must be True') parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') - parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') - parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') + parser.add_argument('--imitation_save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') + parser.add_argument('--PPO_save_path', type=str, default='', help="Filepath to h5 file in which the ppo model should be saved") parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') @@ -103,8 +104,6 @@ def parse_args(args): parsed_args = parser.parse_known_args(args)[0] dict_args = vars(parsed_args) - dict_args['save_model'] = 1 - dict_args['save_path'] = dict_args['load_weights_path'] return parsed_args, dict_args @@ -116,7 +115,7 @@ def main(args): flags, params = parse_args(args) params["fcnet_hiddens"] = [32, 32, 32] - params['PPO_save_path'] = params['load_weights_path'] + params['load_weights_path'] = params["PPO_save_path"] print("\n\n********** IMITATION LEARNING ************ \n") diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index c1ff5f981..daba2f9f4 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -323,5 +323,5 @@ def save_controller_network(self): """ Saves a keras tensorflow model to the specified path 
given in the command line params. Path must end with .h5. """ - print("Saving tensorflow model to: ", self.params['save_path']) - self.action_network.save_network(self.params['save_path']) + print("Saving tensorflow model to: ", self.params['imitation_save_path']) + self.action_network.save_network(self.params['imitation_save_path']) From aa72b2e7361e121f59a45d97c36f555a6804ca51 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 15:41:46 -0700 Subject: [PATCH 41/57] flag cleanup --- .../train_with_imitation.py | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index cd80131cb..db6500d43 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -74,33 +74,35 @@ def parse_args(args): '--load_weights_path', type=str, default=None, help='Path to h5 file containing a pretrained model. 
Relevent for PPO with RLLib' ) - # Imitation Learning args - parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') + # *** IMITATION LEARNING ARGS *** + # rollout collection params: + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training imitation policy.') parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') + parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') + parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') + parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') + # imitation training params: parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run value function learning, after imitation') - parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to run SGD on during imitation learning.') + parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning negative log-likelihood loss, for stochastic policies.') + parser.add_argument('--stochastic', type=bool, default=True, help='If true, learn a stochastic policy 
(MV Gaussian). Must be true to continue with PPO training.') + parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') - parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural net') + # loading and saving params: + parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural network.') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net. load_imitation_model must be True') - parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') - parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') parser.add_argument('--imitation_save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') - parser.add_argument('--PPO_save_path', type=str, default='', help="Filepath to h5 file in which the ppo model should be saved") - parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') - parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') - parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') - parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') - parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') - + parser.add_argument('--PPO_save_path', type=str, default='', help="Filepath to h5 file in which the ppo model should be saved. 
Before starting PPO training, weights (for both policy and value function) will be loaded from this model") + # misc + parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parsed_args = parser.parse_known_args(args)[0] dict_args = vars(parsed_args) From e209de24ba21e1ab5645f874999941f0382a1ee7 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 15:47:18 -0700 Subject: [PATCH 42/57] Reorganize method arguments --- flow/controllers/imitation_learning/run.py | 4 ++-- .../imitation_learning/train_with_imitation.py | 14 ++++++++------ flow/controllers/imitation_learning/trainer.py | 10 +++++----- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index eee7d7b3f..c88801825 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -44,13 +44,13 @@ def save_controller_network(self): """ Saves the tensorflow keras model of the imitation policy to a h5 file, whose path is specified in params """ - self.trainer.save_controller_network() + self.trainer.save_controller_network(imitation_save_path=self.params['imitation_save_path']) def save_controller_for_PPO(self): """ Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning. This model encapsulates both a policy network and a value function network. 
""" - self.trainer.save_controller_for_PPO() + self.trainer.save_controller_for_PPO(PPO_save_path=self.params['PPO_save_path']) def main(): diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index db6500d43..057c62835 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -70,11 +70,6 @@ def parse_args(args): help='Directory with checkpoint to restore training from.') - parser.add_argument( - '--load_weights_path', type=str, default=None, - help='Path to h5 file containing a pretrained model. Relevent for PPO with RLLib' - ) - # *** IMITATION LEARNING ARGS *** # rollout collection params: @@ -116,7 +111,11 @@ def main(args): # Parse args, train imitation learning flags, params = parse_args(args) + + # depth and size of MLP layers params["fcnet_hiddens"] = [32, 32, 32] + + # load_weights_path for PPO must be set to same path as PPO_save_path (a result from imitation) params['load_weights_path'] = params["PPO_save_path"] @@ -125,7 +124,10 @@ def main(args): imitation_runner = Runner(params) imitation_runner.run_training_loop() - # convert model to work for PPO and save for training + # save imitation network + imitation_runner.save_controller_network() + + # save PPO network (contains policy and value function) imitation_runner.save_controller_for_PPO() # Imitation Done, start RL diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index daba2f9f4..56845031e 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -269,7 +269,7 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): - def save_controller_for_PPO(self): + def save_controller_for_PPO(self, PPO_save_path): """ Build a model, with same policy architecture as imitation network, to run PPO, copy 
weights from imitation, and save this model. @@ -316,12 +316,12 @@ def save_controller_for_PPO(self): # save the model (as a h5 file) - ppo_model.save(self.params['PPO_save_path']) + ppo_model.save(PPO_save_path) - def save_controller_network(self): + def save_controller_network(self, imitation_save_path): """ Saves a keras tensorflow model to the specified path given in the command line params. Path must end with .h5. """ - print("Saving tensorflow model to: ", self.params['imitation_save_path']) - self.action_network.save_network(self.params['imitation_save_path']) + print("Saving tensorflow model to: ", imitation_save_path) + self.action_network.save_network(imitation_save_path) From 5ce3c4de5c550ab5a0b460e3fe27323cdeb8fd0b Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 15:56:00 -0700 Subject: [PATCH 43/57] Argument reorganizing --- flow/controllers/imitation_learning/run.py | 9 +++--- .../controllers/imitation_learning/trainer.py | 31 ++++++++++--------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index c88801825..e78c2ce0b 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -2,7 +2,6 @@ import time import numpy as np from flow.controllers.imitation_learning.trainer import Trainer -from flow.controllers.car_following_models import IDMController class Runner(object): @@ -32,25 +31,25 @@ def run_training_loop(self): """ Runs training for imitation learning for number of iterations specified in params. 
""" - self.trainer.run_training_loop(n_iter=self.params['n_iter']) + self.trainer.run_training_loop() def evaluate(self): """ Evaluates a trained controller over a specified number trajectories; compares average action per step and average reward per trajectory between imitator and expert """ - self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) + self.trainer.evaluate_controller() def save_controller_network(self): """ Saves the tensorflow keras model of the imitation policy to a h5 file, whose path is specified in params """ - self.trainer.save_controller_network(imitation_save_path=self.params['imitation_save_path']) + self.trainer.save_controller_network() def save_controller_for_PPO(self): """ Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning. This model encapsulates both a policy network and a value function network. """ - self.trainer.save_controller_for_PPO(PPO_save_path=self.params['PPO_save_path']) + self.trainer.save_controller_for_PPO() def main(): diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 56845031e..786444cd2 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -76,18 +76,16 @@ def __init__(self, params, submodule): self.controllers[vehicle_id] = (imitator, expert) - def run_training_loop(self, n_iter): + def run_training_loop(self): """ - Trains imitator for n_iter iterations (each iteration collects new trajectories to put in replay buffer) - - Parameters - __________ - n_iter : - intnumber of iterations to execute training + Trains imitator for self.params['n_iter'] iterations (each iteration collects new trajectories to put in replay buffer) """ + # number of imitation learning iterations (1st iteration is behavioral cloning + n_iter = self.params['n_iter'] # init vars at beginning of training # number of environment steps 
taken throughout training + self.total_envsteps = 0 for itr in range(n_iter): @@ -146,19 +144,17 @@ def train_controller(self): # train network on sampled data self.action_network.train(ob_batch, expert_ac_batch) - def evaluate_controller(self, num_trajs = 10): + def evaluate_controller(self): """ Evaluates a trained imitation controller on similarity with expert with respect to action taken and total reward per rollout. - - Parameters - __________ - num_trajs: int - number of trajectories to evaluate performance on """ print("\n\n********** Evaluation ************ \n") + # number of trajectories to evaluate performance on + num_trajs = self.params['num_eval_episodes'] + # collect imitator driven trajectories (along with corresponding expert actions) trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False, v_des=self.params['v_des']) @@ -269,12 +265,15 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): - def save_controller_for_PPO(self, PPO_save_path): + def save_controller_for_PPO(self): """ Build a model, with same policy architecture as imitation network, to run PPO, copy weights from imitation, and save this model. """ + # filepath to h5 file in which keras model will be saved + PPO_save_path = self.params['PPO_save_path'] + vf_net = self.learn_value_function(self.params['vf_batch_size'], self.params['num_vf_iters'], self.params['num_agent_train_steps_per_iter']) input = tf.keras.layers.Input(self.action_network.model.input.shape[1].value) @@ -319,9 +318,11 @@ def save_controller_for_PPO(self, PPO_save_path): ppo_model.save(PPO_save_path) - def save_controller_network(self, imitation_save_path): + def save_controller_network(self): """ Saves a keras tensorflow model to the specified path given in the command line params. Path must end with .h5. 
""" + + imitation_save_path = self.params['imitation_save_path'] print("Saving tensorflow model to: ", imitation_save_path) self.action_network.save_network(imitation_save_path) From 1aae8f89e6b25fd765be6e10ea66a448d0bd9ca3 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 18:44:53 -0700 Subject: [PATCH 44/57] Cleanup and rearrange args --- .../imitation_learning/imitation_trainer.py | 3 +-- flow/controllers/imitation_learning/run.py | 25 +++++++++++-------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index 18c41a795..5a30035d3 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -27,8 +27,7 @@ def _train(self): """ Executes one training iteration on trainer. See superclass definition. """ - print("TRAIN CALLED") - # return self.trainer.train() + return self.trainer.train() def _save(self, tmp_checkpoint_dir): diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index e78c2ce0b..924e1a400 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -66,30 +66,33 @@ def main(): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') - parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') + # rollout collection params + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') + 
parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') + parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') + parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') + # imitation training params parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run vf training') # TODO: better help description for this - - parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') + parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size for SGD') + parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') + parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') + parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural net') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') - parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') - 
parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--save_model', type=int, default=0, help='If true, save both imitation model and PPO model in h5 format') parser.add_argument('--imitation_save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') - parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') - parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') - parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') - parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') - parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') - parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') + + # misc params + parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') args = parser.parse_args() From f7451d0212b1084f9dade476dc1210a1d587fd60 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 10:52:08 -0700 Subject: [PATCH 45/57] Custom PPO to log value function predictions: --- examples/train.py | 13 +- .../imitation_learning/custom_ppo.py | 321 ++++++++++++++++++ .../imitation_learning/imitation_trainer.py | 4 +- 3 files changed, 328 insertions(+), 10 deletions(-) create mode 100644 flow/controllers/imitation_learning/custom_ppo.py diff --git a/examples/train.py b/examples/train.py index 6d7b13879..019a96de6 100644 --- 
a/examples/train.py +++ b/examples/train.py @@ -187,17 +187,14 @@ def setup_exps_rllib(flow_params, if alg_run == "PPO": from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class + from custom_ppo import CustomPPOTrainer + from ray.rllib.agents.ppo import DEFAULT_CONFIG + config = deepcopy(DEFAULT_CONFIG) - horizon = flow_params['env'].horizon - alg_run = "PPO" + alg_run = CustomPPOTrainer - agent_cls = get_agent_class(alg_run) - config = deepcopy(agent_cls._default_config) + horizon = flow_params['env'].horizon config["num_workers"] = n_cpus config["horizon"] = horizon diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/controllers/imitation_learning/custom_ppo.py new file mode 100644 index 000000000..cf0def369 --- /dev/null +++ b/flow/controllers/imitation_learning/custom_ppo.py @@ -0,0 +1,321 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import ray +from ray.rllib.evaluation.postprocessing import compute_advantages, \ + Postprocessing +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.tf_policy import LearningRateSchedule, \ + EntropyCoeffSchedule, ACTION_LOGP +from ray.rllib.policy.tf_policy_template import build_tf_policy +from ray.rllib.utils.explained_variance import explained_variance +from ray.rllib.utils.tf_ops import make_tf_callable +from ray.rllib.utils import try_import_tf + +from ray.rllib.agents.trainer_template import build_trainer +from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG +from ray.rllib.agents.ppo.ppo import warn_about_bad_reward_scales +from ray.rllib.agents.impala.vtrace_policy import BEHAVIOUR_LOGITS + +tf = try_import_tf() + +logger = logging.getLogger(__name__) + + +class PPOLoss: + def __init__(self, + dist_class, + model, + 
value_targets, + advantages, + actions, + prev_logits, + prev_actions_logp, + vf_preds, + curr_action_dist, + value_fn, + cur_kl_coeff, + valid_mask, + entropy_coeff=0, + clip_param=0.1, + vf_clip_param=0.1, + vf_loss_coeff=1.0, + use_gae=True): + """Constructs the loss for Proximal Policy Objective. + + Arguments: + dist_class: action distribution class for logits. + value_targets (Placeholder): Placeholder for target values; used + for GAE. + actions (Placeholder): Placeholder for actions taken + from previous model evaluation. + advantages (Placeholder): Placeholder for calculated advantages + from previous model evaluation. + prev_logits (Placeholder): Placeholder for logits output from + previous model evaluation. + prev_actions_logp (Placeholder): Placeholder for action prob output + from the previous (before update) Model evaluation. + vf_preds (Placeholder): Placeholder for value function output + from the previous (before update) Model evaluation. + curr_action_dist (ActionDistribution): ActionDistribution + of the current model. + value_fn (Tensor): Current value function output Tensor. + cur_kl_coeff (Variable): Variable holding the current PPO KL + coefficient. + valid_mask (Optional[tf.Tensor]): An optional bool mask of valid + input elements (for max-len padded sequences (RNNs)). + entropy_coeff (float): Coefficient of the entropy regularizer. + clip_param (float): Clip parameter + vf_clip_param (float): Clip parameter for the value function + vf_loss_coeff (float): Coefficient of the value function loss + use_gae (bool): If true, use the Generalized Advantage Estimator. + """ + if valid_mask is not None: + + def reduce_mean_valid(t): + return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) + + else: + + def reduce_mean_valid(t): + return tf.reduce_mean(t) + + prev_dist = dist_class(prev_logits, model) + # Make loss functions. 
+ logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp) + action_kl = prev_dist.kl(curr_action_dist) + self.mean_kl = reduce_mean_valid(action_kl) + + curr_entropy = curr_action_dist.entropy() + self.mean_entropy = reduce_mean_valid(curr_entropy) + + surrogate_loss = tf.minimum( + advantages * logp_ratio, + advantages * tf.clip_by_value(logp_ratio, 1 - clip_param, + 1 + clip_param)) + self.mean_policy_loss = reduce_mean_valid(-surrogate_loss) + + if use_gae: + vf_loss1 = tf.square(value_fn - value_targets) + vf_clipped = vf_preds + tf.clip_by_value( + value_fn - vf_preds, -vf_clip_param, vf_clip_param) + vf_loss2 = tf.square(vf_clipped - value_targets) + vf_loss = tf.maximum(vf_loss1, vf_loss2) + self.mean_vf_loss = reduce_mean_valid(vf_loss) + loss = reduce_mean_valid( + -surrogate_loss + cur_kl_coeff * action_kl + + vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) + else: + self.mean_vf_loss = tf.constant(0.0) + loss = reduce_mean_valid(-surrogate_loss + + cur_kl_coeff * action_kl - + entropy_coeff * curr_entropy) + self.loss = loss + + +def ppo_surrogate_loss(policy, model, dist_class, train_batch): + logits, state = model.from_batch(train_batch) + action_dist = dist_class(logits, model) + + mask = None + if state: + max_seq_len = tf.reduce_max(train_batch["seq_lens"]) + mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len) + mask = tf.reshape(mask, [-1]) + + policy.loss_obj = PPOLoss( + dist_class, + model, + train_batch[Postprocessing.VALUE_TARGETS], + train_batch[Postprocessing.ADVANTAGES], + train_batch[SampleBatch.ACTIONS], + train_batch[BEHAVIOUR_LOGITS], + train_batch[ACTION_LOGP], + train_batch[SampleBatch.VF_PREDS], + action_dist, + model.value_function(), + policy.kl_coeff, + mask, + entropy_coeff=policy.entropy_coeff, + clip_param=policy.config["clip_param"], + vf_clip_param=policy.config["vf_clip_param"], + vf_loss_coeff=policy.config["vf_loss_coeff"], + use_gae=policy.config["use_gae"], + ) + + return 
policy.loss_obj.loss + + +def kl_and_loss_stats(policy, train_batch): + return { + "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), + "cur_lr": tf.cast(policy.cur_lr, tf.float64), + "total_loss": policy.loss_obj.loss, + "policy_loss": policy.loss_obj.mean_policy_loss, + "vf_loss": policy.loss_obj.mean_vf_loss, + "vf_explained_var": explained_variance( + train_batch[Postprocessing.VALUE_TARGETS], + policy.model.value_function()), + "vf_preds": policy.model.value_function(), + "kl": policy.loss_obj.mean_kl, + "entropy": policy.loss_obj.mean_entropy, + "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), + } + + +def vf_preds_and_logits_fetches(policy): + """Adds value function and logits outputs to experience train_batches.""" + return { + SampleBatch.VF_PREDS: policy.model.value_function(), + BEHAVIOUR_LOGITS: policy.model.last_output(), + } + + + +def postprocess_ppo_gae(policy, + sample_batch, + other_agent_batches=None, + episode=None): + """Adds the policy logits, VF preds, and advantages to the trajectory.""" + + completed = sample_batch["dones"][-1] + if completed: + last_r = 0.0 + else: + next_state = [] + for i in range(policy.num_state_tensors()): + next_state.append([sample_batch["state_out_{}".format(i)][-1]]) + last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1], + sample_batch[SampleBatch.ACTIONS][-1], + sample_batch[SampleBatch.REWARDS][-1], + *next_state) + batch = compute_advantages( + sample_batch, + last_r, + policy.config["gamma"], + policy.config["lambda"], + use_gae=policy.config["use_gae"]) + return batch + + +def clip_gradients(policy, optimizer, loss): + variables = policy.model.trainable_variables() + if policy.config["grad_clip"] is not None: + grads_and_vars = optimizer.compute_gradients(loss, variables) + grads = [g for (g, v) in grads_and_vars] + policy.grads, _ = tf.clip_by_global_norm(grads, + policy.config["grad_clip"]) + clipped_grads = list(zip(policy.grads, variables)) + return clipped_grads + else: + return 
optimizer.compute_gradients(loss, variables) + + +class KLCoeffMixin: + def __init__(self, config): + # KL Coefficient + self.kl_coeff_val = config["kl_coeff"] + self.kl_target = config["kl_target"] + self.kl_coeff = tf.get_variable( + initializer=tf.constant_initializer(self.kl_coeff_val), + name="kl_coeff", + shape=(), + trainable=False, + dtype=tf.float32) + + def update_kl(self, sampled_kl): + if sampled_kl > 2.0 * self.kl_target: + self.kl_coeff_val *= 1.5 + elif sampled_kl < 0.5 * self.kl_target: + self.kl_coeff_val *= 0.5 + self.kl_coeff.load(self.kl_coeff_val, session=self.get_session()) + return self.kl_coeff_val + + +class ValueNetworkMixin: + def __init__(self, obs_space, action_space, config): + if config["use_gae"]: + + @make_tf_callable(self.get_session()) + def value(ob, prev_action, prev_reward, *state): + model_out, _ = self.model({ + SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]), + SampleBatch.PREV_ACTIONS: tf.convert_to_tensor( + [prev_action]), + SampleBatch.PREV_REWARDS: tf.convert_to_tensor( + [prev_reward]), + "is_training": tf.convert_to_tensor(False), + }, [tf.convert_to_tensor([s]) for s in state], + tf.convert_to_tensor([1])) + return self.model.value_function()[0] + + else: + + @make_tf_callable(self.get_session()) + def value(ob, prev_action, prev_reward, *state): + return tf.constant(0.0) + + self._value = value + + +def setup_config(policy, obs_space, action_space, config): + # auto set the model option for layer sharing + config["model"]["vf_share_layers"] = config["vf_share_layers"] + + +def setup_mixins(policy, obs_space, action_space, config): + ValueNetworkMixin.__init__(policy, obs_space, action_space, config) + KLCoeffMixin.__init__(policy, config) + EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], + config["entropy_coeff_schedule"]) + LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"]) + + +CustomPPOTFPolicy = build_tf_policy( + name="PPOTFPolicy", + get_default_config=lambda: 
ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG, + loss_fn=ppo_surrogate_loss, + stats_fn=kl_and_loss_stats, + extra_action_fetches_fn=vf_preds_and_logits_fetches, + postprocess_fn=postprocess_ppo_gae, + gradients_fn=clip_gradients, + before_init=setup_config, + before_loss_init=setup_mixins, + mixins=[ + LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin, + ValueNetworkMixin + ]) + + +def validate_config(config): + """Check that the config is set up properly.""" + if config["entropy_coeff"] < 0: + raise DeprecationWarning("entropy_coeff must be >= 0") + if isinstance(config["entropy_coeff"], int): + config["entropy_coeff"] = float(config["entropy_coeff"]) + if config["batch_mode"] == "truncate_episodes" and not config["use_gae"]: + raise ValueError( + "Episode truncation is not supported without a value " + "function. Consider setting batch_mode=complete_episodes.") + if config["multiagent"]["policies"] and not config["simple_optimizer"]: + logger.info( + "In multi-agent mode, policies will be optimized sequentially " + "by the multi-GPU optimizer. Consider setting " + "simple_optimizer=True if this doesn't work for you.") + if config["simple_optimizer"]: + logger.warning( + "Using the simple minibatch optimizer. 
This will significantly " + "reduce performance, consider simple_optimizer=False.") + elif tf and tf.executing_eagerly(): + config["simple_optimizer"] = True # multi-gpu not supported + +CustomPPOTrainer = build_trainer( + name="CustomPPOTrainer", + default_config=DEFAULT_CONFIG, + default_policy=CustomPPOTFPolicy, + make_policy_optimizer=choose_policy_optimizer, + validate_config=validate_config, + after_train_result=warn_about_bad_reward_scales) \ No newline at end of file diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index 5a30035d3..afa6680cc 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -5,7 +5,7 @@ from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class - +import custom_ppo class Imitation_PPO_Trainable(tune.Trainable): """ @@ -19,7 +19,7 @@ def _setup(self, config): env_name = config['env'] # agent_cls = get_agent_class(config['env_config']['run']) - self.trainer = ppo.PPOTrainer(env=env_name, config=config) + self.trainer = custom_ppo.CustomPPOTrainer(env=env_name, config=config) policy_id = list(self.trainer.get_weights().keys())[0] self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id=policy_id) From 2d964603c47f4a27904a7089a44e43ac927586d2 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 16:18:17 -0700 Subject: [PATCH 46/57] Cleanup to train.py --- examples/train.py | 54 +------------------ .../imitation_learning/imitation_trainer.py | 2 +- 2 files changed, 2 insertions(+), 54 deletions(-) diff --git a/examples/train.py b/examples/train.py index dd07f7c38..87853e6c4 100644 --- a/examples/train.py +++ b/examples/train.py @@ -193,9 +193,7 @@ def setup_exps_rllib(flow_params, config["use_gae"] = True config["lambda"] = 0.97 config["kl_target"] = 0.02 - # TODO: restore this 
to 10 - config["num_sgd_iter"] = 1 - # config["num_sgd_iter"] = 10 + config["num_sgd_iter"] = 10 if flags.grid_search: config["lambda"] = tune.grid_search([0.5, 0.9]) config["lr"] = tune.grid_search([5e-4, 5e-5]) @@ -335,56 +333,6 @@ def on_train_result(info): register_env(gym_name, create_env) return alg_run, gym_name, config -# def train_rllib_with_imitation(submodule, flags): -# """Train policies using the PPO algorithm in RLlib, with initiale policy weights from imitation learning.""" -# import ray -# from flow.controllers.imitation_learning.ppo_model import PPONetwork -# from ray.rllib.models import ModelCatalog -# -# flow_params = submodule.flow_params -# flow_params['sim'].render = flags.render -# policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) -# policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) -# policies_to_train = getattr(submodule, "policies_to_train", None) -# -# alg_run, gym_name, config = setup_exps_rllib( -# flow_params, flags.num_cpus, flags.num_rollouts, flags, -# policy_graphs, policy_mapping_fn, policies_to_train) -# -# -# -# config['num_workers'] = flags.num_cpus -# config['env'] = gym_name -# -# # create a custom string that makes looking at the experiment names easier -# def trial_str_creator(trial): -# return "{}_{}".format(trial.trainable_name, trial.experiment_tag) -# -# if flags.local_mode: -# ray.init(local_mode=True) -# else: -# ray.init() -# -# exp_dict = { -# "run_or_experiment": alg_run, -# "name": gym_name, -# "config": config, -# "checkpoint_freq": flags.checkpoint_freq, -# "checkpoint_at_end": True, -# 'trial_name_creator': trial_str_creator, -# "max_failures": 0, -# "stop": { -# "training_iteration": flags.num_iterations, -# }, -# } -# date = datetime.now(tz=pytz.utc) -# date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") -# s3_string = "s3://i210.experiments/i210/" \ -# + date + '/' + flags.exp_title -# if flags.use_s3: -# exp_dict['upload_dir'] = s3_string -# tune.run(**exp_dict, 
queue_trials=False, raise_on_failed_trial=False) - def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index afa6680cc..2f2d8f8df 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -5,7 +5,7 @@ from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class -import custom_ppo +import flow.controllers.imitation_learning.custom_ppo class Imitation_PPO_Trainable(tune.Trainable): """ From 18c0d9ed8bc8f00f1ca269d0af9d7dce14908b7f Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 22:44:14 -0700 Subject: [PATCH 47/57] add imitation custom models --- flow/visualize/visualizer_rllib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 75a8c5c8b..312c1dbb4 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -172,7 +172,7 @@ def visualizer_rllib(args): checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num agent.restore(checkpoint) - # agent.import_model('/Users/akashvelu/Desktop/latest_run3/ppo.h5', 'av') + agent.import_model('/Users/akashvelu/Desktop/combined_test3/ppo_model.h5', 'av') if hasattr(agent, "local_evaluator") and \ From 024cb9360671d0300d805dae45d6e2ce9512f703 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 19 Jun 2020 10:22:14 -0700 Subject: [PATCH 48/57] code cleanup --- examples/train.py | 2 +- flow/controllers/imitation_learning/imitation_trainer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/train.py b/examples/train.py index 87853e6c4..05ebb3fe3 100644 --- a/examples/train.py +++ 
b/examples/train.py @@ -176,7 +176,7 @@ def setup_exps_rllib(flow_params, alg_run = flags.algorithm.upper() if alg_run == "PPO": - from custom_ppo import CustomPPOTrainer + from flow.controllers.imitation_learning.custom_ppo import CustomPPOTrainer from ray.rllib.agents.ppo import DEFAULT_CONFIG config = deepcopy(DEFAULT_CONFIG) diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index 2f2d8f8df..7db18d005 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -5,7 +5,7 @@ from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class -import flow.controllers.imitation_learning.custom_ppo +import flow.controllers.imitation_learning.custom_ppo as custom_ppo class Imitation_PPO_Trainable(tune.Trainable): """ From 885ab6f13eae864909fd79ba3143c2ade48d4c32 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 19 Jun 2020 12:07:13 -0700 Subject: [PATCH 49/57] custom ppo for vf plotting edits --- .../imitation_learning/custom_ppo.py | 436 +++++++----------- .../custom_ppo_tf_policy.py | 283 ++++++++++++ 2 files changed, 437 insertions(+), 282 deletions(-) create mode 100644 flow/controllers/imitation_learning/custom_ppo_tf_policy.py diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/controllers/imitation_learning/custom_ppo.py index cf0def369..fdbc073a8 100644 --- a/flow/controllers/imitation_learning/custom_ppo.py +++ b/flow/controllers/imitation_learning/custom_ppo.py @@ -1,301 +1,162 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import logging -import ray -from ray.rllib.evaluation.postprocessing import compute_advantages, \ - Postprocessing -from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.policy.tf_policy import LearningRateSchedule, \ - 
EntropyCoeffSchedule, ACTION_LOGP -from ray.rllib.policy.tf_policy_template import build_tf_policy -from ray.rllib.utils.explained_variance import explained_variance -from ray.rllib.utils.tf_ops import make_tf_callable -from ray.rllib.utils import try_import_tf - +from ray.rllib.agents import with_common_config +from flow.controllers.imitation_learning.custom_ppo_tf_policy import CustomPPOTFPolicy from ray.rllib.agents.trainer_template import build_trainer -from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG -from ray.rllib.agents.ppo.ppo import warn_about_bad_reward_scales -from ray.rllib.agents.impala.vtrace_policy import BEHAVIOUR_LOGITS +from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer +from ray.rllib.utils import try_import_tf tf = try_import_tf() logger = logging.getLogger(__name__) - -class PPOLoss: - def __init__(self, - dist_class, - model, - value_targets, - advantages, - actions, - prev_logits, - prev_actions_logp, - vf_preds, - curr_action_dist, - value_fn, - cur_kl_coeff, - valid_mask, - entropy_coeff=0, - clip_param=0.1, - vf_clip_param=0.1, - vf_loss_coeff=1.0, - use_gae=True): - """Constructs the loss for Proximal Policy Objective. - - Arguments: - dist_class: action distribution class for logits. - value_targets (Placeholder): Placeholder for target values; used - for GAE. - actions (Placeholder): Placeholder for actions taken - from previous model evaluation. - advantages (Placeholder): Placeholder for calculated advantages - from previous model evaluation. - prev_logits (Placeholder): Placeholder for logits output from - previous model evaluation. - prev_actions_logp (Placeholder): Placeholder for action prob output - from the previous (before update) Model evaluation. - vf_preds (Placeholder): Placeholder for value function output - from the previous (before update) Model evaluation. - curr_action_dist (ActionDistribution): ActionDistribution - of the current model. 
- value_fn (Tensor): Current value function output Tensor. - cur_kl_coeff (Variable): Variable holding the current PPO KL - coefficient. - valid_mask (Optional[tf.Tensor]): An optional bool mask of valid - input elements (for max-len padded sequences (RNNs)). - entropy_coeff (float): Coefficient of the entropy regularizer. - clip_param (float): Clip parameter - vf_clip_param (float): Clip parameter for the value function - vf_loss_coeff (float): Coefficient of the value function loss - use_gae (bool): If true, use the Generalized Advantage Estimator. - """ - if valid_mask is not None: - - def reduce_mean_valid(t): - return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) - - else: - - def reduce_mean_valid(t): - return tf.reduce_mean(t) - - prev_dist = dist_class(prev_logits, model) - # Make loss functions. - logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp) - action_kl = prev_dist.kl(curr_action_dist) - self.mean_kl = reduce_mean_valid(action_kl) - - curr_entropy = curr_action_dist.entropy() - self.mean_entropy = reduce_mean_valid(curr_entropy) - - surrogate_loss = tf.minimum( - advantages * logp_ratio, - advantages * tf.clip_by_value(logp_ratio, 1 - clip_param, - 1 + clip_param)) - self.mean_policy_loss = reduce_mean_valid(-surrogate_loss) - - if use_gae: - vf_loss1 = tf.square(value_fn - value_targets) - vf_clipped = vf_preds + tf.clip_by_value( - value_fn - vf_preds, -vf_clip_param, vf_clip_param) - vf_loss2 = tf.square(vf_clipped - value_targets) - vf_loss = tf.maximum(vf_loss1, vf_loss2) - self.mean_vf_loss = reduce_mean_valid(vf_loss) - loss = reduce_mean_valid( - -surrogate_loss + cur_kl_coeff * action_kl + - vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) - else: - self.mean_vf_loss = tf.constant(0.0) - loss = reduce_mean_valid(-surrogate_loss + - cur_kl_coeff * action_kl - - entropy_coeff * curr_entropy) - self.loss = loss - - -def ppo_surrogate_loss(policy, model, dist_class, train_batch): - logits, state = 
model.from_batch(train_batch) - action_dist = dist_class(logits, model) - - mask = None - if state: - max_seq_len = tf.reduce_max(train_batch["seq_lens"]) - mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len) - mask = tf.reshape(mask, [-1]) - - policy.loss_obj = PPOLoss( - dist_class, - model, - train_batch[Postprocessing.VALUE_TARGETS], - train_batch[Postprocessing.ADVANTAGES], - train_batch[SampleBatch.ACTIONS], - train_batch[BEHAVIOUR_LOGITS], - train_batch[ACTION_LOGP], - train_batch[SampleBatch.VF_PREDS], - action_dist, - model.value_function(), - policy.kl_coeff, - mask, - entropy_coeff=policy.entropy_coeff, - clip_param=policy.config["clip_param"], - vf_clip_param=policy.config["vf_clip_param"], - vf_loss_coeff=policy.config["vf_loss_coeff"], - use_gae=policy.config["use_gae"], - ) - - return policy.loss_obj.loss - - -def kl_and_loss_stats(policy, train_batch): - return { - "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), - "cur_lr": tf.cast(policy.cur_lr, tf.float64), - "total_loss": policy.loss_obj.loss, - "policy_loss": policy.loss_obj.mean_policy_loss, - "vf_loss": policy.loss_obj.mean_vf_loss, - "vf_explained_var": explained_variance( - train_batch[Postprocessing.VALUE_TARGETS], - policy.model.value_function()), - "vf_preds": policy.model.value_function(), - "kl": policy.loss_obj.mean_kl, - "entropy": policy.loss_obj.mean_entropy, - "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), - } - - -def vf_preds_and_logits_fetches(policy): - """Adds value function and logits outputs to experience train_batches.""" - return { - SampleBatch.VF_PREDS: policy.model.value_function(), - BEHAVIOUR_LOGITS: policy.model.last_output(), - } - - - -def postprocess_ppo_gae(policy, - sample_batch, - other_agent_batches=None, - episode=None): - """Adds the policy logits, VF preds, and advantages to the trajectory.""" - - completed = sample_batch["dones"][-1] - if completed: - last_r = 0.0 +# yapf: disable +# __sphinx_doc_begin__ +DEFAULT_CONFIG = 
with_common_config({ + # Should use a critic as a baseline (otherwise don't use value baseline; + # required for using GAE). + "use_critic": True, + # If true, use the Generalized Advantage Estimator (GAE) + # with a value function, see https://arxiv.org/pdf/1506.02438.pdf. + "use_gae": True, + # The GAE(lambda) parameter. + "lambda": 1.0, + # Initial coefficient for KL divergence. + "kl_coeff": 0.2, + # Size of batches collected from each worker. + "rollout_fragment_length": 200, + # Number of timesteps collected for each SGD round. This defines the size + # of each SGD epoch. + "train_batch_size": 4000, + # Total SGD batch size across all devices for SGD. This defines the + # minibatch size within each epoch. + "sgd_minibatch_size": 128, + # Whether to shuffle sequences in the batch when training (recommended). + "shuffle_sequences": True, + # Number of SGD iterations in each outer loop (i.e., number of epochs to + # execute per train batch). + "num_sgd_iter": 30, + # Stepsize of SGD. + "lr": 5e-5, + # Learning rate schedule. + "lr_schedule": None, + # Share layers for value function. If you set this to True, it's important + # to tune vf_loss_coeff. + "vf_share_layers": False, + # Coefficient of the value function loss. IMPORTANT: you must tune this if + # you set vf_share_layers: True. + "vf_loss_coeff": 1.0, + # Coefficient of the entropy regularizer. + "entropy_coeff": 0.0, + # Decay schedule for the entropy regularizer. + "entropy_coeff_schedule": None, + # PPO clip parameter. + "clip_param": 0.3, + # Clip param for the value function. Note that this is sensitive to the + # scale of the rewards. If your expected V is large, increase this. + "vf_clip_param": 10.0, + # If specified, clip the global norm of gradients by this amount. + "grad_clip": None, + # Target value for KL divergence. + "kl_target": 0.01, + # Whether to rollout "complete_episodes" or "truncate_episodes". 
+ "batch_mode": "truncate_episodes", + # Which observation filter to apply to the observation. + "observation_filter": "NoFilter", + # Uses the sync samples optimizer instead of the multi-gpu one. This is + # usually slower, but you might want to try it if you run into issues with + # the default optimizer. + "simple_optimizer": False, + # Use PyTorch as framework? + "use_pytorch": False +}) +# __sphinx_doc_end__ +# yapf: enable + + +def choose_policy_optimizer(workers, config): + if config["simple_optimizer"]: + return SyncSamplesOptimizer( + workers, + num_sgd_iter=config["num_sgd_iter"], + train_batch_size=config["train_batch_size"], + sgd_minibatch_size=config["sgd_minibatch_size"], + standardize_fields=["advantages"]) + + return LocalMultiGPUOptimizer( + workers, + sgd_batch_size=config["sgd_minibatch_size"], + num_sgd_iter=config["num_sgd_iter"], + num_gpus=config["num_gpus"], + rollout_fragment_length=config["rollout_fragment_length"], + num_envs_per_worker=config["num_envs_per_worker"], + train_batch_size=config["train_batch_size"], + standardize_fields=["advantages"], + shuffle_sequences=config["shuffle_sequences"]) + + +def update_kl(trainer, fetches): + # Single-agent. + if "kl" in fetches: + trainer.workers.local_worker().for_policy( + lambda pi: pi.update_kl(fetches["kl"])) + + # Multi-agent. 
else: - next_state = [] - for i in range(policy.num_state_tensors()): - next_state.append([sample_batch["state_out_{}".format(i)][-1]]) - last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1], - sample_batch[SampleBatch.ACTIONS][-1], - sample_batch[SampleBatch.REWARDS][-1], - *next_state) - batch = compute_advantages( - sample_batch, - last_r, - policy.config["gamma"], - policy.config["lambda"], - use_gae=policy.config["use_gae"]) - return batch - -def clip_gradients(policy, optimizer, loss): - variables = policy.model.trainable_variables() - if policy.config["grad_clip"] is not None: - grads_and_vars = optimizer.compute_gradients(loss, variables) - grads = [g for (g, v) in grads_and_vars] - policy.grads, _ = tf.clip_by_global_norm(grads, - policy.config["grad_clip"]) - clipped_grads = list(zip(policy.grads, variables)) - return clipped_grads + def update(pi, pi_id): + if pi_id in fetches: + pi.update_kl(fetches[pi_id]["kl"]) + else: + logger.debug("No data for {}, not updating kl".format(pi_id)) + + trainer.workers.local_worker().foreach_trainable_policy(update) + + +def warn_about_bad_reward_scales(trainer, result): + if result["policy_reward_mean"]: + return # Punt on handling multiagent case. + + # Warn about excessively high VF loss. + learner_stats = result["info"]["learner"] + if "default_policy" in learner_stats: + scaled_vf_loss = (trainer.config["vf_loss_coeff"] * + learner_stats["default_policy"]["vf_loss"]) + policy_loss = learner_stats["default_policy"]["policy_loss"] + if trainer.config["vf_share_layers"] and scaled_vf_loss > 100: + logger.warning( + "The magnitude of your value function loss is extremely large " + "({}) compared to the policy loss ({}). This can prevent the " + "policy from learning. 
Consider scaling down the VF loss by " + "reducing vf_loss_coeff, or disabling vf_share_layers.".format( + scaled_vf_loss, policy_loss)) + + # Warn about bad clipping configs + if trainer.config["vf_clip_param"] <= 0: + rew_scale = float("inf") else: - return optimizer.compute_gradients(loss, variables) - - -class KLCoeffMixin: - def __init__(self, config): - # KL Coefficient - self.kl_coeff_val = config["kl_coeff"] - self.kl_target = config["kl_target"] - self.kl_coeff = tf.get_variable( - initializer=tf.constant_initializer(self.kl_coeff_val), - name="kl_coeff", - shape=(), - trainable=False, - dtype=tf.float32) - - def update_kl(self, sampled_kl): - if sampled_kl > 2.0 * self.kl_target: - self.kl_coeff_val *= 1.5 - elif sampled_kl < 0.5 * self.kl_target: - self.kl_coeff_val *= 0.5 - self.kl_coeff.load(self.kl_coeff_val, session=self.get_session()) - return self.kl_coeff_val - - -class ValueNetworkMixin: - def __init__(self, obs_space, action_space, config): - if config["use_gae"]: - - @make_tf_callable(self.get_session()) - def value(ob, prev_action, prev_reward, *state): - model_out, _ = self.model({ - SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]), - SampleBatch.PREV_ACTIONS: tf.convert_to_tensor( - [prev_action]), - SampleBatch.PREV_REWARDS: tf.convert_to_tensor( - [prev_reward]), - "is_training": tf.convert_to_tensor(False), - }, [tf.convert_to_tensor([s]) for s in state], - tf.convert_to_tensor([1])) - return self.model.value_function()[0] - - else: - - @make_tf_callable(self.get_session()) - def value(ob, prev_action, prev_reward, *state): - return tf.constant(0.0) - - self._value = value - - -def setup_config(policy, obs_space, action_space, config): - # auto set the model option for layer sharing - config["model"]["vf_share_layers"] = config["vf_share_layers"] - - -def setup_mixins(policy, obs_space, action_space, config): - ValueNetworkMixin.__init__(policy, obs_space, action_space, config) - KLCoeffMixin.__init__(policy, config) - 
EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], - config["entropy_coeff_schedule"]) - LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"]) - - -CustomPPOTFPolicy = build_tf_policy( - name="PPOTFPolicy", - get_default_config=lambda: ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG, - loss_fn=ppo_surrogate_loss, - stats_fn=kl_and_loss_stats, - extra_action_fetches_fn=vf_preds_and_logits_fetches, - postprocess_fn=postprocess_ppo_gae, - gradients_fn=clip_gradients, - before_init=setup_config, - before_loss_init=setup_mixins, - mixins=[ - LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin, - ValueNetworkMixin - ]) + rew_scale = round( + abs(result["episode_reward_mean"]) / + trainer.config["vf_clip_param"], 0) + if rew_scale > 200: + logger.warning( + "The magnitude of your environment rewards are more than " + "{}x the scale of `vf_clip_param`. ".format(rew_scale) + + "This means that it will take more than " + "{} iterations for your value ".format(rew_scale) + + "function to converge. If this is not intended, consider " + "increasing `vf_clip_param`.") def validate_config(config): - """Check that the config is set up properly.""" if config["entropy_coeff"] < 0: raise DeprecationWarning("entropy_coeff must be >= 0") if isinstance(config["entropy_coeff"], int): config["entropy_coeff"] = float(config["entropy_coeff"]) + if config["sgd_minibatch_size"] > config["train_batch_size"]: + raise ValueError( + "Minibatch size {} must be <= train batch size {}.".format( + config["sgd_minibatch_size"], config["train_batch_size"])) if config["batch_mode"] == "truncate_episodes" and not config["use_gae"]: raise ValueError( "Episode truncation is not supported without a value " @@ -309,13 +170,24 @@ def validate_config(config): logger.warning( "Using the simple minibatch optimizer. 
This will significantly " "reduce performance, consider simple_optimizer=False.") - elif tf and tf.executing_eagerly(): + elif config["use_pytorch"] or (tf and tf.executing_eagerly()): config["simple_optimizer"] = True # multi-gpu not supported + +def get_policy_class(config): + if config.get("use_pytorch") is True: + from ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy + return PPOTorchPolicy + else: + return CustomPPOTFPolicy + + CustomPPOTrainer = build_trainer( - name="CustomPPOTrainer", + name="PPO", default_config=DEFAULT_CONFIG, default_policy=CustomPPOTFPolicy, + get_policy_class=get_policy_class, make_policy_optimizer=choose_policy_optimizer, validate_config=validate_config, + after_optimizer_step=update_kl, after_train_result=warn_about_bad_reward_scales) \ No newline at end of file diff --git a/flow/controllers/imitation_learning/custom_ppo_tf_policy.py b/flow/controllers/imitation_learning/custom_ppo_tf_policy.py new file mode 100644 index 000000000..0dc381b55 --- /dev/null +++ b/flow/controllers/imitation_learning/custom_ppo_tf_policy.py @@ -0,0 +1,283 @@ +import logging + +import ray +from ray.rllib.agents.impala.vtrace_policy import BEHAVIOUR_LOGITS +from ray.rllib.evaluation.postprocessing import compute_advantages, \ + Postprocessing +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.policy import ACTION_LOGP +from ray.rllib.policy.tf_policy import LearningRateSchedule, \ + EntropyCoeffSchedule +from ray.rllib.policy.tf_policy_template import build_tf_policy +from ray.rllib.utils.explained_variance import explained_variance +from ray.rllib.utils.tf_ops import make_tf_callable +from ray.rllib.utils import try_import_tf + +tf = try_import_tf() + +logger = logging.getLogger(__name__) + + +class PPOLoss: + def __init__(self, + dist_class, + model, + value_targets, + advantages, + actions, + prev_logits, + prev_actions_logp, + vf_preds, + curr_action_dist, + value_fn, + cur_kl_coeff, + valid_mask, + 
entropy_coeff=0, + clip_param=0.1, + vf_clip_param=0.1, + vf_loss_coeff=1.0, + use_gae=True): + """Constructs the loss for Proximal Policy Objective. + + Arguments: + dist_class: action distribution class for logits. + value_targets (Placeholder): Placeholder for target values; used + for GAE. + actions (Placeholder): Placeholder for actions taken + from previous model evaluation. + advantages (Placeholder): Placeholder for calculated advantages + from previous model evaluation. + prev_logits (Placeholder): Placeholder for logits output from + previous model evaluation. + prev_actions_logp (Placeholder): Placeholder for action prob output + from the previous (before update) Model evaluation. + vf_preds (Placeholder): Placeholder for value function output + from the previous (before update) Model evaluation. + curr_action_dist (ActionDistribution): ActionDistribution + of the current model. + value_fn (Tensor): Current value function output Tensor. + cur_kl_coeff (Variable): Variable holding the current PPO KL + coefficient. + valid_mask (Optional[tf.Tensor]): An optional bool mask of valid + input elements (for max-len padded sequences (RNNs)). + entropy_coeff (float): Coefficient of the entropy regularizer. + clip_param (float): Clip parameter + vf_clip_param (float): Clip parameter for the value function + vf_loss_coeff (float): Coefficient of the value function loss + use_gae (bool): If true, use the Generalized Advantage Estimator. + """ + if valid_mask is not None: + + def reduce_mean_valid(t): + return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) + + else: + + def reduce_mean_valid(t): + return tf.reduce_mean(t) + + prev_dist = dist_class(prev_logits, model) + # Make loss functions. 
+ logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp) + action_kl = prev_dist.kl(curr_action_dist) + self.mean_kl = reduce_mean_valid(action_kl) + + curr_entropy = curr_action_dist.entropy() + self.mean_entropy = reduce_mean_valid(curr_entropy) + + surrogate_loss = tf.minimum( + advantages * logp_ratio, + advantages * tf.clip_by_value(logp_ratio, 1 - clip_param, + 1 + clip_param)) + self.mean_policy_loss = reduce_mean_valid(-surrogate_loss) + + if use_gae: + vf_loss1 = tf.square(value_fn - value_targets) + vf_clipped = vf_preds + tf.clip_by_value( + value_fn - vf_preds, -vf_clip_param, vf_clip_param) + vf_loss2 = tf.square(vf_clipped - value_targets) + vf_loss = tf.maximum(vf_loss1, vf_loss2) + self.mean_vf_loss = reduce_mean_valid(vf_loss) + loss = reduce_mean_valid( + -surrogate_loss + cur_kl_coeff * action_kl + + vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) + else: + self.mean_vf_loss = tf.constant(0.0) + loss = reduce_mean_valid(-surrogate_loss + + cur_kl_coeff * action_kl - + entropy_coeff * curr_entropy) + self.loss = loss + + +def ppo_surrogate_loss(policy, model, dist_class, train_batch): + logits, state = model.from_batch(train_batch) + action_dist = dist_class(logits, model) + + mask = None + if state: + max_seq_len = tf.reduce_max(train_batch["seq_lens"]) + mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len) + mask = tf.reshape(mask, [-1]) + + policy.loss_obj = PPOLoss( + dist_class, + model, + train_batch[Postprocessing.VALUE_TARGETS], + train_batch[Postprocessing.ADVANTAGES], + train_batch[SampleBatch.ACTIONS], + train_batch[BEHAVIOUR_LOGITS], + train_batch[ACTION_LOGP], + train_batch[SampleBatch.VF_PREDS], + action_dist, + model.value_function(), + policy.kl_coeff, + mask, + entropy_coeff=policy.entropy_coeff, + clip_param=policy.config["clip_param"], + vf_clip_param=policy.config["vf_clip_param"], + vf_loss_coeff=policy.config["vf_loss_coeff"], + use_gae=policy.config["use_gae"], + ) + + return 
policy.loss_obj.loss + + +def kl_and_loss_stats(policy, train_batch): + return { + "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), + "cur_lr": tf.cast(policy.cur_lr, tf.float64), + "total_loss": policy.loss_obj.loss, + "policy_loss": policy.loss_obj.mean_policy_loss, + "vf_loss": policy.loss_obj.mean_vf_loss, + "vf_preds": policy.model.value_function(), + "vf_targets": train_batch[Postprocessing.VALUE_TARGETS], + "vf_explained_var": explained_variance( + train_batch[Postprocessing.VALUE_TARGETS], + policy.model.value_function()), + "kl": policy.loss_obj.mean_kl, + "entropy": policy.loss_obj.mean_entropy, + "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), + } + + +def vf_preds_and_logits_fetches(policy): + """Adds value function and logits outputs to experience train_batches.""" + return { + SampleBatch.VF_PREDS: policy.model.value_function(), + BEHAVIOUR_LOGITS: policy.model.last_output(), + } + + +def postprocess_ppo_gae(policy, + sample_batch, + other_agent_batches=None, + episode=None): + """Adds the policy logits, VF preds, and advantages to the trajectory.""" + + completed = sample_batch["dones"][-1] + if completed: + last_r = 0.0 + else: + next_state = [] + for i in range(policy.num_state_tensors()): + next_state.append([sample_batch["state_out_{}".format(i)][-1]]) + last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1], + sample_batch[SampleBatch.ACTIONS][-1], + sample_batch[SampleBatch.REWARDS][-1], + *next_state) + batch = compute_advantages( + sample_batch, + last_r, + policy.config["gamma"], + policy.config["lambda"], + use_gae=policy.config["use_gae"]) + return batch + + +def clip_gradients(policy, optimizer, loss): + variables = policy.model.trainable_variables() + if policy.config["grad_clip"] is not None: + grads_and_vars = optimizer.compute_gradients(loss, variables) + grads = [g for (g, v) in grads_and_vars] + policy.grads, _ = tf.clip_by_global_norm(grads, + policy.config["grad_clip"]) + clipped_grads = 
list(zip(policy.grads, variables)) + return clipped_grads + else: + return optimizer.compute_gradients(loss, variables) + + +class KLCoeffMixin: + def __init__(self, config): + # KL Coefficient + self.kl_coeff_val = config["kl_coeff"] + self.kl_target = config["kl_target"] + self.kl_coeff = tf.get_variable( + initializer=tf.constant_initializer(self.kl_coeff_val), + name="kl_coeff", + shape=(), + trainable=False, + dtype=tf.float32) + + def update_kl(self, sampled_kl): + if sampled_kl > 2.0 * self.kl_target: + self.kl_coeff_val *= 1.5 + elif sampled_kl < 0.5 * self.kl_target: + self.kl_coeff_val *= 0.5 + self.kl_coeff.load(self.kl_coeff_val, session=self.get_session()) + return self.kl_coeff_val + + +class ValueNetworkMixin: + def __init__(self, obs_space, action_space, config): + if config["use_gae"]: + + @make_tf_callable(self.get_session()) + def value(ob, prev_action, prev_reward, *state): + model_out, _ = self.model({ + SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]), + SampleBatch.PREV_ACTIONS: tf.convert_to_tensor( + [prev_action]), + SampleBatch.PREV_REWARDS: tf.convert_to_tensor( + [prev_reward]), + "is_training": tf.convert_to_tensor(False), + }, [tf.convert_to_tensor([s]) for s in state], + tf.convert_to_tensor([1])) + return self.model.value_function()[0] + + else: + + @make_tf_callable(self.get_session()) + def value(ob, prev_action, prev_reward, *state): + return tf.constant(0.0) + + self._value = value + + +def setup_config(policy, obs_space, action_space, config): + # auto set the model option for layer sharing + config["model"]["vf_share_layers"] = config["vf_share_layers"] + + +def setup_mixins(policy, obs_space, action_space, config): + ValueNetworkMixin.__init__(policy, obs_space, action_space, config) + KLCoeffMixin.__init__(policy, config) + EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], + config["entropy_coeff_schedule"]) + LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"]) + + +CustomPPOTFPolicy = 
build_tf_policy( + name="PPOTFPolicy", + get_default_config=lambda: ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG, + loss_fn=ppo_surrogate_loss, + stats_fn=kl_and_loss_stats, + extra_action_fetches_fn=vf_preds_and_logits_fetches, + postprocess_fn=postprocess_ppo_gae, + gradients_fn=clip_gradients, + before_init=setup_config, + before_loss_init=setup_mixins, + mixins=[ + LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin, + ValueNetworkMixin + ]) From 85fdd6334be153f7d740fa7bd3b3fbebc195e978 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 19 Jun 2020 23:44:49 -0700 Subject: [PATCH 50/57] i210 imitation model file --- .../model_files/ppo_model_i210.h5 | Bin 0 -> 53208 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 flow/controllers/imitation_learning/model_files/ppo_model_i210.h5 diff --git a/flow/controllers/imitation_learning/model_files/ppo_model_i210.h5 b/flow/controllers/imitation_learning/model_files/ppo_model_i210.h5 new file mode 100644 index 0000000000000000000000000000000000000000..f93f6df56568b627dedd5a87e98480dc3a013734 GIT binary patch literal 53208 zcmeEv2|SkD);J-NiYB2D%}Gjw;eFPAWk_jKX`m7^m3c~o%tDe1r3q1?L4|~Oul>@T z=2>Y@sc2T3|HOOm&8d6tz2E=d@7(Y7{?4(Vz1MoyUVA-juV?SI)_#gHH!*7`t1K(l z_-oTfOj1n#+okBw*XG}~vrR3cx(GhE`9i$8jBYNoS~XpWi?tSOgmr6fmv5%8F45G` zt@*~OlTA&<8ZbrlMP>7CVq*Q8uG;*i;n&5VNdo4kMw1%HJEM8fB7HjEtY;!wxO>iX zbhBOP=<4j^=iAUI(o4Pw;h!uOn)S7%UdxG@H_Vkr`t8L!i?tD3;OJxLYa8I`FG}37N{cK*F_iut9ZyspVyl(Z=d95?3 z3E!)!5DPilbaAk`{;T-EEdkNIp4+T1$D4=yqPXcoyg}0%L8AFCnvX7SP4MW($=2`_ zZknJ6e#oxASvwv2D7~*Tv4O zkp~+Oe>XQRb=}zwz0C7l^sP;!VFY%5xo~=j2c)oEtyP@^3oO*e>xjHPeb#a|H&(Xv7+gNOe{FZN%2I)DrU|H1LPf7Us z*tvSx*}FBe=;!0#$l;HM;qT$<*T|Xf&;|rM2S3*UJHLi^QbUV|pPh$GW3T?cj<)u$ zc8yrggc@F79u4nJ*M@st?c7`!Hwd)x34Q+zN4t6Yc=}m+xH@_IxVPZm*U@oa!!1oB z{w-fYjk0U(@^`whcJ%S|Z9)1sgZxIUnPVSEXMZ<4pJtAmyZ_eF4e;v^`lD`4`P$O(?8ni^DusT}m4gW0(nmzk_MG0{FA1TQXZ~vtr&6D~6 zf`a_W416$uYZT>2YS8#X`A6-_KdCY;?aD88X?O+vWs&g5w<|55 
z-``V}AF0+4)>!|nh51I}mxcMSwlIy;{vWk4|D?*a)X-n*^51P?{+_BdTNqD&zsBY8 zx82aUwf%Q{xPQRDv?TCPT9@B$gxp*`9PNA>dHJ;!`ZsTde)g96JLaY3s z<<0-3t_=CjrumnyG`kW0{oCdry&L{kPi8kfoTjbyU%f;vH`}utk^WW*zIkbEG=6N0 z8mTwjV59L{&G`nJ9Te2$-)i*rH~xO+|B`9~i(KboBEMO4vsjv(=;~MbO9H32iO9Hy6zol1E zUP?=O$%)lB=XJ?7$hezWQ%XQ%tiLfkO-!s)gP^-Kl;Yw|5|{bbDkdf>nsoN6sTT;mncG6;EE%wiU;rIVXUR$FI{xz@df1YpG zv{#q^3y*($(Uh+y_S1Z|zs3D^{pXWFlf9Jui@mIGwu>TrDL=4DChE=kVInPC-&_~r z)i>vvb!*P&;x~(O?zpB45ngn2J!?W!yH_*b@fQ82g*5?$5zPpH@IquSTgvON^Sm*C zwVPN|Chxa2vTp`cL7gbq2C=XmTVDaXI8se>*Bp}KQRc|Kq z+j_Tew<@TseH)*cn7Wu^8!^S;W_+oJ(Kb?Q)vAeL;~J{*Cm|l#EZUH!j=r^uiFItY z4vmcpqiW}fiMcdG{xikz*%h6UO?>>aEB~42@ki_BXLco_MV_{3W{UV1%|(&2|Fd?b zOd{99w!A94d~Xp!WmZDl-YBMs;LcS6&6hPAY^<8OaWF0hpJf$0{_- z@%2Fovgq|Sczj8P%`8wS!|yJ@@TYq*!|fJ4P*P!sHFrZ_?k;RKH$=sSpTSMqfCO0H zggIL!$@KLXL35)TStq;>w?9v3{q?Q6eODh~ywwR<9#Bg6dCsIc0sCBD zos0bnl5yn6=aAED1z=xu!vdqL(6uxHH3~8yWAa$&rtgX`o~qGIl~(M@VpFgQ=nc9P zHbUruAhahTkUq&89IG;TIiADF<}u4@wnN_R zuYl|c+tK;t2^==37@qGlg-bi~sOj*A>v`CDGhIv*BFP zy`8+;NE>?S*h=)y@?-mQpIT5`T<7LGb4mE ztvi#lg5@Q43VllFJ5Li19BV>+qerkMZufD6axXGu(ICF<0|WNKP}!&={3cuZ^Z*Q6 zzMO<#izF{z29vo`{xtH?K;gXLBC7jw3p;t?1s*W0W;{)Kez&NloT~OgQhy_n9g>TJ z%ITf>BkR@p+KbMhyNNa%J4aFYdj26eFO=tp^EQzE>tuvsDIW!N@V`wI2J!{s$l=blMwdqc*(N7_GrB?j9RvuhW6COo>GSlJ^s#$euCnbIu3*AJUe1>d0^et+ zxKq{f)ac6&dL~%KV95DnCF8eDMCYBJfGQpM zyp$o8?7$T#27|51B%g)oy=EWOjJV2_JdCkw+z}XZVH|t6V-N{8>qt6}Il!EB-$SP{ z?TLEFx8S&RAFeQb2ik*Ti_3d2rQ!0^L1KX(h8cKs?n;-ri2JR$%Tfg;#;26ItHSew zgjcsaZU~2_5_6`>)Z!n`OzDqf-`x(K}Rr3uV zII9TA+h|VhwJRLD{h{Ptz#tkiFor(iXY+Cn9^|a{cR-L?0fBvn5yzRg=-5aOheT~; zfqSD#yjv2=+Aks}_r8YfZ!f@BsRhJotpQwLK#5P}5SaPIn~dK$p2azDBL;TiEZ@nE z9qW3Al)4m=4rkkfsrz>LygrA-9@mB!dac34au0UyXO0@yxx{n1DO)psCk|XPf=!rJ zD3~pF1SXs5L9cP@Ec22gd23wFk%rYldH)rwKy63RXCmnZM z<$=1e1V2j1;lx%WFs_?EE_n9>diu^|Pt>KLN@5VCmDItK&)HDAT)-_(+6q>a6`?5K zmpfGxi6?z@G5%^ExSTBFGM=mP4pv+M`J+{^Gc*S5mwv{zJH?n!LLUek-<6%4b&#`j zZcARZt)%wT9l5Cw&ceP9YXnKxo`Y$JOM(~AZSkGOTbO@pFl>|%$JK>rVY>GmIQc?I 
znWQR-?h;1L-%ZA1*W;X08DVti9`1e7S5Q!BP0s4n(z@i;v}@5aqv40cjjH)RrIMjb zN==3FrJC6*jLh^`l@6T|Yjhzrz)0i53Zu17D~upD+z2997%93$8Y>P9ZVu7WDr>`gtny|{~ zP3Vf!L1%qRseD9fLF7uK|BZFWZ}*K&`#RD7RlS)hQ7O@|;`+9y`=|Gb_4-YuLYgTy z6~Eml27U+sPgj0+-qYNqiIc2mhJJ5Y6ZL<~x>kK+6We>6x&OWQ zOWKqsM3LToH|SrL|ML>~+56>qb6$i<06&UkHoD$L?}L9fZX#OOw#>u$cf@0d{$*|Z z!%+sDXPotd!)zVhC1xselSd9ft6*;2$^R9a0f zt`L0J-yJeLw4!Iz=CPCww>Ysj{i$nOGR|3X2x_-!@Pf`6z{Tg23{%cT!tzraAuj6- zt{>k9RPOrH_Y%I)&fXIXY)`@S>=%Ml(dq&w@sWG_A|8rn4#4R`15lQapcgxI!Q?p( zII+4t)|h6a{k1Z9DeZ*2(l+3+jah=u;cK`F6W!_MH{IaX<;NhS89{YYyYiH_E#rb# zZ|C)KyC_Im*b`&jpBP^AzebC!U%_ZDjs}hFfS1*Wg7pC>+N;ZC93Y5;E!k~Zbzlt` z4eCKEDq5FZS9;1l6(0=iJaoZLO#qLxdt$F1b-W>gPwBn%J@^d8d43Vekfy4~4tDMg zD%(|nFA;@F6@9osxntCM#(dOUz8e<@Dv|cpmYntDhj>}@6I@@CgQMH$!lQ1V=pkis za^5Tw+K4~IB{6gGYmeh#JVISC>ZJ=zh<(N5tx4eSS#3go-XUdduq)DlNO)EJ~?+_Cpd8f)TEjAu_RFKmyDb5t>DF{?#8V8yEvlG2D^0`fT?^% z9Hr0)E*)--Cx)itPJ>u(myH)EoU#UN%i9sdqC`$Vtrf2Go&=v>C(^uF7o5H1FdCU? zksA-@-~&M{aEx5FuwlBqnR)*`r2q>E$wR;mV!3 zZqrt9y?UDV9OF+PjmUtI9BtS<@ha$)x`4QqEWE08#jz)sW9`Iiy!rM=>BsssIMg-? 
zrmVY-`8HytrfvcbuDgnxBvR<$C^tcISrL`YUx!;&Gcme%C(hpF758+_NI}A#uK3lh zGs3f5oUpAnDX|*>MQ5&X`svz`J+vLXJQj>M4u8hu-o-HH{1fzWYmG-j*TQtN2Sc~N zgZ$2WuZYiZV4PsIC`gxuT-I3a=77p=DwqCTA5wo3RPBkGBT+-13AG6`|nyY9|D3d<~VX8={fL(@>m@+B##o!xn-P(~8@iUxzke9Mz69){BShd1vW}@(%d+dLb&B z+(Vf;FKL{Z0c+Lz5Zcz{G}!DO)MV3bL6;7nX)llG;Aqy0y_Rw0&L6WNb;0kkd|xmq za{J*yvKrFXo4{#yIp*1$B7462G~L;$H6))iAl273S=Fi|a7spx z>GxKLl&|Yy-?buKvRfau>jy#c(yq9XAB*MI(O|ly3OI*4E?DLuc#N>epf0^hjK>TJ zzc`gd-dHQ>e&Y+Bb}fc&_|%`d%dViU_2bcduLdmKWDTI+gZIJ04F;7ZgR%cj!JRL6 zP-o3lTpuwE=n{E$X8js)yFLIOs?A3Q^&HUMFpXT#PkJ#AO%d81ab0)Qqpx zu1h(V_nC(p$-D5}z@uE-qDR;z8DYwO8MylJ0TiD-25J|?V8++!+{Je4U{W`gh6lgn zl=mORi{s8hh;ADw+A)u-N{XdbqbDQZsXb5G%nT{%l59jl22Wzaei)!U z8ss1A<7j0LGW&#crrz=#-8cuQY(Bxs9Fd3aq37|%5lien+n86a*OQeU>qU2s9)oVX zv+Zv)Dd2gj3i%pp9=x)R%{Un+jNt0zp@pOKN zPE0p9kqaqqPmWL658*ct@@90jf&RG@*^twA4A!oOHa$Y{5Y2$aBjeC}Wm|T*T!ZDV zv*VumO{b1u(_#C4X<~Qi9Xe|b6f7T-%)N7MkK)s|qp!0ns;fnS)wmUcd&S~JZE-G6 zcdCRqJSR9ft~EIs*Awm!9*1;mCwOGE2i|mg1@q2j!ItD5bke#?se;1Y_K zhWEktVkfNMpM~ewe#OI;ZE=KIHRcD|LZ2XC*yJ-5AM+QXV`?$Cc*Q!*pUfA?TIcfY zUFXuVBhKKdE;(S_ZZI6t><4$V_re1WIXbkJ1<$sFfM@cd0@L~|1GNtA;lS-SFmAyk z2-toF9CXwmRcZuOosWRKv&Gx8=xb--hVF+7c{g=lE_nVvtQ01wT> zv6t~xY-jTY#P2PPp+1i$QJx%}z!!i`p*azAv(rUR{^U$Z7?Z-ynco95411A@UHXw$8pVP!r#z|qXG^r1n-8f) z_86LEOqRJM@aA7E!GjycNT(g0X(vB3*dVS>-*9a)=$tCcTWrG0Ld@Zbn;*WYiJ}+j zK|E)>0o7j2!cuW>+JPrY&JUkReBV5VYkNH4jPh#yOowCX-W&MDlX3g*xw5fx;%xh| z5L)k43_F%4La0F@CV$o-2|4|7%ezD@Sf3B(tc)gBc7^@&fs}lR1&@!*;99~%kWtwM z674Bnt5Xfm;XUA0PIp)llK^#3&cHyQLAYk-G4xdFhy6zT;q*I+pw)dQjH)Vz-3Ei< z=!-f!IVqA$x==%lzI=rv)6#L}D<8U9?II@#uBY;}idN+BgX)hZoL0Z_m{Bqk;`gq^ z$~$>FYpf&HJC}&Z*La|qUR&%q-hi{Lk_YUiLuV!UVZ3A~n15dq z^9BvX%?n>~q3=sz5Y|Uk zg0(>w#@;i;wD1CSg;rR;;|3mdd&8}EYmaM$FK}D=bPTl0;ubvV2ErXzXnRjXj2NCq zvo$0kpQiwqj+nsV!zPe^WLZOwSRZ(?dq3BubPO$cmWXo=5(H!9uYg_~d4YHOeH?Y_ zK7F?$7c%oNbDDMTFbgi@>Dxt6EvLkW9#J5-*W`n-;5e^rjW`<>Bu=Ua1;dCN;SfkC z(jgW*aChN7oY5}pTbG{Kd(euhC#*b^aj6>R%Wjt!f-uB7E<-Kx%=Pgb`ZBJq6 zSHDBSq=rN zJy0P<0mJs>z|FooTvXpxc<NHm9V@jKk( 
zeX?ljy%%8GI3x}YqcLzk{N6ju^K=#sZ6&v83f4bnf+IYC%DGrt?O0Ypa zYB|wsnvqmqCT? z)JVjc5++3G^NCs%MsND9UmP$eaX zr^6J9y-yfDe?J`poF&PyZZc%S_ARJfH5w!{&(rjeg}nb&eif;fASwQ zZ)@-TCbqMhx&J-?vY2NRqR8*`-JpL}{?AL`Xa40Z*YAhkGV4intVHAw{#WB$GY&TO z6Ik3-{v&x?-5XGUmbcX$!TQ?|@wJR@eFyt%Z0o!5(f5OIsEOinF3wG$=w|u+BmDj| zd1il&#r;O&fABb5f_Jmfz7ync?WW6bWc%-I_&xh)?$y-$FZ=iJd`!`GJSTNcxk7 zBxsNZe~emJ67#G(9=lXQy!7>0;tXy6t7pSl_XGA&Kja3?bJ$Lf`R`~ruNDO%Pli#O z1v6O`Ye%?{eMC4g7G9D*nXQe>*?J&CjD8I-!MLI=*y;&+(hD3n?@o_tn$ftSf5 zrdl-6sQaTt+(UJi~(V%k=rrFGm0tTVlm@N#XYUq5S&S>MUqSD>PrTgSUzs z1igf7g}rtCNS8Y^IYU`**0b;o%>b7y5kJN=(RpR`B!*ly_BNZMh+&QbnJgOY9;H4JhVY ze{$eYm|9a3oxh#9_i-V%c|<6mCScyBBZyV_YjU8IJk#hO0T%5Bk~8Nuh=TlCa$HMC zm^eR#&D`jKNh37a7fF=|{2wbMHWZ?Il8M=Q8qW(+5`MhD5g3 zioIF3g;+aI6y`fB3mxlv@&hKR^A(;O3tw!OC0Rj3h-ZIOw#7|Hc+IE{IaN4-TzFZ= zhW0wd+A1pXy-zV{Um8H-MrDAh(wG zgY8H{oGgWNl(WE1QjdsT*hW@wIZK1J&-2%m_JF4Y&x7jXtB|l&7AyB(UuI@{+O%?c&q(9%&ijeD%Y}mQpZODaT>C8R;EYpkjWv`=p z3g~W(Qhf3Kq!A%RipxTwcRPro)V=hHhuMg)hUNsYw%QwRd zs|UJN=l0l4%pi(ws_-Rq z1i5%OLzk#lMhbgg+|CXyU&O$ru47T{IER$KIG-olHM%VD%0$*hW&;iz>x`C;iFAOw z1W~eX1L>hhI5&;*hP{6_{j9i|&0QhOl!SY@$8|HoO?E8lyty-(M^}{G{!-6fO}j$^ zI*ed8+8nF9@*2D7og;ypZgAC>DRlY;QDSeN+C4znC@X0{Ss5o{jQk@B9YeA=W7&)wP58k+^un7(t{Pdz7a)twYoa4??hD76( z3tEO(NT1Su{;4>iUnU+tc%0o&2_{(@BlwkN>#2Q}BuTO}BN5hnNmMdlc+yQ$xZ6sV zt%^Fpj%myj&fN8ot=WAOEbNR}2f1U+ctbf^+-neDtb;ZGs!D5;n(|ptt>H;-uU8`z z49$7ljW%(d-AH1#`WD<+B994geOTKpO?FJ;3KyWXhxCZo&KpCe5u359 zC<<&c+p(RAO2WCP_u|%33qh*%7W49n7J9uO!t8g*u#0*b%xiiHTl09oFnL-gF`4t3 zTE-t?o%F-8-o6jJBb5ljx#!^Q(50kydpLPK|1Qc*P~(4eAI-x02ap=)Cp2_O4cmPw zm(%B0kyE!~$r6>hRTth>rY?-g@L?}r zY-ARFR}#%Ad4B7;XIRHp-oi_IM@atA&j-7I4Pnqa?5HijTjX|uB-InyF5@~wyy4Ed+Yl+;A;o6erOca)4Kr~7bS(gJUeo4 z1HXc@^dVN<<1y3pwZt>h1IQ3#8KKFOR5rP^7uHFskcKD+8LZ>QY-@WPX5LxN&i1GV zu?Qz(GT)etR2V`O#+AYGmjn5-GjiBbXI*CcC6PQmsw`CObsoA&*O8ckK)R1kXS3b< z2xs-PNBwT8tjD;8P*W&m{GKcE4L_gNxR*fw&hBi&@Es&}b2dA;QHxpGrSU%JI}zSj zYmiGu)=lv__-qN~ooF=@_}a^Hg-=i7Dmj`&JL?N$s*TAeCu#1q#~xv1a4~s`>)7^f 
zM?kwW2Z~Kg$Znpgu=PAeE_2*?zND0p*lgX%PpC?Rr!y=`*CQp&Lvt&cke&77QX)4z28_IcxVnYgpDwsm89{*bD6wtVz14j4ma<`&)L5bfnv6o`%g!T?)TZ0$ip&{0+z54&ppJB+TIU@a}1)$=}lU~{xez$oo?9( zRKh-SsrJsolcUUpr!&Lh@(3Y6U9XIoP8q@XGwH*+IeU=Qi3;TO=`SSYX$%u@@Hvlq zV8W^-PZHg2iwJ+dkPx2?rqJF8J`F|UvQe9LKGT-nD!h%x5kugRrvf{+OomO!G~#Ra zdx;N5DU(%GGWaoxec0#(Mzk$e*qVc??BSbr%<@$^`D)*T<&;hlI;%&pfghE4^S16J z1rh^EN7I!C|EA|21Bk&M%-EOhX7vZG@(GTpvp z)|P!FcToy^xVy6v%ANU{>U?o^`nTNATL_Vzi%u2UsTiFIN{PvlAdb@K+>HiPAuzK2=kRtWDu4`c)S zJ%apYV}uS;er(rqd$Q+vI(x6#0j6J7pf^&6kSxO^EbQPkMs_WR+j1w^mGY@<%bOym z;im%}5R(A_#?{A2arx=r$?ZpZ49Y@B5F*bEF#htxd@hx({YF_=2q$UL)_PYOu%Y$1!KOJZojumM@s_ z9A~E62q)yGvEk-v#m~co$u0E(WaX=4Tw2$2ocXY+WS-m#SU0rc`y)C@Y^}92>oaR3 z)XY3V&gg~l4&T-UsW=yKej`o>uHQ)Z^(kY@)g57GxEK=`-$Cw=5dhQ6$Iy-^A;Qgx zkya&47}}3)^^Rl*)Rmb}-;WSgr%3b;XJYP+aZo$54I7f41Lso)um%2NY<7_ZYvXte zd#mN+_|7qKtZ*e0Uo6IYY&eJpdMg>v(T=S)?@6|MajbWx9n%S0NoJ@#;*|S(atZr$ znc0;Z>^#pLRrd~NHD3BmeXBlE;pefjwU$I{Oe8rznq$WHg>-JUDzTDFCSs~d&Pwk zVaz=KoD)&}FFNbNHKd+BF9~B7ejWKI4^k4Rxe48#tYyZD%OQ2XFWWL{70kTXj(PiU zV-E#t{Jc)eq)1X4=gqH1o??5l@{B$9U9f}o+_DQ3_e~(P9Od!aQA_g7#)1W_7IX6D zM_FF%F4*}rfn9BXkAQa-xwfh=gt_^#Fz;xtM8S=%esKjXlcj|tx0#T!EALS8K}Dow z`As~m(v^vQPNHRFK9Z%enegoTL$=T24!Kyb3hiWM$*WymNw$d$QE3%H%+0geBo{GeKa1?3(N|ME_IKe)wR3&zsZ;;FkC1N;#Eeum!1-Bkg z1K&$KP@>IP^o@?=Y+u`_0*Quvj-X3)bsJb!8O;|awA?)3HvT48`Z5`fXZK(uunw&EYop4_q2$Ju6yfN8{rI_?4wVef z?+HuuP1v>@!yw?cI`iu|hX49y4;|B`o z@+NNi&35JY?2p)rCj4Lar}-)W$%>!ZpSjKYBNE5IJ8o4U)zp*7p0(6t5g)(G?vJ3zm0a1HjR)nV*n8K}uy>3es_7L8_|gfy4im1^kc4o< zf@1<$Q0NU?_11D8eZ7D@ZHvRF^#u*1iQw%2p3}N|l3ukD$M@aOf>$dS*nGwee1b!; zEFm6T(#3Jt-$-Sr`H&*8mT84}+8P%kj3_M>S8ys(pC zbfPTF?AigvD+W-vY&p=7%D^tOeQuUSq+a!N{ zU9lFonu>8kc}05E&=|9$6X28S2r8y(j$`Ie0`8$9o+;Xgr}zZw<}IL=Ia+Xfx+hqb zoaDmGO|Wz4Yup6!6z=BpScJO{4d;#%$@2Arq-R9 zi>k69gLQ_$uR&PivJqxQS<~~+{BgNk67)3)0k_lp(5^JDA!oNpu*f@YO+o+E^s9m$;;%9m4yp>x8BJf&TP+l#Yp2>9$~!|YB4o20Zcxxr5z8vpsAV` zFh{!;R-7D#cDKx6*5-~R;chrr;PniRE?@IH0|_-u>Be!X{raz7L5SS^S19^_*2(-LBR=OwMs7zUU1)-vV0 
zvgDHuuuq4N3t~gNllimLG30iYKy$Dqrz*D`9IXejo%Nxxbo4^BbkIceF*Yb|v>X(@ z&(arzwBbR`63p6Dg|5f#(f&*!ZV)WM_71z?=y3}=eSJ4lcEBG3Ixa)oTRq@yow?w3 zt0`>6+g3P3Ya8|`u11M69m31?VmDt`aR$yl#OYxUcktOAOfr;)^%mRF{ZT0EPshQo z>(02!Mvab|norFV{UDbPq`^5!*!@#5*ZI;kw)AxnlsX*X4N6PJ?%P{Kkh&&54eLWV zUSC`=NEeCZbzUE%qtJI!ErZ-oBcv8&Qb0b*rgXV1wQ^ZKH5z8WT!SgehpA?7RTy?j4jp=V@XSefxZ1rJhTT#D*@5*uhj#br<2SLm)OaFw zSf!78GZ(iIGl+`TXI-q@i5O2$oi6xaaUfSGYO~(n2_?6uT9V}IEx2#zF0T4Z z1eh)2@PX`I{M^f$>uk3gllFAss^6$U+}XAGvFkGI-F7vmo>PY1Oomy0Oa*R8IKE!I z85QD}!?T%&n6cn3tUnV9U(GA&D~-vxDsC`zTrr9*GQNqktxgNXhV22x1y9l5`4Vhh z6^A3cOR%aVU#Z!N5pdB-hRaxUk@j4wLY}@JhI(nPINMGIRB#A-Tslcp`?%umV=36% zrz5>n)rTy48VBtcu7h)0{o&|`gIxP0O`dI+J!m>26kfjV040FcWF5<`|M>hK{INSIu3^od8tdj#L!vrdl&H zu>0X4R6MXB?vA~|CVjQTwokfXr#tZ_ugjLfif(6%S_QqPD&x!Hy_+U??8R=*J1rBJ zJuyLk{au$Xt~z7>oUOM+L60bb3!h540nX#KJcKAmHZ zuX)44V&@EQma;VrRMLbUs}i{FzAkj{f)l)#R$H)-HU)<>@=&6>ja(bE4u=JAg&r2} z5ZZS%KGyPv6YF=v-YK0xdi^te*7knG*~Q0P{LviT-cc1tMNH!?wyARVB^m%Rn+$JO zPR7FnZ*vBdZ883R5*<)nMOTfui(Q`f5==C`fqt=fxru`Yf$zH*%zCvOuhkDFQw@`W z@3jIuPDiF{o($*0T+za$6Tb1v;%v?x#{st!v2?Zzl^7cX?k8h0+iwL7=sXQhHsqMk zdi{>>UEg3w=t5|fWyBr68V+wKl8fJPr+M*H*%dOJciNIS0K}*3ny#ehPphk z!-4(Q;63xLxNz1suJo}wJRLCw)a(s-cOGq_bq}2|%tHYNBxR#lQ6O}XdQUTg2LW`c z$EATgVDXDD@Ww9`eIDAvSt6Z6QLC-Ly`zy@yTmkIR9 z@I;X6k_g%lExD5}I#gTE2M-TlhWiI;VaV8>+~&*#y3+PBciNy0CSTX$#79O`x2`4h z$%X*xvuzh@cUHiDS`xI?l=m1aSp*-xUg55-oPZn3%4nN@x2TlrD3~|#zTpG46jZF6 z$bGruOvd^Zf<|cqEgIqorwfnM1M6DBX(w;Y%k`q~pN_!~H(SF@lVZVax}$iNbtt}) zlZVr@;^~t%D@!Cz^EhM0Ffd%T6t#sNc&VEP(C`t9VR9tH{4e`>MlLtGJDbkZS*1Rl zRfsnTHck~NUOvy290&z9lZ8BYsRXboQG;<8l5o<8Ei`Wa2G}-&aVq*ZxskQW5PdhP zWYL+foY5+4s9H1%0}uF6gL@AR)>a$grpY%s`$s}dEZ@l6Ff|?~OX@;y)>Og3;%Iyo zdy59&UB|f(zfI2@rQ)5qTJG-oQ}nL(1{yH@B2Dfp^Kvf-UdUuw2`8D(@z6zgD9yP% ziQ9&IXwaP(G@;i)>{&PfB&OSQN3MEfJ6m7KD}Kpk-VK3AAKp`oy55|R`@RyVrRH$- zYZhG`JDYR7UC!C_ictKVHV%yNpc$MY&bjrHR*j3II_DPio?WtFostW%-MJND)vCc} z%e7;hKO2GTr$m8*Yin-bEYD9KF+y!|(yP&&bHB1?}2mPrloPF6FZcFEvhHE-jX>e4v17ZPmc(O-jSMGmq0qxz6j{M;sFT^FvzGJHcbwtW1hdK69VRA 
zT8$}-u#&`t_ySOken@%Jai}xhoqWA_o^u?+UugtpuwTX_fFta~)&U3(n zL#$BakR_MNKM!&mUC9>D0}#}+VSddy$;nz4TWu+UT~$X z1|*iJH+&bpBSsyRArf0s@p3^i5m#V33I#A}mZv~QeFmJ*E5W@- z?a9!-!E|HW?yy>36^CCLK!o{o(X8k?{k%LH*kEbA7}O40PfDQr=Tty)yAB2?+d$^b z_HeOw6ppU1ecEGTSh>RzH~Tl7w;$6U11smkqzy|sUJXGHosU#?s47v_ z909SWo3U%6D`<|J4crY+Hs!`f=vZ-$rWl`P2QRlpC!1I-sEdW-p*!KC*-5-^FdV9O z214+tICw56pr>kb;X|MCDAQ&-_Hs?8i{_rDBTU}YkT$ynM-@Wwv)AJ54T-&`Hd6PHk()%_c_-K7BRdUS~#F!6ZE+!vf%R@cZ ze`;GA?Dhqu54B_4E`7o0^Cjs-=K$`b_z)KQE*j=c>qoP*BT?<@OJ3aNI_|2b1@|`l zG-bW4gZv%ea`dHT!I<;Trvsn!ZGHZ10K^;RXV&Dx>U zr(@h57ayGW={jxeF`3b4r|>@)w+iH%JS`#LIb42wzTuB~F05Nx6WdwM-2a|`IU%zN zQRH`OzU^1>Cy~I<{L8}B?*-8Elu~1CtMMoD2mh<*!a_DT(MfCW@BjBXtG~v!elvnU zcw9?;N0Vg4{wv~I_32IAc5AjLzh`%@ZEM2+Wp|pN@}I2uncdNA)*q2L{@rn{bIp1y zvSTgvSj5M#@_PxijBBYEeh+V16U}pBT1&{EyT1O*0FYEY2CEmG=giW_;HnBK_R89V zEP7sp(;ssrLaPUiFp1zw?kkf6!Sf*_^fU=e-h%y~YZA|W(^=^0c|=Oti^U{`;}N$6 z(ldKG>0)+-vp*zBrY))>qi=8IBzDAOf-Ax!lVO~)`EF8mH;E0E{lt~Kw_`SbnnK9` zif=Lb~oJQuE&lU4rXgNS(3z$C2Wk!NWSvD+hpuC zL$2R9)lY}9DQ>>J$%!ACp8j68%%Lwi zgezB4eU@@?QksDT#DEi??PvQ}_Ef@Vc=K zo2x8GbVjXZvA6Tdl>`0Bs#)vs&EWwUe9(%_p8Epd_1gi9-U4wH3TfYlQ&|2tV>pX= zJ;7P2G9ZIYiK)CFX`OGz!p+>tsF9W^n`%PJw({7@B|K8O=q0R}Wr$htd$P#IGug?> zQl$2BBw;7*FiT-PoZb8oHp)9=d&{p_v2hxEVYd(F3%Zjm|IzH!VhiRQJdNdc&Bl)N zk73%PDTdQ4rOC9mrfgb6oKwDJ4HSB(LvBN6`?{LY~<0+yw)9h3El4i29MQbhffTJi3P^| ze&v(cjoJRpcjnv@n}=Vp+ZK7|_ofS7?5W3ew^S1S4LwNi|7-8r<7&>fKPh@pB9tB~ zq(o1T3Fo`ksYc2u&q;X{8j3vfi1L^o>YMPSq z9Jz{tuO5HtEC9!i z2k5C5RjOv!pN0h3qM>m8E43+!Sfv#)eeWHm&d>Q&xKV~ajZLNP-x$(GOZKw!y#v@B zvW3Te&+AHWe&w;p6pyDDv&COy=ciVQ$N09@jA-ohyAd2$yd+p}Px%FwF1? 
zT)ryIA=r{ec4Y|n=tY{NM?n~C_F^ht%?g2@#-rh$-*I|+zc7E=41Jov>on?|=Ahra z3-G99FFME-vjsbXP$kNgjGL>C0XYe@Gb9{yCkIj68@e#@RVpfOW6^TeJTi9D6(&8y z58WHvCn!}9Btu?2B}@8cv#E2VFm$sE4V)PS>f;_06%$3;bB_WYTsMsicH=>+5IZy8*9+)oCS2!FO|%~ zo_ciS@K&NLFv@pcky}dF#-?x%x4~oDN%R3}9Kb6)uts+?7N^Sh6Qf1~L44h*w zP#L(Kj@3vatv+>7W>7>uFTZ7FotMJor&F-{U@=ZM9SJJ!>SWl{RkVffqT_15C5DB` z*xY`Ec1#ocmNxORKJWqgdH)D(T(uv6JQq*0glnM+{`=`vcVql6sg}6iNdj)OEN^By zgGsgncxyIn!qx7pDAsmhs_Pku^*9c5XDp*LXB+bLl-*dr6eoNJGSo@mO3?k+DtOuc z3m95%WlwKQfTzZ)_@?3n>HW$a=Qt|SY#2aK8Jl1e8%PEFgXm2!dz7)&!0~}+SugX2 zFex^OTrz57b>z>(dL2)wFWspf5b-$D#D42)NRA85Cmj*eG@pUb5It!qoh+ zm$nSWYfhl4s(>8lF{t!y8tL|MYC)}67!+CGfwAXrGK!HiXy(R5xc%lKY%#mR1}l%G zCPH}_H%gxJSNr4XqDlFcd$OLU(Du_30pe@hK3VM4yBddRmqLWuu)8{TWFkaw~%iS|*>B zrYDVOmvzI+`K9D=`d(5KH3Il8Y0$_k1HZ++A=2FhZ7bDKwvQTaY?LLzVRwjmp)1Pf zo`I^wZJ<_r6%L&=p{vKp(L;hMa9~X|;Z&Rfn3_hunec#7ysk^S&ss5_dvz$?GZ|Um z8^!Q?_IzfFK9|}Ayd>=n%eYRf8`$l)2h+2>6n0^NJ$Ao!jP0rXigbLR4(EfOgZz#W zB(o}qQ5o+}+G4bs!cti%b?eWl+sNW+$4vG{aL|O&d9B=M8Ds+A#~%(2a)3vwk?gig zM~2&N6eidz(?){}#92WbyBixaVPhSc7KwU^-xuxC1M*MRo3 zI+)}P_VLMJwSqygboJj3_JNEd-gH#wK zM1C+BIYH0lKIlKHU`;0_vw^O@bfSeXqvd2p&0F;7t+!F+Sik@f`l2YWT9dxqcL5TAZwyF;|A^XkbbF_4qc#2S_4BMUw0y-rnQUc2iMUa zZr3nykrDT;qV>7i^N^l68AArxxihV4-H_ul2)z#~5gE<9q^?$z9yjr#yviCG<_7X z`Acs^YcjGXcL!Cs}SaAQ!5{*k0{R z>I7RsyVVfG=kLOO_Y)wt!wR?NxHI)v7BQP|UuO<&d`9Xu&8X6n4JfkWI# zy&)w2HVN)~gN-~4Xkgn)v@9+%&*C&NN;@31g}x%*V@*_-^CR~BXY8$g^-!MK!tOS1 zAf-O1Fm_ZnWGePAcrA4e76+;^Qw^SQ`|4X!Kb1yC?a^a6-Z7S1jc#PiXGPQErIyHl zu!Y7pDAC4)xeU4I4pDj!C@-;)=-gP1ezF76-=QxxaEN3TZoPrm{S)AUq7*nfg|e;E zy0k=n4AE>eqX~I2sB7;*TZipnvc5hCP2LCSnx9qZtht_G_w`w}rC}mNM;nulou2Tt zPdBPrw+GHhE3;Sb-s2AOS0qQ%LSav9BF`xF5U(iLNO0{`tiZD@o|Ygru1L|^2^eKSVrOOT@ct1B?8c^K8n9$4T$_!w@uHnq& z>*y`?M-^|wb(S+}c2*d~SO-xx`D7C3JdsW~u!AOy(A=fH3y=GDU&IagmE>StYI8qh9=Z!68Dx=P`l2Dc$Eo&LrK1xfflag zk7PMjYr*c!6^K4E1qVMoMvmOtgu2t#lcL~O;uW@w+-}Zgj|?I!6sc}3vS zza7I)wHi`iUkM^Ga^RX6OK$?qh&IaTy(rxGzZnL-W=_3WLqyP0z90XWlH1u=dgoAn?Mo!WB1 
zu*n`%cGtjRjay6Q)o{$jx6y%sKrDYk0G}4vDX|hR}O0}5%Jx@ZftrT~* z)q>H0L(Ik@y4(nN3y?mw8|Cjx)1iIJNkw}<`uY4?FaBTqwK)G`zC#^B*SeB;-RZBr z%JkRk{ioKKV@3G~#Ut_?c?91ViPwYwRlkx@SZcNZe25`BjQth zzON+y$Il=00a4Xr(Rh!392ejrBKthxQ~S(zvR&#5VKU*uG z(w8M7`4RK`@Ak(n5Xr6hA(ND2F&|&xha({Ak2`VU!*WSX{G3lKkSzIY;~(1>wf6(^ zsXWC02YZ5DBQ2hZF6?7_;`1M!5Gjt#`~L!6qredcqjZI%loWTdl`P`!_w0*Bc^Bb_ zy%pX31^m~KfTVp9%acLThh6{F|4`nCWdCcdOD}fkB#Z7a&Fi`l+ZV~aOVH`kxipAz%^k4zMjzyDtT z_K)>;*Gx*j7l_AS=N)sxj~M^sdfZ>6$Nk^eC;s|ak9&xVJ`4QRdqHw(SM!3hF8F(T z+y6=zo!n>P`b#5!wh@rj+d3i!>qPP+=J((IUf?5=Tk-QMDaT?yzQ7MhK=Qqy28_b8Dp`D^1J+n0A%?Vol{ESF;V$L$G+_rBuyR<-Nu-S@}##OI%d|ADK&1NeE} A?*IS* literal 0 HcmV?d00001 From 9dd65c88fe1b85112ad378bad635623ce21fa09d Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 25 Jun 2020 16:20:24 -0700 Subject: [PATCH 51/57] Code cleanup --- examples/train.py | 8 +- .../imitation_learning/custom_ppo.py | 6 +- .../custom_ppo_tf_policy.py | 5 ++ ...itation_trainer.py => custom_trainable.py} | 3 - .../imitating_controller.py | 13 ++- .../imitation_learning/imitating_network.py | 19 +---- .../imitation_learning/keras_utils.py | 3 - .../imitation_learning/ppo_model.py | 10 +-- .../imitation_learning/replay_buffer.py | 6 ++ .../imitation_learning/replay_script.py | 80 ------------------- flow/controllers/imitation_learning/run.py | 10 ++- .../controllers/imitation_learning/trainer.py | 3 +- 12 files changed, 40 insertions(+), 126 deletions(-) rename flow/controllers/imitation_learning/{imitation_trainer.py => custom_trainable.py} (91%) delete mode 100644 flow/controllers/imitation_learning/replay_script.py diff --git a/examples/train.py b/examples/train.py index 05ebb3fe3..9445e81e0 100644 --- a/examples/train.py +++ b/examples/train.py @@ -75,7 +75,7 @@ def parse_args(args): '--checkpoint_freq', type=int, default=20, help='How often to checkpoint.') parser.add_argument( - '--num_rollouts', type=int, default=1, + '--num_rollouts', type=int, default=20, help='How many rollouts are in a training batch') 
parser.add_argument( '--rollout_size', type=int, default=1000, @@ -115,6 +115,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -200,7 +203,7 @@ def setup_exps_rllib(flow_params, if flags.load_weights_path: from flow.controllers.imitation_learning.ppo_model import PPONetwork - from flow.controllers.imitation_learning.imitation_trainer import Imitation_PPO_Trainable + from flow.controllers.imitation_learning.custom_trainable import Imitation_PPO_Trainable from ray.rllib.models import ModelCatalog # Register custom model @@ -356,7 +359,6 @@ def trial_str_creator(trial): return "{}_{}".format(trial.trainable_name, trial.experiment_tag) if flags.local_mode: - print("LOCAL MODE") ray.init(local_mode=True) else: ray.init() diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/controllers/imitation_learning/custom_ppo.py index fdbc073a8..0075741d3 100644 --- a/flow/controllers/imitation_learning/custom_ppo.py +++ b/flow/controllers/imitation_learning/custom_ppo.py @@ -1,3 +1,7 @@ +""" +Copied from RLLib's PPO, but uses CustomPPOTFPolicy, which tracks value function predictions in Tensorboard. 
+""" + import logging from ray.rllib.agents import with_common_config @@ -190,4 +194,4 @@ def get_policy_class(config): make_policy_optimizer=choose_policy_optimizer, validate_config=validate_config, after_optimizer_step=update_kl, - after_train_result=warn_about_bad_reward_scales) \ No newline at end of file + after_train_result=warn_about_bad_reward_scales) diff --git a/flow/controllers/imitation_learning/custom_ppo_tf_policy.py b/flow/controllers/imitation_learning/custom_ppo_tf_policy.py index 0dc381b55..680b7cf76 100644 --- a/flow/controllers/imitation_learning/custom_ppo_tf_policy.py +++ b/flow/controllers/imitation_learning/custom_ppo_tf_policy.py @@ -1,3 +1,8 @@ +""" +Copied from RLLIb's ppo_tf_policy, but additionally tracks value function predictions in kl_and_loss_stats. Used +to evaluate the value function learned after imitation. +""" + import logging import ray diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/custom_trainable.py similarity index 91% rename from flow/controllers/imitation_learning/imitation_trainer.py rename to flow/controllers/imitation_learning/custom_trainable.py index 7db18d005..b41728f11 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/custom_trainable.py @@ -1,6 +1,4 @@ from ray import tune -from flow.controllers.imitation_learning.ppo_model import * -from ray.rllib.agents import ppo try: from ray.rllib.agents.agent import get_agent_class except ImportError: @@ -18,7 +16,6 @@ def _setup(self, config): """ env_name = config['env'] - # agent_cls = get_agent_class(config['env_config']['run']) self.trainer = custom_ppo.CustomPPOTrainer(env=env_name, config=config) policy_id = list(self.trainer.get_weights().keys())[0] self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id=policy_id) diff --git a/flow/controllers/imitation_learning/imitating_controller.py 
b/flow/controllers/imitation_learning/imitating_controller.py index 4d912179d..39fd2421e 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -24,9 +24,9 @@ def __init__(self, veh_id, action_network, multiagent, car_following_params=None """ BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.action_network = action_network # neural network which specifies action to take - self.multiagent = multiagent # whether env is multiagent or singleagent - self.veh_id = veh_id # vehicle id that controller is controlling + self.action_network = action_network + self.multiagent = multiagent + self.veh_id = veh_id def get_accel(self, env): @@ -51,11 +51,10 @@ def get_accel(self, env): if not self.multiagent and self.action_network.action_dim > 1: # get_sorted_rl_ids used for singleagent_straight_road; use get_rl_ids if method does not exist - try: + if hasattr(env, 'get_sorted_rl_ids'): rl_ids = env.get_sorted_rl_ids() - except: - print("Error caught: no get_sorted_rl_ids function, using get_rl_ids instead") - rl_ids = env.k.vehicle.get_rl_ids() + else: + rl_ids = env.get_rl_ids() assert self.veh_id in rl_ids, "Vehicle corresponding to controller not in env!" 
diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 30eec3696..81642883a 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -1,11 +1,6 @@ import numpy as np import tensorflow as tf -from time import time -from tensorflow.python.keras.callbacks import TensorBoard -import tensorflow_probability as tfp -from flow.controllers.imitation_learning.utils_tensorflow import * -from flow.controllers.imitation_learning.keras_utils import * -from flow.controllers.base_controller import BaseController +from flow.controllers.imitation_learning.keras_utils import build_neural_net_deterministic, build_neural_net_stochastic, get_loss, negative_log_likelihood_loss from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer @@ -55,7 +50,6 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, # load network if specified, or construct network if load_model: self.load_network(load_path) - else: self.build_network() self.compile_network() @@ -127,10 +121,12 @@ def get_accel_from_observation(self, observation): mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] var = np.exp(2 * log_std) + # track variance norm on tensorboard variance_norm = np.linalg.norm(var) summary = tf.Summary(value=[tf.Summary.Value(tag="Variance norm", simple_value=variance_norm), ]) self.writer.add_summary(summary, global_step=self.action_steps) + # var is a 1 x d numpy array, where d is the dimension of the action space, so get the first element and form cov matrix cov_matrix = np.diag(var[0]) action = np.random.multivariate_normal(mean[0], cov_matrix) @@ -256,12 +252,3 @@ def save_network_PPO(self, save_path): # save the model (as a h5 file) ppo_model.save(save_path) - - - - - - - - - diff --git a/flow/controllers/imitation_learning/keras_utils.py 
b/flow/controllers/imitation_learning/keras_utils.py index 59928affc..f5d9924b8 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -119,9 +119,6 @@ def compare_weights(ppo_model, imitation_path): ppo_weights = ppo_layer.get_weights() im_weights = im_layer.get_weights() for i in range(len(ppo_weights)): - print("\n\n") - print(type((ppo_weights[i] == im_weights[i]))) - print("\n\n") assert (ppo_weights[i] == im_weights[i]).all(), "Weights don't match!" ppo_layer = ppo_model.get_layer('policy_output_layer') diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index cbc51c6c4..85a7c841e 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -1,9 +1,5 @@ -import numpy as np -import json -import h5py -from ray.rllib.models.tf.misc import normc_initializer + from ray.rllib.models.tf.tf_modelv2 import TFModelV2 -import tensorflow as tf from flow.controllers.imitation_learning.keras_utils import * @@ -90,9 +86,6 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat # build model from layers self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) - - - def forward(self, input_dict, state, seq_lens): """ Overrides parent class's method. Used to pass a input through model and get policy/vf output. 
@@ -110,7 +103,6 @@ def forward(self, input_dict, state, seq_lens): (outputs, state) Tuple, first element is policy output, second element state """ - # print(self.base_model.get_weights()) policy_out, value_out = self.base_model(input_dict["obs_flat"]) self.value_out = value_out return policy_out, state diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py index 4e02a52c8..47ebebaa6 100644 --- a/flow/controllers/imitation_learning/replay_buffer.py +++ b/flow/controllers/imitation_learning/replay_buffer.py @@ -7,6 +7,12 @@ class ReplayBuffer(object): """ Replay buffer class to store state, action, expert_action, reward, next_state, terminal tuples""" def __init__(self, max_size=100000): + """ + Parameters + __________ + max_size: int + maximum size of replay buffer + """ # max size of buffer self.max_size = max_size diff --git a/flow/controllers/imitation_learning/replay_script.py b/flow/controllers/imitation_learning/replay_script.py deleted file mode 100644 index 9d41afea8..000000000 --- a/flow/controllers/imitation_learning/replay_script.py +++ /dev/null @@ -1,80 +0,0 @@ -import time -import numpy as np -import gym -import os -from flow.utils.registry import make_create_env -from i210_multiagent import flow_params as flow_params -from utils import * -from imitating_network import * -from utils_tensorflow import * -from flow.core.experiment import Experiment -from flow.core.params import SimParams - - - -def run_experiment(): - create_env, _ = make_create_env(flow_params) - env = create_env() - - obs_dim = env.observation_space.shape[0] - action_dim = (1,)[0] - - sess = create_tf_session() - action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models8_vdes14/') - - def get_rl_actions(state): - rl_actions = {} - for vehicle_id in state.keys(): - obs = state[vehicle_id] - action = 
action_network.get_accel_from_observation(obs) - rl_actions[vehicle_id] = action - return rl_actions - - exp = Experiment(flow_params) - exp.run(num_runs=1, rl_actions=get_rl_actions, convert_to_csv=True) - - - -def run_rollout(): - - create_env, _ = make_create_env(flow_params) - env = create_env() - - obs_dim = env.observation_space.shape[0] - action_dim = (1,)[0] - - sess = create_tf_session() - action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/') - - init_state = env.reset() - - test_state = np.array([[1.0,1.0,1.0]], dtype='float32') - - reward = 0 - while(True): - rl_vehicles = env.k.vehicle.get_rl_ids() - if len(rl_vehicles) == 0: - observation_dict, reward_dict, done_dict, _ = env.step(None) - reward += sum(reward_dict.values()) - if done_dict['__all__']: - break - continue - - rl_actions = {} - observations = env.get_state() - - for vehicle_id in rl_vehicles: - obs = observations[vehicle_id] - action = action_network.get_accel_from_observation(obs) - rl_actions[vehicle_id] = action - - - observation_dict, reward_dict, done_dict, _ = env.step(rl_actions) - reward += sum(reward_dict.values()) - if done_dict['__all__']: - break - - print("Final Reward: ", reward) - -if __name__ == "__main__": - run_experiment() diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 924e1a400..6adc04199 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -1,6 +1,10 @@ -import os -import time -import numpy as np +""" +Runner file for imitation learning. This script performs imitation learning using DAgger and also configures the trained +model to conduct further training with Reinforcement Learning (see train_with_imitation.py). 
+ +Usage: + python run.py EXP_CONFIG +""" from flow.controllers.imitation_learning.trainer import Trainer diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 786444cd2..2c951ac5b 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -254,7 +254,8 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): next_observations = np.concatenate([traj['next_observations'] for traj in trajectories]) # iterate over data multiple times (labels change every iteration) - for _ in range(num_iterations): + for i in range(num_iterations): + print("Iteration: ", i) # form labels next_state_value_preds = vf_net.predict(next_observations).flatten() next_state_value_preds[np.isnan(next_state_value_preds)] = 0 From 739c2ca6346e38b8d97bda1fbf41c51bd51b1a32 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 25 Jun 2020 16:54:54 -0700 Subject: [PATCH 52/57] test files synced to i210_dev --- .../multi_agent/checkpoint_1/checkpoint-1 | Bin 19590 -> 10209 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 210 -> 180 bytes tests/data/rllib_data/multi_agent/params.json | 54 ++- tests/data/rllib_data/multi_agent/params.pkl | Bin 17746 -> 17562 bytes .../single_agent/checkpoint_1/checkpoint-1 | Bin 27018 -> 582 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 210 -> 180 bytes .../data/rllib_data/single_agent/params.json | 42 +- tests/data/rllib_data/single_agent/params.pkl | Bin 10890 -> 6414 bytes .../fast_tests/test_environment_base_class.py | 78 +--- tests/fast_tests/test_examples.py | 31 -- tests/fast_tests/test_files/i210_emission.csv | 2 +- tests/fast_tests/test_scenarios.py | 183 +------- tests/fast_tests/test_vehicles.py | 9 +- tests/fast_tests/test_visualizers.py | 404 +++++++++--------- 14 files changed, 273 insertions(+), 530 deletions(-) diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 
b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 index bc68b0b99d2171f688662b7b1e4c841a36f8ffe0..0693ed4b62a9cabcdbecb267201ea862144f212c 100644 GIT binary patch literal 10209 zcmXYX2{cvT`@RSv$&g4&6b*)=TK$u^&epqmDkiULJcu??8{r^Kr zJ8if5A6zHi})gh%fPln#~qUq3f44xMZ-Vm~e|RDM;c!d!bX&v9d&VL}z1oSf|b zjg|j3_9Ee-O8yxZ1==DaeSW`D&|)fV4|xp33)XX$o^KU)HV5K9!5r?@#EkBW{ zt;51e?%WmwH{qbqAVG&{VbP0e+{U7Q407?|(vFufct}zBZ^L|;ck~Z#KxyuqIqUJG zOdOoH@(}v*`FN>=5e}@&!^Zv7&8(;B3UjPfg>#zag<#SL>Z*~jxcLe=L|>w@Rt+>f z*_*3?I{ zxqEV=xaVhGCz?zNCdWqz*Iv;PuADSZ=rX#T8}~$!D?8tWdn7!Nd*i@+h#U|H#fO_w zs@hy=+odIJ$aChVl#HNURx6IKc!6EAzd=Vo3qN+0lJ{5rxl&OsIHN5M-Sn4n?Yd*g z4~u@dbI?qQouf$Ng2*hN)xi3zy5WWfI;Ik-W?yhyV&=)g~+p_yCgsN!@ zYYGP-;iDq=m6M(Dskt3jdVeV=wf8VDS@{@e@tI=IjlvSn$fi8r*YaZCiql0L^-t!U zKG8Z(=dN;&s(&TVqNJ49o_?5j@M}HqYep$&^~ExtMOZ0MC$E~9J?$Xp^M_o{zOX!w znq~>-idzDY8dY&(FPHMpR+REKIv(ac-B`veytJOfP0TbozcZ7!Kg> z!CED7kU9>dZiiW~ICs3_xD$SSn~jppCR7=>kjT5#;pp*h{8q1uZ~9hY$1@L*^m;^Q ze$rv(zMW&c%Z<@Q`8>`ZT|)gM7vO_Ksjx@S2nIS^h_X=yJ+vW~OzAiZ4lfl*b(Aq| z@=8Tr=MpyS#SgkhgwpdjQb?SNH<)ZN0mnl(snX8CU#G?y6~2?68x@nn~*E9Nd;hv^@e;OvE`A#P1FtsVa!{goU++AEv% z$f!e>!hFzd3}$~T%!Xxk*O)2xv z<8vu=W!RDBrN&5SEu9b=a#rP{sC2bTSC8>&BNa&;?%R;N$@u`jpmz3WB2T5q{`Nr>>a*M zj`Nm6=m%dMihf7VTQ4KVHH`Mthk4 zk69$w>mGetD~@X)QZ!q$6p}Q{@k-DnFy4L?(^a!^8Ak~gzBos!Zi|6L^mA%`!2*l3 zwIKbaDDZAokP?k#dfHP7KW|ATF7YBz=wnXMrklO>Zwl&3CgI+;0$O@s1-?b`!75%7 zE8PEMX3D0J5WgzC>^wqb$NLh_)KYjUohZ0FO$n=y6_P_ex~Qfuju+mS!=g78aIbzl zU8l@MvT|9`@2g@;Q_S z&zcQ}Z6Z+Sq)Yx?%B0gnSFznwe-Mi~9?n?`v zRhy{Xn{KkeGZ3_!*W&Qgc_1Zn4d(@1zz-uUY1>FC`Mmuc-tJ!nB~zEcl%)W=b~=zz z^N374H5I?ioQiq!hIrFu5e9h=(z8#wRe)tY%-itE! 
z^jtYsJZvkoRWc9VzIM=P+i!IFHF@m4kjg}tPQ(6$QS#X08}nOK1RXuOuycnBof)En zFFdTlA|Qtr{!GFpjmg;EX3Lr$V~KtZLq*G{0vjxb6+OZ1?o&agBU?UV?&l`@`G*{8 zq_i^Z%em~!G6NjJFjAux5c{nxKW3IF$sub4j@6L?-!fjS^Zi$?W_q=7NW|6lVn@z1*)}lki@(A(4&K!!LIxg8zZ?2hwpZh2TP|@)$)82y;qe~ zemP6!t_6^Ou_~N!C=JF%?ZullE6CC`6~-l`3QcvNQa{yx;v!m#WlOwZ{?0sXjo}b$ z;(%#uW$C+JEZR&nWxBR5!gi_Sg8S35$){J_aN{)_YF;&q4BKXzWoY&uryC zd)0|o&nQ1mT_nD7AI1b{k=R%{a&Jx@E*ouS-F$P%cTN=PvD`;H zl(k7tWCL^`SO$_UGO$?lIrIFR5o%?(lXESP$RFcbpsbP%lC`>!-1?bxt<}I^4O-As z^PPHg_)sw36Sh3`#pj)IV0(5hY&Y?Q+8|5p+PfVaKP`dpKev*eNHb={IYTqCpr#Yfz3gSb*~HV!?w#a6xyi77;R&=qXoAAV zGV<#)#nlrUVeLj+^jejU;bt#b_nAnN{QX!5Ra@#+u#ZaXj;&Rzby#ONg_>R8z)W;H z0(NQzn7j8nbK<=*O5_HU?5GYB+oTKqpUyE}m-UH(+*-^?XTkZXD!q2B1bzM)z_4f! zy_N2Pg8Y04&{;&ZziPocjyC=}xCTpR8PGi|CqwMlPUymV4EXjA7gjBXx^-u%Xth6j zZ%IUcjUA4XENszG0?pq>n0&OA8tBM^ET)<92Jca|$qCejxk)svbkTulk0P3mpcQ); zQ%ddd;ss?G*P#iv=9H+}mlJn)W%3}r#q^A0li>Em2zZ{_MMd5|r|jQQ^xT{XVJ3#K zdgTQ0IPO48= z)cHT9ZvzgH#dT?%$`8rZPHZx;oHEub(FgMGH!-_5tpoFO8t}5Rz-&qQRt!J20*fj> zbC|^vwEIa4&W($~)}}jTn~N#7x#qE(d)4uCmk%8Kk^;RYE(Et{!iSh45-jk**^_Ru zp8xJro-G5eJ;P?3zb8XjTmn6IVIp#ze~^v>NxI8H7qmk2apg%3QX=Jre_W=Z{G0}i z+q#q*-ndSEzeHf;ZwqRq-AUG|)G+TXkC4sbktDq4GaZ*P8IKtmQ5)%bAZ+CkY%PTT zoEdOCX%_|(1u{uT0Tg~7!IjDa@}wl1HQKq4xqNpeabH_Ovt{H#WobRFNQ^;u6<@Y* zUL9&nDkDz#Kmx9d;O6VM=+%c7WXH(K)}_aclR1*#$ee9`cv={`18-A|13# z?*ty#CrFIlw9s4>f!I+qyP z%8)>P26m@kVg5dIrLraoRKHPpCUTX3kn|R-!;c{W3F+gIL%pW`C&Nw6FFadjH(#Te}A%f*n?4BBF*3Av+l z;klavgt;hSZ0|LCZ^t_}#kGvdcpymydNLR}s|jWMt*GAV8K9v%7b`_;NR?(VtG=m; zEIt{Iu8~)0rjj3=2$4r~;e6bud=H}NPqV1zjWGCikS^S`37R&Fv;4>@Xr;Ih-3^?O zDG-HEuQ!leKA}+1R13!wPBYfOl(Bc@BqUF`O4`RakchUKkeqCeCTr&~i`gT1iob`h zJ6ejT9Q{~hjkoN|HTAr-h=1hu$%$l9mJe-pK1R+in~dNb0e^mqVEgA0vh>zNG8&o) zDfefA#@CzlbKN&JwVpb97L2fbU&(u;5bEA?IAy>qkI#|ldeYnfiTQkG7 zQ{2ULma382sPr#>qjQ+4-`RbFQ7OhOK>dWNCog`{KV!a9i7t=ayE_ ze{$)i>C(Mog8cO&rj71SW|HzneDzBKrYAI|1rpV#`ALWVaJtv&nf+I#ZT5+C%Jh7L zoFF6e5x=_kCa*-iiJ$a#h1o(w`{n>K%8wBL%(HrH#`~{iaJt75NT(yiJRDHkdYL|K%TyIml0qlN3aIzcsDb 
zkTpGZ_ybQt!19e=z2SsJOc1y^Z!%Rd@a272=wKT8uGn;`hpFjX{Q&+h%W_ULM%Q`JNeEsh=j?e!D8NF#=#!U$n4%f*Lo4w}pHA)l6>IzR!5& z@Co3_Z$rf!j4;d73c4C&xH~5~awmUZhkHJS3p3`+2yI$!!=(-hs8Vqj-Zg9k_xg9V zYlZ~(rukIv6~pzyRQo{g!ryV!Li;(WW$AL={(5ky<_Iy^lPi2tsK-^?q|L4K+%3e{ zKIp1yEBx~J7;>Y>3Fmg~#bjO$)TUSovn>}0%VtFj8x<|FUBwt@$?xEbpUl9|Z4SaB z%lX1RKmTG=iX`{XZB_2KW!5e+5vJd~3V)Wr0>4^u zu6glJjD0MDTRqIV$_{^-&h@~2+c6GDZGADyOojWs-$VHJus_$MQ$^U_be5*|iNYQ3 zL0aBZ#_QZy$}9G&;B}AYbDU}lIllA|C*tr?jyPM+@sX+Gz5I~L5wEG_B>y_ZvllPs zlzk}Y`N@}YN<6AKX%>0B2ajENnro_f@6<|pG0_z~Wz{N9Xw%!~rt-1%L(6#U50>%P zDJF5$>Z&+@eD-nl=T~xK6-s$CjVd`&$J=N_-U~tNuq9pg-+snM(*j-7Z*f%3WoUF2 zLmXGw!C;Ub%4yd@-n|TgOw}=PU6F>|Gq$YNrTwU{`+?eo>?e|;j#NqeG}FO3$`r>M z;==R=G;qRZ#$QStb06E|z$FDx)LIL6nwI2(n#mVi^FpK`d=j`C;CE0Lm+Z`x1xAvJ<(A9|hJ9p(0ho*L_b9foE`r#G4 zw=j^IwX2qX9Mk+hE?5lq-i~BOff?#_Wuu^aJ9sgZX_re98kgvSs8s>|xu=sV=xM?^ zvvfH2^cxWy+sjiO{78I=A7o7*`#xHCj~;nBM6J#&L&MRd{LrF6h&(WlEPA|;COI6y z^z~J+5fu?3xB4e>YFLBuRsm% z>Pn;Stt~i9*PYxj8`H=72s7c9Jn|rvi5kBeIy1MBkOPNl@Uwj+&1(m-t@=$*bo8)w z?th4n@)G#FkBb9)$05jNv*E7K$;F)WwEV(Tl72-FvPB;=lCNDDJ)d-BWX0jKWhy*Q zSAl`qh7i5Pn610YMRS`qQ52ogp6H>10rMkQxVS&{xaSalwn- z)PI#bsynqIZ&?s&zLP{IZ2d_MpKIU}DRo@E8KaraWN!Om;#0+EIOPk;os1cTDTspUHa}Sp zyF#LokWP*y9>kTo!$j=%ezWhl%m}~uBo$sa20>3S>=RqgPCPgOh08>#-)b9B4thj0 zpUJXQXH%HBVT8W?84nJX8{vxDT8unhOCs-2B_XQ1aO(I}`oU~7$|tyzoH2b?EOQ$y zNONQYgSsH$?MAXSCy~B;f0iv!eM0=a=i-t<9XxH8M`sj2qZ$V1iEXzrr6 z@EZdxVD!K{Z!@))%47BRF>rih1OEJY5N_Q+2t~%vSj(rkDChfq`oJNSO11=(m7_{D z_0w!vIO7mwq}GPyLx*5Ae+t|>8c(G+J*UQdIc$%fB3z`2RA?1U>(^J3xQL06*=CQK z4S6JA_B(%G`!G4;t4MWkzM!Q$jL4#q7|fG%fj`CPsdfbmi|xy>>7X?+h&}+?^M=W} zQA6nF4Zyt^eR!HAiun~DG}h0A%9t-8K3|=oqB5WT_a%c&YST6Q^yG}-y81=pk(?_K z(|u1wy6vf;znKY?I>ZiZ??cz!vrvI+0Rdkd$>2%_nEL294c?N1^PRnE&z{RP*D@cR zCYF;XpGRbQ>UsLIH^eOJ+A0E4b{MtuIPG1`AoH%5F0W94t>MKWFIS5PE9>d5%GIR# ziUAm3wx(@?3t-ZM3~YMcL9B|C+2jX~Oq{3({0UiWX6IQ+5A`&W4yDa>!`>}q>dJWX zeS6(lZe58nqMM0P*E72LS~03VRKb^bZ_}->7trSxxwPi+M5H7W!y@`=ZvSjp)T9B^ zuU%nZoaiRG4{yTMVOgA8_<-)3yB{xfCW23WIb6A{hYO8%jIbk((J68oPCSWQo% 
z-N6J+la~|aE=d?{K0*?&w+kL^J4DwlSB9F&8}U-@GsdJx3{zKRgZth>rX(T5w7u3D ze7`l(<2TZv|Lr7HoS%SqKi0tUj)`cx;|A0E^%MC$Qbn#jdW^jTj>yh*f}5|$oo`P_IL= zKQil4a+rZ?(NA#@V20b!90QvhHu|oDaRE+n<*}1;Brpq3eB8ziRPNjjz z$MW*13VfHEfvk!s_qby=)*lvynvA)yu3;INzOG?iTZic3sXrj+#Sst&HjxR{18kqo z7e*!X6qGEkg<$Z-JKg(LeV2gY%y}GcAj=o(D0ly9L{ApuMK6ei8*|pHWOT=itQx_3$vtsOCWnfsOEKW7L zBEYxf@kv2C8g1=|Z&OY}OGrLj`1J%{{CdN5Z>9)7d}0avYZSO63Q1(|7*{qHXo7{D z6#n)bhyFq0f{Vjtg1qww=HD#k=r7)ab55$G#cmTQoqhyk6ypWg+NRP@ zjwVEU$iUa0CgwqGAFXh1z(=Ac?1_bU;bwrrm^K*-Tl3_omMX!^(h_KS*_ZBXjDxuj zJ4v{GDt62|2#UVHsdSMMEU1%Ya;uGTO~eK~<|bf5KbEsiI-#bqK50 z2KGk~$$v2e71o`mrV){tE|zcBAC-w07Hy;#^V}d)ejSdRT|&-mQ^8@a9n>kV38pz| zU{kyq)c$)wQw;~{lSL}fV%N#Ud$zIXU-#1a-rf8!OJ&&4zieQ-`W$?xv7dCgx(k~3 zY=n-kC(J5vg=St3qxxPRwmZyX7mw>>VkT&jymDXGbj2gS=ujJxx%rL^Jn+XGzk2B= z5odaGx;^Fv9b%I@>&e|TK12=c!(DfGqViK61|p?k>gF`OA(KdJglf#xd5?(+2jF9s zJN(>x2&PE}vrz|c(&G~=Nr(O>JaTP>k>YA##79rGvO7dRU(yEpqyols`=+tS|)j*bc-~O z4F9I-XGLM@EH!A%&Z4Q2LV8^+5a#p)evD26`^jBIw!j-T?)Ss}i`8Vmv=^*Um4lWL0{5V?f0E}IT$eq>BqSW7>K3P&(`)#& zxMe;`2~WnH_MgPEHk<5xQx3ePd#T=ue?+7C3(f!Aj+67J5Uns@6*WXNr5p76g>$5@{|J45(VoZzY{9SH2^e6~i$bx@@Ig(9!u?1XI2;b0 zYfWi|$9IxaI}1l9%fpj2E&8E;2Xxm|!rCQYnV`#*e*6&+&Nli#tG;QZ4D|B z;DgUcTG*fiYvIKERRFbnLFE24FgSC8lpe_--@`R%X4EvS_*#t#i|-KK;-{>k&s`KzY(HgD(Sz!Had03yZ9HLBqkP+J}g5UGx zvHRV6`p~@>-%DI1&Fyb_9_iv;ik!Titub%qnD#PcBPxMZuGqdh#A$+&G zNW)un@Z>gEIC;MjzHVL(!VAjW$Es=6J@yxK&1;ww!i@PC%Fjd8v=?M$!?-a`{~FaW zlVc(V`Jf&AnAYvcr0YKg(qBdSu%dY<2D(iE{*G^KSr}mR!vip*Zoo`pLn8k3t0&(w zHc}uxozv~FUrB$bj^@05tn+SQPGazANMjK$QB@ANG#X1|Y) z!@MXD)c%x4e{Vg=>UbrBLCsh8@C_AU`C%wObO;1RpRmhv5}USJftj&O9*3&a;L6yZ zv!<%lOnO=t%0IAzSie{}_9`EG43ofhOC<{9lbN1d4-#<|Q6?KRNo zvw~^3918XJtLcMnRqU&h7iK;76}(MP4)AUcRdLkJOL<;trM%ihMLd7$N}hRh2`_PJ zDQ|W`C8yrHg5&?Aio^UZ<$WfVoQ=AroVT47oHuv#dDq7LJ;}zUysxe$JoC;X-o~?a zye$30oR}?zyv>=1c};2sycWqy&c(87o>$Kij)iS0Peb7lFX>-3&(JbSu=|}7uX|d& z>FTgI{F&|(1iJSY^Cv%>CeXfgo_~G83x0jlC(|mLX7=mH9@ExYjNsb^8MD*fPfT%@ zky+V?eZ1yl0#obrgZ!$?C44O};IC+!#BW!*YN{Ifm^VJ^6#r*V9?yJ0&1{d-27cay 
z`KH#zL;SLte|avM?tDGla?XveE;43U6A7IX9;)oWGeUpNeJ3?`ynRP_(6*qMF{hrG kcj)-Jj_ZfUYM`g=*s1U^zo3x7FyH_4+llW9m59*)f66*xxBvhE literal 19590 zcmZU)c|29`*Z+@_Sy84)h76I)km>Ae8A?Kfln5ot6s3boG?2NFu`)*ql_r_a+1H|y zl1j5?5e53Iw#|$P3k&g|xj8B% zEOO@mjFMa&KL0<%CAnj#$8$v^Jw*k7h>Y@!3gC|0!4(S@oK{@lQ}#b+nH9L!*LarT zni~VcxDsnOZr-qI+sw7$5dkwdYz~VG@rw{#L((@QU~Txu$f$_TYojE&lK(SGa^t%H zIm(rC5lZ4p&lUFP%A{~*-MHiRU4&i6xM(DEC;acwby3?k1xRw`{$qNe8CTv#$VE7X zJJF3hN#A9Rr?9{jhO4l6@nYw{g5Q5OE<#aUMgKJWIKjavrHi0+oU~~AuC{sqaaqew ztyUPT7TfZ52gU1AzBpmUTUh*cxsB*!otB3`RBYBfUT9M|4s3Q?7J`%VdYid{J3!HY zQ_Dx*N*mq+@sSpBB6c>z4w>ZJQ$3q*Eqj~M zs&(jFdlj2*1-3|z*$V5H+@=qIWs@)Cf;VMOOmHj=X z|4+pDhxkQuRe~jh|Kt40!2-{#x^dP1@w`S7cS`W2w7GGmW2-IuJMUSy?s08isH4V- zJ#E3M(S2fdHC%_|QhlAL)9}Q4R<$U9a>jHHUnhdMR^Hcoy^#UGQSt%r`E5;(Scfc! z`$3nJSmtE?eocq<$m=lcC3CuYgCSFRH%`g&n>vnK#=1pXkB?U7_XP!64VMY=XAbVO zo`X%iOk+X1B}vMqUDzvHNi!{&1*3&9EU(`pp1*~-w~k_Bp_k3_NXM21x|>?e zc5!UFSKX&;7o39fcE^_Ow|3dg2w8(aQ9?SJ{gjr^61Dh4C7X1o=AlUU8?r@G3!8(CY^EyS#82MFIH2m#QZqWQqg~!;wt_sa|Lrd)UXE5tU6_qkxY~w91Zu36uC4I2# zG_0K2jq9(xhMlL&*m02?F{o`36jocbXdj<#bEn(g=G3Mro1mICxM^QhOIDwz&Ft%X zEvKu(Ta4OeZQdN!woxkgw$a~LN?w|-Zy|CA=xTV4^L|v~?I#FC*e^2@U$A|w3K)ruGsQ-@#r@L_t{x<*({}X_}-Fckp<2v~E6R&f^H>Pt+ zEwVUwX4djY{buv8TyNx~{$>6#r{(;svx4|G0SOrPc?BawxsjdrdDvCW(jlD*A$-kL)=D&u|`p59uZd~L482(SrH2Ej=geC|H-TKqX8Xc0t zk*Okh@xgN%GEEob{}NO^a}*MkD@cc;Ha;mGAhiqnspD`cIO-2Dp_dz}Mbta8Jb5pF z?34nib+rIa`6GJwUK>droC2la^3g72H_@suh09I5Aio<-f0 zSEzH`5S6Sa(DVjA9TQZ8Hv2x%DF^n_VzougRpA7*UOX2*zSP9a2}j7D=0_xB*cf8{ ztuW_n7TGbYlm^WUfErULd=t`5a<-qRm$sgxO7G_2Q=@HIH2jQp-29AY$ym`WcPZli z#Q_xl>_aE>E9BwPJ8*Wr3*DZ!leqdBAx~xx%A9!(da*KW&dPVpWzR`qMqbh%_A6+| zW|nqOON4tnTk*i}K1N9+1^NRVX~6yc)RNtUhy0DOJV%Qf-<^f+4g!&g{fAw!Y?~QR?&3+@`aYd}v=^gh<{k8IQ4)=AF@elN87QP}Y^PKb zz4FF}47weHS<3?m+g5<#?^>x`ZXPiR^~0v2Q*cKk2|l_{flvhz?2VU%RbTcAW{XK| z!>!NsTzn7?-Pnh@Mu*@)@+b0^GZXV-)}zO`?PT2fVlqE&ANjme7>v*9z!dtF#2W-* zW#UBoXiXjspSGWdC0%9K-r0fUcg>***(Hp=r8fCF=_hHb-UeeU)&r_cA;O}TaC2-X 
zo!J?T%KB3vs$RShM14gDO$UHSC%Gk19Es7RArzq3uhtYEcl0$s4zhuxK%3va#E z!8tOW2rt$|@7WrlzI`EFz4M+9&!{4Mckd?oZd&;H{0nB)sVn4%Z5-*!^ryUgLO5x0 zGP9{Y27f1qpndvRswy@Q>cuAFomv~U4 zi`ysd#SQCSF?qoOI1)3PGu~|~zBI~!2}&s>)_DiIndo6C^NtyblfW)Z9*pr)2GbRL zY4XMeXzjj{ep0I>QqT%1Wn;lVYydp>AB2|muj#&3N*J4L42rybxP5pz9iHAn^`dhz zz2guBT;kDJx5t6ro5w_Lyckq2c|<1u5hmk~3t>=%HE1&|tufdP!m62QKU&W>+AWBs zxN@*F{zeM?#HgjuO1!}_#QtzGc>Lx)`#!}U2K$n6?8g~Q&hth*E1Q8S{&}?4V+Z6a z93#IpUs|t{{Yn138AFVgYtm2G*D?y3ZA56hAbnT;pf&xgQI5?8Wv@JPVT}W7D;|J9 zb*;p_u!Bi0GY5vK(^9(&@N16-R-S32JRu2Uzda39K1KnAEXCYfF*MXL!8e*;=_ijK zqA^AYH@sK|0hconRQDq>9uFPE_cMmT0F_>pgm=sXIr8=^IjQASp`^Q+D$jXM>Ym!-Gu#X>W*?gdiVntYvPGQH6WrtKg|`Avr0e2>sD1s57g9TzJet;GQ@b8>EFo zzq0t5&%tY{JaW-&1DSV1n!L^5MpY)=M?FPzQrxZz0d{p{$u4JNDH{k~ zOgv`YjE1D&6+jy6sa;zsc+L!khVp$-mUIt?r+J`p1dkf~$yg6P2M_a0v~`eH_tD`U^JuFXgUM_k{c@VXp?%)uhIutD>3>3# z*Gl1@l42;^|Bn6~TE(WtX+iTFXBxhu3G&b#i?*$R8;z6T@0{zbcPu*&Ha>D0ZzL`++Xg*x>#$2TAP>tDmsLvrNawJ|^sh*OCT z`^X$vfr|G%n7p9`ocqoRFN_HMA;E=)Ipt*B^mx4W(-%FAQ#q0s2k294MlhewVs=E0 zFu4)4>8mb7a?je?I=p^A`}lV{M0Oa0{kw&Dcbpw)gCy9h$>DEbJ?Jwp;4fo8vI9XM zh{tdY?#g;h6q`-xfp8ZT8F)(VPR#|y>b$9VZ5qE9o##1v&RrlslFP zF}A*V#5E5RRz}0{!!qjo!5WtCRfe7~F(g$@68*?m`to5S-m#lQ-y41-am$Yrm1Eo3 zAA;B$I)9GsV!c5nQyFf0Zf35ZbwrQ(4w(4h2=+M?k`$+(Bx-mZjtc9MOzVR%^YT~f zepU*4M=P1(--z&J9k5nNNVAkk1@k| zjnnMzfJaQB_c>;csW(WynM7x;FJ=2BqhZUS1|%*QgA;XOWO76@4Adlp`zMyZyKDus zeNWIeYSoxlp^gFP9P#=_D-hb`i(`gHNS;F{U8sMB?nO7&$Bf`UsRP(hVh0!htOGga zYUpzChky(E_&Q_*ejIa;&Rx_B!B3wsk*WjKb>0W6c3zHNmY+{{UEIOweh`Q7Q@$`6 z$Kb;mc6jto9I^H^0H$#U9DcA6E|^3wFU+= z(&%I71_u?CAmzRe1gg4n8iS&AS$d%hyZd4e-aaMqkP8y=u_oS47^$@#($QrF5I(W_+SEkxkrEMvRwa(`A8U z@LTf?l-4{1$@lLujcY&B)x(V>F5Q@LmZ+iY?OYtTSqc7~Eb*W7kR2UyW$(+}!Rv1i z!{;=2*r?<{ePg7^B9jETG$9l&i0i;w=S0Z3ZjUt*3#s>MBkYy(g@D3isH7@|k1!c$ zztF>xm`_AKR}?+I?Z@l<2*Smm^tMtRRr;Ml-ug|(@6)YF+$$-ZJh&FM921~Y*%Zs9 z)UY9ZDHKW6pw~%poFcS|`Wqj?U%#5j*Nk>JlVe8Q%6dpw*9Ep~r64!v7{QNO6k>j_ z$KA`P;urtpw5xTHY-!mFFD35KS)X5{k@0ch>RlsFIR{C_+7tAz?LL}Nnulj?R^uf7 zD4&A8GZ+d}9@yYw>`r 
za=J%KE_|b3#D|E);Z*q6evlp`K9Fd(fNY(XjP8yrNOedOT!{lpqLXM`zc$9*)xyoA zvLO7Cg~V^AgunPRITITKgOb(cXV(B@X!05IMO$FttQwtcYfMBB>?ivbQqj|}hVsug z(Bjo2RKko$t?S|;d%{@KQ`SQduJS^=ldkBjXi9H4>?S5T(+PL(YZ`P5K|16FE&gy0 zbjJ)cnT09fvug=72AqQTlFj_J=Bq(|i7^)TiXx-85#O1HkP+c6^n>VRGU2N*d;gdu z-nlwcEY6in>A4{h6!XElkVII~m5@9$cQ^DYvE7d&un;!gqj+U!= zG8!9NnD1q}_(*>Y25(UyKFiKAFJ$`3+j?D+dv6lCe(gAUA?FQ0)^b4f(-p{Cx(fN* zH6T^A6Rez$qju3-QgZbh*>`U;-KcvLFTEHGe#(0II^ZpxZkJ0ka^5k=W7{z5=Ve;@ zZ~@NT|AH)`2`C<(Pwz12$cr2LkYYYSf4jNR_O6Ze-1ezh6O)TVix=bXky2W=;|Vo# zUkqojcQ9uKbFIO1DS_XcNUNkj?%i^T%F0`@FZ81^rEVVXkc)w%wRdTom=qqKT|-if z7UPP|gK%fz0{RX4IMgbN+1qW=NuwIv&rc@jK9<7VIdW)l=?~eK{*3+c*NC{vu7$NS zn|L)Y8)<5-3QXgf;B8@1sH*CQ`$u98$%%n-;*ngRPV&(TI~D&%`Z866kqf z1xAEV(~UE%Kz6MzI$kcot!Cq)ewH*|e7=WY=a|OK+G9w1|7@iz1`Ki1*PCecMh%_+ zY#{w_l)-FDBB#=}%Id0f3N$C*reOxlVfpzz^i{J13_nW&zuFvJUlvX(EV^lTbq-O? zXe3c4+4P#rWd7qPZ|U=ielUjbfkN$de3Z|_hz4cgnnrQW{x5Yine`v&G3Q@)VEzv~ zuyEsA{x9^f`VaIl_^r)XOE=|f43=8EPg}tI?mm`(29!9iPPW!%!Cc;pNL$W?wHiG6 zi4*xhjxOR2@ojj~^nbLR7n zl{j}ZE?K+GHsF`aow442ZqPdFjw=7ci5Q-$m>lOycmt1X9mVDRztru&=!5y!eDaU^ z;KsH2FZ!_k7k%UoIN+=7cjQg`Lqg_0CkgXJz@o~B{Pq&S-by<%+~IHiC(jKtL)*v- zle6Ib;*)DoCZCD(i`9Ng55x ziX)!I59!{=lZe_XaeSybOea+v!AtoSP@b%XrQP~Owd5Yey%EJFclTo2z&vQ{OoUfC z9#mICgseP19#pppp?#bz{P8ve9pVC`Ng>2(*#+7jkwf}9W+1Ywkkx(slS$|ig-cT( zQxTi_C|h`f8Q$5!98F$?8lzSatKCIjui8qr90M`PeLwW4+T((+k7$C>VfyT-3H=%K z7*8cXgq=4;91vBr-VEZ!zHaSZ% zk##(JR?`s;xtT=ox-NdWZBD|r^T>L$P^hfvqzxgu$hJwtPMzCStVa=E(mZP8F3g(s zj8d~Jf0+s6^N8M_E*Sl81fhjjnQ+%>pcY+4S5@Dj5*|im-k%)ch8zHPt~lIz+0Qu_ zWlQxhH`7mFU$Cx4uSt4$KgUyc9i3Gghwkfsa`Iyg>4)8UbkT&jOngN+Ej!pnJ$7>G zp+Xt{%A+}S=vx_`qHcm)O{Qa}-3S{Hs}Gkq%z>*H=i|s%9rzoYj_aH9@y^L1VtXuu ztr3)i<3H=N#hLRd?}#oQ?An1v_bZwF2j58RvMwU|aR`Hs?q!lN1^;}o!~;@U)c=kr zE2O>+Ht$}DYx=?%6ZKH$ZjUwf9#w^Ksh7l$nxNHcam*`o5FoML{8wL|($w~R3TBdw z^eGESSm%e`HE(I#?!ElZ>_vD$PXjOXDu6zxp4J_DOhkTVf&c4ERDH4%=)Vdj2kmbP zN|TTD?*KAkSCnCkq#w1&oetSf*HK+W3q*^J@z&q-xUBCKyj{5l%D;xgqVFB_^tvY=gL=V+59^RoC-ER$} 
zo_3hgv7dJOE2He2pJd=e0<(c9M^4}I0JX$K*x9>-UO(}X=pD|X8l9Z28rUb;8-$zkxQkYaZz>RB(kGc zmnpbV2<5zCc4Wgab!~KEUKP)#T@sGqerSmPe6K{^zRQvo+J6{##bE4cHDPBwO2cH! zAaE-=OZ{KXN8dYZtjjw-(ZUM`q|Rv@jZ8d8a)kBqujyF0N)OL@od2^3WcASlThk4X5d=HdZi-P!Z zQ5-+d8+Q0!Bl%K`aFbFrR5>{k7X=ZBJ9v=Jx$H!wUcKhI_C29LTc@K$r#*9$bBug# zACDC)8_9%yl~niQFr7cTlx*Y{AolLYw@0KI$DJR@<`vq6ygNf8RZf$2MaS5w{&Vnf z^>4O7Y8vhheGmSg&&etEW0B zDW;(gdw2%idt@~37ISv!0g;~GMw8t$a9p?s4eW~`I|eiGrtDZ|yqK8bXx z`gi&uVksQkXoKS_$HVLc1k+x{H4g?*G3_^+M&{y)*S8qyj6lx5 z!!uB2%`B43IRp=Fjqo^0#V_h%oC3dh^yb02*ePh#olQSUl~3nmr@0cCn`eQn;u@l{ zY$;JFmO)MXe4?0OLv~MDLgUxUqHc8$iYcmK@E;!gzG*WC{;8mb`bMPKLj!D&jECEy zZy|J~iy2thMBcTlQ)}6ypeWW!>wfQ~4^jqU{)jxjEzyKcVYM`zI{^-lsUY(X-GbtU z739{vOJq^WdPaPk6YbMvpbb1}_pf7g<<2-9;T?rQ&;3|j5CHjQk@!7tDm+!v!4n@=dUyB>+#oV#Gxk!{o|vg&SsiJKXmotliFMEH30E)TX6hj4z@^xe5@w%)s=^l8*8kQO2R1G0A zwZ;m=I~>5ezJ&Te(8P72SLnxBfLjvPGGZsU%m(l}V9%z+hlbQt@JgW&`=_yegSash7?0X}~o$5Rq{3i!5&OMB~r#z;U z`e)Nce({iQ8H;~7b*xc}F`Qp%OqRbXVjf6$kv8RQR2lk99$7cC)*rQDjpI194c3Lm z(Ob0Zu_?n=6_Ab9R?P1MTWP|IIM!aDCB;WilAzc?+$p{vZkL+lp;Pn0Vx2bZ)W6MV zi-o|sWHKuMY=+lgWMFXZddQ0QA;lxp!GEhSjJ(glrA>3N@atvhoTdSWDZTXDrW!hJ zZagN~s6eD-DqN4=4Z`*woL`?(*sKMSVA)hl`@;>Pcg1tC>)8xvmyCh%4bRAv@8Ohe zP{)Wx%8)R3HB?%p(8Y6g$etiGHs&f|_lP#0oo@l#e&1pfEtWCi8}*6!{(kb&ClW>i z4^W3S`(as}2_Adc2Fi1L={?mU=Jz2Ud+|vW&YgdrIG-ti4xvRbKE zaWdRoe+T9aZe_RC2~Y4P}-PBZH~dnYFHLy(O;0b4zn5&3F)&|I8G zl8c_vyNAta>BLfURCgJC=IeoC_)569wG|8XSvoOjEUxd6Z}IBeK_ynofsdt|0O7|# z7S9X+1h!yr(;ISV!yRg5(o0=(0;%rH0{HIM1^dRGfgclPh?9~CoQx2KWXY*GqePZ) zIty`8TPU~+7B(>#_R|dcbYl8=626PO0iI8@Nygm0P|XcN^micGQ;PWHJ2As8JL|Cj zK_EWjr=!t!Er{#0q$7$O=|Fr4kzJNYUzBm^^ZCB8JTw*Z;uo;DgwBxG^T&zmdqrOKa*_5}vRJrIZzF*wTaPT23@!bW^iX5SCn+a4%s9}oyI9Ox9 z8GZ-!u&2MZlL6nO(8VTV{A>%NV2}i}_Y>4Nt|J*Lx5?^TRv?+}3*%-{R%*jadjHlb z;uOe)^|L8{Y@Ekf1Ni@%Fq( zSnzTQS@e)%m*F%<-9xE_55M9(IMZ-jvRt zTZ>$~D6akgtNQp~05bPqf-vtNg5coBo&Uc8#PL4> zjzv==hr1w(>-2wBAEy7RKAitG+~ps`7rJp5{TF>K{uh17=e@&1t9If##|0;9uw-#% zD&NYm0-nTmk&9O+V)WhpuwefHy^)d(8eX?Z?`;>XZf0oQP!`!2oJ*GVdSk5f82U}8 
zi}=JSAV=stbuzNWeOJrjn_@Vc{eDPJx||?smI*wfWlsV>29e2*arAUYB)--8PQuF6 zNzvLe6q^~+u3*UyzXLrd-*7xEYXU`q6yqt9zqKrt)~xf>B82Sw`8jCI+)@9 zop?4Hp+uUX6W6nhq*wkTn4}BYUvy!*!DKR=WJ$fY`VgavGHhp~pdg;W@@WgGleHgn zjI;eg`l5@d zo#uLSUhfkrm)(OR;b*A!t4p}L{}c&*VM=QMs)1^bpvzWLPs_JTGD3GQk#ELz@H*cd zY9w-qRhAzqDf$J*Ymc(K2KPdrmL*>QJq=$k8l{JHEzxe>De|j45NCHyMa%4yq;6m; z$(KBgmO(Ng=2wqP5=~I7ldxO2Pi*ODitxB}JCv_qh3;;;#Ccm5*x+>%zEtoEGY8R3 zNEE$1ZQ;Jc6FR@e3_1)1-3Z-E;H@ekY3J(bnE9H(-mPF>?0<&2BPS_LNTR(=9%k{5 z;E(&|?Bgey;B9{#Lwn4Lb6+(cvvP*sg_LPYy2!!@9vS>o0mh~$$lQs^ApUabYwP+J4)I`jaMLM9ww#$Eh}q`@liVUSrSZwn#4G!7~!|Im-r1i=p zkk>7x>JppiwMW*lyx)+KG?T*mmmHk%`Y@TEtBuDVtYqhYUkOb|)IoY<8iolvSoord zY|hcamK()XXj(aSa-Rl6d%baQ#USY)pM#}cR!q>1Bap(xLGQcmWc-)Y)H|k}a`w&z z=My}V8Dm85zpBIotDPDBZPCn{y+!omZD;Va`aqOK`C-X&#;bDTF-yae&J7*$2zk_N=^%GG$Hw^OHK-aX7(09q3h@#s-svdv))R_I;${UymCwnvP9Pye@wW=LUXGan25ij`$fILU(t}BXrD&B8~%`4#-cDV z9K`q}tFY$B&)_VA!wY;X5ws|z_V6>Vy|zZhLd9GU3m*MYD&c-K_Bw@!UFto+JxyN zK_q{i4rmwIfyLk5nA~DZ6=Rdgjho`oTLkc-vXY6rC<=$8*O3$xL1+6x9L#<)ft_Md zN*+fSk%jZNk;7xnVSCnUkdg?a4aKQ={^4Wt@YX)qW-UTz%GkpC>@w!f-Z}JTk^ocp zPK9ViOI#v77QaQOKor?a%{GNm-)|pi#;ive8kt7EI3~KXlXSB+BVc zriF`ptW8G>Nn`0nn#WHeUnH#P+Vu)>?S&lGn&Sh@FSOCGHoa8;qZJWzm=8+N*5Upq zmN4`@1}!X3!o#oxY7=yT7SwEKb5r~=Z={*t@ZL^sPI_XE(=xO)&LAS2Hc{3qo7k1k zL#_E+1geDuVl1o6myFs*URm&f_dAhOVZIVK`P`JMzKY zu81AA%_cpy9H^cDnfBcsizI0q_Jzx>zBs75jF5R2 z)%fu9ZH#+;0Ad{ZTl0=Jml3o6QVy6LU`$mzRU8dNW6GhE@a^OI_8_d@bC?y)A zfSp7jY1wC3vAdEczx>MYEMah1RRzvn>|@qlKLoYc%t1-il3Zu*F`LrjP)y1Rf=rK- zh3A|=p>Qr|-)(JTA$*r=?W|_+y{{$fydTq|YpQ(XC+g63S&jIp7}1UCVdSlZ6xn>_ zIe6QirB*+MX}f7Q*~k=NNplJn(NxFJE5w*H;tNPDYeRSyznCjk)9H@jI68k?80ptZ zg&W5g(Iw0$qTkU;8-MFU!DLVNgtP=sS5PGGC6nof)hWF7gOnCTWsv+$BJ|=ELX($$ zCvy&%5wk7N(C^DOFzl!Y>1~p5Lfrx-%Y(_=eSpJ#k{E4qhqSEvLXST6#Cic5T^@Om zxTU(F+0#5++xm&XP#x|6B2eGeWzi0qVzzbBDC@GW25z2?fvQJ>?p>q_M65MIlYMa{ zQQ|Lg-H-(Ok*|mvio)jO0xUUEn74njpln{|K#var+UHZcWUT^ZoOOq<{3B#s%UY_a zeV=si6b3EdUYNJQ4B{3|fJM@JBrq=M^a~5(;D}glO?mD~^j#Qba%^aT 
zwir?MVzE+2m$qHs3j^H=AYY+|VbA(lwZ=Yrc~vN!>JxM;yJg7XKYG|OV;@ZU8ciy9 ziQ|bvA0XlV8ynshsz#CIK%Y}n^UMluMQ4ZS3BU?qeIqe3+#z&MI26DxE4L5r?F0IHs~GVg4Z^GB+c@n zxF=;A?s6zV{TvH=Qz4P%maAa6hXEY3y-%i_&LZpD_mF+J*VDJ{69mii{d8gII5yg} zm=s=qMK7OBhm#uPsOi08T>aP>C!S6w*&chys@PkEQ(28sBQxM#odvF05=09gyR(iF z`nV!D9`11M;bN6E+-%#4)??HJ3aDB*Pys1FvV*(~AEJUD zIa&UCE}h-G7|*r;rU!aNKt$sgtaqpXyyxA`P%~ZecH&phqaJVrwN@7 z&)K+xc{tn=K`p|P;E{ z9PG(E4y!V}k*goYUi2QII}&2)F85M0IBo;>s}$qh5l7hB_<)X9e&kM;hy_MN)RwB!mUOYkPt+~se0e6USN+PMwkwgo-5Y}hfWYX1Wg8qF8 zu;$JedbxT7oENl&!_^MJ@d-#~{{F`9nRb~x2|mK?8_J&9w+|JV)wh@-!N@BoOY&93!%?HsN)pVUlm0LLA%2pv_7pqNXi`RSWiG`Sy151=XOo^8kFj zIUty$w1`Bi78sgtC8{3kXw?(}GcIbO-o-;SWWf(&Z+isZG0`Z&aR5|{grj5UKnYla z(NGCH#cngN!nlMEDcmOZu60c7d`l*Oo*ta_^~OD~oT;kU3fyTcQ1L%pgu>Sf@uqqR zIZ)_AryL8zzr&VTsCcy8d6)hw1;QK9>H=4&44>2g}^J%l{X8xc>)w z$cenQ?$X-8GtI8BmVG13&pzU4-Ky)sJJyqF{VK7{TDV$)Cp|pUY*(#dU0x{9_fzHY zWpDQI+%1pKK_e7JpVO+99m+LvFU@pXE@n#?C_wwPk_0Vnv`_ml1{ z=;Lon9iky(Zqzni4=1q?36=$6*4TGcDgP?{QuBidDWHmrlKD z2#Zf{qFLEmVE?#|gq+Nze9g^d=FcgQAXuQ+(_kC zGh%$X3-s?ca#epLqJ{x-mIb#sx_aVdmhEG`i}8FhmFD zgYU_wi59md{I3(>r3HyTnu;KV1>+LA&1@?$=k9q$hr zC&y8Io(l&#N727?I>6!}8Zyxm-F?fjeS+T(SwSefPL6@MqkQ6G|z8DQ_M6*yqpOl&{AB2FVNFkX-% zHy8b4r%pXX9>`bm#SdnX8QjN=^cNRKE=8a3zt{$49k0OW9W8W{;Pve+J&K9%ud%rg z*5lL%)x||;ZN+qGTb)i({tw+-m_^V;2KFVyLDd9tqTl|P zq^ntg*%n!7v)IQnul7>Wgb>JDHWPa14ABR|wU9D?CS7*;GS9MF2>-r4jD04HiE2YC zIX*>&ZrUY=$0kIQz#Dteec@uFcH}HbXXXaeX0Hu;3sMhm^thc+dc8n-?$0wkZs~UBX$VT_Ft0euV2?Wd5 z;ccT=lp7IB@BLjUSZxZPtT z+Knp`l*`q0_AV_fzH5p{cE+NZc`H#4@Wi0}pARk;l5JLpX~YRLlx#bQ4)!}~Tk&T`zPy2i3??yV9=R-j zoq{i#t}spCF5q0@+w7v2kEGL)L7XRp5f>MMdG*+q$1|71-=+sND!z*h&zpo9J;5Zl zc!*x;cBI|)#lZX&lnv(}k|{N1{M0YIVJ5eoCW=+kpb=gAb=@b%_--Rc%H?B5S_d#2 zib%rmD{RhHO6yD`1msbJTr{L9Z~Laaw+uyt!2Zc~2(8)fqgZeWsq=J-i5S=-(pzw-Zov zS^;LPZe~kUuFy^6BsjaM6wS0(WWVV?p)w1;lb?S5ba2NM*#9OO_2*}UaJ)Ev+p7oj zUhgJlHjM&NC{pb~Tf$Rmrz^^nnfWFc1ZdqD9^c8uqT=%~n01lm77vm&Z*`#hi5#tC z8X?$bCzy|a4$=F5uoA}4*jv&iVBV4r{N7E1`|-wtBeRH;)CAxJ_mH|j@r;^AJFQ-< 
zjP3g`@ogq1kkF@@I8QkM6OKyY*@+tj&y&HB=tX3XpvzDZ{1d`USCaRJ;pBDeXjGw5ccT3WZl ziKe$X@>lj~z~c+mP-!?sevAkSIwbDk_g5M@bP0KVB9wrNGuTc1j4z^_cts0N(#OM9 zM0&ypNO(9AZ(I7{N3UGs;_C*wKhrQbRubOia0py|L#ccM)`w-{+&4NXKFp&*VMoxX zNtP@WsM|Kb|3iv^ntxyue-2|g&`e@$j zQGReY7wUWW;HEMSGLf4G)5KQM&(q~_Q~ybFrdggAn+lfom2;`z@yXCIt^yX8t3%Wj zRY>%bhy6YcZ2dMr5HF9wYv9b})p1#8zfEX3;~35Q?!X@z5Cz*MYVgH}2e03y)0Z#Q z;gy&t1WY$Mi(U zL*jU|4YfB{5s!u{Ch0>VjPcoodk1IYHYpRj=DayhJ2nOD3`NoQY_8RG5ei_ioJOha z#p50u;jV%KM*NK7X!lRSbFCBkb2o9B%|`RV)+UQpx)Z^VFx^KJtq)P1J?H67VR0NS zJPUyW-0ZCIn$>aZq%yZV>At39tdSjMS7%#cXjVM7#!p7)1Y6vExtkVSX+njUA~udx zGP$Rl=<@l}=s(FwP_{S06oI&SaXpW*6vX1oyh%_h*MSd4_Tv5Uw`j500zkNrPIx9w zTCEQgdv9Rxd9Q?>DQj{3Wgq;#-U4%uFT`chv&hs9H6&?KHKp%YLT$x8R`0Y5tZ2$+ z#SWar2Tg8xai$Q|$TdTnkOpjD$HlE}sdV+nG-$GRB=eu~aP101Y`ZAHOt%B^%fa28 zOz$vU_U$j(vcrMor|p3PeldM~dn+n)&g1HIhu{{)U?*D-|z3e*0a9rdET|xen0#9ejR|<9z!F6 zOOPZ=gms^H;~`gXu zJy^TdQ`37%veo-{K=NP~xzm*hgKkoE-71?ECFR46kX7(#i#~D=Q;{V}e?r_0GiWNa zLiqbJ`pn=4JEg6Ho^G9oGkd2%Nue9=JFpmL>>sDIw-e??OEaYSYvR250&H@2GaZzk zKq7%RP8!$1zWu)=eY-@MX|e;oIvxqbB30Ov;|?y#YLE~SOp(PR&>8xf?YdGYi>}S4 z`3lRiW4$tvqxEQkt^+>3wG|nRCF2dfFA=*ULslM^PWvxLflurQ$iHL{iJu$=gPo~J z?@uI0FPU&OfPz_*9d;j#!kaZ>>0oFB2DA-x#`dUqO$=VUsGeSGKMeyOLi%{m5#-%0 zkQ+H%arsSGT9@@D8j}0yZB0J|ySfZk*YX~H5m<`Ef=n`hXDW0=DBv@5T~MlfE@YXN zf?Cf4{Bchr;*M3y){b4E^`@4%Ni&No9L_+7Zxqp$dva{ycZ_CV(!w@oA$Zhc9w|0) z!LGw9cv0hQ%%zvGued~6jL#{#G1!7S6d@60@k=@g#A0e zhduhALEp7&v~R!)hs(Ut%e_j#+&n-t3WBM<$QO@xNRWvr1r=89#@iqQ$Mq-U6I~;0 zr=&=BS??pAPIiL(y z;ZsnmMvE-<;v}SFZ-|qOUGP5hr7(Y)2R7R!0YAg*bkI)=-lScps&!&)>3RdC%ha)_ zu^-O)GJ;mQHX@yMJiSLNGi(FurYm!jBP#&k?4 zmwr$lhYiNW=w(A4JG8t5kzg-ma)h9SthxBcp1&y3`4~msTZ+9dyrKLjgcnbp z1=T(luxYvvyzab%n;v#DeYpv;-Fynxs3?$~4_yy2uglT0|D2_Di&oppc53e41!uAN{_lDjey_Q%CD!S{2BzBSy@ZirxH@DXv5qt-%QM z&I-+<^ohIULZQT1g;7jd%yig#FuD=GwFk+t?O4e%+xh?*KT`j=?t!xt@jX>0G(M=t zY_v%t3-gM3hmaor2lRXGL#u8+;hQc#Z`C@cRrFteV0s0E#!rnK3EtXt05#=e+my|flw@|uUqPWP&m3Z&i3}KBmB_w-n#O(Uuvu3br~F_d+JRx7CFUo#>w8ZjPH6yxMP*^qHp5J8HQw 
zF&FOZru}XJ@1lPGH3RYcDiIS^B3-zscQa5(;o)Cw^}p@G_V?;zCe+8ea62Z{E9~atCbTOX0T<*4$U}cbg)ABo6Zl3h@u~eisU+63eMZSp5z4ypCf4 diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata index 7c2f8bfeeb78c7ffcd7131dd9a8cca69c635a3bb..7eef2ef15bba26f49eb7e79079714b5c7015bddd 100644 GIT binary patch delta 117 zcmcb_xP@_oyh4hlr9q;lk*PtFQId&iN}`2{shOprp-HNxMUqi!qUA(;ZLfq^5nK!m z3?-SlsqrQGC5btOtnLm)-yTdlz`zi}2b4)I$SlrJNiBxTvU!U{@B)R46U$Ol;uDMG NQ!yn>0jd2(W^rA1mwN|K3%d9sCJvZaY}nq`Wqp@~tdN#aC%?Ti=95nK!m z3?-SlsqrQGC5btOtnLmud;ft1Blv(sYC&dkeoAUFRF=(~F@hH;T%1^zni8K_9G{Yz nTvEv18Nm${EK015FH0>d&dkp%.on_episode_end at 0x14c429f28>", - "on_episode_start": ".on_episode_start at 0x14c3f5d90>", - "on_episode_step": ".on_episode_step at 0x14c429ea0>", - "on_train_result": ".on_train_result at 0x14c44a048>" + "on_episode_end": null, + "on_episode_start": null, + "on_episode_step": null, + "on_postprocess_traj": null, + "on_sample_end": null, + "on_train_result": null }, - "clip_actions": true, + "clip_actions": false, "clip_param": 0.3, "clip_rewards": null, "collect_metrics_timeout": 180, "compress_observations": false, "custom_resources_per_worker": {}, - "eager": false, - "eager_tracing": false, "entropy_coeff": 0.0, "entropy_coeff_schedule": null, - "env": "MultiStraightRoad-v1", + "env": "MultiWaveAttenuationPOEnv-v0", "env_config": { - "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"control_range\": [\n 500,\n 2300\n ],\n \"headway_curriculum\": false,\n \"headway_curriculum_iters\": 100,\n \"headway_reward_gain\": 2.0,\n \"lead_obs\": true,\n \"local_reward\": true,\n \"look_back_length\": 3,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"max_num_agents\": 10,\n \"min_time_headway\": 2.0,\n \"mpg_reward\": false,\n \"mpj_reward\": false,\n \"penalize_accel\": true,\n \"penalize_stops\": true,\n \"reroute_on_exit\": true,\n \"sort_vehicles\": false,\n 
\"speed_curriculum\": true,\n \"speed_curriculum_iters\": 20,\n \"speed_reward_gain\": 1.0,\n \"target_velocity\": 6.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": true,\n \"evaluate\": false,\n \"horizon\": 1000,\n \"sims_per_step\": 3,\n \"warmup_steps\": 500\n },\n \"env_name\": \"flow.envs.multiagent.i210.MultiStraightRoad\",\n \"exp_tag\": \"multiagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 300,\n \"ghost_speed_limit\": 6.0,\n \"lanes\": 1,\n \"length\": 2500,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": true\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1993,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 221,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.4,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n 
\"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 1.3,\n \"b\": 2.0,\n \"noise\": 0.3\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"SL2015\",\n \"lcAccelLat\": \"1.0\",\n \"lcAssertive\": \"1\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcLookaheadLeft\": \"2.0\",\n \"lcPushy\": \"0\",\n \"lcPushyGap\": \"0.6\",\n \"lcSpeedGain\": \"1.0\",\n \"lcSpeedGainRight\": \"1.0\",\n \"lcStrategic\": \"1.0\",\n \"lcSublane\": \"2.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"max_accel\": 1,\n \"max_decel\": 1,\n \"ring_length\": [\n 230,\n 230\n ],\n \"target_velocity\": 4\n },\n \"clip_actions\": 
true,\n \"evaluate\": false,\n \"horizon\": 3000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 750\n },\n \"env_name\": \"MultiWaveAttenuationPOEnv\",\n \"exp_tag\": \"lord_of_numrings1\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 20.0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"custom\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"lanes\": 1,\n \"length\": 230,\n \"num_rings\": 1,\n \"resolution\": 40,\n \"speed_limit\": 30\n },\n \"inflows\": {\n \"_InFlows__flows\": []\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"MultiRingNetwork\",\n \"sim\": {\n \"color_vehicles\": true,\n \"emission_path\": null,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": false,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.1,\n \"teleport_time\": -1\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"noise\": 0.2\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 21,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n 
\"veh_id\": \"human_0\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 1,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"rl_0\"\n }\n ]\n}", "run": "PPO" }, "evaluation_config": {}, "evaluation_interval": null, "evaluation_num_episodes": 10, - "gamma": 0.995, + "gamma": 0.999, "grad_clip": null, - "horizon": 1000, + "horizon": 3000, "ignore_worker_failures": false, "input": "sampler", "input_evaluation": [ @@ -34,31 +34,27 @@ "wis" ], "kl_coeff": 0.2, - "kl_target": 0.02, - "lambda": 0.97, + "kl_target": 0.01, + "lambda": 1.0, "local_tf_session_args": { "inter_op_parallelism_threads": 8, "intra_op_parallelism_threads": 8 }, - "log_level": "WARN", + "log_level": "INFO", "log_sys_usage": true, - "lr": 5e-05, + "lr": 1e-05, "lr_schedule": null, - "memory": 0, - "memory_per_worker": 0, "metrics_smoothing_episodes": 100, "min_iter_time_s": 0, "model": { "conv_activation": "relu", "conv_filters": null, - "custom_action_dist": null, "custom_model": null, "custom_options": {}, "custom_preprocessor": null, "dim": 84, "fcnet_activation": "tanh", "fcnet_hiddens": [ - 32, 32, 32 ], @@ -78,26 +74,24 @@ "multiagent": { "policies": { "av": [ - null, + "", "Box(3,)", "Box(1,)", {} ] }, - "policies_to_train": null, - "policy_mapping_fn": "" + "policies_to_train": [ + "av" + ], + 
"policy_mapping_fn": "tune.function(.policy_mapping_fn at 0x7fda132e6c80>)" }, - "no_done_at_end": false, - "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 1, - "num_workers": 1, - "object_store_memory": 0, - "object_store_memory_per_worker": 0, + "num_sgd_iter": 30, + "num_workers": 2, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -116,7 +110,7 @@ "sgd_minibatch_size": 128, "shuffle_buffer_size": 0, "shuffle_sequences": true, - "simple_optimizer": false, + "simple_optimizer": true, "soft_horizon": false, "synchronize_filters": true, "tf_session_args": { @@ -132,7 +126,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 1000, + "train_batch_size": 60000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl index 83774e73e7f850e929d5da88d68d077099b99676..cd832aa1c3eb1713e608fef452dbe168746e4cfa 100644 GIT binary patch literal 17562 zcmeHvcUV(d*Ka~ckR~?7LM)(ku#R02&`3l;Y``%j2TnqoDKrraA^{GAO6+Z{C}8g$ zETAG-z>10rjvd6VsC;XmPy(Yf@7(wPai8aY;UVPgw$}cwve#aFlX*JJGCXP2YmUl} z)EA0HJh@n^N+Ase;s{KDN8th$sWXn{HBm(xa@Ybv7@HF*Q=!IYViAT*crq~;$7FK0 zRIXx?rY*{F2^5)V6yYK+l(dBs8Cxh3;F!7q^^!~3JP{_vWeNcrs2vnZ#4@==D(2ua z8LH;1SPHTOFG3`i;#jm;8i`A>a5hh%fKI@0Tg0#|R)#5L>;4O|P@v4{aBX`qn7lT0pz zY9k>#29x2l0Bwm$Se^dK8&21SR0Z-B(Tp2WyOBJ|AEgr=d zC_v6o6V4XMG^-jwg&KDk7Yzm(GM-R|Nx)KMa$Ew{8c;Amd4viHRfDJv8sI`P)xZ!! z9TR~bK^4Ontdci{9fgmU%W)Af0pR;h91C($^T)trB$%8Xf!YehQZ6PA$3zMt0#oMN z*oFtbg~vt$`6p3oViKkhaR?X(y1BVHY0eGjM&L4x%ah5ayf6h~36TdM)j$wQh9O{! 
z{@Q39=o~N;s{yVMyiWuhL8M6nkdrHaOM<)o4nJCqf2{RH=C;RBvpY$P3kASYv}3Ax)Ff96>AtC&Z>KPoxkCG{hsv zVZBi6KB{Sfo{WQw*ixPt2&)x!f>4en zuI|yw9vVv-A~8>fcy@7eYchw%$~i4cLRyZ=;IJX~0fxlUAnRYPfoLdD^G4SHt{K&| zak+GJ8rYItBTtB|7e&<ndUW{r}8W{=eWUrGgf2 zl5&_KZ6JZ7Emc4=O54wGBIH)IVF#`vt$(@qc8qdSL6QdL4Ahw*l-g21qXZU8(WH>( znn@j$;R#of23%PBzzuL!q?SOc@*Yw3`Jl3rs3qGqq!>`l3!4dC3K<6&v!g~a+O{-g z{bVX;8`4~(4i+1AE{8Ni&D1$MXr)COpyvpQ0v>2&J8J6#4^(18-AcuztIJWN7UeXu zOGDlXZM76qNC#@x18B|Tg~N`HD+P^~HjM`uwrCG@%o~!Au&w71>Kq&M0%@ksKx!2F z&U`_(Q9q+qSw)(|7EZ*01-=;pNNTAwnQU3Chy!?Ah{I$EH)`LY&Nu?#EU_1rd<14u zJ30VOD-P8<3MUrz~Vj%1AtH^{LEBj{1YMp;0z8rWYN1OZVN8`^ z%|?JS!=x5Z28cz&J5E|?rm?Bkh`n|V$s;Tba!4pWhcdlDNe||XNPy9)KS2c7fEXa8 z5UNwpq&C0@Y>4n^1SC+gY-o*Ao1k?tCp6%A$n5KoZD0!wiMI$OS(Lapkcb654iB>C zAXhDR6clxV)UXprAXacCJWiwl*Ziy^JA;2AV!0@^#blAN;g+L4v4Aa;sWQ=cVG2-g zNEJXDT^mg;A;!bQ|Js7=-aI@tYnhWoB1XSW zG>n-LQ$lB7UJOs9F?J$p9t25NoQ2DgIyF|4jOGy=80N-8Zj{GC>fXq>si(TEY2KNt zMF?Vj4C9J97^Y&$l2!hwqz#6FM}cWUA!*t?HpJ`#nMx-2NBxNO#`eS#V(GssOlr~H zj#xtWY$nngajY@727U?yw+~i=sfZ}j1SygL3kR2n{09;>l$v5fl$t_nKvf~|qR6(O z$CM6WjS67k0WLETX+sjs*^a5lq%rlGX3TaT5~aS>LQYfaC(^>{@!G%B=V&XnrM;Be zN?JTUUMpTZQHM@T)=Ab)riE*B>0GUa26Ob}biP(&z0y!=tTc$%Q?^mk<8|V7m3m4e zu6DW!ou<^13Y1Ol&^TOL08M4bBKF!bb$|mMrY>;cqNGVJU^ESSN;;PoX&`0rU<`~V zf{~S4iSUc5M_?tX$o3c>gWR!tZNq`vXI=l%~s$vj9LX`BN_}F+=R@jQLCCOTNA_@ zSp#o^&az|_skU%p#f7wA!!vOjTp3b6v>L;)qS22y}fSi!S zfs@?`fjA5fjgVtVAyQ>12BZdpn31(eMO+21WC9C-3{F!qbj}2t4J#RoMI=IqgUjKZ zsDUEbdsC4RxlgnRKv5SsLyqeU)q0W|7|&h+pwk<<9#Qux~NqXRAi z4BQT)Z#LMCE@fOQQm=tNsg3B6CZ1!xMo*p)hNLwH88Q3S)*go>i3iPmrb30j&1 zYv2Re5w=Yr(Lv4WOl?@{y1`0kMgc_Q(-O4;XlNacrzHYx)XE5IPaGo+nj#j$HkooC z;KXd18$9&TDq##OgmKd`Kk~9aM}hB(6d-~6lc}MGAu3jji4(S(vv&MXX5B=cQWA8L zflD2nHS2{oOj#S2_jVqPrYS=(k*Ez*21{2j_2{W_+7z4o(ZDizzkI@$)S)yTrhkGe zWp2}$eG|cdm_2_ujMr>{lrDf*g$;Vs2!Bxn#KOM@q|Yg z9k>0(S^_*Qn>SMQ=mbbj~gOgSu}^ z-C(uw1!y46DUbS3ru?_lLjZL9MGH8}S4S|7URCWdw7`0-zoz>fI$z_tMGCpOnm-w1mvSpQ>%TLqjpDI9uW7BwqeyOwzU*ZPLW*M#N&z!qCg 
z8v$b+&65+DLc)chN~R)WAwB^~@i>s%BE(!+6`HU?K`I^gsa-r^mmCfXr$qZ?+VAv6 zZ$z555)_2*iy)Xr_eP{VF105G!^}oFUH8zUflisiHM|JwR$?Sv5P|gKFP8A#&<8>} z^rz(#j`k3G4dFiQ3y68d0wRe>#??_@JW;^!h+wlpZ;t3~0KBn?VkL2bMcm@IM@2s9 zEdbt#LKbn8-v)iU%;KlRUlxA@dWUV!EBDC>O#I6ZKxZb zVc((^-XD^6Ogf4I?U>pmeHiSbSX5L_y)nVU!$@8AeN3h{BoDM2p4FY_ zLTBoi>f9DG7;piIi-p0>#^`?OVg6K20GJ`%TtK&;LGmC^Qr|bwb;uC+L4QNcSEAJfy`qIK z{0jOpd4|+A>v_K!+Xc?sPak)>{vk8$>8AKg0}kEj6gzL*&_yr$bxe7|Y4>g66^C;d zYEI5{C^TC!NK0yHzN4&Qifd_Ym!erU82#%|t-HZV9G!DjOApk1xP_lt%+ftVPI~<& zrB_}+ZSuhS*)h`lvl2pr(vq&U5B_1;%Q50g-$#3Iu3G%^@zy!DQFDxkjJndL-LP!) zk&Bmh?ehKdp-j;Qo6(XkJ+JrfI&M>r*@F2F^_KKK%imrtd~UJ-^W(<_@sG~z`Ih}= z#htH%a*x06H!SzOo3Kv^XP;M1_ejVim~9C?Q@m|qq)J*mV_=8-Zw4-%X{}n|ZWs0E zC)~3C50ALB$4uu&1`XTWe{e~LnP1JU=%lHxLr)nv#&~RB@y=#P@QyxvHy%Ghzte8P zoyE&^A8wj4ere2t&VvdDn|+wvahmj9m*?$wV-r7_YcWhgxm+kxT7gVVN# z&HUn+TD)i!eOh!EJy!C)!obe6mb1@~zp<&*bjrGO+srDY#;4bg?Ge{?w%hfh+&hOW zh4Vw_KYQ&p;zZi;g5-rxzGZt1vB;iV3#vW#bY5`ZHmuvYqP_A{CE-?+hfLmf{qUBV zGG6Z9jGUkkf3lX9)OyMo)$d(4mE_(KT+vUtC(Fq54f9(VcgO9LYr)>otKs&A{EMsZ zRhM=w*t>Yuq1fM_TzuSpZNd6yW6paX`rgezR8{+Zyl;=7n32(CXT%dTy48I(J#{@{ z@;lYLi0}J!tq*jv7?eR`HdzPfo+;mAy(RGdoB&&cPugR>AG3Vj11AgCd|WWzNULB} z&ci!y*&S%aQ^Ar!1D1Dp+R4iEHLGd9a`F=gg+(IqYO4054l_2)>mg8z39WT zN=J@KmTJSb4PAOM7GNi`zt+%r)#YPsA8DzkY#6-x*!*hiXxF{wH%>p#tely8d%th@ zYVW>U0q$=)UCnufd)4b~H%>Q5WYE25^|#1fQnd2zsXf#8tv56ZJrVd=cDd)};a&Xh zr*(Dzv_iQ}f4Za3w_X18ZPq^?DnI1>#AWVk?A-aziGDX844n7jOr**7<>u!W=eWja z1vt&DjeGnmow3B*iF}(+kBIZA{KC{-k9pqV%*YZ<==I1b>XzyDNY3n`>%#b%cf6j| zU(9?I6L?}(*QCVW9_iOI_Z&GB@+cxh@N|{J`_PIj=@)x>zACx5v+LNESAt{v4d>Y7 zSHB)wwe)aUX5y~omshu3e&^ABW-0C*OvZ~l4DRv1c&mN7-MmvdzV7jFv~sYQiFMz0 zx*m|MPv?Cd>A$%Rt=r+bTOPJQ(j_QIw*yBj&O76JnM3WPyVFAMJJdM`LN zFfoMTv%xViZm3yQhz>SmN!zZf{Ua)dznJmpdmjDaFx^7|mC+0LmB!vQ8GPrY;F-O@ zi>FTUIvt6{myYC`zQZQX4PEnI=sR>-e*P8HwS^x`-qf~NIgGK=+YmPQ`&32x*XPBq zRc=Eo>s%Qf56_!X`K-&XP_9+_*O&;)5oau}ru~p!nA3OQsH4-R=Tw}U&h@O_-5wme zwN1OjuKVjIY_6zpKP3cLhAAALO)lH!ku-;V$9S-1z$&dNs`r)$@cg+OeXF#cJkxHB 
z<~_k6cabrhnmnQt)t(6$?Cu5Xrmr*Lgnd((|a$s=ZO} zs1^2aOSW1q*m$7BvwTMP5O4Q)pJ&x2WS=i8)4LzK`DDo|uGNF0E7!DlYcKyp-sP5S zk$>K|Yroa;+u!$B<;~I`yxX)4AC-UD@ociW!8*?X`=0bg0}n2fcQaVHtNuY&Z1tX9 zyXTx(QNMlodL2)uU-h}L+O_=oGv_Yxix0BxTXsH2=YFF5f)TR1xB&0Z>Ej09ypu{B zd3)%Bq!;HN1Rd#L-1AHI4!(Yzv2}4pd@0T0bWKPtUg?%-TAEh&up*3OEpd&d8eCqz#zWaUphun}sor90A zH491IGJnRtkE)Z;$G(&9p_kphv5B@m)vtDIwmA9l!DW@gqUqmis`pf{b$YXF)ald9 z+f6>7#qM+Ywa>|$hK{?}_s%Wb^R?^SAK^thW0vN-7nUsPbLn%!tQ+(pR)&vQ`$aax zH;72#nQrATBTqUmx!N)6@T0WUO{+uNbt@h{OZ0YQJ#9qFy;Z7=GVM-JlKFAfmtH*( z{l4kb)y(~A&K|F~(8g82oYc|!VGpAM#jN*No`pR$>6KWzTy`^R;>hABj}|+`TOPae zBjIp$@6v^57Ykl1H~A`eA3v=4;B8hqVEWNMHQrtcQ@QVXTJs8Om@A&tRM@>+2X2NW_ejrvZ%HcK>@Gj{8{-Z{EQEZkEL$f$zAJuyWo#{g8^5NM z)*Rjwu)5eK)cLTTdu+k!s?VuMqpFs+t+6?}Y>uN|U+LCR?=5daD(jEEFFHB{&p23e zIoh>%S6$AKK|yJ^et+|(yMNTqrJTiHcOpjKWhf1QbF!@{Ev`eNX%ESUmLUD64N7s+hB#98g3Gp&ygxO~rEX1_g>Rej$XN>P-Xx z5`5%eMQ^3>#-u}-cmqBCI&MWx)B&~*1lOk#4gR11rprjG%jxjCo)ofJB``B@m!7|7u#mP$X@nLI^c6+;rRX8_G?dU8tVOF z#e%GP(K}8bmM6xCj(xs=&kT9r>4zS5Egc)YYxQZJm0rsPj>$Hr@l|W8^gkZYU-doH zq~7gf`Ix~|yz9nKaUL+9HQnI-7A;nSYCLXM|7t&jwX0(9iq7p8ZrnLVcl*V_yi3P+ z^mlkrPIkO@boKamv$CDqb;}72R;FNqr?fE}HnUETXX))K zpI2$uJ$`1c-PwZFqF(!SQazU~9YwPL)s*Ml9GY}@`bX0b^@;TFm$ib= zUVY@cAa8AqnKovZzo#rScSrD|_4&tlzT0N&&N$G=WLo*kD?X`buMB@zwD{V~ee=uM zJ7%3`3t)@kUh)J;bA7YAd8o%=8Ab9;J%)XOV>fvj_I)tTGFyAK*t zU%T!Jan$a+{Z+s2UuJZnkDX@OA@lx1!(i+DPDZ@lI}Z zBkvf;Cj9W|=5VaN3v1zv2PRuDCx=#_3Sdkh-y`RTVK`@aFJg}A?p~%lXCE1Q zprmB<7++Nanf~l@-3_IA@6Xd7dQIyiFg|*;Lb1>+a=>)2cZ1%@(ydp2UW#!-;@yh- z{W0NdZPbFuQKp**RxPP75pS;z_2?B(9(%W+6T}T4=RCu=*z&7n-3Q|r$Lt5^A0 z4prTAEXj(m&EsvWy}5*b{r$S}B}48?hK%I-=Z_kv@A9OBYhu~GwugpioZ0eZ?U_U4 zxn;{l`-HzAdAfybeD#=b`IV^3vbnFP)EDa7mKcm(>SR}bOv*_cbM(&d=5e%dqlEH9 zRvp^yGUJS0XuV&*G9{+i^|D3Ria32+y-7XC*6YR87EC!5zrffkN5Avj2;E5wMs`^> z#h+gOqwcphc&|R0oqEmt(_>v!+ruM$bk?^yELkWR8WB|zlWQF?(Wd|EgW~q#bkk3N z?$|OtcX4kkNy3GW-Mxl=jzM|PTCX7Rag)v5*U z*Z$D?JvGyEm`J{KM_T{;PqS{*74ruk8WoC}=l-F(A+CP-(sLXA!yvz?%YQm7%p0($ z>V9Z~>B-O`@_~k`8U#CnSF|lh_PWRypwd5;#b|w~M_Mgh~ 
zy|3h5x%G!nl4Iw&p<#iv1v1Z#i=7r3*qWa2TskI9xy8OHxaOXOh+n5y@_pB_^`{<| zEw!oBNhwf%TH+k_M{Js?$Bl(iQwz?;y!jxzy)HcV>*AduTdQ37wB4U09*lMjmp|=u z!sI|(htN%fZhm&l{p@<6KQr~ynTmw@#u2X4p2eB>g893PvyNsx`G6fBKgXhEOdWgs z0i*U$W@p@d`f>J^YqAALtj{qRYu-N^+|KP=Ilnq-`NMFBq(wfDOnvXI)=Et%ht>6pcDWTsk6!iNO)GK8gtlVF+(Fra*w(zb^5S`UiN_wi-RZ0ww|G|J zlMmneFAX*`zczE{q2)V-DA@zI^v)!sGT8-D`*kg`=iUT9mQSYd#P6J$K$u zuiM#?%xt~$IyW~BCfIw^wvYH?G}4XvjF=l8HY{Xq&S}#U+G2b-ndj?MU~C=Iw*HcB z;y~`eEy*93b+;KcX*IJzvOMkk_?`5-`jy9KjAouU=pOsfwb;{Qc;U41i{zWS2Mg>b zzZ|}%`0$Tw(~rM8`fT~tlt*2I9LC+@T`#;wn=Ui&P3@**7pV8 z)yv%Whve1E`RyXs9L_&&yTK*X?0Q*1WQPi;mtHs1p7+OQzQqOf2r3*d+Y2eQN;p>^V|HrX zja#k@wQkx~?Ob)*VuYcfWKD-;iw*R|38lj)FQ00YTX2nCRnya^=uY382lsz`ZuolE z1>et$Kh`eISZ)3Bv(K$5%LYB{=e_h+(%}fcQ=cP)R~o)_C>~_lHpc#hI4ZzvL%M6y zur5|sxjL2iP6T~;_^@{UQhdME|~b$7J@1yd=427OYH)IR3HU*t@e?T{5&8 zp8B`Q+%zb-?4jG z^^nY)_;H+xXAHX^%l=E7E04{S-Eywy?yYyjR(){=dOfe z5!|t%V{lkOGMewOq9ZPJWh)NRf=eCLzci!EGW-D$Z!$N+iARg0*p~NXnl8ss=db9- zqY!=Z1g4@+GN>V194e^qKEc1Y%bL#<{yJYcO^}5(009?kF}Ya%g%)s!icqi>q(1uU z26PETN$ROD^uU)Syiso=NCUW5!$G%Eq3q%+^jb|?$RfBH+*0PLFRH1p2hW46>+1hG zN8cBrK>s+2zDxpTwlUNm-C0&KyOBl$I0tOJILjhB!gQ~9+fWx8sQ>W;hD+V47ezs{VpOk&*-^JO(0@9kF9R`zsj9yW0lF@X Yd*r>#ayDrO|J;kd45ay%hf?A6Kk00n)|rod(J-l?6c2q=j1WP zDCXcw(n>B>h}bMSj^PrX44Pz0(g>kTlPCZ;jkJ_YSv-+8ikMnmQXh>LC=oGcY_TW< za;iMT?isP6-oQBEd9OZ1%LL`Q%L@WU&;E8aS6g6dVF&V*<0vIa`kuZxiQpn&< zCKp1tsgN}Zli?FF0WMNTwI|I{JI2NZ0u@7IXmqkG)rJO1q<98Jp;&}T@l2K!O=>EW zBL=}F5*!w2!j-awIJB|TVJ=+`PpFKFCympjtSlLuCBR{XaW*a$V?vxI0tg$nLM9gr z5pIc8%*JIhu@umlshd@tj3uqr?}$GzJBKHe1C+VCRV+cRGT^v^G)6B%*i9+ClKN>Z zp^&8v4|r8=;wM>KO&StnDK8rsk2cCoCg#d9P0MW392m$Ei*Sr32Z=$eGXcTDV9&U` zG-^LhxdL$}CSk!MWy;Z(ooTemCJyk!062z^gX)E9U~xD+L>26Y3cN)(1p;{GQn3J& zvP5Y(bdRG@p)Spjm%Dqv9~wRTySuk&Ol}FoAaE8Zla+@LfSk(`$nbv! 
z#CS+h0o1hi>oDLerPDetS zCMN)@e=E9?)UwzhrW66*Kk4P*s@?%iub=cH6=tR3BDrd%tsy|8L4iDP(~0 z6DlNNcX4W4Ku;-+RFjLW0VV=F2%{LtVla{aVG0!jIggS%CePEuof(7PMr^M|4sixh>S9t_g}{OF8Y4nf1v4z_bh$vS@9f28zO@f*gg+5MsgrW?LxL%VelE0&{N*BRFxuG{}2E>y6aR z%g|^kk0aD57^dP<+gQPl%0}SQ2(dy6*gXah*0Ky5#MFW^Ti9!>LrGlUEw*|+Oo8Gke&JfEW_+(LGhDae0G;1D1 zj>Gvtf_FerhW2!}x8KE-Pv1&LKTI5n-Nb?>u*kXZLilt_$ zObYb1a?A{fbXlozN&!FtPllr0p8(-Po(%Q1u@EqtYzhNRrI2ub?gr6HrBrAp6ALsR zpcO|TJVqW59XWuCmcapKz`#XoqAD#$iZj6Uv(j-)iX7cf=$Alr0?n;vlSp|YIR>6q z1orcz=F+F>lXv`3EG;Cdb12tmWqm|VgHeHM&})LE)h_iY#yR9 zQH;otnBZoRX5uvfEpcmF87v^cgV~m?OoiSb4iimdMPi-|ZJIl?f0Gt#I&)jJB)6o8 z!DdM@E-Xfz2~yP7Q4FC1UDPc5pTH>4HV+>fn$Wai)T}Y+E&wP;Tx-kNwRu`;LdO$= zjRM`GB-g$5p{5B84n`XH**Z#ShT)0ARl!-CXh3p>Xt&y)h>A!;aCGK%Z_`avKw)rX zr{wX6(7%ZyIZFX}pufdNqHZli-2_cx#n8n74D$49F{TwfLmeYGS=ZK)6N+dtHi6FH z|0?yPctRA!1XIxeEdT#3k=m2CaW#Mz`(Keqgi=LVlemqF_H_5~YD?k->_BvWj%Fdd z%K+B@1E5R>=9`y61u|{=YXLi0EQSytq*M+0PdKA+ToR{t2LC*Jv{;M<`?jIzlr34MwE_40on?+sG(Q3Rhio{4?;AbtEIL1|H~XfbTBxA z{%xDN|2=lQ$t3=}jcaWUTbL>&uUh6qBfivKl*V)apR#h=M^2T?Pd0&^gDxbE;B&-? 
z;gF8dg_}Pm+5U&CKGzHnC1g3!m8R;36lI50@2VUJO4V3tXGKtogbVp64){(`U5=8} z5lA7kVdDMZdADF!(oiUddpO9RK!ye7gakO;sX~4!DKIWpNgBhoy8zv)qb!LNX^C!& zRXHsxD!J6(WlKSLG@OI1J?3tf%ForZFek~E2C2~#<&c=S3$ z8K-n2{79o<2+NvVqUceyD4i(Xs8PZGIW!Kv3oVzPM$4h|X&fD2n)*q%=}E8YNx$jI zz?a6MjiM=?Vo5_a!LS%s2IRcZJrNMr^o+4GAU|h<-fYJYUN?55e zoTH=|oP$|FIH?EMJz^yhS^&I7cd;U-Buh#7#8}gV&1iHZx{3Oq(%D%K%cN3_7@QE; z>2Q^cVX$pR7i{$onA(Exd}@*jR*xbod8uV_X)bw@7P@3zxOjiA;ve zVVC5H@5H!VL<5mX_>g88Hc`P6pgSxyjSGOmr8L|g%P=XN5>uJJRJujx&-`(_EijBF z5{UsTx>SJSAi3rkmY&JMxh%MtMol1*7}nz51lA|!D5zx-Nu(iki`f)pCrnFrFqtA% z1&0!rq$!4_3B+)6Rn4O)P!`0wlNK$A!X-*#s1p(FLqE1HNHCErga& z&()!N47GZU)IGYQ0aTxnR$t3e##%j1qvXbX6OIY=(9|=T=IG?p(OfW_9`u^!=;hO8 zS3u(1aZG`FbM#K9QRB>7jMHrzr%#R3&!-pCW#-h#cGO5{p(EXC$1z9ma*J8C9374Y z$8sKwRrgp?J(ewd)cQk&5=@k`hD#i^hr_5_f2D@!I#GI2Ho@MYmb7GMF0hwRqM_sP)tcJ);Sg zgam|Jhv}{)4O9`MvOr0UAWeMHy#`w>l=v%S9O=OZ2DJ8cGrE#s&QMaCNBaGT@{LQH z1z!v4|KK_a86hVWLYF_!X@iBs ziA54fC3ur45Gk+?QvWO&OD>0J*get^q2O_l$3RbTHBG(fh$P?=sNw`dLF)5lA}CWJ zqDccZ6LPwiLNMy8ykHu*R8+!Y)7&J)(^z~l*hla)PDCsbN@;@%wSjsLqJ%VJWvKN6 ze#`vCEX76z>;oD2WEjXApb$cs~riELhRQ# z1QEd^o&RGUQ4;K2C4t8h+*l%wNFsn*Ub2!%N6w{;WFY^fav~7HFw+o6)j4xa#dhFh z8MveEq!pFnWQ(OZQxnP%+o7kDI6+jx?{Z=YX$%el?y%ru6NL`?kn92{i=qaF+8LN@ zIRnIJEnlEV1rWbz9Dy9H5|}|9JA7B%&{2c%gJ zG9)YxVlLI9j_Ue3utXdOo`@~Qg^)vV5KA54T2%c4_v{XkLCbPraUhoMkH+lKDSdr@@Wh?}NR=_f<~73qRo3ZVzn)Z8ay{*lYt4Ab^TDoj7NC}%>L1c@4)1;uFS%m75`;OH0F zhgUJdU>Wr^8&3}TZ%nO(_>(l$)W?zLnmJW1$Q8#DwUj-691{TkjHY7*7U@Tk;~PM5 zrjfeG|7v-(`SFA0A)abXk4oeeWJg|_2$-QR*MoqVCs2U&!P%xARqvK1!xS=zzu}Oi zkHc?t>S;$kU@#LZ7=Q*F2z69#U|cjShGum!2ugK8h6$HKZ3u+AowS;%WW1*2FUo|p z(&~fYNmUI7HWWq3(Ma9k5eZN+2_sY} zsH(Z<5wjFz2US_uPZq-O4q_5z#J8h<0VE3WU?*j$L`=ghw6C2|h7)JYp)ioLg;W9z zE*_Ope~`ACHknW?mZS0&&EXYT!6B`|N{FEXO0~sQttTMttZoHmQWt1uvUq@=D%`X{ zH7cmqO;zbdwRJ*8UsOd8WvNjwwUwyQQx7EoK}e`#QZ`h7QXp!bfnrjs9aV+4Xs3}~ z)fI}cVyG5?@<#1`U~Mx#&_j$Ph0Aj(Do!;J!jhy}XlfgADj)F$O29xNn%k{ZkJ<^RAox)@E|zS&4f>!w3a~Ki@HD)lm^D5J+o1l z?ZDjtmkYd9717d21K2k-1t^v32~Hg9U58y(2vudQq%Md6kRie|kubn4?bXnzzK=RP 
zQwWDa2&hMzG*?2Q^Dk*65es;1@B>IfbyykbhQK^6OUOh8L%58YDxL(?2f^nerSmju zh(3q9yCBYx>L&5_C`B}7)k8sUffh@#1rsSxc_<*iY=V3l=2 ziW~ynPX%L-W&=MpL&=W^ z&1g*qe>61yNXKlrk)Y)@YW_pGkV@*IiUTDdg8_UNQZk6C`D}R5o`VPPs=-rXF!i4r z+>R=If~vn%D8I}6H!LbjQ$1QTEaxW-n;onRo7Qp)Rw~p1{M!_hDMFhD8;jt24B`T@ z5bn@a99O+EG1Vm%x+PIVP!}W^N;aYp(=a9;73K`2f=nbY?NUMc;Yfg#8bJ7SVJ6sD z>Q)8y>!#yb|3F=gM5k%56fH!it-T65XiS}5^7N$lKkQ#^J-YO-fdv^kUVCdEm3v;_ zv*P=rz%Cs^_I-Zeso=NRo%L&u?{Z+4`sK)7ua(-#%Kk1Xx6ArPeBhI<_}zSShsdBu z?P?VEg$C*C%WhV8JicVD@-oMw($W5L#l!%s?ooaXtb$c3lM@r%#uiG~2^E1h>A(F| zIL+njs#sH_X(bct8b2i;dV0-$QTI|;sao;Io0=+^Lh`)#r4lePMZEOsL!22XY?A^E#Ainl<99bakjOv9_Ll`EqPkM z$~FF+_c+#AIpIKY7sJVE_B$?4uz0cZ*471sd0P~9PuyQe$XB-8-<~B2c)xee=ggN% z;e|loNVoHSbhCI^?cO)H8Yi~jemro}+^W!xTcWCq_AY*4>ilcZ;v$=nQ67%fpB5J_ zyHEq#01`4VeJUL7-eN8qE;>t5Uo{DkeHb9(98e{jp#`*?Feb>F#> zA?8W%r}93+W%yn#us(_B2hTdtf=9kL(B^k5=|k;=X~&30^AurRsIs(tEME4NzTn;;3$A^gQII}!)!*gP zdTFXJSy09}omNT@9q9M&)3)S&HMGCJ@80=2M(4wlfbwhk+`s2{?tZoKspI1W^83ZW z*LNnae(Yx%@%kox(j^-)<4-eIc|b8w2DwRiEOvwLd0-O&A0(r3k6*{Q9P_>XIUGfL02%g9+Z(AZ+;;}4HK@T;@C z?@us&nAHB-=0PjEo;P>29GtSg=iv=kuHE6`F3cY3fA`Wo-IuYhIO6J~&dF*0x9{q_ zhV9@Yj;tBx^P69Q%jPM)8V;CG-^M(->iE}tbC1`b`kZ!ka9@9KhtorRwv~v0eVDh+H`;Ed8!52?sCtQa(6pf6ZG|y)Ka)!tLj4wCVE$lJ=SG%zX zM-9l^H~C@N#Nr7N<@}QERlY)(&fLGpNBtHeeq1l~YEW*7ntA2hbBjH0n?7~AUYd4j z!;!xBJ0Do?u$V`fo}6^=y~F4l{<|-mm)yPiEviF@!bi`|Ht))1ys1CB!Pe1yZ)kOl z{?3(!JNI0$b*wQu>+oUosM2?T?s2INIiIwFwxPze)6&x!=RO{;PwC*-cqiL1B>k5e zVI6~BtcgGTw9ADhgnw|J%-8a2VgFeAwxPF`FR$V^%Gim|hwf@Gx6g7kqmAtT`Pbc3 zE^<1t1Ja$A)m({{*I)B|nLEiK(tosI-rUa>5tX~FI`*2n;k$bk_p3+$r5)`T+vmqL z{t?pPv;B*nL*(cUH7f_{rznRqJ*|_-1qmO!B|k0wbJA(fo$1BLZd}h>aATi-e3nbK z(N+5Hhue+#Z#o%W6pKv!Bfs7~BD5R0E8Tbh>hHMqbCap_54>M$;IMh{j0YWG>v$O* zCfpwDUV1sz$<6p^;^>O@lSUmn@Y0C2v)x|PWZ$63*fTYsvcKoDQ+qng?{KHa_`ZB; zdpbTPW=i#Nr)ux3R<-7Ktn1G@ZjsqJ1@*A8`Zi|Qw6eYHeEV)y-uW_y)$st$UjI&= zoPYWkFimc^@{%F~cyt?YuWbxsVyEnHnc;&c&Xk^}9ZcEm9r(PP!}BEL6?=OeU>hq@#b~5Q@F&xZ05+{+)S?Btk|b&EpJG#3dw_OC2nVG*C%)Q-F4Qd)MUHbEt~zm 
zJmPg)a&sts&Yg2IaSvJA;Sc>RUT@mqJz3uj56zjl!c=fKFRV-VbouNxuU9nm3A2Bn zA5na7gm0$#oIv}drn}~>@VZlZ<(&QUy1jl69~_z#N0)y-ak2lbkIRyC^e2oOeelp- zTjAG({@Xe@`(Avv@pQxMOS_+3>ih0a#+mNh_{RlfT!!`Su>I3to2(Yc8N9h)@;;q& z;n+aexwnQNJ$G$x7rQCF2mRTJJzFSz?E1ef4nKlZLnAHJ<$ ze)53Q>Is+y=f-vCsOYfN+Q-9PSPl1=#l+sJ@Gp{)bP z<$Zsc^w;6aFk12K9CMqYPWi6mdz+7)aOTdY>&oW?Mlw22O!(S+*2Pf!&QmrO+y1%p zh3=~4iy>F1#7&;(+Hi_BTDJB8(_DYoviZrodw=`5=Ger`^=1}t9kOO@d30CcrOf>j zz2!s9^WuZ6h?G(In?IGocQ$B` z*QWU0bN3JIakkf*7jJ6!uj;jrQzwkBJIZl&tuNr(Z~n3=KX)Co!$f}j7oiKN%g{e~v$M?}$D&2SN=#xHmgN~nimSIzV{CH+> zhi=^l)o;Pj*%A7Cz z@+$iExT9zP+MOL&erN32CqzPzf=dbG74KF^X6ny%ym$Y>##ry7yr8`6u`j(2_DQJx zc;A)^VRhl~ql5quzFY=Z$nez$g^W5d!k5d?Q@HAinnqKkDpL8wA+A)NH~4Yz$WKBa zW8ou;4gutN^wxH)jzCngx*m;Ico5Cb7yidXxZK`DsUh-zAEFAh>2wM{?>~g+ZwEj` z(@ZpA4t)F&XY&`JuD$4^5I!KX<}XAqZLV1rSQi7#?52o$n{3RtvTjqBF5=#>4#b34 zdrGILT?})2T6NTJtupyVzE8;QoWhC`quEntk2BqR?HKRm^Ze+u&x~)a7BOXy3!S!YewBzwf5|tdIWv2zo68sO z+vQ{4+f+1uo@i6F$K`{Kl~v5G!H*)UEpB9+uyyB*4fUGgXz4a+MmG`b#Mo+Pu*;wo zMSEunGS=^L=*Taeb9iOl!;2@9Z!Yzic;Nl$z`M-Yv5_I)%s#wac1c_{d#2cjm)Ed( z?ye>A_Vqp--(HE(*?ps`+ zw2O_sI%v%xb`w+!Je_*L@q3_N(ht zW`RjW{lMTAmcMOXJkRf=xiQAvDIaoeQMZb@QPz=x3ir}8t34W)-!qWu?5@ALI$&k( z#?R(?7KwS+j&^-MIdU1TxUg!`HRCRq$uns~cJqXMMsZAkv%dLtwjuvCTri*ZiA+A{Q@hdKcb@gg1uLR# zmSjgvcQER3`oy#TH(qu%NL#JH&uxd(vX9m4@2$VJbWvVDPw#u_%O%s=s#&I;m+y@7o-kwVge#7NULA9b*>rh8(e3kJ z4Ufb~yU;fB7raO|))#cx-yO{I$M81=&l5 zCZ#J3B87jhsrQ|h`z(N$7CUFmxIf<}&Mhgs>Ez_ddCdvG9q)K^mW@-yyR1VdByrjUAkd zddLq-JXaiB$s2q9t^CC>-5q7#Jq;z{;ZqL;n)f-rbVy`g;l+36DSpW&bu+g{FW&QX z-My_xSN5o0eC)`Pxn2pE#%)U~y<@qja_5;%vcM4mtLLuu^zPqLV$SgOch4&|W-jzM z8#$Ge*Z5W0{^ElQ{eymhC()=me zYxerQICe5FC9wX8+7Q$NnzXE><7W>e~!!;2~+lq>r^ zJ|e_VFFx6QOVp>CVR6S5I-wr-?hdVR74@hOa#@_c;{_f!mZZ&2TzBfx1OuHxa_h;v z#DnB|Z;U4`5)51Fo|4!jZeP)KQPuK?Rxv@tC1J{uca#Y^BcAz$-;Fr&YT|pZ)rUO) zG}g_YQyG}}c28*ZwDZRF--EwpmB#%cOY|)`w))-r+}R_JOF9iWv}EDBQXh|LRsEMO zV4GPfbvw^7S~YdRgxZq}miKe}t=2$q%!wP)S@&O@ul!bRd~k-IbZ}8OuM)j{Ztjk7 
z>}LML0hcVNFy}2A9)3I|i0R?hSH8BWFMXU-O5J{je)78Raw2HEc2`SCDL<9X8o0_?36cyPh#EoO|i!&4euAYdPS_o>*ZpnQiiV_r z4u7V&8}>c4?sMFu+6wZ{MZ0f-(;MsA8>6X@)y0&|!^l^PKQS>>O(fe5b zT*CSr>-yG;or@p7FEVR*|996^?!GgOy-uZ5J+B;IXF6r*#4BTnAuFCOoL=K3IRN$X=H$Ce~T4|2j5&$oAQOJ)W-dW9l7F^xwJX>(#GQPtF>^%gZ;v;Cka2 zLpS;Rv~&C(U3&!SH5l#~v*bp~e$Rvs-RqsVy?Nig;%NDwQ(P|ZG^-g|y47h?)vGCa zYqKmY>;|8#iN1jKcfBKl5{9I?oU-j}`xow7wbt!{o#N=noVYiMPp+qwI&JhQIIcf& zpX=8rOD24c8o#eU_xF7xO4uuP16;c2mHRP5{ruAdNzT&Y<=;xzb_UUTlS zVRie>hl7@zr~j&JXX!Y8qM1`aJ;Oks_K^uW%O`f8GR5(iGm{6E7(K5ybX+kqtYZ1& z!v*(m*`EI-T3meNh5c-+W3!h%d0kznoAHaUBJ|YMw1E~uUX>{qLi%j8y6M|}e%;s4 zjQir^QQvzmN?5z6y7sp5xf7R9H9mYC`giWN$D`v@KQVnhQxh#Vw0}L|_uD2TU3{YM zuUNbP`+bkQgk$C5cizbhgw?Jo&#!DKtCGD-=OmAR*ZASf+}nqrUr)-|_+{rIvmx?5^=kH?+tZk$}rXt;bU>8ifZlXV3yW(!OO~xQbNt4Pk)qd z+M>T8m;GQQdtR@zennX?WL;n69kHL9R^He1m15<#Y<+LD3VY$Rk_pZiUI){JZo`Um zsDN@ggmWlh=C6Qnny{Av*<=K`1v>KoA=eioF>NhBz#!-Z_FC;IZUD#s>+0tD?Z zO*dx%AQ__i)-)!CuT3lYCjdWO`l|3jP5_gOQ63N~Mq(ADA^O@>8VltL4OE$2^!FgB zKL=q#8l?+hHk`|awo}FEr-ZbYrExGw5b?DCG)Nxgh*e)wMt?9vfxe1^N;J(-su01W zbP>wjm*Z)M^n?%4)WYmu>`(#tRg^jjvza>=+Ht( znot#^)VD@};LH7~A8H2ej-fqOsHnfY0bl8dDpSbC7{gLk-?oRL%Iy2$ec)0b+3r zhj9#2aSbR-0ZTrGaSmf{b!~8Y3S@M7aAgW{4_E>K003q*aS%%YO96**5mIpyC`$ua z0RR91JaH3FPEJby|Ns9=|NsC0O8|6n6k>@(_{ZcBKeeY9zJ%;=KRa$1zbFTPCqKrB z1V7Sw48QI>K|d>19zNh*c)h))kv+dxwmq*L2fr~SguTqOc)#~2AiwU&c0X>hPra@X z7e72M{yxh`x4!Q%sy)VSs6DN2guO0tLp+Qbr@c9F;yVn2h`mK-!8><_Y(0BeH9Jqj za=qKSB)=CLMLt}5hrjrfNxyz=(g;6=W`}zll`-E)^<=-7Duu_6Iu*t`FV?i1HZ#`2Ze-C$^SldQ~1uUK4PI# zD+T^jqhYAD1D^wbVyKK;=u}k)!Ik{HQ6^M&>C&b4e|fKe-W>SCL#KHqTl0zY@oC1L zWTlseFj{}Jpm#6eNJ9rzoGS~L)kaD7`%F6FPZ{Us!*SrgZYEyqZqjpNndG9{0InKY z1$I9d5Ls6v65QQI7JRItcz-IWy%3_`gJbDZrGxO-GzvDjd4idL0yX`y7VJ+KQ$2QRG_4);`NltVx7Pl{Lg=yad8;GH9-{6Eay{`p$7OcsgjpZW{?G6dtEjBsbvK z4|yd2wGcT}H%99u^e`&qBS||lL^3-MkpPhdqZ%SJ}4T?)4v(=t&R69P&i>7Hp|viPpji>nvycU(N=`~`PXPM-zc*)GYBK{ zC&W4G}*RX=}?TOAuo7EWk~@|Jl-@Zk$oo699%4}YScj!&oOKkg#?Z6~7m zr`hOdTSa3`hN#e+LDp)^9qKD>4SE|i=&#;Mj8FYCY&tg!3N8tuZVqJ>d^%a(*Gtf~ 
z#+s=Y&Y%*ZUaWFP7*W31$p}OxV6u2JN!e6`pNrzb@xC*WZ{LTOb%L!gbk!j>B?m7r zj%S#r0&FTT2VHk-e75oioj$Gx+>drJ^0AZr*y+L5xn;`SQqN`jb}ppLm#}1oum)&( z1NF52P9nxF>E94PQYUo^I=5T{`REZUv!E0vPs=5T7TltC+Tu*~@FLu3e~-%i^rt=I z*`&3qm`-zVB>iT^n6pL|r{5Pqwbo3W(PvCHR+p1%yCFJrVH|n=%nKqXWsx6qrxG(0 zDI7EIB32m}$m{NQP%qg>TIRNsv-heXe(E#$T$@U_Ef%69t@|-(Ml-yfJ4QFS<)A^+ zKKNnynAv#Z9z7u~hn2$CpyPHNOa9)ZO_Br5E&o>HlBkb$G4AkW;zaD9;fTp|DruR| zZH`8vGWDpR1tC@1^!vW^pj5@d&WFSNTNVvAY@bmIR{g?2jWx6D*V@>&euBS}^yp4@Nia!}mH@NcY+Ooaz&u z)bg1p621zou5Ccoa6qqX>+qtgAiis*Fwn7&+*X&P7j`~`Cn7rFCi9u3NZz5jZ7ZN{ zStxd%8KNVaI#@0jj`pT&@#ow)+*`C3@}F*JkLa|M@~4MjV6PZl=}TlHyXHgt#!ULK z;~o`UoP?F8jZ|A~3#<&&Cb1V@g5zX0{4`q!TE+uFwXF>8MTbF28OXBCJ9KuXGo}SC z1=umy;^Vm`aA)dsy5@)w`iu#}%LzBY@MjwG`D`WBFOlAoM^e@x1#?Qk3{&z)1DsFHAe?p4_(i&z`DL&N z&IPIB$2Wm!r5cHLk0-XW9eK>2*HZM-*F)4Sv4OC3bhpzF?#!vgD zVAMJvMQt*$x#10SZGI28qw_i}dbo`1I#Cl{_J!fq=~+bg+&+3NdmFevi^BHnPSko; zH`;AXhDWl0sl3B$63kIVdAqMT^raZA0#D%g2nmk&&J4_}{ElxX#zIk|BK8ZI6Q_P5 zxVK9lUg%80yiOrT{Lxj`;rS*6K&-Bmp)2QSHbsfrl4%=11%jdNT5*x z2E>1YNv9*QN-Bf~Eb+q1tYW$#%7;C@*^@ZGDaO{GOgO1D55`(mP{Fd7p>6Jvc3A=s z>5t#in3kc8(e^E{MC2^2Q#gZ_npLVro~i5_Ic1tkQ*jWYYd# zdj9t$^!4mz|6mIHeSsEt!4pF|bXFN_=cG`_l7rY8Z-y5hXpkAL>p&oHK4@Cc$0(<7 zbhO|Eed9vNilSle=KHGV9PK3PvHmn|k$yv7Uz&ttipfCiR%6_&_po349QEAvhwSUp zgsj#kQh59r8ToU75|i1`u+t9C8>B=1QGS$?JVTD8rL$)bUZb0I+8MqrE)ZdSkvUN! 
zYO!4N4wxOSVc#i^v*vl7=oLIUn46sk==BnV8eeRH^H;L%U8gR;#w) zwCf&Nmhg$OjUDH7L|;dbqg;~LkiZ`2Q=xTXqr}+tDE77cK>uDn;-@?lcaRYHzPJK) zCv79%R}QiD&Fz@CP84OfM+0A(5Vbs#kKM~O(Q4u#owp|i3kHJWaaI;-_q$EqIWK8r zYc)M`P5`og>%z661{z;l1vk$u04B-@o_3`(GBSU$Vmut?4E_d_MP;OkD*)D)E3xT= z5H-JO2z)#8@FinMp3Hv7Ja9>*(gU`*GAj=Gb}hz}vyNhD>{Kf2G>=-3_`&OdnfO*e z2S1rt(#ENWITw9uh+JtZowp<%(v5WK?$goWl`lt^1E=hfVoMeY~lUdR^jMNP`@J{6*xg>^8*}Q%D-~-^Sh{30^xv*hS7mH2n z7+HnAke6M}jD1eS^n}Hz_gNi|{PKd8Y1T}Xj#)OfHQl6T0F8SF1kX8aU3 zU}zN>r+=h}73}E6oX7P1pA&)4f(s|6kOLm;c6Xp)>r%{QeOlGyQlf zWR_c~{C`x4LVW0Kzp2SKF=D3!OdV&4H0#vuH07$bm>j!#*UVyk#B_>cu9-u>p4rmt z*PF%HWtmkNUTPY**=@FFW29-5?o=~{POe$zOogU~X$8#>M7A`i#Q2(d?%6^4d)Cr(H0%k&f5#kcFc}5-tb+fsqkd7S?HYb(79eK z75;xq*uF6^!sq{$VG942GVed7%y$b_{2xK3#4F|>N%U>WYtH5cG1TRp6BP8T;+&sm zbkZ+BaG-oRcF_awALyb-{hUx+KaIY%u)*_f{jh6ib@7VC_mNXjpCuUY}Hee|9MqS+IZ(Ok_#3>?3+KWt2=C{->85? z-UHn3nlwDHMjJ*37Z7=q9(D_V5Uie1hfZzcRA`n4Duqaq$c=80GvixJb=)P|{dzHN zoYVjnZKA|iEgAQ0mV?5uGMvdDh~K&)?!~)!L zosy$WJy<_o%IdB!q8+XYv}DmCQX+JY)b8Ac8S=s~X|xMI2Aw7|?R9wPI>!W_HiQJh zL^`(MG--%jif&a$K=Yw11g9Ke_L^#uk+IX9ooz=XzKbn$j8JUcfX$FvG);q?8iu}M6;XvSsyMHzJY zzz01(SIOjsSJ}G}L1->J3+gJjLGgsF7PabC?AGKT#FiZeyXW7DM#yp^FfE=Kf^lJ?BPNyDprUa!K9iaV1+K<4Wp)Sk|4`3ST9Aa- z4&I^BJh^bSU55A+*@2O2A__RP;)MG%aN)WTBrh`PgzqwRvbR1eg_bbAKc3PbS`5xs zkcT|+N%W1zW+E>Xi=$hnw!W53qpKsW>4zh~n5>td$xpjR?A%-cXXn)6_ABF{I;09I zlWwqiQm>gQXQ~*Ls~lXYdJW=Uo1w^41xS(`p$V&gaGPT$GSU8kAh{F13*E!4V|Qs) zNj6^DeG07vPm*;VK#h7NuvfneRi7Fn zQER1CIyeX~iQlS`HYzd^Dti-m?^GJNG?Rj}X9K=U5nNQN%|Qz>|4lROrQ9TDeLa=D#|GKI3MDD4Wq1 zFAm5{@lz)iE2iV??3UkK^H3(t7w?@^M?F>wqP9rF&DPDhYuj>QVz%O?`_E{6$vq6n zkcSYNB2a2IgGZYWFx{cQ$fH99%sabL;m=Bj-i8+0+j=y5Mjtt4Xo-CTI_U5JhVDL? 
zPsDq!Q*)bM;u@p|*~xu)Q9PR2dAk|%)Jt%2!cF!ou?59J145sApeTjePaavuRB?)JQ?rl`=ES>HMJ77#q)}}cpyd!biPJoR>%vg7MO=g(H{7{Up`|FIU0j5Jh=_G8g;}G{@WAylQk-S)-4_jM) zg4tVZJi1sO&fbXzHH~XzPj(6Id;gSJGpgi)V?50e)`r{tCD=S=6I1B75k9Y#B!&AH z(~39^uvN6cNf)QVTN7XM)TNL5tMS&D=@+nDMUEK0Rs{NRguYzQ2dmPjL5}q|x;c5A zSz2a@A2;k{&eu*MXWG}J-uGPaS>%F(UN_*%)hd`Tn?wo**1^n*9%4Gz9T%Hruy&76 zfaZeZc=X!=$aCKZ6pBJ!0i%2`!Q$WoB4+<+!{rPPbgq?wkYbEZAaS&gv#m(6BGW8L`)}wnw1`- z4-Psqc8SN(QehU9WmV!r2M0V`A4NqZ*W#WXz96(s2+!O|f_aO0GOq0aQFl?G5;6kt zc8WdJ44hz!Tqd^`dj^uQje9|S*Br2MnuAHkA4!vWFzMR%lPG_T1OB~Zw05@$Jp0Ly z8<)otkNj^`-|GkaX}><4(sIR->6##Xzl@^i8FZNzfa`?^sg~aZ8s~a|c`(>O=NbiJ zfQ>wTwc!#DPK(7~&(6Tb&!<`a>=+c!O{Xd`^4M0L3$NWWV9(MR$Q8K8{+;-Vc9q-1 zqVpG^J}U&KXXoPKi!X`))Jih8<}6t!6^TvzVuAZq6=Y?Wf=*u?I1+7Ge=8Q=U(X<0 z($g`zEQeKUR0dVH8FuabffoK2e{}I7wl8jx=avHEn!j zi9@wAt)mU@7>L${W2FV+~6M-?TwJFo~^e|cePst)utO~9|dHKb&o0P}0L z7EIKijcJ3Ul-UqX`e*y3cGrd>Tgr%gKa@H1MW|FuI_Q zW-pb7fQjeu_mRce-B1a*dM0_`FN*4mh4I0s=Ww#EoaFJ4--%y?+)ke}ELuFk(KHES z<|IN4F3ZEwnabqLQ*}mCFB0r}VqwLNPLj6%E@xh6dm{U0vSR})gPc+)C&S)M3N507R9%BRuMXBWtG zBS$jZtdGuo;|mM6ouGyy{-ozu1zq=^Ql;*XxT)zaBh^eAjzs+M;~9`z>^RC@FDp%SS|D<_AaJOX0a)`px9&nUX?)8`8=2rtpX0k$}qNW zI_Bo7!H(2rXeKia;&$hNNsAAhO*%~rFG!IUIZJ7lPAOb&P=nUiVZt|jfGC_UV1HkT z#gzp=sPGqY$aITht+a1I-&H_gqnp$;HkpQqO(RbXC&Ba+0*t3aBF$9ShCm%x+$h^g z9}9@Wpr#}dbhN_!$_mcq^94}l)4_V|QpRbj3M6PwAtp?)$Q0+TbzC5;+FDy1UWW@f5kZX$7qQd!Hz8If?I? 
z58s}I1TT9!D=!fx3zX?O?W-uQ!v~kfMaXi_dUATlCe)(c5GC`RuJGA_D|4;LwH#Zb za!Zk%R9}QEzg)oR*ca^b^+sr36a!C{`tepo7k&|*iI)5uz_>{nO;u`O!;~^$Z#{$; zRwXd%X3Cxj}VD!!T#CUG4LO#$ zO8*e;B`c{*NG6#zG6(Iy$%4z-P3+#gV?^R;HP@N*o_YqXVGrMKWsILJMWyf}vYIFP zGEbyXXTwDDs@{-BVIGkm973lTtKsg-DYSL07-0QX2)$@c(sQ+-(!CM+0&mg9aXO&m z;EkH>7N}xol3TyGlNHh#aQIOo|Y1*jn25uOc>fT z6WP1Zk;=bnLiImtLd`8y{lA5g#y`U7{1v;FKdCw`Z&q8iK)qc{ z*SAIH-i}O5q^_;Ga=3=M_3p#X!=4r`QG*9+5F@OBl)|?!-Huu+C)Y6-2&@y;b zw`IC8r)9H-wRxy!c&L`w|Kd#P|LaU?|67F4e~Qp`3)TCli2ry@`u|8GNj|;_m*zs- zS0@@ZaXYlhSCYs_{7@$y%Vry_r`vK@z&##8&OIbgw2QXEW4#D8USfa`mF0M3)eEzl zGwF-NiqPNBYlTJkl4ngksvo)sWDexQlxH(QID9c;`g8JeJs*zm8^)zWYw!#|Li(y1 z?A1SUbXT+{^*>m_Q`_q(XSM`}{_RDstOG3Rd%!k5^rMdQFS+S_4~T5`TjHd$9cmVu z;MJ$b@KBSFjNaMIhW)4pkH1aK8liLagS7|5@|N)Xm##97Q;gv7r6k<@cmepWdPgQ4 zorOh`6Lg?NTG+2CqXcT$l!8hN@$`@H+o9jhQT*m01mb&@T zJqvD;$8r~2Jl^idSvMuY`|D~lk11z6n$$?_tF8E8k_>p-$3^ZbH$Pxp2j>eWd4&)TSF1=xI zKnC~n$0+`<_gK7(vu=`RI zUGX*%^86COv;P^LXykzNR~*2FvZ;`!SHzmU7QxU20MIkQ)V_IK%% zopG9AeETaY*}H=Ak}bim#WI`;*=wONN(4)7LvXdyHniO4j4@_<_&&%Erp`W31I3q8 zr#ornV$ubAHL-y>6iAYeQ>#(kBLObEtViEB`=Ks%AMtsjLpM4If{vUkv6;+=m3c*s zeZ>KE8<-194_1K8{oVA|(nJVL7AI$77eIfWGz1O(XxZJ6Oqt!ij7fnUoc&o23U9Na z`dTg6mA&Thg-48(l`9AsZ~!Lzz6T>dNB?5dq{DC&naAny#b z?YuU!*3(h=emZ#B=8&8E49e^16T|j6n3vr_eapS6@9iq)+mdax_vHyz$Ycm?^_yvf zojMiQ+dz@oZ zY57Fy{Z(+k=*jwp?}IB5_9VfXps9B>XMFoCQr(;mR!=Vy3HpkL-akzzC_JHG&E7J7 z_Qz=dzUSmqRTBH;)>W`VXB}V#sn4Nq-{B*X`u$X z2~Wod8q?7IyDBXC9YqG-SCP)rW=61Hh=_;WpifNls9AX*p@t=_>kA9gdeV@dbZf@; z@!2F|G!DG~u3)-{@}cK&F8U-^z)a;@Qv7Vl+_yO$1z)67L-$^GujM@|AUi7_*Nj!7On?UYc}kz|pfPx*i)B@@0XFQ8rBPWfIGJOP`aTUb?8i|$*?K9u zrYMqa(IT)y<1rbX(Zf2PN~iBF@~BFx4<7qfiz1su@zTxd(0O_jqfs{r&z@RF7D&$k zC*f!?C6_t7hc05w^8tD?z=D14UIVVjZxV%DF2H=b%BX40ZjDzy$Cf@mO%|wa#_i)G z)Y8KO4m!uvosLCJ#)v6Vs-6cQ3fwU7*A`H;IYLGqcQEo@Hn{ryeRApiO1fK8f!=wO zNhbL2K)c3pSTi8+FYjTXB_b1L19Rivu0`hQCP0dhn53- zn9&30$<3b!2(vB+(l$7gO!sWYx?Ke=BkhQ!i6bo@pb&NZFlDTcvRQX+8Ih^Q?8mK= zP`q4eyxF0=cwV z@WeL?X6&8Ew)l#-rrvI)WSTQ-COP&tkf!Ivkcwhh+-s_}YGmWGS`t 
z3^Ns^GUas7tt%uh(vSSnQ>B(Y!tld+2Qi-zM^>pg@f?4^_6vn{d*gFZa8#!Q>656f znmfE>B!EiEgPbbBXyQIhN;iR<+Y<0^NDLLN{l+=_(wUj_`6OvcPQroFOxQA+=akZ! z_e*2R zWp#e$t^&{1d*KU*SMicCo8GeGj4d7zO@LO(LTbdfn_Mup#Y)a}te<;~%KW_ty1adV zZKM;hr&Gvgoo{&mkr!0YkU}GWM|@>=iZqoN!!H|aQrIH_EyJIwkWn6Odmsn{(r?M7 zwUpHMXF}rbIEWGbOMXoZhOD?|Ht4SmlF$!Ra7zM557-Ax*eIzrhZtlriUDrp27Lh zBn^FAo^YftRI?{12jIswrg%(474D^cW|nlEgM}Fxs1JH@{oO;N))7R5bIUMDvjb0E z7ef_YZE7;}JU#e#5?n1DBv!6dut#_igw%gydpaiJq?@9+@lX}h71hTcK9`KolmJ$R zJz;ON*6b#~Bk0oKO{z?u;j%RwP+Mye+>^5aI|&Pro{&gFrYAu4E(hvSmc`2lwq1-|LFSPN=2Rg;WjJdPH2(=Rg&_lu*mz4*S zh_(B`@u49~ta(M9b84Z;ISq9#y3yaiF4N^P*|bi)fnMO+Vvzbo4DGne4GSuvK~CFX zuG?W~lq`iKjs9@(@-%2xo{Qf$?I#?UWGY)F2oHGw?OLUF)SS8+K1-EB-=`$HQtJkt zoIL}iizYzplqgIM`%H>8UZGym6DHheKOOxWLsK@Vky8zBBx_?9)-7JoOHFv-Hl`BY zbyz0RrJlU=iAS~ei{#}{2G+}{!SL@0Mlv83bgmvJ6Zyiad6g!|_n`)z+nq!etG_|) zcs|>_c#sW!a-WPok7k$IjnF+Zu~fa-i=_5?;?lO`#PM?{5k8Yme0tW?)Y0|Lo9j(c ziY~#KZL>-F?G$*ob_SXlf1(u&Lz(ej{YP#bhcrCvIVWGt+?Jj}MBNZ(B zSrL|64AS^R7R*=2^;Epj8Z>Xl&`#MQCd;RbdQXjHle#98U(ar$_Y_59XTJz%L)a*z&@F*8Hj_RQI)yY^yrOCN54$ieKAae#YInM?+ zQ3J1XHnlMx1pBv6MyiGIWPh&-88omDeBxFjef<;XhbefKy3@*3laZn*|px&CG!aVyCop@YP&(*QRG zm7&k`d)$vSk30QC>_b6$tx!)|`3E}y2LHM%D7q2mEAVOCIrS|X<8z}=)!SJK^^N;v8x0i~vS>oM851F>( zQ%T+nUC=($Nk7LKVLnVFjY{2^T2M^8R~uq$+9mR3mI#dGEJp2zf}Bu;@KD45%_Y{? 
z`3Hs={mT)I|HBbX+(J$N8-|$u14GW8*KB^gRI6EFy^q<ZTWW`zn&Y9lIM=&b9m<=&7aeIr zmf!U4)DDwkg_+IGg`Uklf3upGEVpSgUsGwCIX}8NC|R%BdiiIQ<38%m2453RL(RiO zIsdmy{69#9`L`7EpHjdr)Z%}Th~>XX58jR* zMka9U<3~m;G=~;@H*#*;hOr)Xn!MJ1I-ZiQVcr|1g7WHlH0zNjo$HN6PjoieHOxo- z@{h#wG6&sO>}LKP35I3v89-yqQ1zfP`{l7CeryWIv4_TV-`n+c$r>d(y|EMpSDScg2(zMLuFbn?)6}q8LL!5CuTXue|6+7p~+NNb%66Exq>r&#aXa;=t4C& z9wI0A?kC=XvRE(u3cQ>qQ8HZyI}gS%eCyX>nXNpTe8rA^(Eo;xH+13TUOAMkTfr{) z5KH+#Eulv|t%*(SMUZWojz7k-@O8xkxNh~FC^bi+kVIjNsFxt_+}j0CS2&>7@s2KY zJji`{=r?^aF$HEB0$IL24jImBk{@x9#%>9P4=xe(>Fp=<5_15KCqH6Bo@K$9fB;Ae z76QL=DeBDUIl{rCNj`cDkxa!ulDJJd&sMg%+D=vq_dB;I?`#vOAM(|P^lHAMI%+15P&Dt>TvlfOU zC!o;#!(DWJn)nJx8QOQ3YL zAH4OC;Lep9kYH6xcNMy$zg`hu$qXg0TI0}TR|5C@D-E_`rZm1vc*v$6n@CqLn~ZZl z3(z%j*Vs6TPn;;{Se)T2Mmn`9GnAh~2Q@~C^_!av-}S@Tn|PWUT@#1Eq^UforhyLK zyGars{Uo}<>)FR?Sr`+Tj+q~d@KkXzb8+E*PP218InvY0sw?<$W-95zjRFZgmuQE> zg$HnnTRk~rR8BY3EFu-7ib*p9aL|5$Q&DUVKhj=Pjk_sC)pZG3#`#5`_>__OV<}kI zw*s!n>cQMgftb9!1_WIXv!(Uh8OvxLoF6JfF4@h2vo`Xub0dfLstMw+Gd{5JOEKwj z)+fSeH!&emmzc7G_Yf6OO_rbAh>zCn;Lhcn3j>aiv9DQ9wSF5cAWw=Y-2Y2J3 zelp#_X<}mjju4ToZE&>l9jnoFhW<=ki;5Z#asMQ7nEvAsIb0V5f)6&rikd8RFBd@H z=YeoBy`4T1@5cML^wIaRIP-@S!Ad11`9ecpr*}8L8%mEdg$RMBTrO}Gl6+`oJr395VMm+ z7%zveEdXTCq1lM3gl@U zS=?d_Wrs@XzPe>3%%=hNtcZcLvnGMxaSKq2okugSv~tvHG~wqH1Nt+1GoG)#&z31p z!=o<+nUV|6B<-0fp4YTw2O~BZRvSn?dD`hqw>uNjDpeG&Zgn9i>N43c^^N4lUU|~Kg{mSOb!zdzoA7&xe%k0gtBBWyQljv>-+DpjU>{zCGt~wfLZ@}>Jd@5IW0DJfMkvn?f8bZ{T@nQs@2EgI`in&&11yt6&F(+&(KXtLuB>4!>BJ^O;hTpL-P|C z+VrxQY`z)>5_fMgN3`Oxy6qIbq2@p**@(eTRcU-AB8J~(Zqo~;y5LghKrct?;@Q4D zxaOWuFT0xHpYZ*tp{t0|T7C3@);D@D@DQQ#ZruGHzNGY1F~&s?(u29lU>bITT=V{YGx)Tj2Us9-@*?#E4x&uz-68?u`6q zx0=aAdI^QA-A8atH5>!(m4RFJE6OLi$T6wBsp9<3#=u#ukWY(Z(oX&1a zgSK2r^vOvkI+r%FzRY2oV>X%j*iZ*)hGvj+`W=1o$dtOSI!`B?%c1P6^*Dji<_dGl zNLjuhp6W9PT_Znos4*F>-}{jzA*y8DQIDo=aWfZm9OI13%i#9!sq9zXRI+o=RXQM5 zN+QLz;D}jmbGAb+QeA}@(}b5%9B12jNdx`1Ba%@6NljNyo6Dk~K& ziO;57r>f>2=p~j;Bae5}p0n$*V$L2?=HmngHTC3|q$;geUk|ssw297c738}vO15Nt 
zWpfMWp|ZIGTss@b5!IUkmN$z?zgQw{yrF^t%zmm=_?mbIABMkM8;N+?b6OCw7-S_5 zqeA{69`xs^r?eDddXy%4ua`j`+(huWR3YvXa>7SrcBtn$Y;Jav4{!O~!o*XSP_!q6 zSgW^@ijcq5{X;i)bUI?`?ihCDb2V76euz`AwUA%wK_qj%2##Ih^$Xu$QCAEHn+LV9 zaK%iJZ|;DtM^9q$3Lf(EQ=#=%p|r*=9(+UW@NAY6cqQi1X#-Z+a6Xa5+qpqyx)nV( zT?}j{WTJ|uChE_+K%$5IKy$nj4m~=Eq1pQAuT@OfPe>x}A(R{|I0&N?wnF~Ry=2w5 z#Fhb%MNCo33_LV^7BzZnn2Tl4=&Lb+b&@hDYZyqmrHzmm+zCHptfc1(>S}_$jiF{QR7N?-$pQt>V(~aEb6SVBeBT~joU98B^!&ioCbVcfA`ug&mmNw=a@SPTf388c8 zDX}Wrys85fy99xIe=bJunvBn7mBIdDEokg(rE9FpiD7yK+*Dfu(S2*d)3zL1!en5q zpSM?r3=sqKIyTg~mYj-dW90JM(dThITr8Hv)ciKC-%CCwYW-3spL3X17d=Y6-&}`z z4Zh5N#}Vd!dKsHA^CxlIACD`YD$qGHnaHfT#Oxa9B`?;Fk`VP+Tr%8GVr2VhrsQpE zG7yQS{;F_bOCr{b0;;C@J7 z23931(4PI3Op-r{vtPcZaz(N*EZo*ovn~r3eg?K7`9Af(;((#OT-xfnj?NbRLiKO% zr?sXl&?8!!xm6>GgH2(~y?Oh2R^0^VPqidQcvzBp`9ZRjJA?jS!%I8N-GUoxdvIP? zD)oQGL+nnx{;_B)>b_HiavrLTfAEu#z>TCb`2uX%=uAHzC?+?jG!kidE&h! zfZWKq!M!2+lME#A(eK;i$VlcFjLuyL?o%#7Sy49`PW*@sPLVL}mMq%ZbHGbaj?6zG zk9V@4lNkX6WM)q~_iMvAs~zk_e$7+GcY=pWUH~6aP0|HNuTX_AAmDFj}_WMn4M6RHt8$Z&ew)4?4+5ryg$ikbD2x|F22sHTa5z%el zyuQQ^={+qn?h=CmN7Fz$;0+1oE8sBBUzx*q?l99VH<9qM`Q%sCek@sALaJ|;(hV;B zgfD`F-uvA!<7frF@OU+bT7`#N|KBN*{{|yA|I!58|ImbmZlR0*8;scf14dR`TbS$p zS!CY%H@_)solT2t=FAqw4yzWuyH@7s0v5IWjtBFV3&H%FrK$Pz(-tjnTo<=|tFdmm zsczTu{O6(;-y_B?*1KfPmY?&D%?u4YJDz8fzz=k>bgt9tEP z4)W$MNtJblS94~@ScNmc}hFBJ*B-X^5D;32H%`4Al^debY7-5+|nth*`I9aBj*a* zJB0%@vxGit&;y;13NS(1jC}jP1%IqQO|!J^ak+UPMVLdmopr2a{t~$IeIxCiTuZi2 zoCe)dJBhyiGEj8VfgsL(bahOk+sw+T=ck8M=r2E+>iUv>C}Tv2lAP!XjYf7zN)&@{ zWZ}Bif~?N&H%vlvIIb^org|$EgZkV_c-!_O>odn2<=R!q+1m%%XQGa*?}{E;roDw8 z+B!(it?x&*mJWI-CmDw7!l7kNINdIsiHxr%tdw3x$J8#6k0*e1Jc@>aErrCxSqL?X zgyGgU0v-PPU>y*EclW4a&6|93;zk8MvO^#9@1I6aD1(cPWRQuG#SMSY;RP=NDtwKI1!Igd&#`zZMK6+v+&`a6@zg6d_`LHcyENYk_THFGEDR5U z5+ejV28+o{?oH~fx)pj$^$(MwE%cTRHfc~q2HnRA>b z9`3@2s?yAHKXDNFCJ2Lz!*KH{4lPTMqZ-SWqUZPZa3yXZREeC%&k8SS)!z~JmLG>~ z{gcRkdwGfevN(uU%pqJ^7YWtNY^dH?J>y*%23953?>-m-E_`X*JDY@??)+GAn5D*ZU?H=DMSMeeaSYW00PEz_wbr!^FCmdJjb 
z8KQ(DU;T-TOCMagGY1ZE8z8{qJGrFtjr0$?V_QxYF6y32_8x9wZ|zWprq9<%^q z$GOMJsfopOO;rHMuha#h75lgq%l*jjWmECtlrG|bJr<^_?IG)*l|hN16t2tD!BUTV z+;M|%Y!tr<*laLEXNQBJ!5yaWZ)AgGxima3XlG?v?##t(z%;c?I%+=Yk zQ0?PHGM-hDXN77Yx@;b1tueNj=r)8j%KSB=OJ%N}U$nYkh$fdLO4d`6l2? z`3&x~v?{iVPQ%G z5t>JmsUr1W_wzjWZ@p{1@9+8hTI*W-*n920zsL9Z9N*nW_J90|;#d2F^(6+cZL)^p z@u7I8VH{*`Ou&X0HWQ5-ExPCuzi9XrY14J_6SKH z-vB(XX)JDbyhrTxq#4nwe1X*;Ig*$=O4i%i!s5$v__0km4)}RmFu&s_!iH(&_LgDp zzTp{&o(N#IDV32w7lPdS5^?C8vvi*KdzA0rL>76L3Qh-|A~IoGT<<$ounQX|+2{Mo zOU)zb@T%2NrsOYB(f7e(!rjR8Gy5@PHxV19M!{Z@)3D}zDlC}HlD@Kf(l9U%6$oX7 zqNFGmE1HE((#r6)pm)TmPo2pdX}~% zrtUN1dEh*flDUCR*BRi3(dnc^uZ%dn6T>&#PBUs1aX9|OH%2B=1s)g&A`Qu>g!a(D z%AbA`-|csi7vD}$UwV+(-ZsWEC#>LU=_omAkRq_yX^o4%%)+~LRm?Z4w~@BXN3b}Z zfg8P_kusTiPiRTH0FNofcI}R^tkD~<&`~6Xt#J?- zd5(#>V@rkx!|+;NQCwoBgl~pFgni|e&~@6K8Fso4A4@|3s1(R+655ZQ@@UgHTiEEG zhFyHkg@lqezOrpSaamVN@@7nhH~B(Ru1Xa|ZfhZtsJYm-qk{X8H-tVO+KI;>e8E8Z zZ{lq>7St?$Y&=N7_-QX$mwSic}rhrmc?W|=0yX^_t-_&R^1g;9Hx+=Wjj$k z{F=BX24YjYeK4)%I!PLT4jT;{!tMjl1ZO|5M+YQcp`^#NF}vm+(MWi~%1x1l*bVWd zL3Smc)US-k-Pi;P6ONL;H?oi$ae>@B^$@T0yi9%w+xTOP=itIU4j^M62hCpd@%uGu zMEAx~s(TVEn196vo7YmrVKzcJri-`}aekoor$KNbKpai-vcgS2l^MsovA`ea1KU5Z zA&u#Qc>a;MD0OrSc43^cY`hiqaF|2Av!(Dl>kgvL6G4(kmytU{ns3II75J3Gc36@f zfKP9-06i%OaNBzfsa8gUR%9AlGqZ~BKInqvBOST(UqqM_rzP;Ut_ZfXeHC-z%~cX? 
z@D{IXjm7JPc^%G1^+dWTg&Zyx$I0y*iC=FQ<0>RX>r_+mo5s=p@V z)g|%wb;7KYJHQ1I z$VlfH8u7reE;R>-gqJY~n&&}l%Xe~r${V8EF3U~+k&n+ODPvttVCJm|!fR|xu=hG; z)N=F@iQkfm+YS2AyFePuTuhlBPvQ7v$^^W7M<9_6Loigc9N*cq0Jb%pXOst0(C({h zxb9K8px4+P=Qnqg)r=l!y_$~itPv{lcV9Sn%|uuqu@8E#?1bJ`RgfbV&Fsqb5M2Iw zg}FJ9h10jLz|MH9;5}8%H4BNvvp+h}LBDtiy5$5zM#WImKM!nm+l2UC07Ph;!>L4; zS^Yc{8x*k2i>iBM$f^KLlSD~tWf1gAPb8z3dysv$9Ia6fD7$1Kc{$k<4p?`SwH~v{ z^N-HZ8ZsM?JGKNX*QCL-VkgEz=CZ(8HH#dnsX$wUClRr?r&fSLoI`@kQK+)l){>rKedba^JmV-;#@biyP3Pf*i-Ei%w-gYT~J zBc<*q!TyW{n6*D4fpRRlVk(Oz7LJhSjDsK+d>aM>vLH}>AM_Quq0q3?sC$W9$Y!ZN`{>Ay8E#C+?R%6V`b-U8+z;gQz)HCBM|eHhqxkoF zYgpYMiJDy};8a-=*dn(Fr5-#e=s(kiH$B+`Q(s;pnOUh!;*3k^mEjrG^-dJf+zd3< zrW}8|qy=|9r@|7Ae)8p~BivNJ2OkJ&ZDi5L2{?*Phc**6s1YQAGJQyJcVYpU2r`KE3r`3xSRfSEe*`IyGw_O? zubBm@Ld*Ai0(S2SA@(^_7_n$6n0Jqdk|vZ3uTKZEa(yO9+M`TPobtk3pZnmaW8#Ty z)?plPItj1rJAj{xe&%Kx>?JqKl3}OYMZq0fmXur2Ohx-l;F)}6x@JgY*AqelSuc+p z6k1U8o#Np9f-EAFFoeW5X2PG{Nu+qII(m~gfxO(83|_+V=G5D+MA5sPv2R@uC@7!Q z?wU_jQ?B6hHNiO9$Qd%F=0nBBB!Pj|Inu)_kUKXdk*t#$=El#)W}(n73bHt?ikx?p#`aO^usW|vA`__+DOgvG(g5g@S0IgoVP9@YcH>6MjTk? 
z`1~fcFRemgXd#MM1zs>L@?v zgL^!VKmCz0t@cIkHiPRFdoW&P4^HSN_?o(+x*M0!9+m5Kc#Q+-pXk8- zUDDuT{vJJPI#29IqS4}<@h~IR433Elm!Os23fLnm(9CZj^CCYproD}V`(LkN!Sg{P zUD1H=eRGAMPqMJUVJw=M9Sbv(4A7Qv9!!>fOgAF;6}` zmzYERo|Z8>=RXkf_jYS$)>dD*#Pw~y7 zEzGxx9Oh-ZV!H$tygw}ob#$D7>wETsiQH^dC$Ud(uLz+j{u9YHn|o-#vH@&-T!ih0 zJxGc}FVniKN4OjsKy)^*KsmBg@lgLR@Qy14jdAzz=YeGK@Y_dbb}hh_!v5&l`=y|C z{S<^chljfSZcH`sST=uNo40Th6)HtUL!x=a+nr;5F_n<+c8i;&-gkK_m zgdeP1Wj6JC{DM%A@KDeHPKo@FC9?dVQoQ~y#oH~^=YK2_-+wHTp8Kt&T{i;{MLuH= z8|$JybKA&A+YJ0fdNm|{b0I;uCquUTX=3fMj66ErgJvC%#y!TnNr}4$nzwc;?j0Wj z*X~p?$2Bxy%ra|cLg`;PX|gubOGHS$e;qm@?hk`O*GQSLP3Ye=1uh(o2g#0$Wc;az zB*|(O-XdN{5+o{MmzNQ7{uo2DiY}3W1sZs~n+?n=`%LM#&PL}hCF91T{rIA2Da3yO zCO|kL9l7lZo;jv?&aAo6$nsEemm8euPlK0d<}!M6(d6=XS@4K8g7ox>xF}j2{GOhK zm!*L)Rk%CdcXl5ge4`0nGBM%8Qy!zY9@B+xS{+=VM9@*EK_a*`KxChmAme@R@P1`K zvj1R0-v2s;ax1qXq>}_^R;~l3FHUH^<1k5lAq|$Bza!boQW9-{4KLLQ1-Zs2q$b=P zs2$FDkMAi6U_4R$jUHmVSstu}vee+B10Eb$0g>gqkc%*j;j;T)P#l= z5d)-dd<+K$Ux8WmQACBYg@)UU@qYU#VB~2~Eu9Ij^=T+%O9dIWZ70=^RbXkF3mvOY zqn`@z$;0Am_!b;NR{qJxZdJWFw|aa(x)t%&=b3_7x|9@ecSrSuL4vC9G5B%*NfiHf z2btR(3k!c&1JRBW%(WJW#|u<(tzj+gXEzRlk9I&yRvzqET@JQY`tW5+BYCQ|j~p9J zhS^H`kZ;8>xNsUC49f)c@(;1vstO986nLzE05)oO87R)gzsCGTsWzmUrj+2bwR6;0<6Uw!)-k*X> z5kBniKY=hdh0`}v;bGbfGUw!LQf5hkk-9gGIiv=e39h(vh7Xw?H33^++=Ub0=Rw}u zuZ%O7Lv~AlC9qK(ZyjkT&WrsZPsakPhm`PQ&jj*H7~y5jp1)=57PLa(@id@c4qx=3y2-liof1!CEs^eBiGs({N=ML))Ka^yA|#5 zsqO@_oHc;GCvUUbz44^nio%CSov`W53^E{Eg6ns$#G_BA6LI0Ac+YilA$^fWG>iI( zn)O05DbO6<^UFd{>ca4M^VLw$KN}Y;+=$M7-3SHlE)c%>s^Du}3K-g+#u^W-n0lR5 zG|tfzSsa>1R1c4Vt&`(X!kYx}$?(B`t1;|VC?l#blku|XRAz6MA`aeKgxf7uVR^d) zURGa?-kIi*ORM5xqEHB;?KJs~K9-kaTfQz?yO<-(Y`O%2;(EJ zhXi1zkPZb>Z2=IsahqNPZ>RGP?N6k!Y{6>;td76n|@#sR-<83mtc9A6_##CRgOzv>(- z`7H!jOgu<39FmYX9znNLgt4HP_u+3n+GO*u1n3==2HA7&`0Ah`7UuKe_&wtAqcwyp zIaEPzmLFrd3Dr0Xj2 zlqwM%^71cO`$QK-CMUz3^zY2Knm0I;Ck_`H!Vr0|4(bWR}d+fosExq|a+3W1-qc8b2pMX{4dx z{IC}x#t8B|&w_UFLo#@{8fk_-CG%hJ2jy-lTvTxti5R;>(qOu9p~D?dUULZaKKV0B zO}9{_FbiVF<}?(& 
z3c`5DH0=A=Gm`TomsI?vhBC#P$)S=-VA1c3b-Op?6(*Wk;ZmePa&;~a4gP{gu3MvL zdmrGbj}}3?Y%wXj9f0jP5maP)0Mbn~@Wb-;cthk^(oxDGe`ztutet~z8p>m{YWm&je;o`XvorEui+`3S1p>3bE^gt>8A*lR*RBQsqOitNteif6Src9@U# z?w1R}@`=c-#~HsJaU%B`oXFh`StMjYoAkZvLosLlNm{=@>Ab6j^~~=GN?#mltSj6U@~6vLwy@~I^Ui#HYH zt3$&=!cG^e52$jU<7Js03)2L{VW~tVVF|8zx)oijY$87EhRAV6J*=BK8}c6VvNQ1+`wxmNW%dk zjVY$BiS>vV>~$(bo~~hJ%9mwAOHUHEoIlP;4NqknzrRJr!*OKM=R)W`t3+OPD`C?| z`dIFc4BAiaAdS~lNUdKiz0KhlT9f?_f7N(TUgySO^RIWwa;BTG_R_E+L{!+OpToXB zr63r4n@E&Ifo<(ch`Xr?`pQp`%AX{B@m)8{Z_q)~zn7p&;|W;VU^md z&7|q+Mx+;bSkO1*2x${G;a$^zQ%F`B*Q$DwtBK=~&5Tsy^dkqJ&y;}iJasTo-vFzw z?1E;!deqt{2RYR*kne`CWT^QU>ZI#IL2f+$Lw=LjdFPp~fWN@uWCrp0Ycft+ZVJU{ zAxz+>K~-BG7O7If$Jb{Fr|w6Q`=eq(ps72~>G6f@pKcJjj-y0vygn{7SPBJ~vygAs z8a$;oUoZ<1=6FaPin(BhZ&o#NLpBL0Pje!^KYcUKFVzs_)f|L*Kej{H#ZCBeb{9H5 zeFkJZ&BEpOWuWg-j(Ek(U}0@ObnVn2P7nz%-x=bF)tV4xsfEYfipJKGHeh$OT!2mG z@qDv&V0&vH3AD(=ExA^ZYunA2WbYyC_1)>$1Ko>L3CU!I`Isd2E&C6!4@X~Ck=JeV0(i%xt> zB^RVJi0V&&G};~koL(G`>lnf-O@r`QJwP?A8`gE%hF+c5CT-ugf&0uCNK|qj&aCrA zADx!Mz^Wo-vP=!)rx+9G!6Vpzbt5jGN~7xEu{g0<9W0HVgmIvbe@4RpZ+k}k4%a6PJMh}D3K&;p5UzVDj@37IK0~26d%n{#qOt5 z$o>7#Q0CSRc+Q5kMD$iJtkar?+5I`B)zuVYkL|=Whn@*Kdlo}<_HrPim(f-kPdIOx zhhzINr2?p)z?SI&(f$-v72uZH%EfeOV$@uT;!fmY`AK=`qQ)vxvdI}m8NMa!UmYZ^ zu?!wsc7@38uSHkw{~*z`*9GQ@k>rq;1gtz{2r2bNNIb|DyuMzC2_I9?^SU0Ax|oM% z-zp{zUo6mD$3nENEFW)|c|~I8jsacwX}FLvgJ1hRpgKufxUV^f)M=K$j#@p~#x|fy zpLZ~u6m)QxyaeVg%7EmUX*i|W2eZ!&@R~#^Y=31oD4Bjo39fZyUNr;3KDO|yTMwkP z$0BtlUv9&o0!Y<8MrTLUVC_-?mUEmB0d3o1Xwn~|ka`DADmsIX&Q*bef;r^ZB1b{G zwFtiHQ;R-HO~UUtMUn#kWnsMKKGMIY8!4}~#D04oA;;J1I3-vD!lLG3nY)=tHg7c! 
z^$QPO@!u(t{}?0w|F{V&|8^4s+(HBYV~hm-%NRM{xsYAyN>eM_=CjmuimHvmY|JcM z3ePZP!>Sl+=_Je^^0r_dV{ItiAJ(i?mnLPcVM84|BT2cPTf{m^o3oE5Qk2(sEB4S# zD{8>sf)Z5`21IYetdx%l<=kgOjWM@mOUG)nE;=@pMYa{IKi`g3e_=`48>+COtHMKr z1DC7+e?w6=M}+^wiLCyoj5U9kvDPhg-M>u{xunqb|1w3Sgu(lk?Np?b9J~9P3_n!g zo{Ri&kJTR1;fsZzrp|p0G}Ab>k>|hfCEMIC!L?8s?5#bRR_|HKPQ75yc?`O zvQ4(s)fb)N<$2WB_!=+dLR?K~tfk0JFOi^Uj;r9sZHk~Idt%wiY#pob@oYi9W3rj5 zG{cR4pTaro+tI4-t7v`ye(IywY+B6v7<)upfjhJMA?siDnm_n>8SST%#_rAQ;%%Fz z#Qo){Mjw)~qk8-0IaNJ#dcf14a`)+FZ*?5wU(7aO)5ngmPJ1afYgsNYdagOYEc$lc zygT>n%oWd2oh`X^_1o9%!a)P>(>4YAuC6mzv9X78UZT#;kD1T?KCqZ>`8=~OzIH6f z*AL{9D+Fwfrvx{rZV{J!t%Tz27SZG1lv7VT*6;?qr*Jc>zVYp8N$xk5LzgJNr~KIh ze)8Nf_R2g3dd9aBN`6H$wSI0Otq>wh53J>QLkgC3+OOSILdG@LGeevuNjGJDt%{mo6~fl;`bt@8n^9haiu97E zN=oAOCiY#QHrE{?LWMQiQ?+3Zw4$FJ9TtC(O%b2VEiIzCC)4kg`7Qq8m*Jm%Qnv%p)}e&&APXJ zo*bsnt+ljd>npGDJl6TL(z3d>47 zxWqrPAaR>Yl@r|nuJ<%kcJ7bq?v29yN(tYA!F#q188gn5Rwq1}@>B(wsuwShj#w zEPl$)2$QB)d1`R?+7#%%b;l|F)yAB3Ya{>2z3J4^`a{&wi3R-3CHvS5lY97;0}0e| zZwKmrv_GpRcAtH!eTKR)DnYF}_LJ=&m8WKJzee3iA4~7IxJ*5ok;?w;t6spQ>r-_F z5&Vyo2|G=A&s5Qp^a>LZ?#H9^tijZkY#H~By;u8){UE&Wo>`8t%iOi;PG?0vFCm%o zmmK7M5NoAc47PJKU*@q!UmsHK2ikbXa>}gXsc5QKR-XPIw8!k-$PTu?^(2)s?J>`H zlL=R7yq)qM;CL>o32bpxF*|&(jm^~E$5+%8P>1hspqf5b^2EJXQq$VJ>k1_|voQrW zl+9gWZC`9-_upGocXo3s|LZhEUTWfV>S5DfcCG1Rdd=lAT%w8%bx`RTb;_ieHoKI{ z{t_Igo%F4_kvF%giF+mK?1DUMiNXl49x(0lOOKT_YNkF2#?cjt>HMX)<+tMD-MD&}lJIv}s8>Rdmst?Nn6eHag1FHuo@?G*iD3cTp7TIcbuhe6{hgEP7>o5dYjP` zebwkV^_05KJNn$Ev+msSASJeRdna|NbPPA;T0gbmaxho9=q@!{)WVy2b2ZNa0;$!r z!&&d#W%Oj12);O9l-s1wvQK9yv%gf*D5Z~*tjcvBT~|Awp1RAJI|FL0D={I1>-3vI2}W_rtt__MKx4sa80mQd@)mhzNZzOo9&&-l-sfABIKwy-;e zVcek`!b3y;H{$efE%C2?;*I|(iA{eiiOp`ITmDuOa*3g#e+wdu6C*+V_j9lD)x<>k z7p!x5qFWK4E_hzIWl{5vxG|I#$!{~Srg-$$~|Ep+?8HH}D8=#GDhGhU!4_aQEgVl`Is z{W@k*O5XFhgERf8Q%w?lzbB$}{9Z$@;KLVo+jM7Y;=nKVSCKZ=yXzGdGs}Yi_+TYF zc4jbVrsGUqysgez??dzol?ckpIFX*FN3k6lg}kQS0xIE%2-obX%ln}6nKG(T=dz~7 zuw%XwO4VZ>FElbdbZ6l5ssF8(f1T4^|5Ot7cO|>sLihY#iAZ8-^xt)ey*p+>g~o)3 z#s)@gjnEfBw7Ff delta 147 
zcmdnOc!_a>yn;oVxp}H(a*Cm;p}9$-p;3~dxn*jqfw_fIVv>P{f#F1Z?TicT5nK!m z3?-SlsqrQGC5btOtnLnOO26I#1ta)?L~221aehi_F;te#n=yhHC|sOamYNcuSR9{{ onOsuH-WkCS6f8=tj4w+qD$dN$E98h^1&SExS?C!Qa+c}=0CRRKh5!Hn diff --git a/tests/data/rllib_data/single_agent/params.json b/tests/data/rllib_data/single_agent/params.json index 0e508b4e9..c5e605ef4 100644 --- a/tests/data/rllib_data/single_agent/params.json +++ b/tests/data/rllib_data/single_agent/params.json @@ -1,32 +1,32 @@ { "batch_mode": "truncate_episodes", "callbacks": { - "on_episode_end": ".on_episode_end at 0x147eb0400>", - "on_episode_start": ".on_episode_start at 0x147e97e18>", - "on_episode_step": ".on_episode_step at 0x10be8fea0>", - "on_train_result": ".on_train_result at 0x147eb0510>" + "on_episode_end": null, + "on_episode_start": null, + "on_episode_step": null, + "on_postprocess_traj": null, + "on_sample_end": null, + "on_train_result": null }, - "clip_actions": true, + "clip_actions": false, "clip_param": 0.3, "clip_rewards": null, "collect_metrics_timeout": 180, "compress_observations": false, "custom_resources_per_worker": {}, - "eager": false, - "eager_tracing": false, "entropy_coeff": 0.0, "entropy_coeff_schedule": null, - "env": "SingleStraightRoad-v1", + "env": "WaveAttenuationPOEnv-v0", "env_config": { - "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"lead_obs\": true,\n \"local_reward\": true,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"reward_after_exit\": true,\n \"sort_vehicles\": false,\n \"target_velocity\": 18.0,\n \"terminate_on_wave\": false,\n \"wave_termination_horizon\": 1000,\n \"wave_termination_speed\": 10.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": false,\n \"evaluate\": false,\n \"horizon\": 2000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 0\n },\n \"env_name\": \"flow.envs.straightroad_env.SingleStraightRoad\",\n \"exp_tag\": \"singleagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n 
\"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 500,\n \"ghost_speed_limit\": 25,\n \"lanes\": 1,\n \"length\": 2000,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": false\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": \"23.0\",\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1944,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": \"23.0\",\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 216,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.5,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 0.3,\n \"b\": 2.0,\n \"noise\": 0.5\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n 
\"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 1621\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"max_accel\": 1,\n \"max_decel\": 1,\n \"ring_length\": [\n 220,\n 270\n ]\n },\n \"clip_actions\": false,\n \"evaluate\": false,\n \"horizon\": 3000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 750\n },\n \"env_name\": \"WaveAttenuationPOEnv\",\n \"exp_tag\": \"stabilizing_the_ring\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"lanes\": 1,\n \"length\": 260,\n \"resolution\": 40,\n \"speed_limit\": 30\n },\n \"inflows\": {\n \"_InFlows__flows\": []\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": 
\"RingNetwork\",\n \"sim\": {\n \"color_vehicles\": true,\n \"emission_path\": null,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": false,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.1,\n \"teleport_time\": -1\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"noise\": 0.2\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 21,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n 
\"lane_change_mode\": 512\n },\n \"num_vehicles\": 1,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"rl\"\n }\n ]\n}", "run": "PPO" }, "evaluation_config": {}, "evaluation_interval": null, "evaluation_num_episodes": 10, - "gamma": 0.995, + "gamma": 0.999, "grad_clip": null, - "horizon": 2000, + "horizon": 3000, "ignore_worker_failures": false, "input": "sampler", "input_evaluation": [ @@ -40,27 +40,23 @@ "inter_op_parallelism_threads": 8, "intra_op_parallelism_threads": 8 }, - "log_level": "WARN", + "log_level": "INFO", "log_sys_usage": true, "lr": 5e-05, "lr_schedule": null, - "memory": 0, - "memory_per_worker": 0, "metrics_smoothing_episodes": 100, "min_iter_time_s": 0, "model": { "conv_activation": "relu", "conv_filters": null, - "custom_action_dist": null, "custom_model": null, "custom_options": {}, "custom_preprocessor": null, "dim": 84, "fcnet_activation": "tanh", "fcnet_hiddens": [ - 32, - 32, - 32 + 3, + 3 ], "framestack": true, "free_log_std": false, @@ -80,17 +76,13 @@ "policies_to_train": null, "policy_mapping_fn": null }, - "no_done_at_end": false, - "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 1, - "num_workers": 1, - "object_store_memory": 0, - "object_store_memory_per_worker": 0, + "num_sgd_iter": 10, + "num_workers": 2, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -125,7 +117,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 2000, + "train_batch_size": 60000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git a/tests/data/rllib_data/single_agent/params.pkl b/tests/data/rllib_data/single_agent/params.pkl index 60cfcb075c7ed028b5ef23a05739e2fb931e1abd..511d3434300e6270d326503ee9bcfce6f9127b1c 100644 GIT binary patch delta 778 zcmZ`%OKTHR6z)vY2a`u*YfS=5+i9znz9z8`T5wTBA+$}455$U0Zzs2%sgrw$xpz_t 
z7Tk$4i0PSv8*$N{U>C-Pd-rYx(Z3)lxGIVZ?=)SsMepwWzVm(O+{1Z#>sdbeD7q8L zHXXdJU=XxSHd-iNTZDy7A1eCol<3&87%gF)aqI*|KY4`0q$HSxU>GVpHr5+N=;AK2 zi0)$M*<93g3d*63J1#XaW1+ke6yZ<*Xl;8X5}AwC_P@h#X}EX)l8v zVBvktk2AW)^+N{k$i4Dz3LeUr;g=jQ<%X4>BTp@(2YLw`*j8tHPHL`2)^!_`b#4mv z?lEJSkTySa&$tv1ka<>+SG)0VBcyOR7@&5L1ip*)!V3oXF12j zL)@ZdY4IwlK&tt4Cr7K6ZB@f5xQX>}ocd(*O$olVs6CS>daGi2)b_#z5jw6xwzV7X zwax-l<}=K|1i2Os+s#wz+&IMWL+MUduxS}KWo42i$dF5q$@v_hbU(aGbtbP+!Y$%ak6ou8hrO=x`rD4Z zsEGN01-wX)PpAL2hmndF_;JIw9B~|*$SpB(9Sw|(@UJX*0+GxpEDsdU#)wzeH>rCc zyDXTK{k9OVJDvbduyetxb&qzX8ke?;xMAwds^VZ_OD*Q&T${NBpU!`% bFAr`=9f}{bU7Z;wE_pT%ie?r*Uwro$+&}(6 literal 10890 zcmeHN&2J>fb?+Vy$>EU8kL60TR*JwheBe6PfaIR$fbFx-aZUmfz^5E+z(@ce>^~vDSJge!GbDEv z*zh3`7t(Z9y?XWPy;twOdiB2F{HMSDhN1o*7VC08%ma29CHp){i=HW0sm;TKl*K$L z2dIA~wS(B_Y}d{l!BX!5FM9tb+qxM46;tc5&m4-~OWg zTKq4+kh3C6ya!-B32GsY`WdTwK9!4Lpc{pp*%>s3cGrgDFvzU$4U|0R`+juDVjG)G zi@*N%GlubDdkudU;H+2IbVGI6uImw1kcI{y8&)7bw_O(PrWk>u@oy?jG(vxtAAw&b zpHGJ%;n@zPh2gEv)rt8u>T*4LrSrMi?$j*l&S}e!+;!=lC^4%H=Uo&K4w-!IJ{TzEZNxL;7b8C!DEA;|xdD4u>L|Kg( z9HAhOae8@7#Au33YDuG5h_i$Ni?psoHmsA9#Tke!iJ&{^>D=|gfzR)j-FMJ!=@l$~ zKW3Rdp!ul+vIjhbx?}pGeN;htA=K0+6?&TT*4elVyC{H^sIF~@y91u8)0ud?IjKLb zZu@?@npSK(JPC9YM+}r4)sR77*uW;9)t7(~&+-ITS|i`6Ynqjcyx;dvlTRAkj;7bj zL$4ntL7k-g8?=g2n2P3bn5B&{*xkHW!jhSzC?u?pb*Ap!!Rzws%$59V%{4?NPAiQNvzT@Evv-2zVlWRd5q z39y2qUrR&YEiHkyb-A-~7HGLst2`6zY`~DL%D1jwnG|KUvQ-^b&kd?0#x$SK#tOrl z-sZ{MQJ&DwzkTJ(xX6ac53-|J?OWu59a<-NS`$t+=l?mI6MvG+n`a4moXcCA6OOWC zm^EZ_;-A9dF`nNWwrZTWq7-qAt&V({`~FClR>l!(YR5q%YIe=gP_(MFn#?>554F=y zsg#|!wZY!e{pBvnuVBhZ%9a!PQNnhQv{}K}r1wJ5WbY#21)%SxFrRdQCxLk2r8IWN zMqoG{g#k3gJgS!pk0|@8Dg7N*BxLLE9R)PSK)3Kc7P(slyq9QNEZ|Kmm zoWPb+40GoER7WTi6s7`kWn#NtewtKL+U;n#?kY$T6e~f;Mm@}f()pa&!4MALwH#uFO zHaayGtpGF{^5)lZ`&0V3d8TkPI|m+&j|q}c)C_fnSuf4#j)_TIxRp`hAq@ zD>GLa2>tt&nH%xLs-K!MH9y6aTwDV)tb^8$?JoAU*M66p9~F0s zb@7^PbrE%pdU|Hh=rwv%y}RAlj}6y+-gsyZjAL`pa2wZ*@@s1N)f|3J55H!v8Ln~H zDAsr6Y)LRSV;|skfK&{G4ZoRvfR~ImwRy;s!BHV+sS(73;n!Y)ozCH!kT6{HY!EJ+ 
z$Y(@_cEmhz2ZCq62~tJ~+z?t)>q%y#*&xO6GQHq~PcL%`weBlE}@#MWXerHOC) z4Cw7df8vFNAZmE?ot+)RBG;b4ZPvH(5FpR9KJt<3!jbX}Qp$+$;{At2gZL})s$5{~ zgWUEhNkhwg17JwJB14+81ec)Q&NY>8Phanx6!#Kib{Iy$N_S>Vhw3gewtwjIzMcCS zbwMK;vhB5Y8~cmgTy0CdCucDnISTSQ+---t!_s_L!_m`@oM&v{N4S{jb@GpkkMC^A zr7@y-T!``cjgBzj7;y%?9Dzh-ZUPW4;yM^wl>l#N2bCnYOGZo8=o~-8yU{Z z=Jwid8zV-!()@Abkx6S|wI)Uvj+>9n^gp5T=iGTvzesJ;sk9bRT=y4A@iqgd~mDnyS zo;P~U-fH(Mj-_$0^APOaGxp5A#xXYI8vD};7_54xs0ZK8{n?~>Z2n{8gC-7W&-e&- z#oWhHD`QCC2?O)Yq6Fa|1*t3TS#%ePTJk> z*A#|j!l3nNh>3It$i!zQn$8%S8{+1TH-4v|z-4m)vkG%9-pQb5=SZ3I@G!CCn4CZx znZGh|RmfF#16P*#d-N`pzNbJEunmu-c4}uCz9Bt1ODH^-JO+Is^Qsz+FosJ|&$uF& z(_R|lL08<8Gqe&p-Q@sAUB7(|;8N+adv(+m@PWM-!9K#ztcxA-mO2d@^-Q^j_)NC! zgYsNJe{PF#`vtRWk+WHbvzftv7pGoqUNkQ@Uc&dX`4Y~-vU$-2vv3&5e~5QWM+b)MD-%X z81~K#duMCCtzqxnFbIJWy)JHux8?a!n4u~#V95$`Q!bSO$Efd}s&8-DN2>9;_*ZCE z_0%8mEb*MQQW{!ADaIGuubAECN-;<|p2fd15~=IJe^om01$1EjOX+~(@>wbX`hK3) z{Im*;5UHk!4C=Dc!QG3MY`|Ga$8ES%KLDRiIAjXlW;*clC*uxl+Eeng?=4uZfHu#9<>;8*b$l$~r14jUX(>zc13UAUz; z&8|^$Uv58<%XNm;y^+RrvZwe2VG`aUxQ((Mx-@{O2B#m`hu>~@VHum1;N^FIkEIL5 zpK`XUB$A8On!1Nv@s9W(WsiTo{RaG*(P+UUom9EK83bpQ*8TdamZ#2-6P8E(cV&9C z%H{G|ZxDhRWnmKnV$aW^eYo44QfB)) z3uT{!cmIrLPIYhpC2GqsL!(gjMWY#ybNP%e>p5j3dw&8o>9zJ=Qa^yC01uK(sYIw^ z7RmKFdcfmOh7u}e3)TA$GLJ>?r}ARemj+RkQH4fLI<7nqD-odvM~j$F4K)r1vc-i7LBuP$#EA$}@wqJJnD1 zb7TF6e4(u6!jADAjzX~dI9NNvhjYl>1j*)v%J^miY$-XSrB|cfJ>m-r%5We?{VV#j zju6lJb0u@_dZajuhX(VB#j9my z3UUMeK6t6i4GlR1xzQ3hRQd}yj#BE7avtb{KRE>z02v`aBxS%V7fNU<@=|wah43AO zfKA!P!yQU`D5#_RuWZGU?>X=Tq@gRSafG{f9FunRL#(Yk0l~6}tODZ4om|eUiV&MD z$Bee?R7a=E=-RTi?}Jp{@1yVM|4e_M$@A&JWl$HdUQ+pecnn>YLxqMpy*p9DW`PU` z!ISck9Or#-#SxsY%~uu`a9^dsnhwN4e`p2Npd^ZIwS# zPb47tKQ603wL*R1yI(J3XsTCq=?OXpcjmBFUE_wS#e-ko`250$avATVR6RPXHvBa2 F{4W_Wq{{#R diff --git a/tests/fast_tests/test_environment_base_class.py b/tests/fast_tests/test_environment_base_class.py index ee815393c..b5c6cbc17 100644 --- a/tests/fast_tests/test_environment_base_class.py +++ b/tests/fast_tests/test_environment_base_class.py @@ -13,9 
+13,8 @@ from tests.setup_scripts import ring_road_exp_setup, highway_exp_setup import os -import gym.spaces as spaces -from gym.spaces.box import Box import numpy as np +import gym.spaces as spaces os.environ["TEST_FLAG"] = "True" @@ -26,41 +25,6 @@ YELLOW = (255, 255, 0) -class TestFailRLActionsEnv(Env): - """Test environment designed to fail _apply_rl_actions not-implemented test.""" - - @property - def action_space(self): - """See parent class.""" - return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover - - @property - def observation_space(self): - """See parent class.""" - return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover - - def get_state(self, **kwargs): - """See class definition.""" - return np.array([]) # pragma: no cover - - -class TestFailGetStateEnv(Env): - """Test environment designed to fail get_state not-implemented test.""" - - @property - def action_space(self): - """See parent class.""" - return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover - - @property - def observation_space(self): - """See parent class.""" - return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover - - def _apply_rl_actions(self, rl_actions): - return # pragma: no cover - - class TestShuffle(unittest.TestCase): """ Tests that, at resets, the ordering of vehicles changes while the starting @@ -347,34 +311,28 @@ class TestAbstractMethods(unittest.TestCase): """ def setUp(self): - self.env, self.network, _ = ring_road_exp_setup() - self.sim_params = SumoParams() # FIXME: make ambiguous - self.env_params = EnvParams() + env, network, _ = ring_road_exp_setup() + sim_params = SumoParams() # FIXME: make ambiguous + env_params = EnvParams() + self.env = Env(sim_params=sim_params, + env_params=env_params, + network=network) - def test_abstract_base_class(self): - """Checks that instantiating abstract base class raises an error.""" - with self.assertRaises(TypeError): - Env(sim_params=self.sim_params, - 
env_params=self.env_params, - network=self.network) + def tearDown(self): + self.env.terminate() + self.env = None def test_get_state(self): - """Checks that instantiating without get_state implemented - raises an error. - """ - with self.assertRaises(TypeError): - TestFailGetStateEnv(sim_params=self.sim_params, - env_params=self.env_params, - network=self.network) + """Checks that get_state raises an error.""" + self.assertRaises(NotImplementedError, self.env.get_state) + + def test_compute_reward(self): + """Checks that compute_reward returns 0.""" + self.assertEqual(self.env.compute_reward([]), 0) def test__apply_rl_actions(self): - """Checks that instantiating without _apply_rl_actions - implemented raises an error. - """ - with self.assertRaises(TypeError): - TestFailRLActionsEnv(sim_params=self.sim_params, - env_params=self.env_params, - network=self.network) + self.assertRaises(NotImplementedError, self.env._apply_rl_actions, + rl_actions=None) class TestVehicleColoring(unittest.TestCase): diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index 0b385f28a..336c17bf8 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -26,7 +26,6 @@ flow_params as multiagent_traffic_light_grid from examples.exp_configs.rl.multiagent.multiagent_highway import flow_params as multiagent_highway -from examples.simulate import parse_args as parse_simulate_args from examples.train import parse_args as parse_train_args from examples.train import run_model_stablebaseline as run_stable_baselines_model from examples.train import setup_exps_rllib as setup_rllib_exps @@ -60,36 +59,6 @@ class TestNonRLExamples(unittest.TestCase): done to the functions within the experiment class. 
""" - def test_parse_args(self): - """Validate the functionality of the parse_args method in simulate.py.""" - # test the default case - args = parse_simulate_args(["exp_config"]) - - self.assertDictEqual(vars(args), { - 'aimsun': False, - 'exp_config': 'exp_config', - 'gen_emission': False, - 'no_render': False, - 'num_runs': 1 - }) - - # test the case when optional args are specified - args = parse_simulate_args([ - "exp_config", - '--aimsun', - '--gen_emission', - '--no_render', - '--num_runs', '2' - ]) - - self.assertDictEqual(vars(args), { - 'aimsun': True, - 'exp_config': 'exp_config', - 'gen_emission': True, - 'no_render': True, - 'num_runs': 2 - }) - def test_bottleneck(self): """Verify that examples/exp_configs/non_rl/bottleneck.py is working.""" self.run_simulation(non_rl_bottleneck) diff --git a/tests/fast_tests/test_files/i210_emission.csv b/tests/fast_tests/test_files/i210_emission.csv index ec63cf9cf..d43c115a4 100644 --- a/tests/fast_tests/test_files/i210_emission.csv +++ b/tests/fast_tests/test_files/i210_emission.csv @@ -1,4 +1,4 @@ -x,time,edge_id,eclass,type,PMx,speed,angle,CO,CO2,electricity,noise,lane_number,NOx,distance,route,y,id,fuel,HC,waiting +x,time,edge_id,eclass,type,PMx,speed,angle,CO,CO2,electricity,noise,lane_number,NOx,relative_position,route,y,id,fuel,HC,waiting 485.04,0.8,119257914,HBEFA3/PC_G_EU4,human,0.05,23.0,119.74,3.32,3793.12,0.0,70.29,1,1.17,5.1,route119257914_0,1068.18,flow_00.0,1.63,0.11,0.0 500.91,1.6,119257914,HBEFA3/PC_G_EU4,human,0.0,22.84,119.74,0.0,0.0,0.0,69.9,1,0.0,23.37,route119257914_0,1059.12,flow_00.0,0.0,0.0,0.0 517.1,2.4,119257914,HBEFA3/PC_G_EU4,human,0.15,23.31,119.74,78.83,7435.5,0.0,71.61,1,2.88,42.02,route119257914_0,1049.87,flow_00.0,3.2,0.54,0.0 diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index 5fccdcb3b..2263f3474 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -5,11 +5,8 @@ from flow.networks import 
BottleneckNetwork, FigureEightNetwork, \ TrafficLightGridNetwork, HighwayNetwork, RingNetwork, MergeNetwork, \ MiniCityNetwork, MultiRingNetwork -from flow.networks import I210SubNetwork from tests.setup_scripts import highway_exp_setup -import flow.config as config - __all__ = [ "MultiRingNetwork", "MiniCityNetwork" ] @@ -136,7 +133,7 @@ def test_ghost_edge(self): self.assertEqual(env.k.network.speed_limit("highway_0"), 30) # =================================================================== # - # With a ghost edge (300m, 25m/s) # + # With a ghost edge # # =================================================================== # # create the network @@ -147,37 +144,7 @@ def test_ghost_edge(self): "speed_limit": 30, "num_edges": 1, "use_ghost_edge": True, - "ghost_speed_limit": 25, - "boundary_cell_length": 300, - }) - ) - env.reset() - - # check the network length - self.assertEqual(env.k.network.length(), 1300.1) - - # check the edge list - self.assertEqual(env.k.network.get_edge_list(), - ["highway_0", "highway_end"]) - - # check the speed limits of the edges - self.assertEqual(env.k.network.speed_limit("highway_0"), 30) - self.assertEqual(env.k.network.speed_limit("highway_end"), 25) - - # =================================================================== # - # With a ghost edge (500m, 10m/s) # - # =================================================================== # - - # create the network - env, _, _ = highway_exp_setup( - net_params=NetParams(additional_params={ - "length": 1000, - "lanes": 4, - "speed_limit": 30, - "num_edges": 1, - "use_ghost_edge": True, - "ghost_speed_limit": 10, - "boundary_cell_length": 500, + "ghost_speed_limit": 25 }) ) env.reset() @@ -191,7 +158,7 @@ def test_ghost_edge(self): # check the speed limits of the edges self.assertEqual(env.k.network.speed_limit("highway_0"), 30) - self.assertEqual(env.k.network.speed_limit("highway_end"), 10) + self.assertEqual(env.k.network.speed_limit("highway_end"), 25) class 
TestRingNetwork(unittest.TestCase): @@ -254,150 +221,6 @@ def test_additional_net_params(self): ) -class TestI210SubNetwork(unittest.TestCase): - - """Tests I210SubNetwork in flow/networks/i210_subnetwork.py.""" - - def test_additional_net_params(self): - """Ensures that not returning the correct params leads to an error.""" - self.assertTrue( - test_additional_params( - network_class=I210SubNetwork, - additional_params={ - "on_ramp": False, - "ghost_edge": False, - } - ) - ) - - def test_specify_routes(self): - """Validates that the routes are properly specified for the network. - - This is done simply by checking the initial edges routes are specified - from, which alternates based on choice of network configuration. - - This method tests the routes for the following cases: - - 1. on_ramp = False, ghost_edge = False - 2. on_ramp = True, ghost_edge = False - 3. on_ramp = False, ghost_edge = True - 4. on_ramp = True, ghost_edge = True - """ - # test case 1 - network = I210SubNetwork( - name='test-3', - vehicles=VehicleParams(), - net_params=NetParams( - template=os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml" - ), - additional_params={ - "on_ramp": False, - "ghost_edge": False, - }, - ), - ) - - self.assertEqual( - ['119257914'], - sorted(list(network.specify_routes(network.net_params).keys())) - ) - - del network - - # test case 2 - network = I210SubNetwork( - name='test-3', - vehicles=VehicleParams(), - net_params=NetParams( - template=os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml" - ), - additional_params={ - "on_ramp": True, - "ghost_edge": True, - }, - ), - ) - - self.assertEqual( - ['119257908#0', - '119257908#1', - '119257908#1-AddedOffRampEdge', - '119257908#1-AddedOnRampEdge', - '119257908#2', - '119257908#3', - '119257914', - '173381935', - '27414342#0', - '27414342#1-AddedOnRampEdge', - '27414345', - 'ghost0'], - 
sorted(list(network.specify_routes(network.net_params).keys())) - ) - - del network - - # test case 3 - network = I210SubNetwork( - name='test-3', - vehicles=VehicleParams(), - net_params=NetParams( - template=os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml" - ), - additional_params={ - "on_ramp": False, - "ghost_edge": True, - }, - ), - ) - - self.assertEqual( - ['119257914', 'ghost0'], - sorted(list(network.specify_routes(network.net_params).keys())) - ) - - del network - - # test case 4 - network = I210SubNetwork( - name='test-3', - vehicles=VehicleParams(), - net_params=NetParams( - template=os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml" - ), - additional_params={ - "on_ramp": True, - "ghost_edge": True, - }, - ), - ) - - self.assertEqual( - ['119257908#0', - '119257908#1', - '119257908#1-AddedOffRampEdge', - '119257908#1-AddedOnRampEdge', - '119257908#2', - '119257908#3', - '119257914', - '173381935', - '27414342#0', - '27414342#1-AddedOnRampEdge', - '27414345', - 'ghost0'], - sorted(list(network.specify_routes(network.net_params).keys())) - ) - - del network - - ############################################################################### # Utility methods # ############################################################################### diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index 7e1405007..a37b235ff 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -33,7 +33,7 @@ def test_speed_lane_change_modes(self): speed_mode='obey_safe_speed', ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="no_lc_safe", + lane_change_mode="no_lat_collide", ) ) @@ -56,7 +56,7 @@ def test_speed_lane_change_modes(self): self.assertEqual(vehicles.type_parameters["typeB"][ "car_following_params"].speed_mode, 0) self.assertEqual(vehicles.type_parameters["typeB"][ - "lane_change_params"].lane_change_mode, 
512) + "lane_change_params"].lane_change_mode, 1621) vehicles.add( "typeC", @@ -89,7 +89,7 @@ def test_controlled_id_params(self): speed_mode="obey_safe_speed", ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="no_lc_safe", + lane_change_mode="no_lat_collide", )) default_mingap = SumoCarFollowingParams().controller_params["minGap"] self.assertEqual(vehicles.types[0]["type_params"]["minGap"], @@ -336,7 +336,6 @@ def test_no_junctions_highway(self): "num_edges": 1, "use_ghost_edge": False, "ghost_speed_limit": 25, - "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -407,7 +406,6 @@ def test_no_junctions_highway(self): "num_edges": 3, "use_ghost_edge": False, "ghost_speed_limit": 25, - "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -477,7 +475,6 @@ def test_no_junctions_highway(self): "num_edges": 3, "use_ghost_edge": False, "ghost_speed_limit": 25, - "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() diff --git a/tests/fast_tests/test_visualizers.py b/tests/fast_tests/test_visualizers.py index d2f4a20a4..7af413909 100644 --- a/tests/fast_tests/test_visualizers.py +++ b/tests/fast_tests/test_visualizers.py @@ -91,226 +91,236 @@ def test_capacity_diagram_generator(self): np.testing.assert_array_almost_equal(std_outflows, expected_stds) def test_time_space_diagram_figure_eight(self): + # check that the exported data matches the expected emission file data + fig8_emission_data = { + 'idm_3': {'pos': [27.25, 28.25, 30.22, 33.17], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['upper_ring', 'upper_ring', 'upper_ring', + 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_4': {'pos': [56.02, 57.01, 58.99, 61.93], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['upper_ring', 
'upper_ring', 'upper_ring', + 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_5': {'pos': [84.79, 85.78, 87.76, 90.7], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['upper_ring', 'upper_ring', 'upper_ring', + 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_2': {'pos': [28.77, 29.76, 1.63, 4.58], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.97, 2.95], + 'edge': ['top', 'top', 'upper_ring', 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_13': {'pos': [106.79, 107.79, 109.77, 112.74], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.96], + 'edge': ['lower_ring', 'lower_ring', 'lower_ring', + 'lower_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_9': {'pos': [22.01, 23.0, 24.97, 27.92], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.97, 2.95], + 'edge': ['left', 'left', 'left', 'left'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_6': {'pos': [113.56, 114.55, 116.52, 119.47], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.97, 2.95], + 'edge': ['upper_ring', 'upper_ring', 'upper_ring', + 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_8': {'pos': [29.44, 0.28, 2.03, 4.78], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.84, 1.76, 2.75], + 'edge': ['right', ':center_0', ':center_0', + ':center_0'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_12': {'pos': [78.03, 79.02, 80.99, 83.94], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['lower_ring', 'lower_ring', 'lower_ring', + 'lower_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_10': {'pos': [20.49, 21.48, 23.46, 26.41], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['lower_ring', 'lower_ring', 'lower_ring', + 'lower_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_11': {'pos': [49.26, 50.25, 52.23, 55.17], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['lower_ring', 'lower_ring', 'lower_ring', + 'lower_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 
'idm_1': {'pos': [0.0, 0.99, 2.97, 5.91], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['top', 'top', 'top', 'top'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_7': {'pos': [0.67, 1.66, 3.64, 6.58], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.97, 2.94], + 'edge': ['right', 'right', 'right', 'right'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_0': {'pos': [0.0, 1.0, 2.98, 5.95], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 1.0, 1.99, 2.97], + 'edge': ['bottom', 'bottom', 'bottom', 'bottom'], + 'lane': [0.0, 0.0, 0.0, 0.0]} + } dir_path = os.path.dirname(os.path.realpath(__file__)) + actual_emission_data = tsd.import_data_from_emission( + os.path.join(dir_path, 'test_files/fig8_emission.csv')) + self.assertDictEqual(fig8_emission_data, actual_emission_data) + + # test get_time_space_data for figure eight networks flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/fig8.json')) - emission_data = tsd.import_data_from_trajectory( - os.path.join(dir_path, 'test_files/fig8_emission.csv'), flow_params) - - segs, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_segs = np.array([ - [[1., 60.], [2., 59.]], - [[2., 59.], [3., 57.02]], - [[3., 57.02], [4., 54.05]], - [[1., 23.8], [2., 22.81]], - [[2., 22.81], [3., 20.83]], - [[3., 20.83], [4., 17.89]], - [[1., 182.84166941], [2., 181.85166941]], - [[2., 181.85166941], [3., 179.87166941]], - [[3., 179.87166941], [4., 176.92166941]], - [[1., 154.07166941], [2., 153.08166941]], - [[2., 153.08166941], [3., 151.10166941]], - [[3., 151.10166941], [4., 148.16166941]], - [[1., 125.30166941], [2., 124.31166941]], - [[2., 124.31166941], [3., 122.34166941]], - [[3., 122.34166941], [4., 119.39166941]], - [[1., 96.54166941], [2., 95.54166941]], - [[2., 95.54166941], [3., 93.56166941]], - [[3., 93.56166941], [4., 90.59166941]], - [[1., -203.16166941], [2., -202.17166941]], - [[2., -202.17166941], [3., -200.02166941]], - [[3., -200.02166941], [4., -197.07166941]], - 
[[1., -174.40166941], [2., -173.40166941]], - [[2., -173.40166941], [3., -171.43166941]], - [[3., -171.43166941], [4., -168.48166941]], - [[1., -145.63166941], [2., -144.64166941]], - [[2., -144.64166941], [3., -142.66166941]], - [[3., -142.66166941], [4., -139.72166941]], - [[1., -116.86166941], [2., -115.87166941]], - [[2., -115.87166941], [3., -113.89166941]], - [[3., -113.89166941], [4., -110.95166941]], - [[1., -88.09166941], [2., -87.10166941]], - [[2., -87.10166941], [3., -85.13166941]], - [[3., -85.13166941], [4., -82.18166941]], - [[1., -59.33], [2., -58.34]], - [[2., -58.34], [3., -56.36]], - [[3., -56.36], [4., -53.42]], - [[1., -30.56], [2., -29.72]], - [[2., -29.72], [3., -27.97]], - [[3., -27.97], [4., -25.22]], - [[1., -1.79], [2., -0.8]], - [[2., -0.8], [3., 208.64166941]], - [[3., 208.64166941], [4., 205.69166941]]] + pos, speed, _ = tsd.get_time_space_data( + actual_emission_data, flow_params) + + expected_pos = np.array( + [[60, 23.8, 182.84166941, 154.07166941, 125.30166941, 96.54166941, + -203.16166941, -174.40166941, -145.63166941, -116.86166941, + -88.09166941, -59.33, -30.56, -1.79], + [59, 22.81, 181.85166941, 153.08166941, 124.31166941, 95.54166941, + -202.17166941, -173.40166941, -144.64166941, -115.87166941, + -87.10166941, -58.34, -29.72, -0.8], + [57.02, 20.83, 179.87166941, 151.10166941, 122.34166941, + 93.56166941, -200.02166941, -171.43166941, -142.66166941, + -113.89166941, -85.13166941, -56.36, -27.97, 208.64166941]] ) + expected_speed = np.array([ + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, + 0.99, 0.84, 0.99], + [1.99, 1.98, 1.98, 1.98, 1.98, 1.98, 1.97, 1.98, 1.98, 1.98, 1.97, + 1.97, 1.76, 1.97] + ]) - np.testing.assert_array_almost_equal(segs, expected_segs) + np.testing.assert_array_almost_equal(pos[:-1, :], expected_pos) + np.testing.assert_array_almost_equal(speed[:-1, :], expected_speed) def test_time_space_diagram_merge(self): dir_path = 
os.path.dirname(os.path.realpath(__file__)) + emission_data = tsd.import_data_from_emission( + os.path.join(dir_path, 'test_files/merge_emission.csv')) + flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/merge.json')) - emission_data = tsd.import_data_from_trajectory( - os.path.join(dir_path, 'test_files/merge_emission.csv'), flow_params) - - segs, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_segs = np.array([ - [[2.0000e-01, 7.2949e+02], [4.0000e-01, 7.2953e+02]], - [[4.0000e-01, 7.2953e+02], [6.0000e-01, 7.2961e+02]], - [[6.0000e-01, 7.2961e+02], [8.0000e-01, 7.2973e+02]], - [[8.0000e-01, 7.2973e+02], [1.0000e+00, 7.2988e+02]]] + pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_pos = np.array( + [[4.86, 180.32, 361.32, 547.77, 0], + [4.88, 180.36, 361.36, 547.8, 0], + [4.95, 180.43, 361.44, 547.87, 0], + [5.06, 180.54, 361.56, 547.98, 0], + [5.21, 180.68, 361.72, 548.12, 0], + [5.4, 180.86, 0, 0, 0]] + ) + expected_speed = np.array( + [[0, 0, 0, 0, 0], + [0.15, 0.17, 0.19, 0.14, 0], + [0.35, 0.37, 0.39, 0.34, 0], + [0.54, 0.57, 0.59, 0.54, 0], + [0.74, 0.7, 0.79, 0.71, 0], + [0.94, 0.9, 0, 0, 0]] ) - np.testing.assert_array_almost_equal(segs, expected_segs) + np.testing.assert_array_almost_equal(pos, expected_pos) + np.testing.assert_array_almost_equal(speed, expected_speed) def test_time_space_diagram_I210(self): dir_path = os.path.dirname(os.path.realpath(__file__)) + emission_data = tsd.import_data_from_emission( + os.path.join(dir_path, 'test_files/i210_emission.csv')) + module = __import__("examples.exp_configs.non_rl", fromlist=["i210_subnetwork"]) flow_params = getattr(module, "i210_subnetwork").flow_params - emission_data = tsd.import_data_from_trajectory( - os.path.join(dir_path, 'test_files/i210_emission.csv'), flow_params) - - segs, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_segs = { - 1: np.array([ - [[0.8, 5.1], [1.6, 23.37]], - [[1.6, 23.37], [2.4, 
42.02]], - [[2.4, 42.02], [3.2, 61.21]], - [[3.2, 61.21], [4., 18.87]], - [[4., 18.87], [4.8, 39.93]], - [[2.4, 5.1], [3.2, 22.97]], - [[3.2, 22.97], [4., 40.73]]] - ), - 2: np.array([ - [[2.4, 5.1], [3.2, 23.98]], - [[3.2, 23.98], [4., 43.18]]] - ), - 3: np.array([ - [[0.8, 5.1], [1.6, 23.72]], - [[1.6, 23.72], [2.4, 43.06]], - [[2.4, 43.06], [3.2, 1.33]], - [[3.2, 1.33], [4., 21.65]], - [[4., 21.65], [4.8, 43.46]], - [[2.4, 5.1], [3.2, 23.74]], - [[3.2, 23.74], [4., 42.38]]] - ), - 4: np.array([ - [[2.4, 5.1], [3.2, 23.6]], - [[3.2, 23.6], [4., 42.46]]] - )} - - for lane, expected_seg in expected_segs.items(): - np.testing.assert_array_almost_equal(segs[lane], expected_seg) + pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_pos = np.array( + [[5.1, 0., 0.], + [23.37, 0., 0.], + [42.02, 5.1, 0.], + [61.21, 22.97, 0.], + [80.45, 40.73, 5.1], + [101.51, 0., 0.]] + ) + expected_speed = np.array( + [[23., 0., 0.], + [22.84, 0., 0.], + [23.31, 23., 0.], + [23.98, 22.33, 0.], + [24.25, 22.21, 23.], + [26.33, 0., 0.]] + ) + + np.testing.assert_array_almost_equal(pos, expected_pos) + np.testing.assert_array_almost_equal(speed, expected_speed) def test_time_space_diagram_ring_road(self): dir_path = os.path.dirname(os.path.realpath(__file__)) + emission_data = tsd.import_data_from_emission( + os.path.join(dir_path, 'test_files/ring_230_emission.csv')) + flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/ring_230.json')) - emission_data = tsd.import_data_from_trajectory( - os.path.join(dir_path, 'test_files/ring_230_emission.csv'), flow_params) - - segs, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_segs = np.array([ - [[1.0000e-01, 0.0000e+00], [2.0000e-01, 1.0000e-02]], - [[2.0000e-01, 1.0000e-02], [3.0000e-01, 2.0000e-02]], - [[3.0000e-01, 2.0000e-02], [4.0000e-01, 5.0000e-02]], - [[4.0000e-01, 5.0000e-02], [5.0000e-01, 8.0000e-02]], - [[5.0000e-01, 8.0000e-02], [6.0000e-01, 1.2000e-01]], - 
[[1.0000e-01, 9.5500e+00], [2.0000e-01, 9.5500e+00]], - [[2.0000e-01, 9.5500e+00], [3.0000e-01, 9.5700e+00]], - [[3.0000e-01, 9.5700e+00], [4.0000e-01, 9.5900e+00]], - [[4.0000e-01, 9.5900e+00], [5.0000e-01, 9.6200e+00]], - [[5.0000e-01, 9.6200e+00], [6.0000e-01, 9.6600e+00]], - [[1.0000e-01, 9.5550e+01], [2.0000e-01, 9.5560e+01]], - [[2.0000e-01, 9.5560e+01], [3.0000e-01, 9.5580e+01]], - [[3.0000e-01, 9.5580e+01], [4.0000e-01, 9.5600e+01]], - [[4.0000e-01, 9.5600e+01], [5.0000e-01, 9.5630e+01]], - [[5.0000e-01, 9.5630e+01], [6.0000e-01, 9.5670e+01]], - [[1.0000e-01, 1.0510e+02], [2.0000e-01, 1.0511e+02]], - [[2.0000e-01, 1.0511e+02], [3.0000e-01, 1.0512e+02]], - [[3.0000e-01, 1.0512e+02], [4.0000e-01, 1.0515e+02]], - [[4.0000e-01, 1.0515e+02], [5.0000e-01, 1.0518e+02]], - [[5.0000e-01, 1.0518e+02], [6.0000e-01, 1.0522e+02]], - [[1.0000e-01, 1.1465e+02], [2.0000e-01, 1.1465e+02]], - [[2.0000e-01, 1.1465e+02], [3.0000e-01, 1.1467e+02]], - [[3.0000e-01, 1.1467e+02], [4.0000e-01, 1.1469e+02]], - [[4.0000e-01, 1.1469e+02], [5.0000e-01, 1.1472e+02]], - [[5.0000e-01, 1.1472e+02], [6.0000e-01, 1.1476e+02]], - [[1.0000e-01, 1.2429e+02], [2.0000e-01, 1.2430e+02]], - [[2.0000e-01, 1.2430e+02], [3.0000e-01, 1.2431e+02]], - [[3.0000e-01, 1.2431e+02], [4.0000e-01, 1.2434e+02]], - [[4.0000e-01, 1.2434e+02], [5.0000e-01, 1.2437e+02]], - [[5.0000e-01, 1.2437e+02], [6.0000e-01, 1.2441e+02]], - [[1.0000e-01, 1.3384e+02], [2.0000e-01, 1.3384e+02]], - [[2.0000e-01, 1.3384e+02], [3.0000e-01, 1.3386e+02]], - [[3.0000e-01, 1.3386e+02], [4.0000e-01, 1.3388e+02]], - [[4.0000e-01, 1.3388e+02], [5.0000e-01, 1.3391e+02]], - [[1.0000e-01, 1.4338e+02], [2.0000e-01, 1.4339e+02]], - [[2.0000e-01, 1.4339e+02], [3.0000e-01, 1.4341e+02]], - [[3.0000e-01, 1.4341e+02], [4.0000e-01, 1.4343e+02]], - [[4.0000e-01, 1.4343e+02], [5.0000e-01, 1.4346e+02]], - [[1.0000e-01, 1.5293e+02], [2.0000e-01, 1.5294e+02]], - [[2.0000e-01, 1.5294e+02], [3.0000e-01, 1.5295e+02]], - [[3.0000e-01, 1.5295e+02], [4.0000e-01, 
1.5297e+02]], - [[4.0000e-01, 1.5297e+02], [5.0000e-01, 1.5301e+02]], - [[1.0000e-01, 1.6247e+02], [2.0000e-01, 1.6248e+02]], - [[2.0000e-01, 1.6248e+02], [3.0000e-01, 1.6250e+02]], - [[3.0000e-01, 1.6250e+02], [4.0000e-01, 1.6252e+02]], - [[4.0000e-01, 1.6252e+02], [5.0000e-01, 1.6255e+02]], - [[1.0000e-01, 1.7202e+02], [2.0000e-01, 1.7203e+02]], - [[2.0000e-01, 1.7203e+02], [3.0000e-01, 1.7204e+02]], - [[3.0000e-01, 1.7204e+02], [4.0000e-01, 1.7207e+02]], - [[4.0000e-01, 1.7207e+02], [5.0000e-01, 1.7210e+02]], - [[1.0000e-01, 1.8166e+02], [2.0000e-01, 1.8167e+02]], - [[2.0000e-01, 1.8167e+02], [3.0000e-01, 1.8169e+02]], - [[3.0000e-01, 1.8169e+02], [4.0000e-01, 1.8171e+02]], - [[4.0000e-01, 1.8171e+02], [5.0000e-01, 1.8174e+02]], - [[1.0000e-01, 1.9090e+01], [2.0000e-01, 1.9100e+01]], - [[2.0000e-01, 1.9100e+01], [3.0000e-01, 1.9110e+01]], - [[3.0000e-01, 1.9110e+01], [4.0000e-01, 1.9140e+01]], - [[4.0000e-01, 1.9140e+01], [5.0000e-01, 1.9170e+01]], - [[1.0000e-01, 1.9121e+02], [2.0000e-01, 1.9122e+02]], - [[2.0000e-01, 1.9122e+02], [3.0000e-01, 1.9123e+02]], - [[3.0000e-01, 1.9123e+02], [4.0000e-01, 1.9126e+02]], - [[4.0000e-01, 1.9126e+02], [5.0000e-01, 1.9129e+02]], - [[1.0000e-01, 2.0075e+02], [2.0000e-01, 2.0076e+02]], - [[2.0000e-01, 2.0076e+02], [3.0000e-01, 2.0078e+02]], - [[3.0000e-01, 2.0078e+02], [4.0000e-01, 2.0081e+02]], - [[4.0000e-01, 2.0081e+02], [5.0000e-01, 2.0085e+02]], - [[1.0000e-01, 2.8640e+01], [2.0000e-01, 2.8640e+01]], - [[2.0000e-01, 2.8640e+01], [3.0000e-01, 2.8660e+01]], - [[3.0000e-01, 2.8660e+01], [4.0000e-01, 2.8680e+01]], - [[4.0000e-01, 2.8680e+01], [5.0000e-01, 2.8710e+01]], - [[1.0000e-01, 3.8180e+01], [2.0000e-01, 3.8190e+01]], - [[2.0000e-01, 3.8190e+01], [3.0000e-01, 3.8210e+01]], - [[3.0000e-01, 3.8210e+01], [4.0000e-01, 3.8230e+01]], - [[4.0000e-01, 3.8230e+01], [5.0000e-01, 3.8260e+01]], - [[1.0000e-01, 4.7730e+01], [2.0000e-01, 4.7740e+01]], - [[2.0000e-01, 4.7740e+01], [3.0000e-01, 4.7750e+01]], - [[3.0000e-01, 
4.7750e+01], [4.0000e-01, 4.7770e+01]], - [[4.0000e-01, 4.7770e+01], [5.0000e-01, 4.7810e+01]], - [[1.0000e-01, 5.7270e+01], [2.0000e-01, 5.7280e+01]], - [[2.0000e-01, 5.7280e+01], [3.0000e-01, 5.7300e+01]], - [[3.0000e-01, 5.7300e+01], [4.0000e-01, 5.7320e+01]], - [[4.0000e-01, 5.7320e+01], [5.0000e-01, 5.7350e+01]], - [[1.0000e-01, 6.6920e+01], [2.0000e-01, 6.6930e+01]], - [[2.0000e-01, 6.6930e+01], [3.0000e-01, 6.6940e+01]], - [[3.0000e-01, 6.6940e+01], [4.0000e-01, 6.6970e+01]], - [[4.0000e-01, 6.6970e+01], [5.0000e-01, 6.7000e+01]], - [[1.0000e-01, 7.6460e+01], [2.0000e-01, 7.6470e+01]], - [[2.0000e-01, 7.6470e+01], [3.0000e-01, 7.6490e+01]], - [[3.0000e-01, 7.6490e+01], [4.0000e-01, 7.6510e+01]], - [[4.0000e-01, 7.6510e+01], [5.0000e-01, 7.6540e+01]], - [[1.0000e-01, 8.6010e+01], [2.0000e-01, 8.6020e+01]], - [[2.0000e-01, 8.6020e+01], [3.0000e-01, 8.6030e+01]], - [[3.0000e-01, 8.6030e+01], [4.0000e-01, 8.6060e+01]], - [[4.0000e-01, 8.6060e+01], [5.0000e-01, 8.6090e+01]]] + pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_pos = np.array( + [[0.0000e+00, 9.5500e+00, 9.5550e+01, 1.0510e+02, 1.1465e+02, + 1.2429e+02, 1.3384e+02, 1.4338e+02, 1.5293e+02, 1.6247e+02, + 1.7202e+02, 1.8166e+02, 1.9090e+01, 1.9121e+02, 2.0075e+02, + 2.8640e+01, 3.8180e+01, 4.7730e+01, 5.7270e+01, 6.6920e+01, + 7.6460e+01, 8.6010e+01], + [1.0000e-02, 9.5500e+00, 9.5560e+01, 1.0511e+02, 1.1465e+02, + 1.2430e+02, 1.3384e+02, 1.4339e+02, 1.5294e+02, 1.6248e+02, + 1.7203e+02, 1.8167e+02, 1.9100e+01, 1.9122e+02, 2.0076e+02, + 2.8640e+01, 3.8190e+01, 4.7740e+01, 5.7280e+01, 6.6930e+01, + 7.6470e+01, 8.6020e+01], + [2.0000e-02, 9.5700e+00, 9.5580e+01, 1.0512e+02, 1.1467e+02, + 1.2431e+02, 1.3386e+02, 1.4341e+02, 1.5295e+02, 1.6250e+02, + 1.7204e+02, 1.8169e+02, 1.9110e+01, 1.9123e+02, 2.0078e+02, + 2.8660e+01, 3.8210e+01, 4.7750e+01, 5.7300e+01, 6.6940e+01, + 7.6490e+01, 8.6030e+01], + [5.0000e-02, 9.5900e+00, 9.5600e+01, 1.0515e+02, 1.1469e+02, + 1.2434e+02, 
1.3388e+02, 1.4343e+02, 1.5297e+02, 1.6252e+02, + 1.7207e+02, 1.8171e+02, 1.9140e+01, 1.9126e+02, 2.0081e+02, + 2.8680e+01, 3.8230e+01, 4.7770e+01, 5.7320e+01, 6.6970e+01, + 7.6510e+01, 8.6060e+01], + [8.0000e-02, 9.6200e+00, 9.5630e+01, 1.0518e+02, 1.1472e+02, + 1.2437e+02, 1.3391e+02, 1.4346e+02, 1.5301e+02, 1.6255e+02, + 1.7210e+02, 1.8174e+02, 1.9170e+01, 1.9129e+02, 2.0085e+02, + 2.8710e+01, 3.8260e+01, 4.7810e+01, 5.7350e+01, 6.7000e+01, + 7.6540e+01, 8.6090e+01], + [1.2000e-01, 9.6600e+00, 9.5670e+01, 1.0522e+02, 1.1476e+02, + 1.2441e+02, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 0.0000e+00, 0.0000e+00]] ) - - np.testing.assert_array_almost_equal(segs, expected_segs) + expected_speed = np.array([ + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, + 0.08, 0.08, 0.08, 0.1, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08], + [0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, + 0.16, 0.16, 0.16, 0.2, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16], + [0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, + 0.23, 0.23, 0.23, 0.29, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23], + [0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, + 0.31, 0.31, 0.31, 0.39, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31], + [0.41, 0.41, 0.41, 0.41, 0.41, 0.41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0] + ]) + + np.testing.assert_array_almost_equal(pos, expected_pos) + np.testing.assert_array_almost_equal(speed, expected_speed) def test_plot_ray_results(self): dir_path = os.path.dirname(os.path.realpath(__file__)) From ddce32ed60b5404e16a42d5f8ed8e17ac862c329 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 25 Jun 2020 17:04:47 -0700 Subject: [PATCH 53/57] Cleanup code --- flow/visualize/visualizer_rllib.py | 3 --- 1 file changed, 3 
deletions(-) diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 5afbb39a3..80b901ebb 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -85,8 +85,6 @@ def visualizer_rllib(args): sim_params.use_ballistic = False # Determine agent and checkpoint - # TODO(akashvelu): remove this - # print("NEW CONFIGGG: ", config['env_config']['run']) config_run = config['env_config']['run'] if 'run' in config['env_config'] \ else None if args.run and config_run: @@ -177,7 +175,6 @@ def visualizer_rllib(args): checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num agent.restore(checkpoint) - agent.import_model('/Users/akashvelu/Desktop/combined_test3/ppo_model.h5', 'av') if hasattr(agent, "local_evaluator") and \ From 4e6302e2d2d5223e567a25817b84e64be4dedd64 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 25 Jun 2020 17:17:53 -0700 Subject: [PATCH 54/57] Handle case with vehicle in no-control edge --- .../imitation_learning/imitating_controller.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 39fd2421e..64622ef73 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -38,7 +38,11 @@ def get_accel(self, env): instance of environment being used """ # observation is a dictionary for multiagent envs, list for singleagent envs + if self.multiagent: + # if vehicle is in non-control edge, it will not be in observation, so return None to default control to Sumo + if self.veh_id not in env.get_state().keys(): + return None observation = env.get_state()[self.veh_id] else: observation = env.get_state() @@ -56,7 +60,9 @@ def get_accel(self, env): else: rl_ids = env.get_rl_ids() - assert self.veh_id in 
rl_ids, "Vehicle corresponding to controller not in env!" + if not (self.veh_id in rl_ids): + # vehicle in non-control edge, so return None to default control to Sumo + return None # return the action taken by the vehicle ind = rl_ids.index(self.veh_id) From 29eb5a02733bbace696701c2d1994e9d3fd81823 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 9 Jul 2020 17:03:19 -0700 Subject: [PATCH 55/57] Add learning rate as a parameter, override import_from_h5 method using setattr --- .../imitation_learning/custom_ppo.py | 31 +++++++++++++++++++ .../imitation_learning/custom_trainable.py | 2 ++ .../imitating_controller.py | 2 +- .../imitation_learning/imitating_network.py | 5 +-- flow/controllers/imitation_learning/run.py | 3 ++ .../controllers/imitation_learning/trainer.py | 4 +-- 6 files changed, 42 insertions(+), 5 deletions(-) diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/controllers/imitation_learning/custom_ppo.py index 0075741d3..ed6fa032b 100644 --- a/flow/controllers/imitation_learning/custom_ppo.py +++ b/flow/controllers/imitation_learning/custom_ppo.py @@ -195,3 +195,34 @@ def get_policy_class(config): validate_config=validate_config, after_optimizer_step=update_kl, after_train_result=warn_about_bad_reward_scales) + + +from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID +def import_model(self, import_file, policy_id=DEFAULT_POLICY_ID): + """Imports a model from import_file. + + Note: Currently, only h5 files are supported. + + Args: + import_file (str): The file to import the model from. + + Returns: + A dict that maps ExportFormats to successfully exported models. + """ + # Check for existence. + if not os.path.exists(import_file): + raise FileNotFoundError( + "`import_file` '{}' does not exist! Can't import Model.". + format(import_file)) + # Get the format of the given file. + import_format = "h5" # TODO(sven): Support checkpoint loading. 
+ + ExportFormat.validate([import_format]) + if import_format != ExportFormat.H5: + raise NotImplementedError + else: + return self.import_policy_model_from_h5(import_file, policy_id=policy_id) + +from ray.rllib.agents import Trainer +print('Overriding import model') +setattr(Trainer, 'import_model', import_model) \ No newline at end of file diff --git a/flow/controllers/imitation_learning/custom_trainable.py b/flow/controllers/imitation_learning/custom_trainable.py index b41728f11..66785d905 100644 --- a/flow/controllers/imitation_learning/custom_trainable.py +++ b/flow/controllers/imitation_learning/custom_trainable.py @@ -17,7 +17,9 @@ def _setup(self, config): env_name = config['env'] self.trainer = custom_ppo.CustomPPOTrainer(env=env_name, config=config) + # kind of hacky, but don't know a better solution to the default policy not existing policy_id = list(self.trainer.get_weights().keys())[0] + print("test: ", list(self.trainer.get_weights().keys())) self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id=policy_id) def _train(self): diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 64622ef73..4fdd4ebd7 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -62,7 +62,7 @@ def get_accel(self, env): if not (self.veh_id in rl_ids): # vehicle in non-control edge, so return None to default control to Sumo - return None + return None # return the action taken by the vehicle ind = rl_ids.index(self.veh_id) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 81642883a..a95222855 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -9,7 +9,7 @@ class ImitatingNetwork(): Class containing neural 
network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_model=False, load_path='', tensorboard_path=''): + def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, learning_rate, stochastic=False, variance_regularizer = 0, load_model=False, load_path='', tensorboard_path=''): """Initializes and constructs neural network. Parameters @@ -41,6 +41,7 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.fcnet_hiddens = fcnet_hiddens self.stochastic=stochastic self.variance_regularizer = variance_regularizer + self.learning_rate = learning_rate self.train_steps = 0 self.action_steps = 0 @@ -72,7 +73,7 @@ def compile_network(self): Compiles Keras network with appropriate loss and optimizer """ loss = get_loss(self.stochastic, self.variance_regularizer) - self.model.compile(loss=loss, optimizer='adam') + self.model.compile(loss=loss, optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate)) def train(self, observation_batch, action_batch): diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 6adc04199..41ceb82a6 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -88,6 +88,7 @@ def main(): parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--lr', type=float, default=0.001, help='Learning rate for imitation learning and value function learning') 
parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural net') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') @@ -120,5 +121,7 @@ def main(): if params['num_eval_episodes'] > 0: runner.evaluate() + print('done') + if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 2c951ac5b..c027368ae 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -62,7 +62,7 @@ def __init__(self, params, submodule): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path'], tensorboard_path=self.params['tensorboard_path']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], self.params['lr'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path'], tensorboard_path=self.params['tensorboard_path']) # controllers setup @@ -239,7 +239,7 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): print("\n\n********** Learning value function of imitation policy ************ \n") # init value function neural net vf_net = build_neural_net_deterministic(self.params['obs_dim'], 1, self.params['fcnet_hiddens']) - 
vf_net.compile(loss='mean_squared_error', optimizer = 'adam') + vf_net.compile(loss='mean_squared_error', optimizer = tf.keras.optimizers.Adam(learning_rate=self.params['lr'])) max_decel = self.flow_params['env'].additional_params['max_decel'] # collect trajectory samples to train on From 6c6880096c874779efec8932b649239cdeb5ba1e Mon Sep 17 00:00:00 2001 From: akashvelu Date: Mon, 13 Jul 2020 10:25:32 -0700 Subject: [PATCH 56/57] Move imitation to algorithms folder --- examples/train.py | 6 +++--- flow/algorithms/__init__.py | 0 .../algorithms/imitation_learning/__init__.py | 0 .../imitation_learning/custom_ppo.py | 6 ++++-- .../custom_ppo_tf_policy.py | 0 .../imitation_learning/custom_trainable.py | 2 +- .../imitating_controller.py | 5 +---- .../imitation_learning/imitating_network.py | 4 ++-- .../imitation_learning/keras_utils.py | 0 .../imitation_learning/ppo_model.py | 2 +- .../imitation_learning/replay_buffer.py | 0 .../imitation_learning/run.py | 3 +-- .../train_with_imitation.py | 5 +++-- .../imitation_learning/trainer.py | 19 +++++------------- .../imitation_learning/utils.py | 4 ++-- .../imitation_learning/utils_tensorflow.py | 0 .../model_files/follower_stopper1.h5 | Bin 35456 -> 0 bytes .../model_files/ppo_model_i210.h5 | Bin 53208 -> 0 bytes 18 files changed, 23 insertions(+), 33 deletions(-) create mode 100644 flow/algorithms/__init__.py create mode 100644 flow/algorithms/imitation_learning/__init__.py rename flow/{controllers => algorithms}/imitation_learning/custom_ppo.py (98%) rename flow/{controllers => algorithms}/imitation_learning/custom_ppo_tf_policy.py (100%) rename flow/{controllers => algorithms}/imitation_learning/custom_trainable.py (96%) rename flow/{controllers => algorithms}/imitation_learning/imitating_controller.py (94%) rename flow/{controllers => algorithms}/imitation_learning/imitating_network.py (97%) rename flow/{controllers => algorithms}/imitation_learning/keras_utils.py (100%) rename flow/{controllers => 
algorithms}/imitation_learning/ppo_model.py (98%) rename flow/{controllers => algorithms}/imitation_learning/replay_buffer.py (100%) rename flow/{controllers => algorithms}/imitation_learning/run.py (98%) rename flow/{controllers => algorithms}/imitation_learning/train_with_imitation.py (97%) rename flow/{controllers => algorithms}/imitation_learning/trainer.py (96%) rename flow/{controllers => algorithms}/imitation_learning/utils.py (98%) rename flow/{controllers => algorithms}/imitation_learning/utils_tensorflow.py (100%) delete mode 100644 flow/controllers/imitation_learning/model_files/follower_stopper1.h5 delete mode 100644 flow/controllers/imitation_learning/model_files/ppo_model_i210.h5 diff --git a/examples/train.py b/examples/train.py index 9445e81e0..20b4b373a 100644 --- a/examples/train.py +++ b/examples/train.py @@ -179,7 +179,7 @@ def setup_exps_rllib(flow_params, alg_run = flags.algorithm.upper() if alg_run == "PPO": - from flow.controllers.imitation_learning.custom_ppo import CustomPPOTrainer + from flow.algorithms.imitation_learning.custom_ppo import CustomPPOTrainer from ray.rllib.agents.ppo import DEFAULT_CONFIG config = deepcopy(DEFAULT_CONFIG) @@ -202,8 +202,8 @@ def setup_exps_rllib(flow_params, config["lr"] = tune.grid_search([5e-4, 5e-5]) if flags.load_weights_path: - from flow.controllers.imitation_learning.ppo_model import PPONetwork - from flow.controllers.imitation_learning.custom_trainable import Imitation_PPO_Trainable + from flow.algorithms.imitation_learning.ppo_model import PPONetwork + from flow.algorithms.imitation_learning.custom_trainable import Imitation_PPO_Trainable from ray.rllib.models import ModelCatalog # Register custom model diff --git a/flow/algorithms/__init__.py b/flow/algorithms/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/flow/algorithms/imitation_learning/__init__.py b/flow/algorithms/imitation_learning/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/flow/controllers/imitation_learning/custom_ppo.py b/flow/algorithms/imitation_learning/custom_ppo.py similarity index 98% rename from flow/controllers/imitation_learning/custom_ppo.py rename to flow/algorithms/imitation_learning/custom_ppo.py index ed6fa032b..c7e81e13c 100644 --- a/flow/controllers/imitation_learning/custom_ppo.py +++ b/flow/algorithms/imitation_learning/custom_ppo.py @@ -3,9 +3,12 @@ """ import logging +import os from ray.rllib.agents import with_common_config -from flow.controllers.imitation_learning.custom_ppo_tf_policy import CustomPPOTFPolicy +from ray.tune.trial import ExportFormat + +from flow.algorithms.imitation_learning.custom_ppo_tf_policy import CustomPPOTFPolicy from ray.rllib.agents.trainer_template import build_trainer from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer from ray.rllib.utils import try_import_tf @@ -224,5 +227,4 @@ def import_model(self, import_file, policy_id=DEFAULT_POLICY_ID): return self.import_policy_model_from_h5(import_file, policy_id=policy_id) from ray.rllib.agents import Trainer -print('Overriding import model') setattr(Trainer, 'import_model', import_model) \ No newline at end of file diff --git a/flow/controllers/imitation_learning/custom_ppo_tf_policy.py b/flow/algorithms/imitation_learning/custom_ppo_tf_policy.py similarity index 100% rename from flow/controllers/imitation_learning/custom_ppo_tf_policy.py rename to flow/algorithms/imitation_learning/custom_ppo_tf_policy.py diff --git a/flow/controllers/imitation_learning/custom_trainable.py b/flow/algorithms/imitation_learning/custom_trainable.py similarity index 96% rename from flow/controllers/imitation_learning/custom_trainable.py rename to flow/algorithms/imitation_learning/custom_trainable.py index 66785d905..993113607 100644 --- a/flow/controllers/imitation_learning/custom_trainable.py +++ b/flow/algorithms/imitation_learning/custom_trainable.py @@ -3,7 +3,7 @@ from ray.rllib.agents.agent import get_agent_class except 
ImportError: from ray.rllib.agents.registry import get_agent_class -import flow.controllers.imitation_learning.custom_ppo as custom_ppo +import flow.algorithms.imitation_learning.custom_ppo as custom_ppo class Imitation_PPO_Trainable(tune.Trainable): """ diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/algorithms/imitation_learning/imitating_controller.py similarity index 94% rename from flow/controllers/imitation_learning/imitating_controller.py rename to flow/algorithms/imitation_learning/imitating_controller.py index 4fdd4ebd7..115930744 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/algorithms/imitation_learning/imitating_controller.py @@ -1,8 +1,5 @@ -import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController -from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer + class ImitatingController(BaseController): """ diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/algorithms/imitation_learning/imitating_network.py similarity index 97% rename from flow/controllers/imitation_learning/imitating_network.py rename to flow/algorithms/imitation_learning/imitating_network.py index a95222855..6e9e9c3c7 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/algorithms/imitation_learning/imitating_network.py @@ -1,7 +1,7 @@ import numpy as np import tensorflow as tf -from flow.controllers.imitation_learning.keras_utils import build_neural_net_deterministic, build_neural_net_stochastic, get_loss, negative_log_likelihood_loss -from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer +from flow.algorithms.imitation_learning.keras_utils import build_neural_net_deterministic, build_neural_net_stochastic, get_loss, negative_log_likelihood_loss +from flow.algorithms.imitation_learning.replay_buffer import ReplayBuffer class ImitatingNetwork(): 
diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/algorithms/imitation_learning/keras_utils.py similarity index 100% rename from flow/controllers/imitation_learning/keras_utils.py rename to flow/algorithms/imitation_learning/keras_utils.py diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/algorithms/imitation_learning/ppo_model.py similarity index 98% rename from flow/controllers/imitation_learning/ppo_model.py rename to flow/algorithms/imitation_learning/ppo_model.py index 85a7c841e..47ae61f77 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/algorithms/imitation_learning/ppo_model.py @@ -1,6 +1,6 @@ from ray.rllib.models.tf.tf_modelv2 import TFModelV2 -from flow.controllers.imitation_learning.keras_utils import * +from flow.algorithms.imitation_learning.keras_utils import * class PPONetwork(TFModelV2): diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/algorithms/imitation_learning/replay_buffer.py similarity index 100% rename from flow/controllers/imitation_learning/replay_buffer.py rename to flow/algorithms/imitation_learning/replay_buffer.py diff --git a/flow/controllers/imitation_learning/run.py b/flow/algorithms/imitation_learning/run.py similarity index 98% rename from flow/controllers/imitation_learning/run.py rename to flow/algorithms/imitation_learning/run.py index 41ceb82a6..ed8717a5a 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/algorithms/imitation_learning/run.py @@ -5,7 +5,7 @@ Usage: python run.py EXP_CONFIG """ -from flow.controllers.imitation_learning.trainer import Trainer +from flow.algorithms.imitation_learning.trainer import Trainer class Runner(object): @@ -121,7 +121,6 @@ def main(): if params['num_eval_episodes'] > 0: runner.evaluate() - print('done') if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/algorithms/imitation_learning/train_with_imitation.py similarity index 
97% rename from flow/controllers/imitation_learning/train_with_imitation.py rename to flow/algorithms/imitation_learning/train_with_imitation.py index 057c62835..2aae7c2e8 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/algorithms/imitation_learning/train_with_imitation.py @@ -1,4 +1,4 @@ -from flow.controllers.imitation_learning.run import * +from flow.algorithms.imitation_learning.run import * from examples.train import * def parse_args(args): @@ -17,7 +17,7 @@ def parse_args(args): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="Parse argument used when running a Flow simulation.", - epilog="python train.py EXP_CONFIG") + epilog="python train.py EXP_CONFIG EXP_TITLE") # required input parameters parser.add_argument( @@ -89,6 +89,7 @@ def parse_args(args): parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning negative log-likelihood loss, for stochastic policies.') parser.add_argument('--stochastic', type=bool, default=True, help='If true, learn a stochastic policy (MV Gaussian). 
Must be true to continue with PPO training.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--lr', type=float, default=0.001, help='Learning rate for imitation learning and value function learning') # loading and saving params: parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural network.') diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/algorithms/imitation_learning/trainer.py similarity index 96% rename from flow/controllers/imitation_learning/trainer.py rename to flow/algorithms/imitation_learning/trainer.py index c027368ae..203eee0b1 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/algorithms/imitation_learning/trainer.py @@ -1,17 +1,9 @@ -import time -from collections import OrderedDict -import pickle -import numpy as np -import gym -import os -import tensorflow as tf -from utils import * +from flow.algorithms.imitation_learning.utils import sample_n_trajectories, sample_trajectories from flow.utils.registry import make_create_env -from flow.controllers.imitation_learning.imitating_controller import ImitatingController -from flow.controllers.imitation_learning.imitating_network import ImitatingNetwork -from flow.controllers.imitation_learning.utils_tensorflow import * -from flow.controllers.imitation_learning.keras_utils import * -from flow.controllers.car_following_models import IDMController +from flow.algorithms.imitation_learning.imitating_controller import ImitatingController +from flow.algorithms.imitation_learning.imitating_network import ImitatingNetwork +from flow.algorithms.imitation_learning.utils_tensorflow import * +from flow.algorithms.imitation_learning.keras_utils import * from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import SumoCarFollowingParams @@ -255,7 +247,6 @@ def learn_value_function(self, num_samples, 
num_iterations, num_grad_steps): # iterate over data multiple times (labels change every iteration) for i in range(num_iterations): - print("Iteration: ", i) # form labels next_state_value_preds = vf_net.predict(next_observations).flatten() next_state_value_preds[np.isnan(next_state_value_preds)] = 0 diff --git a/flow/controllers/imitation_learning/utils.py b/flow/algorithms/imitation_learning/utils.py similarity index 98% rename from flow/controllers/imitation_learning/utils.py rename to flow/algorithms/imitation_learning/utils.py index 36f7844e9..cb75ccc19 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/algorithms/imitation_learning/utils.py @@ -3,8 +3,8 @@ import numpy as np import math from flow.core.params import SumoCarFollowingParams -from flow.controllers.imitation_learning.imitating_controller import ImitatingController -from flow.controllers.imitation_learning.imitating_network import ImitatingNetwork +from flow.algorithms.imitation_learning.imitating_controller import ImitatingController +from flow.algorithms.imitation_learning.imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.rewards import * diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/algorithms/imitation_learning/utils_tensorflow.py similarity index 100% rename from flow/controllers/imitation_learning/utils_tensorflow.py rename to flow/algorithms/imitation_learning/utils_tensorflow.py diff --git a/flow/controllers/imitation_learning/model_files/follower_stopper1.h5 b/flow/controllers/imitation_learning/model_files/follower_stopper1.h5 deleted file mode 100644 index 45b46d582cb089fed0c240f42bbf47cf14e28397..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35456 zcmeHP30xD$_YVk46%a4P8!1IZg>Y2}$?oRh&?@4MT8;n#QbV95D0otp3iZOHcvPy^ 
zs#vR{SOiISQ?=ED6*fZV|bCidZUhJXeX$zT)lSWMbUeO8ONE|mDcg8 zifp+iVVWXaqs&y(^*!hbn4QG@2Xi^l-_Un@t1$&o#kM>02soYtJ6@7Jd8$I4VsRXH zrH*V#fh^BxkBqm1)eBmgGuo*C-dWMbUnZiC}v3c@i8`=VfE0E99 zzK-e*7~v9NFDq#+2ypkcH(dBes7D9~r8+A|o8Zq&QK&Tvx{{@+oDcx_7>|eS0_tau zqJ5pnY-0M859f2FEezP#jKVVgl zO%pG`H^4V&h&C0OSr91jcv|{8WM-$TGN*%n(4F&G`$1RD^hj38H5wK<4}rVKNX9OE z40iVbriiYZ>A|4Nv}I+gl*xGsYK3-sX7*HuiA7yQw@qo}g2=LC7!5s)QH>GIb>fVl zB-bXVCxCvMba@st*%Wn-N;TL$Fpi!jMVrUgdC<#{YXbwA?wTxlwuWI$m8&!gx&jG_ zrk_pMDb-1tIqH-IHN7ZS$~d~VQBwADo60F;RF@+n8vSt>ZMIygmM5tg5wzJkj08=I znWI)}nVJ0qXa;$*Ryj?sr5z7lU{IrJUh{1JOQ8O*=;=-B&4BF;08ik*7-Pd|Sryn$e^3=n&4Nm)R>)ZkX*CQj zY#3NwV0ZA7+7L$@M_@7i;-byS=p=eh#qxKNXdRxY(totBU-*D}6~@5|@=^T8)*Xi(_T zpkV(%dT}X=WO*KFXPkvXE2n{C*h50u&(K%>gG2m-0{l^H0NYBpF=P1$gbWROl^@8A zq{z}JRjkYNR}A3~rEBCFnzU>=%OK2`c4Og-V&8 znVCWt>2|trhC-XIOa{)JE`kH3A|;`*9=aWkJR?h`fTzg>=6FR958If*&^Z&<&W6_k zku~T7pb9IsB49}}i0=w4 z^M*Z#MZ5r0Hs)Rbd_EY&ZZILkSsTcw^32~S;qY6FtqTEeJ{9HF>w6jFfc9;Sadopx zz>YNDJ#^^{hC=g%z;diRs7H3B7A9;FUjr<=1p+?E&MOWBWyDtn%XK19kJeeoBxmUl z;wy*w(OS;>0oRN77v-h7ayQx+=Dv;_4;)Y$6VsjCacIK{UQh>K&~Gpyi}KW_D{b1o zn9Cd2x`Ppl;Xsz|>+oLzjOe<+ZdTGt5a8M&q;0wG#sILH4Y~^L*C^xWKU_llhih*d z@9Q|R_jS1Yhq!AA`h_Z6}f!wix4@ykBSOD!6xYKJYt_`m(SQ}iR4bPip zvatmf=)Q>~>}pzeWBYei^{YTepjR%;q$`epa_9fQ~@LBB(z@fGM<=Pi_ z2tXawAA|?mv8aspEZBWKUI=RVofO15%+n9xlLhq_jmOpF-#KinV?*aoWCsj>Plo{E zo!I~Be66i@`!`Fmv-&T|y=%*;)N)U1`uNwW%j5$5(kJgyx7D+$p8gZ5;Ukw*{7XLg z-1>RcM>igkdCTpobEmqH9k;K+6?@;qqIca8R`_lu1^dpD-xb`Tew=%QJmlhr-BuT3 z?UHv2Ucrwl)^Eqyt8 z|JM7$_>g$q*?l`zkk~@tpT7v&)f))>5_zv}B zfP(6HbqbXeT|dr$`7G5F@GcGUaB#^4{Xn@43k=VR|p8i2ptK|_i3pAx#R zUex)C1hL9C0e?gu)@OuVCu{ObsjtsD;?)#InAee8 zWOQg(VqAQ%A=Y=EesSeetZkhS_HEjF?43=^sk;)5;r77g*x<-T)cT$Uc>N+LvTI%> zaaH&gl{RH8#sBacwr#(c{#M(ESjj|htYE*rL1OnU>FQ^1_<7C{YW5dAyk8qn{FlnJ zjU9FxXUOxPXsEk0gX&#$hn&~W1D`(p09i@AO+55YH&8t~ zQ@hF)*yc?oslyZVv6>4r4Zp6Rjla7@juRaM zsOn#);o0RneayZB!{vH!+*S1fMd~(S>xuWtHXr6-mjvs{jM^TCy!yNNgf%~lH1X^7 z8F#vosu{17{rMNkjq9AShIPHMk@bDCzP8R}rFb$fmOmivl0PH`L*AqM9ZSYDlP_R* 
zv1012%un!ky9Q9(y*e2d?#ZKy9$djM*lg2}-{(mctz3rxwB#4;;47yo@3xz+Oa$t$7&Md}lPrrrT zpJt#6OFJ4|oX1iB?BGO5*XA1T#=T?+{`WNU^S(O`hxEhn-X}?e;iNwv6zGH5e*Oj( z?^H#l2HH~NwOjClY9~W`+YhK)wpDumu?WNcYxl5^CMgZISTXK;>mw}6tDIUD+|_XU zbDZ+~>=FL;gh_^nqZ_EnFLxu){Ib^2&o>M&esibc!^n9=&EXI6sf#}_w7u)_>E&EE zLsa4w;>g*^Pq$ap8SK{HAXBTCd^#Y-h3NbJ#!n+QJtX@av%?mAya4-h;Y4cMVO3Z= z%^`hJUbHB8ODT5MZ4>sdJAG@0t~^2YIUk9isTx9gR`kL)KB&T??BAmt<5z?!g1*-K zye$>w%>51%tiOj{DIXm=Yd~j%s(cwfMN>(x_x2Ob9))9*2a2hS0d<11JDqh}VvVk@ zOA)sI(l%_*8f6CPgRfs z-}WVsh3^bo>a_$9x$i)g=y#BBXUB`an(vRTIqOB$-trZ=+^o{sy^~&Z(k2D_^5-QO zZ&&vaSKE!$fp^?+`KA@3dwZ|x3P+UH2qz?A7e`bG8nqCBN{Nrsm8W+q3FB5di}EZ?DZKx@7B91>}%9R zys_J(Khsw&9u&5BN4RKM?hO5ZDk}AE`|NAl?j0#AqxHQ~{*AEnT)zHl+Z#2d(R0c6 zx3-D|X9rR0cY9*{_wK=@-L_Eo$9x{PYVvM%f$w)Q1S@d*X5kxoo+o1>)Wrz zT|e>Cc@3E_bWr_QR~X%qJn?eYff?H)M7#N=dUZ;YDA)UaVfRIq2Rc-&By;CDVvCm^ z33FdCQh(*tSkW1*NL0MlIqb}l-g->CR#bO=ly2}6k*F}*PPlJrJa(ez>-tB&o%Dk5 zH|fsax+AQ|`jEfgf91dt;cDS+)k9tHRcg`n)T5&4A7_O<$SxuGMRdfzd{}@R4&Bz@ z+}NLrm)yj^9MoCYJ24E?tqLMt^{-Gf>*J|1nNatjcsz09>Q|KPfd_WP(V09@G!o;r z?}$}=CB+MpMq^I>x{#iuDn+XYg;9~+mbfKrFmkXPsPsG}##bR**5 z6?$I0p>x{P4tFg)N**TqU|G_3Ls=JTHB&%yNSy#UF=_UdSFta`?u0aa>+x+7 zr<2}{tV&)Q5w)?Gbj8(sk@jEr3b!eBje4kFDShSgal-U`2_h3K32usx^0=JHp?FrSD{!PmSdgvIU&G3U%Eg! 
zLZBWYvA~l#{_LJ&>Y`9XGbzf4aP#25Cx6Ari1t0oTOkVNG|pfp`1_&raC7rl$gY^9 z&9^uC(B2Se2`r+f_9nfVNH?#Wx%^dZFc>cv+6_zgB_tH^TkT8puESrw7uUYH!v(E@ z`h(Wj()lYs)LWFl;_5My!%B@2;O4LL1;*{%658kPLEM(sU4C})tHB2#{nw0NV=y57 z?~Px}C4e94UF+7e*0*}De?jqU1AJZrAp+vU#`ouP7%DL!jNT6u&k|0l(G07+v00Q{dVccewIcs6PmZ zmX2TRaWE5#U%7gW1i0}l5pLYhEunpW1;!!V_SRaq6u%-rf@t&oNz5RCF}f0< zWa>|zZ~nFMIYk%t_b1r*7Mb_Lz{_P8e}95~FAXjcyefqv7h?@Gm*>re&!2CE2Emda zyB^&Xs>X9!OMhPkxPA<2n>*({2If6S1_k72qW&Z#C{E_SH{r-jw_#qnu;H11PO&@F z4xdwWf^s0r$myrlDzY0NOk_CNYVgz|@W_4{48s%EOd=RmK3Pp?h#UDExnbg%J4gbG z&zXiNxIviCu%z}2*lD*}}0Dgeki?v3p zG%b$+*WP-=DT|^0APJBOV{H~)3GZJ)+L*7Pjo9z{aG%@shI)(kGgps)*PyMC3E1G5l~C|8#4*;x%|QTt?Yjf1kCAgIn*DtO7{3h z7tl%O&+0GA2O_;In*wG+!JCe7xP=q7GCwKJISB~&@pY?RU z*4Ano9|22_@*e0u9iq+8tJcGKh;Ib3X*`r`)c&js=KKqHUt01n>GT_)%m?WmC|Uou z76i=s7Z<1(2#1#PFUT&gnT%#?nj-z=bALdsY4%oAYibw0*|no{BeI9)X!Gr)`(OaF z`R8$8q;n%TzhKEO5~*NyNIzSRLRlBeB4Exgdc*t_g6UbE8_VFlM-^aZOZr=w2KbQO zMDki`>p;Mq{$7XrgHUKGzmD|oiW1C(&VvqbfU>s=P-I{8p?)JiuD&CATMOl$8_N@( zzH)>&G3V)gt*zBGK5D6RW1=s>mTv^{U*+7$-Itd9%SO0=k=_|C+FJPABVf+IL_xhk z0JM~UL4Kn)9nI7JT5D<-y;&%@=SK9Lq9X(fm79NVEQWS6(Fl4|J89v< z8ukA31#|f{?)_#oLx+W&nuv{Jgal#6*Mo@OH2A|$N>@} zyNTqr($;~1IsJ8q`h!qtDZh^N&JEsPLFYjNmJ??H36OoQg8Gg4xcZLdZ7r00ZoJMu zrEGrn2=Bx~^E|Dw#WX%LJ~yH`2+`)pL-jBoLK+RwHjRhi26#>v!(4pC-Itd9OC8+5 zNbjE0a;(!nCj`v-mwc!fNUWCfFKCxJOh+>{O_6?P6*b-7T5C=1qBjc#Iybg~z@YP@ zX}P(-H`V~{WU&$Srgk#jsJ*2Nrg#|p34{Zp&G!?9@E39UMoThX|MT_}Pyc;$go8QS ze0*ZyPoTe8_oEXA?U8&tOe~(&yFz?^=Qe4>YC;gSvYeRwu|HOF+gEaDjlJZxRpnt5 z56qF-c}$nKAM7i2=}L+nzxR{$&hnIf;p`Wda&L(E^5G-cgvmQ3KiSWhWbyNaXHNYh z`~27)>D1A)B^7lqiL-p>iqBT6rSd^{1Qq^y#DYWr5OqJ(AU^R(FHZfzUiibTU|D&H zpLFopK*QxYDW+8X;Ohfn)oop6j&{ZTgvc@Cw&E2HBYi~D^JWTI)Ue}n~9y+eH@bwuO{OC&xg(t+!k%o z923q8kl_-ZEAH8?LNLQ^iFoaU1;k#@YH3-&$&z`MI!X4)9kSlNa)qk%m&DW44iQuQ z0;MAb?WJDFCy9Q_n-|{a+DzG&iM8UsE4E-S9sE$dW8OggrwswpUSI!)i&l*n9~xCc zByC(FjURrT_;1M|Y3-Zu;pM`WQnfgfa1C~n$fD;H>)X#Kq?6C#2l^LE?bWAtO!^Uup(eZz$^mKEly~^NV$UK@T 
z=2>Y@sc2T3|HOOm&8d6tz2E=d@7(Y7{?4(Vz1MoyUVA-juV?SI)_#gHH!*7`t1K(l z_-oTfOj1n#+okBw*XG}~vrR3cx(GhE`9i$8jBYNoS~XpWi?tSOgmr6fmv5%8F45G` zt@*~OlTA&<8ZbrlMP>7CVq*Q8uG;*i;n&5VNdo4kMw1%HJEM8fB7HjEtY;!wxO>iX zbhBOP=<4j^=iAUI(o4Pw;h!uOn)S7%UdxG@H_Vkr`t8L!i?tD3;OJxLYa8I`FG}37N{cK*F_iut9ZyspVyl(Z=d95?3 z3E!)!5DPilbaAk`{;T-EEdkNIp4+T1$D4=yqPXcoyg}0%L8AFCnvX7SP4MW($=2`_ zZknJ6e#oxASvwv2D7~*Tv4O zkp~+Oe>XQRb=}zwz0C7l^sP;!VFY%5xo~=j2c)oEtyP@^3oO*e>xjHPeb#a|H&(Xv7+gNOe{FZN%2I)DrU|H1LPf7Us z*tvSx*}FBe=;!0#$l;HM;qT$<*T|Xf&;|rM2S3*UJHLi^QbUV|pPh$GW3T?cj<)u$ zc8yrggc@F79u4nJ*M@st?c7`!Hwd)x34Q+zN4t6Yc=}m+xH@_IxVPZm*U@oa!!1oB z{w-fYjk0U(@^`whcJ%S|Z9)1sgZxIUnPVSEXMZ<4pJtAmyZ_eF4e;v^`lD`4`P$O(?8ni^DusT}m4gW0(nmzk_MG0{FA1TQXZ~vtr&6D~6 zf`a_W416$uYZT>2YS8#X`A6-_KdCY;?aD88X?O+vWs&g5w<|55 z-``V}AF0+4)>!|nh51I}mxcMSwlIy;{vWk4|D?*a)X-n*^51P?{+_BdTNqD&zsBY8 zx82aUwf%Q{xPQRDv?TCPT9@B$gxp*`9PNA>dHJ;!`ZsTde)g96JLaY3s z<<0-3t_=CjrumnyG`kW0{oCdry&L{kPi8kfoTjbyU%f;vH`}utk^WW*zIkbEG=6N0 z8mTwjV59L{&G`nJ9Te2$-)i*rH~xO+|B`9~i(KboBEMO4vsjv(=;~MbO9H32iO9Hy6zol1E zUP?=O$%)lB=XJ?7$hezWQ%XQ%tiLfkO-!s)gP^-Kl;Yw|5|{bbDkdf>nsoN6sTT;mncG6;EE%wiU;rIVXUR$FI{xz@df1YpG zv{#q^3y*($(Uh+y_S1Z|zs3D^{pXWFlf9Jui@mIGwu>TrDL=4DChE=kVInPC-&_~r z)i>vvb!*P&;x~(O?zpB45ngn2J!?W!yH_*b@fQ82g*5?$5zPpH@IquSTgvON^Sm*C zwVPN|Chxa2vTp`cL7gbq2C=XmTVDaXI8se>*Bp}KQRc|Kq z+j_Tew<@TseH)*cn7Wu^8!^S;W_+oJ(Kb?Q)vAeL;~J{*Cm|l#EZUH!j=r^uiFItY z4vmcpqiW}fiMcdG{xikz*%h6UO?>>aEB~42@ki_BXLco_MV_{3W{UV1%|(&2|Fd?b zOd{99w!A94d~Xp!WmZDl-YBMs;LcS6&6hPAY^<8OaWF0hpJf$0{_- z@%2Fovgq|Sczj8P%`8wS!|yJ@@TYq*!|fJ4P*P!sHFrZ_?k;RKH$=sSpTSMqfCO0H zggIL!$@KLXL35)TStq;>w?9v3{q?Q6eODh~ywwR<9#Bg6dCsIc0sCBD zos0bnl5yn6=aAED1z=xu!vdqL(6uxHH3~8yWAa$&rtgX`o~qGIl~(M@VpFgQ=nc9P zHbUruAhahTkUq&89IG;TIiADF<}u4@wnN_R zuYl|c+tK;t2^==37@qGlg-bi~sOj*A>v`CDGhIv*BFP zy`8+;NE>?S*h=)y@?-mQpIT5`T<7LGb4mE ztvi#lg5@Q43VllFJ5Li19BV>+qerkMZufD6axXGu(ICF<0|WNKP}!&={3cuZ^Z*Q6 zzMO<#izF{z29vo`{xtH?K;gXLBC7jw3p;t?1s*W0W;{)Kez&NloT~OgQhy_n9g>TJ 
z%ITf>BkR@p+KbMhyNNa%J4aFYdj26eFO=tp^EQzE>tuvsDIW!N@V`wI2J!{s$l=blMwdqc*(N7_GrB?j9RvuhW6COo>GSlJ^s#$euCnbIu3*AJUe1>d0^et+ zxKq{f)ac6&dL~%KV95DnCF8eDMCYBJfGQpM zyp$o8?7$T#27|51B%g)oy=EWOjJV2_JdCkw+z}XZVH|t6V-N{8>qt6}Il!EB-$SP{ z?TLEFx8S&RAFeQb2ik*Ti_3d2rQ!0^L1KX(h8cKs?n;-ri2JR$%Tfg;#;26ItHSew zgjcsaZU~2_5_6`>)Z!n`OzDqf-`x(K}Rr3uV zII9TA+h|VhwJRLD{h{Ptz#tkiFor(iXY+Cn9^|a{cR-L?0fBvn5yzRg=-5aOheT~; zfqSD#yjv2=+Aks}_r8YfZ!f@BsRhJotpQwLK#5P}5SaPIn~dK$p2azDBL;TiEZ@nE z9qW3Al)4m=4rkkfsrz>LygrA-9@mB!dac34au0UyXO0@yxx{n1DO)psCk|XPf=!rJ zD3~pF1SXs5L9cP@Ec22gd23wFk%rYldH)rwKy63RXCmnZM z<$=1e1V2j1;lx%WFs_?EE_n9>diu^|Pt>KLN@5VCmDItK&)HDAT)-_(+6q>a6`?5K zmpfGxi6?z@G5%^ExSTBFGM=mP4pv+M`J+{^Gc*S5mwv{zJH?n!LLUek-<6%4b&#`j zZcARZt)%wT9l5Cw&ceP9YXnKxo`Y$JOM(~AZSkGOTbO@pFl>|%$JK>rVY>GmIQc?I znWQR-?h;1L-%ZA1*W;X08DVti9`1e7S5Q!BP0s4n(z@i;v}@5aqv40cjjH)RrIMjb zN==3FrJC6*jLh^`l@6T|Yjhzrz)0i53Zu17D~upD+z2997%93$8Y>P9ZVu7WDr>`gtny|{~ zP3Vf!L1%qRseD9fLF7uK|BZFWZ}*K&`#RD7RlS)hQ7O@|;`+9y`=|Gb_4-YuLYgTy z6~Eml27U+sPgj0+-qYNqiIc2mhJJ5Y6ZL<~x>kK+6We>6x&OWQ zOWKqsM3LToH|SrL|ML>~+56>qb6$i<06&UkHoD$L?}L9fZX#OOw#>u$cf@0d{$*|Z z!%+sDXPotd!)zVhC1xselSd9ft6*;2$^R9a0f zt`L0J-yJeLw4!Iz=CPCww>Ysj{i$nOGR|3X2x_-!@Pf`6z{Tg23{%cT!tzraAuj6- zt{>k9RPOrH_Y%I)&fXIXY)`@S>=%Ml(dq&w@sWG_A|8rn4#4R`15lQapcgxI!Q?p( zII+4t)|h6a{k1Z9DeZ*2(l+3+jah=u;cK`F6W!_MH{IaX<;NhS89{YYyYiH_E#rb# zZ|C)KyC_Im*b`&jpBP^AzebC!U%_ZDjs}hFfS1*Wg7pC>+N;ZC93Y5;E!k~Zbzlt` z4eCKEDq5FZS9;1l6(0=iJaoZLO#qLxdt$F1b-W>gPwBn%J@^d8d43Vekfy4~4tDMg zD%(|nFA;@F6@9osxntCM#(dOUz8e<@Dv|cpmYntDhj>}@6I@@CgQMH$!lQ1V=pkis za^5Tw+K4~IB{6gGYmeh#JVISC>ZJ=zh<(N5tx4eSS#3go-XUdduq)DlNO)EJ~?+_Cpd8f)TEjAu_RFKmyDb5t>DF{?#8V8yEvlG2D^0`fT?^% z9Hr0)E*)--Cx)itPJ>u(myH)EoU#UN%i9sdqC`$Vtrf2Go&=v>C(^uF7o5H1FdCU? zksA-@-~&M{aEx5FuwlBqnR)*`r2q>E$wR;mV!3 zZqrt9y?UDV9OF+PjmUtI9BtS<@ha$)x`4QqEWE08#jz)sW9`Iiy!rM=>BsssIMg-? 
zrmVY-`8HytrfvcbuDgnxBvR<$C^tcISrL`YUx!;&Gcme%C(hpF758+_NI}A#uK3lh zGs3f5oUpAnDX|*>MQ5&X`svz`J+vLXJQj>M4u8hu-o-HH{1fzWYmG-j*TQtN2Sc~N zgZ$2WuZYiZV4PsIC`gxuT-I3a=77p=DwqCTA5wo3RPBkGBT+-13AG6`|nyY9|D3d<~VX8={fL(@>m@+B##o!xn-P(~8@iUxzke9Mz69){BShd1vW}@(%d+dLb&B z+(Vf;FKL{Z0c+Lz5Zcz{G}!DO)MV3bL6;7nX)llG;Aqy0y_Rw0&L6WNb;0kkd|xmq za{J*yvKrFXo4{#yIp*1$B7462G~L;$H6))iAl273S=Fi|a7spx z>GxKLl&|Yy-?buKvRfau>jy#c(yq9XAB*MI(O|ly3OI*4E?DLuc#N>epf0^hjK>TJ zzc`gd-dHQ>e&Y+Bb}fc&_|%`d%dViU_2bcduLdmKWDTI+gZIJ04F;7ZgR%cj!JRL6 zP-o3lTpuwE=n{E$X8js)yFLIOs?A3Q^&HUMFpXT#PkJ#AO%d81ab0)Qqpx zu1h(V_nC(p$-D5}z@uE-qDR;z8DYwO8MylJ0TiD-25J|?V8++!+{Je4U{W`gh6lgn zl=mORi{s8hh;ADw+A)u-N{XdbqbDQZsXb5G%nT{%l59jl22Wzaei)!U z8ss1A<7j0LGW&#crrz=#-8cuQY(Bxs9Fd3aq37|%5lien+n86a*OQeU>qU2s9)oVX zv+Zv)Dd2gj3i%pp9=x)R%{Un+jNt0zp@pOKN zPE0p9kqaqqPmWL658*ct@@90jf&RG@*^twA4A!oOHa$Y{5Y2$aBjeC}Wm|T*T!ZDV zv*VumO{b1u(_#C4X<~Qi9Xe|b6f7T-%)N7MkK)s|qp!0ns;fnS)wmUcd&S~JZE-G6 zcdCRqJSR9ft~EIs*Awm!9*1;mCwOGE2i|mg1@q2j!ItD5bke#?se;1Y_K zhWEktVkfNMpM~ewe#OI;ZE=KIHRcD|LZ2XC*yJ-5AM+QXV`?$Cc*Q!*pUfA?TIcfY zUFXuVBhKKdE;(S_ZZI6t><4$V_re1WIXbkJ1<$sFfM@cd0@L~|1GNtA;lS-SFmAyk z2-toF9CXwmRcZuOosWRKv&Gx8=xb--hVF+7c{g=lE_nVvtQ01wT> zv6t~xY-jTY#P2PPp+1i$QJx%}z!!i`p*azAv(rUR{^U$Z7?Z-ynco95411A@UHXw$8pVP!r#z|qXG^r1n-8f) z_86LEOqRJM@aA7E!GjycNT(g0X(vB3*dVS>-*9a)=$tCcTWrG0Ld@Zbn;*WYiJ}+j zK|E)>0o7j2!cuW>+JPrY&JUkReBV5VYkNH4jPh#yOowCX-W&MDlX3g*xw5fx;%xh| z5L)k43_F%4La0F@CV$o-2|4|7%ezD@Sf3B(tc)gBc7^@&fs}lR1&@!*;99~%kWtwM z674Bnt5Xfm;XUA0PIp)llK^#3&cHyQLAYk-G4xdFhy6zT;q*I+pw)dQjH)Vz-3Ei< z=!-f!IVqA$x==%lzI=rv)6#L}D<8U9?II@#uBY;}idN+BgX)hZoL0Z_m{Bqk;`gq^ z$~$>FYpf&HJC}&Z*La|qUR&%q-hi{Lk_YUiLuV!UVZ3A~n15dq z^9BvX%?n>~q3=sz5Y|Uk zg0(>w#@;i;wD1CSg;rR;;|3mdd&8}EYmaM$FK}D=bPTl0;ubvV2ErXzXnRjXj2NCq zvo$0kpQiwqj+nsV!zPe^WLZOwSRZ(?dq3BubPO$cmWXo=5(H!9uYg_~d4YHOeH?Y_ zK7F?$7c%oNbDDMTFbgi@>Dxt6EvLkW9#J5-*W`n-;5e^rjW`<>Bu=Ua1;dCN;SfkC z(jgW*aChN7oY5}pTbG{Kd(euhC#*b^aj6>R%Wjt!f-uB7E<-Kx%=Pgb`ZBJq6 zSHDBSq=rN zJy0P<0mJs>z|FooTvXpxc<NHm9V@jKk( 
zeX?ljy%%8GI3x}YqcLzk{N6ju^K=#sZ6&v83f4bnf+IYC%DGrt?O0Ypa zYB|wsnvqmqCT? z)JVjc5++3G^NCs%MsND9UmP$eaX zr^6J9y-yfDe?J`poF&PyZZc%S_ARJfH5w!{&(rjeg}nb&eif;fASwQ zZ)@-TCbqMhx&J-?vY2NRqR8*`-JpL}{?AL`Xa40Z*YAhkGV4intVHAw{#WB$GY&TO z6Ik3-{v&x?-5XGUmbcX$!TQ?|@wJR@eFyt%Z0o!5(f5OIsEOinF3wG$=w|u+BmDj| zd1il&#r;O&fABb5f_Jmfz7ync?WW6bWc%-I_&xh)?$y-$FZ=iJd`!`GJSTNcxk7 zBxsNZe~emJ67#G(9=lXQy!7>0;tXy6t7pSl_XGA&Kja3?bJ$Lf`R`~ruNDO%Pli#O z1v6O`Ye%?{eMC4g7G9D*nXQe>*?J&CjD8I-!MLI=*y;&+(hD3n?@o_tn$ftSf5 zrdl-6sQaTt+(UJi~(V%k=rrFGm0tTVlm@N#XYUq5S&S>MUqSD>PrTgSUzs z1igf7g}rtCNS8Y^IYU`**0b;o%>b7y5kJN=(RpR`B!*ly_BNZMh+&QbnJgOY9;H4JhVY ze{$eYm|9a3oxh#9_i-V%c|<6mCScyBBZyV_YjU8IJk#hO0T%5Bk~8Nuh=TlCa$HMC zm^eR#&D`jKNh37a7fF=|{2wbMHWZ?Il8M=Q8qW(+5`MhD5g3 zioIF3g;+aI6y`fB3mxlv@&hKR^A(;O3tw!OC0Rj3h-ZIOw#7|Hc+IE{IaN4-TzFZ= zhW0wd+A1pXy-zV{Um8H-MrDAh(wG zgY8H{oGgWNl(WE1QjdsT*hW@wIZK1J&-2%m_JF4Y&x7jXtB|l&7AyB(UuI@{+O%?c&q(9%&ijeD%Y}mQpZODaT>C8R;EYpkjWv`=p z3g~W(Qhf3Kq!A%RipxTwcRPro)V=hHhuMg)hUNsYw%QwRd zs|UJN=l0l4%pi(ws_-Rq z1i5%OLzk#lMhbgg+|CXyU&O$ru47T{IER$KIG-olHM%VD%0$*hW&;iz>x`C;iFAOw z1W~eX1L>hhI5&;*hP{6_{j9i|&0QhOl!SY@$8|HoO?E8lyty-(M^}{G{!-6fO}j$^ zI*ed8+8nF9@*2D7og;ypZgAC>DRlY;QDSeN+C4znC@X0{Ss5o{jQk@B9YeA=W7&)wP58k+^un7(t{Pdz7a)twYoa4??hD76( z3tEO(NT1Su{;4>iUnU+tc%0o&2_{(@BlwkN>#2Q}BuTO}BN5hnNmMdlc+yQ$xZ6sV zt%^Fpj%myj&fN8ot=WAOEbNR}2f1U+ctbf^+-neDtb;ZGs!D5;n(|ptt>H;-uU8`z z49$7ljW%(d-AH1#`WD<+B994geOTKpO?FJ;3KyWXhxCZo&KpCe5u359 zC<<&c+p(RAO2WCP_u|%33qh*%7W49n7J9uO!t8g*u#0*b%xiiHTl09oFnL-gF`4t3 zTE-t?o%F-8-o6jJBb5ljx#!^Q(50kydpLPK|1Qc*P~(4eAI-x02ap=)Cp2_O4cmPw zm(%B0kyE!~$r6>hRTth>rY?-g@L?}r zY-ARFR}#%Ad4B7;XIRHp-oi_IM@atA&j-7I4Pnqa?5HijTjX|uB-InyF5@~wyy4Ed+Yl+;A;o6erOca)4Kr~7bS(gJUeo4 z1HXc@^dVN<<1y3pwZt>h1IQ3#8KKFOR5rP^7uHFskcKD+8LZ>QY-@WPX5LxN&i1GV zu?Qz(GT)etR2V`O#+AYGmjn5-GjiBbXI*CcC6PQmsw`CObsoA&*O8ckK)R1kXS3b< z2xs-PNBwT8tjD;8P*W&m{GKcE4L_gNxR*fw&hBi&@Es&}b2dA;QHxpGrSU%JI}zSj zYmiGu)=lv__-qN~ooF=@_}a^Hg-=i7Dmj`&JL?N$s*TAeCu#1q#~xv1a4~s`>)7^f 
zM?kwW2Z~Kg$Znpgu=PAeE_2*?zND0p*lgX%PpC?Rr!y=`*CQp&Lvt&cke&77QX)4z28_IcxVnYgpDwsm89{*bD6wtVz14j4ma<`&)L5bfnv6o`%g!T?)TZ0$ip&{0+z54&ppJB+TIU@a}1)$=}lU~{xez$oo?9( zRKh-SsrJsolcUUpr!&Lh@(3Y6U9XIoP8q@XGwH*+IeU=Qi3;TO=`SSYX$%u@@Hvlq zV8W^-PZHg2iwJ+dkPx2?rqJF8J`F|UvQe9LKGT-nD!h%x5kugRrvf{+OomO!G~#Ra zdx;N5DU(%GGWaoxec0#(Mzk$e*qVc??BSbr%<@$^`D)*T<&;hlI;%&pfghE4^S16J z1rh^EN7I!C|EA|21Bk&M%-EOhX7vZG@(GTpvp z)|P!FcToy^xVy6v%ANU{>U?o^`nTNATL_Vzi%u2UsTiFIN{PvlAdb@K+>HiPAuzK2=kRtWDu4`c)S zJ%apYV}uS;er(rqd$Q+vI(x6#0j6J7pf^&6kSxO^EbQPkMs_WR+j1w^mGY@<%bOym z;im%}5R(A_#?{A2arx=r$?ZpZ49Y@B5F*bEF#htxd@hx({YF_=2q$UL)_PYOu%Y$1!KOJZojumM@s_ z9A~E62q)yGvEk-v#m~co$u0E(WaX=4Tw2$2ocXY+WS-m#SU0rc`y)C@Y^}92>oaR3 z)XY3V&gg~l4&T-UsW=yKej`o>uHQ)Z^(kY@)g57GxEK=`-$Cw=5dhQ6$Iy-^A;Qgx zkya&47}}3)^^Rl*)Rmb}-;WSgr%3b;XJYP+aZo$54I7f41Lso)um%2NY<7_ZYvXte zd#mN+_|7qKtZ*e0Uo6IYY&eJpdMg>v(T=S)?@6|MajbWx9n%S0NoJ@#;*|S(atZr$ znc0;Z>^#pLRrd~NHD3BmeXBlE;pefjwU$I{Oe8rznq$WHg>-JUDzTDFCSs~d&Pwk zVaz=KoD)&}FFNbNHKd+BF9~B7ejWKI4^k4Rxe48#tYyZD%OQ2XFWWL{70kTXj(PiU zV-E#t{Jc)eq)1X4=gqH1o??5l@{B$9U9f}o+_DQ3_e~(P9Od!aQA_g7#)1W_7IX6D zM_FF%F4*}rfn9BXkAQa-xwfh=gt_^#Fz;xtM8S=%esKjXlcj|tx0#T!EALS8K}Dow z`As~m(v^vQPNHRFK9Z%enegoTL$=T24!Kyb3hiWM$*WymNw$d$QE3%H%+0geBo{GeKa1?3(N|ME_IKe)wR3&zsZ;;FkC1N;#Eeum!1-Bkg z1K&$KP@>IP^o@?=Y+u`_0*Quvj-X3)bsJb!8O;|awA?)3HvT48`Z5`fXZK(uunw&EYop4_q2$Ju6yfN8{rI_?4wVef z?+HuuP1v>@!yw?cI`iu|hX49y4;|B`o z@+NNi&35JY?2p)rCj4Lar}-)W$%>!ZpSjKYBNE5IJ8o4U)zp*7p0(6t5g)(G?vJ3zm0a1HjR)nV*n8K}uy>3es_7L8_|gfy4im1^kc4o< zf@1<$Q0NU?_11D8eZ7D@ZHvRF^#u*1iQw%2p3}N|l3ukD$M@aOf>$dS*nGwee1b!; zEFm6T(#3Jt-$-Sr`H&*8mT84}+8P%kj3_M>S8ys(pC zbfPTF?AigvD+W-vY&p=7%D^tOeQuUSq+a!N{ zU9lFonu>8kc}05E&=|9$6X28S2r8y(j$`Ie0`8$9o+;Xgr}zZw<}IL=Ia+Xfx+hqb zoaDmGO|Wz4Yup6!6z=BpScJO{4d;#%$@2Arq-R9 zi>k69gLQ_$uR&PivJqxQS<~~+{BgNk67)3)0k_lp(5^JDA!oNpu*f@YO+o+E^s9m$;;%9m4yp>x8BJf&TP+l#Yp2>9$~!|YB4o20Zcxxr5z8vpsAV` zFh{!;R-7D#cDKx6*5-~R;chrr;PniRE?@IH0|_-u>Be!X{raz7L5SS^S19^_*2(-LBR=OwMs7zUU1)-vV0 
zvgDHuuuq4N3t~gNllimLG30iYKy$Dqrz*D`9IXejo%Nxxbo4^BbkIceF*Yb|v>X(@ z&(arzwBbR`63p6Dg|5f#(f&*!ZV)WM_71z?=y3}=eSJ4lcEBG3Ixa)oTRq@yow?w3 zt0`>6+g3P3Ya8|`u11M69m31?VmDt`aR$yl#OYxUcktOAOfr;)^%mRF{ZT0EPshQo z>(02!Mvab|norFV{UDbPq`^5!*!@#5*ZI;kw)AxnlsX*X4N6PJ?%P{Kkh&&54eLWV zUSC`=NEeCZbzUE%qtJI!ErZ-oBcv8&Qb0b*rgXV1wQ^ZKH5z8WT!SgehpA?7RTy?j4jp=V@XSefxZ1rJhTT#D*@5*uhj#br<2SLm)OaFw zSf!78GZ(iIGl+`TXI-q@i5O2$oi6xaaUfSGYO~(n2_?6uT9V}IEx2#zF0T4Z z1eh)2@PX`I{M^f$>uk3gllFAss^6$U+}XAGvFkGI-F7vmo>PY1Oomy0Oa*R8IKE!I z85QD}!?T%&n6cn3tUnV9U(GA&D~-vxDsC`zTrr9*GQNqktxgNXhV22x1y9l5`4Vhh z6^A3cOR%aVU#Z!N5pdB-hRaxUk@j4wLY}@JhI(nPINMGIRB#A-Tslcp`?%umV=36% zrz5>n)rTy48VBtcu7h)0{o&|`gIxP0O`dI+J!m>26kfjV040FcWF5<`|M>hK{INSIu3^od8tdj#L!vrdl&H zu>0X4R6MXB?vA~|CVjQTwokfXr#tZ_ugjLfif(6%S_QqPD&x!Hy_+U??8R=*J1rBJ zJuyLk{au$Xt~z7>oUOM+L60bb3!h540nX#KJcKAmHZ zuX)44V&@EQma;VrRMLbUs}i{FzAkj{f)l)#R$H)-HU)<>@=&6>ja(bE4u=JAg&r2} z5ZZS%KGyPv6YF=v-YK0xdi^te*7knG*~Q0P{LviT-cc1tMNH!?wyARVB^m%Rn+$JO zPR7FnZ*vBdZ883R5*<)nMOTfui(Q`f5==C`fqt=fxru`Yf$zH*%zCvOuhkDFQw@`W z@3jIuPDiF{o($*0T+za$6Tb1v;%v?x#{st!v2?Zzl^7cX?k8h0+iwL7=sXQhHsqMk zdi{>>UEg3w=t5|fWyBr68V+wKl8fJPr+M*H*%dOJciNIS0K}*3ny#ehPphk z!-4(Q;63xLxNz1suJo}wJRLCw)a(s-cOGq_bq}2|%tHYNBxR#lQ6O}XdQUTg2LW`c z$EATgVDXDD@Ww9`eIDAvSt6Z6QLC-Ly`zy@yTmkIR9 z@I;X6k_g%lExD5}I#gTE2M-TlhWiI;VaV8>+~&*#y3+PBciNy0CSTX$#79O`x2`4h z$%X*xvuzh@cUHiDS`xI?l=m1aSp*-xUg55-oPZn3%4nN@x2TlrD3~|#zTpG46jZF6 z$bGruOvd^Zf<|cqEgIqorwfnM1M6DBX(w;Y%k`q~pN_!~H(SF@lVZVax}$iNbtt}) zlZVr@;^~t%D@!Cz^EhM0Ffd%T6t#sNc&VEP(C`t9VR9tH{4e`>MlLtGJDbkZS*1Rl zRfsnTHck~NUOvy290&z9lZ8BYsRXboQG;<8l5o<8Ei`Wa2G}-&aVq*ZxskQW5PdhP zWYL+foY5+4s9H1%0}uF6gL@AR)>a$grpY%s`$s}dEZ@l6Ff|?~OX@;y)>Og3;%Iyo zdy59&UB|f(zfI2@rQ)5qTJG-oQ}nL(1{yH@B2Dfp^Kvf-UdUuw2`8D(@z6zgD9yP% ziQ9&IXwaP(G@;i)>{&PfB&OSQN3MEfJ6m7KD}Kpk-VK3AAKp`oy55|R`@RyVrRH$- zYZhG`JDYR7UC!C_ictKVHV%yNpc$MY&bjrHR*j3II_DPio?WtFostW%-MJND)vCc} z%e7;hKO2GTr$m8*Yin-bEYD9KF+y!|(yP&&bHB1?}2mPrloPF6FZcFEvhHE-jX>e4v17ZPmc(O-jSMGmq0qxz6j{M;sFT^FvzGJHcbwtW1hdK69VRA 
zT8$}-u#&`t_ySOken@%Jai}xhoqWA_o^u?+UugtpuwTX_fFta~)&U3(n zL#$BakR_MNKM!&mUC9>D0}#}+VSddy$;nz4TWu+UT~$X z1|*iJH+&bpBSsyRArf0s@p3^i5m#V33I#A}mZv~QeFmJ*E5W@- z?a9!-!E|HW?yy>36^CCLK!o{o(X8k?{k%LH*kEbA7}O40PfDQr=Tty)yAB2?+d$^b z_HeOw6ppU1ecEGTSh>RzH~Tl7w;$6U11smkqzy|sUJXGHosU#?s47v_ z909SWo3U%6D`<|J4crY+Hs!`f=vZ-$rWl`P2QRlpC!1I-sEdW-p*!KC*-5-^FdV9O z214+tICw56pr>kb;X|MCDAQ&-_Hs?8i{_rDBTU}YkT$ynM-@Wwv)AJ54T-&`Hd6PHk()%_c_-K7BRdUS~#F!6ZE+!vf%R@cZ ze`;GA?Dhqu54B_4E`7o0^Cjs-=K$`b_z)KQE*j=c>qoP*BT?<@OJ3aNI_|2b1@|`l zG-bW4gZv%ea`dHT!I<;Trvsn!ZGHZ10K^;RXV&Dx>U zr(@h57ayGW={jxeF`3b4r|>@)w+iH%JS`#LIb42wzTuB~F05Nx6WdwM-2a|`IU%zN zQRH`OzU^1>Cy~I<{L8}B?*-8Elu~1CtMMoD2mh<*!a_DT(MfCW@BjBXtG~v!elvnU zcw9?;N0Vg4{wv~I_32IAc5AjLzh`%@ZEM2+Wp|pN@}I2uncdNA)*q2L{@rn{bIp1y zvSTgvSj5M#@_PxijBBYEeh+V16U}pBT1&{EyT1O*0FYEY2CEmG=giW_;HnBK_R89V zEP7sp(;ssrLaPUiFp1zw?kkf6!Sf*_^fU=e-h%y~YZA|W(^=^0c|=Oti^U{`;}N$6 z(ldKG>0)+-vp*zBrY))>qi=8IBzDAOf-Ax!lVO~)`EF8mH;E0E{lt~Kw_`SbnnK9` zif=Lb~oJQuE&lU4rXgNS(3z$C2Wk!NWSvD+hpuC zL$2R9)lY}9DQ>>J$%!ACp8j68%%Lwi zgezB4eU@@?QksDT#DEi??PvQ}_Ef@Vc=K zo2x8GbVjXZvA6Tdl>`0Bs#)vs&EWwUe9(%_p8Epd_1gi9-U4wH3TfYlQ&|2tV>pX= zJ;7P2G9ZIYiK)CFX`OGz!p+>tsF9W^n`%PJw({7@B|K8O=q0R}Wr$htd$P#IGug?> zQl$2BBw;7*FiT-PoZb8oHp)9=d&{p_v2hxEVYd(F3%Zjm|IzH!VhiRQJdNdc&Bl)N zk73%PDTdQ4rOC9mrfgb6oKwDJ4HSB(LvBN6`?{LY~<0+yw)9h3El4i29MQbhffTJi3P^| ze&v(cjoJRpcjnv@n}=Vp+ZK7|_ofS7?5W3ew^S1S4LwNi|7-8r<7&>fKPh@pB9tB~ zq(o1T3Fo`ksYc2u&q;X{8j3vfi1L^o>YMPSq z9Jz{tuO5HtEC9!i z2k5C5RjOv!pN0h3qM>m8E43+!Sfv#)eeWHm&d>Q&xKV~ajZLNP-x$(GOZKw!y#v@B zvW3Te&+AHWe&w;p6pyDDv&COy=ciVQ$N09@jA-ohyAd2$yd+p}Px%FwF1? 
zT)ryIA=r{ec4Y|n=tY{NM?n~C_F^ht%?g2@#-rh$-*I|+zc7E=41Jov>on?|=Ahra z3-G99FFME-vjsbXP$kNgjGL>C0XYe@Gb9{yCkIj68@e#@RVpfOW6^TeJTi9D6(&8y z58WHvCn!}9Btu?2B}@8cv#E2VFm$sE4V)PS>f;_06%$3;bB_WYTsMsicH=>+5IZy8*9+)oCS2!FO|%~ zo_ciS@K&NLFv@pcky}dF#-?x%x4~oDN%R3}9Kb6)uts+?7N^Sh6Qf1~L44h*w zP#L(Kj@3vatv+>7W>7>uFTZ7FotMJor&F-{U@=ZM9SJJ!>SWl{RkVffqT_15C5DB` z*xY`Ec1#ocmNxORKJWqgdH)D(T(uv6JQq*0glnM+{`=`vcVql6sg}6iNdj)OEN^By zgGsgncxyIn!qx7pDAsmhs_Pku^*9c5XDp*LXB+bLl-*dr6eoNJGSo@mO3?k+DtOuc z3m95%WlwKQfTzZ)_@?3n>HW$a=Qt|SY#2aK8Jl1e8%PEFgXm2!dz7)&!0~}+SugX2 zFex^OTrz57b>z>(dL2)wFWspf5b-$D#D42)NRA85Cmj*eG@pUb5It!qoh+ zm$nSWYfhl4s(>8lF{t!y8tL|MYC)}67!+CGfwAXrGK!HiXy(R5xc%lKY%#mR1}l%G zCPH}_H%gxJSNr4XqDlFcd$OLU(Du_30pe@hK3VM4yBddRmqLWuu)8{TWFkaw~%iS|*>B zrYDVOmvzI+`K9D=`d(5KH3Il8Y0$_k1HZ++A=2FhZ7bDKwvQTaY?LLzVRwjmp)1Pf zo`I^wZJ<_r6%L&=p{vKp(L;hMa9~X|;Z&Rfn3_hunec#7ysk^S&ss5_dvz$?GZ|Um z8^!Q?_IzfFK9|}Ayd>=n%eYRf8`$l)2h+2>6n0^NJ$Ao!jP0rXigbLR4(EfOgZz#W zB(o}qQ5o+}+G4bs!cti%b?eWl+sNW+$4vG{aL|O&d9B=M8Ds+A#~%(2a)3vwk?gig zM~2&N6eidz(?){}#92WbyBixaVPhSc7KwU^-xuxC1M*MRo3 zI+)}P_VLMJwSqygboJj3_JNEd-gH#wK zM1C+BIYH0lKIlKHU`;0_vw^O@bfSeXqvd2p&0F;7t+!F+Sik@f`l2YWT9dxqcL5TAZwyF;|A^XkbbF_4qc#2S_4BMUw0y-rnQUc2iMUa zZr3nykrDT;qV>7i^N^l68AArxxihV4-H_ul2)z#~5gE<9q^?$z9yjr#yviCG<_7X z`Acs^YcjGXcL!Cs}SaAQ!5{*k0{R z>I7RsyVVfG=kLOO_Y)wt!wR?NxHI)v7BQP|UuO<&d`9Xu&8X6n4JfkWI# zy&)w2HVN)~gN-~4Xkgn)v@9+%&*C&NN;@31g}x%*V@*_-^CR~BXY8$g^-!MK!tOS1 zAf-O1Fm_ZnWGePAcrA4e76+;^Qw^SQ`|4X!Kb1yC?a^a6-Z7S1jc#PiXGPQErIyHl zu!Y7pDAC4)xeU4I4pDj!C@-;)=-gP1ezF76-=QxxaEN3TZoPrm{S)AUq7*nfg|e;E zy0k=n4AE>eqX~I2sB7;*TZipnvc5hCP2LCSnx9qZtht_G_w`w}rC}mNM;nulou2Tt zPdBPrw+GHhE3;Sb-s2AOS0qQ%LSav9BF`xF5U(iLNO0{`tiZD@o|Ygru1L|^2^eKSVrOOT@ct1B?8c^K8n9$4T$_!w@uHnq& z>*y`?M-^|wb(S+}c2*d~SO-xx`D7C3JdsW~u!AOy(A=fH3y=GDU&IagmE>StYI8qh9=Z!68Dx=P`l2Dc$Eo&LrK1xfflag zk7PMjYr*c!6^K4E1qVMoMvmOtgu2t#lcL~O;uW@w+-}Zgj|?I!6sc}3vS zza7I)wHi`iUkM^Ga^RX6OK$?qh&IaTy(rxGzZnL-W=_3WLqyP0z90XWlH1u=dgoAn?Mo!WB1 
zu*n`%cGtjRjay6Q)o{$jx6y%sKrDYk0G}4vDX|hR}O0}5%Jx@ZftrT~* z)q>H0L(Ik@y4(nN3y?mw8|Cjx)1iIJNkw}<`uY4?FaBTqwK)G`zC#^B*SeB;-RZBr z%JkRk{ioKKV@3G~#Ut_?c?91ViPwYwRlkx@SZcNZe25`BjQth zzON+y$Il=00a4Xr(Rh!392ejrBKthxQ~S(zvR&#5VKU*uG z(w8M7`4RK`@Ak(n5Xr6hA(ND2F&|&xha({Ak2`VU!*WSX{G3lKkSzIY;~(1>wf6(^ zsXWC02YZ5DBQ2hZF6?7_;`1M!5Gjt#`~L!6qredcqjZI%loWTdl`P`!_w0*Bc^Bb_ zy%pX31^m~KfTVp9%acLThh6{F|4`nCWdCcdOD}fkB#Z7a&Fi`l+ZV~aOVH`kxipAz%^k4zMjzyDtT z_K)>;*Gx*j7l_AS=N)sxj~M^sdfZ>6$Nk^eC;s|ak9&xVJ`4QRdqHw(SM!3hF8F(T z+y6=zo!n>P`b#5!wh@rj+d3i!>qPP+=J((IUf?5=Tk-QMDaT?yzQ7MhK=Qqy28_b8Dp`D^1J+n0A%?Vol{ESF;V$L$G+_rBuyR<-Nu-S@}##OI%d|ADK&1NeE} A?*IS* From 6aca7c5b5d3eeee9f3015682463169522ea230ca Mon Sep 17 00:00:00 2001 From: akashvelu Date: Mon, 13 Jul 2020 10:36:14 -0700 Subject: [PATCH 57/57] Revert model architecture and # rollouts to previous defaults --- examples/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/train.py b/examples/train.py index 16cfa5c59..b669dc59f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -71,7 +71,7 @@ def parse_args(args): '--checkpoint_freq', type=int, default=20, help='How often to checkpoint.') parser.add_argument( - '--num_rollouts', type=int, default=20, + '--num_rollouts', type=int, default=1, help='How many rollouts are in a training batch') parser.add_argument( '--rollout_size', type=int, default=1000, @@ -192,7 +192,7 @@ def setup_exps_rllib(flow_params, config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [32, 32, 32]}) + config["model"].update({"fcnet_hiddens": [32, 32]}) config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.995 # discount rate config["use_gae"] = True