# Q_learning.py
import numpy as np
import config


class QLearningAgent:
    """Tabular Q-learning agent.

    Note: in this codebase `epsilon` is the probability of acting greedily
    (exploiting); exploration happens with probability 1 - epsilon.
    """

    def __init__(self, env, alpha=None, gamma=None, epsilon=None, seed=None):
        # Store environment and basic dimensions
        self.env = env
        self.n_states = int(env.n_states)
        self.n_actions = int(env.n_actions)
        # Hyperparameters (fall back to config defaults when not given)
        self.alpha = float(config.LEARNING_RATE if alpha is None else alpha)
        self.gamma = float(config.GAMMA if gamma is None else gamma)
        self.epsilon = float(config.EPSILON if epsilon is None else epsilon)
        # RNG for tie-breaking and exploration
        self.rng = np.random.default_rng(seed)
        # Q-table initialization
        self.Q = np.zeros((self.n_states, self.n_actions), dtype=float)

    def _default_num_episodes(self) -> int:
        # Choose default number of episodes based on grid size
        if int(self.env.grid_size) == 4:
            return int(config.NUM_EPISODES_4)
        return int(config.NUM_EPISODES_10)

    def _default_max_steps(self) -> int:
        # Choose default max steps per episode based on grid size
        if int(self.env.grid_size) == 4:
            return int(config.MAX_STEPS_4)
        return int(config.MAX_STEPS_10)

    def greedy_action(self, state: int) -> int:
        # Greedy action with random tie-breaking
        q = self.Q[state]
        m = np.max(q)
        candidates = np.flatnonzero(q == m)
        return int(self.rng.choice(candidates))

    def epsilon_greedy(self, state: int) -> int:
        # Epsilon-greedy selection: greedy with probability epsilon
        # (see class docstring), uniform random otherwise
        if self.rng.random() < self.epsilon:
            return self.greedy_action(state)
        return int(self.rng.integers(0, self.n_actions))

    def optimal_action(self, state: int) -> int:
        # Deterministic greedy action used during testing
        return self.greedy_action(state)

    def train(self, num_episodes=None, max_steps=None, window=None):
        # Train Q-learning using off-policy TD updates
        num_episodes = int(self._default_num_episodes() if num_episodes is None else num_episodes)
        max_steps = int(self._default_max_steps() if max_steps is None else max_steps)
        window = int(config.ACCURACY_WINDOW if window is None else window)
        # Logs for plotting
        steps = []
        accuracy = []
        avg_rewards = []
        # Sliding-window success counting (goal reached)
        goal_count = 0
        total_reward = 0.0
        for ep in range(1, num_episodes + 1):
            # Reset environment to the fixed start state
            s = int(self.env.reset())
            ep_return = 0.0
            done_flag = False
            for t in range(int(max_steps)):
                # Behavior policy for exploration during training
                a = int(self.epsilon_greedy(s))
                res = self.env.step(a)
                r = float(res.reward)
                s2 = int(res.next_state)
                done_flag = bool(res.done)
                ep_return += r
                # Q-learning target uses the max action value at the next state
                if done_flag:
                    td_target = r
                else:
                    td_target = r + self.gamma * np.max(self.Q[s2])
                # TD update
                td_error = td_target - self.Q[s, a]
                self.Q[s, a] += self.alpha * td_error
                # Move to the next state
                s = s2
                if done_flag:
                    # Record episode length and success when the episode terminates
                    steps.append(t + 1)
                    if r == float(config.REWARD_GOAL):
                        goal_count += 1
                    break
            # If the episode did not terminate, cap the length at max_steps
            if not done_flag:
                steps.append(int(max_steps))
            # Running mean episodic return
            total_reward += ep_return
            avg_rewards.append(total_reward / ep)
            # Record success rate over a fixed window
            if ep % window == 0:
                accuracy.append(goal_count / window)
                goal_count = 0
        return {"Q": self.Q, "steps": steps, "accuracy": accuracy, "avg_return": avg_rewards}

    def test(self, num_test=None, max_steps=None):
        # Evaluate the learned Q-table without exploration noise
        num_test = int(config.NUM_TEST if num_test is None else num_test)
        max_steps = int(self._default_max_steps() if max_steps is None else max_steps)
        test_steps = []
        test_success = []
        test_returns = []
        for _ in range(num_test):
            # Fixed-start evaluation episode
            s = int(self.env.reset())
            ep_return = 0.0
            done_flag = False
            for t in range(int(max_steps)):
                # Select action from the learned greedy policy
                a = int(self.optimal_action(s))
                res = self.env.step(a)
                s = int(res.next_state)
                ep_return += float(res.reward)
                if res.done:
                    # Record terminal episode length and success
                    test_steps.append(t + 1)
                    test_success.append(1 if float(res.reward) == float(config.REWARD_GOAL) else 0)
                    test_returns.append(ep_return)
                    done_flag = True
                    break
            # If no terminal state was reached, mark as failure at max_steps
            if not done_flag:
                test_steps.append(int(max_steps))
                test_success.append(0)
                test_returns.append(ep_return)
        return {
            "success_rate": float(np.mean(test_success)),
            "avg_steps": float(np.mean(test_steps)),
            "std_steps": float(np.std(test_steps)),
            "var_steps": float(np.var(test_steps)),
            "avg_return": float(np.mean(test_returns)),
            "var_return": float(np.var(test_returns)),
            "test_steps": test_steps,
            "test_success": test_success,
            "test_returns": test_returns,
        }
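

# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module):
# trains the agent and prints the evaluation summary. The environment import
# below is an assumption -- substitute whichever grid-world class this project
# provides; it only needs n_states, n_actions, grid_size, reset(), and step()
# (returning an object with reward, next_state, and done), as used above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from environment import GridWorldEnv  # hypothetical module/class name

    env = GridWorldEnv(grid_size=4)
    agent = QLearningAgent(env, seed=0)
    logs = agent.train()
    stats = agent.test()
    print("Success rate:", stats["success_rate"])
    print("Average steps:", stats["avg_steps"])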