forked from k4zmu2a/SpaceCadetPinball
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathballbuffer.py
More file actions
87 lines (65 loc) · 3.37 KB
/
ballbuffer.py
File metadata and controls
87 lines (65 loc) · 3.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import torch
import random
from tree import SumTree
from lib import device
class PrioritizedReplayBuffer:
    """Proportional prioritized experience replay (PER, Schaul et al., 2016).

    Transitions are kept in flat pre-allocated tensors, while per-sample
    priorities live in a sum tree so that sampling probability is
    proportional to priority; importance-sampling weights correct for the
    resulting non-uniform distribution.
    """

    def __init__(self, buffer_size, eps=1e-2, alpha=0.5, beta=0.5):
        self.tree = SumTree(size=buffer_size)

        self.eps = eps      # priority floor — keeps every probability non-zero
        self.alpha = alpha  # how strongly priorities skew sampling (0 = uniform)
        self.beta = beta    # importance-sampling correction strength (1 = full)
        self.max_priority = eps  # new transitions enter at the running max; starts at eps

        # Flat storage for (state, action, reward, next_state, done).
        # NOTE(review): state vectors are assumed 7-dimensional here — confirm
        # against the environment's observation size.
        self.state = torch.empty(buffer_size, 7, dtype=torch.float32)
        self.action = torch.empty(buffer_size, dtype=torch.int)
        self.reward = torch.empty(buffer_size, dtype=torch.float32)
        self.next_state = torch.empty(buffer_size, 7, dtype=torch.float32)
        self.done = torch.empty(buffer_size, dtype=torch.int)

        self.count = 0      # next write slot (circular)
        self.real_size = 0  # number of valid entries currently held
        self.size = buffer_size

    def add(self, transition):
        """Insert one (state, action, reward, next_state, done) transition,
        overwriting the oldest slot once the buffer is full."""
        state, action, reward, next_state, done = transition

        # Register the slot in the sum tree at the maximum known priority so
        # fresh transitions are likely to be replayed at least once soon.
        self.tree.add(self.max_priority, self.count)

        slot = self.count
        self.state[slot] = torch.as_tensor(state)
        self.action[slot] = torch.as_tensor(action)
        self.reward[slot] = torch.as_tensor(reward)
        self.next_state[slot] = torch.as_tensor(next_state)
        self.done[slot] = torch.as_tensor(done)

        # Advance the circular write head and the fill counter.
        self.count = (self.count + 1) % self.size
        self.real_size = min(self.size, self.real_size + 1)

    def sample(self, batch_size):
        """Draw a stratified batch.

        Returns (batch, weights, tree_idxs): the transition tensors moved to
        `device`, the normalized importance-sampling weights, and the tree
        node indices needed later by `update_priorities`.
        """
        assert self.real_size >= batch_size, "buffer contains less samples than batch size"

        sample_idxs, tree_idxs = [], []
        priorities = torch.empty(batch_size, 1, dtype=torch.float)

        # Stratified sampling: carve [0, total) into equal segments and draw
        # one cumulative-sum target per segment to reduce sampling variance.
        segment = self.tree.total / batch_size
        for i in range(batch_size):
            target = random.uniform(segment * i, segment * (i + 1))
            # tree_idx addresses the tree node (used for priority updates);
            # sample_idx addresses the transition in the flat buffers.
            tree_idx, priority, sample_idx = self.tree.get(target)
            priorities[i] = priority
            tree_idxs.append(tree_idx)
            sample_idxs.append(sample_idx)

        # Importance-sampling weights w = (N * P(i))^-beta, rescaled so the
        # largest weight is exactly 1 for stable gradient magnitudes.
        probs = priorities / self.tree.total
        weights = (self.real_size * probs) ** -self.beta
        weights = weights / weights.max()

        batch = (
            self.state[sample_idxs].to(device),
            self.action[sample_idxs].to(device),
            self.reward[sample_idxs].to(device),
            self.next_state[sample_idxs].to(device),
            self.done[sample_idxs].to(device)
        )
        return batch, weights, tree_idxs

    def update_priorities(self, data_idxs, priorities):
        """Refresh priorities (typically |TD error|) after a learning step."""
        if isinstance(priorities, torch.Tensor):
            priorities = priorities.detach().cpu().numpy()

        for data_idx, priority in zip(data_idxs, priorities):
            # Shift by eps and sharpen by alpha, per the PER formulation.
            boosted = (priority + self.eps) ** self.alpha
            self.tree.update(data_idx, boosted)
            self.max_priority = max(self.max_priority, boosted)