-
Notifications
You must be signed in to change notification settings - Fork 23
Expand file tree
/
Copy pathmapreduce.go
More file actions
184 lines (166 loc) · 4.26 KB
/
mapreduce.go
File metadata and controls
184 lines (166 loc) · 4.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
package hamt
import (
"bytes"
"context"
"errors"
"fmt"
"math/rand/v2"
"sync"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
// cacheEntry pairs a cached value with the weight (the number of node
// reads it took to produce it); the weight drives both the "is this
// worth caching" threshold and the eviction heuristic in Add.
type cacheEntry[T any] struct {
	value  T
	weight int
}
// weigthted2RCache is a bounded, mutex-guarded cache keyed by CID that
// uses weighted two-random-choice eviction: when the cache grows past
// cacheSize, two randomly sampled entries compete and the lighter one
// (probabilistically, by weight) is evicted.
// NOTE(review): the type name carries a typo ("weigthted"); left as-is
// because it is referenced throughout this file.
type weigthted2RCache[T any] struct {
	lk        sync.Mutex
	cache     map[cid.Cid]cacheEntry[T]
	cacheSize int // soft maximum number of entries; exceeding it triggers one eviction
}
// newWeighted2RCache constructs an empty weighted two-random-choice cache
// that holds at most cacheSize entries before evicting.
func newWeighted2RCache[T any](cacheSize int) *weigthted2RCache[T] {
	c := weigthted2RCache[T]{
		cacheSize: cacheSize,
		cache:     make(map[cid.Cid]cacheEntry[T]),
	}
	return &c
}
// Get returns the cached entry for k and whether it was present.
// The returned entry is the zero value when k is not cached.
func (c *weigthted2RCache[T]) Get(k cid.Cid) (cacheEntry[T], bool) {
	c.lk.Lock()
	defer c.lk.Unlock()
	// the map's comma-ok result already encodes presence; no need to branch
	v, ok := c.cache[k]
	return v, ok
}
// Add inserts v under k. Entries that cost fewer than six reads are not
// cached at all (recomputing them is cheap). When the insertion pushes the
// cache past cacheSize, one entry is evicted by weighted two-random-choice:
// two distinct random entries are sampled and the heavier one survives with
// probability proportional to its weight.
func (c *weigthted2RCache[T]) Add(k cid.Cid, v cacheEntry[T]) {
	// dont cache nodes that require less than 6 reads
	if v.weight <= 5 {
		return
	}
	c.lk.Lock()
	defer c.lk.Unlock()
	if _, ok := c.cache[k]; ok {
		// overwriting an existing key doesn't grow the cache, so no eviction
		c.cache[k] = v
		return
	}
	c.cache[k] = v
	if len(c.cache) > c.cacheSize {
		// Sample two DISTINCT random entries from a single map iteration
		// (Go randomizes iteration order). The original code used two
		// independent range statements, each taking its own first element,
		// which could sample the SAME entry twice and silently degrade the
		// weighted choice to plain random eviction.
		// This works well for cacheSize > 8.
		var k1, k2 cid.Cid
		var v1, v2 cacheEntry[T]
		i := 0
		for mk, mv := range c.cache {
			switch i {
			case 0:
				k1, v1 = mk, mv
			case 1:
				k2, v2 = mk, mv
			}
			i++
			if i == 2 {
				break
			}
		}
		// evict one of the two, keeping the heavier entry with probability
		// proportional to its share of the combined weight
		r1 := rand.Float64()
		if r1 < float64(v1.weight)/float64(v1.weight+v2.weight) {
			delete(c.cache, k2)
		} else {
			delete(c.cache, k1)
		}
	}
}
// CachedMapReduce is a map reduce implementation that caches intermediate results
// to reduce the number of reads from the underlying store.
// T is the concrete value type stored in the HAMT, PT is its pointer type
// (which must know how to unmarshal itself from CBOR), and U is the
// mapped/reduced result type.
type CachedMapReduce[T any, PT interface {
	*T
	cbg.CBORUnmarshaler
}, U any] struct {
	mapper  func(string, T) (U, error) // translates one HAMT key/value pair into a U
	reducer func([]U) (U, error)       // folds leaf and child U values into a single U
	cache   *weigthted2RCache[U]       // caches reduced results keyed by child-node CID
}
// NewCachedMapReduce creates a new CachedMapReduce instance.
// The mapper translates a key-value pair stored in the HAMT into a chosen U value.
// The reducer reduces the U values into a single U value.
// The cacheSize parameter specifies the maximum number of intermediate results to cache.
func NewCachedMapReduce[T any, PT interface {
	*T
	cbg.CBORUnmarshaler
}, U any](
	mapper func(string, T) (U, error),
	reducer func([]U) (U, error),
	cacheSize int,
) (*CachedMapReduce[T, PT, U], error) {
	cmr := CachedMapReduce[T, PT, U]{
		cache:   newWeighted2RCache[U](cacheSize),
		mapper:  mapper,
		reducer: reducer,
	}
	return &cmr, nil
}
// MapReduce applies the map reduce function to the given root node.
// It loads the HAMT rooted at c from cs, maps every key/value pair,
// and returns the fully reduced result.
func (cmr *CachedMapReduce[T, PT, U]) MapReduce(ctx context.Context, cs cbor.IpldStore, c cid.Cid, options ...Option) (U, error) {
	var zero U
	root, err := LoadNode(ctx, cs, c, options...)
	if err != nil {
		return zero, xerrors.Errorf("failed to load root node: %w", err)
	}
	entry, err := cmr.mapReduceInternal(ctx, root)
	if err != nil {
		return zero, err
	}
	return entry.value, nil
}
// mapReduceInternal recursively maps every KV in the subtree rooted at node
// and reduces the results into one value. The returned weight counts the
// nodes this subtree accounts for (including those summarized by cached
// children) and feeds the cache's eviction heuristic.
func (cmr *CachedMapReduce[T, PT, U]) mapReduceInternal(ctx context.Context, node *Node) (cacheEntry[U], error) {
	var res cacheEntry[U]
	Us := make([]U, 0)
	weight := 1 // this node itself
	for _, p := range node.Pointers {
		// a dirty pointer's in-memory child may diverge from p.Link, so the
		// cached-by-CID results below would be unsound; refuse to iterate.
		// (The original repeated this exact check inside the isShard branch;
		// that second check was unreachable and has been removed.)
		if p.cache != nil && p.dirty {
			return res, errors.New("cannot iterate over a dirty node")
		}
		if p.isShard() {
			linkU, ok := cmr.cache.Get(p.Link)
			if !ok {
				chnd, err := p.loadChild(ctx, node.store, node.bitWidth, node.hash)
				if err != nil {
					return res, fmt.Errorf("loading child: %w", err)
				}
				linkU, err = cmr.mapReduceInternal(ctx, chnd)
				if err != nil {
					return res, fmt.Errorf("map reduce child: %w", err)
				}
				cmr.cache.Add(p.Link, linkU)
			}
			Us = append(Us, linkU.value)
			weight += linkU.weight
		} else {
			// reuse one reader across all KVs to avoid a per-value allocation
			reader := bytes.NewReader(nil)
			for _, v := range p.KVs {
				var pt = PT(new(T))
				reader.Reset(v.Value.Raw)
				if err := pt.UnmarshalCBOR(reader); err != nil {
					return res, fmt.Errorf("failed to unmarshal value: %w", err)
				}
				u, err := cmr.mapper(string(v.Key), *pt)
				if err != nil {
					return res, fmt.Errorf("failed to map value: %w", err)
				}
				Us = append(Us, u)
			}
		}
	}
	resU, err := cmr.reducer(Us)
	if err != nil {
		return res, fmt.Errorf("failed to reduce self values: %w", err)
	}
	return cacheEntry[U]{
		value:  resU,
		weight: weight,
	}, nil
}