storage.go
package node

import (
	blockstore "github.com/ipfs/boxo/blockstore"
	"github.com/ipfs/boxo/filestore"
	"github.com/ipfs/boxo/provider"
	"github.com/ipfs/go-datastore"
	config "github.com/ipfs/kubo/config"
	"github.com/ipfs/kubo/core/node/helpers"
	"github.com/ipfs/kubo/repo"
	"github.com/ipfs/kubo/thirdparty/verifbs"
	"go.uber.org/fx"
)

// RepoConfig loads configuration from the repo
func RepoConfig(repo repo.Repo) (*config.Config, error) {
	cfg, err := repo.Config()
	return cfg, err
}

// Datastore provides the datastore
func Datastore(repo repo.Repo) datastore.Datastore {
	return repo.Datastore()
}

// BaseBlocks is the lower level blockstore without GC or Filestore layers
type BaseBlocks blockstore.Blockstore

// BaseBlockstoreCtor creates a cached blockstore backed by the repo datastore
func BaseBlockstoreCtor(
	cacheOpts blockstore.CacheOpts,
	hashOnRead bool,
	writeThrough bool,
	providingStrategy string,
) func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) {
	return func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) {
		opts := []blockstore.Option{blockstore.WriteThrough(writeThrough)}

		// Blockstore providing integration:
		// When the strategy includes "all", the blockstore directly provides blocks as they are Put.
		// Important: Provide calls from the blockstore are intentionally BLOCKING.
		// The Provider implementation (not the blockstore) should handle concurrency/queuing.
		// This avoids spawning unbounded goroutines for concurrent block additions.
		strategyFlag := config.ParseProvideStrategy(providingStrategy)
		if strategyFlag&config.ProvideStrategyAll != 0 {
			opts = append(opts, blockstore.Provider(prov))
		}

		bs = blockstore.NewBlockstore(
			repo.Datastore(),
			opts...,
		)

		// hash security: verifbs rejects blocks whose CIDs use insecure or disallowed hash functions
		bs = &verifbs.VerifBS{Blockstore: bs}

		bs, err = blockstore.CachedBlockstore(helpers.LifecycleCtx(mctx, lc), bs, cacheOpts)
		if err != nil {
			return nil, err
		}

		bs = blockstore.NewIdStore(bs)

		if hashOnRead {
			bs = &blockstore.ValidatingBlockstore{Blockstore: bs}
		}

		return
	}
}

// GcBlockstoreCtor wraps the base blockstore with a GC locking layer
func GcBlockstoreCtor(bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore) {
	gclocker = blockstore.NewGCLocker()
	gcbs = blockstore.NewGCBlockstore(bb, gclocker)

	bs = gcbs
	return
}

// FilestoreBlockstoreCtor wraps GcBlockstore and adds Filestore support
func FilestoreBlockstoreCtor(
	providingStrategy string,
) func(repo repo.Repo, bb BaseBlocks, prov DHTProvider) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
	return func(repo repo.Repo, bb BaseBlocks, prov DHTProvider) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
		gclocker = blockstore.NewGCLocker()

		var fstoreProv provider.MultihashProvider
		strategyFlag := config.ParseProvideStrategy(providingStrategy)
		if strategyFlag&config.ProvideStrategyAll != 0 {
			fstoreProv = prov
		}

		fstore = filestore.NewFilestore(bb, repo.FileManager(), fstoreProv)
		gcbs = blockstore.NewGCBlockstore(fstore, gclocker)

		// hash security: verifbs rejects blocks whose CIDs use insecure or disallowed hash functions
		gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs}

		bs = gcbs
		return
	}
}
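
// exampleStorageWiring is a hypothetical sketch (not part of kubo) of how the
// constructors in this file could be registered with fx so downstream code can
// depend on BaseBlocks, a GCBlockstore, and a plain Blockstore. The cache
// options, the hashOnRead/writeThrough flags, and the "all" provide strategy
// are illustrative assumptions; kubo derives these values from the repo
// config. The resulting option still expects repo.Repo, DHTProvider, and
// helpers.MetricsCtx to be supplied elsewhere in the graph.
func exampleStorageWiring() fx.Option {
	return fx.Options(
		fx.Provide(RepoConfig),
		fx.Provide(Datastore),
		// BaseBlocks: cached, hash-verified blockstore over the repo datastore.
		fx.Provide(BaseBlockstoreCtor(blockstore.DefaultCacheOpts(), false, true, "all")),
		// GC locking over BaseBlocks (no Filestore layer in this sketch).
		fx.Provide(GcBlockstoreCtor),
	)
}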