Commit 7772fe3

misc: fix lint
1 parent 720f0a0 commit 7772fe3

2 files changed: +71 -27 lines changed


channeldb/graph.go (+49 -27)
@@ -248,51 +248,51 @@ func (c *ChannelGraph) startWorker() {
 }
 
 func (c *ChannelGraph) enqueueWriteOperation(op func() error) {
-	// Send the operation to the channel; non-blocking if the buffer isn't full
+	// Send the operation to the channel.
+	// This is non-blocking as long as the buffer isn't full.
 	select {
 	case c.writeOps <- op:
 		// Operation enqueued successfully
 	default:
-		// Handle the case where the channel is full
-		// Could log an error, block until space is available, or drop the operation
-		// For example, to block until space is available, remove the select and default case
 		log.Warn("writeOps queue is full, operation not enqueued")
 	}
 }
 
-func (g *ChannelGraph) populateGraphCache() {
+func (c *ChannelGraph) populateGraphCache() {
 	startTime := time.Now()
 	log.Debugf("Populating in-memory channel graph, this might " +
 		"take a while...")
 
-	err := g.ForEachNodeCacheable(
+	err := c.ForEachNodeCacheable(
 		func(tx kvdb.RTx, node GraphCacheNode) error {
-			g.graphCache.AddNodeFeatures(node)
+			c.graphCache.AddNodeFeatures(node)
 
 			return nil
 		},
 	)
 	if err != nil {
-		g.graphCacheErr = err
+		c.graphCacheErr = err
 		return
 	}
 
-	err = g.ForEachChannel(func(info *models.ChannelEdgeInfo,
+	err = c.ForEachChannel(func(info *models.ChannelEdgeInfo,
 		policy1, policy2 *models.ChannelEdgePolicy) error {
 
-		g.graphCache.AddChannel(info, policy1, policy2)
+		c.graphCache.AddChannel(info, policy1, policy2)
 
 		return nil
 	})
 	if err != nil {
-		g.graphCacheErr = err
+		c.graphCacheErr = err
 		return
 	}
 
-	log.Debugf("Finished populating in-memory channel graph (took "+
-		"%v, %s)", time.Since(startTime), g.graphCache.Stats())
+	if c.graphCache != nil {
+		log.Debugf("Finished populating in-memory channel graph (took "+
+			"%v, %s)", time.Since(startTime), c.graphCache.Stats())
+	}
 
-	g.graphCacheReady.Store(true)
+	c.graphCacheReady.Store(true)
 }
 
 func (c *ChannelGraph) getGraphCache() (*GraphCache, error) {
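
Note: the hunk above uses ChannelGraph plumbing whose declarations are not part of this diff (the buffered writeOps channel, the graphCacheReady flag, the graphCacheErr field, and the body of startWorker). The following is a minimal, self-contained Go sketch of how such a queue-and-drain worker could look; the type name channelGraphSketch, the buffer size, and the quit channel are assumptions made for illustration, not code from channeldb/graph.go.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// channelGraphSketch mirrors, as an assumption, the ChannelGraph fields the
// diff relies on: a buffered write queue, a readiness flag, an error slot,
// and a quit channel.
type channelGraphSketch struct {
	writeOps        chan func() error
	graphCacheReady atomic.Bool
	graphCacheErr   error
	quit            chan struct{}
}

// startWorker drains queued write operations until quit is closed. This is a
// sketch of the behaviour implied by enqueueWriteOperation, not the real body
// of ChannelGraph.startWorker.
func (c *channelGraphSketch) startWorker() {
	go func() {
		for {
			select {
			case op := <-c.writeOps:
				if err := op(); err != nil {
					fmt.Printf("queued write failed: %v\n", err)
				}
			case <-c.quit:
				return
			}
		}
	}()
}

func main() {
	c := &channelGraphSketch{
		// The buffer size is an assumption; the real capacity is not
		// visible in this diff.
		writeOps: make(chan func() error, 16),
		quit:     make(chan struct{}),
	}
	c.startWorker()

	// Queue a write the same way the diff does while the cache is still
	// being populated.
	c.writeOps <- func() error {
		fmt.Println("replayed queued write")
		return nil
	}

	time.Sleep(100 * time.Millisecond)
	close(c.quit)
}

The design point the diff relies on is that enqueueWriteOperation never blocks: writes that arrive while the cache is still populating are either buffered for the worker to replay or dropped with a warning.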
@@ -302,8 +302,7 @@ func (c *ChannelGraph) getGraphCache() (*GraphCache, error) {
 
 	// Check if graph cache is ready without waiting
 	if !c.graphCacheReady.Load() {
-		// Return an error or a special indicator that cache is not ready
-		// Caller should handle this case appropriately, maybe by queuing write operations
+		// Return an error to show that cache is not ready.
 		return nil, ErrGraphCacheNotReady
 	}
 
@@ -898,16 +897,18 @@ func (c *ChannelGraph) AddLightningNode(node *LightningNode,
 		Update: func(tx kvdb.RwTx) error {
 			graphCache, err := c.getGraphCache()
 			if err != nil {
-				if err == ErrGraphCacheNotReady {
+				if errors.Is(err, ErrGraphCacheNotReady) {
 					// Queue this update function
 					c.enqueueWriteOperation(func() error {
 						return c.AddLightningNode(
 							node,
 							op...,
 						)
 					})
+
 					return nil
 				}
+
 				return err
 			}
 
@@ -917,6 +918,7 @@ func (c *ChannelGraph) AddLightningNode(node *LightningNode,
 			)
 			err := graphCache.AddNode(tx, cNode)
 			if err != nil {
+
 				return err
 			}
 		}
@@ -1001,13 +1003,15 @@ func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
 
 		graphCache, err := c.getGraphCache()
 		if err != nil {
-			if err == ErrGraphCacheNotReady {
+			if errors.Is(err, ErrGraphCacheNotReady) {
 				// Queue this delete function
 				c.enqueueWriteOperation(func() error {
 					return c.DeleteLightningNode(nodePub)
 				})
+
 				return nil
 			}
+
 			return err
 		}
 
@@ -1144,13 +1148,15 @@ func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx,
 
 	graphCache, err := c.getGraphCache()
 	if err != nil {
-		if err == ErrGraphCacheNotReady {
+		if errors.Is(err, ErrGraphCacheNotReady) {
 			// Queue this function
 			c.enqueueWriteOperation(func() error {
 				return c.addChannelEdge(tx, edge)
 			})
+
 			return nil
 		}
+
 		return err
 	}
 
@@ -1359,13 +1365,15 @@ func (c *ChannelGraph) UpdateChannelEdge(edge *models.ChannelEdgeInfo) error {
 
 	graphCache, err := c.getGraphCache()
 	if err != nil {
-		if err == ErrGraphCacheNotReady {
+		if errors.Is(err, ErrGraphCacheNotReady) {
 			// Queue this update function
 			c.enqueueWriteOperation(func() error {
 				return c.UpdateChannelEdge(edge)
 			})
+
 			return nil
 		}
+
 		return err
 	}
 
@@ -1606,13 +1614,15 @@ func (c *ChannelGraph) pruneGraphNodes(nodes kvdb.RwBucket,
 
 	graphCache, err := c.getGraphCache()
 	if err != nil {
-		if err == ErrGraphCacheNotReady {
+		if errors.Is(err, ErrGraphCacheNotReady) {
 			// Queue this prune operation
 			c.enqueueWriteOperation(func() error {
 				return c.pruneGraphNodes(nodes, edgeIndex)
 			})
+
 			return nil
 		}
+
 		return err
 	}
 
@@ -2660,10 +2670,18 @@ func (c *ChannelGraph) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
 
 	graphCache, err := c.getGraphCache()
 	if err != nil {
-		if err == ErrGraphCacheNotReady {
+		if errors.Is(err, ErrGraphCacheNotReady) {
 			// Queue this delete function
 			c.enqueueWriteOperation(func() error {
-				return c.delChannelEdgeUnsafe(edges, edgeIndex, chanIndex, zombieIndex, chanID, isZombie, strictZombie)
+				return c.delChannelEdgeUnsafe(
+					edges,
+					edgeIndex,
+					chanIndex,
+					zombieIndex,
+					chanID,
+					isZombie,
+					strictZombie,
+				)
 			})
 			return nil
 		}
@@ -2929,7 +2947,7 @@ func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy,
 
 	graphCache, err := c.getGraphCache()
 	if err != nil {
-		if err == ErrGraphCacheNotReady {
+		if errors.Is(err, ErrGraphCacheNotReady) {
 			// Queue this update function
 			c.enqueueWriteOperation(func() error {
 				_, err := updateEdgePolicy(tx, edge, c)
@@ -3822,10 +3840,14 @@ func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
 
 	graphCache, err := c.getGraphCache()
 	if err != nil {
-		if err == ErrGraphCacheNotReady {
+		if errors.Is(err, ErrGraphCacheNotReady) {
 			// Queue this function
 			c.enqueueWriteOperation(func() error {
-				return c.MarkEdgeZombie(chanID, pubKey1, pubKey2)
+				return c.MarkEdgeZombie(
+					chanID,
+					pubKey1,
+					pubKey2,
+				)
 			})
 			return nil
 		}
@@ -3918,7 +3940,7 @@ func (c *ChannelGraph) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
 	// won't use it for path finding.
 	graphCache, err := c.getGraphCache()
 	if err != nil {
-		if err == ErrGraphCacheNotReady {
+		if errors.Is(err, ErrGraphCacheNotReady) {
 			// Queue the operation to add the channel back.
 			c.enqueueWriteOperation(func() error {
 				return c.markEdgeLiveUnsafe(tx, chanID)
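
Every call site changed above follows the same shape: if getGraphCache fails with ErrGraphCacheNotReady, the whole operation is wrapped in a closure, handed to enqueueWriteOperation, and the caller is told it succeeded; any other error is returned unchanged. A self-contained sketch of that pattern as a standalone helper (the helper name queueIfNotReady and the slice-backed queue are hypothetical, purely for illustration):

package main

import (
	"errors"
	"fmt"
)

// errGraphCacheNotReady stands in for channeldb's ErrGraphCacheNotReady.
var errGraphCacheNotReady = errors.New("graph cache not ready")

// queueIfNotReady captures the pattern repeated in the hunks above: when the
// cache lookup fails only because the cache is still being populated, queue
// the whole operation for later replay and report success; any other error
// is passed through. Hypothetical helper, not code from this commit.
func queueIfNotReady(err error, enqueue func(func() error), op func() error) error {
	if errors.Is(err, errGraphCacheNotReady) {
		enqueue(op)
		return nil
	}
	return err
}

func main() {
	var queued []func() error
	enqueue := func(op func() error) { queued = append(queued, op) }

	// Simulate a write arriving before the cache is ready.
	err := queueIfNotReady(errGraphCacheNotReady, enqueue, func() error {
		fmt.Println("replaying an AddLightningNode-style write")
		return nil
	})
	fmt.Printf("caller sees err=%v, queued=%d\n", err, len(queued))

	// Replay the queue, as the worker goroutine would.
	for _, op := range queued {
		_ = op()
	}
}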

channeldb/graph_test.go (+22)
@@ -3986,6 +3986,26 @@ func TestGraphCacheForEachNodeChannel(t *testing.T) {
 	require.Nil(t, getSingleChannel())
 }
 
+func waitForGraphCache(graph *ChannelGraph, timeout time.Duration) error {
+	ticker := time.NewTicker(100 * time.Millisecond)
+	defer ticker.Stop()
+
+	timeoutChan := time.After(timeout)
+	for {
+		select {
+		case <-timeoutChan:
+			return fmt.Errorf("timed out waiting for graphCache " +
+				"to be ready")
+		case <-ticker.C:
+			if graphCache, err := graph.getGraphCache(); err != nil {
+				return fmt.Errorf("error getting graphCache: %v", err)
+			} else if graphCache != nil {
+				return nil
+			}
+		}
+	}
+}
+
 // TestGraphLoading asserts that the cache is properly reconstructed after a
 // restart.
 func TestGraphLoading(t *testing.T) {
@@ -4007,6 +4027,7 @@ func TestGraphLoading(t *testing.T) {
 	)
 	require.NoError(t, err)
 
+	waitForGraphCache(graph, 5*time.Second)
 	_, err = graph.getGraphCache()
 	require.NoError(t, err)
 
@@ -4024,6 +4045,7 @@ func TestGraphLoading(t *testing.T) {
 	)
 	require.NoError(t, err)
 
+	waitForGraphCache(graphReloaded, 5*time.Second)
 	_, err = graphReloaded.getGraphCache()
 	require.NoError(t, err)
 
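
The waitForGraphCache helper added in graph_test.go returns an error, and the two call sites above discard it. A possible usage sketch for the same test file (assuming it sits alongside the tests above, where testing, time, testify's require, and *ChannelGraph are already available; the wrapper name ensureGraphCacheReady is hypothetical):

// ensureGraphCacheReady is a hypothetical wrapper around the helper added in
// this commit: it fails the test immediately if the cache does not become
// ready within the timeout, instead of discarding the returned error.
func ensureGraphCacheReady(t *testing.T, graph *ChannelGraph) {
	t.Helper()
	require.NoError(t, waitForGraphCache(graph, 5*time.Second))
}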