Skip to content

Commit 90a0849

Browse files
committed
peerconn: fix gomod
1 parent dcfd76b commit 90a0849

File tree

3 files changed

+51
-16
lines changed

3 files changed

+51
-16
lines changed

.golangci.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ linters-settings:
9090
ignored-numbers:
9191
- '0666'
9292
- '0755'
93+
- "2" # ignore two as we want to easily double things.
9394

9495
# List of function patterns to exclude from analysis.
9596
# Values always ignored: `time.Date`

peerconn/conn_manager.go

Lines changed: 44 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,42 @@ const (
5656
// multiAddrConnectionStagger is the number of seconds to wait between
5757
// attempting to connect to a peer with each of its advertised addresses.
5858
multiAddrConnectionStagger = 10 * time.Second
59+
60+
// bootstrapConnTimeout defines the timeout used when connecting to
61+
// peers during bootstrapping.
62+
//
63+
// TODO: tune timeout? 3 seconds might be *too* aggressive but works
64+
// well.
65+
bootstrapConnTimeout = 3 * time.Second
66+
67+
// We'll use a 15 second backoff, and double the time every time an
68+
// epoch fails up to a ceiling.
69+
bootstrapBackoff = 15 * time.Second
70+
71+
// We'll start off by waiting 2 seconds between failed attempts, then
72+
// double each time we fail until we hit the bootstrapBackOffCeiling.
73+
bootstrapDelayTime = 2 * time.Second
74+
75+
// bootstrapBackOffCeiling is the maximum amount of time we'll wait
76+
// between failed attempts to locate a set of bootstrap peers. We'll
77+
// slowly double our query back off each time we encounter a failure.
78+
bootstrapBackOffCeiling = time.Minute * 5
79+
80+
// As we want to be more aggressive, we'll use a lower backoff ceiling
81+
// than the main peer bootstrap logic.
82+
initalBootstrapBackoffCeiling = time.Minute
83+
84+
// Using 1/10 of our duration as a margin, compute a random offset to
85+
// avoid the nodes entering connection cycles.
86+
nextBackoffMargin = 10
87+
88+
// connRetryDuration is the duration to wait before retrying connection
89+
// requests.
90+
connRetryDuration = 5 * time.Second
91+
92+
// outboundNum is the number of outbound network connections to
93+
// maintain.
94+
outboundNum = 100
5995
)
6096

6197
var (
@@ -214,8 +250,8 @@ func (p *PeerConnManager) Start() error {
214250
cmgr, err := connmgr.New(&connmgr.Config{
215251
Listeners: p.Config.Listeners,
216252
OnAccept: p.InboundPeerConnected,
217-
RetryDuration: time.Second * 5,
218-
TargetOutbound: 100,
253+
RetryDuration: connRetryDuration,
254+
TargetOutbound: outboundNum,
219255
Dial: noiseDial(
220256
p.IdentityECDH, p.Config.Net, p.Config.ConnectionTimeout,
221257
),
@@ -452,7 +488,7 @@ func (p *PeerConnManager) PeerBootstrapper(numTargetPeers uint32,
452488
//
453489
// We'll use a 15 second backoff, and double the time every time an
454490
// epoch fails up to a ceiling.
455-
backOff := time.Second * 15
491+
backOff := bootstrapBackoff
456492

457493
// We'll create a new ticker to wake us up every 15 seconds so we can
458494
// see if we've reached our minimum number of peers.
@@ -564,11 +600,6 @@ func (p *PeerConnManager) PeerBootstrapper(numTargetPeers uint32,
564600
}
565601
}
566602

567-
// bootstrapBackOffCeiling is the maximum amount of time we'll wait between
568-
// failed attempts to locate a set of bootstrap peers. We'll slowly double our
569-
// query back off each time we encounter a failure.
570-
const bootstrapBackOffCeiling = time.Minute * 5
571-
572603
// initialPeerBootstrap attempts to continuously connect to peers on startup
573604
// until the target number of peers has been reached. This ensures that nodes
574605
// receive an up to date network view as soon as possible.
@@ -582,11 +613,11 @@ func (p *PeerConnManager) initialPeerBootstrap(ignore map[autopilot.NodeID]struc
582613
// We'll start off by waiting 2 seconds between failed attempts, then
583614
// double each time we fail until we hit the bootstrapBackOffCeiling.
584615
var delaySignal <-chan time.Time
585-
delayTime := time.Second * 2
616+
delayTime := bootstrapDelayTime
586617

587618
// As we want to be more aggressive, we'll use a lower backoff ceiling
588619
// than the main peer bootstrap logic.
589-
backOffCeiling := bootstrapBackOffCeiling / 5
620+
backOffCeiling := initalBootstrapBackoffCeiling
590621

591622
for attempts := 0; ; attempts++ {
592623
// Check if the server has been requested to shut down in order
@@ -665,9 +696,8 @@ func (p *PeerConnManager) initialPeerBootstrap(ignore map[autopilot.NodeID]struc
665696
}
666697
connLog.Errorf("Unable to connect to "+
667698
"%v: %v", addr, err)
668-
// TODO: tune timeout? 3 seconds might be *too*
669-
// aggressive but works well.
670-
case <-time.After(3 * time.Second):
699+
700+
case <-time.After(bootstrapConnTimeout):
671701
connLog.Tracef("Skipping peer %v due "+
672702
"to not establishing a "+
673703
"connection within 3 seconds",
@@ -2146,7 +2176,7 @@ func computeNextBackoff(currBackoff, maxBackoff time.Duration) time.Duration {
21462176

21472177
// Using 1/10 of our duration as a margin, compute a random offset to
21482178
// avoid the nodes entering connection cycles.
2149-
margin := nextBackoff / 10
2179+
margin := nextBackoff / nextBackoffMargin
21502180

21512181
var wiggle big.Int
21522182
wiggle.SetUint64(uint64(margin))

server.go

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2565,6 +2565,8 @@ func (s *server) SubscribeCustomMessages() (*subscribe.Client, error) {
25652565
// createPartialPeerConfig creates a partially filled peer config that will be
25662566
// used by peer conn manager when making new peers.
25672567
func (s *server) createPartialPeerConfig() peer.Config {
2568+
const feeFactor = 1000
2569+
25682570
return peer.Config{
25692571
OutgoingCltvRejectDelta: lncfg.DefaultOutgoingCltvRejectDelta,
25702572
ChanActiveTimeout: s.cfg.ChanEnableTimeout,
@@ -2614,7 +2616,8 @@ func (s *server) createPartialPeerConfig() peer.Config {
26142616
MaxChannelFeeAllocation: s.cfg.MaxChannelFeeAllocation,
26152617
CoopCloseTargetConfs: s.cfg.CoopCloseTargetConfs,
26162618
MaxAnchorsCommitFeeRate: chainfee.SatPerKVByte(
2617-
s.cfg.MaxCommitFeeRateAnchors * 1000).FeePerKWeight(),
2619+
s.cfg.MaxCommitFeeRateAnchors * feeFactor).
2620+
FeePerKWeight(),
26182621
ChannelCommitInterval: s.cfg.ChannelCommitInterval,
26192622
PendingCommitInterval: s.cfg.PendingCommitInterval,
26202623
ChannelCommitBatchSize: s.cfg.ChannelCommitBatchSize,
@@ -2665,9 +2668,10 @@ func (s *server) OpenChannel(
26652668

26662669
// If the fee rate wasn't specified, then we'll use a default
26672670
// confirmation target.
2671+
const defaultConfTarget = 6
26682672
if req.FundingFeePerKw == 0 {
26692673
estimator := s.cc.FeeEstimator
2670-
feeRate, err := estimator.EstimateFeePerKW(6)
2674+
feeRate, err := estimator.EstimateFeePerKW(defaultConfTarget)
26712675
if err != nil {
26722676
req.Err <- err
26732677
return req.Updates, req.Err

0 commit comments

Comments
 (0)