
Commit ee25c22

Merge pull request #8330 from bitromortac/2401-bimodal-improvements
bimodal pathfinding probability improvements
2 parents: 43e822c + 86249fb

File tree: 3 files changed, +170 -54 lines
  docs/release-notes/release-notes-0.19.0.md
  routing/probability_bimodal.go
  routing/probability_bimodal_test.go


docs/release-notes/release-notes-0.19.0.md

Lines changed: 6 additions & 0 deletions
@@ -122,6 +122,12 @@ when running LND with an aux component injected (custom channels).
   address is added to LND using the `ImportTapscript` RPC, LND previously failed
   to perform a cooperative close to that address.
 
+* [Bimodal pathfinding probability
+  improvements](https://github.com/lightningnetwork/lnd/pull/8330). A fallback
+  probability is used if the bimodal model is not applicable. Fixes are added
+  such that the probability is evaluated quicker and to be more accurate in
+  outdated scenarios.
+
 # New Features
 
 * Add support for [archiving channel backup](https://github.com/lightningnetwork/lnd/pull/9232)

routing/probability_bimodal.go

Lines changed: 58 additions & 37 deletions
@@ -416,34 +416,38 @@ func cannotSend(failAmount, capacity lnwire.MilliSatoshi, now,
 
 // primitive computes the indefinite integral of our assumed (normalized)
 // liquidity probability distribution. The distribution of liquidity x here is
-// the function P(x) ~ exp(-x/s) + exp((x-c)/s), i.e., two exponentials residing
-// at the ends of channels. This means that we expect liquidity to be at either
-// side of the channel with capacity c. The s parameter (scale) defines how far
-// the liquidity leaks into the channel. A very low scale assumes completely
-// unbalanced channels, a very high scale assumes a random distribution. More
-// details can be found in
+// the function P(x) ~ exp(-x/s) + exp((x-c)/s) + 1/c, i.e., two exponentials
+// residing at the ends of channels. This means that we expect liquidity to be
+// at either side of the channel with capacity c. The s parameter (scale)
+// defines how far the liquidity leaks into the channel. A very low scale
+// assumes completely unbalanced channels, a very high scale assumes a random
+// distribution. More details can be found in
 // https://github.com/lightningnetwork/lnd/issues/5988#issuecomment-1131234858.
+// Additionally, we add a constant term 1/c to the distribution to avoid
+// normalization issues and to fall back to a uniform distribution should the
+// previous success and fail amounts contradict a bimodal distribution.
 func (p *BimodalEstimator) primitive(c, x float64) float64 {
     s := float64(p.BimodalScaleMsat)
 
     // The indefinite integral of P(x) is given by
-    // Int P(x) dx = H(x) = s * (-e(-x/s) + e((x-c)/s)),
+    // Int P(x) dx = H(x) = s * (-e(-x/s) + e((x-c)/s) + x/(c*s)),
     // and its norm from 0 to c can be computed from it,
-    // norm = [H(x)]_0^c = s * (-e(-c/s) + 1 -(-1 + e(-c/s))).
+    // norm = [H(x)]_0^c = s * (-e(-c/s) + 1 + 1/s -(-1 + e(-c/s))) =
+    // = s * (-2*e(-c/s) + 2 + 1/s).
+    // The prefactors s are left out, as they cancel out in the end.
+    // norm can only become zero, if c is zero, which we sorted out before
+    // calling this method.
     ecs := math.Exp(-c / s)
-    exs := math.Exp(-x / s)
+    norm := -2*ecs + 2 + 1/s
 
     // It would be possible to split the next term and reuse the factors
     // from before, but this can lead to numerical issues with large
     // numbers.
     excs := math.Exp((x - c) / s)
-
-    // norm can only become zero, if c is zero, which we sorted out before
-    // calling this method.
-    norm := -2*ecs + 2
+    exs := math.Exp(-x / s)
 
     // We end up with the primitive function of the normalized P(x).
-    return (-exs + excs) / norm
+    return (-exs + excs + x/(c*s)) / norm
 }
 
 // integral computes the integral of our liquidity distribution from the lower
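
For reference, the model behind the reworked primitive can be written out in equation form; this only restates the in-code comments above, with x the local liquidity, c the channel capacity, s the scale, and 1/c the newly added uniform term:

```latex
P(x) \propto e^{-x/s} + e^{(x-c)/s} + \frac{1}{c}, \qquad 0 \le x \le c
H(x) = \int P(x)\,dx = s\left(-e^{-x/s} + e^{(x-c)/s} + \frac{x}{c\,s}\right)
N = \bigl[H(x)\bigr]_0^c = s\left(2 - 2e^{-c/s} + \frac{1}{s}\right)
```

The function returns H(x)/N with the common prefactor s cancelled, which is why the code computes norm := -2*ecs + 2 + 1/s without it.
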
@@ -484,43 +488,60 @@ func (p *BimodalEstimator) probabilityFormula(capacityMsat, successAmountMsat,
         return 0.0, nil
     }
 
-    // Mission control may have some outdated values, we correct them here.
-    // TODO(bitromortac): there may be better decisions to make in these
-    // cases, e.g., resetting failAmount=cap and successAmount=0.
-
-    // failAmount should be capacity at max.
-    if failAmount > capacity {
-        log.Debugf("Correcting failAmount %v to capacity %v",
-            failAmount, capacity)
+    // The next statement is a safety check against an illogical condition.
+    // We discard the knowledge for the channel in that case since we have
+    // inconsistent data.
+    if failAmount <= successAmount {
+        log.Warnf("Fail amount (%s) is smaller than or equal to the "+
+            "success amount (%s) for capacity (%s)",
+            failAmountMsat, successAmountMsat, capacityMsat)
 
+        successAmount = 0
         failAmount = capacity
     }
 
-    // successAmount should be capacity at max.
-    if successAmount > capacity {
-        log.Debugf("Correcting successAmount %v to capacity %v",
-            successAmount, capacity)
-
-        successAmount = capacity
+    // Mission control may have some outdated values with regard to the
+    // current channel capacity between a node pair. This can happen in case
+    // a large parallel channel was closed or if a channel was downscaled
+    // and can lead to success and/or failure amounts to be out of the range
+    // [0, capacity]. We assume that the liquidity situation of the channel
+    // is similar as before due to flow bias.
+
+    // In case we have a large success we need to correct it to be in the
+    // valid range. We set the success amount close to the capacity, because
+    // we assume to still be able to send. Any possible failure (that must
+    // in this case be larger than the capacity) is corrected as well.
+    if successAmount >= capacity {
+        log.Debugf("Correcting success amount %s and failure amount "+
+            "%s to capacity %s", successAmountMsat,
+            failAmount, capacityMsat)
+
+        // We choose the success amount to be one less than the
+        // capacity, to both fit success and failure amounts into the
+        // capacity range in a consistent manner.
+        successAmount = capacity - 1
+        failAmount = capacity
     }
 
-    // The next statement is a safety check against an illogical condition,
-    // otherwise the renormalization integral would become zero. This may
-    // happen if a large channel gets closed and smaller ones remain, but
-    // it should recover with the time decay.
-    if failAmount <= successAmount {
-        log.Tracef("fail amount (%v) is smaller than or equal the "+
-            "success amount (%v) for capacity (%v)",
-            failAmountMsat, successAmountMsat, capacityMsat)
+    // Having no or only a small success, but a large failure only needs
+    // adjustment of the failure amount.
+    if failAmount > capacity {
+        log.Debugf("Correcting failure amount %s to capacity %s",
+            failAmountMsat, capacityMsat)
 
-        return 0.0, nil
+        failAmount = capacity
     }
 
     // We cannot send more than the fail amount.
     if amount >= failAmount {
         return 0.0, nil
     }
 
+    // We can send the amount if it is smaller than the success amount.
+    if amount <= successAmount {
+        return 1.0, nil
+    }
+
     // The success probability for payment amount a is the integral over the
     // prior distribution P(x), the probability to find liquidity between
     // the amount a and channel capacity c (or failAmount a_f):
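
To see how the corrected primitive feeds into a success probability, here is a minimal, self-contained Go sketch (not the lnd code itself). It mirrors the math above and assumes, based on the expected values in the test file below, that the formula renormalizes the prior as P(X >= a | s_a <= X < f_a) = (H(f_a) - H(a)) / (H(f_a) - H(s_a)); the function and variable names are illustrative only:

```go
package main

import (
	"fmt"
	"math"
)

// primitive mirrors H(x)/N from the diff above: the integral of
// exp(-x/s) + exp((x-c)/s) + 1/c over [0, x], normalized on [0, c].
func primitive(c, s, x float64) float64 {
	ecs := math.Exp(-c / s)
	norm := -2*ecs + 2 + 1/s

	excs := math.Exp((x - c) / s)
	exs := math.Exp(-x / s)

	return (-exs + excs + x/(c*s)) / norm
}

func main() {
	const (
		capacity = 10_000_000_000.0 // msat, as in the test constants
		scale    = 400_000_000.0
		success  = 0.0           // no previous success known
		fail     = capacity      // no previous failure known
		amount   = 400_000_000.0 // smallAmount from the tests
	)

	// Probability that at least `amount` of liquidity is available, given
	// that the balance is known to lie in [success, fail).
	p := (primitive(capacity, scale, fail) - primitive(capacity, scale, amount)) /
		(primitive(capacity, scale, fail) - primitive(capacity, scale, success))

	fmt.Printf("p = %.3f\n", p) // prints p = 0.684, the value expected in the tests
}
```
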

routing/probability_bimodal_test.go

Lines changed: 106 additions & 17 deletions
@@ -11,10 +11,14 @@ import (
 )
 
 const (
-    smallAmount = lnwire.MilliSatoshi(400_000)
-    largeAmount = lnwire.MilliSatoshi(5_000_000)
-    capacity = lnwire.MilliSatoshi(10_000_000)
-    scale = lnwire.MilliSatoshi(400_000)
+    smallAmount = lnwire.MilliSatoshi(400_000_000)
+    largeAmount = lnwire.MilliSatoshi(5_000_000_000)
+    capacity = lnwire.MilliSatoshi(10_000_000_000)
+    scale = lnwire.MilliSatoshi(400_000_000)
+
+    // defaultTolerance is the default absolute tolerance for comparing
+    // probability calculations to expected values.
+    defaultTolerance = 0.001
 )
 
 // TestSuccessProbability tests that we get correct probability estimates for
@@ -25,7 +29,6 @@ func TestSuccessProbability(t *testing.T) {
     tests := []struct {
         name string
         expectedProbability float64
-        tolerance float64
         successAmount lnwire.MilliSatoshi
         failAmount lnwire.MilliSatoshi
         amount lnwire.MilliSatoshi
@@ -78,7 +81,6 @@ func TestSuccessProbability(t *testing.T) {
             failAmount: capacity,
             amount: smallAmount,
             expectedProbability: 0.684,
-            tolerance: 0.001,
         },
         // If we had an unsettled success, we are sure we can send a
         // lower amount.
@@ -110,7 +112,6 @@ func TestSuccessProbability(t *testing.T) {
             failAmount: capacity,
             amount: smallAmount,
             expectedProbability: 0.851,
-            tolerance: 0.001,
         },
         // If we had a large unsettled success before, we know we can
         // send even larger payments with high probability.
@@ -122,7 +123,6 @@ func TestSuccessProbability(t *testing.T) {
             failAmount: capacity,
             amount: largeAmount,
             expectedProbability: 0.998,
-            tolerance: 0.001,
         },
         // If we had a failure before, we can't send with the fail
         // amount.
@@ -151,7 +151,6 @@ func TestSuccessProbability(t *testing.T) {
             failAmount: largeAmount,
             amount: smallAmount,
             expectedProbability: 0.368,
-            tolerance: 0.001,
         },
         // From here on we deal with mixed previous successes and
         // failures.
@@ -183,7 +182,6 @@ func TestSuccessProbability(t *testing.T) {
             successAmount: smallAmount,
             amount: smallAmount + largeAmount/10,
             expectedProbability: 0.287,
-            tolerance: 0.001,
         },
         // We still can't send the fail amount.
         {
@@ -194,22 +192,45 @@ func TestSuccessProbability(t *testing.T) {
             amount: largeAmount,
             expectedProbability: 0.0,
         },
-        // Same success and failure amounts (illogical).
+        // Same success and failure amounts (illogical), which gets
+        // reset to no knowledge.
         {
             name: "previous f/s, same",
             capacity: capacity,
             failAmount: largeAmount,
             successAmount: largeAmount,
             amount: largeAmount,
-            expectedProbability: 0.0,
+            expectedProbability: 0.5,
         },
-        // Higher success than failure amount (illogical).
+        // Higher success than failure amount (illogical), which gets
+        // reset to no knowledge.
         {
-            name: "previous f/s, higher success",
+            name: "previous f/s, illogical",
             capacity: capacity,
             failAmount: smallAmount,
             successAmount: largeAmount,
-            expectedProbability: 0.0,
+            amount: largeAmount,
+            expectedProbability: 0.5,
+        },
+        // Larger success and larger failure than the old capacity are
+        // rescaled to still give a very high success rate.
+        {
+            name: "smaller cap, large success/fail",
+            capacity: capacity,
+            failAmount: 2*capacity + 1,
+            successAmount: 2 * capacity,
+            amount: largeAmount,
+            expectedProbability: 1.0,
+        },
+        // A lower success amount is not rescaled.
+        {
+            name: "smaller cap, large fail",
+            capacity: capacity,
+            successAmount: smallAmount / 2,
+            failAmount: 2 * capacity,
+            amount: smallAmount,
+            // See "previous success, larger amount".
+            expectedProbability: 0.851,
         },
     }

@@ -228,7 +249,7 @@ func TestSuccessProbability(t *testing.T) {
                 test.failAmount, test.amount,
             )
             require.InDelta(t, test.expectedProbability, p,
-                test.tolerance)
+                defaultTolerance)
             require.NoError(t, err)
         })
     }
@@ -244,6 +265,59 @@ func TestSuccessProbability(t *testing.T) {
     })
 }
 
+// TestSmallScale tests that the probability formula works with small scale
+// values.
+func TestSmallScale(t *testing.T) {
+    var (
+        // We use the smallest possible scale value together with a
+        // large capacity. This is an extreme form of a bimodal
+        // distribution.
+        scale lnwire.MilliSatoshi = 1
+        capacity lnwire.MilliSatoshi = 7e+09
+
+        // Success and failure amounts are chosen such that the expected
+        // balance must be somewhere in the middle of the channel, a
+        // value not expected when dealing with a bimodal distribution.
+        // In this case, the bimodal model fails to give good forecasts
+        // due to the numerics of the exponential functions, which get
+        // evaluated to exact zero floats.
+        successAmount lnwire.MilliSatoshi = 1.0e+09
+        failAmount lnwire.MilliSatoshi = 4.0e+09
+    )
+
+    estimator := BimodalEstimator{
+        BimodalConfig: BimodalConfig{BimodalScaleMsat: scale},
+    }
+
+    // An amount that's close to the success amount should have a very high
+    // probability.
+    amtCloseSuccess := successAmount + 1
+    p, err := estimator.probabilityFormula(
+        capacity, successAmount, failAmount, amtCloseSuccess,
+    )
+    require.NoError(t, err)
+    require.InDelta(t, 1.0, p, defaultTolerance)
+
+    // An amount that's close to the fail amount should have a very low
+    // probability.
+    amtCloseFail := failAmount - 1
+    p, err = estimator.probabilityFormula(
+        capacity, successAmount, failAmount, amtCloseFail,
+    )
+    require.NoError(t, err)
+    require.InDelta(t, 0.0, p, defaultTolerance)
+
+    // In the region where the bimodal model doesn't give good forecasts, we
+    // fall back to a uniform model, which interpolates probabilities
+    // linearly.
+    amtLinear := successAmount + (failAmount-successAmount)*1/4
+    p, err = estimator.probabilityFormula(
+        capacity, successAmount, failAmount, amtLinear,
+    )
+    require.NoError(t, err)
+    require.InDelta(t, 0.75, p, defaultTolerance)
+}
+
 // TestIntegral tests certain limits of the probability distribution integral.
 func TestIntegral(t *testing.T) {
     t.Parallel()
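
The "exact zero floats" mentioned in TestSmallScale are ordinary float64 underflow: with a scale of 1 msat, the exponents are on the order of minus one billion, far below the point where math.Exp returns exactly 0, so only the uniform 1/c term carries any information. A tiny standalone illustration (not part of the PR):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// With scale s = 1 msat and liquidity values around 1e9 msat, both
	// exponential terms of the bimodal distribution underflow to exactly
	// zero, leaving only the uniform 1/c term to carry information.
	fmt.Println(math.Exp(-1e9))      // 0
	fmt.Println(math.Exp(1e9 - 7e9)) // 0
}
```
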
@@ -689,9 +763,24 @@ func TestLocalPairProbability(t *testing.T) {
 // FuzzProbability checks that we don't encounter errors related to NaNs.
 func FuzzProbability(f *testing.F) {
     estimator := BimodalEstimator{
-        BimodalConfig: BimodalConfig{BimodalScaleMsat: scale},
+        BimodalConfig: BimodalConfig{BimodalScaleMsat: 400_000},
     }
 
+    // Predefined seed reported in
+    // https://github.com/lightningnetwork/lnd/issues/9085. This test found
+    // a case where we could not compute a normalization factor because we
+    // learned that the balance lies somewhere in the middle of the channel,
+    // a surprising result for the bimodal model, which predicts two
+    // distinct modes at the edges and therefore has numerical issues in the
+    // middle. Additionally, the scale is small with respect to the values
+    // used here.
+    f.Add(
+        uint64(1_000_000_000),
+        uint64(300_000_000),
+        uint64(400_000_000),
+        uint64(300_000_000),
+    )
+
     f.Fuzz(func(t *testing.T, capacity, successAmt, failAmt, amt uint64) {
         if capacity == 0 {
             return
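
For completeness, the new seed can also be replayed without the fuzzing harness. The following is a hypothetical companion test, not part of this PR; it only reuses probabilityFormula and BimodalConfig as they appear in the diff above, and its name and assertions are illustrative. Because the amount equals the success amount, the new early return in probabilityFormula is expected to yield 1.0 here, so the assertions deliberately only check for a valid probability and the absence of an error:

```go
package routing

import (
	"testing"

	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/stretchr/testify/require"
)

// TestFuzzProbabilityRegression replays the fuzz seed added above as a plain
// unit test: the exponential terms underflow to zero for these values, so the
// normalization is only kept finite by the new uniform term.
func TestFuzzProbabilityRegression(t *testing.T) {
	estimator := BimodalEstimator{
		BimodalConfig: BimodalConfig{BimodalScaleMsat: 400_000},
	}

	p, err := estimator.probabilityFormula(
		lnwire.MilliSatoshi(1_000_000_000), // capacity
		lnwire.MilliSatoshi(300_000_000),   // success amount
		lnwire.MilliSatoshi(400_000_000),   // fail amount
		lnwire.MilliSatoshi(300_000_000),   // amount
	)
	require.NoError(t, err)
	require.GreaterOrEqual(t, p, 0.0)
	require.LessOrEqual(t, p, 1.0)
}
```
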
