Skip to content
Merged

nits #2945

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions docs/release-notes/eclair-vnext.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,10 @@ Eclair will not allow remote peers to open new obsolete channels that do not sup

### Peer storage

When `option_provide_storage` is enabled, eclair will store a small backup for our peers.
This is mostly intended for LSPs that serve mobile wallets to allow the users to restore their channel when they switch phones.
With this release, eclair supports the `option_provide_storage` feature introduced in <https://github.com/lightning/bolts/pull/1110>.
When `option_provide_storage` is enabled, eclair will store a small encrypted backup for peers that request it.
This backup is limited to 65kB and node operators should customize the `eclair.peer-storage` configuration section to match their desired SLAs.
This is mostly intended for LSPs that serve mobile wallets to allow users to restore their channels when they switch phones.

### API changes

Expand Down
9 changes: 7 additions & 2 deletions eclair-core/src/main/resources/reference.conf
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,8 @@ eclair {
option_dual_fund = optional
option_quiesce = optional
option_onion_messages = optional
// Enable this if you serve mobile wallets.
// This feature should only be enabled when acting as an LSP for mobile wallets.
// When activating this feature, the peer-storage section should be customized to match desired SLAs.
option_provide_storage = disabled
option_channel_type = optional
option_scid_alias = optional
Expand Down Expand Up @@ -601,8 +602,12 @@ eclair {

peer-storage {
// Peer storage is persisted only after this delay to reduce the number of writes when updating it multiple times in a row.
// A small delay may result in a lot of IO write operations, which can have a negative performance impact on the node.
// But using a large delay increases the risk of not storing the latest peer data if you restart your node while writes are pending.
write-delay = 1 minute
// Peer storage is kept this long after the last channel has been closed.
// Peer storage is kept this long after the last channel with that peer has been closed.
// A long delay here guarantees that peers who are offline while their channels are closed will be able to get their funds
// back if they restore from seed on a different device after the channels have been closed.
removal-delay = 30 days
}
}
Expand Down
15 changes: 11 additions & 4 deletions eclair-core/src/main/scala/fr/acinq/eclair/NodeParams.scala
Original file line number Diff line number Diff line change
Expand Up @@ -93,8 +93,7 @@ case class NodeParams(nodeKeyManager: NodeKeyManager,
willFundRates_opt: Option[LiquidityAds.WillFundRates],
peerWakeUpConfig: PeerReadyNotifier.WakeUpConfig,
onTheFlyFundingConfig: OnTheFlyFunding.Config,
peerStorageWriteDelay: FiniteDuration,
peerStorageRemovalDelay: FiniteDuration) {
peerStorageConfig: PeerStorageConfig) {
val privateKey: Crypto.PrivateKey = nodeKeyManager.nodeKey.privateKey

val nodeId: PublicKey = nodeKeyManager.nodeId
Expand Down Expand Up @@ -158,6 +157,12 @@ case class PaymentFinalExpiryConf(min: CltvExpiryDelta, max: CltvExpiryDelta) {
}
}

/**
 * Configuration for the `option_provide_storage` feature (peer backup storage).
 *
 * @param writeDelay   how long to wait before persisting a peer's blob to disk: batching writes
 *                     this way avoids hammering the DB during bursts of storage updates.
 * @param removalDelay how long a peer's stored data is retained after the last channel with
 *                     that peer has been closed, before it is garbage-collected.
 */
case class PeerStorageConfig(writeDelay: FiniteDuration,
                             removalDelay: FiniteDuration)

object NodeParams extends Logging {

/**
Expand Down Expand Up @@ -682,8 +687,10 @@ object NodeParams extends Logging {
onTheFlyFundingConfig = OnTheFlyFunding.Config(
proposalTimeout = FiniteDuration(config.getDuration("on-the-fly-funding.proposal-timeout").getSeconds, TimeUnit.SECONDS),
),
peerStorageWriteDelay = FiniteDuration(config.getDuration("peer-storage.write-delay").getSeconds, TimeUnit.SECONDS),
peerStorageRemovalDelay = FiniteDuration(config.getDuration("peer-storage.removal-delay").getSeconds, TimeUnit.SECONDS),
peerStorageConfig = PeerStorageConfig(
writeDelay = FiniteDuration(config.getDuration("peer-storage.write-delay").getSeconds, TimeUnit.SECONDS),
removalDelay = FiniteDuration(config.getDuration("peer-storage.removal-delay").getSeconds, TimeUnit.SECONDS),
)
)
}
}
2 changes: 1 addition & 1 deletion eclair-core/src/main/scala/fr/acinq/eclair/Setup.scala
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,7 @@ class Setup(val datadir: File,
system.deadLetters
}
_ = if (nodeParams.features.hasFeature(Features.ProvideStorage)) {
system.spawn(Behaviors.supervise(PeerStorageCleaner(nodeParams.db.peers, nodeParams.peerStorageRemovalDelay)).onFailure(typed.SupervisorStrategy.restart), name = "peer-storage-cleaner")
system.spawn(Behaviors.supervise(PeerStorageCleaner(nodeParams.db.peers, nodeParams.peerStorageConfig.removalDelay)).onFailure(typed.SupervisorStrategy.restart), name = "peer-storage-cleaner")
}
dbEventHandler = system.actorOf(SimpleSupervisor.props(DbEventHandler.props(nodeParams), "db-event-handler", SupervisorStrategy.Resume))
register = system.actorOf(SimpleSupervisor.props(Register.props(), "register", SupervisorStrategy.Resume))
Expand Down
7 changes: 4 additions & 3 deletions eclair-core/src/main/scala/fr/acinq/eclair/db/PeersDb.scala
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,13 @@ trait PeersDb {

def getRelayFees(nodeId: PublicKey): Option[RelayFees]

// Used only when option_provide_storage is enabled.
/** Update our peer's blob data when [[fr.acinq.eclair.Features.ProvideStorage]] is enabled. */
def updateStorage(nodeId: PublicKey, data: ByteVector): Unit

// Used only when option_provide_storage is enabled.
/** Get the last blob of data we stored for that peer, if [[fr.acinq.eclair.Features.ProvideStorage]] is enabled. */
def getStorage(nodeId: PublicKey): Option[ByteVector]

// Reclaim storage from peers that have had no active channel with us for a while.
/** Remove storage from peers that have had no active channel with us for a while. */
def removePeerStorage(peerRemovedBefore: TimestampSecond): Unit

}
8 changes: 7 additions & 1 deletion eclair-core/src/main/scala/fr/acinq/eclair/io/Peer.scala
Original file line number Diff line number Diff line change
Expand Up @@ -528,7 +528,13 @@ class Peer(val nodeParams: NodeParams,
stay()

case Event(store: PeerStorageStore, d: ConnectedData) if nodeParams.features.hasFeature(Features.ProvideStorage) && d.channels.nonEmpty =>
startSingleTimer("peer-storage-write", WritePeerStorage, nodeParams.peerStorageWriteDelay)
// If we don't have any pending write operations, we write the updated peer storage to disk after a delay.
// This ensures that when we receive a burst of peer storage updates, we will rate-limit our IO disk operations.
// If we already have a pending write operation, we must not reset the timer, otherwise we may indefinitely delay
// writing to the DB and may never store our peer's backup.
if (d.peerStorage.written) {
startSingleTimer("peer-storage-write", WritePeerStorage, nodeParams.peerStorageConfig.writeDelay)
}
stay() using d.copy(peerStorage = PeerStorage(Some(store.blob), written = false))

case Event(WritePeerStorage, d: ConnectedData) =>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -602,9 +602,9 @@ case class GossipTimestampFilter(chainHash: BlockHash, firstTimestamp: Timestamp

case class OnionMessage(blindingKey: PublicKey, onionRoutingPacket: OnionRoutingPacket, tlvStream: TlvStream[OnionMessageTlv] = TlvStream.empty) extends LightningMessage

case class PeerStorageStore(blob: ByteVector, tlvStream: TlvStream[PeerStorageTlv] = TlvStream.empty) extends LightningMessage
case class PeerStorageStore(blob: ByteVector, tlvStream: TlvStream[PeerStorageTlv] = TlvStream.empty) extends SetupMessage

case class PeerStorageRetrieval(blob: ByteVector, tlvStream: TlvStream[PeerStorageTlv] = TlvStream.empty) extends LightningMessage
case class PeerStorageRetrieval(blob: ByteVector, tlvStream: TlvStream[PeerStorageTlv] = TlvStream.empty) extends SetupMessage

// NB: blank lines to minimize merge conflicts

Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright 2021 ACINQ SAS
* Copyright 2024 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -241,8 +241,7 @@ object TestConstants {
willFundRates_opt = Some(defaultLiquidityRates),
peerWakeUpConfig = PeerReadyNotifier.WakeUpConfig(enabled = false, timeout = 30 seconds),
onTheFlyFundingConfig = OnTheFlyFunding.Config(proposalTimeout = 90 seconds),
peerStorageWriteDelay = 5 seconds,
peerStorageRemovalDelay = 10 seconds,
peerStorageConfig = PeerStorageConfig(writeDelay = 5 seconds, removalDelay = 10 seconds)
)

def channelParams: LocalParams = OpenChannelInterceptor.makeChannelParams(
Expand Down Expand Up @@ -419,8 +418,7 @@ object TestConstants {
willFundRates_opt = Some(defaultLiquidityRates),
peerWakeUpConfig = PeerReadyNotifier.WakeUpConfig(enabled = false, timeout = 30 seconds),
onTheFlyFundingConfig = OnTheFlyFunding.Config(proposalTimeout = 90 seconds),
peerStorageWriteDelay = 5 seconds,
peerStorageRemovalDelay = 10 seconds,
peerStorageConfig = PeerStorageConfig(writeDelay = 5 seconds, removalDelay = 10 seconds)
)

def channelParams: LocalParams = OpenChannelInterceptor.makeChannelParams(
Expand Down
13 changes: 7 additions & 6 deletions eclair-core/src/test/scala/fr/acinq/eclair/io/PeerSpec.scala
Original file line number Diff line number Diff line change
Expand Up @@ -108,9 +108,9 @@ class PeerSpec extends FixtureSpec {

def cleanupFixture(fixture: FixtureParam): Unit = fixture.cleanup()

def connect(remoteNodeId: PublicKey, peer: TestFSMRef[Peer.State, Peer.Data, Peer], peerConnection: TestProbe, switchboard: TestProbe, channels: Set[PersistentChannelData] = Set.empty, remoteInit: protocol.Init = protocol.Init(Bob.nodeParams.features.initFeatures()), sendInit: Boolean = true, peerStorage: Option[ByteVector] = None)(implicit system: ActorSystem): Unit = {
def connect(remoteNodeId: PublicKey, peer: TestFSMRef[Peer.State, Peer.Data, Peer], peerConnection: TestProbe, switchboard: TestProbe, channels: Set[PersistentChannelData] = Set.empty, remoteInit: protocol.Init = protocol.Init(Bob.nodeParams.features.initFeatures()), initializePeer: Boolean = true, peerStorage: Option[ByteVector] = None)(implicit system: ActorSystem): Unit = {
// let's simulate a connection
if (sendInit) {
if (initializePeer) {
switchboard.send(peer, Peer.Init(channels, Map.empty))
}
val localInit = protocol.Init(peer.underlyingActor.nodeParams.features.initFeatures())
Expand Down Expand Up @@ -764,14 +764,15 @@ class PeerSpec extends FixtureSpec {

nodeParams.db.peers.updateStorage(remoteNodeId, hex"abcdef")
connect(remoteNodeId, peer, peerConnection1, switchboard, channels = Set(ChannelCodecsSpec.normal), peerStorage = Some(hex"abcdef"))
peerConnection1.send(peer, PeerStorageStore(hex"deadbeef"))
peerConnection1.send(peer, PeerStorageStore(hex"0123456789"))
peer ! Peer.Disconnect(f.remoteNodeId)
connect(remoteNodeId, peer, peerConnection2, switchboard, channels = Set(ChannelCodecsSpec.normal), sendInit = false, peerStorage = Some(hex"0123456789"))
connect(remoteNodeId, peer, peerConnection2, switchboard, channels = Set(ChannelCodecsSpec.normal), initializePeer = false, peerStorage = Some(hex"0123456789"))
peerConnection2.send(peer, PeerStorageStore(hex"1111"))
connect(remoteNodeId, peer, peerConnection3, switchboard, channels = Set(ChannelCodecsSpec.normal), sendInit = false, peerStorage = Some(hex"1111"))
assert(nodeParams.db.peers.getStorage(remoteNodeId).contains(hex"abcdef")) // Because of the delayed writes, the original value hasn't been updated yet.
connect(remoteNodeId, peer, peerConnection3, switchboard, channels = Set(ChannelCodecsSpec.normal), initializePeer = false, peerStorage = Some(hex"1111"))
// Because of the delayed writes, we may not have stored the latest value immediately, but we will eventually store it.
eventually {
assert(nodeParams.db.peers.getStorage(remoteNodeId).contains(hex"1111")) // Now it is updated.
assert(nodeParams.db.peers.getStorage(remoteNodeId).contains(hex"1111"))
}
}

Expand Down
Loading