From 4a04e4dca1b79300cae592bb45598d346322698e Mon Sep 17 00:00:00 2001 From: t-bast Date: Fri, 22 Nov 2024 17:04:30 +0100 Subject: [PATCH] nits --- docs/release-notes/eclair-vnext.md | 6 ++++-- eclair-core/src/main/resources/reference.conf | 9 +++++++-- .../main/scala/fr/acinq/eclair/NodeParams.scala | 15 +++++++++++---- .../src/main/scala/fr/acinq/eclair/Setup.scala | 2 +- .../main/scala/fr/acinq/eclair/db/PeersDb.scala | 7 ++++--- .../src/main/scala/fr/acinq/eclair/io/Peer.scala | 8 +++++++- .../wire/protocol/LightningMessageTypes.scala | 4 ++-- .../eclair/wire/protocol/PeerStorageTlv.scala | 2 +- .../scala/fr/acinq/eclair/TestConstants.scala | 6 ++---- .../test/scala/fr/acinq/eclair/io/PeerSpec.scala | 13 +++++++------ 10 files changed, 46 insertions(+), 26 deletions(-) diff --git a/docs/release-notes/eclair-vnext.md b/docs/release-notes/eclair-vnext.md index 7f400127eb..b4126ebcf7 100644 --- a/docs/release-notes/eclair-vnext.md +++ b/docs/release-notes/eclair-vnext.md @@ -35,8 +35,10 @@ Eclair will not allow remote peers to open new obsolete channels that do not sup ### Peer storage -When `option_provide_storage` is enabled, eclair will store a small for our peers. -This is mostly intended for LSPs that serve mobile wallets to allow the users to restore their channel when they switch phones. +With this release, eclair supports the `option_provide_storage` feature introduced in <https://github.com/lightning/bolts/pull/1110>. +When `option_provide_storage` is enabled, eclair will store a small encrypted backup for peers that request it. +This backup is limited to 65kB and node operators should customize the `eclair.peer-storage` configuration section to match their desired SLAs. +This is mostly intended for LSPs that serve mobile wallets to allow users to restore their channels when they switch phones. 
### API changes diff --git a/eclair-core/src/main/resources/reference.conf b/eclair-core/src/main/resources/reference.conf index 79ae0a35ae..6825a82c97 100644 --- a/eclair-core/src/main/resources/reference.conf +++ b/eclair-core/src/main/resources/reference.conf @@ -73,7 +73,8 @@ eclair { option_dual_fund = optional option_quiesce = optional option_onion_messages = optional - // Enable this if you serve mobile wallets. + // This feature should only be enabled when acting as an LSP for mobile wallets. + // When activating this feature, the peer-storage section should be customized to match desired SLAs. option_provide_storage = disabled option_channel_type = optional option_scid_alias = optional @@ -601,8 +602,12 @@ eclair { peer-storage { // Peer storage is persisted only after this delay to reduce the number of writes when updating it multiple times in a row. + // A small delay may result in a lot of IO write operations, which can have a negative performance impact on the node. + // But using a large delay increases the risk of not storing the latest peer data if you restart your node while writes are pending. write-delay = 1 minute - // Peer storage is kept this long after the last channel has been closed. + // Peer storage is kept this long after the last channel with that peer has been closed. + // A long delay here guarantees that peers who are offline while their channels are closed will be able to get their funds + // back if they restore from seed on a different device after the channels have been closed. 
removal-delay = 30 days } } diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/NodeParams.scala b/eclair-core/src/main/scala/fr/acinq/eclair/NodeParams.scala index 28e7165b19..a71457ad64 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/NodeParams.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/NodeParams.scala @@ -93,8 +93,7 @@ case class NodeParams(nodeKeyManager: NodeKeyManager, willFundRates_opt: Option[LiquidityAds.WillFundRates], peerWakeUpConfig: PeerReadyNotifier.WakeUpConfig, onTheFlyFundingConfig: OnTheFlyFunding.Config, - peerStorageWriteDelay: FiniteDuration, - peerStorageRemovalDelay: FiniteDuration) { + peerStorageConfig: PeerStorageConfig) { val privateKey: Crypto.PrivateKey = nodeKeyManager.nodeKey.privateKey val nodeId: PublicKey = nodeKeyManager.nodeId @@ -158,6 +157,12 @@ case class PaymentFinalExpiryConf(min: CltvExpiryDelta, max: CltvExpiryDelta) { } } +/** + * @param writeDelay delay before writing the peer's data to disk, which avoids doing multiple writes during bursts of storage updates. + * @param removalDelay we keep our peer's data in our DB even after closing all of our channels with them, up to this duration. 
+ */ +case class PeerStorageConfig(writeDelay: FiniteDuration, removalDelay: FiniteDuration) + object NodeParams extends Logging { /** @@ -682,8 +687,10 @@ object NodeParams extends Logging { onTheFlyFundingConfig = OnTheFlyFunding.Config( proposalTimeout = FiniteDuration(config.getDuration("on-the-fly-funding.proposal-timeout").getSeconds, TimeUnit.SECONDS), ), - peerStorageWriteDelay = FiniteDuration(config.getDuration("peer-storage.write-delay").getSeconds, TimeUnit.SECONDS), - peerStorageRemovalDelay = FiniteDuration(config.getDuration("peer-storage.removal-delay").getSeconds, TimeUnit.SECONDS), + peerStorageConfig = PeerStorageConfig( + writeDelay = FiniteDuration(config.getDuration("peer-storage.write-delay").getSeconds, TimeUnit.SECONDS), + removalDelay = FiniteDuration(config.getDuration("peer-storage.removal-delay").getSeconds, TimeUnit.SECONDS), + ) ) } } diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/Setup.scala b/eclair-core/src/main/scala/fr/acinq/eclair/Setup.scala index 5a773f9824..83d8e9f7af 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/Setup.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/Setup.scala @@ -357,7 +357,7 @@ class Setup(val datadir: File, system.deadLetters } _ = if (nodeParams.features.hasFeature(Features.ProvideStorage)) { - system.spawn(Behaviors.supervise(PeerStorageCleaner(nodeParams.db.peers, nodeParams.peerStorageRemovalDelay)).onFailure(typed.SupervisorStrategy.restart), name = "peer-storage-cleaner") + system.spawn(Behaviors.supervise(PeerStorageCleaner(nodeParams.db.peers, nodeParams.peerStorageConfig.removalDelay)).onFailure(typed.SupervisorStrategy.restart), name = "peer-storage-cleaner") } dbEventHandler = system.actorOf(SimpleSupervisor.props(DbEventHandler.props(nodeParams), "db-event-handler", SupervisorStrategy.Resume)) register = system.actorOf(SimpleSupervisor.props(Register.props(), "register", SupervisorStrategy.Resume)) diff --git 
a/eclair-core/src/main/scala/fr/acinq/eclair/db/PeersDb.scala b/eclair-core/src/main/scala/fr/acinq/eclair/db/PeersDb.scala index 82ba3933a3..2515accf1e 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/db/PeersDb.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/db/PeersDb.scala @@ -36,12 +36,13 @@ trait PeersDb { def getRelayFees(nodeId: PublicKey): Option[RelayFees] - // Used only when option_provide_storage is enabled. + /** Update our peer's blob data when [[fr.acinq.eclair.Features.ProvideStorage]] is enabled. */ def updateStorage(nodeId: PublicKey, data: ByteVector): Unit - // Used only when option_provide_storage is enabled. + /** Get the last blob of data we stored for that peer, if [[fr.acinq.eclair.Features.ProvideStorage]] is enabled. */ def getStorage(nodeId: PublicKey): Option[ByteVector] - // Reclaim storage from peers that have had no active channel with us for a while. + /** Remove storage from peers that have had no active channel with us for a while. */ def removePeerStorage(peerRemovedBefore: TimestampSecond): Unit + } diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/io/Peer.scala b/eclair-core/src/main/scala/fr/acinq/eclair/io/Peer.scala index dd25c12538..b414b2fbc1 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/io/Peer.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/io/Peer.scala @@ -528,7 +528,13 @@ class Peer(val nodeParams: NodeParams, stay() case Event(store: PeerStorageStore, d: ConnectedData) if nodeParams.features.hasFeature(Features.ProvideStorage) && d.channels.nonEmpty => - startSingleTimer("peer-storage-write", WritePeerStorage, nodeParams.peerStorageWriteDelay) + // If we don't have any pending write operations, we write the updated peer storage to disk after a delay. + // This ensures that when we receive a burst of peer storage updates, we will rate-limit our IO disk operations. 
+ // If we already have a pending write operation, we must not reset the timer, otherwise we may indefinitely delay + // writing to the DB and may never store our peer's backup. + if (d.peerStorage.written) { + startSingleTimer("peer-storage-write", WritePeerStorage, nodeParams.peerStorageConfig.writeDelay) + } stay() using d.copy(peerStorage = PeerStorage(Some(store.blob), written = false)) case Event(WritePeerStorage, d: ConnectedData) => diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/LightningMessageTypes.scala b/eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/LightningMessageTypes.scala index 1fde180a96..8775612659 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/LightningMessageTypes.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/LightningMessageTypes.scala @@ -602,9 +602,9 @@ case class GossipTimestampFilter(chainHash: BlockHash, firstTimestamp: Timestamp case class OnionMessage(blindingKey: PublicKey, onionRoutingPacket: OnionRoutingPacket, tlvStream: TlvStream[OnionMessageTlv] = TlvStream.empty) extends LightningMessage -case class PeerStorageStore(blob: ByteVector, tlvStream: TlvStream[PeerStorageTlv] = TlvStream.empty) extends LightningMessage +case class PeerStorageStore(blob: ByteVector, tlvStream: TlvStream[PeerStorageTlv] = TlvStream.empty) extends SetupMessage -case class PeerStorageRetrieval(blob: ByteVector, tlvStream: TlvStream[PeerStorageTlv] = TlvStream.empty) extends LightningMessage +case class PeerStorageRetrieval(blob: ByteVector, tlvStream: TlvStream[PeerStorageTlv] = TlvStream.empty) extends SetupMessage // NB: blank lines to minimize merge conflicts diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/PeerStorageTlv.scala b/eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/PeerStorageTlv.scala index 4ebb3fec39..e4701c7868 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/PeerStorageTlv.scala +++ 
b/eclair-core/src/main/scala/fr/acinq/eclair/wire/protocol/PeerStorageTlv.scala @@ -1,5 +1,5 @@ /* - * Copyright 2021 ACINQ SAS + * Copyright 2024 ACINQ SAS * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/TestConstants.scala b/eclair-core/src/test/scala/fr/acinq/eclair/TestConstants.scala index bb7de459d2..7baff3b2aa 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/TestConstants.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/TestConstants.scala @@ -241,8 +241,7 @@ object TestConstants { willFundRates_opt = Some(defaultLiquidityRates), peerWakeUpConfig = PeerReadyNotifier.WakeUpConfig(enabled = false, timeout = 30 seconds), onTheFlyFundingConfig = OnTheFlyFunding.Config(proposalTimeout = 90 seconds), - peerStorageWriteDelay = 5 seconds, - peerStorageRemovalDelay = 10 seconds, + peerStorageConfig = PeerStorageConfig(writeDelay = 5 seconds, removalDelay = 10 seconds) ) def channelParams: LocalParams = OpenChannelInterceptor.makeChannelParams( @@ -419,8 +418,7 @@ object TestConstants { willFundRates_opt = Some(defaultLiquidityRates), peerWakeUpConfig = PeerReadyNotifier.WakeUpConfig(enabled = false, timeout = 30 seconds), onTheFlyFundingConfig = OnTheFlyFunding.Config(proposalTimeout = 90 seconds), - peerStorageWriteDelay = 5 seconds, - peerStorageRemovalDelay = 10 seconds, + peerStorageConfig = PeerStorageConfig(writeDelay = 5 seconds, removalDelay = 10 seconds) ) def channelParams: LocalParams = OpenChannelInterceptor.makeChannelParams( diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/io/PeerSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/io/PeerSpec.scala index 5083244588..57214da1b7 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/io/PeerSpec.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/io/PeerSpec.scala @@ -108,9 +108,9 @@ class PeerSpec extends FixtureSpec { def 
cleanupFixture(fixture: FixtureParam): Unit = fixture.cleanup() - def connect(remoteNodeId: PublicKey, peer: TestFSMRef[Peer.State, Peer.Data, Peer], peerConnection: TestProbe, switchboard: TestProbe, channels: Set[PersistentChannelData] = Set.empty, remoteInit: protocol.Init = protocol.Init(Bob.nodeParams.features.initFeatures()), sendInit: Boolean = true, peerStorage: Option[ByteVector] = None)(implicit system: ActorSystem): Unit = { + def connect(remoteNodeId: PublicKey, peer: TestFSMRef[Peer.State, Peer.Data, Peer], peerConnection: TestProbe, switchboard: TestProbe, channels: Set[PersistentChannelData] = Set.empty, remoteInit: protocol.Init = protocol.Init(Bob.nodeParams.features.initFeatures()), initializePeer: Boolean = true, peerStorage: Option[ByteVector] = None)(implicit system: ActorSystem): Unit = { // let's simulate a connection - if (sendInit) { + if (initializePeer) { switchboard.send(peer, Peer.Init(channels, Map.empty)) } val localInit = protocol.Init(peer.underlyingActor.nodeParams.features.initFeatures()) @@ -764,14 +764,15 @@ class PeerSpec extends FixtureSpec { nodeParams.db.peers.updateStorage(remoteNodeId, hex"abcdef") connect(remoteNodeId, peer, peerConnection1, switchboard, channels = Set(ChannelCodecsSpec.normal), peerStorage = Some(hex"abcdef")) + peerConnection1.send(peer, PeerStorageStore(hex"deadbeef")) peerConnection1.send(peer, PeerStorageStore(hex"0123456789")) peer ! 
Peer.Disconnect(f.remoteNodeId) - connect(remoteNodeId, peer, peerConnection2, switchboard, channels = Set(ChannelCodecsSpec.normal), sendInit = false, peerStorage = Some(hex"0123456789")) + connect(remoteNodeId, peer, peerConnection2, switchboard, channels = Set(ChannelCodecsSpec.normal), initializePeer = false, peerStorage = Some(hex"0123456789")) peerConnection2.send(peer, PeerStorageStore(hex"1111")) - connect(remoteNodeId, peer, peerConnection3, switchboard, channels = Set(ChannelCodecsSpec.normal), sendInit = false, peerStorage = Some(hex"1111")) - assert(nodeParams.db.peers.getStorage(remoteNodeId).contains(hex"abcdef")) // Because of the delayed writes, the original value hasn't been updated yet. + connect(remoteNodeId, peer, peerConnection3, switchboard, channels = Set(ChannelCodecsSpec.normal), initializePeer = false, peerStorage = Some(hex"1111")) + // Because of the delayed writes, we may not have stored the latest value immediately, but we will eventually store it. eventually { - assert(nodeParams.db.peers.getStorage(remoteNodeId).contains(hex"1111")) // Now it is updated. + assert(nodeParams.db.peers.getStorage(remoteNodeId).contains(hex"1111")) } }