diff --git a/damus-c/damus-Bridging-Header.h b/damus-c/damus-Bridging-Header.h index 76b546b3e2..565da04bfd 100644 --- a/damus-c/damus-Bridging-Header.h +++ b/damus-c/damus-Bridging-Header.h @@ -8,5 +8,6 @@ #include "wasm.h" #include "nostrscript.h" #include "nostrdb.h" +#include "ndb_negentropy.h" #include "lmdb.h" diff --git a/damus.xcodeproj/project.pbxproj b/damus.xcodeproj/project.pbxproj index 6bb9c181e2..7fb10c4570 100644 --- a/damus.xcodeproj/project.pbxproj +++ b/damus.xcodeproj/project.pbxproj @@ -7,6 +7,7 @@ objects = { /* Begin PBXBuildFile section */ + 0061F28AA1B809DE3B8AEFA6 /* NdbNegentropyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 013E67E6987C2127BC2EC989 /* NdbNegentropyTests.swift */; }; 0E8A4BB72AE4359200065E81 /* NostrFilter+Hashable.swift in Sources */ = {isa = PBXBuildFile; fileRef = 0E8A4BB62AE4359200065E81 /* NostrFilter+Hashable.swift */; }; 2710433D2E6BFE340005C3B0 /* PostingTimelineSwitcherView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 2710433C2E6BFE2A0005C3B0 /* PostingTimelineSwitcherView.swift */; }; 2710433E2E6BFE340005C3B0 /* PostingTimelineSwitcherView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 2710433C2E6BFE2A0005C3B0 /* PostingTimelineSwitcherView.swift */; }; @@ -66,6 +67,7 @@ 3ACF94482DAA006500971A4E /* NIP05DomainEventsModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3ACF94452DAA006500971A4E /* NIP05DomainEventsModel.swift */; }; 3AE45AF6297BB2E700C1D842 /* LibreTranslateServer.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3AE45AF5297BB2E700C1D842 /* LibreTranslateServer.swift */; }; 3CCD1E6A2A874C4E0099A953 /* Nip98HTTPAuth.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3CCD1E692A874C4E0099A953 /* Nip98HTTPAuth.swift */; }; + 4A1D642FD54AF62882835565 /* NegentropySync.swift in Sources */ = {isa = PBXBuildFile; fileRef = 643EF18DE5FB1FADEAB2D229 /* NegentropySync.swift */; }; 4C011B5E2BD0A56A002F2F9B /* ChatEventView.swift in Sources */ = {isa = PBXBuildFile; fileRef 
= 4C011B5C2BD0A56A002F2F9B /* ChatEventView.swift */; }; 4C011B5F2BD0A56A002F2F9B /* ChatroomThreadView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C011B5D2BD0A56A002F2F9B /* ChatroomThreadView.swift */; }; 4C011B612BD0B25C002F2F9B /* ReplyQuoteView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C011B602BD0B25C002F2F9B /* ReplyQuoteView.swift */; }; @@ -531,6 +533,7 @@ 5C8498032D5D150000F74FEB /* ZapExplainer.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5C8498012D5D14FA00F74FEB /* ZapExplainer.swift */; }; 5C8498042D5D150000F74FEB /* ZapExplainer.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5C8498012D5D14FA00F74FEB /* ZapExplainer.swift */; }; 5C8711DE2C460C06007879C2 /* PostingTimelineView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5C8711DD2C460C06007879C2 /* PostingTimelineView.swift */; }; + 5C8F91A171EB8DBA39083DCD /* ndb_negentropy.c in Sources */ = {isa = PBXBuildFile; fileRef = 98AC4B61018224440582B313 /* ndb_negentropy.c */; }; 5C8F970A2EB45E8C009399B1 /* LiveChatModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5C8F97092EB45E85009399B1 /* LiveChatModel.swift */; }; 5C8F970B2EB45E8C009399B1 /* LiveChatModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5C8F97092EB45E85009399B1 /* LiveChatModel.swift */; }; 5C8F970C2EB45E8C009399B1 /* LiveChatModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5C8F97092EB45E85009399B1 /* LiveChatModel.swift */; }; @@ -628,6 +631,7 @@ 7C902AE32981D55B002AB16E /* ZoomableScrollView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7C902AE22981D55B002AB16E /* ZoomableScrollView.swift */; }; 7C95CAEE299DCEF1009DCB67 /* KFOptionSetter+.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7C95CAED299DCEF1009DCB67 /* KFOptionSetter+.swift */; }; 7CFF6317299FEFE5005D382A /* SelectableText.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7CFF6316299FEFE5005D382A /* SelectableText.swift */; }; + 82711500EC248E77B4CCBB18 /* ndb_negentropy.c in Sources */ = {isa = 
PBXBuildFile; fileRef = 98AC4B61018224440582B313 /* ndb_negentropy.c */; }; 82D6FA9A2CD9820500C925F4 /* ShareViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 82D6FA992CD9820500C925F4 /* ShareViewController.swift */; }; 82D6FAA12CD9820500C925F4 /* ShareExtension.appex in Embed Foundation Extensions */ = {isa = PBXBuildFile; fileRef = 82D6FA972CD9820500C925F4 /* ShareExtension.appex */; settings = {ATTRIBUTES = (RemoveHeadersOnCopy, ); }; }; 82D6FAA92CD99F7900C925F4 /* FbConstants.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C32B9372A9AD44700DC3548 /* FbConstants.swift */; }; @@ -1075,6 +1079,7 @@ 9609F058296E220800069BF3 /* BannerImageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9609F057296E220800069BF3 /* BannerImageView.swift */; }; 9C83F89329A937B900136C08 /* TextViewWrapper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9C83F89229A937B900136C08 /* TextViewWrapper.swift */; }; 9CA876E229A00CEA0003B9A3 /* AttachMediaUtility.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9CA876E129A00CE90003B9A3 /* AttachMediaUtility.swift */; }; + A1A6C169937620FEBDF2F0A8 /* NdbNegentropy.swift in Sources */ = {isa = PBXBuildFile; fileRef = FFCA8F8A70FFE1C55544B3CE /* NdbNegentropy.swift */; }; ADFE73552AD4793100EC7326 /* QRScanNSECView.swift in Sources */ = {isa = PBXBuildFile; fileRef = ADFE73542AD4793100EC7326 /* QRScanNSECView.swift */; }; B501062D2B363036003874F5 /* AuthIntegrationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = B501062C2B363036003874F5 /* AuthIntegrationTests.swift */; }; B51C1CEA2B55A60A00E312A9 /* AddMuteItemView.swift in Sources */ = {isa = PBXBuildFile; fileRef = B51C1CE82B55A60A00E312A9 /* AddMuteItemView.swift */; }; @@ -1111,6 +1116,7 @@ D5C1AFD32E5EE2820092F72F /* FavoriteButtonView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D5C1AFD12E5EE2820092F72F /* FavoriteButtonView.swift */; }; D5C1AFD42E5EE2820092F72F /* FavoriteButtonView.swift in Sources */ = {isa = PBXBuildFile; 
fileRef = D5C1AFD12E5EE2820092F72F /* FavoriteButtonView.swift */; }; D5C1AFD52E5EE2820092F72F /* FavoriteButtonView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D5C1AFD12E5EE2820092F72F /* FavoriteButtonView.swift */; }; + D5E9A388AE3969F7D3CCDA55 /* ndb_negentropy.c in Sources */ = {isa = PBXBuildFile; fileRef = 98AC4B61018224440582B313 /* ndb_negentropy.c */; }; D703D7192C66E47100A400EA /* UniformTypeIdentifiers.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D703D7182C66E47100A400EA /* UniformTypeIdentifiers.framework */; }; D703D71C2C66E47100A400EA /* Media.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = D703D71B2C66E47100A400EA /* Media.xcassets */; }; D703D71E2C66E47100A400EA /* ActionViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D703D71D2C66E47100A400EA /* ActionViewController.swift */; }; @@ -1491,7 +1497,7 @@ D73E5EFB2C6A97F4007EB227 /* ProfilePicturesView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C30AC7F29A6A53F00E2BD5A /* ProfilePicturesView.swift */; }; D73E5EFC2C6A97F4007EB227 /* DamusAppNotificationView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D78CD5972B8990300014D539 /* DamusAppNotificationView.swift */; }; D73E5EFD2C6A97F4007EB227 /* InnerTimelineView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4CE0E2B529A3ED5500DB4CA2 /* InnerTimelineView.swift */; }; - D73E5EFE2C6A97F4007EB227 /* (null) in Sources */ = {isa = PBXBuildFile; }; + D73E5EFE2C6A97F4007EB227 /* BuildFile in Sources */ = {isa = PBXBuildFile; }; D73E5EFF2C6A97F4007EB227 /* ZapsView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4CE879572996C45300F758CC /* ZapsView.swift */; }; D73E5F002C6A97F4007EB227 /* CustomizeZapView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C9F18E129AA9B6C008C55EC /* CustomizeZapView.swift */; }; D73E5F012C6A97F4007EB227 /* ZapTypePicker.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4CA3FA0F29F593D000FDB3C3 /* ZapTypePicker.swift */; }; @@ -1905,6 +1911,7 @@ 
E0EE9DD42B8E5FEA00F3002D /* ImageProcessing.swift in Sources */ = {isa = PBXBuildFile; fileRef = E0EE9DD32B8E5FEA00F3002D /* ImageProcessing.swift */; }; E4FA1C032A24BB7F00482697 /* SearchSettingsView.swift in Sources */ = {isa = PBXBuildFile; fileRef = E4FA1C022A24BB7F00482697 /* SearchSettingsView.swift */; }; E990020F2955F837003BBC5A /* EditMetadataView.swift in Sources */ = {isa = PBXBuildFile; fileRef = E990020E2955F837003BBC5A /* EditMetadataView.swift */; }; + F3CB5B256FC0F811E47C8E30 /* ndb_negentropy.c in Sources */ = {isa = PBXBuildFile; fileRef = 98AC4B61018224440582B313 /* ndb_negentropy.c */; }; F71694EA2A662232001F4053 /* OnboardingSuggestionsView.swift in Sources */ = {isa = PBXBuildFile; fileRef = F71694E92A662232001F4053 /* OnboardingSuggestionsView.swift */; }; F71694EC2A662292001F4053 /* SuggestedUsersViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = F71694EB2A662292001F4053 /* SuggestedUsersViewModel.swift */; }; F71694F22A67314D001F4053 /* SuggestedUserView.swift in Sources */ = {isa = PBXBuildFile; fileRef = F71694F12A67314D001F4053 /* SuggestedUserView.swift */; }; @@ -1976,6 +1983,7 @@ /* End PBXCopyFilesBuildPhase section */ /* Begin PBXFileReference section */ + 013E67E6987C2127BC2EC989 /* NdbNegentropyTests.swift */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.swift; path = NdbNegentropyTests.swift; sourceTree = ""; }; 0E8A4BB62AE4359200065E81 /* NostrFilter+Hashable.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "NostrFilter+Hashable.swift"; sourceTree = ""; }; 2710433C2E6BFE2A0005C3B0 /* PostingTimelineSwitcherView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PostingTimelineSwitcherView.swift; sourceTree = ""; }; 3165648A295B70D500C64604 /* LinkView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LinkView.swift; sourceTree = ""; }; @@ -2666,6 +2674,7 @@ 5CF72FC129B9142F00124A13 /* 
ShareAction.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ShareAction.swift; sourceTree = ""; }; 6439E013296790CF0020672B /* ProfilePicImageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ProfilePicImageView.swift; sourceTree = ""; }; 643EA5C7296B764E005081BB /* RelayFilterView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RelayFilterView.swift; sourceTree = ""; }; + 643EF18DE5FB1FADEAB2D229 /* NegentropySync.swift */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.swift; name = NegentropySync.swift; path = NegentropySync.swift; sourceTree = ""; }; 647D9A8C2968520300A295DE /* SideMenuView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SideMenuView.swift; sourceTree = ""; }; 64FBD06E296255C400D9D3B2 /* Theme.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Theme.swift; sourceTree = ""; }; 7527271D2A93FF0100214108 /* Block.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Block.swift; sourceTree = ""; }; @@ -2678,7 +2687,9 @@ 82D6FA992CD9820500C925F4 /* ShareViewController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ShareViewController.swift; sourceTree = ""; }; 82D6FA9E2CD9820500C925F4 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 82D6FAA62CD9820500C925F4 /* share extension.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = "share extension.entitlements"; sourceTree = ""; }; + 937C9307E38CECD491F47C47 /* ndb_negentropy.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; path = ndb_negentropy.h; sourceTree = ""; }; 9609F057296E220800069BF3 /* BannerImageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = 
BannerImageView.swift; sourceTree = ""; }; + 98AC4B61018224440582B313 /* ndb_negentropy.c */ = {isa = PBXFileReference; includeInIndex = 1; path = ndb_negentropy.c; sourceTree = ""; }; 9C83F89229A937B900136C08 /* TextViewWrapper.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TextViewWrapper.swift; sourceTree = ""; }; 9CA876E129A00CE90003B9A3 /* AttachMediaUtility.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AttachMediaUtility.swift; sourceTree = ""; }; ADFE73542AD4793100EC7326 /* QRScanNSECView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = QRScanNSECView.swift; sourceTree = ""; }; @@ -2862,6 +2873,7 @@ F7F0BA24297892BD009531F3 /* SwipeToDismiss.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SwipeToDismiss.swift; sourceTree = ""; }; F7F0BA262978E54D009531F3 /* ParticipantsView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ParticipantsView.swift; sourceTree = ""; }; F944F56D29EA9CCC0067B3BF /* DamusParseContentTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DamusParseContentTests.swift; sourceTree = ""; }; + FFCA8F8A70FFE1C55544B3CE /* NdbNegentropy.swift */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.swift; path = NdbNegentropy.swift; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -3253,6 +3265,7 @@ 4C7FF7D42823313F009601DB /* Mentions.swift */, 4C285C8B28398BC6008A31F1 /* Keys.swift */, D773BC5E2C6D538500349F0A /* CommentItem.swift */, + 643EF18DE5FB1FADEAB2D229 /* NegentropySync.swift */, ); path = Nostr; sourceTree = ""; @@ -3325,6 +3338,7 @@ 4C78EFD92A707C4D007E8197 /* secp256k1.h */, D798D2272B085CDA00234419 /* NdbNote+.swift */, 4CF480582B633F3800F2B2C0 /* NdbBlock.swift */, + FFCA8F8A70FFE1C55544B3CE /* NdbNegentropy.swift */, ); path = nostrdb; 
sourceTree = ""; @@ -3861,6 +3875,7 @@ 3A96E3FD2D6BCE3800AE1630 /* RepostedTests.swift */, 4C0ED07E2D7A1E260020D8A2 /* Benchmarking.swift */, 3A92C1012DE17ACA00CEEBAC /* NIP05DomainTimelineHeaderViewTests.swift */, + 013E67E6987C2127BC2EC989 /* NdbNegentropyTests.swift */, ); path = damusTests; sourceTree = ""; @@ -3978,6 +3993,8 @@ 4CF480352B631C0100F2B2C0 /* protected_queue.h */, 4CF480362B631C0100F2B2C0 /* random.h */, 4CF480372B631C0100F2B2C0 /* invoice.c */, + 98AC4B61018224440582B313 /* ndb_negentropy.c */, + 937C9307E38CECD491F47C47 /* ndb_negentropy.h */, ); path = src; sourceTree = ""; @@ -5556,7 +5573,7 @@ ); mainGroup = 4CE6DEDA27F7A08100C66700; packageReferences = ( - 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1" */, + 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1.swift" */, 4C06670228FC7EC500038D2A /* XCRemoteSwiftPackageReference "Kingfisher" */, 4CCF9AB02A1FE80B00E03CFB /* XCRemoteSwiftPackageReference "GSPlayer" */, 4C27C9302A64766F007DBC75 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */, @@ -6236,6 +6253,9 @@ 4C9B0DF32A65C46800CBDA21 /* ProfileEditButton.swift in Sources */, 4C32B95F2A9AD44700DC3548 /* Enum.swift in Sources */, 4C2859622A12A7F0004746F7 /* GoldSupportGradient.swift in Sources */, + 4A1D642FD54AF62882835565 /* NegentropySync.swift in Sources */, + D5E9A388AE3969F7D3CCDA55 /* ndb_negentropy.c in Sources */, + A1A6C169937620FEBDF2F0A8 /* NdbNegentropy.swift in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -6297,6 +6317,7 @@ 4C684A552A7E91FE005E6031 /* LargeEventTests.swift in Sources */, E02B54182B4DFADA0077FF42 /* Bech32ObjectTests.swift in Sources */, 3A92C1022DE17ACA00CEEBAC /* NIP05DomainTimelineHeaderViewTests.swift in Sources */, + 0061F28AA1B809DE3B8AEFA6 /* NdbNegentropyTests.swift in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -6854,6 +6875,7 @@ 82D6FC7B2CD99F7900C925F4 /* TestData.swift in Sources */, 82D6FC7C2CD99F7900C925F4 /* 
ContentParsing.swift in Sources */, 82D6FC7D2CD99F7900C925F4 /* NotificationFormatter.swift in Sources */, + 5C8F91A171EB8DBA39083DCD /* ndb_negentropy.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -7143,7 +7165,7 @@ D73E5EFB2C6A97F4007EB227 /* ProfilePicturesView.swift in Sources */, D73E5EFC2C6A97F4007EB227 /* DamusAppNotificationView.swift in Sources */, D73E5EFD2C6A97F4007EB227 /* InnerTimelineView.swift in Sources */, - D73E5EFE2C6A97F4007EB227 /* (null) in Sources */, + D73E5EFE2C6A97F4007EB227 /* BuildFile in Sources */, D7EB00B02CD59C8D00660C07 /* PresentFullScreenItemNotify.swift in Sources */, D73E5EFF2C6A97F4007EB227 /* ZapsView.swift in Sources */, D73E5F002C6A97F4007EB227 /* CustomizeZapView.swift in Sources */, @@ -7409,6 +7431,7 @@ D703D75B2C670A7F00A400EA /* Contacts.swift in Sources */, D703D7812C670C2B00A400EA /* Bech32.swift in Sources */, D73E5E1E2C6A9694007EB227 /* RelayFilters.swift in Sources */, + 82711500EC248E77B4CCBB18 /* ndb_negentropy.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -7564,6 +7587,7 @@ D7CE1B1E2B0BE190002EDAD4 /* midl.c in Sources */, D7CB5D3C2B1130C600AD4105 /* LocalNotification.swift in Sources */, B59CAD4D2B688D1000677E8B /* MutelistManager.swift in Sources */, + F3CB5B256FC0F811E47C8E30 /* ndb_negentropy.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -8074,7 +8098,7 @@ PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; SUPPORTS_MACCATALYST = YES; - SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited)"; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited) EXTENSION_TARGET"; SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_OBJC_BRIDGING_HEADER = "damus-c/damus-Bridging-Header.h"; SWIFT_VERSION = 5.0; @@ -8106,6 +8130,7 @@ PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; SUPPORTS_MACCATALYST = YES; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "$(inherited) EXTENSION_TARGET"; SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_OBJC_BRIDGING_HEADER = 
"damus-c/damus-Bridging-Header.h"; SWIFT_VERSION = 5.0; @@ -8138,7 +8163,7 @@ PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; SUPPORTS_MACCATALYST = YES; - SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited)"; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited) EXTENSION_TARGET"; SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_OBJC_BRIDGING_HEADER = "damus-c/damus-Bridging-Header.h"; SWIFT_VERSION = 5.0; @@ -8171,6 +8196,7 @@ PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; SUPPORTS_MACCATALYST = YES; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "$(inherited) EXTENSION_TARGET"; SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_OBJC_BRIDGING_HEADER = "damus-c/damus-Bridging-Header.h"; SWIFT_VERSION = 5.0; @@ -8203,7 +8229,7 @@ PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; SUPPORTS_MACCATALYST = YES; - SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited)"; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited) EXTENSION_TARGET"; SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_OBJC_BRIDGING_HEADER = "damus-c/damus-Bridging-Header.h"; SWIFT_VERSION = 5.0; @@ -8236,6 +8262,7 @@ PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; SUPPORTS_MACCATALYST = YES; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = "$(inherited) EXTENSION_TARGET"; SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_OBJC_BRIDGING_HEADER = "damus-c/damus-Bridging-Header.h"; SWIFT_VERSION = 5.0; @@ -8344,7 +8371,7 @@ kind = branch; }; }; - 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1" */ = { + 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1.swift" */ = { isa = XCRemoteSwiftPackageReference; repositoryURL = "https://github.com/jb55/secp256k1.swift"; requirement = { @@ -8440,12 +8467,12 @@ }; 4C649880286E0EE300EAE2B3 /* secp256k1 */ = { isa = XCSwiftPackageProductDependency; - package = 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1" */; + package = 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1.swift" */; productName = secp256k1; }; 
82D6FC802CD99FC500C925F4 /* secp256k1 */ = { isa = XCSwiftPackageProductDependency; - package = 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1" */; + package = 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1.swift" */; productName = secp256k1; }; 82D6FC832CD9A48500C925F4 /* Kingfisher */ = { @@ -8470,7 +8497,7 @@ }; D703D7482C6709B100A400EA /* secp256k1 */ = { isa = XCSwiftPackageProductDependency; - package = 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1" */; + package = 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1.swift" */; productName = secp256k1; }; D703D7AC2C670FA700A400EA /* MarkdownUI */ = { @@ -8515,7 +8542,7 @@ }; D789D11F2AFEFBF20083A7AB /* secp256k1 */ = { isa = XCSwiftPackageProductDependency; - package = 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1" */; + package = 4C64987F286E0EE300EAE2B3 /* XCRemoteSwiftPackageReference "secp256k1.swift" */; productName = secp256k1; }; D78DB8582C1CE9CA00F0AB12 /* SwipeActions */ = { diff --git a/damus/Core/Networking/NostrNetworkManager/NostrNetworkManager.swift b/damus/Core/Networking/NostrNetworkManager/NostrNetworkManager.swift index 28ef46c7d1..84683ec4cd 100644 --- a/damus/Core/Networking/NostrNetworkManager/NostrNetworkManager.swift +++ b/damus/Core/Networking/NostrNetworkManager/NostrNetworkManager.swift @@ -72,7 +72,42 @@ class NostrNetworkManager { self.delegate.ndb.reopen() // Pinging the network will automatically reconnect any dead websocket connections await self.ping() + + #if !EXTENSION_TARGET + // Try to use NIP-77 negentropy for efficient sync of missing events + await syncTimelineWithNegentropy() + #endif + } + + #if !EXTENSION_TARGET + /// Use NIP-77 negentropy to sync timeline events efficiently + /// This fetches only the events we're missing instead of re-fetching everything + private func syncTimelineWithNegentropy() async { + guard await pool.isNegentropyAvailable else { + 
Log.info("Negentropy sync not available, skipping", for: .networking) + return + } + + // Create a timeline filter for negentropy sync + // Note: limit is required for relay.damus.io NEG-OPEN - use large value for fingerprinting accuracy + var timelineFilter = NostrFilter(kinds: [.text, .longform, .boost, .highlight, .like]) + timelineFilter.limit = 50000 + + do { + let startTime = CFAbsoluteTimeGetCurrent() + let results = try await pool.syncWithNegentropy(filter: timelineFilter) + + let totalNeedIds = results.values.reduce(0) { $0 + $1.needIds.count } + let elapsed = CFAbsoluteTimeGetCurrent() - startTime + let relayNames = results.keys.map { $0.absoluteString }.joined(separator: ", ") + + Log.info("Negentropy sync completed in %.2fs: requested %d missing events from %d relays: %s", + for: .networking, elapsed, totalNeedIds, results.count, relayNames) + } catch { + Log.error("Negentropy sync failed: %s", for: .networking, error.localizedDescription) + } } + #endif func close() async { await withTaskGroup { group in diff --git a/damus/Core/Nostr/NegentropySync.swift b/damus/Core/Nostr/NegentropySync.swift new file mode 100644 index 0000000000..e53d06a299 --- /dev/null +++ b/damus/Core/Nostr/NegentropySync.swift @@ -0,0 +1,890 @@ +// +// NegentropySync.swift +// damus +// +// NIP-77 negentropy set reconciliation for efficient timeline sync. +// +// Instead of re-fetching thousands of events on app foreground, negentropy +// efficiently identifies only the events we're missing by comparing fingerprints +// of our local event set against the relay's set. This typically reduces +// network traffic from 5000+ events to ~50-200 missing events. +// +// Protocol flow: +// 1. Client sends NEG-OPEN with filter and initial fingerprint +// 2. Relay responds with NEG-MSG containing its fingerprint +// 3. Client/relay exchange NEG-MSG until sets are reconciled +// 4. 
Client sends NEG-CLOSE, then fetches missing events via REQ +// +// See: https://github.com/nostr-protocol/nips/blob/master/77.md +// + +#if !EXTENSION_TARGET +import Foundation + +// MARK: - Negentropy Support Cache + +/// Caches which relays support negentropy to avoid repeated checks. +/// Results are cached for 7 days to allow relays to update their support. +final class NegentropySupportCache { + private static let cacheKey = "negentropy_relay_support_cache" + + /// Cached result for a relay + struct CacheEntry: Codable { + let supported: Bool + let timestamp: Date + + var isExpired: Bool { + Date().timeIntervalSince(timestamp) > Self.cacheExpiryDays * 24 * 60 * 60 + } + + private static let cacheExpiryDays = 7.0 + } + + private var cache: [String: CacheEntry] = [:] + + init() { + loadFromUserDefaults() + } + + /// Check if a relay is known to support negentropy + func isKnownSupported(_ relay: RelayURL) -> Bool { + guard let entry = cache[relay.absoluteString] else { + return false // Unknown + } + if entry.isExpired { + cache.removeValue(forKey: relay.absoluteString) + return false // Expired + } + return entry.supported + } + + /// Check if a relay is known to NOT support negentropy + func isKnownUnsupported(_ relay: RelayURL) -> Bool { + guard let entry = cache[relay.absoluteString] else { + return false // Unknown + } + if entry.isExpired { + cache.removeValue(forKey: relay.absoluteString) + return false // Expired + } + return !entry.supported + } + + /// Check if a relay's support status is unknown (not in cache) + func isUnknown(_ relay: RelayURL) -> Bool { + guard let entry = cache[relay.absoluteString] else { + return true // Not in cache + } + if entry.isExpired { + cache.removeValue(forKey: relay.absoluteString) + return true // Expired = unknown + } + return false + } + + /// Mark a relay as supporting or not supporting negentropy + func setSupport(_ relay: RelayURL, supported: Bool) { + cache[relay.absoluteString] = CacheEntry(supported: 
supported, timestamp: Date()) + saveToUserDefaults() + } + + /// Get all relays known to support negentropy + func knownSupportedRelays() -> [String] { + return cache.filter { !$0.value.isExpired && $0.value.supported }.map { $0.key } + } + + private func loadFromUserDefaults() { + guard let data = UserDefaults.standard.data(forKey: Self.cacheKey), + let decoded = try? JSONDecoder().decode([String: CacheEntry].self, from: data) else { + return + } + // Filter out expired entries on load + cache = decoded.filter { !$0.value.isExpired } + } + + private func saveToUserDefaults() { + guard let data = try? JSONEncoder().encode(cache) else { return } + UserDefaults.standard.set(data, forKey: Self.cacheKey) + } +} + +// MARK: - Types + +/// Represents the state of a negentropy sync session +enum NegentropySyncState { + case idle + case syncing + case completed + case failed(String) +} + +/// Result of a negentropy reconciliation +struct NegentropySyncResult { + /// Event IDs that we have but the relay doesn't (could upload if we wanted) + let haveIds: [NoteId] + /// Event IDs that the relay has but we don't (need to fetch) + let needIds: [NoteId] + /// Whether the session timed out (vs completed normally) + let timedOut: Bool + + init(haveIds: [NoteId] = [], needIds: [NoteId] = [], timedOut: Bool = false) { + self.haveIds = haveIds + self.needIds = needIds + self.timedOut = timedOut + } +} + +// MARK: - NegentropySession + +/// Manages a single negentropy sync session with one relay. +/// +/// Each session handles the multi-round reconciliation protocol: +/// 1. Initialize with local events to build our fingerprint +/// 2. Exchange messages with relay until reconciliation completes +/// 3. Signal completion so caller can fetch missing events +actor NegentropySession { + let relay: RelayURL + let filter: NostrFilter + let subId: String + + private var negentropy: NdbNegentropy? + private var storage: NdbNegentropyStorage? 
+ private(set) var state: NegentropySyncState = .idle + + /// Results accumulated across multiple reconciliation rounds + private var accumulatedHaveIds: [NoteId] = [] + private var accumulatedNeedIds: [NoteId] = [] + + /// Continuation for async waiting on session completion + private var completionContinuation: CheckedContinuation? + + /// Tracks when we last received a message (for inactivity timeout) + private(set) var lastActivityTime: Date = Date() + + /// Whether we've received at least one response (confirms relay supports negentropy) + private(set) var hasReceivedResponse: Bool = false + + init(relay: RelayURL, filter: NostrFilter, subId: String? = nil) { + self.relay = relay + self.filter = filter + self.subId = subId ?? "neg-\(UUID().uuidString.prefix(8))" + } + + /// Wait for the session to complete and return the results + func waitForCompletion() async -> NegentropySyncResult { + // If already completed or failed, return immediately + switch state { + case .completed, .failed: + return NegentropySyncResult(haveIds: accumulatedHaveIds, needIds: accumulatedNeedIds) + default: + break + } + + // Wait for completion signal + return await withCheckedContinuation { continuation in + self.completionContinuation = continuation + } + } + + /// Signal that the session has completed + private func signalCompletion() { + let result = NegentropySyncResult(haveIds: accumulatedHaveIds, needIds: accumulatedNeedIds) + completionContinuation?.resume(returning: result) + completionContinuation = nil + } + + /// Initialize the negentropy session using NostrDB + /// - Parameters: + /// - ndb: NostrDB instance to query local events + /// - filter: Filter to query events (should match the sync filter) + /// - Returns: The initial message to send to the relay (hex-encoded), or nil on failure + func initiate(with ndb: Ndb, filter: NostrFilter) throws -> String? 
{ + do { + // Create storage and populate directly from NostrDB + storage = try NdbNegentropyStorage() + + // Convert NostrFilter to NdbFilter for the query + let ndbFilter = try NdbFilter(from: filter) + + // Use a reasonable limit for timeline sync + let limit = Int32(filter.limit ?? 50_000) + + // Populate storage from NostrDB - this queries LMDB directly + // without loading full events into memory + guard let txn = NdbTxn<()>(ndb: ndb) else { + throw NdbNegentropyError.storageFromFilterFailed + } + let count = try storage?.populate(txn: txn, filter: ndbFilter, limit: limit) ?? 0 + + Log.debug("Negentropy: populated storage with %d events from NostrDB", for: .networking, count) + + // Create negentropy reconciliation context with conservative settings + // Relay max message size is ~40KB unsigned + // Use smaller split_count (4 vs default 16) to reduce response sizes + guard let storage = storage else { return nil } + let config = NdbNegentropyConfig( + frameSizeLimit: 32 * 1024, + idlistThreshold: 16, + splitCount: 4 // Fewer splits = smaller responses + ) + negentropy = try NdbNegentropy(storage: storage, config: config) + + // Generate initial message (hex-encoded for NIP-77) + let initMessage = try negentropy?.initiateHex() + + state = .syncing + return initMessage + } catch { + Log.error("Failed to initialize negentropy session: %s", for: .networking, error.localizedDescription) + throw error + } + } + + /// Process a NEG-MSG response from the relay + /// - Parameter messageHex: Hex-encoded negentropy message from relay + /// - Returns: Next message to send (nil if reconciliation complete), and partial results + func processMessage(_ messageHex: String) throws -> (nextMessage: String?, haveIds: [NoteId], needIds: [NoteId]) { + guard let negentropy = negentropy else { + throw NegentropySyncError.sessionNotFound + } + + // Track activity for timeout management + hasReceivedResponse = true + lastActivityTime = Date() + + // Process the message and generate 
response using native implementation + let nextMsg = try negentropy.reconcileHex(hexMessage: messageHex) + + // Get IDs from the reconciliation - native implementation accumulates them + let haveNoteIds = negentropy.haveIds + let needNoteIds = negentropy.needIds + + // Update accumulated results (in case we need them before completion) + accumulatedHaveIds = haveNoteIds + accumulatedNeedIds = needNoteIds + + // Empty response means reconciliation is complete + if nextMsg.isEmpty || negentropy.isComplete { + state = .completed + signalCompletion() + return (nil, haveNoteIds, needNoteIds) + } else { + return (nextMsg, haveNoteIds, needNoteIds) + } + } + + /// Get the final accumulated results + func getResults() -> NegentropySyncResult { + return NegentropySyncResult(haveIds: accumulatedHaveIds, needIds: accumulatedNeedIds) + } + + /// Mark the session as failed + func fail(reason: String) { + state = .failed(reason) + signalCompletion() + } +} + +/// Error types for negentropy operations +enum NegentropySyncError: Error { + case invalidMessage + case sessionNotFound + case initializationFailed +} + +// MARK: - NegentropyManager + +/// Tracks an active negentropy fetch subscription for completion logging +struct NegentropyFetchTracker { + let relay: RelayURL + let expectedCount: Int + var receivedCount: Int = 0 +} + +/// Manages negentropy sync sessions across multiple relays. +/// +/// This is the main entry point for negentropy sync. It: +/// - Filters relays by NIP-77 support (via NIP-11) +/// - Runs sync sessions in parallel across relays +/// - Batches event fetches to avoid relay message limits +/// - Tracks fetch completion for logging +actor NegentropyManager { + private var sessions: [String: NegentropySession] = [:] + private weak var pool: RelayPool? + private var ndb: Ndb? 
+ + /// Tracks active fetch subscriptions by sub_id for completion logging + private var fetchTrackers: [String: NegentropyFetchTracker] = [:] + + /// Tracks whether a full sync is in progress (to avoid duplicate syncs) + private var isFullSyncInProgress = false + + /// Tracks which relays have individual syncs in progress + private var relaySyncsInProgress: Set = [] + + /// Cache for relay negentropy support (avoids NIP-11 checks on every startup) + private let supportCache = NegentropySupportCache() + + init(pool: RelayPool?, ndb: Ndb?) { + self.pool = pool + self.ndb = ndb + } + + /// Mark a relay as not supporting negentropy (called when we get NEG-ERR or "negentropy disabled") + /// Also cancels any pending sessions for this relay. + func markRelayUnsupported(_ relay: RelayURL) { + supportCache.setSupport(relay, supported: false) + + // Cancel any pending sessions for this relay + for (subId, session) in sessions { + Task { + let sessionRelay = await session.relay + if sessionRelay == relay { + await session.fail(reason: "Relay does not support negentropy") + sessions.removeValue(forKey: subId) + } + } + } + + // Remove from in-progress tracking + relaySyncsInProgress.remove(relay) + } + + /// Mark a relay as supporting negentropy (called when we get a successful NEG-MSG) + func markRelaySupported(_ relay: RelayURL) { + supportCache.setSupport(relay, supported: true) + } + + /// Check if relay support status is unknown + func isRelayUnknown(_ relay: RelayURL) -> Bool { + return supportCache.isUnknown(relay) + } + + // MARK: NIP-11 Background Check + + /// Check NIP-11 for unknown relays and sync those that support NIP-77. + /// This runs in the background and doesn't block startup. 
+ private func checkNIP11AndSyncIfSupported(_ relays: [RelayURL], filter: NostrFilter) async { + await withTaskGroup(of: Void.self) { group in + for relay in relays { + group.addTask { + do { + // Fetch NIP-11 metadata + guard let metadata = try await fetch_relay_metadata(relay_id: relay) else { + Log.debug("Negentropy: no NIP-11 metadata for %s", for: .networking, relay.absoluteString) + return + } + + // Check if relay advertises NIP-77 support + let supportsNIP77 = metadata.supported_nips?.contains(77) ?? false + + if supportsNIP77 { + Log.info("Negentropy: %s advertises NIP-77, starting sync", for: .networking, relay.absoluteString) + // Mark as potentially supported and try to sync + // (will be confirmed as supported when we get NEG-MSG) + await self.syncSingleRelayInternal(relay, filter: filter) + } else { + // Mark as unsupported so we don't check again + await self.markRelayUnsupported(relay) + Log.debug("Negentropy: %s does not advertise NIP-77", for: .networking, relay.absoluteString) + } + } catch { + Log.debug("Negentropy: failed to fetch NIP-11 for %s: %s", + for: .networking, relay.absoluteString, error.localizedDescription) + } + } + } + } + } + + // MARK: Fetch Tracking + + /// Called when an event is received for a negentropy fetch subscription + func trackFetchedEvent(subId: String) { + guard var tracker = fetchTrackers[subId] else { return } + tracker.receivedCount += 1 + fetchTrackers[subId] = tracker + } + + /// Called when EOSE is received for a negentropy fetch subscription + func handleFetchEOSE(subId: String) { + guard let tracker = fetchTrackers.removeValue(forKey: subId) else { return } + + let received = tracker.receivedCount + let expected = tracker.expectedCount + let relay = tracker.relay.absoluteString + let missing = expected - received + + // All events received + guard missing > 0 else { + Log.info("Negentropy fetch complete: received %d/%d events from %s", + for: .networking, received, expected, relay) + return + } + + // Some 
events missing (relay may not have them, or they were deleted) + Log.info("Negentropy fetch complete: received %d/%d events from %s (missing %d)", + for: .networking, received, expected, relay, missing) + } + + /// Called when relay sends CLOSED for a negentropy fetch subscription (e.g., rate limiting) + func handleFetchClosed(subId: String, message: String) { + guard let tracker = fetchTrackers.removeValue(forKey: subId) else { return } + + let received = tracker.receivedCount + let expected = tracker.expectedCount + let relay = tracker.relay.absoluteString + + // Rate-limited by relay + guard !message.hasPrefix("rate-limited:") else { + Log.info("Negentropy fetch rate-limited: received %d/%d events from %s before limit (%s)", + for: .networking, received, expected, relay, message) + return + } + + // Other closure reason + Log.info("Negentropy fetch closed by relay: received %d/%d events from %s (%s)", + for: .networking, received, expected, relay, message) + } + + /// Check if a subscription ID is a negentropy fetch + func isNegentropyFetch(subId: String) -> Bool { + return fetchTrackers[subId] != nil + } + + // MARK: Sync Entry Point + + /// Start a negentropy sync for a filter across specified relays. + /// + /// This is the main entry point. It: + /// 1. Waits for relay connections (with grace period for slow relays) + /// 2. Tries all relays optimistically (skips those cached as unsupported) + /// 3. Runs sync sessions in parallel + /// 4. Caches relay support based on responses (NEG-MSG = supported, NEG-ERR = unsupported) + /// + /// - Parameters: + /// - filter: The filter to sync (e.g., timeline events) + /// - relays: Specific relays to sync with (nil = all connected relays) + /// - Returns: Dictionary of relay URL to sync results + func sync(filter: NostrFilter, to relays: [RelayURL]? 
= nil) async throws -> [RelayURL: NegentropySyncResult] { + // Skip if a full sync is already in progress + guard !isFullSyncInProgress else { + Log.info("Negentropy sync: skipping, full sync already in progress", for: .networking) + return [:] + } + + isFullSyncInProgress = true + defer { isFullSyncInProgress = false } + + guard let pool = pool else { + throw NegentropySyncError.initializationFailed + } + + // Wait for relays to connect (up to 5 seconds, with grace period for stragglers) + var targetRelays: [RelayURL] = [] + var disconnectedRelays: [RelayURL] = [] + var firstConnectionAttempt: Int? = nil + + for attempt in 0..<10 { + let allRelays = await pool.getRelays(targetRelays: relays) + targetRelays = allRelays + .filter { $0.connection.isConnected } + .map { $0.descriptor.url } + disconnectedRelays = allRelays + .filter { !$0.connection.isConnected } + .map { $0.descriptor.url } + + if !targetRelays.isEmpty { + // First time we have connections - record it + if firstConnectionAttempt == nil { + firstConnectionAttempt = attempt + } + + // Wait 2 more attempts (1 second) after first connection for other relays + // This gives slower relays time to connect + if attempt >= (firstConnectionAttempt! + 2) || disconnectedRelays.isEmpty { + break + } + } + + // Wait 500ms before checking again + try? 
await Task.sleep(nanoseconds: 500_000_000) + if targetRelays.isEmpty { + Log.debug("Negentropy sync: waiting for relay connections (attempt %d)", for: .networking, attempt + 1) + } + } + + // Log disconnected relays so user knows why some relays aren't being synced + if !disconnectedRelays.isEmpty { + let disconnectedNames = disconnectedRelays.map { $0.absoluteString }.joined(separator: ", ") + Log.info("Negentropy sync: %d relays not connected: %s", for: .networking, disconnectedRelays.count, disconnectedNames) + } + + if targetRelays.isEmpty { + Log.info("Negentropy sync: no connected relays available", for: .networking) + return [:] + } + + // Only sync relays we KNOW support negentropy (from cache). + // Unknown relays will be checked via NIP-11 in the background. + // Skip relays that are already syncing (from connect handler). + var syncRelays: [RelayURL] = [] + var skippedRelays: [RelayURL] = [] + var unknownRelays: [RelayURL] = [] + var alreadySyncing: [RelayURL] = [] + + for relay in targetRelays { + if relaySyncsInProgress.contains(relay) { + alreadySyncing.append(relay) + } else if supportCache.isKnownSupported(relay) { + syncRelays.append(relay) + } else if supportCache.isKnownUnsupported(relay) { + skippedRelays.append(relay) + } else { + unknownRelays.append(relay) + } + } + + if !alreadySyncing.isEmpty { + let names = alreadySyncing.map { $0.absoluteString }.joined(separator: ", ") + Log.debug("Negentropy sync: %d relays already syncing: %s", for: .networking, alreadySyncing.count, names) + } + + if !skippedRelays.isEmpty { + let names = skippedRelays.map { $0.absoluteString }.joined(separator: ", ") + Log.info("Negentropy sync: skipping %d relays (cached as unsupported): %s", for: .networking, skippedRelays.count, names) + } + + // Check NIP-11 for unknown relays in the background (doesn't block startup) + if !unknownRelays.isEmpty { + let names = unknownRelays.map { $0.absoluteString }.joined(separator: ", ") + Log.info("Negentropy sync: checking 
NIP-11 for %d unknown relays in background: %s", for: .networking, unknownRelays.count, names) + Task { + await self.checkNIP11AndSyncIfSupported(unknownRelays, filter: filter) + } + } + + if syncRelays.isEmpty { + Log.info("Negentropy sync: no known-supported relays to sync immediately", for: .networking) + return [:] + } + + // Short settling delay to let connections fully establish + // WebSocket "connected" doesn't mean the relay is ready to process messages. + // The NIP-11 check (for unknown relays) provides this delay naturally. + try? await Task.sleep(nanoseconds: 1_500_000_000) // 1.5 seconds + + let relayNames = syncRelays.map { $0.absoluteString }.joined(separator: ", ") + Log.info("Negentropy sync: starting sync with %d relays: %s", for: .networking, syncRelays.count, relayNames) + + // Track all relays we're about to sync to prevent duplicate reconnect syncs + for relay in syncRelays { + relaySyncsInProgress.insert(relay) + } + + // Sync with all relays in parallel for speed + let results = await withTaskGroup(of: (RelayURL, NegentropySyncResult?).self) { group in + for relay in syncRelays { + group.addTask { + do { + let result = try await self.syncWithRelay(relay, filter: filter) + return (relay, result) + } catch { + Log.error("Negentropy sync failed for %s: %s", for: .networking, relay.absoluteString, error.localizedDescription) + return (relay, nil) + } + } + } + + var results: [RelayURL: NegentropySyncResult] = [:] + for await (relay, result) in group { + if let result = result { + results[relay] = result + } + } + return results + } + + // Clear tracking for completed syncs + for relay in syncRelays { + relaySyncsInProgress.remove(relay) + } + + return results + } + + /// Sync a single relay on connect/reconnect. + /// + /// Called when a relay connects to sync any events we may have missed. 
+ /// - Known supported: syncs immediately + /// - Known unsupported: skips + /// - Unknown: checks NIP-11 first, then syncs if supported + func syncSingleRelay(_ relay: RelayURL) async { + // Skip if this relay is already being synced + guard !relaySyncsInProgress.contains(relay) else { + Log.debug("Negentropy reconnect sync: skipping %s, already syncing", for: .networking, relay.absoluteString) + return + } + + // Skip if we know this relay doesn't support negentropy + guard !supportCache.isKnownUnsupported(relay) else { + return + } + + // Build filter for timeline events + var filter = NostrFilter(kinds: [.text, .longform, .highlight]) + filter.limit = 50000 + + // If relay is known supported, sync after short settling delay + if supportCache.isKnownSupported(relay) { + Log.info("Negentropy sync: relay %s reconnected, waiting for connection to settle", for: .networking, relay.absoluteString) + // Short delay to let reconnection fully establish + try? await Task.sleep(nanoseconds: 1_000_000_000) // 1 second + await syncSingleRelayInternal(relay, filter: filter) + return + } + + // Unknown relay - check NIP-11 first (in background to not block) + Log.debug("Negentropy sync: checking NIP-11 for unknown relay %s", for: .networking, relay.absoluteString) + Task { + do { + guard let metadata = try await fetch_relay_metadata(relay_id: relay) else { + Log.debug("Negentropy: no NIP-11 metadata for %s", for: .networking, relay.absoluteString) + return + } + + let supportsNIP77 = metadata.supported_nips?.contains(77) ?? 
false + + if supportsNIP77 { + Log.info("Negentropy: %s advertises NIP-77, starting sync", for: .networking, relay.absoluteString) + await self.syncSingleRelayInternal(relay, filter: filter) + } else { + await self.markRelayUnsupported(relay) + Log.debug("Negentropy: %s does not advertise NIP-77", for: .networking, relay.absoluteString) + } + } catch { + Log.debug("Negentropy: failed to fetch NIP-11 for %s: %s", + for: .networking, relay.absoluteString, error.localizedDescription) + } + } + } + + /// Internal method to sync a single relay (assumes checks already done). + private func syncSingleRelayInternal(_ relay: RelayURL, filter: NostrFilter) async { + // Skip if this relay is already being synced + guard !relaySyncsInProgress.contains(relay) else { + Log.debug("Negentropy sync: skipping %s, already syncing", for: .networking, relay.absoluteString) + return + } + + relaySyncsInProgress.insert(relay) + defer { relaySyncsInProgress.remove(relay) } + + do { + let result = try await syncWithRelay(relay, filter: filter) + + // If timed out, the error was already logged in syncWithRelay + guard !result.timedOut else { + return + } + + // Log result - fetching is already handled by handleNegentropyMessage + if result.needIds.isEmpty { + Log.info("Negentropy sync: %s - already up to date", for: .networking, relay.absoluteString) + } + } catch { + Log.error("Negentropy sync failed for %s: %s", + for: .networking, relay.absoluteString, error.localizedDescription) + } + } + + // MARK: Single Relay Sync (Private) + + /// Sync with a single relay. + /// + /// Creates a session, sends NEG-OPEN, waits for completion (with timeout), + /// then fetches missing events. 
+ private func syncWithRelay(_ relay: RelayURL, filter: NostrFilter) async throws -> NegentropySyncResult { + guard let ndb = ndb else { + throw NegentropySyncError.initializationFailed + } + + let session = NegentropySession(relay: relay, filter: filter) + let subId = await session.subId + sessions[subId] = session + + defer { + sessions.removeValue(forKey: subId) + } + + // Initialize with NostrDB - this populates storage directly from LMDB + // without loading full events into memory + guard let initialMessage = try await session.initiate(with: ndb, filter: filter) else { + throw NegentropySyncError.initializationFailed + } + + // Send NEG-OPEN + let negOpen = NegentropyOpen(sub_id: subId, filter: filter, initial_message: initialMessage) + Log.info("Negentropy: sending NEG-OPEN to %s (msg size: %d bytes)", + for: .networking, relay.absoluteString, initialMessage.count / 2) // hex = 2 chars per byte + await pool?.send(.negOpen(negOpen), to: [relay]) + + // Two-phase timeout: + // - First response: 10s (fail fast if relay doesn't support NIP-77) + // - After first response: 30s inactivity timeout (give time for multi-round reconciliation) + return await withTaskGroup(of: NegentropySyncResult.self) { group in + // Task 1: Wait for actual completion + group.addTask { + await session.waitForCompletion() + } + + // Task 2: Inactivity-based timeout + group.addTask { + let startTime = Date() + let initialTimeout: TimeInterval = 10 // 10s for first response + let inactivityTimeout: TimeInterval = 30 // 30s between messages + + while true { + do { + // Check every 2 seconds + try await Task.sleep(nanoseconds: 2_000_000_000) + + let hasResponse = await session.hasReceivedResponse + let lastActivity = await session.lastActivityTime + + if !hasResponse { + // Still waiting for first response + if Date().timeIntervalSince(startTime) > initialTimeout { + Log.info("Negentropy: timeout waiting for %s (may not support NIP-77)", for: .networking, relay.absoluteString) + await 
session.fail(reason: "Timeout waiting for first response") + let result = await session.getResults() + return NegentropySyncResult(haveIds: result.haveIds, needIds: result.needIds, timedOut: true) + } + } else { + // Have received response, check for inactivity + if Date().timeIntervalSince(lastActivity) > inactivityTimeout { + Log.info("Negentropy: inactivity timeout for %s", for: .networking, relay.absoluteString) + await session.fail(reason: "Inactivity timeout") + let result = await session.getResults() + return NegentropySyncResult(haveIds: result.haveIds, needIds: result.needIds, timedOut: true) + } + } + } catch { + // Task was cancelled (session completed first) - return dummy result + return NegentropySyncResult() + } + } + } + + // Return the first result (either completion or timeout) + let result = await group.next() ?? NegentropySyncResult(timedOut: true) + group.cancelAll() + return result + } + } + + // MARK: Message Handlers + + /// Handle a NEG-MSG response from a relay. 
+ /// + /// Continues the reconciliation by processing the message and either: + /// - Sending another NEG-MSG if more rounds needed + /// - Fetching missing events and closing if complete + func handleNegentropyMessage(_ response: NegentropyResponse, from relay: RelayURL) async { + guard let session = sessions[response.sub_id] else { + Log.error("Received NEG-MSG for unknown session: %s", for: .networking, response.sub_id) + return + } + + // First successful NEG-MSG confirms this relay supports negentropy + markRelaySupported(relay) + + Log.debug("Negentropy: received NEG-MSG from %s", for: .networking, relay.absoluteString) + + do { + let (nextMessage, _, _) = try await session.processMessage(response.message) + + if let nextMessage = nextMessage { + // Continue reconciliation + Log.debug("Negentropy: continuing reconciliation with %s", for: .networking, relay.absoluteString) + let negMsg = NegentropyMessage(sub_id: response.sub_id, message: nextMessage) + await pool?.send(.negMsg(negMsg), to: [relay]) + } else { + // Reconciliation complete, fetch missing events + let results = await session.getResults() + Log.debug("Negentropy: reconciliation complete for %s, need %d events", for: .networking, relay.absoluteString, results.needIds.count) + await fetchMissingEvents(results.needIds, from: relay) + + // Close the negentropy session + let negClose = NegentropyClose(sub_id: response.sub_id) + await pool?.send(.negClose(negClose), to: [relay]) + } + } catch { + Log.error("Failed to process NEG-MSG: %s", for: .networking, error.localizedDescription) + await session.fail(reason: error.localizedDescription) + } + } + + /// Handle a NEG-ERR response from a relay. + /// + /// Marks the relay as not supporting negentropy in the cache so we skip it in future syncs. 
+ func handleNegentropyError(_ error: NegentropyError) async { + guard let session = sessions[error.sub_id] else { + return + } + + // Cache this relay as unsupported so we don't try again + let relay = await session.relay + markRelayUnsupported(relay) + Log.info("Negentropy: marking %s as unsupported (NEG-ERR: %s)", + for: .networking, relay.absoluteString, error.reason) + + await session.fail(reason: error.reason) + sessions.removeValue(forKey: error.sub_id) + } + + // MARK: Helpers + + /// Maximum IDs per fetch request to stay under relay message size limits. + /// At ~68 bytes per hex ID, 500 IDs ≈ 34KB, safely under the ~40KB typical limit. + private static let maxIdsPerFetch = 500 + + /// Fetch missing events from a relay in batches. + /// + /// Large ID lists are split into batches to avoid relay message size limits. + /// Each batch gets its own subscription for tracking completion. + private func fetchMissingEvents(_ eventIds: [NoteId], from relay: RelayURL) async { + guard !eventIds.isEmpty else { + Log.info("Negentropy: %s - already up to date", for: .networking, relay.absoluteString) + return + } + + // Split into batches to stay under relay message size limits + let batches = eventIds.chunked(into: Self.maxIdsPerFetch) + + Log.info("Negentropy: requesting %d events in %d batches from %s", + for: .networking, eventIds.count, batches.count, relay.absoluteString) + + for (index, batch) in batches.enumerated() { + var filter = NostrFilter() + filter.ids = batch + + let subId = "neg-fetch-\(UUID().uuidString.prefix(8))" + + // Track this fetch so we can log when it completes + fetchTrackers[subId] = NegentropyFetchTracker(relay: relay, expectedCount: batch.count) + + let sub = NostrSubscribe(filters: [filter], sub_id: subId) + await pool?.send(.subscribe(sub), to: [relay]) + + // Small delay between batches to avoid overwhelming the relay + if index < batches.count - 1 { + try? 
await Task.sleep(nanoseconds: 100_000_000) // 100ms + } + } + } +} + +// Note: hex_encode and hex_decode are defined in ProofOfWork.swift + +#endif // !EXTENSION_TARGET diff --git a/damus/Core/Nostr/NostrEvent.swift b/damus/Core/Nostr/NostrEvent.swift index ba68744ffa..09887830ff 100644 --- a/damus/Core/Nostr/NostrEvent.swift +++ b/damus/Core/Nostr/NostrEvent.swift @@ -352,6 +352,12 @@ func verify_nostr_response(response: borrowing NostrResponse) -> Bool { return true case .auth(_): return true + case .negMsg(_): + return true // NIP-77 negentropy messages don't need verification + case .negErr(_): + return true // NIP-77 negentropy errors don't need verification + case .closed(_): + return true // CLOSED messages don't need verification } } diff --git a/damus/Core/Nostr/NostrRequest.swift b/damus/Core/Nostr/NostrRequest.swift index d5554ad85c..97d89ae369 100644 --- a/damus/Core/Nostr/NostrRequest.swift +++ b/damus/Core/Nostr/NostrRequest.swift @@ -12,6 +12,26 @@ struct NostrSubscribe { let sub_id: String } +// MARK: - NIP-77 Negentropy Types + +/// NIP-77 NEG-OPEN request to initiate negentropy reconciliation +struct NegentropyOpen { + let sub_id: String + let filter: NostrFilter + let initial_message: String // hex-encoded negentropy message +} + +/// NIP-77 NEG-MSG request to continue negentropy reconciliation +struct NegentropyMessage { + let sub_id: String + let message: String // hex-encoded negentropy message +} + +/// NIP-77 NEG-CLOSE request to terminate negentropy reconciliation +struct NegentropyClose { + let sub_id: String +} + /// Models a request/message that is sent to a Nostr relay enum NostrRequestType { /// A standard nostr request @@ -48,6 +68,12 @@ enum NostrRequest { case event(NostrEvent) /// Authenticate with the relay case auth(NostrEvent) + /// NIP-77: Initiate negentropy reconciliation + case negOpen(NegentropyOpen) + /// NIP-77: Continue negentropy reconciliation + case negMsg(NegentropyMessage) + /// NIP-77: Close negentropy 
reconciliation + case negClose(NegentropyClose) /// Whether this request is meant to write data to a relay var is_write: Bool { @@ -60,12 +86,14 @@ enum NostrRequest { return true case .auth: return false + case .negOpen, .negMsg, .negClose: + return false } } - + /// Whether this request is meant to read data from a relay var is_read: Bool { return !is_write } - + } diff --git a/damus/Core/Nostr/NostrResponse.swift b/damus/Core/Nostr/NostrResponse.swift index ec5b3342a5..6cd9d4bb41 100644 --- a/damus/Core/Nostr/NostrResponse.swift +++ b/damus/Core/Nostr/NostrResponse.swift @@ -18,6 +18,34 @@ enum MaybeResponse { case ok(NostrResponse) } +/// NIP-77 NEG-MSG response from relay +struct NegentropyResponse { + let sub_id: String + let message: String // hex-encoded negentropy message +} + +/// NIP-77 NEG-ERR response from relay +struct NegentropyError { + let sub_id: String + let reason: String +} + +/// NIP-01 CLOSED response - relay closed a subscription +struct SubscriptionClosed { + let sub_id: String + let message: String + + /// Check if the closure was due to rate limiting + var isRateLimited: Bool { + message.hasPrefix("rate-limited:") + } + + /// Check if the closure was due to an error + var isError: Bool { + message.hasPrefix("error:") + } +} + enum NostrResponse { case event(String, NostrEvent) case notice(String) @@ -27,6 +55,12 @@ enum NostrResponse { /// /// The associated type of this case is the challenge string sent by the server. case auth(String) + /// NIP-77 negentropy message response + case negMsg(NegentropyResponse) + /// NIP-77 negentropy error response + case negErr(NegentropyError) + /// NIP-01 CLOSED - relay closed a subscription (e.g., rate limiting, error) + case closed(SubscriptionClosed) var subid: String? 
{ switch self { @@ -40,10 +74,66 @@ enum NostrResponse { return nil case .auth(let challenge_string): return challenge_string + case .negMsg(let response): + return response.sub_id + case .negErr(let error): + return error.sub_id + case .closed(let closed): + return closed.sub_id + } + } + + /// Try to parse messages that nostrdb doesn't support (NIP-77 negentropy, CLOSED, NOTICE) + static func parse_extended(json: String) -> NostrResponse? { + // Quick check for messages we handle here + guard json.hasPrefix("[\"NEG-") || json.hasPrefix("[\"CLOSED\"") || json.hasPrefix("[\"NOTICE\"") else { return nil } + + guard let data = json.data(using: .utf8), + let array = try? JSONSerialization.jsonObject(with: data) as? [Any], + array.count >= 2, + let msgType = array[0] as? String else { + return nil + } + + switch msgType { + case "NOTICE": + // NIP-01: ["NOTICE", "message"] + guard let message = array[1] as? String else { return nil } + return .notice(message) + + case "NEG-MSG": + guard let subId = array[1] as? String, + array.count >= 3, + let message = array[2] as? String else { + return nil + } + return .negMsg(NegentropyResponse(sub_id: subId, message: message)) + + case "NEG-ERR": + guard let subId = array[1] as? String, + array.count >= 3, + let reason = array[2] as? String else { + return nil + } + return .negErr(NegentropyError(sub_id: subId, reason: reason)) + + case "CLOSED": + // NIP-01: ["CLOSED", , ] + guard let subId = array[1] as? String else { return nil } + let message = array.count >= 3 ? (array[2] as? String ?? "") : "" + return .closed(SubscriptionClosed(sub_id: subId, message: message)) + + default: + return nil } } static func owned_from_json(json: String) -> NostrResponse? 
{ + // Try extended messages first (nostrdb doesn't support them) + if let extResponse = parse_extended(json: json) { + return extResponse + } + return json.withCString{ cstr in let bufsize: Int = max(Int(Double(json.utf8.count) * 8.0), Int(getpagesize())) let data = malloc(bufsize) diff --git a/damus/Core/Nostr/RelayConnection.swift b/damus/Core/Nostr/RelayConnection.swift index 1581b018af..387e0abecc 100644 --- a/damus/Core/Nostr/RelayConnection.swift +++ b/damus/Core/Nostr/RelayConnection.swift @@ -18,14 +18,15 @@ enum NostrConnectionEvent { /// /// Implementation note: Messaging events should use `.nostr_event` in `NostrConnectionEvent` enum WSConnectionEvent { - case connected + /// Relay connected. `isReconnect` is true if this relay was previously connected this session. + case connected(isReconnect: Bool) case disconnected(URLSessionWebSocketTask.CloseCode, String?) case error(Error) - - static func from(full_ws_event: WebSocketEvent) -> Self? { + + static func from(full_ws_event: WebSocketEvent, isReconnect: Bool = false) -> Self? { switch full_ws_event { case .connected: - return .connected + return .connected(isReconnect: isReconnect) case .message(_): return nil case .disconnected(let closeCode, let string): @@ -50,7 +51,11 @@ final class RelayConnection: ObservableObject { @Published private(set) var isConnected = false @Published private(set) var isConnecting = false private var isDisabled = false - + + /// Tracks whether this relay has ever been connected during this app session. + /// Used to distinguish reconnects from initial connects (for triggering negentropy sync). + private(set) var hasConnectedAtLeastOnce = false + private(set) var last_connection_attempt: TimeInterval = 0 private(set) var last_pong: Date? 
= nil private(set) var backoff: TimeInterval = 1.0 @@ -150,6 +155,13 @@ final class RelayConnection: ObservableObject { private func receive(event: WebSocketEvent) async { assert(!Thread.isMainThread, "This code must not be executed on the main thread") processEvent(event) + + // Track whether this is a reconnect (for negentropy sync on reconnect) + let isReconnect = hasConnectedAtLeastOnce + if case .connected = event { + hasConnectedAtLeastOnce = true + } + switch event { case .connected: DispatchQueue.main.async { @@ -185,7 +197,7 @@ final class RelayConnection: ObservableObject { self.reconnect_with_backoff() } } - guard let ws_connection_event = NostrConnectionEvent.WSConnectionEvent.from(full_ws_event: event) else { return } + guard let ws_connection_event = NostrConnectionEvent.WSConnectionEvent.from(full_ws_event: event, isReconnect: isReconnect) else { return } await self.handleEvent(.ws_connection_event(ws_connection_event)) if let description = event.description { @@ -229,7 +241,7 @@ final class RelayConnection: ObservableObject { await self.handleEvent(.nostr_event(ev)) return } - print("failed to decode event \(messageString)") + Log.info("failed to decode event from %s: %s", for: .networking, relay_url.absoluteString, messageString) case .data(let messageData): if let messageString = String(data: messageData, encoding: .utf8) { await receive(message: .string(messageString)) @@ -250,6 +262,12 @@ func make_nostr_req(_ req: NostrRequest) -> String? 
{ return make_nostr_push_event(ev: ev) case .auth(let ev): return make_nostr_auth_event(ev: ev) + case .negOpen(let neg): + return make_negentropy_open(neg) + case .negMsg(let neg): + return make_negentropy_msg(neg) + case .negClose(let neg): + return make_negentropy_close(neg) } } @@ -289,3 +307,27 @@ func make_nostr_subscription_req(_ filters: [NostrFilter], sub_id: String) -> St req += "]" return req } + +// MARK: - NIP-77 Negentropy Message Serialization + +/// Creates a NEG-OPEN request: ["NEG-OPEN", , , ] +func make_negentropy_open(_ neg: NegentropyOpen) -> String? { + let encoder = JSONEncoder() + guard let filter_json = try? encoder.encode(neg.filter) else { + return nil + } + let filter_str = String(decoding: filter_json, as: UTF8.self) + let msg = "[\"NEG-OPEN\",\"\(neg.sub_id)\",\(filter_str),\"\(neg.initial_message)\"]" + Log.info("NEG-OPEN message: filter=%s", for: .networking, filter_str) + return msg +} + +/// Creates a NEG-MSG request: ["NEG-MSG", , ] +func make_negentropy_msg(_ neg: NegentropyMessage) -> String? { + return "[\"NEG-MSG\",\"\(neg.sub_id)\",\"\(neg.message)\"]" +} + +/// Creates a NEG-CLOSE request: ["NEG-CLOSE", ] +func make_negentropy_close(_ neg: NegentropyClose) -> String? { + return "[\"NEG-CLOSE\",\"\(neg.sub_id)\"]" +} diff --git a/damus/Core/Nostr/RelayPool.swift b/damus/Core/Nostr/RelayPool.swift index 88b3e6c5c9..8e214eb834 100644 --- a/damus/Core/Nostr/RelayPool.swift +++ b/damus/Core/Nostr/RelayPool.swift @@ -44,6 +44,11 @@ actor RelayPool { var delegate: Delegate? private(set) var signal: SignalModel = SignalModel() + #if !EXTENSION_TARGET + /// Manager for NIP-77 negentropy sync sessions + private(set) var negentropyManager: NegentropyManager? 
+ #endif + let network_monitor = NWPathMonitor() private let network_monitor_queue = DispatchQueue(label: "io.damus.network_monitor") private var last_network_status: NWPath.Status = .unsatisfied @@ -80,7 +85,22 @@ actor RelayPool { Task { await self?.pathUpdateHandler(path: path) } } network_monitor.start(queue: network_monitor_queue) + + #if !EXTENSION_TARGET + // Initialize negentropy manager (needs to be done after self is available) + Task { [weak self] in + guard let self = self else { return } + await self.initializeNegentropyManager() + } + #endif } + + #if !EXTENSION_TARGET + /// Initialize the negentropy manager (must be called from async context) + private func initializeNegentropyManager() { + self.negentropyManager = NegentropyManager(pool: self, ndb: self.ndb) + } + #endif private func pathUpdateHandler(path: NWPath) async { if (path.status == .satisfied || path.status == .requiresConnection) && self.last_network_status != path.status { @@ -262,6 +282,8 @@ actor RelayPool { return true case .auth(_): return true + case .negOpen(_), .negMsg(_), .negClose(_): + return false // Do not persist negentropy requests between sessions } } } @@ -332,6 +354,8 @@ actor RelayPool { } case .ok(_): break // No need to handle this, we are not sending an event to the relay case .auth(_): break // Handled in a separate function in RelayPool + case .negMsg(_), .negErr(_): break // Handled by NegentropyManager + case .closed(_): break // Handled by NegentropyManager for neg-fetch subscriptions } } } @@ -498,13 +522,22 @@ actor RelayPool { func handle_event(relay_id: RelayURL, event: NostrConnectionEvent) async { record_seen(relay_id: relay_id, event: event) - // When we reconnect, do two things + // When we connect/reconnect, do these things: // - Send messages that were stored in the queue // - Re-subscribe to filters we had subscribed before + // - On reconnect: run negentropy sync for this relay if case .ws_connection_event(let ws) = event { - if case .connected = ws 
{ + if case .connected(let isReconnect) = ws { run_queue(relay_id) await self.resubscribeAll(relayId: relay_id) + + // Sync with this relay via negentropy on RECONNECT only + // Initial connects are handled by background NIP-11 check which provides settling time + #if !EXTENSION_TARGET + if isReconnect { + await negentropyManager?.syncSingleRelay(relay_id) + } + #endif } } @@ -535,6 +568,41 @@ actor RelayPool { } } + #if !EXTENSION_TARGET + // Handle NIP-77 negentropy responses + if case let .nostr_event(nostrResponse) = event { + switch nostrResponse { + case .negMsg(let response): + await negentropyManager?.handleNegentropyMessage(response, from: relay_id) + case .negErr(let error): + await negentropyManager?.handleNegentropyError(error) + case .notice(let message): + // Handle "negentropy disabled" NOTICE - mark relay as unsupported + if message.lowercased().contains("negentropy disabled") { + Log.info("Negentropy: %s has negentropy disabled (NOTICE)", for: .networking, relay_id.absoluteString) + await negentropyManager?.markRelayUnsupported(relay_id) + } + case .event(let subId, _): + // Track events received for negentropy fetch subscriptions + if subId.hasPrefix("neg-fetch-") { + await negentropyManager?.trackFetchedEvent(subId: subId) + } + case .eose(let subId): + // Log completion when negentropy fetch finishes + if subId.hasPrefix("neg-fetch-") { + await negentropyManager?.handleFetchEOSE(subId: subId) + } + case .closed(let closed): + // Handle CLOSED for negentropy fetch subscriptions + if closed.sub_id.hasPrefix("neg-fetch-") { + await negentropyManager?.handleFetchClosed(subId: closed.sub_id, message: closed.message) + } + default: + break + } + } + #endif + for handler in handlers { // We send data to the handlers if: // - the subscription ID matches, or @@ -550,6 +618,30 @@ func add_rw_relay(_ pool: RelayPool, _ url: RelayURL) async { try? 
await pool.add_relay(RelayPool.RelayDescriptor(url: url, info: .readWrite)) } +#if !EXTENSION_TARGET +// MARK: - NIP-77 Negentropy Sync + +extension RelayPool { + /// Sync timeline events using NIP-77 negentropy protocol + /// This efficiently syncs only missing events rather than re-fetching everything + /// - Parameters: + /// - filter: The filter for events to sync (typically timeline filter) + /// - relays: Specific relays to sync with (nil = all connected relays) + /// - Returns: Dictionary of relay URL to sync results + func syncWithNegentropy(filter: NostrFilter, to relays: [RelayURL]? = nil) async throws -> [RelayURL: NegentropySyncResult] { + guard let manager = negentropyManager else { + throw NegentropySyncError.initializationFailed + } + return try await manager.sync(filter: filter, to: relays) + } + + /// Check if negentropy sync is available (manager is initialized) + var isNegentropyAvailable: Bool { + return negentropyManager != nil + } +} +#endif + extension RelayPool { protocol Delegate { diff --git a/damus/Features/Onboarding/Views/SaveKeysView.swift b/damus/Features/Onboarding/Views/SaveKeysView.swift index 468b4c168b..a272be4e10 100644 --- a/damus/Features/Onboarding/Views/SaveKeysView.swift +++ b/damus/Features/Onboarding/Views/SaveKeysView.swift @@ -173,7 +173,7 @@ struct SaveKeysView: View { switch ev { case .ws_connection_event(let wsev): switch wsev { - case .connected: + case .connected(_): let metadata = create_account_to_metadata(account) if let keypair = account.keypair.to_full(), @@ -217,6 +217,10 @@ struct SaveKeysView: View { break case .auth: break + case .negMsg, .negErr: + break // NIP-77 negentropy messages not relevant during signup + case .closed: + break // CLOSED messages not relevant during signup } } } diff --git a/damusTests/NdbNegentropyTests.swift b/damusTests/NdbNegentropyTests.swift new file mode 100644 index 0000000000..d86c1510b0 --- /dev/null +++ b/damusTests/NdbNegentropyTests.swift @@ -0,0 +1,232 @@ +// +// 
NdbNegentropyTests.swift +// damusTests +// +// Tests for native nostrdb negentropy integration. +// These tests verify the Swift bindings for ndb_negentropy work correctly. +// + +import XCTest +@testable import damus + +final class NdbNegentropyTests: XCTestCase { + + // MARK: - Storage Tests + + func testStorageInitialization() throws { + // Storage should initialize successfully + let storage = try NdbNegentropyStorage() + XCTAssertNotNil(storage) + XCTAssertEqual(storage.count, 0) + XCTAssertFalse(storage.isSealed) + } + + func testStorageAddItems() throws { + let storage = try NdbNegentropyStorage() + + // Create test event IDs (32 bytes each) + let id1 = Data(repeating: 0x01, count: 32) + let id2 = Data(repeating: 0x02, count: 32) + let id3 = Data(repeating: 0x03, count: 32) + + // Add items with different timestamps + try storage.add(timestamp: 1000, id: id1) + try storage.add(timestamp: 2000, id: id2) + try storage.add(timestamp: 3000, id: id3) + + XCTAssertEqual(storage.count, 3) + XCTAssertFalse(storage.isSealed) + } + + func testStorageSeal() throws { + let storage = try NdbNegentropyStorage() + + let id1 = Data(repeating: 0xAA, count: 32) + try storage.add(timestamp: 1000, id: id1) + + // Seal should succeed + try storage.seal() + XCTAssertTrue(storage.isSealed) + + // Adding after seal should fail + let id2 = Data(repeating: 0xBB, count: 32) + XCTAssertThrowsError(try storage.add(timestamp: 2000, id: id2)) { error in + XCTAssertEqual(error as? 
NdbNegentropyError, .storageAlreadySealed) + } + } + + func testStorageWithNoteId() throws { + let storage = try NdbNegentropyStorage() + + // Use a real hex ID + let hexId = "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" + guard let noteId = NoteId(hex: hexId) else { + XCTFail("Failed to create NoteId from hex") + return + } + + try storage.add(timestamp: 1000, noteId: noteId) + XCTAssertEqual(storage.count, 1) + } + + // MARK: - Reconciliation Tests + + func testNegentropyInitialization() throws { + let storage = try NdbNegentropyStorage() + + // Add some items + let id1 = Data(repeating: 0x01, count: 32) + let id2 = Data(repeating: 0x02, count: 32) + try storage.add(timestamp: 1000, id: id1) + try storage.add(timestamp: 2000, id: id2) + try storage.seal() + + // Negentropy should initialize with sealed storage + let negentropy = try NdbNegentropy(storage: storage) + XCTAssertNotNil(negentropy) + XCTAssertFalse(negentropy.isComplete) + } + + func testNegentropyInitialMessage() throws { + let storage = try NdbNegentropyStorage() + + let id1 = Data(repeating: 0x01, count: 32) + try storage.add(timestamp: 1000, id: id1) + try storage.seal() + + let negentropy = try NdbNegentropy(storage: storage) + + // Initial message should be valid hex + let initialHex = try negentropy.initiateHex() + XCTAssertFalse(initialHex.isEmpty) + + // Should start with protocol version 0x61 (hex "61") + XCTAssertTrue(initialHex.hasPrefix("61")) + + // Should be valid hex (even length, only hex characters) + XCTAssertEqual(initialHex.count % 2, 0) + XCTAssertTrue(initialHex.allSatisfy { $0.isHexDigit }) + } + + func testNegentropyEmptyStorage() throws { + let storage = try NdbNegentropyStorage() + try storage.seal() + + let negentropy = try NdbNegentropy(storage: storage) + + // Should still be able to initiate with empty storage + let initialHex = try negentropy.initiateHex() + XCTAssertFalse(initialHex.isEmpty) + XCTAssertTrue(initialHex.hasPrefix("61")) + } + + func 
testNegentropyHaveAndNeedIds() throws { + let storage = try NdbNegentropyStorage() + try storage.seal() + + let negentropy = try NdbNegentropy(storage: storage) + _ = try negentropy.initiateHex() + + // Initially no have/need IDs + XCTAssertTrue(negentropy.haveIds.isEmpty) + XCTAssertTrue(negentropy.needIds.isEmpty) + } + + func testNegentropyConfig() throws { + let storage = try NdbNegentropyStorage() + try storage.seal() + + // Test with custom config + let config = NdbNegentropyConfig( + frameSizeLimit: 4096, + idlistThreshold: 8, + splitCount: 8 + ) + + let negentropy = try NdbNegentropy(storage: storage, config: config) + XCTAssertNotNil(negentropy) + + let initialHex = try negentropy.initiateHex() + XCTAssertFalse(initialHex.isEmpty) + } + + // MARK: - Protocol Compliance Tests + + func testProtocolVersion() throws { + let storage = try NdbNegentropyStorage() + try storage.seal() + + let negentropy = try NdbNegentropy(storage: storage) + let initialData = try negentropy.initiate() + + // First byte should be protocol version 0x61 + XCTAssertEqual(initialData.first, 0x61) + } + + func testReconcileWithInvalidMessage() throws { + let storage = try NdbNegentropyStorage() + try storage.seal() + + let negentropy = try NdbNegentropy(storage: storage) + _ = try negentropy.initiateHex() + + // Invalid hex should throw + XCTAssertThrowsError(try negentropy.reconcileHex(hexMessage: "not-valid-hex")) + + // Wrong protocol version should throw + XCTAssertThrowsError(try negentropy.reconcileHex(hexMessage: "62")) + } + + func testReconcileWithVersionOnlyResponse() throws { + let storage = try NdbNegentropyStorage() + try storage.seal() + + let negentropy = try NdbNegentropy(storage: storage) + _ = try negentropy.initiateHex() + + // A response with just version byte (0x61) indicates sync complete + // This simulates what a relay sends when it has no differences + let response = try negentropy.reconcileHex(hexMessage: "61") + + // Empty response means reconciliation is 
complete + XCTAssertTrue(response.isEmpty || negentropy.isComplete) + } + + // MARK: - Memory Management Tests + + func testStorageDeallocation() throws { + // This test ensures storage is properly deallocated + weak var weakStorage: NdbNegentropyStorage? + + autoreleasepool { + let storage = try! NdbNegentropyStorage() + try! storage.add(timestamp: 1000, id: Data(repeating: 0x01, count: 32)) + try! storage.seal() + weakStorage = storage + } + + // Storage should be deallocated + XCTAssertNil(weakStorage) + } + + func testNegentropyKeepsStorageAlive() throws { + // Negentropy should keep storage alive while in use + weak var weakStorage: NdbNegentropyStorage? + var negentropy: NdbNegentropy? + + autoreleasepool { + let storage = try! NdbNegentropyStorage() + try! storage.seal() + weakStorage = storage + negentropy = try! NdbNegentropy(storage: storage) + } + + // Storage should still be alive because negentropy holds reference + XCTAssertNotNil(weakStorage) + XCTAssertNotNil(negentropy) + + // After releasing negentropy, storage should be deallocated + negentropy = nil + XCTAssertNil(weakStorage) + } +} diff --git a/nostrdb/NdbNegentropy.swift b/nostrdb/NdbNegentropy.swift new file mode 100644 index 0000000000..f7f99dc946 --- /dev/null +++ b/nostrdb/NdbNegentropy.swift @@ -0,0 +1,329 @@ +// +// NdbNegentropy.swift +// damus +// +// Created by Claude on 2025-01-17. +// + +import Foundation + +/// Errors that can occur when working with NdbNegentropy. +enum NdbNegentropyError: Error { + case storageInitFailed + case storageSealFailed + case storageAlreadySealed + case storageFromFilterFailed + case reconciliationInitFailed + case initiateFailed + case reconcileFailed + case bufferTooSmall +} + +/// Swift wrapper for negentropy storage (ndb_negentropy_storage). +/// Holds a sorted list of (timestamp, id) pairs for reconciliation. 
+final class NdbNegentropyStorage { + private var storage: ndb_negentropy_storage + private var isDestroyed = false + + /// Initialize empty storage. + init() throws { + storage = ndb_negentropy_storage() + guard ndb_negentropy_storage_init(&storage) == 1 else { + throw NdbNegentropyError.storageInitFailed + } + } + + /// Populate storage from a NostrDB filter query. + /// The storage will be automatically sealed after this call. + /// + /// - Parameters: + /// - txn: Active read transaction (RawNdbTxnAccessible) + /// - filter: NdbFilter to query events + /// - limit: Maximum number of events (0 uses filter's limit or 10000) + /// - Returns: Number of items added + @discardableResult + func populate(txn: any RawNdbTxnAccessible, filter: NdbFilter, limit: Int32 = 0) throws -> Int { + var txnCopy = txn.txn + let count = ndb_negentropy_storage_from_filter( + &storage, + &txnCopy, + filter.unsafePointer, + limit + ) + guard count >= 0 else { + throw NdbNegentropyError.storageFromFilterFailed + } + return Int(count) + } + + /// Add an item to storage manually. + /// Items can be added in any order - they will be sorted when sealed. + /// + /// - Parameters: + /// - timestamp: Event created_at timestamp + /// - id: 32-byte event ID + func add(timestamp: UInt64, id: Data) throws { + guard id.count == 32 else { return } + let result = id.withUnsafeBytes { idPtr -> Int32 in + guard let baseAddress = idPtr.baseAddress else { return 0 } + return ndb_negentropy_storage_add( + &storage, + timestamp, + baseAddress.assumingMemoryBound(to: UInt8.self) + ) + } + guard result == 1 else { + throw NdbNegentropyError.storageAlreadySealed + } + } + + /// Add an item using NoteId. + func add(timestamp: UInt64, noteId: NoteId) throws { + try noteId.withUnsafePointer { idPtr in + guard ndb_negentropy_storage_add(&storage, timestamp, idPtr) == 1 else { + throw NdbNegentropyError.storageAlreadySealed + } + } + } + + /// Seal the storage for use in reconciliation. 
+ /// After sealing, no more items can be added. + func seal() throws { + guard ndb_negentropy_storage_seal(&storage) == 1 else { + throw NdbNegentropyError.storageSealFailed + } + } + + /// Number of items in storage. + var count: Int { + ndb_negentropy_storage_size(&storage) + } + + /// Whether the storage is sealed and ready for reconciliation. + var isSealed: Bool { + storage.sealed != 0 + } + + /// Internal pointer for use with NdbNegentropy. + var pointer: UnsafePointer { + withUnsafePointer(to: &storage) { $0 } + } + + deinit { + if !isDestroyed { + ndb_negentropy_storage_destroy(&storage) + isDestroyed = true + } + } +} + +/// Configuration for negentropy reconciliation. +struct NdbNegentropyConfig { + /// Maximum message size in bytes. 0 = unlimited. + var frameSizeLimit: Int32 = 0 + + /// Threshold for switching between fingerprint and idlist modes. + /// Ranges with fewer items send full ID lists. + var idlistThreshold: Int32 = 16 + + /// Number of sub-ranges to split into when fingerprints differ. + var splitCount: Int32 = 16 + + /// Create a C config struct. + func toCConfig() -> ndb_negentropy_config { + return ndb_negentropy_config( + frame_size_limit: frameSizeLimit, + idlist_threshold: idlistThreshold, + split_count: splitCount + ) + } +} + +/// Swift wrapper for negentropy reconciliation (ndb_negentropy). +/// Processes messages and determines which items each side has that the other lacks. +final class NdbNegentropy { + private var neg: ndb_negentropy + private var isDestroyed = false + + // Keep a strong reference to storage to prevent it from being deallocated + private let storageRef: NdbNegentropyStorage + + /// Initialize reconciliation context. + /// + /// - Parameters: + /// - storage: Sealed storage containing local items + /// - config: Optional configuration (nil uses defaults) + init(storage: NdbNegentropyStorage, config: NdbNegentropyConfig? 
= nil) throws { + self.storageRef = storage + self.neg = ndb_negentropy() + + var cConfig = config?.toCConfig() ?? ndb_negentropy_config() + let configPtr = config != nil ? withUnsafePointer(to: &cConfig) { $0 } : nil + + guard ndb_negentropy_init(&neg, storage.pointer, configPtr) == 1 else { + throw NdbNegentropyError.reconciliationInitFailed + } + } + + /// Create the initial message to start reconciliation. + /// Returns the binary message to send to the relay. + func initiate() throws -> Data { + var buffer = [UInt8](repeating: 0, count: 4096) + var outlen: Int = 0 + + guard ndb_negentropy_initiate(&neg, &buffer, buffer.count, &outlen) == 1 else { + throw NdbNegentropyError.initiateFailed + } + + return Data(buffer.prefix(outlen)) + } + + /// Create the initial message as a hex string for NIP-77. + func initiateHex() throws -> String { + let data = try initiate() + return data.map { String(format: "%02x", $0) }.joined() + } + + /// Process an incoming message and generate a response. 
+ /// + /// - Parameter message: Binary message received from relay + /// - Returns: Response message to send back (empty if reconciliation complete) + func reconcile(message: Data) throws -> Data { + // Use 1MB buffer for response generation + // With 4000+ input ranges, output can exceed 512KB (seen 524KB+ in testing) + var buffer = [UInt8](repeating: 0, count: 1024 * 1024) + var outlen = buffer.count + + let result = message.withUnsafeBytes { msgPtr -> Int32 in + guard let baseAddress = msgPtr.baseAddress else { return 0 } + return ndb_negentropy_reconcile( + &neg, + baseAddress.assumingMemoryBound(to: UInt8.self), + message.count, + &buffer, + &outlen + ) + } + + guard result == 1 else { + // Try to diagnose the failure + let rangeCheck = message.withUnsafeBytes { ptr -> Int32 in + guard let baseAddress = ptr.baseAddress else { return -2 } + return ndb_negentropy_message_count_ranges( + baseAddress.assumingMemoryBound(to: UInt8.self), + message.count + ) + } + // Log first 32 bytes of message for debugging + let hexPrefix = message.prefix(32).map { String(format: "%02x", $0) }.joined() + Log.error("ndb_negentropy_reconcile failed: input=%d bytes, version=0x%02x, rangeCheck=%d, bufsize=%d, prefix=%s", + for: .networking, message.count, message.first ?? 0, rangeCheck, buffer.count, hexPrefix) + throw NdbNegentropyError.reconcileFailed + } + + return Data(buffer.prefix(outlen)) + } + + /// Process an incoming hex message and generate a hex response. 
+ /// + /// - Parameter hexMessage: Hex-encoded message from relay + /// - Returns: Hex-encoded response (empty string if complete) + func reconcileHex(hexMessage: String) throws -> String { + guard let messageData = hexMessage.hexDecodedData else { + Log.error("ndb_negentropy: failed to decode hex message (length=%d)", for: .networking, hexMessage.count) + throw NdbNegentropyError.reconcileFailed + } + + // Count ranges in message for diagnostics + let rangeCount = messageData.withUnsafeBytes { ptr -> Int32 in + guard let baseAddress = ptr.baseAddress else { return -1 } + return ndb_negentropy_message_count_ranges( + baseAddress.assumingMemoryBound(to: UInt8.self), + messageData.count + ) + } + + Log.debug("ndb_negentropy: processing message of %d bytes (version=0x%02x, ranges=%d)", + for: .networking, messageData.count, messageData.first ?? 0, rangeCount) + + let response = try reconcile(message: messageData) + + // Empty response (just version byte) means complete + if response.count <= 1 { + Log.debug("ndb_negentropy: reconciliation complete (have=%d, need=%d)", + for: .networking, haveIds.count, needIds.count) + return "" + } + + Log.debug("ndb_negentropy: generated response of %d bytes", for: .networking, response.count) + return response.map { String(format: "%02x", $0) }.joined() + } + + /// Whether reconciliation is complete. + var isComplete: Bool { + ndb_negentropy_is_complete(&neg) == 1 + } + + /// IDs we have that the remote needs (events to send). + var haveIds: [NoteId] { + var idsPtr: UnsafePointer? + let count = ndb_negentropy_get_have_ids(&neg, &idsPtr) + + guard count > 0, let ptr = idsPtr else { + return [] + } + + var result: [NoteId] = [] + for i in 0..? + let count = ndb_negentropy_get_need_ids(&neg, &idsPtr) + + guard count > 0, let ptr = idsPtr else { + return [] + } + + var result: [NoteId] = [] + for i in 0.. 
/*
 * NOTE(review): the original #include targets were corrupted in this patch
 * (angle-bracket contents stripped). The list below is reconstructed from
 * what this translation unit uses: fixed-width integers, size_t, and the
 * mem* helpers. Confirm against upstream that the project header
 * "ndb_negentropy.h" is also included here (it declares the structs used
 * later in this file); it is include-guarded below so it is only pulled in
 * when present.
 */
#if defined(__has_include)
# if __has_include("ndb_negentropy.h")
#  include "ndb_negentropy.h"
# endif
#endif
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* ============================================================
 * VARINT ENCODING/DECODING
 * ============================================================
 *
 * Negentropy varints are MSB-first (most significant byte first),
 * the opposite of the common LEB128 encoding.
 *
 * Each byte carries 7 payload bits; the high bit is a continuation
 * flag set on every byte except the last.
 *
 * Example: encoding 300 (0x12C)
 *   7-bit groups (MSB first): 0000010, 0101100
 *   with continuation bits:   10000010, 00101100
 *   wire bytes:               0x82 0x2C
 */


/*
 * Number of bytes needed to encode n as a varint.
 *
 * Counts the 7-bit groups required; zero still occupies one byte.
 */
int ndb_negentropy_varint_size(uint64_t n)
{
	int size;

	/* Zero needs exactly one byte */
	if (n == 0)
		return 1;

	size = 0;
	while (n > 0) {
		size++;
		n >>= 7;
	}

	return size;
}


/*
 * Encode a 64-bit value as an MSB-first varint.
 *
 * Returns the number of bytes written, or 0 if buf is too small.
 * Bytes are filled right-to-left so the most significant group lands
 * first in the buffer; all bytes except the final one get the
 * continuation (high) bit.
 */
int ndb_negentropy_varint_encode(unsigned char *buf, size_t buflen, uint64_t n)
{
	int size;
	int i;

	size = ndb_negentropy_varint_size(n);

	/* Guard: ensure buffer is large enough */
	if (buflen < (size_t)size)
		return 0;

	for (i = size - 1; i >= 0; i--) {
		unsigned char byte = n & 0x7F;

		/* Continuation bit on all but the last (rightmost) byte */
		if (i != size - 1)
			byte |= 0x80;

		buf[i] = byte;
		n >>= 7;
	}

	return size;
}


/*
 * Decode an MSB-first varint.
 *
 * Returns the number of bytes consumed, or 0 on error (empty buffer,
 * NULL output, missing terminator within 10 bytes, or a value that
 * does not fit in 64 bits).
 */
int ndb_negentropy_varint_decode(const unsigned char *buf, size_t buflen,
				 uint64_t *out)
{
	uint64_t result;
	size_t i;

	/* Guard: need at least one byte */
	if (buflen == 0)
		return 0;

	/* Guard: output pointer must be valid */
	if (out == NULL)
		return 0;

	result = 0;

	/* Maximum length is 10 bytes (ceil(64/7) = 10). */
	for (i = 0; i < buflen && i < 10; i++) {
		unsigned char byte = buf[i];

		/*
		 * Guard: shifting in another 7-bit group would silently
		 * drop high bits. Reject encodings wider than 64 bits
		 * instead of returning a truncated value.
		 */
		if (result > (UINT64_MAX >> 7))
			return 0;

		/* Build the value MSB-first. */
		result = (result << 7) | (byte & 0x7F);

		/* High bit clear marks the final byte. */
		if ((byte & 0x80) == 0) {
			*out = result;
			return (int)(i + 1);
		}
	}

	/* Ran off the buffer or past 10 bytes without a terminator. */
	return 0;
}


/* ============================================================
 * HEX ENCODING UTILITIES
 * ============================================================
 */

/* Lookup table for hex encoding (lowercase as per nostr convention) */
static const char hex_chars[] = "0123456789abcdef";
+ */ +size_t ndb_negentropy_to_hex(const unsigned char *bin, size_t len, char *hex) +{ + size_t i; + + for (i = 0; i < len; i++) { + hex[i * 2] = hex_chars[(bin[i] >> 4) & 0x0F]; + hex[i * 2 + 1] = hex_chars[bin[i] & 0x0F]; + } + + hex[len * 2] = '\0'; + return len * 2; +} + + +/* + * Convert a single hex character to its numeric value. + * Returns -1 for invalid characters. + */ +static int hex_char_value(char c) +{ + if (c >= '0' && c <= '9') + return c - '0'; + + if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + + if (c >= 'A' && c <= 'F') + return c - 'A' + 10; + + return -1; +} + + +/* + * Convert hex string to binary data. + * + * Input length must be even (two hex chars per byte). + * Invalid hex characters cause an error return. + */ +size_t ndb_negentropy_from_hex(const char *hex, size_t hexlen, + unsigned char *bin, size_t binlen) +{ + size_t i; + size_t out_len; + int high, low; + + /* Guard: hex string must have even length */ + if (hexlen % 2 != 0) + return 0; + + out_len = hexlen / 2; + + /* Guard: output buffer must be large enough */ + if (binlen < out_len) + return 0; + + for (i = 0; i < out_len; i++) { + high = hex_char_value(hex[i * 2]); + low = hex_char_value(hex[i * 2 + 1]); + + /* Guard: both characters must be valid hex */ + if (high < 0 || low < 0) + return 0; + + bin[i] = (unsigned char)((high << 4) | low); + } + + return out_len; +} + + +/* ============================================================ + * FINGERPRINT COMPUTATION + * ============================================================ + */ + +/* + * Initialize accumulator to zero. + */ +void ndb_negentropy_accumulator_init(struct ndb_negentropy_accumulator *acc) +{ + memset(acc->sum, 0, sizeof(acc->sum)); +} + + +/* + * Add a 32-byte ID to the accumulator (mod 2^256). + * + * Both the accumulator and ID are treated as little-endian 256-bit + * unsigned integers. We perform byte-by-byte addition with carry + * propagation. Any final carry is discarded (mod 2^256). 
+ */ +void ndb_negentropy_accumulator_add(struct ndb_negentropy_accumulator *acc, + const unsigned char *id) +{ + int i; + uint16_t carry = 0; + + /* + * Add byte-by-byte, propagating carry. + * Little-endian: byte 0 is least significant. + */ + for (i = 0; i < 32; i++) { + uint16_t sum = (uint16_t)acc->sum[i] + (uint16_t)id[i] + carry; + acc->sum[i] = (unsigned char)(sum & 0xFF); + carry = sum >> 8; + } + + /* Carry overflow is discarded (mod 2^256) */ +} + + +/* + * Compute fingerprint from accumulator and count. + * + * The fingerprint is: SHA256(sum || varint(count))[:16] + * + * We need access to SHA256. NostrDB uses the ccan/crypto/sha256 library. + */ +#include "ccan/crypto/sha256/sha256.h" + +void ndb_negentropy_fingerprint(const struct ndb_negentropy_accumulator *acc, + size_t count, + unsigned char *out) +{ + struct sha256 hash; + unsigned char buf[32 + 10]; /* 32-byte sum + up to 10-byte varint */ + int varint_len; + size_t total_len; + + /* Copy the 32-byte sum */ + memcpy(buf, acc->sum, 32); + + /* Append count as varint */ + varint_len = ndb_negentropy_varint_encode(buf + 32, 10, (uint64_t)count); + total_len = 32 + (size_t)varint_len; + + /* Hash and take first 16 bytes */ + sha256(&hash, buf, total_len); + memcpy(out, hash.u.u8, 16); +} + + +/* ============================================================ + * BOUND ENCODING/DECODING + * ============================================================ + */ + +/* + * Encode a bound into a buffer. 
+ * + * Format: + * + * Timestamp encoding: + * - UINT64_MAX ("infinity") encodes as 0 + * - All other values encode as (1 + delta_from_previous) + */ +int ndb_negentropy_bound_encode(unsigned char *buf, size_t buflen, + const struct ndb_negentropy_bound *bound, + uint64_t *prev_timestamp) +{ + size_t offset = 0; + int written; + uint64_t encoded_ts; + + /* Guard: validate inputs */ + if (buf == NULL || bound == NULL || prev_timestamp == NULL) + return 0; + + /* + * Encode timestamp: + * - Infinity (UINT64_MAX) -> 0 + * - Otherwise -> 1 + (timestamp - prev_timestamp) + */ + if (bound->timestamp == UINT64_MAX) { + encoded_ts = 0; + } else { + uint64_t delta = bound->timestamp - *prev_timestamp; + encoded_ts = 1 + delta; + *prev_timestamp = bound->timestamp; + } + + /* Write encoded timestamp */ + written = ndb_negentropy_varint_encode(buf + offset, buflen - offset, encoded_ts); + if (written == 0) + return 0; + offset += (size_t)written; + + /* Write prefix length */ + written = ndb_negentropy_varint_encode(buf + offset, buflen - offset, + (uint64_t)bound->prefix_len); + if (written == 0) + return 0; + offset += (size_t)written; + + /* Guard: ensure room for prefix bytes */ + if (offset + bound->prefix_len > buflen) + return 0; + + /* Write ID prefix bytes */ + if (bound->prefix_len > 0) + memcpy(buf + offset, bound->id_prefix, bound->prefix_len); + offset += bound->prefix_len; + + return (int)offset; +} + + +/* + * Decode a bound from a buffer. 
+ */ +int ndb_negentropy_bound_decode(const unsigned char *buf, size_t buflen, + struct ndb_negentropy_bound *bound, + uint64_t *prev_timestamp) +{ + size_t offset = 0; + int consumed; + uint64_t encoded_ts; + uint64_t prefix_len; + + /* Guard: validate inputs */ + if (buf == NULL || bound == NULL || prev_timestamp == NULL) + return 0; + + /* Read encoded timestamp */ + consumed = ndb_negentropy_varint_decode(buf + offset, buflen - offset, &encoded_ts); + if (consumed == 0) + return 0; + offset += (size_t)consumed; + + /* + * Decode timestamp: + * - 0 -> Infinity (UINT64_MAX) + * - Otherwise -> prev_timestamp + (encoded_ts - 1) + */ + if (encoded_ts == 0) { + bound->timestamp = UINT64_MAX; + } else { + uint64_t delta = encoded_ts - 1; + + /* Guard: check for timestamp overflow */ + if (delta > UINT64_MAX - *prev_timestamp) + return 0; + + bound->timestamp = *prev_timestamp + delta; + *prev_timestamp = bound->timestamp; + } + + /* Read prefix length */ + consumed = ndb_negentropy_varint_decode(buf + offset, buflen - offset, &prefix_len); + if (consumed == 0) + return 0; + offset += (size_t)consumed; + + /* Guard: prefix length must be <= 32 */ + if (prefix_len > 32) + return 0; + + bound->prefix_len = (uint8_t)prefix_len; + + /* Guard: ensure buffer has enough bytes for prefix */ + if (offset + bound->prefix_len > buflen) + return 0; + + /* Read ID prefix bytes, zero the rest */ + memset(bound->id_prefix, 0, 32); + if (bound->prefix_len > 0) + memcpy(bound->id_prefix, buf + offset, bound->prefix_len); + offset += bound->prefix_len; + + return (int)offset; +} + + +/* ============================================================ + * RANGE ENCODING/DECODING + * ============================================================ + * + * Ranges are the core unit of negentropy messages. Each range + * specifies a section of the item space and what to do with it. + */ + + +/* + * Encode a range into a buffer. 
+ * + * Format: + * + * We use early returns for each error condition to avoid deep nesting. + */ +int ndb_negentropy_range_encode(unsigned char *buf, size_t buflen, + const struct ndb_negentropy_range *range, + uint64_t *prev_timestamp) +{ + size_t offset = 0; + int written; + + /* Guard: validate inputs */ + if (buf == NULL || range == NULL || prev_timestamp == NULL) + return 0; + + /* Encode the upper bound */ + written = ndb_negentropy_bound_encode(buf + offset, buflen - offset, + &range->upper_bound, prev_timestamp); + if (written == 0) + return 0; + offset += (size_t)written; + + /* Encode the mode */ + written = ndb_negentropy_varint_encode(buf + offset, buflen - offset, + (uint64_t)range->mode); + if (written == 0) + return 0; + offset += (size_t)written; + + /* Encode the payload based on mode */ + switch (range->mode) { + + case NDB_NEG_SKIP: + /* No payload for SKIP mode */ + break; + + case NDB_NEG_FINGERPRINT: + /* 16-byte fingerprint */ + if (offset + 16 > buflen) + return 0; + memcpy(buf + offset, range->payload.fingerprint, 16); + offset += 16; + break; + + case NDB_NEG_IDLIST: { + /* + * IdList: + */ + size_t id_count = range->payload.id_list.id_count; + size_t ids_size = id_count * 32; + + /* Write count */ + written = ndb_negentropy_varint_encode(buf + offset, buflen - offset, + (uint64_t)id_count); + if (written == 0) + return 0; + offset += (size_t)written; + + /* Guard: ensure room for all IDs */ + if (offset + ids_size > buflen) + return 0; + + /* Write IDs */ + if (id_count > 0 && range->payload.id_list.ids != NULL) + memcpy(buf + offset, range->payload.id_list.ids, ids_size); + offset += ids_size; + break; + } + + case NDB_NEG_IDLIST_RESPONSE: { + /* + * IdListResponse: + * + * + * haveIds is an IdList (count + ids) of IDs the server has. + * bitfield indicates which client IDs the server needs. 
+ */ + size_t have_count = range->payload.id_list_response.have_count; + size_t have_size = have_count * 32; + size_t bf_len = range->payload.id_list_response.bitfield_len; + + /* Write have_count */ + written = ndb_negentropy_varint_encode(buf + offset, buflen - offset, + (uint64_t)have_count); + if (written == 0) + return 0; + offset += (size_t)written; + + /* Guard: ensure room for have_ids */ + if (offset + have_size > buflen) + return 0; + + /* Write have_ids */ + if (have_count > 0 && range->payload.id_list_response.have_ids != NULL) + memcpy(buf + offset, range->payload.id_list_response.have_ids, have_size); + offset += have_size; + + /* Write bitfield length */ + written = ndb_negentropy_varint_encode(buf + offset, buflen - offset, + (uint64_t)bf_len); + if (written == 0) + return 0; + offset += (size_t)written; + + /* Guard: ensure room for bitfield */ + if (offset + bf_len > buflen) + return 0; + + /* Write bitfield */ + if (bf_len > 0 && range->payload.id_list_response.bitfield != NULL) + memcpy(buf + offset, range->payload.id_list_response.bitfield, bf_len); + offset += bf_len; + break; + } + + default: + /* Unknown mode */ + return 0; + } + + return (int)offset; +} + + +/* + * Decode a range from a buffer. + * + * For IDLIST and IDLIST_RESPONSE modes, the payload pointers point + * directly into the input buffer for zero-copy access. 
+ */ +int ndb_negentropy_range_decode(const unsigned char *buf, size_t buflen, + struct ndb_negentropy_range *range, + uint64_t *prev_timestamp) +{ + size_t offset = 0; + int consumed; + uint64_t mode_val; + + /* Guard: validate inputs */ + if (buf == NULL || range == NULL || prev_timestamp == NULL) + return 0; + + /* Decode the upper bound */ + consumed = ndb_negentropy_bound_decode(buf + offset, buflen - offset, + &range->upper_bound, prev_timestamp); + if (consumed == 0) + return 0; + offset += (size_t)consumed; + + /* Decode the mode */ + consumed = ndb_negentropy_varint_decode(buf + offset, buflen - offset, &mode_val); + if (consumed == 0) + return 0; + offset += (size_t)consumed; + + /* Guard: mode must be valid */ + if (mode_val > NDB_NEG_IDLIST_RESPONSE) + return 0; + range->mode = (enum ndb_negentropy_mode)mode_val; + + /* Decode payload based on mode */ + switch (range->mode) { + + case NDB_NEG_SKIP: + /* No payload */ + break; + + case NDB_NEG_FINGERPRINT: + /* 16-byte fingerprint */ + if (offset + 16 > buflen) + return 0; + memcpy(range->payload.fingerprint, buf + offset, 16); + offset += 16; + break; + + case NDB_NEG_IDLIST: { + /* + * IdList: + */ + uint64_t id_count; + size_t ids_size; + + /* Read count */ + consumed = ndb_negentropy_varint_decode(buf + offset, buflen - offset, &id_count); + if (consumed == 0) + return 0; + offset += (size_t)consumed; + + /* Guard: prevent DOS and overflow in multiplication */ + if (id_count > NDB_NEGENTROPY_MAX_IDS_PER_RANGE) + return 0; + + ids_size = (size_t)id_count * 32; + + /* Guard: ensure buffer has all IDs */ + if (offset + ids_size > buflen) + return 0; + + /* Point directly into buffer (zero-copy) */ + range->payload.id_list.id_count = (size_t)id_count; + range->payload.id_list.ids = (id_count > 0) ? 
(buf + offset) : NULL; + offset += ids_size; + break; + } + + case NDB_NEG_IDLIST_RESPONSE: { + /* + * IdListResponse: + * + */ + uint64_t have_count; + size_t have_size; + uint64_t bf_len; + + /* Read have_count */ + consumed = ndb_negentropy_varint_decode(buf + offset, buflen - offset, &have_count); + if (consumed == 0) + return 0; + offset += (size_t)consumed; + + /* Guard: prevent DOS and overflow in multiplication */ + if (have_count > NDB_NEGENTROPY_MAX_IDS_PER_RANGE) + return 0; + + have_size = (size_t)have_count * 32; + + /* Guard: ensure buffer has all have_ids */ + if (offset + have_size > buflen) + return 0; + + /* Point directly into buffer (zero-copy) */ + range->payload.id_list_response.have_count = (size_t)have_count; + range->payload.id_list_response.have_ids = (have_count > 0) ? (buf + offset) : NULL; + offset += have_size; + + /* Read bitfield length */ + consumed = ndb_negentropy_varint_decode(buf + offset, buflen - offset, &bf_len); + if (consumed == 0) + return 0; + offset += (size_t)consumed; + + /* + * Guard: bitfield length sanity check. + * Bitfield is ceil(client_id_count / 8), so max is ~12KB + * for 100K IDs. Use generous 1MB limit. + */ + if (bf_len > (1024 * 1024)) + return 0; + + /* Guard: ensure buffer has bitfield */ + if (offset + bf_len > buflen) + return 0; + + /* Point directly into buffer (zero-copy) */ + range->payload.id_list_response.bitfield_len = (size_t)bf_len; + range->payload.id_list_response.bitfield = (bf_len > 0) ? (buf + offset) : NULL; + offset += bf_len; + break; + } + + default: + /* Unknown mode */ + return 0; + } + + return (int)offset; +} + + +/* ============================================================ + * MESSAGE ENCODING/DECODING + * ============================================================ + * + * Messages are the complete wire-format units. Each message + * starts with a version byte followed by concatenated ranges. + */ + + +/* + * Encode a complete negentropy message. 
+ *
+ * Format: <version byte (0x61)> <Range>*
+ */
+int ndb_negentropy_message_encode(unsigned char *buf, size_t buflen,
+				  const struct ndb_negentropy_range *ranges,
+				  size_t num_ranges)
+{
+	size_t offset = 0;
+	size_t i;
+	uint64_t prev_timestamp = 0;
+	int written;
+
+	/* Guard: need at least 1 byte for version */
+	if (buf == NULL || buflen < 1)
+		return 0;
+
+	/* Guard: enforce range limit for DOS protection */
+	if (num_ranges > NDB_NEGENTROPY_MAX_RANGES)
+		return 0;
+
+	/* Write protocol version byte */
+	buf[offset++] = NDB_NEGENTROPY_PROTOCOL_V1;
+
+	/* Encode each range */
+	for (i = 0; i < num_ranges; i++) {
+		written = ndb_negentropy_range_encode(buf + offset, buflen - offset,
+						      &ranges[i], &prev_timestamp);
+		if (written == 0)
+			return 0;
+
+		offset += (size_t)written;
+	}
+
+	return (int)offset;
+}
+
+
+/*
+ * Get the protocol version from a message.
+ *
+ * Simply returns the first byte, which is the version.
+ */
+int ndb_negentropy_message_version(const unsigned char *buf, size_t buflen)
+{
+	if (buf == NULL || buflen < 1)
+		return 0;
+
+	return (int)buf[0];
+}
+
+
+/*
+ * Count ranges in a message.
+ *
+ * We parse through the message skipping the version byte,
+ * then iterate through ranges counting each one.
+ */
+int ndb_negentropy_message_count_ranges(const unsigned char *buf, size_t buflen)
+{
+	const unsigned char *p;
+	size_t remaining;
+	uint64_t prev_timestamp = 0;
+	struct ndb_negentropy_range range;
+	int count = 0;
+	int consumed;
+
+	/* Guard: need at least version byte */
+	if (buf == NULL || buflen < 1)
+		return -1;
+
+	/* Check version is V1 */
+	if (buf[0] != NDB_NEGENTROPY_PROTOCOL_V1)
+		return -1;
+
+	/* Skip version byte */
+	p = buf + 1;
+	remaining = buflen - 1;
+
+	/*
+	 * Parse ranges until buffer exhausted.
+	 * We use the actual decode function to ensure we count
+	 * correctly even with complex payloads.
+ */ + while (remaining > 0) { + consumed = ndb_negentropy_range_decode(p, remaining, &range, &prev_timestamp); + + /* Decode error */ + if (consumed == 0) + return -1; + + /* Guard: ensure we don't exceed limit */ + count++; + if (count > NDB_NEGENTROPY_MAX_RANGES) + return -1; + + p += consumed; + remaining -= (size_t)consumed; + } + + return count; +} + + +/* ============================================================ + * NEGENTROPY STORAGE + * ============================================================ + * + * Storage manages a sorted array of (timestamp, id) items for + * use in negentropy reconciliation. + */ + +/* Initial capacity for item array */ +#define STORAGE_INITIAL_CAPACITY 64 + + +/* + * Compare two items for sorting. + * + * Primary sort: timestamp (ascending) + * Secondary sort: id (lexicographic ascending) + */ +static int item_compare(const void *a, const void *b) +{ + const struct ndb_negentropy_item *ia = a; + const struct ndb_negentropy_item *ib = b; + + /* Compare timestamp first */ + if (ia->timestamp < ib->timestamp) + return -1; + if (ia->timestamp > ib->timestamp) + return 1; + + /* Timestamps equal - compare IDs lexicographically */ + return memcmp(ia->id, ib->id, 32); +} + + +/* + * Compare an item to a bound for binary search. + * + * Returns: + * < 0 if item < bound + * = 0 if item == bound + * > 0 if item > bound + */ +static int item_bound_compare(const struct ndb_negentropy_item *item, + const struct ndb_negentropy_bound *bound) +{ + int cmp; + int i; + + /* Handle infinity bound */ + if (bound->timestamp == UINT64_MAX) + return -1; /* Item is always < infinity */ + + /* Compare timestamp */ + if (item->timestamp < bound->timestamp) + return -1; + if (item->timestamp > bound->timestamp) + return 1; + + /* Timestamps equal - compare ID prefix */ + if (bound->prefix_len > 0) { + cmp = memcmp(item->id, bound->id_prefix, bound->prefix_len); + if (cmp != 0) + return cmp; + } + + /* + * Prefix matches. 
Per negentropy spec, omitted bytes in bound + * are implicitly zero. Check if item has any non-zero bytes + * after the prefix - if so, item > bound. + */ + for (i = bound->prefix_len; i < 32; i++) { + if (item->id[i] != 0) + return 1; /* item > bound */ + } + + /* Complete match */ + return 0; +} + + +/* + * Ensure storage has room for at least one more item. + * Grows the array if necessary. + */ +static int storage_ensure_capacity(struct ndb_negentropy_storage *storage) +{ + size_t new_capacity; + struct ndb_negentropy_item *new_items; + + if (storage->count < storage->capacity) + return 1; + + /* Grow by doubling */ + new_capacity = storage->capacity * 2; + if (new_capacity < STORAGE_INITIAL_CAPACITY) + new_capacity = STORAGE_INITIAL_CAPACITY; + + new_items = realloc(storage->items, + new_capacity * sizeof(struct ndb_negentropy_item)); + if (new_items == NULL) + return 0; + + storage->items = new_items; + storage->capacity = new_capacity; + return 1; +} + + +int ndb_negentropy_storage_init(struct ndb_negentropy_storage *storage) +{ + if (storage == NULL) + return 0; + + storage->items = NULL; + storage->count = 0; + storage->capacity = 0; + storage->sealed = 0; + + return 1; +} + + +void ndb_negentropy_storage_destroy(struct ndb_negentropy_storage *storage) +{ + if (storage == NULL) + return; + + free(storage->items); + storage->items = NULL; + storage->count = 0; + storage->capacity = 0; + storage->sealed = 0; +} + + +int ndb_negentropy_storage_add(struct ndb_negentropy_storage *storage, + uint64_t timestamp, + const unsigned char *id) +{ + struct ndb_negentropy_item *item; + + /* Guard: validate inputs */ + if (storage == NULL || id == NULL) + return 0; + + /* Guard: cannot add after sealing */ + if (storage->sealed) + return 0; + + /* Ensure capacity */ + if (!storage_ensure_capacity(storage)) + return 0; + + /* Add the item */ + item = &storage->items[storage->count]; + item->timestamp = timestamp; + memcpy(item->id, id, 32); + storage->count++; + + return 
1; +} + + +int ndb_negentropy_storage_add_many(struct ndb_negentropy_storage *storage, + const struct ndb_negentropy_item *items, + size_t count) +{ + size_t needed; + size_t new_capacity; + struct ndb_negentropy_item *new_items; + size_t i; + + /* Guard: validate inputs */ + if (storage == NULL) + return 0; + + if (count == 0) + return 1; + + if (items == NULL) + return 0; + + /* Guard: cannot add after sealing */ + if (storage->sealed) + return 0; + + /* Ensure capacity for all items */ + needed = storage->count + count; + if (needed > storage->capacity) { + new_capacity = storage->capacity; + if (new_capacity < STORAGE_INITIAL_CAPACITY) + new_capacity = STORAGE_INITIAL_CAPACITY; + + while (new_capacity < needed) + new_capacity *= 2; + + new_items = realloc(storage->items, + new_capacity * sizeof(struct ndb_negentropy_item)); + if (new_items == NULL) + return 0; + + storage->items = new_items; + storage->capacity = new_capacity; + } + + /* Copy items */ + for (i = 0; i < count; i++) { + storage->items[storage->count + i] = items[i]; + } + storage->count += count; + + return 1; +} + + +int ndb_negentropy_storage_seal(struct ndb_negentropy_storage *storage) +{ + /* Guard: validate input */ + if (storage == NULL) + return 0; + + /* Guard: cannot seal twice */ + if (storage->sealed) + return 0; + + /* Sort items by (timestamp, id) */ + if (storage->count > 0) { + qsort(storage->items, storage->count, + sizeof(struct ndb_negentropy_item), item_compare); + } + + storage->sealed = 1; + return 1; +} + + +size_t ndb_negentropy_storage_size(const struct ndb_negentropy_storage *storage) +{ + if (storage == NULL) + return 0; + + return storage->count; +} + + +const struct ndb_negentropy_item * +ndb_negentropy_storage_get(const struct ndb_negentropy_storage *storage, size_t index) +{ + /* Guard: validate input */ + if (storage == NULL) + return NULL; + + /* Guard: must be sealed */ + if (!storage->sealed) + return NULL; + + /* Guard: bounds check */ + if (index >= 
storage->count) + return NULL; + + return &storage->items[index]; +} + + +size_t ndb_negentropy_storage_lower_bound(const struct ndb_negentropy_storage *storage, + const struct ndb_negentropy_bound *bound) +{ + size_t lo, hi, mid; + int cmp; + + /* Guard: validate inputs */ + if (storage == NULL || bound == NULL) + return 0; + + /* Guard: must be sealed */ + if (!storage->sealed) + return 0; + + /* Empty storage */ + if (storage->count == 0) + return 0; + + /* Binary search for lower bound */ + lo = 0; + hi = storage->count; + + while (lo < hi) { + mid = lo + (hi - lo) / 2; + + cmp = item_bound_compare(&storage->items[mid], bound); + + if (cmp < 0) { + /* Item is less than bound, search right half */ + lo = mid + 1; + } else { + /* Item is >= bound, search left half */ + hi = mid; + } + } + + return lo; +} + + +int ndb_negentropy_storage_fingerprint(const struct ndb_negentropy_storage *storage, + size_t begin, size_t end, + unsigned char *fingerprint_out) +{ + struct ndb_negentropy_accumulator acc; + size_t i; + size_t count; + + /* Guard: validate inputs */ + if (storage == NULL || fingerprint_out == NULL) + return 0; + + /* Guard: must be sealed */ + if (!storage->sealed) + return 0; + + /* Guard: valid range */ + if (begin > end || end > storage->count) + return 0; + + /* Initialize accumulator */ + ndb_negentropy_accumulator_init(&acc); + + /* Add all IDs in range to accumulator */ + for (i = begin; i < end; i++) { + ndb_negentropy_accumulator_add(&acc, storage->items[i].id); + } + + /* Compute fingerprint */ + count = end - begin; + ndb_negentropy_fingerprint(&acc, count, fingerprint_out); + + return 1; +} + + +/* ============================================================ + * FILTER-BASED INITIALIZATION (NostrDB Integration) + * ============================================================ + * + * This section requires the full nostrdb library. 
It's compiled
+ * only when NDB_NEGENTROPY_STANDALONE is NOT defined (the normal
+ * nostrdb build leaves that macro undefined, so this section is built).
+ *
+ * For standalone testing of core negentropy functions, compile
+ * with NDB_NEGENTROPY_STANDALONE defined.
+ */
+
+#ifndef NDB_NEGENTROPY_STANDALONE
+
+#include "nostrdb.h"
+
+/* Default limit for filter queries if not specified */
+#define DEFAULT_QUERY_LIMIT 10000
+
+
+/**
+ * Populate storage from a NostrDB filter query.
+ *
+ * Queries the database using the provided filter and adds all matching
+ * events to the storage. The storage is automatically sealed after
+ * population.
+ *
+ * @param storage Initialized (but not sealed) storage
+ * @param txn Active read transaction
+ * @param filter NIP-01 filter to query events
+ * @param limit Max events to add (0 = DEFAULT_QUERY_LIMIT)
+ * @return Number of items added, or -1 on error
+ */
+int ndb_negentropy_storage_from_filter(struct ndb_negentropy_storage *storage,
+				       struct ndb_txn *txn,
+				       struct ndb_filter *filter,
+				       int limit)
+{
+	struct ndb_query_result *results;
+	int result_count;
+	int query_limit;
+	int i;
+	int added;
+
+	/* Guard: validate inputs */
+	if (storage == NULL || txn == NULL || filter == NULL)
+		return -1;
+
+	/* Guard: storage must not already be sealed */
+	if (storage->sealed)
+		return -1;
+
+	/* Determine query limit */
+	query_limit = (limit > 0) ?
limit : DEFAULT_QUERY_LIMIT; + + /* Allocate results buffer */ + results = malloc((size_t)query_limit * sizeof(struct ndb_query_result)); + if (results == NULL) + return -1; + + result_count = 0; + added = 0; + + /* Execute query */ + if (!ndb_query(txn, filter, 1, results, query_limit, &result_count)) { + free(results); + return -1; + } + + /* Add each result to storage */ + for (i = 0; i < result_count; i++) { + struct ndb_note *note = results[i].note; + uint64_t timestamp; + unsigned char *id; + + /* Get timestamp and ID from note */ + timestamp = (uint64_t)ndb_note_created_at(note); + id = ndb_note_id(note); + + /* Add to storage (copies the ID) */ + if (!ndb_negentropy_storage_add(storage, timestamp, id)) { + free(results); + return -1; + } + + added++; + } + + free(results); + + /* Seal storage after populating from filter */ + if (!ndb_negentropy_storage_seal(storage)) + return -1; + + return added; +} + +#endif /* NDB_NEGENTROPY_STANDALONE */ + + +/* ============================================================ + * RECONCILIATION STATE MACHINE + * ============================================================ + * + * The reconciliation engine implements the negentropy protocol + * for determining set differences between two parties. + */ + +/* Initial capacity for ID arrays */ +#define IDS_INITIAL_CAPACITY 64 + + +/* + * Initialize an ID array. + */ +static void ids_init(struct ndb_negentropy_ids *ids) +{ + ids->ids = NULL; + ids->count = 0; + ids->capacity = 0; +} + + +/* + * Free an ID array. + */ +static void ids_destroy(struct ndb_negentropy_ids *ids) +{ + free(ids->ids); + ids->ids = NULL; + ids->count = 0; + ids->capacity = 0; +} + + +/* + * Add an ID to an ID array. 
+ */ +static int ids_add(struct ndb_negentropy_ids *ids, const unsigned char *id) +{ + size_t new_capacity; + unsigned char *new_ids; + + /* Grow if needed */ + if (ids->count >= ids->capacity) { + new_capacity = ids->capacity * 2; + if (new_capacity < IDS_INITIAL_CAPACITY) + new_capacity = IDS_INITIAL_CAPACITY; + + new_ids = realloc(ids->ids, new_capacity * 32); + if (new_ids == NULL) + return 0; + + ids->ids = new_ids; + ids->capacity = new_capacity; + } + + /* Copy ID */ + memcpy(ids->ids + ids->count * 32, id, 32); + ids->count++; + + return 1; +} + + +/* + * Check if storage contains an ID using binary search. + * Returns 1 if found, 0 if not. + */ +static int storage_has_id(const struct ndb_negentropy_storage *storage, + uint64_t timestamp, const unsigned char *id) +{ + struct ndb_negentropy_bound bound; + size_t idx; + const struct ndb_negentropy_item *item; + + /* Create bound for the ID */ + bound.timestamp = timestamp; + memcpy(bound.id_prefix, id, 32); + bound.prefix_len = 32; + + /* Find lower bound */ + idx = ndb_negentropy_storage_lower_bound(storage, &bound); + + /* Check if we found an exact match */ + if (idx >= storage->count) + return 0; + + item = &storage->items[idx]; + if (item->timestamp != timestamp) + return 0; + + return memcmp(item->id, id, 32) == 0; +} + + +int ndb_negentropy_init(struct ndb_negentropy *neg, + const struct ndb_negentropy_storage *storage, + const struct ndb_negentropy_config *config) +{ + /* Guard: validate inputs */ + if (neg == NULL || storage == NULL) + return 0; + + /* Guard: storage must be sealed */ + if (!storage->sealed) + return 0; + + neg->storage = storage; + neg->is_initiator = 0; + neg->is_complete = 0; + + /* Apply config or use defaults */ + if (config != NULL) { + neg->frame_size_limit = config->frame_size_limit; + neg->idlist_threshold = config->idlist_threshold > 0 + ? config->idlist_threshold + : NDB_NEGENTROPY_IDLIST_THRESHOLD; + neg->split_count = config->split_count > 1 + ? 
config->split_count + : NDB_NEGENTROPY_SPLIT_COUNT; + } else { + neg->frame_size_limit = 0; /* unlimited */ + neg->idlist_threshold = NDB_NEGENTROPY_IDLIST_THRESHOLD; + neg->split_count = NDB_NEGENTROPY_SPLIT_COUNT; + } + + ids_init(&neg->have_ids); + ids_init(&neg->need_ids); + + return 1; +} + + +void ndb_negentropy_destroy(struct ndb_negentropy *neg) +{ + if (neg == NULL) + return; + + ids_destroy(&neg->have_ids); + ids_destroy(&neg->need_ids); + neg->storage = NULL; + neg->is_initiator = 0; + neg->is_complete = 0; +} + + +int ndb_negentropy_is_complete(const struct ndb_negentropy *neg) +{ + if (neg == NULL) + return 0; + + return neg->is_complete; +} + + +int ndb_negentropy_initiate(struct ndb_negentropy *neg, + unsigned char *buf, size_t buflen, + size_t *outlen) +{ + struct ndb_negentropy_range range; + int len; + + /* Guard: validate inputs */ + if (neg == NULL || buf == NULL || outlen == NULL) + return 0; + + /* Guard: need room for version + range */ + if (buflen < 2) + return 0; + + /* Mark as initiator */ + neg->is_initiator = 1; + + /* + * Create initial message with single FINGERPRINT range + * covering the entire item space (0 to infinity). + */ + range.upper_bound.timestamp = UINT64_MAX; + range.upper_bound.prefix_len = 0; + range.mode = NDB_NEG_FINGERPRINT; + + /* Compute fingerprint of all items */ + if (!ndb_negentropy_storage_fingerprint(neg->storage, 0, + neg->storage->count, + range.payload.fingerprint)) + return 0; + + /* Encode message */ + len = ndb_negentropy_message_encode(buf, buflen, &range, 1); + if (len == 0) + return 0; + + *outlen = (size_t)len; + return 1; +} + + +/* + * Create a bound from a storage item at the given index. + * If index == count, creates an infinity bound. 
+ */ +static void bound_from_index(const struct ndb_negentropy_storage *storage, + size_t index, + struct ndb_negentropy_bound *bound) +{ + if (index >= storage->count) { + /* Infinity bound */ + bound->timestamp = UINT64_MAX; + bound->prefix_len = 0; + } else { + /* Bound from item */ + const struct ndb_negentropy_item *item = &storage->items[index]; + bound->timestamp = item->timestamp; + memcpy(bound->id_prefix, item->id, 32); + bound->prefix_len = 32; + } +} + + +/* + * Process incoming ranges and build response. + * This is the core reconciliation logic. + */ +int ndb_negentropy_reconcile(struct ndb_negentropy *neg, + const unsigned char *msg, size_t msglen, + unsigned char *out, size_t *outlen) +{ + const unsigned char *p; + size_t remaining; + uint64_t prev_ts_in = 0; + uint64_t prev_ts_out = 0; + size_t out_offset; + size_t lower_idx = 0; /* Current position in our storage */ + struct ndb_negentropy_range in_range; + int consumed; + int received_non_skip = 0; /* Track if we received any non-SKIP input */ + int range_count = 0; /* Track range number for error diagnostics */ + + /* Guard: validate inputs */ + if (neg == NULL || msg == NULL || out == NULL || outlen == NULL) + return 0; + + /* Guard: need at least version byte */ + if (msglen < 1 || *outlen < 1) + return 0; + + /* Guard: check version */ + if (msg[0] != NDB_NEGENTROPY_PROTOCOL_V1) + return 0; + + /* Write version byte to output */ + out[0] = NDB_NEGENTROPY_PROTOCOL_V1; + out_offset = 1; + + /* Process each incoming range */ + p = msg + 1; + remaining = msglen - 1; + + while (remaining > 0) { + range_count++; + + /* Decode next range */ + consumed = ndb_negentropy_range_decode(p, remaining, &in_range, &prev_ts_in); + if (consumed == 0) { + fprintf(stderr, "ndb_negentropy: decode failed at range %d, remaining=%zu\n", + range_count, remaining); + return 0; + } + + p += consumed; + remaining -= (size_t)consumed; + + /* Find the upper index for this range */ + size_t upper_idx = 
ndb_negentropy_storage_lower_bound( + neg->storage, &in_range.upper_bound); + + /* Number of items in our [lower, upper) range */ + size_t our_count = (upper_idx > lower_idx) ? (upper_idx - lower_idx) : 0; + + /* Process based on mode */ + switch (in_range.mode) { + + case NDB_NEG_SKIP: { + /* + * Peer is skipping this range (they agree it matches). + * We echo SKIP to maintain coverage (unless all input is SKIP, + * in which case we'll send empty message at the end). + */ + struct ndb_negentropy_range out_range; + int written; + + out_range.upper_bound = in_range.upper_bound; + out_range.mode = NDB_NEG_SKIP; + + written = ndb_negentropy_range_encode( + out + out_offset, *outlen - out_offset, + &out_range, &prev_ts_out); + if (written == 0) { + fprintf(stderr, "ndb_negentropy: SKIP encode failed at range %d, out_offset=%zu, buflen=%zu\n", + range_count, out_offset, *outlen); + return 0; + } + + out_offset += (size_t)written; + break; + } + + case NDB_NEG_FINGERPRINT: { + /* + * Compare fingerprints. If they match, respond with SKIP. + * If different, split the range. + */ + unsigned char our_fp[16]; + received_non_skip = 1; + + ndb_negentropy_storage_fingerprint(neg->storage, + lower_idx, upper_idx, our_fp); + + if (memcmp(our_fp, in_range.payload.fingerprint, 16) == 0) { + /* Fingerprints match - respond with SKIP */ + struct ndb_negentropy_range out_range; + int written; + + out_range.upper_bound = in_range.upper_bound; + out_range.mode = NDB_NEG_SKIP; + + written = ndb_negentropy_range_encode( + out + out_offset, *outlen - out_offset, + &out_range, &prev_ts_out); + if (written == 0) { + fprintf(stderr, "ndb_negentropy: FP-SKIP encode failed at range %d, out_offset=%zu\n", + range_count, out_offset); + return 0; + } + + out_offset += (size_t)written; + } else { + /* + * Fingerprints differ - need to split. + * For small ranges, send IdList. + * For large ranges, send multiple Fingerprint sub-ranges. 
+ */ + if (our_count <= (size_t)neg->idlist_threshold) { + /* Small range: send IdList */ + struct ndb_negentropy_range out_range; + int written; + unsigned char *id_buf = NULL; + + out_range.upper_bound = in_range.upper_bound; + out_range.mode = NDB_NEG_IDLIST; + out_range.payload.id_list.id_count = our_count; + + /* + * Must copy IDs to contiguous buffer because + * storage items have timestamps interleaved. + */ + if (our_count > 0) { + size_t i; + id_buf = malloc(our_count * 32); + if (id_buf == NULL) { + fprintf(stderr, "ndb_negentropy: malloc failed at range %d, our_count=%zu\n", + range_count, our_count); + return 0; + } + + for (i = 0; i < our_count; i++) { + memcpy(id_buf + i * 32, + neg->storage->items[lower_idx + i].id, + 32); + } + out_range.payload.id_list.ids = id_buf; + } else { + out_range.payload.id_list.ids = NULL; + } + + written = ndb_negentropy_range_encode( + out + out_offset, *outlen - out_offset, + &out_range, &prev_ts_out); + + free(id_buf); + + if (written == 0) { + fprintf(stderr, "ndb_negentropy: IDLIST encode failed at range %d, our_count=%zu, out_offset=%zu\n", + range_count, our_count, out_offset); + return 0; + } + + out_offset += (size_t)written; + } else { + /* + * Large range: split into sub-ranges with fingerprints. + * Use configured split_count splits. 
+ */ + size_t items_per_split = our_count / (size_t)neg->split_count; + if (items_per_split == 0) + items_per_split = 1; + + size_t split_lower = lower_idx; + int split_count = neg->split_count; + + for (int s = 0; s < split_count && split_lower < upper_idx; s++) { + size_t split_upper; + struct ndb_negentropy_range out_range; + int written; + + if (s == split_count - 1) { + /* Last split takes the rest */ + split_upper = upper_idx; + } else { + split_upper = split_lower + items_per_split; + if (split_upper > upper_idx) + split_upper = upper_idx; + } + + /* Create fingerprint for this split */ + bound_from_index(neg->storage, split_upper, + &out_range.upper_bound); + + /* Use the incoming upper bound for the last split */ + if (split_upper == upper_idx) + out_range.upper_bound = in_range.upper_bound; + + out_range.mode = NDB_NEG_FINGERPRINT; + ndb_negentropy_storage_fingerprint( + neg->storage, split_lower, split_upper, + out_range.payload.fingerprint); + + written = ndb_negentropy_range_encode( + out + out_offset, *outlen - out_offset, + &out_range, &prev_ts_out); + if (written == 0) { + fprintf(stderr, "ndb_negentropy: FP-SPLIT encode failed at range %d split %d, out_offset=%zu\n", + range_count, s, out_offset); + return 0; + } + + out_offset += (size_t)written; + split_lower = split_upper; + } + } + } + break; + } + + case NDB_NEG_IDLIST: { + /* + * Remote sent us their full ID list for this range. + * Per NIP-77, we respond with SKIP (range is resolved). 
+ * + * We track: + * - have_ids: IDs we have that they don't (we should send) + * - need_ids: IDs they have that we don't (we should request) + */ + struct ndb_negentropy_range out_range; + int written; + size_t their_count = in_range.payload.id_list.id_count; + const unsigned char *their_ids = in_range.payload.id_list.ids; + size_t i, j; + + received_non_skip = 1; + + /* Find IDs we have that they don't -> have_ids */ + for (i = lower_idx; i < upper_idx; i++) { + const struct ndb_negentropy_item *item = &neg->storage->items[i]; + int found = 0; + + /* Check if they have this ID */ + for (j = 0; j < their_count; j++) { + if (memcmp(item->id, their_ids + j * 32, 32) == 0) { + found = 1; + break; + } + } + + if (!found) { + /* We have it, they don't */ + ids_add(&neg->have_ids, item->id); + } + } + + /* Find IDs they have that we don't -> need_ids */ + for (j = 0; j < their_count; j++) { + const unsigned char *their_id = their_ids + j * 32; + int we_have = 0; + + /* Check if we have this ID */ + for (i = lower_idx; i < upper_idx; i++) { + if (memcmp(neg->storage->items[i].id, their_id, 32) == 0) { + we_have = 1; + break; + } + } + + if (!we_have) { + /* We need this ID */ + ids_add(&neg->need_ids, their_id); + } + } + + /* Respond with SKIP per NIP-77 (range resolved) */ + out_range.upper_bound = in_range.upper_bound; + out_range.mode = NDB_NEG_SKIP; + + written = ndb_negentropy_range_encode( + out + out_offset, *outlen - out_offset, + &out_range, &prev_ts_out); + if (written == 0) { + fprintf(stderr, "ndb_negentropy: IDLIST-SKIP encode failed at range %d, out_offset=%zu\n", + range_count, out_offset); + return 0; + } + + out_offset += (size_t)written; + break; + } + + case NDB_NEG_IDLIST_RESPONSE: { + /* + * NOTE: Mode 3 (IDLIST_RESPONSE) is NOT in NIP-77. + * It's from hoytech's negentropy reference implementation. + * We accept it for compatibility but don't send it. 
+ * + * Remote responded to our IdList with: + * - IDs they have that we don't (have_ids) + * - Bitfield of our IDs they need + * + * Extract the have/need IDs. + */ + size_t have_count = in_range.payload.id_list_response.have_count; + received_non_skip = 1; + const unsigned char *have_ids = in_range.payload.id_list_response.have_ids; + size_t bf_len = in_range.payload.id_list_response.bitfield_len; + const unsigned char *bitfield = in_range.payload.id_list_response.bitfield; + + /* IDs they have that we need */ + for (size_t i = 0; i < have_count; i++) { + ids_add(&neg->need_ids, have_ids + i * 32); + } + + /* IDs we have that they need (from bitfield) */ + /* We need to match against our original IdList... */ + /* For now, we iterate our items and check the bitfield */ + size_t bit_idx = 0; + for (size_t i = lower_idx; i < upper_idx && bit_idx / 8 < bf_len; i++) { + if (bitfield[bit_idx / 8] & (1 << (bit_idx % 8))) { + ids_add(&neg->have_ids, neg->storage->items[i].id); + } + bit_idx++; + } + + /* No response needed for IdListResponse - send SKIP */ + struct ndb_negentropy_range out_range; + int written; + + out_range.upper_bound = in_range.upper_bound; + out_range.mode = NDB_NEG_SKIP; + + written = ndb_negentropy_range_encode( + out + out_offset, *outlen - out_offset, + &out_range, &prev_ts_out); + if (written == 0) { + fprintf(stderr, "ndb_negentropy: IDLIST_RESPONSE-SKIP encode failed at range %d, out_offset=%zu\n", + range_count, out_offset); + return 0; + } + + out_offset += (size_t)written; + break; + } + + default: + fprintf(stderr, "ndb_negentropy: unknown mode %d at range %d\n", + in_range.mode, range_count); + return 0; + } + + /* Move to next range */ + lower_idx = upper_idx; + } + + /* + * If all incoming ranges were SKIP, we can signal completion + * by returning just the version byte (empty message). + * This prevents infinite SKIP echo loops. 
+ */
+	if (!received_non_skip)
+		out_offset = 1;
+
+	*outlen = out_offset;
+
+	/*
+	 * Mark reconciliation as complete if output is just the version byte.
+	 * This happens when all ranges in the response are SKIP mode,
+	 * meaning there are no differences to resolve.
+	 */
+	if (out_offset == 1)
+		neg->is_complete = 1;
+
+	return 1;
+}
+
+
+size_t ndb_negentropy_get_have_ids(const struct ndb_negentropy *neg,
+				   const unsigned char **ids_out)
+{
+	if (neg == NULL || ids_out == NULL)
+		return 0;
+
+	*ids_out = neg->have_ids.ids;
+	return neg->have_ids.count;
+}
+
+
+size_t ndb_negentropy_get_need_ids(const struct ndb_negentropy *neg,
+				   const unsigned char **ids_out)
+{
+	if (neg == NULL || ids_out == NULL)
+		return 0;
+
+	*ids_out = neg->need_ids.ids;
+	return neg->need_ids.count;
+}
diff --git a/nostrdb/src/ndb_negentropy.h b/nostrdb/src/ndb_negentropy.h
new file mode 100644
index 0000000000..81e0fd4b9a
--- /dev/null
+++ b/nostrdb/src/ndb_negentropy.h
@@ -0,0 +1,808 @@
+/*
+ * ndb_negentropy.h - Native Negentropy for NostrDB
+ *
+ * This implements the negentropy set reconciliation protocol (NIP-77)
+ * for efficient event syncing between clients and relays.
+ *
+ * Negentropy allows two parties to efficiently determine which items
+ * each has that the other lacks, using O(log n) round trips and
+ * minimal bandwidth via fingerprint comparison.
+ *
+ * The protocol works by:
+ * 1. Both sides sort their items by (timestamp, id)
+ * 2. Exchange fingerprints of ranges to find differences
+ * 3. Recursively split differing ranges until items are identified
+ * 4. Exchange the actual differing item IDs
+ *
+ * Reference: https://github.com/hoytech/negentropy
+ * NIP-77: https://github.com/nostr-protocol/nips/blob/master/77.md
+ */
+
+#ifndef NDB_NEGENTROPY_H
+#define NDB_NEGENTROPY_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Forward declarations for NostrDB integration */
+struct ndb_txn;
+struct ndb_filter;
+
+/*
+ * Protocol version byte.
+ * V1 = 0x61, future versions increment (0x62, 0x63, etc.) + * If a peer receives an incompatible version, it replies with + * a single byte containing its highest supported version. + */ +#define NDB_NEGENTROPY_PROTOCOL_V1 0x61 + +/* + * Range modes determine how each range in a message should be processed. + * + * SKIP: No further processing needed for this range. + * Payload is empty. + * + * FINGERPRINT: Payload contains a 16-byte fingerprint of all IDs + * in this range. If fingerprints match, ranges are + * identical. If not, further splitting is needed. + * + * IDLIST: Payload contains a complete list of all IDs in + * this range. Used for small ranges as a base case. + * + * IDLIST_RESPONSE: Server's response to an IDLIST. Contains IDs the + * server has (client needs) plus a bitfield indicating + * which client IDs the server needs. + */ +enum ndb_negentropy_mode { + NDB_NEG_SKIP = 0, + NDB_NEG_FINGERPRINT = 1, + NDB_NEG_IDLIST = 2, + NDB_NEG_IDLIST_RESPONSE = 3 +}; + +/* + * Bound: Represents a range boundary in the timestamp/ID space. + * + * Ranges in negentropy are specified by inclusive lower bounds and + * exclusive upper bounds. Each bound consists of a timestamp and + * an ID prefix of variable length. + * + * The prefix_len allows using the shortest possible prefix that + * distinguishes this bound from adjacent records. If timestamps + * differ, prefix_len can be 0. Otherwise, it's the length of the + * common prefix plus 1. + * + * Trailing bytes after prefix_len are implicitly zero. + */ +struct ndb_negentropy_bound { + uint64_t timestamp; + unsigned char id_prefix[32]; + uint8_t prefix_len; /* 0-32 bytes */ +}; + +/* + * Item: A (timestamp, id) pair for negentropy reconciliation. + * + * Items must be sorted by timestamp first, then lexicographically + * by ID for items with identical timestamps. 
+ */ +struct ndb_negentropy_item { + uint64_t timestamp; + unsigned char id[32]; +}; + +/* + * Accumulator: 256-bit accumulator for fingerprint computation. + * + * The fingerprint algorithm sums all 32-byte IDs (treated as + * little-endian 256-bit unsigned integers) modulo 2^256, then + * hashes the result with the count. + * + * Formula: fingerprint = SHA256(sum || varint(count))[:16] + */ +struct ndb_negentropy_accumulator { + unsigned char sum[32]; /* little-endian 256-bit value */ +}; + + +/* ============================================================ + * VARINT ENCODING/DECODING + * ============================================================ + * + * Negentropy uses a specific varint format: + * - Base-128 encoding + * - Most significant byte FIRST (big-endian style) + * - High bit (0x80) set on all bytes EXCEPT the last + * + * This differs from the common LEB128 format which is LSB-first. + * + * Examples: + * 0 -> 0x00 + * 127 -> 0x7F + * 128 -> 0x81 0x00 + * 255 -> 0x81 0x7F + * 16383 -> 0xFF 0x7F + * 16384 -> 0x81 0x80 0x00 + */ + +/* + * Encode a 64-bit unsigned integer as a negentropy varint. + * + * Returns: Number of bytes written, or 0 if buffer too small. + * + * The maximum encoded size is 10 bytes (for UINT64_MAX). + */ +int ndb_negentropy_varint_encode(unsigned char *buf, size_t buflen, uint64_t n); + +/* + * Decode a negentropy varint into a 64-bit unsigned integer. + * + * Returns: Number of bytes consumed, or 0 on error. + * + * Errors include: buffer too small, malformed varint (> 10 bytes), + * or value overflow. + */ +int ndb_negentropy_varint_decode(const unsigned char *buf, size_t buflen, + uint64_t *out); + +/* + * Calculate the encoded size of a varint without actually encoding. + * + * Useful for pre-calculating buffer sizes. 
+ */
+int ndb_negentropy_varint_size(uint64_t n);
+
+
+/* ============================================================
+ * FINGERPRINT COMPUTATION
+ * ============================================================
+ *
+ * Fingerprints are computed by:
+ * 1. Summing all 32-byte IDs as little-endian 256-bit integers
+ * 2. Taking the sum modulo 2^256 (natural overflow)
+ * 3. Appending the count as a varint
+ * 4. Hashing with SHA-256
+ * 5. Taking the first 16 bytes
+ */
+
+/*
+ * Initialize an accumulator to zero.
+ */
+void ndb_negentropy_accumulator_init(struct ndb_negentropy_accumulator *acc);
+
+/*
+ * Add a 32-byte ID to the accumulator.
+ *
+ * Performs 256-bit addition with natural overflow (mod 2^256).
+ * The ID is interpreted as a little-endian unsigned integer.
+ */
+void ndb_negentropy_accumulator_add(struct ndb_negentropy_accumulator *acc,
+                                    const unsigned char *id);
+
+/*
+ * Compute the final 16-byte fingerprint.
+ *
+ * Formula: SHA256(acc->sum || varint(count))[:16]
+ *
+ * The output buffer must be at least 16 bytes.
+ */
+void ndb_negentropy_fingerprint(const struct ndb_negentropy_accumulator *acc,
+                                size_t count,
+                                unsigned char *out);
+
+
+/* ============================================================
+ * BOUND ENCODING/DECODING
+ * ============================================================
+ *
+ * Bounds are encoded as:
+ *   <timestamp (delta-encoded varint)> <prefix_len (varint)> <id_prefix (prefix_len bytes)>
+ *
+ * Timestamp encoding is special:
+ * - The "infinity" timestamp (UINT64_MAX) is encoded as 0
+ * - All other values are encoded as (1 + delta) where delta is
+ *   the difference from the previous timestamp
+ * - Deltas are always non-negative (ranges are ascending)
+ *
+ * The prev_timestamp parameter tracks state across multiple
+ * bound encodings within a single message.
+ */
+
+/*
+ * Encode a bound into a buffer.
+ *
+ * prev_timestamp: In/out parameter for delta encoding.
+ *                 Initialize to 0 at the start of a message.
+ *
+ * Returns: Number of bytes written, or 0 on error.
+ */
+int ndb_negentropy_bound_encode(unsigned char *buf, size_t buflen,
+                                const struct ndb_negentropy_bound *bound,
+                                uint64_t *prev_timestamp);
+
+/*
+ * Decode a bound from a buffer.
+ *
+ * prev_timestamp: In/out parameter for delta decoding.
+ *                 Initialize to 0 at the start of a message.
+ *
+ * Returns: Number of bytes consumed, or 0 on error.
+ */
+int ndb_negentropy_bound_decode(const unsigned char *buf, size_t buflen,
+                                struct ndb_negentropy_bound *bound,
+                                uint64_t *prev_timestamp);
+
+
+/* ============================================================
+ * HEX ENCODING UTILITIES
+ * ============================================================
+ *
+ * NIP-77 transmits negentropy messages as hex-encoded strings
+ * within JSON arrays:
+ *
+ *   ["NEG-OPEN", "sub1", {"kinds":[1]}, "6181..."]
+ *   ["NEG-MSG", "sub1", "6181..."]
+ */
+
+/*
+ * Convert binary data to a hex string.
+ *
+ * The output is NUL-terminated. The hex buffer must be at least
+ * (len * 2 + 1) bytes.
+ *
+ * Returns: Number of hex characters written (excluding NUL).
+ */
+size_t ndb_negentropy_to_hex(const unsigned char *bin, size_t len, char *hex);
+
+/*
+ * Convert a hex string to binary data.
+ *
+ * Returns: Number of bytes written, or 0 on error (invalid hex,
+ * buffer too small, odd-length input).
+ */
+size_t ndb_negentropy_from_hex(const char *hex, size_t hexlen,
+                               unsigned char *bin, size_t binlen);
+
+
+/* ============================================================
+ * RANGE ENCODING/DECODING
+ * ============================================================
+ *
+ * A Range represents a contiguous section of the timestamp/ID space
+ * with associated data for reconciliation.
+ *
+ * Wire format:
+ *   <upper_bound (Bound)> <mode (varint)> <payload>
+ *
+ * The lower bound is implicit - it's the upper bound of the previous
+ * range (or 0/0 for the first range).
+ *
+ * Payload format depends on mode:
+ *   SKIP:            (empty)
+ *   FINGERPRINT:     16 bytes
+ *   IDLIST:          <count (varint)> <id (32 bytes)>*
+ *   IDLIST_RESPONSE: <have_count (varint)> <have_ids> <bitfield_len (varint)> <bitfield>
+ */
+
+/*
+ * Range structure with payload data.
+ * + * For IDLIST and IDLIST_RESPONSE modes, the caller is responsible + * for allocating and freeing the id arrays. The encode/decode + * functions work with raw buffers; higher-level wrappers should + * manage memory. + */ +struct ndb_negentropy_range { + struct ndb_negentropy_bound upper_bound; + enum ndb_negentropy_mode mode; + + /* + * Payload data (interpretation depends on mode): + * - SKIP: unused + * - FINGERPRINT: fingerprint[16] contains the fingerprint + * - IDLIST: ids points to (id_count * 32) bytes of IDs + * - IDLIST_RESPONSE: have_ids + bitfield for client IDs + */ + union { + unsigned char fingerprint[16]; + + struct { + size_t id_count; + const unsigned char *ids; /* id_count * 32 bytes */ + } id_list; + + struct { + size_t have_count; + const unsigned char *have_ids; /* have_count * 32 bytes */ + size_t bitfield_len; + const unsigned char *bitfield; + } id_list_response; + } payload; +}; + +/* + * Encode a range into a buffer. + * + * prev_timestamp: In/out parameter for bound delta encoding. + * + * For IDLIST mode, payload.id_list.ids must point to valid ID data. + * For IDLIST_RESPONSE mode, both have_ids and bitfield must be valid. + * + * Returns: Number of bytes written, or 0 on error. + */ +int ndb_negentropy_range_encode(unsigned char *buf, size_t buflen, + const struct ndb_negentropy_range *range, + uint64_t *prev_timestamp); + +/* + * Decode a range from a buffer. + * + * prev_timestamp: In/out parameter for bound delta decoding. + * + * For IDLIST and IDLIST_RESPONSE modes, the payload pointers will + * point directly into the input buffer (zero-copy). The caller must + * ensure the buffer remains valid while using the range data. + * + * Returns: Number of bytes consumed, or 0 on error. 
+ */
+int ndb_negentropy_range_decode(const unsigned char *buf, size_t buflen,
+                                struct ndb_negentropy_range *range,
+                                uint64_t *prev_timestamp);
+
+
+/* ============================================================
+ * MESSAGE ENCODING/DECODING
+ * ============================================================
+ *
+ * A negentropy message is the complete unit transmitted over the wire.
+ * It contains a version byte followed by zero or more ranges.
+ *
+ * Wire format:
+ *   <version (1 byte)> <Range>*
+ *
+ * The version byte is 0x61 for protocol V1.
+ *
+ * Messages are hex-encoded for transmission in NIP-77 JSON arrays:
+ *   ["NEG-OPEN", "subId", {filter}, "<hex-msg>"]
+ *   ["NEG-MSG", "subId", "<hex-msg>"]
+ *
+ * Note on range limits: The protocol doesn't impose a maximum number
+ * of ranges, but implementations typically limit them for DOS protection.
+ * A reasonable limit is 128-256 ranges per message.
+ */
+
+/*
+ * Maximum ranges per message for DOS protection.
+ * This can be adjusted based on deployment requirements.
+ * Note: relay.damus.io can send 500KB+ messages with many ranges,
+ * so we use a higher limit than typical implementations.
+ */
+#define NDB_NEGENTROPY_MAX_RANGES 8192
+
+/*
+ * Maximum IDs per IDLIST range for DOS protection.
+ * Prevents overflow when computing id_count * 32.
+ * 100,000 IDs = 3.2MB per range, which is generous.
+ */
+#define NDB_NEGENTROPY_MAX_IDS_PER_RANGE 100000
+
+/*
+ * Encode a complete negentropy message.
+ *
+ * The message starts with the protocol version byte (NDB_NEGENTROPY_PROTOCOL_V1)
+ * followed by the encoded ranges.
+ *
+ * Parameters:
+ *   buf:        Output buffer for the encoded message
+ *   buflen:     Size of the output buffer
+ *   ranges:     Array of ranges to encode
+ *   num_ranges: Number of ranges in the array
+ *
+ * Returns: Total bytes written, or 0 on error.
+ *
+ * Note: The timestamp delta encoding is reset for each message. The
+ * first range uses absolute timestamp encoding (delta from 0).
+ */ +int ndb_negentropy_message_encode(unsigned char *buf, size_t buflen, + const struct ndb_negentropy_range *ranges, + size_t num_ranges); + +/* + * Get the protocol version from a message. + * + * This reads just the first byte without parsing the full message. + * Returns the version byte, or 0 if the buffer is empty. + * + * Use this to check version compatibility before full decode. + */ +int ndb_negentropy_message_version(const unsigned char *buf, size_t buflen); + +/* + * Decode the next range from a message buffer. + * + * This is an incremental decoder for processing ranges one at a time. + * It avoids allocating memory for an array of ranges. + * + * Parameters: + * buf: Input buffer (should point past version byte for first call) + * buflen: Remaining bytes in buffer + * range: Output range structure + * prev_timestamp: In/out state for delta decoding (init to 0) + * + * Returns: Bytes consumed for this range, or 0 if no more ranges/error. + * + * Usage pattern: + * const unsigned char *p = buf + 1; // skip version + * size_t remaining = len - 1; + * uint64_t prev_ts = 0; + * struct ndb_negentropy_range range; + * + * while (remaining > 0) { + * int consumed = ndb_negentropy_range_decode(p, remaining, &range, &prev_ts); + * if (consumed == 0) break; + * // process range... + * p += consumed; + * remaining -= consumed; + * } + */ + +/* + * Count the number of ranges in a message. + * + * This parses through the message to count ranges without + * extracting the full data. Useful for pre-allocating arrays + * or validating message structure. + * + * Returns: Number of ranges, or -1 on parse error. + */ +int ndb_negentropy_message_count_ranges(const unsigned char *buf, size_t buflen); + + +/* ============================================================ + * NEGENTROPY STORAGE + * ============================================================ + * + * Storage holds a sorted list of items for negentropy reconciliation. 
+ * Items are (timestamp, id) pairs sorted first by timestamp, then by id. + * + * The storage can be populated from a NostrDB query or built manually. + * Once sealed, the storage is ready for reconciliation. + * + * Memory management: The storage owns its item array and will free it + * when destroyed. Items are copied in, so the caller can free their + * original data after adding. + */ + +/* + * Storage structure for negentropy items. + * + * Items must be sorted by (timestamp, id) before sealing. + * The seal operation handles sorting automatically. + */ +struct ndb_negentropy_storage { + struct ndb_negentropy_item *items; /* Sorted item array */ + size_t count; /* Number of items */ + size_t capacity; /* Allocated capacity */ + int sealed; /* 1 if sealed (ready for use) */ +}; + +/* + * Initialize a new storage instance. + * + * Must be destroyed with ndb_negentropy_storage_destroy() when done. + * Returns 1 on success, 0 on failure (allocation error). + */ +int ndb_negentropy_storage_init(struct ndb_negentropy_storage *storage); + +/* + * Destroy a storage instance and free its memory. + */ +void ndb_negentropy_storage_destroy(struct ndb_negentropy_storage *storage); + +/* + * Add an item to the storage. + * + * Items can be added in any order - they will be sorted when sealed. + * Must not call after sealing. + * + * Returns 1 on success, 0 on failure (allocation error or already sealed). + */ +int ndb_negentropy_storage_add(struct ndb_negentropy_storage *storage, + uint64_t timestamp, + const unsigned char *id); + +/* + * Add multiple items at once. + * + * More efficient than adding one at a time due to reduced reallocation. + * The items array should contain count items. + * + * Returns 1 on success, 0 on failure. + */ +int ndb_negentropy_storage_add_many(struct ndb_negentropy_storage *storage, + const struct ndb_negentropy_item *items, + size_t count); + +/* + * Seal the storage for use. 
+ * + * This sorts the items by (timestamp, id) and marks the storage as ready. + * After sealing: + * - No more items can be added + * - The storage can be used for fingerprint computation + * + * Returns 1 on success, 0 if already sealed. + */ +int ndb_negentropy_storage_seal(struct ndb_negentropy_storage *storage); + +/* + * Get the number of items in the storage. + */ +size_t ndb_negentropy_storage_size(const struct ndb_negentropy_storage *storage); + +/* + * Get an item by index. + * + * Index must be < size(). Returns NULL if out of bounds or not sealed. + */ +const struct ndb_negentropy_item * +ndb_negentropy_storage_get(const struct ndb_negentropy_storage *storage, size_t index); + +/* + * Find the index of the first item >= the given bound. + * + * Uses binary search for O(log n) performance. + * Returns the insertion point if no exact match (i.e., the index where + * an item with this bound would be inserted). + * + * Storage must be sealed. + */ +size_t ndb_negentropy_storage_lower_bound(const struct ndb_negentropy_storage *storage, + const struct ndb_negentropy_bound *bound); + +/* + * Compute the fingerprint for a range of items. + * + * Computes the fingerprint for items in [begin, end). + * The begin and end are indices into the storage. + * + * Storage must be sealed. + * Returns 1 on success, 0 on error (invalid indices or not sealed). + */ +int ndb_negentropy_storage_fingerprint(const struct ndb_negentropy_storage *storage, + size_t begin, size_t end, + unsigned char *fingerprint_out); + + +/* ============================================================ + * FILTER-BASED INITIALIZATION (NostrDB Integration) + * ============================================================ + * + * These functions integrate negentropy with NostrDB's query system, + * allowing storage to be populated directly from a NIP-01 filter + * rather than manually adding items. + */ + +/* + * Populate storage from a NostrDB filter query. 
+ * + * This queries the database using the provided filter and adds all + * matching events to the storage. The storage should be initialized + * but not sealed before calling this function. + * + * After this function returns successfully, the storage is automatically + * sealed and ready for use. + * + * Parameters: + * storage: Initialized (but not sealed) storage + * txn: Active read transaction + * filter: NIP-01 filter to query events + * limit: Maximum number of events to add (0 = use filter's limit or 10000) + * + * Returns: Number of items added, or -1 on error. + * + * Note: The transaction must remain valid for the lifetime of the storage + * since we only store references to the event data. + */ +int ndb_negentropy_storage_from_filter(struct ndb_negentropy_storage *storage, + struct ndb_txn *txn, + struct ndb_filter *filter, + int limit); + + +/* ============================================================ + * RECONCILIATION STATE MACHINE + * ============================================================ + * + * The reconciliation engine processes negentropy messages and + * determines which items each side has that the other lacks. + * + * Protocol flow: + * 1. Client calls initiate() to create initial message + * 2. Server processes with reconcile(), sends reply + * 3. Client calls reconcile() on reply, extracts have/need IDs + * 4. Repeat until reconcile() returns empty message (sync complete) + * + * The engine is agnostic to client/server roles - both sides use + * the same API. The difference is who calls initiate() first. + */ + +/* + * Threshold for switching from IdList to Fingerprint mode. + * Ranges smaller than this send full IdLists (base case). + * Larger ranges send Fingerprints for sub-ranges. + */ +#define NDB_NEGENTROPY_IDLIST_THRESHOLD 16 + +/* + * Number of sub-ranges to split into when fingerprints differ. + * Must be > 1 to ensure progress. + */ +#define NDB_NEGENTROPY_SPLIT_COUNT 16 + +/* + * ID output array for have/need tracking. 
+ * + * During reconciliation, IDs are accumulated into these arrays. + * The arrays are dynamically grown as needed. + */ +struct ndb_negentropy_ids { + unsigned char *ids; /* Array of 32-byte IDs */ + size_t count; /* Number of IDs */ + size_t capacity; /* Allocated capacity (in IDs, not bytes) */ +}; + +/* + * Configuration for negentropy reconciliation. + * + * Pass NULL to use defaults. All fields are optional - zero values + * use sensible defaults. + */ +struct ndb_negentropy_config { + /* + * Maximum frame/message size in bytes. 0 = unlimited. + * Useful for constraining message sizes on memory-limited devices. + */ + int frame_size_limit; + + /* + * Threshold for switching between fingerprint and idlist modes. + * Ranges with fewer items than this send full ID lists. + * Default: NDB_NEGENTROPY_IDLIST_THRESHOLD (16) + */ + int idlist_threshold; + + /* + * Number of sub-ranges to split into when fingerprints differ. + * Must be > 1 to ensure progress. + * Default: NDB_NEGENTROPY_SPLIT_COUNT (16) + */ + int split_count; +}; + +/* + * Reconciliation context. + * + * Holds the storage reference and tracks state across multiple + * reconcile() calls. Also accumulates have/need IDs. + */ +struct ndb_negentropy { + const struct ndb_negentropy_storage *storage; /* Item storage (not owned) */ + int is_initiator; /* 1 if we initiated */ + int is_complete; /* 1 when reconciliation done */ + + /* Configuration (copied from init) */ + int frame_size_limit; + int idlist_threshold; + int split_count; + + /* IDs we have that remote needs (to send) */ + struct ndb_negentropy_ids have_ids; + + /* IDs remote has that we need (to request) */ + struct ndb_negentropy_ids need_ids; +}; + +/* + * Initialize a negentropy reconciliation context. + * + * The storage must be sealed and remain valid for the lifetime + * of the context. The context does not own the storage. + * + * The config parameter is optional - pass NULL to use defaults. 
+ * If provided, the config is copied so it doesn't need to remain valid. + * + * Returns 1 on success, 0 on failure. + */ +int ndb_negentropy_init(struct ndb_negentropy *neg, + const struct ndb_negentropy_storage *storage, + const struct ndb_negentropy_config *config); + +/* + * Destroy a negentropy context and free resources. + */ +void ndb_negentropy_destroy(struct ndb_negentropy *neg); + +/* + * Create the initial message to start reconciliation. + * + * This creates a single FINGERPRINT range covering the entire + * item space (from timestamp 0 to infinity). + * + * Parameters: + * neg: Initialized context + * buf: Output buffer for the encoded message + * buflen: Size of output buffer + * outlen: Receives the actual message length + * + * Returns 1 on success, 0 on failure. + */ +int ndb_negentropy_initiate(struct ndb_negentropy *neg, + unsigned char *buf, size_t buflen, + size_t *outlen); + +/* + * Process an incoming message and generate a response. + * + * This is the core reconciliation function. It: + * 1. Parses the incoming message + * 2. Compares fingerprints and splits differing ranges + * 3. Processes IdLists and IdListResponses + * 4. Accumulates have/need IDs + * 5. Generates a response message + * + * Parameters: + * neg: Initialized context + * msg: Incoming message (binary, not hex) + * msglen: Length of incoming message + * out: Output buffer for response message + * outlen: In: buffer size, Out: response length + * + * Returns: + * 1 - Success, response generated (check outlen > 1 for more rounds) + * 0 - Error (parse error, invalid message, etc.) + * + * When outlen == 1 on return (just version byte), reconciliation + * is complete - no more messages needed. + */ +int ndb_negentropy_reconcile(struct ndb_negentropy *neg, + const unsigned char *msg, size_t msglen, + unsigned char *out, size_t *outlen); + +/* + * Check if reconciliation is complete. 
+ * + * Returns 1 if reconciliation is done (no more rounds needed), + * 0 if more rounds are required. + * + * Reconciliation is complete when reconcile() returns an empty + * response (just version byte, length == 1). + */ +int ndb_negentropy_is_complete(const struct ndb_negentropy *neg); + +/* + * Get the IDs we have that the remote needs. + * + * These are IDs we should send to the remote. + * The returned array remains valid until the context is destroyed + * or the next reconcile() call. + * + * Returns the number of IDs. ids_out receives pointer to the array. + */ +size_t ndb_negentropy_get_have_ids(const struct ndb_negentropy *neg, + const unsigned char **ids_out); + +/* + * Get the IDs the remote has that we need. + * + * These are IDs we should request from the remote. + * The returned array remains valid until the context is destroyed + * or the next reconcile() call. + * + * Returns the number of IDs. ids_out receives pointer to the array. + */ +size_t ndb_negentropy_get_need_ids(const struct ndb_negentropy *neg, + const unsigned char **ids_out); + + +#endif /* NDB_NEGENTROPY_H */ diff --git a/nostrscript/NostrScript.swift b/nostrscript/NostrScript.swift index 917ccb783e..eede2e9843 100644 --- a/nostrscript/NostrScript.swift +++ b/nostrscript/NostrScript.swift @@ -194,6 +194,9 @@ enum NScriptEventType: Int { case notice = 3 case eose = 4 case auth = 5 + case negMsg = 6 + case negErr = 7 + case closed = 8 init(resp: NostrResponse) { switch resp { @@ -207,6 +210,12 @@ enum NScriptEventType: Int { self = .ok case .auth: self = .auth + case .negMsg: + self = .negMsg + case .negErr: + self = .negErr + case .closed: + self = .closed } } }