diff --git a/cmd/qui/main.go b/cmd/qui/main.go index ac9f9d0a8..2ba9e7c40 100644 --- a/cmd/qui/main.go +++ b/cmd/qui/main.go @@ -621,6 +621,7 @@ func (app *Application) runServer() { } instanceCrossSeedCompletionStore := models.NewInstanceCrossSeedCompletionStore(db) crossSeedBlocklistStore := models.NewCrossSeedBlocklistStore(db) + crossSeedPartialPoolStore := models.NewCrossSeedPartialPoolMemberStore(db) crossSeedService := crossseed.NewService( instanceStore, syncManager, @@ -632,6 +633,7 @@ func (app *Application) runServer() { externalProgramStore, externalProgramService, instanceCrossSeedCompletionStore, + crossSeedPartialPoolStore, trackerCustomizationStore, notificationService, cfg.Config.CrossSeedRecoverErroredTorrents, @@ -651,6 +653,7 @@ func (app *Application) runServer() { }) syncManager.SetTorrentAddedHandler(func(ctx context.Context, instanceID int, torrent qbt.Torrent) { + crossSeedService.HandleTorrentAdded(ctx, instanceID, torrent) notifyTorrentAddedWithDelay(ctx, syncManager, notificationService, instanceID, torrent) }) @@ -784,6 +787,14 @@ func (app *Application) runServer() { defer reconcileCancel() crossSeedService.ReconcileInterruptedRuns(reconcileCtx) + // Restore active partial-pool members with a separate timeout budget so a slow + // reconcile pass doesn't consume the entire startup window. 
+ restorePoolsCtx, restorePoolsCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer restorePoolsCancel() + if err := crossSeedService.RestoreActivePartialPools(restorePoolsCtx); err != nil { + log.Warn().Err(err).Msg("Failed to restore active cross-seed partial pools") + } + errorChannel := make(chan error) serverReady := make(chan struct{}, 1) go func() { diff --git a/documentation/docs/features/cross-seed/hardlink-mode.md b/documentation/docs/features/cross-seed/hardlink-mode.md index a88f9e8f3..6e2b98053 100644 --- a/documentation/docs/features/cross-seed/hardlink-mode.md +++ b/documentation/docs/features/cross-seed/hardlink-mode.md @@ -78,6 +78,9 @@ By default, hardlink-added torrents start seeding immediately (since `skip_check - Hardlinks share disk blocks with the original file but increase the link count. Deleting one link does not necessarily free space until all links are removed. - Windows support: folder names are sanitized to remove characters Windows forbids. Torrent file paths themselves still need to be valid for your qBittorrent setup. - Hardlink mode supports extra files when piece-boundary safe. If the incoming torrent contains extra files not present in the matched torrent (e.g., `.nfo`/`.srt` sidecars), hardlink mode will link the content files and trigger a recheck so qBittorrent downloads the extras. If extras share pieces with content (unsafe), the cross-seed is skipped. +- If you enable pooled partial completion in the **Hardlink / Reflink Mode** section of the Rules tab, related hardlink adds against the same matched local source torrent can cooperate temporarily. Hardlink automation only continues when post-recheck missing data is limited to whole missing files. If qBittorrent reports missing bytes inside an already linked file, qui leaves that torrent paused for manual review. 
+- With pooled partial completion enabled, hardlink mode can still add paused even when no files are immediately reusable, then rely on recheck and the pool to decide whether it can continue automatically. The pool waits for any currently active member to finish downloading its missing content before automatically moving on, and the preferred downloader rotates on a timer so another member can take over in long-lived pools. +- If you manually start another paused torrent from the same pool, qui notices it on the next pool review and uses that torrent's completed files in the same propagation/recheck flow as automatically selected members. ## Reflink Mode (Alternative) @@ -115,8 +118,23 @@ On Linux, check the filesystem type with `df -T /path` (you want `xfs`/`btrfs`, | Aspect | Hardlink Mode | Reflink Mode | |--------|--------------|--------------| | Piece-boundary check | Skips if unsafe | Never skips (safe to modify clones) | -| Recheck | Only when extras exist | Only when extras exist | +| Recheck | When the linked set is incomplete and **Skip recheck** is off | When the cloned set is incomplete and **Skip recheck** is off; the single-file size mismatch override also uses this path | | Disk usage | Zero (shared blocks) | Starts near-zero; grows as modified | +| Single-file size mismatch | Not supported | Optional normalized-name override | + +When pooled partial completion is enabled, reflink members may continue even when a file is only partially complete after recheck, as long as the total missing bytes remain within the configured post-recheck limit. As with hardlink pools, qui keeps coordination to one active downloader at a time, rotates the preferred downloader on a timer for long-lived pools, and re-reviews the pool when you manually start another paused member. 
+ +### Single-File Size Mismatch Override + +If you enable **Allow reflink single-file size mismatch** in the **Hardlink / Reflink Mode** section, qui can accept a reflink cross-seed when: + +- both torrents contain exactly one file; +- the normalized file names match; and +- the sizes differ but are still within 1%. + +This option creates a recheck-required reflink add. If **Skip recheck** is enabled in the Rules tab, qui skips the match instead of adding it. When **Skip recheck** is disabled, qui clones the file into the reflink tree, adds the torrent paused, and queues a recheck; if qBittorrent reaches at least **99%** after recheck, qui resumes it automatically. + +This override is separate from pooled partial completion, so enabling **Allow reflink single-file size mismatch** does not place that add into a pooled partial-completion flow. Reflink mode can still enter the recheck path for other reasons, such as extra files or any other incomplete cloned set, and those matches can also be skipped entirely when **Skip recheck** is enabled. ### Disk Usage Implications diff --git a/documentation/docs/features/cross-seed/overview.md b/documentation/docs/features/cross-seed/overview.md index a7178a517..bc41a72a9 100644 --- a/documentation/docs/features/cross-seed/overview.md +++ b/documentation/docs/features/cross-seed/overview.md @@ -22,6 +22,8 @@ qui supports three modes for handling files: - **Hardlink mode** (optional): Creates a hardlinked copy of the matched files laid out exactly as the incoming torrent expects, then adds the torrent pointing at that tree. Avoids rename-alignment entirely. - **Reflink mode** (optional): Creates copy-on-write clones (reflinks) of the matched files. Allows safe cross-seeding of torrents with extra/missing files because qBittorrent can write/repair the clones without affecting originals. 
+For managed hardlink/reflink adds that need follow-up recheck handling, the **Hardlink / Reflink Mode** section on the Rules tab includes pooled partial completion controls and a reflink-only single-file size mismatch override. + Disc-based media (Blu-ray/DVD) requires manual verification. See [troubleshooting](troubleshooting#blu-ray-or-dvd-cross-seed-left-paused). ## Prerequisites diff --git a/documentation/docs/features/cross-seed/rules.md b/documentation/docs/features/cross-seed/rules.md index b87f83b45..23c111ca0 100644 --- a/documentation/docs/features/cross-seed/rules.md +++ b/documentation/docs/features/cross-seed/rules.md @@ -14,10 +14,32 @@ Configure matching behavior in the **Rules** tab on the Cross-Seed page. - **Skip recheck** - When enabled, skips any cross-seed that would require a recheck (alignment needed, extra files, or disc layouts like `BDMV`/`VIDEO_TS`). Applies to all modes including hardlink/reflink. - **Skip piece boundary safety check** - Enabled by default. When enabled, allows cross-seeds even if extra files share torrent pieces with content files. **Warning:** This may corrupt your existing seeded data if content differs. Uncheck this to enable the safety check, or use reflink mode which safely handles these cases. +Managed-link follow-up settings live in **Hardlink / Reflink Mode** on the same Rules tab: + +- **Enable pooled partial completion** - Only shown when at least one instance is using hardlink or reflink mode. Applies only to hardlink/reflink adds that already passed the normal acceptance rules and still need coordination after add time, such as non-exact matches, extra files, or disc layouts. Related partial adds against the same matched local source torrent are coordinated as a shared active pool, and active pool state can be restored while the pool remains active. +- **Max missing bytes after recheck** - Shown when pooled partial completion is enabled. Default `100 MiB`. Used only for pooled reflink automation. 
If a reflink pool member still has more missing bytes than this after recheck, it stays paused for manual review. +- **Allow reflink single-file size mismatch** - Only shown when at least one instance is using reflink mode. Reflink-only escape hatch for one-file torrents where the normalized file names match and the source file size is already within 1% of the incoming size. qui clones the file, forces a recheck, and auto-resumes once qBittorrent reaches 99%. Larger gaps are rejected before add. This path does not use pooled partial completion. + :::note Disc layouts (`BDMV`/`VIDEO_TS`) are treated more strictly: they only auto-resume after a full recheck reaches 100%. ::: +:::note +For pooled partial completion, hardlink automation only continues when the post-recheck gap is limited to whole missing files and those files are still piece-boundary safe. If bytes are missing inside an existing linked file, qui leaves the torrent paused for manual review. Reflink can continue with partial-file divergence as long as it stays within the byte limit above. +::: + +:::note +Pooled partial completion keeps managed hardlink/reflink adds paused while qBittorrent rechecks them. Once the pool no longer needs coordination, normal resume behavior takes over. If you also enable **Skip recheck**, any add that would have relied on pooled handling is skipped instead. +::: + +:::note +Within an active pool, qui lets one eligible member download missing content at a time while the others stay paused for coordination. If one member is already downloading, qui waits for that member instead of reshuffling the pool immediately. The preferred downloader is rotated on a timer (about 6 hours) so a long-lived pool can move on to another member instead of sticking to the same one forever. +::: + +:::tip +You can manually resume another paused torrent from the same pool if you want to kick the pool back into review. 
qui polls pooled members regularly (about every 10 seconds), notices the newly running torrent, and re-evaluates the pool so completed files can be propagated and rechecked for the remaining paused members. +::: + ## Categories Choose one of three mutually exclusive category modes: diff --git a/documentation/docs/features/cross-seed/troubleshooting.md b/documentation/docs/features/cross-seed/troubleshooting.md index 4d0d859c5..9b9e46c19 100644 --- a/documentation/docs/features/cross-seed/troubleshooting.md +++ b/documentation/docs/features/cross-seed/troubleshooting.md @@ -95,6 +95,30 @@ The incoming torrent has files not present in your matched torrent, and those fi - Verify the "Size mismatch tolerance" setting in Rules - Torrents below the auto-resume threshold stay paused for manual review +## Pooled partial completion stayed paused + +If **Enable pooled partial completion** is on in **Hardlink / Reflink Mode**, qui may intentionally add hardlink/reflink cross-seeds paused and wait for qBittorrent's recheck result before deciding whether to resume them. 
+ +What to expect from the pool: +- qui usually keeps one eligible pool member downloading missing content at a time while the others stay paused +- If one member is already downloading, qui waits for that download instead of immediately switching the pool to someone else +- The preferred downloader rotates on a timer (about 6 hours) so another eligible member can be chosen in long-lived pools +- Manually resuming another paused torrent in the same pool triggers another pool review on the next worker poll (about 10 seconds), which can let qui recheck and reuse newly completed files across the pool + +Common reasons they remain paused: +- **Hardlink post-recheck gap is inside an existing linked file**: hardlink automation only continues automatically when the remaining gap is made of whole missing files +- **Reflink post-recheck gap exceeds the configured byte limit**: check **Max missing bytes after recheck** in **Hardlink / Reflink Mode** +- **Disc layout (`BDMV`/`VIDEO_TS`)**: these are handled more conservatively and require a full successful recheck +- **Skip recheck is enabled**: pooled handling cannot run if the add would have required a recheck + +If the result looks safe in qBittorrent, you can resume manually. + +## Reflink single-file size mismatch was skipped or stayed paused + +The **Allow reflink single-file size mismatch** option in **Hardlink / Reflink Mode** only applies when both torrents contain exactly one file, the normalized file names match, and the sizes are already within 1%. It does not apply to multi-file torrents. + +When it does apply, qui adds the torrent paused, queues a recheck, and only auto-resumes once qBittorrent reaches **99%**. If the size gap is larger than 1%, qui rejects it before add. If it still stays below 99% after recheck, leave it paused for manual review. + ## Blu-ray or DVD cross-seed left paused Torrents containing disc-based media (Blu-ray `BDMV` or DVD `VIDEO_TS` folder structures) are always added paused. 
diff --git a/internal/api/handlers/crossseed.go b/internal/api/handlers/crossseed.go index a641f19a1..f68adf1e9 100644 --- a/internal/api/handlers/crossseed.go +++ b/internal/api/handlers/crossseed.go @@ -32,24 +32,29 @@ type CrossSeedHandler struct { var infoHashRegex = regexp.MustCompile(`^[a-fA-F0-9]{40}$|^[a-fA-F0-9]{64}$`) +const minMaxMissingBytesAfterRecheck = 1024 * 1024 + type automationSettingsRequest struct { - Enabled bool `json:"enabled"` - RunIntervalMinutes int `json:"runIntervalMinutes"` - StartPaused bool `json:"startPaused"` - Category *string `json:"category"` - TargetInstanceIDs []int `json:"targetInstanceIds"` - TargetIndexerIDs []int `json:"targetIndexerIds"` - MaxResultsPerRun int `json:"maxResultsPerRun"` // Deprecated: automation now processes full feeds and ignores this value - FindIndividualEpisodes bool `json:"findIndividualEpisodes"` - SizeMismatchTolerancePercent float64 `json:"sizeMismatchTolerancePercent"` - UseCategoryFromIndexer bool `json:"useCategoryFromIndexer"` - UseCrossCategoryAffix bool `json:"useCrossCategoryAffix"` - CategoryAffixMode string `json:"categoryAffixMode"` - CategoryAffix string `json:"categoryAffix"` - UseCustomCategory bool `json:"useCustomCategory"` - CustomCategory string `json:"customCategory"` - RunExternalProgramID *int `json:"runExternalProgramId"` - SkipRecheck bool `json:"skipRecheck"` + Enabled bool `json:"enabled"` + RunIntervalMinutes int `json:"runIntervalMinutes"` + StartPaused bool `json:"startPaused"` + Category *string `json:"category"` + TargetInstanceIDs []int `json:"targetInstanceIds"` + TargetIndexerIDs []int `json:"targetIndexerIds"` + MaxResultsPerRun int `json:"maxResultsPerRun"` // Deprecated: automation now processes full feeds and ignores this value + FindIndividualEpisodes bool `json:"findIndividualEpisodes"` + SizeMismatchTolerancePercent float64 `json:"sizeMismatchTolerancePercent"` + UseCategoryFromIndexer bool `json:"useCategoryFromIndexer"` + UseCrossCategoryAffix bool 
`json:"useCrossCategoryAffix"` + CategoryAffixMode string `json:"categoryAffixMode"` + CategoryAffix string `json:"categoryAffix"` + UseCustomCategory bool `json:"useCustomCategory"` + CustomCategory string `json:"customCategory"` + RunExternalProgramID *int `json:"runExternalProgramId"` + SkipRecheck bool `json:"skipRecheck"` + EnablePooledPartialCompletion bool `json:"enablePooledPartialCompletion"` + AllowReflinkSingleFileSizeMismatch bool `json:"allowReflinkSingleFileSizeMismatch"` + MaxMissingBytesAfterRecheck int64 `json:"maxMissingBytesAfterRecheck"` // Gazelle (OPS/RED) cross-seed settings. GazelleEnabled bool `json:"gazelleEnabled"` RedactedAPIKey string `json:"redactedApiKey"` @@ -90,12 +95,15 @@ type automationSettingsPatchRequest struct { WebhookTags *[]string `json:"webhookTags,omitempty"` InheritSourceTags *bool `json:"inheritSourceTags,omitempty"` // Skip auto-resume settings per source mode - SkipAutoResumeRSS *bool `json:"skipAutoResumeRss,omitempty"` - SkipAutoResumeSeededSearch *bool `json:"skipAutoResumeSeededSearch,omitempty"` - SkipAutoResumeCompletion *bool `json:"skipAutoResumeCompletion,omitempty"` - SkipAutoResumeWebhook *bool `json:"skipAutoResumeWebhook,omitempty"` - SkipRecheck *bool `json:"skipRecheck,omitempty"` - SkipPieceBoundarySafetyCheck *bool `json:"skipPieceBoundarySafetyCheck,omitempty"` + SkipAutoResumeRSS *bool `json:"skipAutoResumeRss,omitempty"` + SkipAutoResumeSeededSearch *bool `json:"skipAutoResumeSeededSearch,omitempty"` + SkipAutoResumeCompletion *bool `json:"skipAutoResumeCompletion,omitempty"` + SkipAutoResumeWebhook *bool `json:"skipAutoResumeWebhook,omitempty"` + SkipRecheck *bool `json:"skipRecheck,omitempty"` + EnablePooledPartialCompletion *bool `json:"enablePooledPartialCompletion,omitempty"` + AllowReflinkSingleFileSizeMismatch *bool `json:"allowReflinkSingleFileSizeMismatch,omitempty"` + MaxMissingBytesAfterRecheck *int64 `json:"maxMissingBytesAfterRecheck,omitempty"` + SkipPieceBoundarySafetyCheck *bool 
`json:"skipPieceBoundarySafetyCheck,omitempty"` // Gazelle (OPS/RED) cross-seed settings. GazelleEnabled *bool `json:"gazelleEnabled,omitempty"` RedactedAPIKey *string `json:"redactedApiKey,omitempty"` @@ -193,12 +201,129 @@ func (r automationSettingsPatchRequest) isEmpty() bool { r.SkipAutoResumeCompletion == nil && r.SkipAutoResumeWebhook == nil && r.SkipRecheck == nil && + r.EnablePooledPartialCompletion == nil && + r.AllowReflinkSingleFileSizeMismatch == nil && + r.MaxMissingBytesAfterRecheck == nil && r.SkipPieceBoundarySafetyCheck == nil && r.GazelleEnabled == nil && r.RedactedAPIKey == nil && r.OrpheusAPIKey == nil } +func validateMaxMissingBytesAfterRecheck(w http.ResponseWriter, value int64) bool { + if value < minMaxMissingBytesAfterRecheck { + RespondError(w, http.StatusBadRequest, "maxMissingBytesAfterRecheck must be one MiB or greater") + return false + } + return true +} + +func decodeAutomationSettingsRequest(body io.Reader) (automationSettingsRequest, map[string]struct{}, error) { + var req automationSettingsRequest + + payload, err := io.ReadAll(body) + if err != nil { + return req, nil, err + } + + if err := json.Unmarshal(payload, &req); err != nil { + return req, nil, err + } + + var raw map[string]json.RawMessage + if err := json.Unmarshal(payload, &raw); err != nil { + return req, nil, err + } + + fields := make(map[string]struct{}, len(raw)) + for key := range raw { + fields[key] = struct{}{} + } + + return req, fields, nil +} + +func hasJSONField(fields map[string]struct{}, key string) bool { + _, ok := fields[key] + return ok +} + +func automationSettingsPatchFromRequest(req automationSettingsRequest, fields map[string]struct{}) automationSettingsPatchRequest { + patch := automationSettingsPatchRequest{} + + if hasJSONField(fields, "enabled") { + patch.Enabled = &req.Enabled + } + if hasJSONField(fields, "runIntervalMinutes") { + patch.RunIntervalMinutes = &req.RunIntervalMinutes + } + if hasJSONField(fields, "startPaused") { + 
patch.StartPaused = &req.StartPaused + } + if hasJSONField(fields, "category") { + patch.Category = optionalString{Set: true, Value: req.Category} + } + if hasJSONField(fields, "targetInstanceIds") { + patch.TargetInstanceIDs = &req.TargetInstanceIDs + } + if hasJSONField(fields, "targetIndexerIds") { + patch.TargetIndexerIDs = &req.TargetIndexerIDs + } + if hasJSONField(fields, "maxResultsPerRun") { + patch.MaxResultsPerRun = &req.MaxResultsPerRun + } + if hasJSONField(fields, "findIndividualEpisodes") { + patch.FindIndividualEpisodes = &req.FindIndividualEpisodes + } + if hasJSONField(fields, "sizeMismatchTolerancePercent") { + patch.SizeMismatchTolerancePercent = &req.SizeMismatchTolerancePercent + } + if hasJSONField(fields, "useCategoryFromIndexer") { + patch.UseCategoryFromIndexer = &req.UseCategoryFromIndexer + } + if hasJSONField(fields, "useCrossCategoryAffix") { + patch.UseCrossCategoryAffix = &req.UseCrossCategoryAffix + } + if hasJSONField(fields, "categoryAffixMode") { + patch.CategoryAffixMode = &req.CategoryAffixMode + } + if hasJSONField(fields, "categoryAffix") { + patch.CategoryAffix = &req.CategoryAffix + } + if hasJSONField(fields, "useCustomCategory") { + patch.UseCustomCategory = &req.UseCustomCategory + } + if hasJSONField(fields, "customCategory") { + patch.CustomCategory = &req.CustomCategory + } + if hasJSONField(fields, "runExternalProgramId") { + patch.RunExternalProgramID = optionalInt{Set: true, Value: req.RunExternalProgramID} + } + if hasJSONField(fields, "skipRecheck") { + patch.SkipRecheck = &req.SkipRecheck + } + if hasJSONField(fields, "enablePooledPartialCompletion") { + patch.EnablePooledPartialCompletion = &req.EnablePooledPartialCompletion + } + if hasJSONField(fields, "allowReflinkSingleFileSizeMismatch") { + patch.AllowReflinkSingleFileSizeMismatch = &req.AllowReflinkSingleFileSizeMismatch + } + if hasJSONField(fields, "maxMissingBytesAfterRecheck") { + patch.MaxMissingBytesAfterRecheck = &req.MaxMissingBytesAfterRecheck + 
} + if hasJSONField(fields, "gazelleEnabled") { + patch.GazelleEnabled = &req.GazelleEnabled + } + if hasJSONField(fields, "redactedApiKey") { + patch.RedactedAPIKey = &req.RedactedAPIKey + } + if hasJSONField(fields, "orpheusApiKey") { + patch.OrpheusAPIKey = &req.OrpheusAPIKey + } + + return patch +} + func applyAutomationSettingsPatch(settings *models.CrossSeedAutomationSettings, patch automationSettingsPatchRequest) { if patch.Enabled != nil { settings.Enabled = *patch.Enabled @@ -315,6 +440,15 @@ func applyAutomationSettingsPatch(settings *models.CrossSeedAutomationSettings, if patch.SkipRecheck != nil { settings.SkipRecheck = *patch.SkipRecheck } + if patch.EnablePooledPartialCompletion != nil { + settings.EnablePooledPartialCompletion = *patch.EnablePooledPartialCompletion + } + if patch.AllowReflinkSingleFileSizeMismatch != nil { + settings.AllowReflinkSingleFileSizeMismatch = *patch.AllowReflinkSingleFileSizeMismatch + } + if patch.MaxMissingBytesAfterRecheck != nil { + settings.MaxMissingBytesAfterRecheck = *patch.MaxMissingBytesAfterRecheck + } if patch.SkipPieceBoundarySafetyCheck != nil { settings.SkipPieceBoundarySafetyCheck = *patch.SkipPieceBoundarySafetyCheck } @@ -329,6 +463,55 @@ func applyAutomationSettingsPatch(settings *models.CrossSeedAutomationSettings, } } +func validateAutomationSettings(w http.ResponseWriter, settings *models.CrossSeedAutomationSettings) bool { + if settings.CategoryAffix != "" { + // No backslashes allowed + if strings.Contains(settings.CategoryAffix, "\\") { + RespondError(w, http.StatusBadRequest, "Category affix cannot contain backslashes") + return false + } + // No double slashes allowed + if strings.Contains(settings.CategoryAffix, "//") { + RespondError(w, http.StatusBadRequest, "Category affix cannot contain double slashes") + return false + } + // Prefix mode: cannot start with a slash (would create leading slash in category) + if settings.CategoryAffixMode == models.CategoryAffixModePrefix && 
strings.HasPrefix(settings.CategoryAffix, "/") { + RespondError(w, http.StatusBadRequest, "Category prefix cannot start with a slash") + return false + } + // Suffix mode: cannot end with a slash (would create trailing slash in category) + if settings.CategoryAffixMode == models.CategoryAffixModeSuffix && strings.HasSuffix(settings.CategoryAffix, "/") { + RespondError(w, http.StatusBadRequest, "Category suffix cannot end with a slash") + return false + } + } + + if settings.UseCrossCategoryAffix && + settings.CategoryAffixMode != models.CategoryAffixModePrefix && + settings.CategoryAffixMode != models.CategoryAffixModeSuffix { + RespondError(w, http.StatusBadRequest, "Category affix mode must be either 'prefix' or 'suffix'") + return false + } + + enabledModes := 0 + if settings.UseCategoryFromIndexer { + enabledModes++ + } + if settings.UseCrossCategoryAffix { + enabledModes++ + } + if settings.UseCustomCategory { + enabledModes++ + } + if enabledModes > 1 { + RespondError(w, http.StatusBadRequest, "Category modes are mutually exclusive. Enable only one of: indexer name, category affix, or custom category.") + return false + } + + return true +} + type automationRunRequest struct { DryRun bool `json:"dryRun"` } @@ -762,95 +945,39 @@ func (h *CrossSeedHandler) GetAutomationSettings(w http.ResponseWriter, r *http. 
// @Security ApiKeyAuth // @Router /api/cross-seed/settings [put] func (h *CrossSeedHandler) UpdateAutomationSettings(w http.ResponseWriter, r *http.Request) { - var req automationSettingsRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + req, fields, err := decodeAutomationSettingsRequest(r.Body) + if err != nil { RespondError(w, http.StatusBadRequest, "Invalid request body") return } - category := req.Category - if category != nil { - trimmed := strings.TrimSpace(*category) - if trimmed == "" { - category = nil - } else { - category = &trimmed - } + if hasJSONField(fields, "categoryAffixMode") && + req.CategoryAffixMode != "" && + req.CategoryAffixMode != models.CategoryAffixModePrefix && + req.CategoryAffixMode != models.CategoryAffixModeSuffix { + RespondError(w, http.StatusBadRequest, "Category affix mode must be either 'prefix' or 'suffix'") + return } - // Validate categoryAffixMode if provided OR if UseCrossCategoryAffix is enabled - if req.CategoryAffixMode != "" || req.UseCrossCategoryAffix { - if req.CategoryAffixMode != models.CategoryAffixModePrefix && req.CategoryAffixMode != models.CategoryAffixModeSuffix { - RespondError(w, http.StatusBadRequest, "Category affix mode must be either 'prefix' or 'suffix'") - return - } + if hasJSONField(fields, "maxMissingBytesAfterRecheck") && !validateMaxMissingBytesAfterRecheck(w, req.MaxMissingBytesAfterRecheck) { + return } - req.CategoryAffix = strings.TrimSpace(req.CategoryAffix) - - if req.CategoryAffix != "" { - // No backslashes allowed - if strings.Contains(req.CategoryAffix, "\\") { - RespondError(w, http.StatusBadRequest, "Category affix cannot contain backslashes") - return - } - // No double slashes allowed - if strings.Contains(req.CategoryAffix, "//") { - RespondError(w, http.StatusBadRequest, "Category affix cannot contain double slashes") - return - } - // Prefix mode: cannot start with a slash (would create leading slash in category) - if req.CategoryAffixMode == 
models.CategoryAffixModePrefix && strings.HasPrefix(req.CategoryAffix, "/") { - RespondError(w, http.StatusBadRequest, "Category prefix cannot start with a slash") - return - } - // Suffix mode: cannot end with a slash (would create trailing slash in category) - if req.CategoryAffixMode == models.CategoryAffixModeSuffix && strings.HasSuffix(req.CategoryAffix, "/") { - RespondError(w, http.StatusBadRequest, "Category suffix cannot end with a slash") - return - } + current, err := h.service.GetAutomationSettings(r.Context()) + if err != nil { + log.Error().Err(err).Msg("Failed to load cross-seed automation settings for update") + RespondError(w, http.StatusInternalServerError, "Failed to load automation settings") + return } - // Validate mutual exclusivity: category modes are mutually exclusive - enabledModes := 0 - if req.UseCategoryFromIndexer { - enabledModes++ - } - if req.UseCrossCategoryAffix { - enabledModes++ - } - if req.UseCustomCategory { - enabledModes++ - } - if enabledModes > 1 { - RespondError(w, http.StatusBadRequest, "Category modes are mutually exclusive. 
Enable only one of: indexer name, category affix, or custom category.") + merged := *current + applyAutomationSettingsPatch(&merged, automationSettingsPatchFromRequest(req, fields)) + + if !validateAutomationSettings(w, &merged) { return } - settings := &models.CrossSeedAutomationSettings{ - Enabled: req.Enabled, - RunIntervalMinutes: req.RunIntervalMinutes, - StartPaused: req.StartPaused, - Category: category, - TargetInstanceIDs: req.TargetInstanceIDs, - TargetIndexerIDs: req.TargetIndexerIDs, - MaxResultsPerRun: req.MaxResultsPerRun, - FindIndividualEpisodes: req.FindIndividualEpisodes, - SizeMismatchTolerancePercent: req.SizeMismatchTolerancePercent, - UseCategoryFromIndexer: req.UseCategoryFromIndexer, - UseCrossCategoryAffix: req.UseCrossCategoryAffix, - CategoryAffixMode: req.CategoryAffixMode, - CategoryAffix: req.CategoryAffix, - UseCustomCategory: req.UseCustomCategory, - CustomCategory: req.CustomCategory, - RunExternalProgramID: req.RunExternalProgramID, - SkipRecheck: req.SkipRecheck, - GazelleEnabled: req.GazelleEnabled, - RedactedAPIKey: strings.TrimSpace(req.RedactedAPIKey), - OrpheusAPIKey: strings.TrimSpace(req.OrpheusAPIKey), - } - - updated, err := h.service.UpdateAutomationSettings(r.Context(), settings) + updated, err := h.service.UpdateAutomationSettings(r.Context(), &merged) if err != nil { status := mapCrossSeedErrorStatus(err) log.Error().Err(err).Msg("Failed to update cross-seed automation settings") @@ -888,6 +1015,9 @@ func (h *CrossSeedHandler) PatchAutomationSettings(w http.ResponseWriter, r *htt RespondError(w, http.StatusBadRequest, "No fields provided to update") return } + if req.MaxMissingBytesAfterRecheck != nil && !validateMaxMissingBytesAfterRecheck(w, *req.MaxMissingBytesAfterRecheck) { + return + } // Log what the API received for debugging source filter issues if req.RSSSourceCategories != nil || req.RSSSourceExcludeCategories != nil || @@ -918,48 +1048,7 @@ func (h *CrossSeedHandler) PatchAutomationSettings(w 
http.ResponseWriter, r *htt merged := *current applyAutomationSettingsPatch(&merged, req) - if merged.CategoryAffix != "" { - // No backslashes allowed - if strings.Contains(merged.CategoryAffix, "\\") { - RespondError(w, http.StatusBadRequest, "Category affix cannot contain backslashes") - return - } - // No double slashes allowed - if strings.Contains(merged.CategoryAffix, "//") { - RespondError(w, http.StatusBadRequest, "Category affix cannot contain double slashes") - return - } - // Prefix mode: cannot start with a slash (would create leading slash in category) - if merged.CategoryAffixMode == models.CategoryAffixModePrefix && strings.HasPrefix(merged.CategoryAffix, "/") { - RespondError(w, http.StatusBadRequest, "Category prefix cannot start with a slash") - return - } - // Suffix mode: cannot end with a slash (would create trailing slash in category) - if merged.CategoryAffixMode == models.CategoryAffixModeSuffix && strings.HasSuffix(merged.CategoryAffix, "/") { - RespondError(w, http.StatusBadRequest, "Category suffix cannot end with a slash") - return - } - } - - // Validate categoryAffixMode if UseCrossCategoryAffix is enabled - if merged.UseCrossCategoryAffix && merged.CategoryAffixMode != models.CategoryAffixModePrefix && merged.CategoryAffixMode != models.CategoryAffixModeSuffix { - RespondError(w, http.StatusBadRequest, "Category affix mode must be either 'prefix' or 'suffix'") - return - } - - // Validate mutual exclusivity: category modes are mutually exclusive - enabledModes := 0 - if merged.UseCategoryFromIndexer { - enabledModes++ - } - if merged.UseCrossCategoryAffix { - enabledModes++ - } - if merged.UseCustomCategory { - enabledModes++ - } - if enabledModes > 1 { - RespondError(w, http.StatusBadRequest, "Category modes are mutually exclusive. 
Enable only one of: indexer name, category affix, or custom category.") + if !validateAutomationSettings(w, &merged) { return } diff --git a/internal/api/handlers/crossseed_patch_test.go b/internal/api/handlers/crossseed_patch_test.go index 68993c91c..ded6eb354 100644 --- a/internal/api/handlers/crossseed_patch_test.go +++ b/internal/api/handlers/crossseed_patch_test.go @@ -11,44 +11,46 @@ import ( func TestApplyAutomationSettingsPatch_MergesFields(t *testing.T) { existing := models.CrossSeedAutomationSettings{ - Enabled: false, - RunIntervalMinutes: 120, - StartPaused: true, - Category: new("tv"), - RSSAutomationTags: []string{"old"}, - SeededSearchTags: []string{"old"}, - CompletionSearchTags: []string{"old"}, - WebhookTags: []string{"old"}, - TargetInstanceIDs: []int{1}, - TargetIndexerIDs: []int{2}, - MaxResultsPerRun: 10, - FindIndividualEpisodes: false, - SizeMismatchTolerancePercent: 5.0, - UseCategoryFromIndexer: false, - RunExternalProgramID: new(42), - GazelleEnabled: false, - RedactedAPIKey: "", - OrpheusAPIKey: "", + Enabled: false, + RunIntervalMinutes: 120, + StartPaused: true, + Category: new("tv"), + RSSAutomationTags: []string{"old"}, + SeededSearchTags: []string{"old"}, + CompletionSearchTags: []string{"old"}, + WebhookTags: []string{"old"}, + TargetInstanceIDs: []int{1}, + TargetIndexerIDs: []int{2}, + MaxResultsPerRun: 10, + FindIndividualEpisodes: false, + SizeMismatchTolerancePercent: 5.0, + UseCategoryFromIndexer: false, + RunExternalProgramID: new(42), + AllowReflinkSingleFileSizeMismatch: false, + GazelleEnabled: false, + RedactedAPIKey: "", + OrpheusAPIKey: "", } newCategory := " movies " patch := automationSettingsPatchRequest{ - Enabled: new(true), - RunIntervalMinutes: new(45), - StartPaused: new(false), - Category: optionalString{Set: true, Value: &newCategory}, - RSSAutomationTags: &[]string{"new"}, - SeededSearchTags: &[]string{"new-seeded"}, - TargetInstanceIDs: &[]int{3, 4}, - TargetIndexerIDs: &[]int{7}, - MaxResultsPerRun: 
new(25), - FindIndividualEpisodes: new(true), - SizeMismatchTolerancePercent: new(12.5), - UseCategoryFromIndexer: new(true), - RunExternalProgramID: optionalInt{Set: true, Value: nil}, - GazelleEnabled: new(true), - RedactedAPIKey: new("red-key"), - OrpheusAPIKey: new("ops-key"), + Enabled: new(true), + RunIntervalMinutes: new(45), + StartPaused: new(false), + Category: optionalString{Set: true, Value: &newCategory}, + RSSAutomationTags: &[]string{"new"}, + SeededSearchTags: &[]string{"new-seeded"}, + TargetInstanceIDs: &[]int{3, 4}, + TargetIndexerIDs: &[]int{7}, + MaxResultsPerRun: new(25), + FindIndividualEpisodes: new(true), + SizeMismatchTolerancePercent: new(12.5), + UseCategoryFromIndexer: new(true), + RunExternalProgramID: optionalInt{Set: true, Value: nil}, + AllowReflinkSingleFileSizeMismatch: new(true), + GazelleEnabled: new(true), + RedactedAPIKey: new("red-key"), + OrpheusAPIKey: new("ops-key"), } applyAutomationSettingsPatch(&existing, patch) @@ -99,6 +101,9 @@ func TestApplyAutomationSettingsPatch_MergesFields(t *testing.T) { if existing.RunExternalProgramID != nil { t.Fatalf("expected runExternalProgramID to be nil") } + if !existing.AllowReflinkSingleFileSizeMismatch { + t.Fatalf("expected allowReflinkSingleFileSizeMismatch to be true") + } if !existing.GazelleEnabled { t.Fatalf("expected gazelleEnabled to be true") } diff --git a/internal/api/handlers/crossseed_settings_validation_test.go b/internal/api/handlers/crossseed_settings_validation_test.go new file mode 100644 index 000000000..7dc469bef --- /dev/null +++ b/internal/api/handlers/crossseed_settings_validation_test.go @@ -0,0 +1,129 @@ +// Copyright (c) 2025-2026, s0up and the autobrr contributors. 
+// SPDX-License-Identifier: GPL-2.0-or-later + +package handlers + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "path/filepath" + "reflect" + "strings" + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/autobrr/qui/internal/database" + "github.com/autobrr/qui/internal/models" + "github.com/autobrr/qui/internal/services/crossseed" +) + +func setCrossSeedServiceField[T any](service *crossseed.Service, fieldName string, value T) { + field := reflect.ValueOf(service).Elem().FieldByName(fieldName) + reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem().Set(reflect.ValueOf(value)) +} + +func TestUpdateAutomationSettings_RejectsSubMiBPooledRecheckLimit(t *testing.T) { + t.Parallel() + + handler := &CrossSeedHandler{} + req := httptest.NewRequestWithContext( + context.Background(), + http.MethodPut, + "/api/cross-seed/settings", + strings.NewReader(`{"maxMissingBytesAfterRecheck":1048575}`), + ) + rec := httptest.NewRecorder() + + handler.UpdateAutomationSettings(rec, req) + + assert.Equal(t, http.StatusBadRequest, rec.Code) + assert.JSONEq(t, `{"error":"maxMissingBytesAfterRecheck must be one MiB or greater"}`, rec.Body.String()) +} + +func TestPatchAutomationSettings_RejectsSubMiBPooledRecheckLimit(t *testing.T) { + t.Parallel() + + handler := &CrossSeedHandler{} + req := httptest.NewRequestWithContext( + context.Background(), + http.MethodPatch, + "/api/cross-seed/settings", + strings.NewReader(`{"maxMissingBytesAfterRecheck":1048575}`), + ) + rec := httptest.NewRecorder() + + handler.PatchAutomationSettings(rec, req) + + assert.Equal(t, http.StatusBadRequest, rec.Code) + assert.JSONEq(t, `{"error":"maxMissingBytesAfterRecheck must be one MiB or greater"}`, rec.Body.String()) +} + +func TestUpdateAutomationSettings_MergesWithExistingSettings(t *testing.T) { + t.Parallel() + + ctx := context.Background() + dbPath := filepath.Join(t.TempDir(), 
"crossseed-settings.db") + db, err := database.New(dbPath) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + store, err := models.NewCrossSeedStore(db, []byte("01234567890123456789012345678901")) + require.NoError(t, err) + + existing := models.DefaultCrossSeedAutomationSettings() + existing.Enabled = false + existing.RunIntervalMinutes = 120 + existing.TargetInstanceIDs = []int{1} + existing.TargetIndexerIDs = []int{2} + existing.RSSSourceCategories = []string{"tv"} + existing.WebhookSourceTags = []string{"keep-webhook-tag"} + existing.RSSAutomationTags = []string{"keep-rss-tag"} + existing.SkipAutoResumeRSS = true + existing.SkipPieceBoundarySafetyCheck = false + existing.MaxMissingBytesAfterRecheck = 200 * 1024 * 1024 + + _, err = store.UpsertSettings(ctx, existing) + require.NoError(t, err) + + service := &crossseed.Service{} + setCrossSeedServiceField(service, "automationStore", store) + handler := &CrossSeedHandler{service: service} + + req := httptest.NewRequestWithContext( + ctx, + http.MethodPut, + "/api/cross-seed/settings", + strings.NewReader(`{ + "enabled": true, + "runIntervalMinutes": 45, + "targetInstanceIds": [9], + "targetIndexerIds": [10] + }`), + ) + rec := httptest.NewRecorder() + + handler.UpdateAutomationSettings(rec, req) + + require.Equal(t, http.StatusOK, rec.Code, rec.Body.String()) + + var updated models.CrossSeedAutomationSettings + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &updated)) + + assert.True(t, updated.Enabled) + assert.Equal(t, 45, updated.RunIntervalMinutes) + assert.Equal(t, []int{9}, updated.TargetInstanceIDs) + assert.Equal(t, []int{10}, updated.TargetIndexerIDs) + assert.Equal(t, []string{"tv"}, updated.RSSSourceCategories) + assert.Equal(t, []string{"keep-webhook-tag"}, updated.WebhookSourceTags) + assert.Equal(t, []string{"keep-rss-tag"}, updated.RSSAutomationTags) + assert.True(t, updated.SkipAutoResumeRSS) + assert.False(t, updated.SkipPieceBoundarySafetyCheck) + 
assert.EqualValues(t, 200*1024*1024, updated.MaxMissingBytesAfterRecheck) +} diff --git a/internal/database/migrations/067_add_cross_seed_partial_pools.sql b/internal/database/migrations/067_add_cross_seed_partial_pools.sql new file mode 100644 index 000000000..15a660a82 --- /dev/null +++ b/internal/database/migrations/067_add_cross_seed_partial_pools.sql @@ -0,0 +1,38 @@ +ALTER TABLE cross_seed_settings ADD COLUMN enable_pooled_partial_completion BOOLEAN NOT NULL DEFAULT 0; +ALTER TABLE cross_seed_settings ADD COLUMN allow_reflink_single_file_size_mismatch BOOLEAN NOT NULL DEFAULT 0; +ALTER TABLE cross_seed_settings ADD COLUMN max_missing_bytes_after_recheck INTEGER NOT NULL DEFAULT 104857600; + +CREATE TABLE IF NOT EXISTS cross_seed_partial_pool_members ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + source_instance_id INTEGER NOT NULL, + source_hash TEXT NOT NULL, + target_instance_id INTEGER NOT NULL, + target_hash TEXT NOT NULL, + target_hash_v2 TEXT, + target_added_on INTEGER NOT NULL DEFAULT 0, + target_name TEXT NOT NULL DEFAULT '', + mode TEXT NOT NULL, + managed_root TEXT NOT NULL, + source_piece_length INTEGER NOT NULL DEFAULT 0, + max_missing_bytes_after_recheck INTEGER NOT NULL DEFAULT 104857600, + source_files_json TEXT NOT NULL DEFAULT '[]', + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + expires_at DATETIME NOT NULL, + UNIQUE(target_instance_id, target_hash) +); + +CREATE INDEX IF NOT EXISTS idx_cross_seed_partial_pool_members_source + ON cross_seed_partial_pool_members(source_instance_id, source_hash); + +CREATE INDEX IF NOT EXISTS idx_cross_seed_partial_pool_members_expires + ON cross_seed_partial_pool_members(expires_at); + +CREATE TRIGGER IF NOT EXISTS trg_cross_seed_partial_pool_members_updated +AFTER UPDATE ON cross_seed_partial_pool_members +FOR EACH ROW +BEGIN + UPDATE cross_seed_partial_pool_members + SET updated_at = CURRENT_TIMESTAMP + WHERE id = NEW.id; +END; diff --git 
a/internal/database/postgres_integration_test.go b/internal/database/postgres_integration_test.go index 517bfaf95..af493a2a1 100644 --- a/internal/database/postgres_integration_test.go +++ b/internal/database/postgres_integration_test.go @@ -35,6 +35,21 @@ func TestOpenPostgres(t *testing.T) { if count == 0 { t.Fatalf("expected at least one postgres migration row, got %d", count) } + + var columnCount int + err := db.QueryRowContext(ctx, ` + SELECT COUNT(*) + FROM information_schema.columns + WHERE table_schema = current_schema() + AND table_name = 'cross_seed_settings' + AND column_name IN ( + 'enable_pooled_partial_completion', + 'allow_reflink_single_file_size_mismatch', + 'max_missing_bytes_after_recheck' + ) + `).Scan(&columnCount) + require.NoError(t, err) + require.Equal(t, 3, columnCount) } func TestCleanupUnusedStringsPostgres(t *testing.T) { diff --git a/internal/database/postgres_migrations/068_add_cross_seed_partial_pools.sql b/internal/database/postgres_migrations/068_add_cross_seed_partial_pools.sql new file mode 100644 index 000000000..e7d09e1cc --- /dev/null +++ b/internal/database/postgres_migrations/068_add_cross_seed_partial_pools.sql @@ -0,0 +1,43 @@ +ALTER TABLE cross_seed_settings + ADD COLUMN enable_pooled_partial_completion BOOLEAN NOT NULL DEFAULT FALSE, + ADD COLUMN allow_reflink_single_file_size_mismatch BOOLEAN NOT NULL DEFAULT FALSE, + ADD COLUMN max_missing_bytes_after_recheck BIGINT NOT NULL DEFAULT 104857600; + +CREATE TABLE cross_seed_partial_pool_members ( + id BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, + source_instance_id BIGINT NOT NULL, + source_hash TEXT NOT NULL, + target_instance_id BIGINT NOT NULL, + target_hash TEXT NOT NULL, + target_hash_v2 TEXT, + target_added_on BIGINT NOT NULL DEFAULT 0, + target_name TEXT NOT NULL DEFAULT '', + mode TEXT NOT NULL, + managed_root TEXT NOT NULL, + source_piece_length BIGINT NOT NULL DEFAULT 0, + max_missing_bytes_after_recheck BIGINT NOT NULL DEFAULT 104857600, + 
source_files_json TEXT NOT NULL DEFAULT '[]', + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + expires_at TIMESTAMP NOT NULL, + UNIQUE(target_instance_id, target_hash) +); + +CREATE INDEX idx_cross_seed_partial_pool_members_source + ON cross_seed_partial_pool_members(source_instance_id, source_hash); + +CREATE INDEX idx_cross_seed_partial_pool_members_expires + ON cross_seed_partial_pool_members(expires_at); + +CREATE OR REPLACE FUNCTION set_cross_seed_partial_pool_members_updated_at() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = CURRENT_TIMESTAMP; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trg_cross_seed_partial_pool_members_updated +BEFORE UPDATE ON cross_seed_partial_pool_members +FOR EACH ROW +EXECUTE FUNCTION set_cross_seed_partial_pool_members_updated_at(); diff --git a/internal/models/crossseed.go b/internal/models/crossseed.go index 8c8d7c6b9..b93508a00 100644 --- a/internal/models/crossseed.go +++ b/internal/models/crossseed.go @@ -27,6 +27,10 @@ const ( CategoryAffixModeSuffix = "suffix" ) +// MaxMissingBytesAfterRecheckDefault is the default maximum missing bytes +// allowed after recheck for pooled reflink automation (100 MiB). +const MaxMissingBytesAfterRecheckDefault = 100 * 1024 * 1024 + // CrossSeedAutomationSettings controls automatic cross-seed behaviour. // Contains both RSS Automation-specific settings and global cross-seed settings. type CrossSeedAutomationSettings struct { @@ -77,12 +81,15 @@ type CrossSeedAutomationSettings struct { // Skip auto-resume settings per source mode. // When enabled, torrents remain paused after hash check instead of auto-resuming. 
- SkipAutoResumeRSS bool `json:"skipAutoResumeRss"` // Skip auto-resume for RSS automation results - SkipAutoResumeSeededSearch bool `json:"skipAutoResumeSeededSearch"` // Skip auto-resume for seeded torrent search results - SkipAutoResumeCompletion bool `json:"skipAutoResumeCompletion"` // Skip auto-resume for completion-triggered search results - SkipAutoResumeWebhook bool `json:"skipAutoResumeWebhook"` // Skip auto-resume for /apply webhook results - SkipRecheck bool `json:"skipRecheck"` // Skip cross-seed matches that require a recheck - SkipPieceBoundarySafetyCheck bool `json:"skipPieceBoundarySafetyCheck"` // Skip piece boundary safety check (risky: may corrupt existing seeded data) + SkipAutoResumeRSS bool `json:"skipAutoResumeRss"` // Skip auto-resume for RSS automation results + SkipAutoResumeSeededSearch bool `json:"skipAutoResumeSeededSearch"` // Skip auto-resume for seeded torrent search results + SkipAutoResumeCompletion bool `json:"skipAutoResumeCompletion"` // Skip auto-resume for completion-triggered search results + SkipAutoResumeWebhook bool `json:"skipAutoResumeWebhook"` // Skip auto-resume for /apply webhook results + SkipRecheck bool `json:"skipRecheck"` // Skip cross-seed matches that require a recheck + SkipPieceBoundarySafetyCheck bool `json:"skipPieceBoundarySafetyCheck"` // Skip piece boundary safety check (risky: may corrupt existing seeded data) + EnablePooledPartialCompletion bool `json:"enablePooledPartialCompletion"` // Coordinate related managed partial cross-seeds as a temporary shared pool + AllowReflinkSingleFileSizeMismatch bool `json:"allowReflinkSingleFileSizeMismatch"` // In reflink mode, allow single-file size mismatches when normalized names still match and resume once recheck reaches 99% + MaxMissingBytesAfterRecheck int64 `json:"maxMissingBytesAfterRecheck"` // Maximum missing bytes after recheck to keep pooled automation active // Gazelle (OPS/RED) cross-seed settings. 
// When enabled, qui uses the tracker JSON APIs to find matches for OPS/RED torrents @@ -143,17 +150,20 @@ func DefaultCrossSeedAutomationSettings() *CrossSeedAutomationSettings { UseCustomCategory: false, CustomCategory: "", // Skip auto-resume - default to false to preserve existing behavior - SkipAutoResumeRSS: false, - SkipAutoResumeSeededSearch: false, - SkipAutoResumeCompletion: false, - SkipAutoResumeWebhook: false, - SkipRecheck: false, - SkipPieceBoundarySafetyCheck: true, // Skip by default to maximize matches - GazelleEnabled: false, - RedactedAPIKey: "", - OrpheusAPIKey: "", - CreatedAt: time.Now().UTC(), - UpdatedAt: time.Now().UTC(), + SkipAutoResumeRSS: false, + SkipAutoResumeSeededSearch: false, + SkipAutoResumeCompletion: false, + SkipAutoResumeWebhook: false, + SkipRecheck: false, + SkipPieceBoundarySafetyCheck: true, // Skip by default to maximize matches + EnablePooledPartialCompletion: false, + AllowReflinkSingleFileSizeMismatch: false, + MaxMissingBytesAfterRecheck: MaxMissingBytesAfterRecheckDefault, + GazelleEnabled: false, + RedactedAPIKey: "", + OrpheusAPIKey: "", + CreatedAt: time.Now().UTC(), + UpdatedAt: time.Now().UTC(), } } @@ -387,6 +397,7 @@ func (s *CrossSeedStore) GetSettings(ctx context.Context) (*CrossSeedAutomationS skip_auto_resume_rss, skip_auto_resume_seeded_search, skip_auto_resume_completion, skip_auto_resume_webhook, skip_recheck, skip_piece_boundary_safety_check, + enable_pooled_partial_completion, allow_reflink_single_file_size_mismatch, max_missing_bytes_after_recheck, gazelle_enabled, redacted_api_key_encrypted, orpheus_api_key_encrypted, created_at, updated_at FROM cross_seed_settings @@ -407,6 +418,8 @@ func (s *CrossSeedStore) GetSettings(ctx context.Context) (*CrossSeedAutomationS var inheritSourceTags, useCrossCategoryAffix, useCustomCategory int var skipAutoResumeRSS, skipAutoResumeSeededSearch, skipAutoResumeCompletion, skipAutoResumeWebhook int var skipRecheck, skipPieceBoundarySafetyCheck int + var 
enablePooledPartialCompletion int + var allowReflinkSingleFileSizeMismatch int var gazelleEnabled int var redactedAPIKeyEncrypted, orpheusAPIKeyEncrypted sql.NullString var createdAt, updatedAt sql.NullTime @@ -447,6 +460,9 @@ func (s *CrossSeedStore) GetSettings(ctx context.Context) (*CrossSeedAutomationS &skipAutoResumeWebhook, &skipRecheck, &skipPieceBoundarySafetyCheck, + &enablePooledPartialCompletion, + &allowReflinkSingleFileSizeMismatch, + &settings.MaxMissingBytesAfterRecheck, &gazelleEnabled, &redactedAPIKeyEncrypted, &orpheusAPIKeyEncrypted, @@ -539,6 +555,8 @@ func (s *CrossSeedStore) GetSettings(ctx context.Context) (*CrossSeedAutomationS settings.SkipAutoResumeWebhook = SQLiteIntToBool(skipAutoResumeWebhook) settings.SkipRecheck = SQLiteIntToBool(skipRecheck) settings.SkipPieceBoundarySafetyCheck = SQLiteIntToBool(skipPieceBoundarySafetyCheck) + settings.EnablePooledPartialCompletion = SQLiteIntToBool(enablePooledPartialCompletion) + settings.AllowReflinkSingleFileSizeMismatch = SQLiteIntToBool(allowReflinkSingleFileSizeMismatch) settings.GazelleEnabled = SQLiteIntToBool(gazelleEnabled) if redactedAPIKeyEncrypted.Valid { settings.RedactedAPIKey = s.apiKeyRedacted(redactedAPIKeyEncrypted.String) @@ -730,9 +748,10 @@ func (s *CrossSeedStore) UpsertSettings(ctx context.Context, settings *CrossSeed skip_auto_resume_rss, skip_auto_resume_seeded_search, skip_auto_resume_completion, skip_auto_resume_webhook, skip_recheck, skip_piece_boundary_safety_check, + enable_pooled_partial_completion, allow_reflink_single_file_size_mismatch, max_missing_bytes_after_recheck, gazelle_enabled, redacted_api_key_encrypted, orpheus_api_key_encrypted ) VALUES ( - ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? + ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) ON CONFLICT(id) DO UPDATE SET enabled = excluded.enabled, @@ -770,6 +789,9 @@ func (s *CrossSeedStore) UpsertSettings(ctx context.Context, settings *CrossSeed skip_auto_resume_webhook = excluded.skip_auto_resume_webhook, skip_recheck = excluded.skip_recheck, skip_piece_boundary_safety_check = excluded.skip_piece_boundary_safety_check, + enable_pooled_partial_completion = excluded.enable_pooled_partial_completion, + allow_reflink_single_file_size_mismatch = excluded.allow_reflink_single_file_size_mismatch, + max_missing_bytes_after_recheck = excluded.max_missing_bytes_after_recheck, gazelle_enabled = excluded.gazelle_enabled, redacted_api_key_encrypted = excluded.redacted_api_key_encrypted, orpheus_api_key_encrypted = excluded.orpheus_api_key_encrypted @@ -823,6 +845,9 @@ func (s *CrossSeedStore) UpsertSettings(ctx context.Context, settings *CrossSeed BoolToSQLite(settings.SkipAutoResumeWebhook), BoolToSQLite(settings.SkipRecheck), BoolToSQLite(settings.SkipPieceBoundarySafetyCheck), + BoolToSQLite(settings.EnablePooledPartialCompletion), + BoolToSQLite(settings.AllowReflinkSingleFileSizeMismatch), + settings.MaxMissingBytesAfterRecheck, BoolToSQLite(settings.GazelleEnabled), redactedAPIKeyEncrypted, orpheusAPIKeyEncrypted, diff --git a/internal/models/crossseed_partial_pool.go b/internal/models/crossseed_partial_pool.go new file mode 100644 index 000000000..ed8d9bb58 --- /dev/null +++ b/internal/models/crossseed_partial_pool.go @@ -0,0 +1,314 @@ +// Copyright (c) 2025-2026, s0up and the autobrr contributors. +// SPDX-License-Identifier: GPL-2.0-or-later + +package models + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/autobrr/qui/internal/dbinterface" +) + +const ( + CrossSeedPartialMemberModeHardlink = "hardlink" + CrossSeedPartialMemberModeReflink = "reflink" +) + +// CrossSeedPartialFile stores the pooled member's exact target-side file path and size. 
+// Key is the shared normalized identity used across layout variants. +type CrossSeedPartialFile struct { + Name string `json:"name"` + Size int64 `json:"size"` + Key string `json:"key,omitempty"` +} + +// CrossSeedPartialPoolMember stores the lightweight persisted marker for an active pooled member. +type CrossSeedPartialPoolMember struct { + ID int64 `json:"id"` + SourceInstanceID int `json:"sourceInstanceId"` + SourceHash string `json:"sourceHash"` + TargetInstanceID int `json:"targetInstanceId"` + TargetHash string `json:"targetHash"` + TargetHashV2 string `json:"targetHashV2,omitempty"` + TargetAddedOn int64 `json:"targetAddedOn"` + TargetName string `json:"targetName"` + Mode string `json:"mode"` + ManagedRoot string `json:"managedRoot"` + SourcePieceLength int64 `json:"sourcePieceLength"` + MaxMissingBytesAfterRecheck int64 `json:"maxMissingBytesAfterRecheck"` + SourceFiles []CrossSeedPartialFile `json:"sourceFiles"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt time.Time `json:"expiresAt"` +} + +// CrossSeedPartialPoolMemberStore persists only active pooled members so pools can be rebuilt after restart. 
+// CrossSeedPartialPoolMemberStore persists pooled partial-completion members
+// for cross-seed hardlink/reflink automation. Info-hashes are stored
+// upper-cased so v1/v2 lookups are case-insensitive.
+type CrossSeedPartialPoolMemberStore struct {
+	db dbinterface.Querier
+}
+
+// NewCrossSeedPartialPoolMemberStore returns a store backed by db.
+// It panics when db is nil, since every method would otherwise fail.
+func NewCrossSeedPartialPoolMemberStore(db dbinterface.Querier) *CrossSeedPartialPoolMemberStore {
+	if db == nil {
+		panic("db cannot be nil")
+	}
+	return &CrossSeedPartialPoolMemberStore{db: db}
+}
+
+// normalizePoolHashes trims, upper-cases, and de-duplicates hashes, dropping
+// blanks. Shared by GetByAnyHash and DeleteByAnyHash so both normalize
+// identically and cannot drift apart.
+func normalizePoolHashes(hashes []string) []string {
+	normalized := make([]string, 0, len(hashes))
+	seen := make(map[string]struct{}, len(hashes))
+	for _, hash := range hashes {
+		hash = strings.ToUpper(strings.TrimSpace(hash))
+		if hash == "" {
+			continue
+		}
+		if _, ok := seen[hash]; ok {
+			continue
+		}
+		seen[hash] = struct{}{}
+		normalized = append(normalized, hash)
+	}
+	return normalized
+}
+
+// poolHashPlaceholders returns a "?,?,...,?" SQL fragment with n placeholders.
+func poolHashPlaceholders(n int) string {
+	return strings.TrimSuffix(strings.Repeat("?,", n), ",")
+}
+
+// Upsert inserts or replaces the member keyed by (target_instance_id,
+// target_hash). It validates and normalizes the input, defaults a zero
+// ExpiresAt to 24h from now, and returns the stored row re-read from the
+// database so callers see DB-populated fields (id, timestamps).
+func (s *CrossSeedPartialPoolMemberStore) Upsert(ctx context.Context, member *CrossSeedPartialPoolMember) (*CrossSeedPartialPoolMember, error) {
+	if member == nil {
+		return nil, errors.New("member cannot be nil")
+	}
+
+	// Work on a copy so the caller's struct is never mutated.
+	normalized := *member
+	normalized.SourceHash = strings.ToUpper(strings.TrimSpace(normalized.SourceHash))
+	normalized.TargetHash = strings.ToUpper(strings.TrimSpace(normalized.TargetHash))
+	normalized.TargetHashV2 = strings.ToUpper(strings.TrimSpace(normalized.TargetHashV2))
+	normalized.TargetName = strings.TrimSpace(normalized.TargetName)
+	normalized.Mode = strings.TrimSpace(normalized.Mode)
+	normalized.ManagedRoot = strings.TrimSpace(normalized.ManagedRoot)
+
+	if normalized.SourceInstanceID <= 0 || normalized.TargetInstanceID <= 0 {
+		return nil, errors.New("instance ids must be positive")
+	}
+	if normalized.SourceHash == "" || normalized.TargetHash == "" {
+		return nil, errors.New("source and target hashes are required")
+	}
+	if normalized.Mode != CrossSeedPartialMemberModeHardlink && normalized.Mode != CrossSeedPartialMemberModeReflink {
+		return nil, fmt.Errorf("invalid pooled member mode %q", normalized.Mode)
+	}
+	if len(normalized.SourceFiles) == 0 {
+		return nil, errors.New("source files are required")
+	}
+	if normalized.ExpiresAt.IsZero() {
+		normalized.ExpiresAt = time.Now().UTC().Add(24 * time.Hour)
+	}
+
+	filesJSON, err := json.Marshal(normalized.SourceFiles)
+	if err != nil {
+		return nil, fmt.Errorf("marshal source files: %w", err)
+	}
+
+	const stmt = `
+		INSERT INTO cross_seed_partial_pool_members (
+			source_instance_id, source_hash, target_instance_id, target_hash, target_hash_v2,
+			target_added_on, target_name, mode, managed_root,
+			source_piece_length, max_missing_bytes_after_recheck, source_files_json, expires_at
+		) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+		ON CONFLICT(target_instance_id, target_hash) DO UPDATE SET
+			source_instance_id = excluded.source_instance_id,
+			source_hash = excluded.source_hash,
+			target_hash_v2 = excluded.target_hash_v2,
+			target_added_on = excluded.target_added_on,
+			target_name = excluded.target_name,
+			mode = excluded.mode,
+			managed_root = excluded.managed_root,
+			source_piece_length = excluded.source_piece_length,
+			max_missing_bytes_after_recheck = excluded.max_missing_bytes_after_recheck,
+			source_files_json = excluded.source_files_json,
+			expires_at = excluded.expires_at
+	`
+
+	if _, err := s.db.ExecContext(ctx, stmt,
+		normalized.SourceInstanceID,
+		normalized.SourceHash,
+		normalized.TargetInstanceID,
+		normalized.TargetHash,
+		nullIfBlank(normalized.TargetHashV2),
+		normalized.TargetAddedOn,
+		normalized.TargetName,
+		normalized.Mode,
+		normalized.ManagedRoot,
+		normalized.SourcePieceLength,
+		normalized.MaxMissingBytesAfterRecheck,
+		string(filesJSON),
+		normalized.ExpiresAt,
+	); err != nil {
+		return nil, fmt.Errorf("upsert pooled member: %w", err)
+	}
+
+	// NOTE(review): GetByAnyHash filters on expires_at > now, so a caller that
+	// supplies an ExpiresAt already in the past gets (nil, nil) here with no
+	// error — callers must treat a nil member as "not active". Confirm this is
+	// the intended contract.
+	return s.GetByAnyHash(ctx, normalized.TargetInstanceID, normalized.TargetHash, normalized.TargetHashV2)
+}
+
+// ListActive returns all members whose expires_at is strictly after now,
+// ordered deterministically for stable iteration by callers.
+func (s *CrossSeedPartialPoolMemberStore) ListActive(ctx context.Context, now time.Time) ([]*CrossSeedPartialPoolMember, error) {
+	const query = `
+		SELECT id, source_instance_id, source_hash, target_instance_id, target_hash, target_hash_v2, target_added_on,
+			target_name, mode, managed_root, source_piece_length, max_missing_bytes_after_recheck, source_files_json,
+			created_at, updated_at, expires_at
+		FROM cross_seed_partial_pool_members
+		WHERE expires_at > ?
+		ORDER BY source_instance_id, source_hash, target_instance_id, target_hash
+	`
+
+	rows, err := s.db.QueryContext(ctx, query, now.UTC())
+	if err != nil {
+		return nil, fmt.Errorf("list active pooled members: %w", err)
+	}
+	defer rows.Close()
+
+	var members []*CrossSeedPartialPoolMember
+	for rows.Next() {
+		member, scanErr := scanCrossSeedPartialPoolMember(rows)
+		if scanErr != nil {
+			return nil, scanErr
+		}
+		members = append(members, member)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, fmt.Errorf("iterate active pooled members: %w", err)
+	}
+	return members, nil
+}
+
+// GetByAnyHash returns the single active (non-expired) member on instanceID
+// whose v1 or v2 info-hash matches any of hashes. It returns (nil, nil) when
+// no hash matches or all supplied hashes are blank.
+func (s *CrossSeedPartialPoolMemberStore) GetByAnyHash(ctx context.Context, instanceID int, hashes ...string) (*CrossSeedPartialPoolMember, error) {
+	if instanceID <= 0 {
+		return nil, errors.New("instance id must be positive")
+	}
+
+	normalized := normalizePoolHashes(hashes)
+	if len(normalized) == 0 {
+		return nil, nil
+	}
+
+	placeholders := poolHashPlaceholders(len(normalized))
+	query := fmt.Sprintf(`
+		SELECT id, source_instance_id, source_hash, target_instance_id, target_hash, target_hash_v2, target_added_on,
+			target_name, mode, managed_root, source_piece_length, max_missing_bytes_after_recheck, source_files_json,
+			created_at, updated_at, expires_at
+		FROM cross_seed_partial_pool_members
+		WHERE target_instance_id = ? AND expires_at > ?
+			AND (target_hash IN (%s) OR target_hash_v2 IN (%s))
+		LIMIT 1
+	`, placeholders, placeholders)
+
+	args := make([]any, 0, 2+len(normalized)*2)
+	args = append(args, instanceID, time.Now().UTC())
+	// The hash list is bound twice: once for the v1 column, once for the v2 column.
+	for range 2 {
+		for _, hash := range normalized {
+			args = append(args, hash)
+		}
+	}
+
+	row := s.db.QueryRowContext(ctx, query, args...)
+	member, err := scanCrossSeedPartialPoolMember(row)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return member, nil
+}
+
+// DeleteByAnyHash removes any member on instanceID whose v1 or v2 info-hash
+// matches any of hashes. Blank/duplicate hashes are ignored; deleting a
+// non-existent member is not an error.
+func (s *CrossSeedPartialPoolMemberStore) DeleteByAnyHash(ctx context.Context, instanceID int, hashes ...string) error {
+	if instanceID <= 0 {
+		return errors.New("instance id must be positive")
+	}
+
+	normalized := normalizePoolHashes(hashes)
+	if len(normalized) == 0 {
+		return nil
+	}
+
+	placeholders := poolHashPlaceholders(len(normalized))
+	stmt := fmt.Sprintf(`
+		DELETE FROM cross_seed_partial_pool_members
+		WHERE target_instance_id = ?
+			AND (target_hash IN (%s) OR target_hash_v2 IN (%s))
+	`, placeholders, placeholders)
+
+	args := make([]any, 0, 1+len(normalized)*2)
+	args = append(args, instanceID)
+	for range 2 {
+		for _, hash := range normalized {
+			args = append(args, hash)
+		}
+	}
+	if _, err := s.db.ExecContext(ctx, stmt, args...); err != nil {
+		return fmt.Errorf("delete pooled member: %w", err)
+	}
+	return nil
+}
+
+// DeleteExpired removes every member whose expires_at is at or before now and
+// reports how many rows were deleted.
+func (s *CrossSeedPartialPoolMemberStore) DeleteExpired(ctx context.Context, now time.Time) (int64, error) {
+	result, err := s.db.ExecContext(ctx, `DELETE FROM cross_seed_partial_pool_members WHERE expires_at <= ?`, now.UTC())
+	if err != nil {
+		return 0, fmt.Errorf("delete expired pooled members: %w", err)
+	}
+	rows, err := result.RowsAffected()
+	if err != nil {
+		return 0, fmt.Errorf("delete expired pooled members rows affected: %w", err)
+	}
+	return rows, nil
+}
+
+// scanCrossSeedPartialPoolMember decodes one row (sql.Row or sql.Rows) into a
+// member, mapping a NULL target_hash_v2 to "" and unmarshalling the source
+// files JSON payload.
+func scanCrossSeedPartialPoolMember(scanner interface{ Scan(dest ...any) error }) (*CrossSeedPartialPoolMember, error) {
+	var (
+		member       CrossSeedPartialPoolMember
+		targetHashV2 sql.NullString
+		sourceFiles  string
+	)
+	if err := scanner.Scan(
+		&member.ID,
+		&member.SourceInstanceID,
+		&member.SourceHash,
+		&member.TargetInstanceID,
+		&member.TargetHash,
+		&targetHashV2,
+		&member.TargetAddedOn,
+		&member.TargetName,
+		&member.Mode,
+		&member.ManagedRoot,
+		&member.SourcePieceLength,
+		&member.MaxMissingBytesAfterRecheck,
+		&sourceFiles,
+		&member.CreatedAt,
+		&member.UpdatedAt,
+		&member.ExpiresAt,
+	); err != nil {
+		return nil, err
+	}
+	if targetHashV2.Valid {
+		member.TargetHashV2 = targetHashV2.String
+	}
+	if err := json.Unmarshal([]byte(sourceFiles), &member.SourceFiles); err != nil {
+		return nil, fmt.Errorf("decode source files json: %w", err)
+	}
+	return &member, nil
+}
+
+// nullIfBlank maps a blank (empty/whitespace-only) string to SQL NULL so the
+// optional target_hash_v2 column stores NULL rather than "".
+func nullIfBlank(value string) any {
+	trimmed := strings.TrimSpace(value)
+	if trimmed == "" {
+		return nil
+	}
+	return trimmed
+}
diff --git 
a/internal/models/crossseed_partial_pool_test.go b/internal/models/crossseed_partial_pool_test.go new file mode 100644 index 000000000..9fd082222 --- /dev/null +++ b/internal/models/crossseed_partial_pool_test.go @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +package models_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/autobrr/qui/internal/models" +) + +func TestCrossSeedPartialPoolMemberStore_RoundTripAndPrune(t *testing.T) { + db := setupCrossSeedTestDB(t) + store := models.NewCrossSeedPartialPoolMemberStore(db) + ctx := context.Background() + + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 2, + TargetHash: "targethash", + TargetHashV2: "targethashv2", + TargetAddedOn: 12345, + TargetName: "Test Torrent", + Mode: models.CrossSeedPartialMemberModeHardlink, + ManagedRoot: t.TempDir(), + SourcePieceLength: 1024, + MaxMissingBytesAfterRecheck: 512, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "data/file.mkv", Size: 1234, Key: "file"}, + {Name: "data/file.nfo", Size: 56, Key: "file"}, + }, + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + + stored, err := store.Upsert(ctx, member) + require.NoError(t, err) + require.NotNil(t, stored) + assert.Equal(t, "SOURCEHASH", stored.SourceHash) + assert.Equal(t, "TARGETHASH", stored.TargetHash) + assert.Equal(t, "TARGETHASHV2", stored.TargetHashV2) + assert.EqualValues(t, 12345, stored.TargetAddedOn) + + loaded, err := store.GetByAnyHash(ctx, 2, "targethashv2") + require.NoError(t, err) + require.NotNil(t, loaded) + assert.Equal(t, stored.ID, loaded.ID) + assert.Len(t, loaded.SourceFiles, 2) + assert.Equal(t, member.SourceFiles[0].Key, loaded.SourceFiles[0].Key) + assert.EqualValues(t, 512, loaded.MaxMissingBytesAfterRecheck) + assert.EqualValues(t, 12345, loaded.TargetAddedOn) + + active, err := store.ListActive(ctx, 
time.Now().UTC()) + require.NoError(t, err) + require.Len(t, active, 1) + assert.Equal(t, stored.ID, active[0].ID) + + rows, err := store.DeleteExpired(ctx, time.Now().UTC().Add(2*time.Hour)) + require.NoError(t, err) + assert.EqualValues(t, 1, rows) + + active, err = store.ListActive(ctx, time.Now().UTC()) + require.NoError(t, err) + assert.Empty(t, active) +} + +func TestCrossSeedPartialPoolMemberStore_DeleteByAnyHash(t *testing.T) { + db := setupCrossSeedTestDB(t) + store := models.NewCrossSeedPartialPoolMemberStore(db) + ctx := context.Background() + + _, err := store.Upsert(ctx, &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 2, + TargetHash: "targethash", + TargetHashV2: "targethashv2", + TargetName: "Test Torrent", + Mode: models.CrossSeedPartialMemberModeReflink, + ManagedRoot: t.TempDir(), + SourcePieceLength: 1024, + MaxMissingBytesAfterRecheck: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "data/file.mkv", Size: 1234}, + }, + ExpiresAt: time.Now().UTC().Add(time.Hour), + }) + require.NoError(t, err) + + require.NoError(t, store.DeleteByAnyHash(ctx, 2, "targethashv2")) + + loaded, err := store.GetByAnyHash(ctx, 2, "targethash", "targethashv2") + require.NoError(t, err) + assert.Nil(t, loaded) +} + +func TestCrossSeedPartialPoolMemberStore_GetByAnyHash_WithMoreThanTwoHashes(t *testing.T) { + db := setupCrossSeedTestDB(t) + store := models.NewCrossSeedPartialPoolMemberStore(db) + ctx := context.Background() + + stored, err := store.Upsert(ctx, &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 2, + TargetHash: "targethash", + TargetHashV2: "targethashv2", + TargetName: "Test Torrent", + Mode: models.CrossSeedPartialMemberModeReflink, + ManagedRoot: t.TempDir(), + SourcePieceLength: 1024, + MaxMissingBytesAfterRecheck: 1024, + SourceFiles: []models.CrossSeedPartialFile{{Name: "data/file.mkv", Size: 1234}}, + ExpiresAt: 
time.Now().UTC().Add(time.Hour), + }) + require.NoError(t, err) + + loaded, err := store.GetByAnyHash(ctx, 2, "does-not-exist", "also-missing", "targethashv2") + require.NoError(t, err) + require.NotNil(t, loaded) + assert.Equal(t, stored.ID, loaded.ID) +} + +func TestCrossSeedPartialPoolMemberStore_GetByAnyHash_IgnoresExpiredMembers(t *testing.T) { + db := setupCrossSeedTestDB(t) + store := models.NewCrossSeedPartialPoolMemberStore(db) + ctx := context.Background() + + _, err := store.Upsert(ctx, &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 2, + TargetHash: "targethash", + TargetHashV2: "targethashv2", + TargetName: "Expired Torrent", + Mode: models.CrossSeedPartialMemberModeHardlink, + ManagedRoot: t.TempDir(), + SourcePieceLength: 1024, + MaxMissingBytesAfterRecheck: 1024, + SourceFiles: []models.CrossSeedPartialFile{{Name: "data/file.mkv", Size: 1234}}, + ExpiresAt: time.Now().UTC().Add(-time.Hour), + }) + require.NoError(t, err) + + loaded, err := store.GetByAnyHash(ctx, 2, "targethash", "targethashv2") + require.NoError(t, err) + assert.Nil(t, loaded) +} + +func TestCrossSeedPartialPoolMemberStore_DeleteByAnyHash_WithMoreThanTwoHashes(t *testing.T) { + db := setupCrossSeedTestDB(t) + store := models.NewCrossSeedPartialPoolMemberStore(db) + ctx := context.Background() + + _, err := store.Upsert(ctx, &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 2, + TargetHash: "targethash", + TargetHashV2: "targethashv2", + TargetName: "Test Torrent", + Mode: models.CrossSeedPartialMemberModeHardlink, + ManagedRoot: t.TempDir(), + SourcePieceLength: 1024, + MaxMissingBytesAfterRecheck: 1024, + SourceFiles: []models.CrossSeedPartialFile{{Name: "data/file.mkv", Size: 1234}}, + ExpiresAt: time.Now().UTC().Add(time.Hour), + }) + require.NoError(t, err) + + require.NoError(t, store.DeleteByAnyHash(ctx, 2, "does-not-exist", "also-missing", "targethash")) + + 
loaded, err := store.GetByAnyHash(ctx, 2, "targethash", "targethashv2") + require.NoError(t, err) + assert.Nil(t, loaded) +} diff --git a/internal/models/crossseed_test.go b/internal/models/crossseed_test.go index d75f6a063..2f1e1f7b0 100644 --- a/internal/models/crossseed_test.go +++ b/internal/models/crossseed_test.go @@ -81,17 +81,20 @@ func TestCrossSeedStore_SettingsRoundTrip(t *testing.T) { category := "TV" updated, err := store.UpsertSettings(ctx, &models.CrossSeedAutomationSettings{ - Enabled: true, - RunIntervalMinutes: 30, - StartPaused: false, - Category: &category, - RSSAutomationTags: []string{"cross-seed", "automation"}, - SeededSearchTags: []string{"seeded"}, - CompletionSearchTags: []string{"completion"}, - WebhookTags: []string{"webhook"}, - TargetInstanceIDs: []int{1, 2}, - TargetIndexerIDs: []int{11, 42}, - MaxResultsPerRun: 25, + Enabled: true, + RunIntervalMinutes: 30, + StartPaused: false, + Category: &category, + RSSAutomationTags: []string{"cross-seed", "automation"}, + SeededSearchTags: []string{"seeded"}, + CompletionSearchTags: []string{"completion"}, + WebhookTags: []string{"webhook"}, + TargetInstanceIDs: []int{1, 2}, + TargetIndexerIDs: []int{11, 42}, + MaxResultsPerRun: 25, + EnablePooledPartialCompletion: true, + AllowReflinkSingleFileSizeMismatch: true, + MaxMissingBytesAfterRecheck: 200 * 1024 * 1024, }) require.NoError(t, err) @@ -107,6 +110,9 @@ func TestCrossSeedStore_SettingsRoundTrip(t *testing.T) { assert.ElementsMatch(t, []int{1, 2}, updated.TargetInstanceIDs) assert.ElementsMatch(t, []int{11, 42}, updated.TargetIndexerIDs) assert.Equal(t, 25, updated.MaxResultsPerRun) + assert.True(t, updated.EnablePooledPartialCompletion) + assert.True(t, updated.AllowReflinkSingleFileSizeMismatch) + assert.EqualValues(t, 200*1024*1024, updated.MaxMissingBytesAfterRecheck) reloaded, err := store.GetSettings(ctx) require.NoError(t, err) diff --git a/internal/models/postgres_bool_args_test.go b/internal/models/postgres_bool_args_test.go 
index 3b7f71ef5..ad491f2e3 100644 --- a/internal/models/postgres_bool_args_test.go +++ b/internal/models/postgres_bool_args_test.go @@ -399,6 +399,9 @@ func TestCrossSeedUpsertSettingsUsesIntegerBooleanArgs(t *testing.T) { skip_auto_resume_webhook INTEGER NOT NULL DEFAULT 0, skip_recheck INTEGER NOT NULL DEFAULT 0, skip_piece_boundary_safety_check INTEGER NOT NULL DEFAULT 1, + enable_pooled_partial_completion INTEGER NOT NULL DEFAULT 0, + allow_reflink_single_file_size_mismatch INTEGER NOT NULL DEFAULT 0, + max_missing_bytes_after_recheck INTEGER NOT NULL DEFAULT 104857600, gazelle_enabled INTEGER NOT NULL DEFAULT 0, redacted_api_key_encrypted TEXT NOT NULL DEFAULT '', orpheus_api_key_encrypted TEXT NOT NULL DEFAULT '', @@ -426,9 +429,9 @@ func TestCrossSeedUpsertSettingsUsesIntegerBooleanArgs(t *testing.T) { _, err = store.UpsertSettings(context.Background(), settings) require.NoError(t, err) - require.Len(t, insertArgs, 39) + require.Len(t, insertArgs, 42) - boolIndexes := []int{1, 3, 16, 18, 24, 25, 28, 30, 31, 32, 33, 34, 35, 36} + boolIndexes := []int{1, 3, 16, 18, 24, 25, 28, 30, 31, 32, 33, 34, 35, 36, 37, 39} for _, idx := range boolIndexes { _, ok := insertArgs[idx].(int) require.Truef(t, ok, "expected int arg at index %d, got %T", idx, insertArgs[idx]) diff --git a/internal/services/crossseed/ensure_cross_category_test.go b/internal/services/crossseed/ensure_cross_category_test.go new file mode 100644 index 000000000..b67ca42c0 --- /dev/null +++ b/internal/services/crossseed/ensure_cross_category_test.go @@ -0,0 +1,85 @@ +// Copyright (c) 2025-2026, s0up and the autobrr contributors. 
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package crossseed
+
+import (
+	"context"
+	"maps"
+	"sync"
+	"testing"
+
+	qbt "github.com/autobrr/go-qbittorrent"
+	"github.com/stretchr/testify/require"
+)
+
+// ensureCrossCategorySyncManager is a mutex-guarded fake sync manager that
+// tracks the categories it has been asked to create and counts CreateCategory
+// invocations, so tests can assert how many creations actually happened under
+// concurrency.
+type ensureCrossCategorySyncManager struct {
+	*fakeSyncManager
+
+	mu          sync.Mutex
+	categories  map[string]qbt.Category
+	createCalls int
+}
+
+func newEnsureCrossCategorySyncManager() *ensureCrossCategorySyncManager {
+	return &ensureCrossCategorySyncManager{
+		fakeSyncManager: &fakeSyncManager{},
+		categories:      make(map[string]qbt.Category),
+	}
+}
+
+// GetCategories returns a snapshot copy so callers cannot mutate internal state.
+func (m *ensureCrossCategorySyncManager) GetCategories(_ context.Context, _ int) (map[string]qbt.Category, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	copyMap := make(map[string]qbt.Category, len(m.categories))
+	maps.Copy(copyMap, m.categories)
+	return copyMap, nil
+}
+
+// CreateCategory records the category and increments the call counter.
+func (m *ensureCrossCategorySyncManager) CreateCategory(_ context.Context, _ int, name, path string) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	m.createCalls++
+	m.categories[name] = qbt.Category{SavePath: path}
+	return nil
+}
+
+func (m *ensureCrossCategorySyncManager) createCount() int {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.createCalls
+}
+
+func TestEnsureCrossCategory_UsesSingleflightForConcurrentCalls(t *testing.T) {
+	t.Parallel()
+
+	syncMgr := newEnsureCrossCategorySyncManager()
+	svc := &Service{syncManager: syncMgr}
+
+	const goroutines = 20
+	start := make(chan struct{})
+	errCh := make(chan error, goroutines)
+	// NOTE(review): sync.WaitGroup.Go and `for range <int>` require a recent Go
+	// toolchain (WaitGroup.Go is Go 1.25+) — confirm go.mod's language version.
+	var wg sync.WaitGroup
+
+	for range goroutines {
+		wg.Go(func() {
+			<-start
+			errCh <- svc.ensureCrossCategory(context.Background(), 1, "movies.cross", "/downloads/movies")
+		})
+	}
+
+	// Release all goroutines at once to maximize contention on ensureCrossCategory.
+	close(start)
+	wg.Wait()
+	close(errCh)
+
+	for err := range errCh {
+		require.NoError(t, err)
+	}
+
+	// Singleflight should collapse the concurrent burst into one creation.
+	require.Equal(t, 1, syncMgr.createCount())
+
+	// A later sequential call must see the existing category, not create again.
+	require.NoError(t, svc.ensureCrossCategory(context.Background(), 1, "movies.cross", "/downloads/movies"))
+	require.Equal(t, 1, syncMgr.createCount())
+}
diff --git 
a/internal/services/crossseed/hardlink_mode_test.go b/internal/services/crossseed/hardlink_mode_test.go index c0d9c3149..701845bc3 100644 --- a/internal/services/crossseed/hardlink_mode_test.go +++ b/internal/services/crossseed/hardlink_mode_test.go @@ -7,16 +7,20 @@ import ( "context" "errors" "fmt" + "maps" "os" "path/filepath" "strings" "testing" + "time" qbt "github.com/autobrr/go-qbittorrent" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/autobrr/qui/internal/database" "github.com/autobrr/qui/internal/models" + internalqb "github.com/autobrr/qui/internal/qbittorrent" "github.com/autobrr/qui/pkg/hardlinktree" "github.com/autobrr/qui/pkg/reflinktree" ) @@ -468,7 +472,10 @@ func TestProcessHardlinkMode_NotUsedWhenDisabled(t *testing.T) { "exact", nil, nil, + nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{}, + nil, "category", "category.cross", ) @@ -505,7 +512,10 @@ func TestProcessHardlinkMode_FailsWhenBaseDirEmpty(t *testing.T) { "exact", nil, nil, + nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{}, + nil, "category", "category.cross", ) @@ -537,6 +547,847 @@ func (m *mockInstanceStore) List(ctx context.Context) ([]*models.Instance, error return result, nil } +type recheckConfirmationSyncManager struct { + torrents []qbt.Torrent + filesByHash map[string]qbt.TorrentFiles + addOptions map[string]string + addCalls int + bulkActions []string + getTorrentIdx int + getTorrentsCalls int + blockOnGetTorrents bool +} + +func (m *recheckConfirmationSyncManager) GetTorrents(ctx context.Context, _ int, filter qbt.TorrentFilterOptions) ([]qbt.Torrent, error) { + m.getTorrentsCalls++ + if m.blockOnGetTorrents { + <-ctx.Done() + return nil, ctx.Err() + } + if len(m.torrents) == 0 { + return nil, nil + } + idx := m.getTorrentIdx + if idx >= len(m.torrents) { + idx = len(m.torrents) - 1 + } + m.getTorrentIdx++ + + torrent := m.torrents[idx] + if 
!recheckConfirmationMatchesFilter(torrent, filter) { + return nil, nil + } + + return []qbt.Torrent{torrent}, nil +} + +func (m *recheckConfirmationSyncManager) GetTorrentFilesBatch(_ context.Context, _ int, hashes []string) (map[string]qbt.TorrentFiles, error) { + result := make(map[string]qbt.TorrentFiles, len(hashes)) + for _, hash := range hashes { + if files, ok := m.filesByHash[normalizeHash(hash)]; ok { + result[normalizeHash(hash)] = files + } + } + return result, nil +} + +func (*recheckConfirmationSyncManager) ExportTorrent(context.Context, int, string) ([]byte, string, string, error) { + return nil, "", "", errors.New("not implemented") +} + +func (*recheckConfirmationSyncManager) HasTorrentByAnyHash(context.Context, int, []string) (*qbt.Torrent, bool, error) { + return nil, false, nil +} + +func (*recheckConfirmationSyncManager) GetTorrentProperties(context.Context, int, string) (*qbt.TorrentProperties, error) { + return nil, errors.New("not implemented") +} + +func (*recheckConfirmationSyncManager) GetAppPreferences(context.Context, int) (qbt.AppPreferences, error) { + return qbt.AppPreferences{}, nil +} + +func (m *recheckConfirmationSyncManager) AddTorrent(_ context.Context, _ int, _ []byte, options map[string]string) error { + m.addCalls++ + m.addOptions = maps.Clone(options) + return nil +} + +func (m *recheckConfirmationSyncManager) BulkAction(_ context.Context, instanceID int, hashes []string, action string) error { + m.bulkActions = append(m.bulkActions, fmt.Sprintf("%d:%s:%v", instanceID, action, hashes)) + return nil +} + +func (*recheckConfirmationSyncManager) GetCachedInstanceTorrents(context.Context, int) ([]internalqb.CrossInstanceTorrentView, error) { + return nil, nil +} + +func (*recheckConfirmationSyncManager) ExtractDomainFromURL(string) string { + return "" +} + +func (*recheckConfirmationSyncManager) GetQBittorrentSyncManager(context.Context, int) (*qbt.SyncManager, error) { + return nil, errors.New("not implemented") +} + +func 
(*recheckConfirmationSyncManager) RenameTorrent(context.Context, int, string, string) error { + return errors.New("not implemented") +} + +func (*recheckConfirmationSyncManager) RenameTorrentFile(context.Context, int, string, string, string) error { + return errors.New("not implemented") +} + +func (*recheckConfirmationSyncManager) RenameTorrentFolder(context.Context, int, string, string, string) error { + return errors.New("not implemented") +} + +func (*recheckConfirmationSyncManager) SetTags(context.Context, int, []string, string) error { + return nil +} + +func (*recheckConfirmationSyncManager) GetCategories(context.Context, int) (map[string]qbt.Category, error) { + return map[string]qbt.Category{}, nil +} + +func (*recheckConfirmationSyncManager) CreateCategory(context.Context, int, string, string) error { + return nil +} + +func recheckConfirmationMatchesFilter(torrent qbt.Torrent, filter qbt.TorrentFilterOptions) bool { + if len(filter.Hashes) > 0 { + matchedHash := false + for _, hash := range filter.Hashes { + normalized := normalizeHash(hash) + if normalized == normalizeHash(torrent.Hash) || + normalized == normalizeHash(torrent.InfohashV1) || + normalized == normalizeHash(torrent.InfohashV2) { + matchedHash = true + break + } + } + if !matchedHash { + return false + } + } + + if filter.Category != "" && torrent.Category != filter.Category { + return false + } + + if filter.Tag != "" && !recheckConfirmationContainsExactTag(torrent.Tags, filter.Tag) { + return false + } + + if filter.Filter != "" && !recheckConfirmationMatchesStateFilter(torrent.State, filter.Filter) { + return false + } + + return true +} + +func recheckConfirmationMatchesStateFilter(state qbt.TorrentState, filter qbt.TorrentFilter) bool { + switch filter { + case "", qbt.TorrentFilterAll: + return true + case qbt.TorrentFilterRunning: + return state != qbt.TorrentStateStoppedUp && state != qbt.TorrentStateStoppedDl + case qbt.TorrentFilterResumed: + return state != 
qbt.TorrentStatePausedUp && + state != qbt.TorrentStatePausedDl && + state != qbt.TorrentStateStoppedUp && + state != qbt.TorrentStateStoppedDl + case qbt.TorrentFilterPaused: + return state == qbt.TorrentStatePausedUp || state == qbt.TorrentStatePausedDl + case qbt.TorrentFilterStopped: + return state == qbt.TorrentStateStoppedUp || state == qbt.TorrentStateStoppedDl + case qbt.TorrentFilterChecking: + return state == qbt.TorrentStateCheckingUp || + state == qbt.TorrentStateCheckingDl || + state == qbt.TorrentStateCheckingResumeData + case qbt.TorrentFilterMoving: + return state == qbt.TorrentStateMoving + case qbt.TorrentFilterError: + return state == qbt.TorrentStateError + case qbt.TorrentFilterActive, qbt.TorrentFilterInactive, qbt.TorrentFilterCompleted, + qbt.TorrentFilterStalled, qbt.TorrentFilterUploading, qbt.TorrentFilterStalledUploading, + qbt.TorrentFilterDownloading, qbt.TorrentFilterStalledDownloading: + return true + default: + return true + } +} + +func recheckConfirmationContainsExactTag(tags string, target string) bool { + trimmedTarget := strings.TrimSpace(target) + if tags == "" || trimmedTarget == "" { + return false + } + + for tag := range strings.SplitSeq(tags, ",") { + if strings.TrimSpace(tag) == trimmedTarget { + return true + } + } + + return false +} + +func repeatedTorrentStates(count int, torrent qbt.Torrent, tail ...qbt.Torrent) []qbt.Torrent { + if count < 0 { + count = 0 + } + states := make([]qbt.Torrent, 0, count+len(tail)) + for range count { + states = append(states, torrent) + } + states = append(states, tail...) 
+ return states +} + +func TestProcessHardlinkMode_DelayedRecheckStartStillRegistersPoolMember(t *testing.T) { + t.Parallel() + + sourceRoot := t.TempDir() + managedRoot := t.TempDir() + sourceFileName := "Movie.mkv" + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, sourceFileName), []byte("source"), 0o600)) + + dbPath := filepath.Join(t.TempDir(), "partial-pool.db") + db, err := database.New(dbPath) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + store := models.NewCrossSeedPartialPoolMemberStore(db) + syncManager := &recheckConfirmationSyncManager{ + torrents: repeatedTorrentStates(32, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStatePausedDl, Progress: 0}, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStateCheckingResumeData, Progress: 0}, + ), + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash("targethash"): {{Index: 0, Name: sourceFileName, Size: 6}}, + }, + } + svc := &Service{ + instanceStore: &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseHardlinks: true, + HardlinkBaseDir: managedRoot, + }, + }, + }, + syncManager: syncManager, + partialPoolStore: store, + partialPoolWake: make(chan struct{}, 1), + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + recheckConfirmPoll: time.Millisecond, + recheckConfirmWait: time.Nanosecond, + recheckConfirmTries: 1, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + settings := models.DefaultCrossSeedAutomationSettings() + settings.EnablePooledPartialCompletion = true + return settings, nil + }, + } + + result := svc.processHardlinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "targethash", + "", + "Movie", + &CrossSeedRequest{}, + &qbt.Torrent{Hash: "sourcehash", Name: "Movie"}, + "partial-in-pack", + qbt.TorrentFiles{{Name: 
sourceFileName, Size: 6}}, + qbt.TorrentFiles{{Name: sourceFileName, Size: 6, Progress: 1}}, + nil, + &qbt.TorrentProperties{SavePath: sourceRoot}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used) + require.True(t, result.Success) + assert.Equal(t, "added_hardlink", result.Result.Status) + assert.Contains(t, result.Result.Message, "pooled completion active") + assert.NotContains(t, result.Result.Message, "manual intervention required") + assert.Len(t, syncManager.bulkActions, 1) + + member, err := store.GetByAnyHash(context.Background(), 1, "targethash") + require.NoError(t, err) + require.NotNil(t, member) + assert.Equal(t, "SOURCEHASH", member.SourceHash) +} + +func TestProcessHardlinkMode_RecheckConfirmedRegistersPoolMember(t *testing.T) { + t.Parallel() + + sourceRoot := t.TempDir() + managedRoot := t.TempDir() + sourceFileName := "Movie.mkv" + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, sourceFileName), []byte("source"), 0o600)) + + dbPath := filepath.Join(t.TempDir(), "partial-pool.db") + db, err := database.New(dbPath) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + store := models.NewCrossSeedPartialPoolMemberStore(db) + syncManager := &recheckConfirmationSyncManager{ + torrents: []qbt.Torrent{ + {Hash: "targethash", State: qbt.TorrentStateCheckingResumeData, Progress: 1}, + {Hash: "targethash", State: qbt.TorrentStatePausedDl, Progress: 0.5}, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash("targethash"): {{Index: 0, Name: sourceFileName, Size: 6}}, + }, + } + svc := &Service{ + instanceStore: &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseHardlinks: true, + HardlinkBaseDir: managedRoot, + }, + }, + }, + syncManager: syncManager, + partialPoolStore: store, + partialPoolWake: make(chan struct{}, 1), + partialPoolByHash: 
make(map[string]*models.CrossSeedPartialPoolMember), + recheckConfirmPoll: time.Millisecond, + recheckConfirmWait: 5 * time.Millisecond, + recheckConfirmTries: 1, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + settings := models.DefaultCrossSeedAutomationSettings() + settings.EnablePooledPartialCompletion = true + return settings, nil + }, + } + + result := svc.processHardlinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "targethash", + "", + "Movie", + &CrossSeedRequest{}, + &qbt.Torrent{Hash: "sourcehash", Name: "Movie"}, + "partial-in-pack", + qbt.TorrentFiles{{Name: sourceFileName, Size: 6}}, + qbt.TorrentFiles{{Name: sourceFileName, Size: 6, Progress: 1}}, + nil, + &qbt.TorrentProperties{SavePath: sourceRoot}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used) + require.True(t, result.Success) + assert.Equal(t, "added_hardlink", result.Result.Status) + assert.Contains(t, result.Result.Message, "pooled completion active") + + member, err := store.GetByAnyHash(context.Background(), 1, "targethash") + require.NoError(t, err) + require.NotNil(t, member) + assert.Equal(t, "SOURCEHASH", member.SourceHash) +} + +func TestProcessHardlinkMode_DelayedRecheckStartStillQueuesAutoResume(t *testing.T) { + t.Parallel() + + sourceRoot := t.TempDir() + managedRoot := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, "Movie.mkv"), []byte("source"), 0o600)) + + syncManager := &recheckConfirmationSyncManager{ + torrents: repeatedTorrentStates(32, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStatePausedDl, Progress: 0}, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStateCheckingResumeData, Progress: 0}, + ), + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash("targethash"): { + {Index: 0, Name: "Movie.mkv", Size: 6}, + {Index: 1, Name: 
"Movie.nfo", Size: 2}, + }, + }, + } + svc := &Service{ + instanceStore: &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseHardlinks: true, + HardlinkBaseDir: managedRoot, + }, + }, + }, + syncManager: syncManager, + recheckResumeChan: make(chan *pendingResume, 1), + recheckConfirmPoll: time.Millisecond, + recheckConfirmWait: time.Nanosecond, + recheckConfirmTries: 1, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + result := svc.processHardlinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "targethash", + "", + "Movie", + &CrossSeedRequest{}, + &qbt.Torrent{Hash: "sourcehash", Name: "Movie"}, + "partial-in-pack", + qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 6}, + {Name: "Movie.nfo", Size: 2}, + }, + qbt.TorrentFiles{{Name: "Movie.mkv", Size: 6, Progress: 1}}, + nil, + &qbt.TorrentProperties{SavePath: sourceRoot}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used) + require.True(t, result.Success) + assert.Equal(t, "added_hardlink", result.Result.Status) + assert.NotContains(t, result.Result.Message, "manual intervention required") + + select { + case pending := <-svc.recheckResumeChan: + assert.Equal(t, 1, pending.instanceID) + assert.Equal(t, "targethash", pending.hash) + case <-time.After(time.Second): + t.Fatal("expected delayed-start torrent to still be queued for recheck resume") + } +} + +func TestProcessHardlinkMode_OnlyLinksAvailableFiles(t *testing.T) { + t.Parallel() + + sourceRoot := t.TempDir() + managedRoot := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, "Movie.mkv"), []byte("video"), 0o600)) + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, "Movie.nfo"), []byte("info"), 
0o600)) + + syncManager := &recheckConfirmationSyncManager{ + torrents: repeatedTorrentStates(32, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStatePausedDl, Progress: 0}, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStateCheckingResumeData, Progress: 0}, + ), + } + svc := &Service{ + instanceStore: &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseHardlinks: true, + HardlinkBaseDir: managedRoot, + }, + }, + }, + syncManager: syncManager, + recheckResumeChan: make(chan *pendingResume, 1), + recheckConfirmPoll: time.Millisecond, + recheckConfirmWait: time.Nanosecond, + recheckConfirmTries: 1, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + result := svc.processHardlinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "targethash", + "", + "Movie", + &CrossSeedRequest{}, + &qbt.Torrent{Hash: "sourcehash", Name: "Movie"}, + "exact", + qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5}, + {Name: "Movie.nfo", Size: 4}, + }, + qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5, Progress: 1}, + {Name: "Movie.nfo", Size: 4, Progress: 0.5}, + }, + nil, + &qbt.TorrentProperties{SavePath: sourceRoot}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used) + require.True(t, result.Success) + assert.Equal(t, "added_hardlink", result.Result.Status) + assert.Contains(t, result.Result.Message, "files: 1/2") + assert.Equal(t, managedRoot, syncManager.addOptions["savepath"]) + assert.Equal(t, "true", syncManager.addOptions["paused"]) + assert.Equal(t, "true", syncManager.addOptions["stopped"]) + assert.Equal(t, "true", syncManager.addOptions["skip_checking"]) + + data, err := os.ReadFile(filepath.Join(managedRoot, "Movie.mkv")) + 
require.NoError(t, err) + assert.Equal(t, "video", string(data)) + + _, err = os.Stat(filepath.Join(managedRoot, "Movie.nfo")) + require.Error(t, err) + assert.True(t, os.IsNotExist(err)) +} + +func TestProcessHardlinkMode_LinksFileWhenQbitReportedSizeIsWrong(t *testing.T) { + t.Parallel() + + sourceRoot := t.TempDir() + managedRoot := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, "Movie.mkv"), []byte("video"), 0o600)) + + syncManager := &recheckConfirmationSyncManager{} + svc := &Service{ + instanceStore: &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseHardlinks: true, + HardlinkBaseDir: managedRoot, + }, + }, + }, + syncManager: syncManager, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + result := svc.processHardlinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "targethash", + "", + "Movie", + &CrossSeedRequest{}, + &qbt.Torrent{Hash: "sourcehash", Name: "Movie"}, + "exact", + qbt.TorrentFiles{{Name: "Movie.mkv", Size: 5}}, + qbt.TorrentFiles{{Name: "Movie.mkv", Size: 500, Progress: 1}}, + nil, + &qbt.TorrentProperties{SavePath: sourceRoot}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used) + require.True(t, result.Success) + assert.Equal(t, "added_hardlink", result.Result.Status) + assert.Contains(t, result.Result.Message, "files: 1/1") + + data, err := os.ReadFile(filepath.Join(managedRoot, "Movie.mkv")) + require.NoError(t, err) + assert.Equal(t, "video", string(data)) +} + +func TestProcessHardlinkMode_ZeroAvailableFilesStillAddsPausedAndRegistersPool(t *testing.T) { + t.Parallel() + + sourceRoot := t.TempDir() + managedRoot := t.TempDir() + dbPath := filepath.Join(t.TempDir(), 
"partial-pool.db") + db, err := database.New(dbPath) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + store := models.NewCrossSeedPartialPoolMemberStore(db) + syncManager := &recheckConfirmationSyncManager{ + torrents: repeatedTorrentStates(32, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStatePausedDl, Progress: 0}, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStateCheckingResumeData, Progress: 0}, + ), + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash("targethash"): { + {Index: 0, Name: "Movie.mkv", Size: 5}, + }, + }, + } + svc := &Service{ + instanceStore: &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseHardlinks: true, + HardlinkBaseDir: managedRoot, + }, + }, + }, + syncManager: syncManager, + partialPoolStore: store, + partialPoolWake: make(chan struct{}, 1), + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + recheckConfirmPoll: time.Millisecond, + recheckConfirmWait: time.Nanosecond, + recheckConfirmTries: 1, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + settings := models.DefaultCrossSeedAutomationSettings() + settings.EnablePooledPartialCompletion = true + return settings, nil + }, + } + + result := svc.processHardlinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "targethash", + "", + "Movie", + &CrossSeedRequest{}, + &qbt.Torrent{Hash: "sourcehash", Name: "Movie"}, + "partial-in-pack", + qbt.TorrentFiles{{Name: "Movie.mkv", Size: 5}}, + qbt.TorrentFiles{{Name: "Movie.mkv", Size: 5, Progress: 0}}, + nil, + &qbt.TorrentProperties{SavePath: sourceRoot}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used) + require.True(t, result.Success) + assert.Equal(t, "added_hardlink", result.Result.Status) + 
assert.Contains(t, result.Result.Message, "files: 0/1") + assert.Contains(t, result.Result.Message, "pooled completion active") + assert.Equal(t, managedRoot, syncManager.addOptions["savepath"]) + assert.Equal(t, "true", syncManager.addOptions["paused"]) + assert.Equal(t, "true", syncManager.addOptions["stopped"]) + + entries, err := os.ReadDir(managedRoot) + require.NoError(t, err) + assert.Empty(t, entries) + + member, err := store.GetByAnyHash(context.Background(), 1, "targethash") + require.NoError(t, err) + require.NotNil(t, member) + assert.Equal(t, managedRoot, member.ManagedRoot) +} + +func TestProcessReflinkMode_OnlyClonesAvailableFiles(t *testing.T) { + t.Parallel() + + managedRoot := t.TempDir() + supported, reason := reflinktree.SupportsReflink(managedRoot) + if !supported { + t.Skipf("reflinks not supported in test environment: %s", reason) + } + + sourceRoot := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, "Movie.mkv"), []byte("video"), 0o600)) + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, "Movie.nfo"), []byte("info"), 0o600)) + + syncManager := &recheckConfirmationSyncManager{ + torrents: repeatedTorrentStates(32, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStatePausedDl, Progress: 0}, + qbt.Torrent{Hash: "targethash", State: qbt.TorrentStateCheckingResumeData, Progress: 0}, + ), + } + svc := &Service{ + instanceStore: &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseReflinks: true, + HardlinkBaseDir: managedRoot, + }, + }, + }, + syncManager: syncManager, + recheckResumeChan: make(chan *pendingResume, 1), + recheckConfirmPoll: time.Millisecond, + recheckConfirmWait: time.Nanosecond, + recheckConfirmTries: 1, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + result := svc.processReflinkMode( + 
context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "targethash", + "", + "Movie", + &CrossSeedRequest{}, + &qbt.Torrent{Hash: "sourcehash", Name: "Movie"}, + "exact", + qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5}, + {Name: "Movie.nfo", Size: 4}, + }, + qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5, Progress: 1}, + {Name: "Movie.nfo", Size: 4, Progress: 0.5}, + }, + nil, + &qbt.TorrentProperties{SavePath: sourceRoot}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used) + require.True(t, result.Success) + assert.Equal(t, "added_reflink", result.Result.Status) + assert.Contains(t, result.Result.Message, "files: 1/2") + assert.Equal(t, managedRoot, syncManager.addOptions["savepath"]) + assert.Equal(t, "true", syncManager.addOptions["paused"]) + assert.Equal(t, "true", syncManager.addOptions["stopped"]) + + data, err := os.ReadFile(filepath.Join(managedRoot, "Movie.mkv")) + require.NoError(t, err) + assert.Equal(t, "video", string(data)) + + _, err = os.Stat(filepath.Join(managedRoot, "Movie.nfo")) + require.Error(t, err) + assert.True(t, os.IsNotExist(err)) +} + +func TestProcessReflinkMode_ClonesFileWhenQbitReportedSizeIsWrong(t *testing.T) { + t.Parallel() + + managedRoot := t.TempDir() + supported, reason := reflinktree.SupportsReflink(managedRoot) + if !supported { + t.Skipf("reflinks not supported in test environment: %s", reason) + } + + sourceRoot := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(sourceRoot, "Movie.mkv"), []byte("video"), 0o600)) + + syncManager := &recheckConfirmationSyncManager{} + svc := &Service{ + instanceStore: &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseReflinks: true, + HardlinkBaseDir: managedRoot, + }, + }, + }, + syncManager: syncManager, + automationSettingsLoader: func(context.Context) 
(*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + result := svc.processReflinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "targethash", + "", + "Movie", + &CrossSeedRequest{}, + &qbt.Torrent{Hash: "sourcehash", Name: "Movie"}, + "exact", + qbt.TorrentFiles{{Name: "Movie.mkv", Size: 5}}, + qbt.TorrentFiles{{Name: "Movie.mkv", Size: 500, Progress: 1}}, + nil, + &qbt.TorrentProperties{SavePath: sourceRoot}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used) + require.True(t, result.Success) + assert.Equal(t, "added_reflink", result.Result.Status) + assert.Contains(t, result.Result.Message, "files: 1/1") + + data, err := os.ReadFile(filepath.Join(managedRoot, "Movie.mkv")) + require.NoError(t, err) + assert.Equal(t, "video", string(data)) +} + +func TestTriggerAndConfirmInjectedTorrentRecheck_AttemptTimeoutBoundsHungAPI(t *testing.T) { + t.Parallel() + + syncManager := &recheckConfirmationSyncManager{ + blockOnGetTorrents: true, + } + svc := &Service{ + syncManager: syncManager, + recheckConfirmWait: 5 * time.Millisecond, + recheckConfirmTries: 3, + } + + start := time.Now() + confirmed, err := svc.triggerAndConfirmInjectedTorrentRecheck( + context.Background(), + 1, + []string{"targethash"}, + "targethash", + "[CROSSSEED] Test", + ) + elapsed := time.Since(start) + + require.NoError(t, err) + assert.False(t, confirmed) + assert.Len(t, syncManager.bulkActions, 3) + assert.Equal(t, 3, syncManager.getTorrentsCalls) + assert.Less(t, elapsed, 250*time.Millisecond) +} + func TestProcessHardlinkMode_FailsWhenNoLocalAccess(t *testing.T) { mockInstances := &mockInstanceStore{ instances: map[int]*models.Instance{ @@ -566,7 +1417,10 @@ func TestProcessHardlinkMode_FailsWhenNoLocalAccess(t *testing.T) { "exact", nil, qbt.TorrentFiles{{Name: "movie.mkv", Size: 1000}}, + 
nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{}, + nil, "category", "category.cross", ) @@ -611,7 +1465,10 @@ func TestProcessHardlinkMode_FailsOnInfrastructureError(t *testing.T) { "exact", nil, qbt.TorrentFiles{{Name: "movie.mkv", Size: 1000}}, + nil, &qbt.TorrentProperties{SavePath: "/also/nonexistent"}, + managedDestinationContext{}, + errors.New("failed to check filesystem for /nonexistent/hardlinks/path: boom"), "category", "category.cross", ) @@ -620,15 +1477,18 @@ func TestProcessHardlinkMode_FailsOnInfrastructureError(t *testing.T) { require.True(t, result.Used, "hardlink mode should be attempted") assert.False(t, result.Success, "hardlink mode should fail") assert.Equal(t, "hardlink_error", result.Result.Status) - // Error could be about directory creation or filesystem - both are valid infrastructure errors - assert.True(t, strings.Contains(result.Result.Message, "directory") || + // Direct helper tests now rely on a precomputed managed destination context. + assert.True(t, strings.Contains(result.Result.Message, "Managed destination root") || + strings.Contains(result.Result.Message, "directory") || strings.Contains(result.Result.Message, "filesystem"), "error message should mention directory or filesystem issue, got: %s", result.Result.Message) + assert.Contains(t, result.Result.Message, "boom") } func TestProcessHardlinkMode_SkipsWhenExtrasAndSkipRecheckEnabled(t *testing.T) { // This test verifies that when incoming torrent has extra files (files not in candidate) // and SkipRecheck is enabled, hardlink mode returns skipped_recheck before any plan building. 
+ managedRoot := t.TempDir() mockInstances := &mockInstanceStore{ instances: map[int]*models.Instance{ @@ -669,7 +1529,10 @@ func TestProcessHardlinkMode_SkipsWhenExtrasAndSkipRecheckEnabled(t *testing.T) "exact", sourceFiles, candidateFiles, + nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{RootDir: managedRoot}, + nil, "category", "category.cross", ) @@ -685,6 +1548,7 @@ func TestProcessHardlinkMode_SkipsWhenExtrasAndSkipRecheckEnabled(t *testing.T) func TestProcessReflinkMode_SkipsWhenExtrasAndSkipRecheckEnabled(t *testing.T) { // This test verifies that when incoming torrent has extra files (files not in candidate) // and SkipRecheck is enabled, reflink mode returns skipped_recheck before any plan building. + managedRoot := t.TempDir() mockInstances := &mockInstanceStore{ instances: map[int]*models.Instance{ @@ -725,7 +1589,10 @@ func TestProcessReflinkMode_SkipsWhenExtrasAndSkipRecheckEnabled(t *testing.T) { "exact", sourceFiles, candidateFiles, + nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{RootDir: managedRoot}, + nil, "category", "category.cross", ) @@ -738,6 +1605,137 @@ func TestProcessReflinkMode_SkipsWhenExtrasAndSkipRecheckEnabled(t *testing.T) { assert.Contains(t, result.Result.Message, "Skip recheck") } +func TestProcessReflinkMode_SkipsSingleFileSizeMismatchWhenSkipRecheckEnabled(t *testing.T) { + managedRoot := t.TempDir() + + mockInstances := &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseReflinks: true, + HardlinkBaseDir: "/reflinks", + }, + }, + } + + s := &Service{ + instanceStore: mockInstances, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + settings := models.DefaultCrossSeedAutomationSettings() + settings.AllowReflinkSingleFileSizeMismatch = true + return settings, nil + }, + } + + sourceFiles := qbt.TorrentFiles{ + {Name: 
"Movie.2024.1080p.mkv", Size: 1_000}, + } + candidateFiles := qbt.TorrentFiles{ + {Name: "Movie 2024 1080p.mkv", Size: 990}, + } + + result := s.processReflinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "hash123", + "", + "TorrentName", + &CrossSeedRequest{SkipRecheck: true}, + &qbt.Torrent{ContentPath: "/downloads/movie.mkv"}, + "size", + sourceFiles, + candidateFiles, + nil, + &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used, "reflink mode should be attempted") + assert.False(t, result.Success, "should not succeed - skipped") + assert.Equal(t, "skipped_recheck", result.Result.Status) + assert.Contains(t, result.Result.Message, "requires recheck") +} + +func TestShouldAllowReflinkSingleFileSizeMismatch(t *testing.T) { + s := &Service{} + settings := models.DefaultCrossSeedAutomationSettings() + settings.AllowReflinkSingleFileSizeMismatch = true + + assert.True(t, s.shouldAllowReflinkSingleFileSizeMismatch( + settings, + qbt.TorrentFiles{{Name: "Movie.2024.1080p.mkv", Size: 1_000}}, + qbt.TorrentFiles{{Name: "Movie 2024 1080p.mkv", Size: 990}}, + )) + + assert.False(t, s.shouldAllowReflinkSingleFileSizeMismatch( + settings, + qbt.TorrentFiles{{Name: "Movie.2024.1080p.mkv", Size: 1_000}}, + qbt.TorrentFiles{{Name: "Movie 2024 1080p.mkv", Size: 980}}, + )) + + assert.False(t, s.shouldAllowReflinkSingleFileSizeMismatch( + settings, + qbt.TorrentFiles{{Name: "Movie.2024.1080p.mkv", Size: 1_000}}, + qbt.TorrentFiles{{Name: "Different.Movie.2024.1080p.mkv", Size: 990}}, + )) +} + +func TestProcessReflinkMode_SingleFileSizeMismatchOverThresholdRejectedBeforeAdd(t *testing.T) { + managedRoot := t.TempDir() + + mockInstances := &mockInstanceStore{ + instances: map[int]*models.Instance{ + 1: { + ID: 1, + Name: "qbt1", + HasLocalFilesystemAccess: true, + UseReflinks: true, + 
HardlinkBaseDir: "/reflinks", + }, + }, + } + + s := &Service{ + instanceStore: mockInstances, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + settings := models.DefaultCrossSeedAutomationSettings() + settings.AllowReflinkSingleFileSizeMismatch = true + return settings, nil + }, + } + + result := s.processReflinkMode( + context.Background(), + CrossSeedCandidate{InstanceID: 1, InstanceName: "qbt1"}, + []byte("torrent"), + "hash123", + "", + "TorrentName", + &CrossSeedRequest{}, + &qbt.Torrent{ContentPath: "/downloads/movie.mkv"}, + "size", + qbt.TorrentFiles{{Name: "Movie.2024.1080p.mkv", Size: 1_000}}, + qbt.TorrentFiles{{Name: "Movie 2024 1080p.mkv", Size: 980}}, + nil, + &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{RootDir: managedRoot}, + nil, + "category", + "category.cross", + ) + + require.True(t, result.Used, "reflink mode should be attempted") + assert.False(t, result.Success, "should reject before add") + assert.Equal(t, "rejected", result.Result.Status) + assert.Contains(t, result.Result.Message, "99% precheck threshold") +} + func TestProcessHardlinkMode_FallbackEnabled(t *testing.T) { // When FallbackToRegularMode is enabled, hardlink failures should return // Used=false so that regular cross-seed mode can proceed. 
@@ -770,7 +1768,10 @@ func TestProcessHardlinkMode_FallbackEnabled(t *testing.T) { "exact", nil, qbt.TorrentFiles{{Name: "movie.mkv", Size: 1000}}, + nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{}, + nil, "category", "category.cross", ) @@ -811,7 +1812,10 @@ func TestProcessHardlinkMode_FallbackDisabled(t *testing.T) { "exact", nil, qbt.TorrentFiles{{Name: "movie.mkv", Size: 1000}}, + nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{}, + nil, "category", "category.cross", ) @@ -855,7 +1859,10 @@ func TestProcessReflinkMode_FallbackEnabled(t *testing.T) { "exact", nil, qbt.TorrentFiles{{Name: "movie.mkv", Size: 1000}}, + nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{}, + nil, "category", "category.cross", ) @@ -896,7 +1903,10 @@ func TestProcessReflinkMode_FallbackDisabled(t *testing.T) { "exact", nil, qbt.TorrentFiles{{Name: "movie.mkv", Size: 1000}}, + nil, &qbt.TorrentProperties{SavePath: "/downloads"}, + managedDestinationContext{}, + nil, "category", "category.cross", ) diff --git a/internal/services/crossseed/matching_layout_test.go b/internal/services/crossseed/matching_layout_test.go index 56a43aabe..bcb398332 100644 --- a/internal/services/crossseed/matching_layout_test.go +++ b/internal/services/crossseed/matching_layout_test.go @@ -117,6 +117,67 @@ func TestFindBestCandidateMatch_PrefersTopLevelFolderOnTie(t *testing.T) { require.Len(t, files, 2, "should return folder-based file list") } +func TestFindBestCandidateMatch_PrefersNonCrossSeedOnTie(t *testing.T) { + t.Parallel() + + svc := &Service{ + releaseCache: releases.NewDefaultParser(), + stringNormalizer: stringutils.NewDefaultNormalizer(), + syncManager: &candidateSelectionSyncManager{ + files: map[string]qbt.TorrentFiles{ + "tagged": {{Name: "payload.bin", Size: 4 << 30}}, + "untagged": {{Name: "payload.bin", Size: 4 << 30}}, + }, + }, + } + + sourceRelease := rls.Release{} + sourceFiles := 
qbt.TorrentFiles{{Name: "payload.bin", Size: 4 << 30}} + candidate := CrossSeedCandidate{ + InstanceID: 1, + Torrents: []qbt.Torrent{ + {Hash: "tagged", Name: "Minimal.Payload", Progress: 1.0, Tags: "cross-seed"}, + {Hash: "untagged", Name: "Minimal.Payload", Progress: 1.0}, + }, + } + + filesByHash := svc.batchLoadCandidateFiles(context.Background(), candidate.InstanceID, candidate.Torrents) + bestTorrent, files, matchType, _ := svc.findBestCandidateMatch(context.Background(), candidate, &sourceRelease, sourceFiles, filesByHash, 5.0) + require.NotNil(t, bestTorrent) + require.Equal(t, "untagged", bestTorrent.Hash) + require.Equal(t, "exact", matchType) + require.Len(t, files, 1) +} + +func TestFindBestCandidateMatch_AllowsCrossSeedFallbackWhenOnlyOption(t *testing.T) { + t.Parallel() + + svc := &Service{ + releaseCache: releases.NewDefaultParser(), + stringNormalizer: stringutils.NewDefaultNormalizer(), + syncManager: &candidateSelectionSyncManager{ + files: map[string]qbt.TorrentFiles{ + "tagged": {{Name: "payload.bin", Size: 4 << 30}}, + }, + }, + } + + sourceRelease := rls.Release{} + sourceFiles := qbt.TorrentFiles{{Name: "payload.bin", Size: 4 << 30}} + candidate := CrossSeedCandidate{ + InstanceID: 1, + Torrents: []qbt.Torrent{ + {Hash: "tagged", Name: "Minimal.Payload", Progress: 1.0, Tags: "cross-seed"}, + }, + } + + filesByHash := svc.batchLoadCandidateFiles(context.Background(), candidate.InstanceID, candidate.Torrents) + bestTorrent, _, matchType, _ := svc.findBestCandidateMatch(context.Background(), candidate, &sourceRelease, sourceFiles, filesByHash, 5.0) + require.NotNil(t, bestTorrent) + require.Equal(t, "tagged", bestTorrent.Hash) + require.Equal(t, "exact", matchType) +} + func TestFindBestCandidateMatch_RejectsSeasonPackAgainstEpisodeCandidate(t *testing.T) { t.Parallel() diff --git a/internal/services/crossseed/partial_pool.go b/internal/services/crossseed/partial_pool.go new file mode 100644 index 000000000..98f817bef --- /dev/null +++ 
b/internal/services/crossseed/partial_pool.go @@ -0,0 +1,1555 @@ +// Copyright (c) 2025-2026, s0up and the autobrr contributors. +// SPDX-License-Identifier: GPL-2.0-or-later + +package crossseed + +import ( + "context" + "errors" + "fmt" + "math" + "os" + "path/filepath" + "sort" + "strings" + "time" + + qbt "github.com/autobrr/go-qbittorrent" + "github.com/rs/zerolog/log" + + "github.com/autobrr/qui/internal/models" + internalqb "github.com/autobrr/qui/internal/qbittorrent" + "github.com/autobrr/qui/pkg/hardlinktree" + "github.com/autobrr/qui/pkg/reflinktree" +) + +const ( + partialPoolPollInterval = 10 * time.Second + partialPoolMarkerTTL = 24 * time.Hour + partialPoolSelectionLimit = 6 * time.Hour + partialPoolMissingGrace = 30 * time.Second + partialPoolFileCompleteThreshold = 0.999999 +) + +const ( + partialPoolFileComplete = "complete" + partialPoolFileWholeMiss = "whole_file_missing" + partialPoolFilePartialMiss = "partial_file_missing" +) + +type partialPoolState struct { + member *models.CrossSeedPartialPoolMember + torrent qbt.Torrent + files qbt.TorrentFiles + classByName map[string]string + classByLiveName map[string]string + keyByName map[string]partialPoolFileKey + liveNameByName map[string]string + byName map[string]partialPoolLiveFile + missingBytes int64 + incompleteNames []string + incompleteKeys []partialPoolFileKey + completeNames []string + allWholeMissing bool + pieceSafe bool + eligibleDownload bool + manualReview bool + manualReason string + complete bool + checking bool + awaitingRecheck bool +} + +type partialPoolForceRefreshContextKey struct{} + +type partialPoolFileOwner struct { + state *partialPoolState + name string +} + +type partialPoolFileKey struct { + key string + size int64 +} + +type partialPoolLiveFile struct { + Index int + Name string + Progress float64 + Size int64 +} + +type partialPoolSelection struct { + MemberKey string + SelectedAt time.Time +} + +type partialPoolFileCacheInvalidator interface { + 
InvalidateFileCache(ctx context.Context, instanceID int, hash string) error +} + +func partialPoolLookupKey(instanceID int, hash string) string { + if instanceID <= 0 { + return "" + } + hash = normalizeHash(hash) + if hash == "" { + return "" + } + return fmt.Sprintf("%d|%s", instanceID, hash) +} + +func partialPoolSourceKey(member *models.CrossSeedPartialPoolMember) string { + return fmt.Sprintf("%d|%s", member.SourceInstanceID, normalizeHash(member.SourceHash)) +} + +func (s *Service) RestoreActivePartialPools(ctx context.Context) error { + if s == nil || s.partialPoolStore == nil { + return nil + } + + members, err := s.partialPoolStore.ListActive(ctx, time.Now().UTC()) + if err != nil { + return err + } + + s.partialPoolMu.Lock() + defer s.partialPoolMu.Unlock() + s.partialPoolByHash = make(map[string]*models.CrossSeedPartialPoolMember, len(members)*2) + s.partialPoolBySource = make(map[string]partialPoolSelection) + for _, member := range members { + s.storePartialPoolMemberLocked(member) + } + s.signalPartialPoolWake() + return nil +} + +func (s *Service) partialPoolWorker() { + ticker := time.NewTicker(partialPoolPollInterval) + defer ticker.Stop() + + runCtx, runCancel := context.WithCancel(context.Background()) + defer runCancel() + + if s.partialPoolStop != nil { + go func(stop <-chan struct{}) { + <-stop + runCancel() + }(s.partialPoolStop) + } + + for { + select { + case <-runCtx.Done(): + return + case <-s.partialPoolWake: + case <-ticker.C: + } + + s.triggerPartialPoolRun(runCtx, s.processPartialPools) + } +} + +func (s *Service) triggerPartialPoolRun(ctx context.Context, process func(context.Context)) { + if s == nil || process == nil { + return + } + + s.partialPoolRunPending.Store(true) + if !s.partialPoolRunActive.CompareAndSwap(false, true) { + return + } + + go s.partialPoolRunLoop(ctx, process) +} + +func (s *Service) partialPoolRunLoop(ctx context.Context, process func(context.Context)) { + for { + if ctx != nil { + select { + case 
<-ctx.Done(): + s.partialPoolRunActive.Store(false) + return + default: + } + } + + s.partialPoolRunPending.Store(false) + process(ctx) + + if s.partialPoolRunPending.Swap(false) { + continue + } + + s.partialPoolRunActive.Store(false) + if !s.partialPoolRunPending.Load() || !s.partialPoolRunActive.CompareAndSwap(false, true) { + return + } + } +} + +func (s *Service) processPartialPools(ctx context.Context) { + if s == nil { + return + } + + now := time.Now().UTC() + if s.partialPoolStore != nil { + if _, err := s.partialPoolStore.DeleteExpired(ctx, now); err != nil { + log.Debug().Err(err).Msg("[CROSSSEED-POOL] Failed to prune expired pooled members") + } + } + s.pruneExpiredPartialPoolMembers(now) + + settings, err := s.GetAutomationSettings(ctx) + if err != nil { + log.Debug().Err(err).Msg("[CROSSSEED-POOL] Failed to load automation settings") + return + } + if settings == nil || !settings.EnablePooledPartialCompletion { + if err := s.drainPartialPoolMembers(ctx); err != nil { + log.Debug().Err(err).Msg("[CROSSSEED-POOL] Failed to clear pooled members while automation is disabled") + } + return + } + + members := s.listPartialPoolMembers() + if len(members) == 0 { + return + } + + pools := make(map[string][]*models.CrossSeedPartialPoolMember) + for _, member := range members { + pools[partialPoolSourceKey(member)] = append(pools[partialPoolSourceKey(member)], member) + } + + for _, poolMembers := range pools { + s.processPartialPool(ctx, settings, poolMembers) + } +} + +func (s *Service) processPartialPool(ctx context.Context, settings *models.CrossSeedAutomationSettings, members []*models.CrossSeedPartialPoolMember) { + if len(members) == 0 { + return + } + + poolKey := partialPoolSourceKey(members[0]) + log.Debug(). + Str("poolKey", poolKey). + Int("memberCount", len(members)). + Msg("[CROSSSEED-POOL] Processing pooled partial members") + + states := s.loadPartialPoolStates(ctx, settings, members) + if len(states) == 0 { + log.Debug(). 
+ Str("poolKey", poolKey). + Msg("[CROSSSEED-POOL] No active pooled states loaded") + return + } + + activeStates := make([]*partialPoolState, 0, len(states)) + completedStates := make([]*partialPoolState, 0, len(states)) + for _, state := range states { + if state.complete { + completedStates = append(completedStates, state) + activeStates = append(activeStates, state) + continue + } + if state.manualReview { + log.Info(). + Str("poolKey", poolKey). + Int("instanceID", state.member.TargetInstanceID). + Str("hash", state.member.TargetHash). + Str("torrentState", string(state.torrent.State)). + Int64("missingBytes", state.missingBytes). + Int("incompleteFiles", len(state.incompleteNames)). + Str("reason", state.manualReason). + Msg("[CROSSSEED-POOL] Pooled member requires manual review") + s.dropPartialPoolMember(ctx, state.member, state.manualReason) + continue + } + activeStates = append(activeStates, state) + } + if len(activeStates) == 0 { + log.Debug(). + Str("poolKey", poolKey). + Msg("[CROSSSEED-POOL] No pooled members remain active") + s.clearPartialPoolSelection(poolKey) + return + } + + s.resumePartialPoolMembers(ctx, completedStates) + s.propagateCompletedPoolFiles(ctx, activeStates) + + incomplete := make([]*partialPoolState, 0, len(activeStates)) + for _, state := range activeStates { + if state.complete { + continue + } + incomplete = append(incomplete, state) + } + if len(incomplete) == 0 { + for _, state := range activeStates { + s.removePartialPoolMember(ctx, state.member) + } + s.clearPartialPoolSelection(poolKey) + return + } + + // Don't churn member state while any pooled download is already active. Keep the + // preferred candidate updated in-memory so rotation can happen after the timeout. + if s.partialPoolHasActiveDownloader(incomplete) { + selected := s.selectPreferredPartialPoolDownloader(poolKey, incomplete, time.Now().UTC()) + if selected != nil { + log.Debug(). + Str("poolKey", poolKey). 
+ Int("instanceID", selected.member.TargetInstanceID). + Str("selectedHash", selected.member.TargetHash). + Int64("missingBytes", selected.missingBytes). + Int("incompleteFiles", len(selected.incompleteNames)). + Msg("[CROSSSEED-POOL] Active downloader already running; leaving pool unchanged") + } + return + } + + selected := s.selectPreferredPartialPoolDownloader(poolKey, incomplete, time.Now().UTC()) + if selected == nil { + log.Debug(). + Str("poolKey", poolKey). + Int("incompleteMembers", len(incomplete)). + Msg("[CROSSSEED-POOL] No eligible pooled downloader selected") + return + } + log.Info(). + Str("poolKey", poolKey). + Int("instanceID", selected.member.TargetInstanceID). + Str("selectedHash", selected.member.TargetHash). + Str("mode", selected.member.Mode). + Int64("missingBytes", selected.missingBytes). + Int("incompleteFiles", len(selected.incompleteNames)). + Msg("[CROSSSEED-POOL] Selected pooled downloader") + s.resumePartialPoolMembers(ctx, []*partialPoolState{selected}) +} + +func (s *Service) loadPartialPoolStates(ctx context.Context, settings *models.CrossSeedAutomationSettings, members []*models.CrossSeedPartialPoolMember) []*partialPoolState { + if len(members) == 0 { + return nil + } + + byInstance := make(map[int][]*models.CrossSeedPartialPoolMember) + for _, member := range members { + byInstance[member.TargetInstanceID] = append(byInstance[member.TargetInstanceID], member) + } + + var states []*partialPoolState + for instanceID, instanceMembers := range byInstance { + hashes := make([]string, 0, len(instanceMembers)*2) + for _, member := range instanceMembers { + hashes = append(hashes, member.TargetHash) + hashes = append(hashes, member.TargetHashV2) + s.invalidatePartialPoolFileCache(ctx, member.TargetInstanceID, member.TargetHash) + s.invalidatePartialPoolFileCache(ctx, member.TargetInstanceID, member.TargetHashV2) + } + hashes = uniqueStrings(hashes) + + torrents, err := s.syncManager.GetTorrents(ctx, instanceID, 
qbt.TorrentFilterOptions{Hashes: hashes}) + if err != nil { + log.Debug().Err(err).Int("instanceID", instanceID).Msg("[CROSSSEED-POOL] Failed to load pool torrents") + continue + } + + torrentByHash := make(map[string]qbt.Torrent, len(torrents)) + for _, torrent := range torrents { + torrentByHash[normalizeHash(torrent.Hash)] = torrent + } + + filesByHash, err := s.getPartialPoolTorrentFilesBatch(ctx, instanceID, hashes) + if err != nil { + log.Debug().Err(err).Int("instanceID", instanceID).Msg("[CROSSSEED-POOL] Failed to load pool files") + continue + } + + for _, member := range instanceMembers { + torrent, ok := torrentByHash[normalizeHash(member.TargetHash)] + if !ok && member.TargetHashV2 != "" { + torrent, ok = torrentByHash[normalizeHash(member.TargetHashV2)] + } + if !ok { + resolvedTorrent, found, err := s.syncManager.HasTorrentByAnyHash(ctx, instanceID, []string{member.TargetHash, member.TargetHashV2}) + if err != nil { + log.Debug(). + Err(err). + Int("instanceID", member.TargetInstanceID). + Str("hash", member.TargetHash). + Str("hashV2", member.TargetHashV2). + Msg("[CROSSSEED-POOL] Failed to resolve pooled member via variant-aware lookup") + } else if found && resolvedTorrent != nil { + torrent = *resolvedTorrent + ok = true + torrentByHash[normalizeHash(torrent.Hash)] = torrent + if torrent.InfohashV1 != "" { + torrentByHash[normalizeHash(torrent.InfohashV1)] = torrent + } + if torrent.InfohashV2 != "" { + torrentByHash[normalizeHash(torrent.InfohashV2)] = torrent + } + log.Debug(). + Int("instanceID", member.TargetInstanceID). + Str("hash", member.TargetHash). + Str("hashV2", member.TargetHashV2). + Str("resolvedHash", torrent.Hash). + Str("torrentState", string(torrent.State)). + Msg("[CROSSSEED-POOL] Resolved pooled member via variant-aware lookup fallback") + } + } + if !ok { + if partialPoolMemberMissingGraceActive(member, time.Now().UTC()) { + log.Debug(). + Int("instanceID", member.TargetInstanceID). + Str("hash", member.TargetHash). 
+ Str("hashV2", member.TargetHashV2). + Msg("[CROSSSEED-POOL] Deferring pooled member removal until torrent appears in sync state") + continue + } + log.Debug(). + Int("instanceID", member.TargetInstanceID). + Str("hash", member.TargetHash). + Str("hashV2", member.TargetHashV2). + Msg("[CROSSSEED-POOL] Removing pooled member because torrent is still missing after grace period") + s.removePartialPoolMember(ctx, member) + continue + } + if !s.partialPoolMemberMatchesTorrent(member, torrent) { + log.Info(). + Int("instanceID", member.TargetInstanceID). + Str("hash", member.TargetHash). + Int64("storedAddedOn", member.TargetAddedOn). + Int64("liveAddedOn", torrent.AddedOn). + Msg("[CROSSSEED-POOL] Removing stale pooled member after torrent re-add") + s.removePartialPoolMember(ctx, member) + continue + } + + files, ok := filesByHash[normalizeHash(member.TargetHash)] + if !ok && member.TargetHashV2 != "" { + files, ok = filesByHash[normalizeHash(member.TargetHashV2)] + } + if !ok { + log.Debug(). + Int("instanceID", member.TargetInstanceID). + Str("hash", member.TargetHash). + Str("hashV2", member.TargetHashV2). + Msg("[CROSSSEED-POOL] Skipping pooled member until torrent files are available") + continue + } + + state := s.buildPartialPoolState(member, torrent, files) + state = s.applyPartialPoolSettings(state, settings) + limit := models.DefaultCrossSeedAutomationSettings().MaxMissingBytesAfterRecheck + if state.member != nil && state.member.MaxMissingBytesAfterRecheck > 0 { + limit = state.member.MaxMissingBytesAfterRecheck + } else if settings != nil { + limit = settings.MaxMissingBytesAfterRecheck + } + log.Debug(). + Int("instanceID", member.TargetInstanceID). + Str("hash", member.TargetHash). + Str("mode", member.Mode). + Str("torrentState", string(torrent.State)). + Bool("checking", state.checking). + Bool("awaitingRecheckCompletion", state.awaitingRecheck). + Bool("complete", state.complete). + Bool("eligibleDownload", state.eligibleDownload). 
+ Bool("manualReview", state.manualReview). + Int64("missingBytes", state.missingBytes). + Int64("missingLimit", limit). + Int("sourceFiles", len(member.SourceFiles)). + Int("incompleteFiles", len(state.incompleteNames)). + Str("manualReason", state.manualReason). + Msg("[CROSSSEED-POOL] Loaded pooled member state") + states = append(states, state) + } + } + + return states +} + +func (s *Service) getPartialPoolTorrentFilesBatch( + ctx context.Context, + instanceID int, + hashes []string, +) (map[string]qbt.TorrentFiles, error) { + if s == nil || s.syncManager == nil { + return map[string]qbt.TorrentFiles{}, nil + } + + ctx = context.WithValue(ctx, partialPoolForceRefreshContextKey{}, true) + ctx = internalqb.WithForceFilesRefresh(ctx) + + return s.syncManager.GetTorrentFilesBatch(ctx, instanceID, hashes) +} + +func partialPoolMemberMissingGraceActive(member *models.CrossSeedPartialPoolMember, now time.Time) bool { + if member == nil { + return false + } + + reference := member.CreatedAt + if reference.IsZero() || member.UpdatedAt.After(reference) { + reference = member.UpdatedAt + } + if reference.IsZero() { + return false + } + + return now.Sub(reference) < partialPoolMissingGrace +} + +func (s *Service) buildPartialPoolState(member *models.CrossSeedPartialPoolMember, torrent qbt.Torrent, files qbt.TorrentFiles) *partialPoolState { + state := &partialPoolState{ + member: member, + torrent: torrent, + files: files, + classByName: make(map[string]string, len(member.SourceFiles)), + classByLiveName: make(map[string]string, len(member.SourceFiles)), + keyByName: make(map[string]partialPoolFileKey, len(member.SourceFiles)), + liveNameByName: make(map[string]string, len(member.SourceFiles)), + byName: make(map[string]partialPoolLiveFile, len(files)), + allWholeMissing: true, + } + liveByKey := make(map[partialPoolFileKey][]partialPoolLiveFile, len(files)) + + for _, file := range files { + live := partialPoolLiveFile{ + Index: file.Index, + Name: file.Name, + Progress: 
float64(file.Progress), + Size: file.Size, + } + state.byName[file.Name] = live + key := partialPoolFileKey{ + key: normalizeFileKey(file.Name), + size: file.Size, + } + liveByKey[key] = append(liveByKey[key], live) + } + + for _, sourceFile := range member.SourceFiles { + fileKey := partialPoolStoredFileKey(sourceFile) + state.keyByName[sourceFile.Name] = fileKey + + live, ok := state.byName[sourceFile.Name] + if ok && live.Size == sourceFile.Size { + liveByKey = partialPoolConsumeLiveFile(liveByKey, live) + } else { + live, ok = partialPoolTakeLiveFile(liveByKey, fileKey) + } + if !ok { + state.classByName[sourceFile.Name] = partialPoolFileWholeMiss + state.missingBytes += sourceFile.Size + state.incompleteNames = append(state.incompleteNames, sourceFile.Name) + state.incompleteKeys = append(state.incompleteKeys, fileKey) + continue + } + state.liveNameByName[sourceFile.Name] = live.Name + + progress := live.Progress + switch { + case progress >= partialPoolFileCompleteThreshold: + state.classByName[sourceFile.Name] = partialPoolFileComplete + state.classByLiveName[live.Name] = partialPoolFileComplete + state.completeNames = append(state.completeNames, sourceFile.Name) + case progress <= 0: + state.classByName[sourceFile.Name] = partialPoolFileWholeMiss + state.classByLiveName[live.Name] = partialPoolFileWholeMiss + state.missingBytes += sourceFile.Size + state.incompleteNames = append(state.incompleteNames, sourceFile.Name) + state.incompleteKeys = append(state.incompleteKeys, fileKey) + default: + state.classByName[sourceFile.Name] = partialPoolFilePartialMiss + state.classByLiveName[live.Name] = partialPoolFilePartialMiss + state.allWholeMissing = false + state.incompleteNames = append(state.incompleteNames, sourceFile.Name) + state.incompleteKeys = append(state.incompleteKeys, fileKey) + state.missingBytes += int64(math.Ceil(float64(sourceFile.Size) * (1 - progress))) + } + } + + state.checking = isTorrentCheckingState(torrent.State) + state.awaitingRecheck = 
state.checking + state.complete = len(state.incompleteNames) == 0 && !state.checking + return state +} + +func partialPoolStoredFileKey(file models.CrossSeedPartialFile) partialPoolFileKey { + key := strings.TrimSpace(file.Key) + if key == "" { + key = normalizeFileKey(file.Name) + } + return partialPoolFileKey{ + key: key, + size: file.Size, + } +} + +func partialPoolTakeLiveFile( + liveByKey map[partialPoolFileKey][]partialPoolLiveFile, + key partialPoolFileKey, +) (partialPoolLiveFile, bool) { + liveFiles := liveByKey[key] + if len(liveFiles) == 0 { + return partialPoolLiveFile{}, false + } + live := liveFiles[0] + if len(liveFiles) == 1 { + delete(liveByKey, key) + } else { + liveByKey[key] = liveFiles[1:] + } + return live, true +} + +func partialPoolConsumeLiveFile( + liveByKey map[partialPoolFileKey][]partialPoolLiveFile, + live partialPoolLiveFile, +) map[partialPoolFileKey][]partialPoolLiveFile { + key := partialPoolFileKey{ + key: normalizeFileKey(live.Name), + size: live.Size, + } + liveFiles := liveByKey[key] + for i, candidate := range liveFiles { + if candidate.Index != live.Index || candidate.Name != live.Name { + continue + } + if len(liveFiles) == 1 { + delete(liveByKey, key) + return liveByKey + } + last := len(liveFiles) - 1 + liveFiles[i] = liveFiles[last] + liveFiles[last] = partialPoolLiveFile{} + liveByKey[key] = liveFiles[:last] + return liveByKey + } + return liveByKey +} + +func (s *Service) applyPartialPoolSettings(state *partialPoolState, settings *models.CrossSeedAutomationSettings) *partialPoolState { + if state == nil { + return nil + } + if state.awaitingRecheck { + return state + } + if state.member.Mode == models.CrossSeedPartialMemberModeHardlink { + if !state.allWholeMissing { + state.manualReview = true + state.manualReason = "post-recheck missing bytes exist inside linked files" + return state + } + + filesForBoundary := make([]TorrentFileForBoundaryCheck, 0, len(state.member.SourceFiles)) + for _, sourceFile := range 
state.member.SourceFiles { + filesForBoundary = append(filesForBoundary, TorrentFileForBoundaryCheck{ + Path: sourceFile.Name, + Size: sourceFile.Size, + IsContent: state.classByName[sourceFile.Name] == partialPoolFileComplete, + }) + } + result := CheckPieceBoundarySafety(filesForBoundary, state.member.SourcePieceLength) + state.pieceSafe = result.Safe + switch { + case state.complete: + state.eligibleDownload = false + case !result.Safe: + state.manualReview = true + state.manualReason = "missing whole files share pieces with linked content" + default: + state.eligibleDownload = true + } + return state + } + + state.pieceSafe = true + if state.complete { + return state + } + + limit := models.DefaultCrossSeedAutomationSettings().MaxMissingBytesAfterRecheck + if state.member != nil && state.member.MaxMissingBytesAfterRecheck > 0 { + limit = state.member.MaxMissingBytesAfterRecheck + } else if settings != nil { + limit = settings.MaxMissingBytesAfterRecheck + } + if state.missingBytes > limit { + state.manualReview = true + state.manualReason = "post-recheck missing bytes exceed pooled reflink limit" + return state + } + + state.eligibleDownload = true + return state +} + +func (s *Service) propagateCompletedPoolFiles(ctx context.Context, states []*partialPoolState) { + if len(states) < 2 { + return + } + + rechecksByInstance := make(map[int][]string) + ownersByKey := make(map[partialPoolFileKey][]partialPoolFileOwner) + for _, state := range states { + for _, name := range state.completeNames { + key, ok := state.keyByName[name] + if !ok || key.size <= 0 { + continue + } + ownersByKey[key] = append(ownersByKey[key], partialPoolFileOwner{state: state, name: name}) + } + } + if len(ownersByKey) == 0 { + return + } + + for _, recipient := range states { + if recipient.complete || recipient.checking { + continue + } + + recipientPaused := false + propagatedFiles := 0 + for _, name := range recipient.incompleteNames { + if recipient.classByName[name] != 
partialPoolFileWholeMiss { + continue + } + key, ok := recipient.keyByName[name] + if !ok { + continue + } + filePropagated := false + for _, owner := range ownersByKey[key] { + if owner.state.member.TargetInstanceID == recipient.member.TargetInstanceID && + normalizeHash(owner.state.member.TargetHash) == normalizeHash(recipient.member.TargetHash) { + continue + } + if !recipientPaused { + if !s.pausePartialPoolRecipientForPropagation(ctx, recipient) { + break + } + recipientPaused = true + } + if err := s.propagatePartialPoolFile(ctx, owner.state, owner.name, recipient, name); err != nil { + log.Debug(). + Err(err). + Str("sourceFile", owner.name). + Str("targetFile", name). + Str("targetHash", recipient.member.TargetHash). + Msg("[CROSSSEED-POOL] Failed to propagate completed file") + continue + } + filePropagated = true + propagatedFiles++ + break + } + if !filePropagated { + continue + } + } + + if propagatedFiles > 0 { + s.invalidatePartialPoolFileCache(ctx, recipient.member.TargetInstanceID, recipient.member.TargetHash) + // Keep propagated recipients out of downloader selection until the next poll + // refreshes qBittorrent state after the relinked files are rechecked. 
+ recipient.checking = true + schedulePartialPoolBulkHash(rechecksByInstance, recipient.member.TargetInstanceID, recipient.member.TargetHash) + schedulePartialPoolBulkHash(rechecksByInstance, recipient.member.TargetInstanceID, recipient.member.TargetHashV2) + } + } + + s.runPartialPoolBulkAction(ctx, rechecksByInstance, "recheck", "[CROSSSEED-POOL] Failed to trigger recheck after propagation") +} + +func (s *Service) propagatePartialPoolFile( + _ context.Context, + owner *partialPoolState, + ownerName string, + recipient *partialPoolState, + recipientName string, +) error { + size := s.partialPoolFileSize(owner.member, ownerName) + if size <= 0 { + return fmt.Errorf("file %s not found in marker", ownerName) + } + + srcName := ownerName + if liveName := strings.TrimSpace(owner.liveNameByName[ownerName]); liveName != "" { + srcName = liveName + } + if !treeModeFileAvailable(owner.member.ManagedRoot, srcName, size, 1.0) { + return fmt.Errorf("source file unavailable on disk: %s", srcName) + } + dstName := recipientName + if liveName := strings.TrimSpace(recipient.liveNameByName[recipientName]); liveName != "" { + dstName = liveName + } + + srcPath := filepath.Join(owner.member.ManagedRoot, filepath.FromSlash(srcName)) + dstPath := filepath.Join(recipient.member.ManagedRoot, filepath.FromSlash(dstName)) + + if err := os.MkdirAll(filepath.Dir(dstPath), 0o755); err != nil { + return err + } + if _, err := os.Lstat(dstPath); err == nil { + if removeErr := os.Remove(dstPath); removeErr != nil { + return removeErr + } + } else if err != nil && !os.IsNotExist(err) { + return err + } + + plan := &hardlinktree.TreePlan{ + RootDir: recipient.member.ManagedRoot, + Files: []hardlinktree.FilePlan{{ + SourcePath: srcPath, + TargetPath: dstPath, + }}, + } + + if recipient.member.Mode == models.CrossSeedPartialMemberModeReflink { + return reflinktree.Create(plan) + } + return hardlinktree.Create(plan) +} + +func (s *Service) selectPartialPoolDownloader(states []*partialPoolState) 
*partialPoolState { + if len(states) == 0 { + return nil + } + + needCounts := make(map[partialPoolFileKey]int, len(states)*4) + for _, state := range states { + if !state.eligibleDownload || state.checking { + continue + } + for _, key := range state.incompleteKeys { + needCounts[key]++ + } + } + + sort.Slice(states, func(i, j int) bool { + left := partialPoolCandidateScore(states[i], needCounts) + right := partialPoolCandidateScore(states[j], needCounts) + if left != right { + return left > right + } + if states[i].member.Mode != states[j].member.Mode { + return states[i].member.Mode == models.CrossSeedPartialMemberModeReflink + } + return states[i].missingBytes < states[j].missingBytes + }) + + for _, state := range states { + if state.eligibleDownload && !state.checking { + return state + } + } + return nil +} + +func (s *Service) selectPreferredPartialPoolDownloader(poolKey string, states []*partialPoolState, now time.Time) *partialPoolState { + selected := s.selectPartialPoolDownloader(states) + if selected == nil { + s.clearPartialPoolSelection(poolKey) + return nil + } + + candidates := s.rankPartialPoolDownloaders(states) + if len(candidates) == 0 { + s.clearPartialPoolSelection(poolKey) + return nil + } + + selectedKey := partialPoolStateMemberKey(selected) + + s.partialPoolMu.Lock() + defer s.partialPoolMu.Unlock() + + if s.partialPoolBySource == nil { + s.partialPoolBySource = make(map[string]partialPoolSelection) + } + + selection, ok := s.partialPoolBySource[poolKey] + if !ok || selection.MemberKey == "" { + s.partialPoolBySource[poolKey] = partialPoolSelection{ + MemberKey: selectedKey, + SelectedAt: now, + } + return selected + } + + for _, candidate := range candidates { + if partialPoolStateMemberKey(candidate) != selection.MemberKey { + continue + } + if now.Sub(selection.SelectedAt) < partialPoolSelectionLimit || len(candidates) == 1 { + return candidate + } + break + } + + for _, candidate := range candidates { + candidateKey := 
partialPoolStateMemberKey(candidate) + if candidateKey == selection.MemberKey && len(candidates) > 1 { + continue + } + s.partialPoolBySource[poolKey] = partialPoolSelection{ + MemberKey: candidateKey, + SelectedAt: now, + } + return candidate + } + + s.partialPoolBySource[poolKey] = partialPoolSelection{ + MemberKey: selectedKey, + SelectedAt: now, + } + return selected +} + +func (s *Service) rankPartialPoolDownloaders(states []*partialPoolState) []*partialPoolState { + if len(states) == 0 { + return nil + } + + needCounts := make(map[partialPoolFileKey]int, len(states)*4) + candidates := make([]*partialPoolState, 0, len(states)) + for _, state := range states { + if !state.eligibleDownload || state.checking { + continue + } + candidates = append(candidates, state) + for _, key := range state.incompleteKeys { + needCounts[key]++ + } + } + + sort.Slice(candidates, func(i, j int) bool { + left := partialPoolCandidateScore(candidates[i], needCounts) + right := partialPoolCandidateScore(candidates[j], needCounts) + if left != right { + return left > right + } + if candidates[i].member.Mode != candidates[j].member.Mode { + return candidates[i].member.Mode == models.CrossSeedPartialMemberModeReflink + } + return candidates[i].missingBytes < candidates[j].missingBytes + }) + + return candidates +} + +func partialPoolCandidateScore(state *partialPoolState, needCounts map[partialPoolFileKey]int) int64 { + if state == nil || !state.eligibleDownload || state.checking { + return -1 + } + + var score int64 + for _, key := range state.incompleteKeys { + if needCounts[key] > 1 { + score += key.size * 10 + continue + } + score += key.size + } + if state.member.Mode == models.CrossSeedPartialMemberModeReflink { + score++ + } + return score +} + +func (s *Service) pausePartialPoolRecipientForPropagation(ctx context.Context, state *partialPoolState) bool { + if state == nil || state.complete || state.checking { + return false + } + if s.partialPoolTorrentPaused(state.torrent.State) 
{ + return true + } + if err := s.pausePartialPoolHash(ctx, state.member.TargetInstanceID, state.member.TargetHash); err != nil { + log.Debug().Err(err).Str("hash", state.member.TargetHash).Msg("[CROSSSEED-POOL] Failed to pause pooled recipient before propagation") + return false + } + state.torrent.State = qbt.TorrentStatePausedDl + return true +} + +func (s *Service) resumePartialPoolMembers(ctx context.Context, states []*partialPoolState) { + byInstance := make(map[int][]string) + for _, state := range states { + if state == nil || state.checking || s.partialPoolTorrentRunning(state.torrent.State) { + continue + } + schedulePartialPoolBulkHash(byInstance, state.member.TargetInstanceID, state.member.TargetHash) + } + + s.runPartialPoolBulkAction(ctx, byInstance, "resume", "[CROSSSEED-POOL] Failed to resume pooled members") +} + +func (s *Service) registerPartialPoolMember( + ctx context.Context, + sourceInstanceID int, + sourceHash string, + targetInstanceID int, + targetHash string, + targetHashV2 string, + targetName string, + mode string, + managedRoot string, + sourcePieceLength int64, + maxMissingBytesAfterRecheck int64, + sourceFiles qbt.TorrentFiles, +) error { + if s == nil { + return nil + } + if s.partialPoolStore == nil { + log.Warn(). + Int("sourceInstanceID", sourceInstanceID). + Str("sourceHash", sourceHash). + Int("targetInstanceID", targetInstanceID). + Str("targetHash", targetHash). + Str("targetHashV2", targetHashV2). + Str("targetName", targetName). + Str("mode", mode). 
+ Msg("[CROSSSEED-POOL] Partial pool store unavailable; skipping pooled registration") + return nil + } + + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: sourceInstanceID, + SourceHash: sourceHash, + TargetInstanceID: targetInstanceID, + TargetHash: targetHash, + TargetHashV2: targetHashV2, + TargetAddedOn: s.partialPoolRegistrationTargetAddedOn(ctx, targetInstanceID, targetHash, targetHashV2), + TargetName: targetName, + Mode: mode, + ManagedRoot: managedRoot, + SourcePieceLength: sourcePieceLength, + MaxMissingBytesAfterRecheck: maxMissingBytesAfterRecheck, + SourceFiles: s.partialPoolRegistrationFiles(ctx, targetInstanceID, targetHash, targetHashV2, sourceFiles), + ExpiresAt: time.Now().UTC().Add(partialPoolMarkerTTL), + } + + stored, err := s.partialPoolStore.Upsert(ctx, member) + if err != nil { + log.Warn(). + Err(err). + Int("sourceInstanceID", sourceInstanceID). + Str("sourceHash", sourceHash). + Int("targetInstanceID", targetInstanceID). + Str("targetHash", targetHash). + Str("targetHashV2", targetHashV2). + Str("targetName", targetName). + Str("mode", mode). + Int("sourceFiles", len(member.SourceFiles)). + Int64("maxMissingBytesAfterRecheck", maxMissingBytesAfterRecheck). + Msg("[CROSSSEED-POOL] Failed to register pooled member") + return err + } + log.Info(). + Int("sourceInstanceID", stored.SourceInstanceID). + Str("sourceHash", stored.SourceHash). + Int("targetInstanceID", stored.TargetInstanceID). + Str("targetHash", stored.TargetHash). + Str("targetHashV2", stored.TargetHashV2). + Int64("targetAddedOn", stored.TargetAddedOn). + Str("targetName", stored.TargetName). + Str("mode", stored.Mode). + Int("sourceFiles", len(stored.SourceFiles)). + Int64("missingBytesLimit", stored.MaxMissingBytesAfterRecheck). + Str("poolKey", partialPoolSourceKey(stored)). + Msg("[CROSSSEED-POOL] Registered pooled member") + + // Persist first, then update in-memory indexes. 
This is intentionally not + // atomic: a crash in between leaves restoreable state in the DB, and + // RestoreActivePartialPools rebuilds memory from the persisted members. + s.partialPoolMu.Lock() + s.storePartialPoolMemberLocked(stored) + s.partialPoolMu.Unlock() + s.signalPartialPoolWake() + return nil +} + +func (s *Service) partialPoolRegistrationTargetAddedOn( + ctx context.Context, + instanceID int, + targetHash string, + targetHashV2 string, +) int64 { + if s == nil || s.syncManager == nil || instanceID <= 0 { + return 0 + } + + torrent, found, err := s.syncManager.HasTorrentByAnyHash(ctx, instanceID, []string{targetHash, targetHashV2}) + if err != nil { + log.Debug(). + Err(err). + Int("instanceID", instanceID). + Str("targetHash", targetHash). + Str("targetHashV2", targetHashV2). + Msg("[CROSSSEED-POOL] Failed to resolve target torrent AddedOn for pooled registration") + return 0 + } + if !found || torrent == nil { + return 0 + } + + return torrent.AddedOn +} + +func (s *Service) partialPoolRegistrationFiles( + ctx context.Context, + instanceID int, + targetHash string, + targetHashV2 string, + fallback qbt.TorrentFiles, +) []models.CrossSeedPartialFile { + hashes := uniqueStrings([]string{targetHash, targetHashV2}) + if s != nil && s.syncManager != nil && instanceID > 0 && len(hashes) > 0 { + filesByHash, err := s.getPartialPoolTorrentFilesBatch(ctx, instanceID, hashes) + if err != nil { + log.Debug(). + Err(err). + Int("instanceID", instanceID). + Str("targetHash", targetHash). 
+ Msg("[CROSSSEED-POOL] Failed to load target torrent files for pooled registration") + } else { + for _, hash := range hashes { + files, ok := filesByHash[normalizeHash(hash)] + if !ok || len(files) == 0 { + continue + } + return partialPoolStoredFilesFromTorrentFiles(files) + } + } + } + + return partialPoolStoredFilesFromTorrentFiles(fallback) +} + +func partialPoolStoredFilesFromTorrentFiles(files qbt.TorrentFiles) []models.CrossSeedPartialFile { + storedFiles := make([]models.CrossSeedPartialFile, 0, len(files)) + for _, file := range files { + storedFiles = append(storedFiles, models.CrossSeedPartialFile{ + Name: file.Name, + Size: file.Size, + Key: normalizeFileKey(file.Name), + }) + } + return storedFiles +} + +func (s *Service) partialPoolOwnsLiveTorrent(ctx context.Context, instanceID int, torrent qbt.Torrent) bool { + key := partialPoolLookupKey(instanceID, torrent.Hash) + if key == "" { + return false + } + + now := time.Now().UTC() + + s.partialPoolMu.RLock() + member, ok := s.partialPoolByHash[key] + expired := ok && partialPoolMemberExpired(member, now) + s.partialPoolMu.RUnlock() + if !ok { + return false + } + if expired { + s.partialPoolMu.Lock() + member, ok = s.partialPoolByHash[key] + if ok && partialPoolMemberExpired(member, now) { + s.removePartialPoolMemberLocked(member) + } + s.partialPoolMu.Unlock() + return false + } + if s.partialPoolMemberMatchesTorrent(member, torrent) { + return true + } + + log.Info(). + Int("instanceID", instanceID). + Str("hash", torrent.Hash). + Int64("storedAddedOn", member.TargetAddedOn). + Int64("liveAddedOn", torrent.AddedOn). 
+ Msg("[CROSSSEED-POOL] Clearing stale pooled ownership for re-added torrent") + s.removePartialPoolMember(ctx, member) + return false +} + +func (s *Service) partialPoolOwnsTorrent(instanceID int, hash string) bool { + key := partialPoolLookupKey(instanceID, hash) + if key == "" { + return false + } + + now := time.Now().UTC() + + s.partialPoolMu.RLock() + member, ok := s.partialPoolByHash[key] + expired := ok && partialPoolMemberExpired(member, now) + s.partialPoolMu.RUnlock() + if !ok { + return false + } + if !expired { + return true + } + + s.partialPoolMu.Lock() + member, ok = s.partialPoolByHash[key] + if ok && partialPoolMemberExpired(member, now) { + s.removePartialPoolMemberLocked(member) + } + s.partialPoolMu.Unlock() + + return false +} + +func (s *Service) partialPoolMemberMatchesTorrent(member *models.CrossSeedPartialPoolMember, torrent qbt.Torrent) bool { + if member == nil { + return false + } + if !torrentMatchesAnyHash(torrent, []string{member.TargetHash, member.TargetHashV2}) { + return false + } + if member.TargetAddedOn == 0 || torrent.AddedOn == 0 { + return true + } + return member.TargetAddedOn == torrent.AddedOn +} + +func (s *Service) storePartialPoolMemberLocked(member *models.CrossSeedPartialPoolMember) { + if member == nil { + return + } + if s.partialPoolByHash == nil { + s.partialPoolByHash = make(map[string]*models.CrossSeedPartialPoolMember) + } + if s.partialPoolBySource == nil { + s.partialPoolBySource = make(map[string]partialPoolSelection) + } + if key := partialPoolLookupKey(member.TargetInstanceID, member.TargetHash); key != "" { + s.partialPoolByHash[key] = member + } + if key := partialPoolLookupKey(member.TargetInstanceID, member.TargetHashV2); key != "" { + s.partialPoolByHash[key] = member + } +} + +func (s *Service) listPartialPoolMembers() []*models.CrossSeedPartialPoolMember { + s.partialPoolMu.RLock() + defer s.partialPoolMu.RUnlock() + + now := time.Now().UTC() + seen := make(map[string]struct{}, 
len(s.partialPoolByHash)) + members := make([]*models.CrossSeedPartialPoolMember, 0, len(s.partialPoolByHash)) + for _, member := range s.partialPoolByHash { + if partialPoolMemberExpired(member, now) { + continue + } + key := partialPoolLookupKey(member.TargetInstanceID, member.TargetHash) + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + members = append(members, member) + } + return members +} + +func (s *Service) drainPartialPoolMembers(ctx context.Context) error { + if s == nil { + return nil + } + + membersByKey := make(map[string]*models.CrossSeedPartialPoolMember) + + s.partialPoolMu.Lock() + for _, member := range s.partialPoolByHash { + if member == nil { + continue + } + key := partialPoolLookupKey(member.TargetInstanceID, member.TargetHash) + if key == "" { + continue + } + membersByKey[key] = member + } + s.partialPoolByHash = make(map[string]*models.CrossSeedPartialPoolMember) + s.partialPoolBySource = make(map[string]partialPoolSelection) + s.partialPoolMu.Unlock() + + if s.partialPoolStore == nil { + return nil + } + + storedMembers, err := s.partialPoolStore.ListActive(ctx, time.Now().UTC()) + if err != nil { + return fmt.Errorf("list active pooled members for drain: %w", err) + } + for _, member := range storedMembers { + key := partialPoolLookupKey(member.TargetInstanceID, member.TargetHash) + if key == "" { + continue + } + membersByKey[key] = member + } + + var errs []error + for _, member := range membersByKey { + if err := s.partialPoolStore.DeleteByAnyHash(ctx, member.TargetInstanceID, member.TargetHash, member.TargetHashV2); err != nil { + errs = append(errs, fmt.Errorf("delete pooled member %d/%s: %w", member.TargetInstanceID, normalizeHash(member.TargetHash), err)) + } + } + if len(errs) > 0 { + return errors.Join(errs...) 
+ } + + return nil +} + +func partialPoolMemberExpired(member *models.CrossSeedPartialPoolMember, now time.Time) bool { + if member == nil { + return true + } + if member.ExpiresAt.IsZero() { + return false + } + return !member.ExpiresAt.After(now) +} + +func (s *Service) pruneExpiredPartialPoolMembers(now time.Time) { + if s == nil { + return + } + + s.partialPoolMu.Lock() + defer s.partialPoolMu.Unlock() + + for _, member := range s.partialPoolByHash { + if !partialPoolMemberExpired(member, now) { + continue + } + s.removePartialPoolMemberLocked(member) + } +} + +func (s *Service) removePartialPoolMemberLocked(member *models.CrossSeedPartialPoolMember) { + if member == nil { + return + } + delete(s.partialPoolByHash, partialPoolLookupKey(member.TargetInstanceID, member.TargetHash)) + delete(s.partialPoolByHash, partialPoolLookupKey(member.TargetInstanceID, member.TargetHashV2)) + poolKey := partialPoolSourceKey(member) + if selection, ok := s.partialPoolBySource[poolKey]; ok { + memberKey := partialPoolLookupKey(member.TargetInstanceID, member.TargetHash) + if selection.MemberKey == memberKey { + delete(s.partialPoolBySource, poolKey) + } + } +} + +func (s *Service) removePartialPoolMember(ctx context.Context, member *models.CrossSeedPartialPoolMember) { + if member == nil { + return + } + if s.partialPoolStore != nil { + if err := s.partialPoolStore.DeleteByAnyHash(ctx, member.TargetInstanceID, member.TargetHash, member.TargetHashV2); err != nil { + log.Debug().Err(err).Str("hash", member.TargetHash).Msg("[CROSSSEED-POOL] Failed to delete pooled member marker") + } + } + + s.partialPoolMu.Lock() + s.removePartialPoolMemberLocked(member) + s.partialPoolMu.Unlock() +} + +func (s *Service) dropPartialPoolMember(ctx context.Context, member *models.CrossSeedPartialPoolMember, reason string) { + if member == nil { + return + } + if err := s.pausePartialPoolHash(ctx, member.TargetInstanceID, member.TargetHash); err != nil { + log.Debug().Err(err).Str("hash", 
member.TargetHash).Msg("[CROSSSEED-POOL] Failed to pause pooled member for manual review") + return + } + log.Info(). + Int("instanceID", member.TargetInstanceID). + Str("hash", member.TargetHash). + Str("mode", member.Mode). + Str("reason", reason). + Msg("[CROSSSEED-POOL] Leaving pooled member paused for manual review") + s.removePartialPoolMember(ctx, member) +} + +func (s *Service) signalPartialPoolWake() { + if s == nil || s.partialPoolWake == nil { + return + } + select { + case s.partialPoolWake <- struct{}{}: + default: + } +} + +func (s *Service) invalidatePartialPoolFileCache(ctx context.Context, instanceID int, hash string) { + invalidator, ok := s.syncManager.(partialPoolFileCacheInvalidator) + if !ok { + return + } + _ = invalidator.InvalidateFileCache(ctx, instanceID, hash) +} + +func (s *Service) partialPoolFileSize(member *models.CrossSeedPartialPoolMember, name string) int64 { + for _, file := range member.SourceFiles { + if file.Name == name { + return file.Size + } + } + return 0 +} + +func (s *Service) pausePartialPoolHash(ctx context.Context, instanceID int, hash string) error { + if s == nil || s.syncManager == nil || instanceID <= 0 || strings.TrimSpace(hash) == "" { + return nil + } + return s.syncManager.BulkAction(ctx, instanceID, []string{hash}, "pause") +} + +func (s *Service) partialPoolTorrentPaused(state qbt.TorrentState) bool { + return state == qbt.TorrentStatePausedDl || + state == qbt.TorrentStatePausedUp || + state == qbt.TorrentStateStoppedDl || + state == qbt.TorrentStateStoppedUp +} + +func (s *Service) partialPoolHasActiveDownloader(states []*partialPoolState) bool { + for _, state := range states { + if state == nil || state.complete || state.checking { + continue + } + if s.partialPoolTorrentDownloading(state.torrent.State) { + return true + } + } + return false +} + +func (s *Service) partialPoolTorrentDownloading(state qbt.TorrentState) bool { + return state == qbt.TorrentStateDownloading || + state == 
qbt.TorrentStateStalledDl ||
+		state == qbt.TorrentStateMetaDl ||
+		state == qbt.TorrentStateQueuedDl ||
+		state == qbt.TorrentStateAllocating ||
+		state == qbt.TorrentStateForcedDl
+}
+
+// partialPoolTorrentRunning reports whether the torrent is actively running:
+// any downloading state or any of the seeding/queued-up states.
+func (s *Service) partialPoolTorrentRunning(state qbt.TorrentState) bool {
+	switch state {
+	case qbt.TorrentStateUploading, qbt.TorrentStateStalledUp,
+		qbt.TorrentStateQueuedUp, qbt.TorrentStateForcedUp:
+		return true
+	}
+	return s.partialPoolTorrentDownloading(state)
+}
+
+// isTorrentCheckingState reports whether qBittorrent is currently verifying
+// data for the torrent (recheck or resume-data check).
+func isTorrentCheckingState(state qbt.TorrentState) bool {
+	switch state {
+	case qbt.TorrentStateCheckingUp, qbt.TorrentStateCheckingDl,
+		qbt.TorrentStateCheckingResumeData:
+		return true
+	default:
+		return false
+	}
+}
+
+// shouldUsePartialPool decides whether an incoming cross-seed should join the
+// pooled partial-completion flow. Pooling must be enabled in settings; extra
+// files, disc layouts, and non-exact matches all route through the pool.
+func shouldUsePartialPool(settings *models.CrossSeedAutomationSettings, matchType string, hasExtras bool, discLayout bool) bool {
+	if settings == nil || !settings.EnablePooledPartialCompletion {
+		return false
+	}
+	return hasExtras || discLayout || matchType != "exact"
+}
+
+// partialPoolShouldKeepPaused reports whether a newly added torrent must stay
+// paused: always for pooled members, otherwise per the request flag.
+func partialPoolShouldKeepPaused(req *CrossSeedRequest, pooled bool) bool {
+	return pooled || req.SkipAutoResume
+}
+
+// uniqueStrings trims whitespace, drops empties, and de-duplicates while
+// preserving first-seen order. Returns nil for empty input.
+func uniqueStrings(values []string) []string {
+	if len(values) == 0 {
+		return nil
+	}
+	seen := make(map[string]struct{}, len(values))
+	out := make([]string, 0, len(values))
+	for _, raw := range values {
+		v := strings.TrimSpace(raw)
+		if v == "" {
+			continue
+		}
+		if _, dup := seen[v]; dup {
+			continue
+		}
+		seen[v] = struct{}{}
+		out = append(out, v)
+	}
+	return out
+}
+
+// partialPoolStateMemberKey returns the in-memory lookup key for a state's
+// member, or "" when either the state or its member is nil.
+func partialPoolStateMemberKey(state *partialPoolState) string {
+	if state == nil || state.member == nil {
+		return ""
+	}
+	return partialPoolLookupKey(state.member.TargetInstanceID, state.member.TargetHash)
+}
+
+// schedulePartialPoolBulkHash records a hash for a later bulk action, keyed
+// by instance, skipping blanks and case-insensitive duplicates.
+func schedulePartialPoolBulkHash(byInstance map[int][]string, instanceID int, hash string) {
+	if byInstance == nil || instanceID <= 0 {
+		return
+	}
+	hash = strings.TrimSpace(hash)
+	if hash == "" {
+		return
+	}
+	existing := byInstance[instanceID]
+	for _, h := range existing {
+		if strings.EqualFold(h, hash) {
+			return
+		}
+	}
+	byInstance[instanceID] = append(existing, hash)
+}
+
+// runPartialPoolBulkAction issues the queued bulk action per instance,
+// logging failures at debug level and continuing with the next instance.
+func (s *Service) runPartialPoolBulkAction(ctx context.Context, byInstance map[int][]string, action string, logMessage string) {
+	for instanceID, hashes := range byInstance {
+		if len(hashes) == 0 {
+			continue
+		}
+		if err := s.syncManager.BulkAction(ctx, instanceID, hashes, action); err != nil {
+			log.Debug().
+				Err(err).
+				Int("instanceID", instanceID).
+				Strs("hashes", hashes).
+				Msg(logMessage)
+		}
+	}
+}
+
+// clearPartialPoolSelection forgets the cached downloader selection for a
+// source pool key. An empty key is a no-op.
+func (s *Service) clearPartialPoolSelection(poolKey string) {
+	if poolKey == "" {
+		return
+	}
+	s.partialPoolMu.Lock()
+	delete(s.partialPoolBySource, poolKey)
+	s.partialPoolMu.Unlock()
+}
diff --git a/internal/services/crossseed/partial_pool_test.go b/internal/services/crossseed/partial_pool_test.go
new file mode 100644
index 000000000..a555c2e17
--- /dev/null
+++ b/internal/services/crossseed/partial_pool_test.go
@@ -0,0 +1,1543 @@
+// Copyright (c) 2025-2026, s0up and the autobrr contributors.
+// SPDX-License-Identifier: GPL-2.0-or-later + +package crossseed + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync/atomic" + "syscall" + "testing" + "time" + + qbt "github.com/autobrr/go-qbittorrent" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/autobrr/qui/internal/database" + "github.com/autobrr/qui/internal/models" + internalqb "github.com/autobrr/qui/internal/qbittorrent" +) + +type partialPoolTestSyncManager struct { + torrentsByInstance map[int][]qbt.Torrent + filesByHash map[string]qbt.TorrentFiles + bulkActions []string + bulkActionErr error + torrentByAnyHash map[string]qbt.Torrent + forceRefreshCalls int + lastForceRefresh bool +} + +func (m *partialPoolTestSyncManager) GetTorrents(_ context.Context, instanceID int, filter qbt.TorrentFilterOptions) ([]qbt.Torrent, error) { + torrents := m.torrentsByInstance[instanceID] + if len(filter.Hashes) == 0 { + return torrents, nil + } + + allowed := make(map[string]struct{}, len(filter.Hashes)) + for _, hash := range filter.Hashes { + allowed[normalizeHash(hash)] = struct{}{} + } + + filtered := make([]qbt.Torrent, 0, len(torrents)) + for _, torrent := range torrents { + if _, ok := allowed[normalizeHash(torrent.Hash)]; ok { + filtered = append(filtered, torrent) + } + } + return filtered, nil +} + +func (m *partialPoolTestSyncManager) GetTorrentFilesBatch(ctx context.Context, _ int, hashes []string) (map[string]qbt.TorrentFiles, error) { + forced, _ := ctx.Value(partialPoolForceRefreshContextKey{}).(bool) + m.lastForceRefresh = forced + if forced { + m.forceRefreshCalls++ + } + result := make(map[string]qbt.TorrentFiles, len(hashes)) + for _, hash := range hashes { + if files, ok := m.filesByHash[normalizeHash(hash)]; ok { + result[normalizeHash(hash)] = files + } + } + return result, nil +} + +func (*partialPoolTestSyncManager) ExportTorrent(_ context.Context, _ int, hash string) ([]byte, string, string, error) { + return 
nil, "", "", errors.New("not implemented: " + hash) +} + +func (m *partialPoolTestSyncManager) HasTorrentByAnyHash(_ context.Context, _ int, hashes []string) (*qbt.Torrent, bool, error) { + for _, hash := range hashes { + if torrent, ok := m.torrentByAnyHash[normalizeHash(hash)]; ok { + torrentCopy := torrent + return &torrentCopy, true, nil + } + } + return nil, false, nil +} + +func (*partialPoolTestSyncManager) GetTorrentProperties(context.Context, int, string) (*qbt.TorrentProperties, error) { + return nil, errors.New("not implemented") +} + +func (*partialPoolTestSyncManager) GetAppPreferences(context.Context, int) (qbt.AppPreferences, error) { + return qbt.AppPreferences{}, errors.New("not implemented") +} + +func (*partialPoolTestSyncManager) AddTorrent(context.Context, int, []byte, map[string]string) error { + return errors.New("not implemented") +} + +func (m *partialPoolTestSyncManager) BulkAction(_ context.Context, instanceID int, hashes []string, action string) error { + m.bulkActions = append(m.bulkActions, fmt.Sprintf("%d:%s:%v", instanceID, action, hashes)) + return m.bulkActionErr +} + +func (*partialPoolTestSyncManager) GetCachedInstanceTorrents(context.Context, int) ([]internalqb.CrossInstanceTorrentView, error) { + return nil, nil +} + +func (*partialPoolTestSyncManager) ExtractDomainFromURL(urlStr string) string { + return urlStr +} + +func (*partialPoolTestSyncManager) GetQBittorrentSyncManager(context.Context, int) (*qbt.SyncManager, error) { + return nil, errors.New("not implemented") +} + +func (*partialPoolTestSyncManager) RenameTorrent(context.Context, int, string, string) error { + return errors.New("not implemented") +} + +func (*partialPoolTestSyncManager) RenameTorrentFile(context.Context, int, string, string, string) error { + return errors.New("not implemented") +} + +func (*partialPoolTestSyncManager) RenameTorrentFolder(context.Context, int, string, string, string) error { + return errors.New("not implemented") +} + +func 
(*partialPoolTestSyncManager) SetTags(context.Context, int, []string, string) error { + return errors.New("not implemented") +} + +func (*partialPoolTestSyncManager) GetCategories(context.Context, int) (map[string]qbt.Category, error) { + return nil, nil +} + +func (*partialPoolTestSyncManager) CreateCategory(context.Context, int, string, string) error { + return errors.New("not implemented") +} + +func TestValidateAndNormalizeSettingsPartialPoolDefaults(t *testing.T) { + t.Parallel() + + settings := &models.CrossSeedAutomationSettings{ + RunIntervalMinutes: 0, + MaxResultsPerRun: 0, + SizeMismatchTolerancePercent: -1, + MaxMissingBytesAfterRecheck: 0, + } + + (&Service{}).validateAndNormalizeSettings(settings) + + assert.Equal(t, 120, settings.RunIntervalMinutes) + assert.Equal(t, 50, settings.MaxResultsPerRun) + assert.InDelta(t, 5.0, settings.SizeMismatchTolerancePercent, 0.0001) + assert.Equal(t, models.DefaultCrossSeedAutomationSettings().MaxMissingBytesAfterRecheck, settings.MaxMissingBytesAfterRecheck) +} + +func TestApplyPartialPoolSettingsPolicies(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + member *models.CrossSeedPartialPoolMember + files qbt.TorrentFiles + torrentState qbt.TorrentState + settings *models.CrossSeedAutomationSettings + wantEligible bool + wantManual bool + wantReason string + wantMissing int64 + wantWholeOnly bool + wantAwaiting bool + }{ + { + name: "hardlink whole missing files stay eligible when piece safe", + member: &models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 100, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "disc/file1.mkv", Size: 100}, + {Name: "disc/file2.nfo", Size: 100}, + }, + }, + files: qbt.TorrentFiles{ + {Index: 0, Name: "disc/file1.mkv", Progress: 1, Size: 100}, + {Index: 1, Name: "disc/file2.nfo", Progress: 0, Size: 100}, + }, + settings: models.DefaultCrossSeedAutomationSettings(), + wantEligible: true, + wantManual: 
false, + wantMissing: 100, + wantWholeOnly: true, + }, + { + name: "hardlink partial file gaps require manual review", + member: &models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 100, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "disc/file1.mkv", Size: 200}, + }, + }, + files: qbt.TorrentFiles{ + {Index: 0, Name: "disc/file1.mkv", Progress: 0.5, Size: 200}, + }, + settings: models.DefaultCrossSeedAutomationSettings(), + wantEligible: false, + wantManual: true, + wantReason: "post-recheck missing bytes exist inside linked files", + wantMissing: 100, + wantWholeOnly: false, + }, + { + name: "hardlink whole missing files still stop when piece boundary is unsafe", + member: &models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 100, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "disc/file1.mkv", Size: 150}, + {Name: "disc/file2.nfo", Size: 100}, + }, + }, + files: qbt.TorrentFiles{ + {Index: 0, Name: "disc/file1.mkv", Progress: 1, Size: 150}, + {Index: 1, Name: "disc/file2.nfo", Progress: 0, Size: 100}, + }, + settings: models.DefaultCrossSeedAutomationSettings(), + wantEligible: false, + wantManual: true, + wantReason: "missing whole files share pieces with linked content", + wantMissing: 100, + wantWholeOnly: true, + }, + { + name: "reflink partial gaps within threshold stay eligible", + member: &models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "disc/file1.mkv", Size: 200}, + }, + }, + files: qbt.TorrentFiles{ + {Index: 0, Name: "disc/file1.mkv", Progress: 0.5, Size: 200}, + }, + settings: &models.CrossSeedAutomationSettings{ + MaxMissingBytesAfterRecheck: 150, + }, + wantEligible: true, + wantManual: false, + wantMissing: 100, + wantWholeOnly: false, + }, + { + name: "reflink partial gaps above threshold stay pending while checking", + member: 
&models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "disc/file1.mkv", Size: 200}, + }, + }, + files: qbt.TorrentFiles{ + {Index: 0, Name: "disc/file1.mkv", Progress: 0.25, Size: 200}, + }, + torrentState: qbt.TorrentStateCheckingResumeData, + settings: &models.CrossSeedAutomationSettings{ + MaxMissingBytesAfterRecheck: 100, + }, + wantEligible: false, + wantManual: false, + wantMissing: 150, + wantWholeOnly: false, + wantAwaiting: true, + }, + { + name: "reflink partial gaps above threshold pause for manual review", + member: &models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "disc/file1.mkv", Size: 200}, + }, + }, + files: qbt.TorrentFiles{ + {Index: 0, Name: "disc/file1.mkv", Progress: 0.25, Size: 200}, + }, + settings: &models.CrossSeedAutomationSettings{ + MaxMissingBytesAfterRecheck: 100, + }, + wantEligible: false, + wantManual: true, + wantReason: "post-recheck missing bytes exceed pooled reflink limit", + wantMissing: 150, + wantWholeOnly: false, + }, + { + name: "reflink partial gaps use member threshold snapshot over current settings", + member: &models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeReflink, + MaxMissingBytesAfterRecheck: 300, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "disc/file1.mkv", Size: 200}, + }, + }, + files: qbt.TorrentFiles{ + {Index: 0, Name: "disc/file1.mkv", Progress: 0.25, Size: 200}, + }, + settings: &models.CrossSeedAutomationSettings{ + MaxMissingBytesAfterRecheck: 100, + }, + wantEligible: true, + wantManual: false, + wantMissing: 150, + wantWholeOnly: false, + }, + { + name: "reflink complete files match by normalized basename and size across layout changes", + member: &models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: 
"Movie.2024-GRP/Movie.2024-GRP.mkv", Size: 200}, + }, + }, + files: qbt.TorrentFiles{ + {Index: 0, Name: "Movie.2024-GRP.mkv", Progress: 1, Size: 200}, + }, + settings: models.DefaultCrossSeedAutomationSettings(), + wantEligible: false, + wantManual: false, + wantMissing: 0, + wantWholeOnly: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + svc := &Service{} + torrentState := tt.torrentState + if torrentState == "" { + torrentState = qbt.TorrentStatePausedDl + } + state := svc.buildPartialPoolState(tt.member, qbt.Torrent{State: torrentState}, tt.files) + state = svc.applyPartialPoolSettings(state, tt.settings) + + assert.Equal(t, tt.wantMissing, state.missingBytes) + assert.Equal(t, tt.wantWholeOnly, state.allWholeMissing) + assert.Equal(t, tt.wantEligible, state.eligibleDownload) + assert.Equal(t, tt.wantManual, state.manualReview) + assert.Equal(t, tt.wantAwaiting, state.awaitingRecheck) + if tt.wantMissing == 0 { + assert.True(t, state.complete) + } + if tt.wantReason != "" { + assert.Equal(t, tt.wantReason, state.manualReason) + } + }) + } +} + +func TestBuildPartialPoolStateExactNameRequiresSizeMatch(t *testing.T) { + t.Parallel() + + svc := &Service{} + member := &models.CrossSeedPartialPoolMember{ + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "Movie.mkv", Size: 10_000}, + }, + } + + state := svc.buildPartialPoolState(member, qbt.Torrent{State: qbt.TorrentStatePausedDl}, qbt.TorrentFiles{ + {Index: 0, Name: "Movie.mkv", Progress: 0.5, Size: 5_000}, + {Index: 1, Name: "movie.mkv", Progress: 1, Size: 10_000}, + }) + + require.NotNil(t, state) + assert.Equal(t, int64(0), state.missingBytes) + assert.True(t, state.complete) + assert.Equal(t, "movie.mkv", state.liveNameByName["Movie.mkv"]) + assert.Equal(t, partialPoolFileComplete, state.classByName["Movie.mkv"]) + assert.Equal(t, partialPoolFileComplete, state.classByLiveName["movie.mkv"]) +} + +func 
TestPartialPoolConsumeLiveFileRemovesMatchInPlace(t *testing.T) { + t.Parallel() + + key := partialPoolFileKey{ + key: normalizeFileKey("Movie.mkv"), + size: 10_000, + } + first := partialPoolLiveFile{Index: 0, Name: "Movie.mkv", Size: 10_000} + second := partialPoolLiveFile{Index: 1, Name: "movie.mkv", Size: 10_000} + third := partialPoolLiveFile{Index: 2, Name: "MOVIE.mkv", Size: 10_000} + liveByKey := map[partialPoolFileKey][]partialPoolLiveFile{ + key: {first, second, third}, + } + + partialPoolConsumeLiveFile(liveByKey, second) + + require.Contains(t, liveByKey, key) + assert.Len(t, liveByKey[key], 2) + assert.ElementsMatch(t, []partialPoolLiveFile{first, third}, liveByKey[key]) +} + +func TestSelectPartialPoolDownloaderPrefersReflinkOnTie(t *testing.T) { + t.Parallel() + + hardlink := &partialPoolState{ + member: &models.CrossSeedPartialPoolMember{Mode: models.CrossSeedPartialMemberModeHardlink}, + incompleteNames: []string{"shared.bin"}, + incompleteKeys: []partialPoolFileKey{{key: normalizeFileKey("shared.bin"), size: 0}}, + eligibleDownload: true, + } + reflink := &partialPoolState{ + member: &models.CrossSeedPartialPoolMember{Mode: models.CrossSeedPartialMemberModeReflink}, + incompleteNames: []string{"shared.bin"}, + incompleteKeys: []partialPoolFileKey{{key: normalizeFileKey("shared.bin"), size: 0}}, + eligibleDownload: true, + } + + selected := (&Service{}).selectPartialPoolDownloader([]*partialPoolState{hardlink, reflink}) + require.NotNil(t, selected) + assert.Equal(t, models.CrossSeedPartialMemberModeReflink, selected.member.Mode) +} + +func TestRestoreActivePartialPoolsOnlyRestoresActiveMembers(t *testing.T) { + t.Parallel() + + dbPath := filepath.Join(t.TempDir(), "partial-pool.db") + db, err := database.New(dbPath) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + store := models.NewCrossSeedPartialPoolMemberStore(db) + ctx := context.Background() + + _, err = store.Upsert(ctx, 
&models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: "activehash", + Mode: models.CrossSeedPartialMemberModeHardlink, + ManagedRoot: t.TempDir(), + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{{Name: "file.mkv", Size: 100}}, + ExpiresAt: time.Now().UTC().Add(time.Hour), + }) + require.NoError(t, err) + + _, err = store.Upsert(ctx, &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: "expiredhash", + Mode: models.CrossSeedPartialMemberModeHardlink, + ManagedRoot: t.TempDir(), + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{{Name: "file.mkv", Size: 100}}, + ExpiresAt: time.Now().UTC().Add(-time.Hour), + }) + require.NoError(t, err) + + svc := &Service{ + partialPoolStore: store, + partialPoolWake: make(chan struct{}, 1), + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + } + + require.NoError(t, svc.RestoreActivePartialPools(ctx)) + assert.True(t, svc.partialPoolOwnsTorrent(1, "activehash")) + assert.False(t, svc.partialPoolOwnsTorrent(1, "expiredhash")) +} + +func TestProcessPartialPools_DrainsPooledMembersWhenAutomationDisabled(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + settings *models.CrossSeedAutomationSettings + }{ + { + name: "nil settings", + settings: nil, + }, + { + name: "pooled completion disabled", + settings: func() *models.CrossSeedAutomationSettings { + settings := models.DefaultCrossSeedAutomationSettings() + settings.EnablePooledPartialCompletion = false + return settings + }(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + dbPath := filepath.Join(t.TempDir(), "partial-pool.db") + db, err := database.New(dbPath) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + store := 
models.NewCrossSeedPartialPoolMemberStore(db) + ctx := context.Background() + + member, err := store.Upsert(ctx, &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: "targethash", + Mode: models.CrossSeedPartialMemberModeHardlink, + ManagedRoot: t.TempDir(), + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{{Name: "file.mkv", Size: 100}}, + ExpiresAt: time.Now().UTC().Add(time.Hour), + }) + require.NoError(t, err) + + svc := &Service{ + partialPoolStore: store, + partialPoolWake: make(chan struct{}, 1), + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { return tc.settings, nil }, + } + + svc.storePartialPoolMemberLocked(member) + require.True(t, svc.partialPoolOwnsTorrent(1, "targethash")) + + svc.processPartialPools(ctx) + + assert.False(t, svc.partialPoolOwnsTorrent(1, "targethash")) + + active, err := store.ListActive(ctx, time.Now().UTC()) + require.NoError(t, err) + assert.Empty(t, active) + }) + } +} + +func TestHandleTorrentCompletion_PooledMemberBypassesCompletionSettings(t *testing.T) { + t.Parallel() + + syncManager := &partialPoolTestSyncManager{} + svc := &Service{ + syncManager: syncManager, + partialPoolWake: make(chan struct{}, 1), + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + } + svc.partialPoolByHash[partialPoolLookupKey(1, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")] = &models.CrossSeedPartialPoolMember{ + TargetInstanceID: 1, + TargetHash: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + } + + svc.HandleTorrentCompletion(context.Background(), 1, qbt.Torrent{ + Hash: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Name: "pooled", + Progress: 1, + CompletionOn: 123, + }) + + select { + case <-svc.partialPoolWake: + case <-time.After(time.Second): + 
t.Fatal("expected pooled completion to wake partial pool worker") + } + assert.Empty(t, syncManager.bulkActions) +} + +func TestHandleTorrentCompletion_RemovesStalePooledMemberForReaddedTorrent(t *testing.T) { + t.Parallel() + + targetHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + syncManager := &partialPoolTestSyncManager{} + svc := &Service{ + syncManager: syncManager, + partialPoolWake: make(chan struct{}, 1), + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), + } + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: strings.ToUpper(targetHash), + TargetAddedOn: 100, + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + svc.storePartialPoolMemberLocked(member) + + svc.HandleTorrentCompletion(context.Background(), 1, qbt.Torrent{ + Hash: targetHash, + Name: "re-added", + AddedOn: 200, + Progress: 1, + CompletionOn: 123, + }) + + select { + case <-svc.partialPoolWake: + t.Fatal("did not expect stale pooled member to short-circuit completion handling") + default: + } + assert.False(t, svc.partialPoolOwnsTorrent(1, targetHash)) +} + +func TestHandleTorrentAdded_RemovesStalePooledMemberForReaddedTorrent(t *testing.T) { + t.Parallel() + + targetHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + svc := &Service{ + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), + } + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: strings.ToUpper(targetHash), + TargetAddedOn: 100, + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + svc.storePartialPoolMemberLocked(member) + + svc.HandleTorrentAdded(context.Background(), 1, qbt.Torrent{ + Hash: targetHash, + AddedOn: 200, + }) + + assert.False(t, svc.partialPoolOwnsTorrent(1, targetHash)) +} + 
+func TestHandleTorrentAdded_KeepsPooledMemberWhenAddedOnUnknown(t *testing.T) { + t.Parallel() + + targetHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + svc := &Service{ + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), + } + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: strings.ToUpper(targetHash), + TargetAddedOn: 0, + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + svc.storePartialPoolMemberLocked(member) + + svc.HandleTorrentAdded(context.Background(), 1, qbt.Torrent{ + Hash: targetHash, + AddedOn: 200, + }) + + assert.True(t, svc.partialPoolOwnsTorrent(1, targetHash)) +} + +func TestDropPartialPoolMember_KeepsMarkerWhenPauseFails(t *testing.T) { + t.Parallel() + + targetHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + syncManager := &partialPoolTestSyncManager{ + bulkActionErr: errors.New("pause failed"), + } + svc := &Service{ + syncManager: syncManager, + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), + } + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: strings.ToUpper(targetHash), + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + svc.storePartialPoolMemberLocked(member) + + svc.dropPartialPoolMember(context.Background(), member, "manual review") + + assert.True(t, svc.partialPoolOwnsTorrent(1, targetHash)) + assert.Equal(t, []string{ + fmt.Sprintf("%d:%s:%v", member.TargetInstanceID, "pause", []string{member.TargetHash}), + }, syncManager.bulkActions) +} + +func TestProcessPartialPool_PropagationPausesRecipientBeforeRecheckAndSkipsResume(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ownerHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + recipientHash := 
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + fileName := "shared.bin" + + ownerRoot := t.TempDir() + recipientRoot := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(ownerRoot, fileName), []byte("owner-data"), 0o600)) + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: ownerHash, State: qbt.TorrentStateUploading}, + {Hash: recipientHash, State: qbt.TorrentStateDownloading}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(ownerHash): { + {Index: 0, Name: fileName, Progress: 1, Size: 10}, + }, + normalizeHash(recipientHash): { + {Index: 0, Name: fileName, Progress: 0, Size: 10}, + }, + }, + } + + svc := &Service{syncManager: syncManager} + settings := models.DefaultCrossSeedAutomationSettings() + members := []*models.CrossSeedPartialPoolMember{ + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: ownerHash, + ManagedRoot: ownerRoot, + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 10}, + }, + }, + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: recipientHash, + ManagedRoot: recipientRoot, + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 10}, + }, + }, + } + + svc.processPartialPool(ctx, settings, members) + + require.Equal(t, []string{ + fmt.Sprintf("1:pause:[%s]", recipientHash), + fmt.Sprintf("1:recheck:[%s]", recipientHash), + }, syncManager.bulkActions) + + data, err := os.ReadFile(filepath.Join(recipientRoot, fileName)) + require.NoError(t, err) + assert.Equal(t, "owner-data", string(data)) +} + +func TestProcessPartialPool_PropagationMatchesSharedKeyAcrossLayouts(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ownerHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + 
recipientHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + ownerName := "Release/Release.mkv" + recipientName := "Release.mkv" + + ownerRoot := t.TempDir() + recipientRoot := t.TempDir() + require.NoError(t, os.MkdirAll(filepath.Join(ownerRoot, "Release"), 0o755)) + require.NoError(t, os.WriteFile(filepath.Join(ownerRoot, filepath.FromSlash(ownerName)), []byte("owner-data"), 0o600)) + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: ownerHash, State: qbt.TorrentStateUploading}, + {Hash: recipientHash, State: qbt.TorrentStateDownloading}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(ownerHash): { + {Index: 0, Name: ownerName, Progress: 1, Size: 10}, + }, + normalizeHash(recipientHash): { + {Index: 0, Name: recipientName, Progress: 0, Size: 10}, + }, + }, + } + + sharedKey := normalizeFileKey(ownerName) + svc := &Service{syncManager: syncManager} + settings := models.DefaultCrossSeedAutomationSettings() + members := []*models.CrossSeedPartialPoolMember{ + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: ownerHash, + ManagedRoot: ownerRoot, + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: ownerName, Size: 10, Key: sharedKey}, + }, + }, + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: recipientHash, + ManagedRoot: recipientRoot, + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: recipientName, Size: 10, Key: sharedKey}, + }, + }, + } + + svc.processPartialPool(ctx, settings, members) + + require.Equal(t, []string{ + fmt.Sprintf("1:pause:[%s]", recipientHash), + fmt.Sprintf("1:recheck:[%s]", recipientHash), + }, syncManager.bulkActions) + + data, err := os.ReadFile(filepath.Join(recipientRoot, recipientName)) + require.NoError(t, err) + 
assert.Equal(t, "owner-data", string(data)) +} + +func TestProcessPartialPool_SkipsPropagationWhenOwnerFileMissingOnDisk(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ownerHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + recipientHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + fileName := "shared.bin" + + ownerRoot := t.TempDir() + recipientRoot := t.TempDir() + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: ownerHash, State: qbt.TorrentStateUploading}, + {Hash: recipientHash, State: qbt.TorrentStateDownloading}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(ownerHash): { + {Index: 0, Name: fileName, Progress: 1, Size: 10}, + }, + normalizeHash(recipientHash): { + {Index: 0, Name: fileName, Progress: 0, Size: 10}, + }, + }, + } + + svc := &Service{syncManager: syncManager} + settings := models.DefaultCrossSeedAutomationSettings() + members := []*models.CrossSeedPartialPoolMember{ + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: ownerHash, + ManagedRoot: ownerRoot, + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 10}, + }, + }, + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: recipientHash, + ManagedRoot: recipientRoot, + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 10}, + }, + }, + } + + svc.processPartialPool(ctx, settings, members) + + assert.Equal(t, []string{ + fmt.Sprintf("1:pause:[%s]", recipientHash), + fmt.Sprintf("1:resume:[%s]", recipientHash), + }, syncManager.bulkActions) + _, err := os.Stat(filepath.Join(recipientRoot, fileName)) + require.Error(t, err) + assert.True(t, os.IsNotExist(err)) +} + +func TestProcessPartialPool_DoesNotRecheckWhenPropagationFails(t 
*testing.T) { + t.Parallel() + + ctx := context.Background() + ownerHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + recipientHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + fileName := filepath.ToSlash(filepath.Join("blocked", "shared.bin")) + + ownerRoot := t.TempDir() + recipientRoot := t.TempDir() + require.NoError(t, os.MkdirAll(filepath.Join(ownerRoot, "blocked"), 0o755)) + require.NoError(t, os.WriteFile(filepath.Join(ownerRoot, filepath.FromSlash(fileName)), []byte("owner-data"), 0o600)) + require.NoError(t, os.WriteFile(filepath.Join(recipientRoot, "blocked"), []byte("not-a-directory"), 0o600)) + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: ownerHash, State: qbt.TorrentStateUploading}, + {Hash: recipientHash, State: qbt.TorrentStateDownloading}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(ownerHash): { + {Index: 0, Name: fileName, Progress: 1, Size: 10}, + }, + normalizeHash(recipientHash): { + {Index: 0, Name: fileName, Progress: 0, Size: 10}, + }, + }, + } + + svc := &Service{syncManager: syncManager} + settings := models.DefaultCrossSeedAutomationSettings() + members := []*models.CrossSeedPartialPoolMember{ + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: ownerHash, + ManagedRoot: ownerRoot, + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 10}, + }, + }, + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: recipientHash, + ManagedRoot: recipientRoot, + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 10}, + }, + }, + } + + svc.processPartialPool(ctx, settings, members) + + require.Equal(t, []string{ + fmt.Sprintf("1:pause:[%s]", recipientHash), + fmt.Sprintf("1:resume:[%s]", 
recipientHash), + }, syncManager.bulkActions) + _, err := os.Stat(filepath.Join(recipientRoot, filepath.FromSlash(fileName))) + require.Error(t, err) + assert.True(t, os.IsNotExist(err) || errors.Is(err, syscall.ENOTDIR)) +} + +func TestProcessPartialPool_LeavesActiveDownloaderAlone(t *testing.T) { + t.Parallel() + + ctx := context.Background() + activeHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + pausedHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + fileName := "shared.bin" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: activeHash, State: qbt.TorrentStateDownloading}, + {Hash: pausedHash, State: qbt.TorrentStatePausedDl}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(activeHash): { + {Index: 0, Name: fileName, Progress: 0.25, Size: 100}, + }, + normalizeHash(pausedHash): { + {Index: 0, Name: fileName, Progress: 0, Size: 100}, + }, + }, + } + + svc := &Service{ + syncManager: syncManager, + partialPoolBySource: make(map[string]partialPoolSelection), + } + settings := &models.CrossSeedAutomationSettings{MaxMissingBytesAfterRecheck: 150} + members := []*models.CrossSeedPartialPoolMember{ + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: activeHash, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 100}, + }, + }, + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: pausedHash, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 100}, + }, + }, + } + + svc.processPartialPool(ctx, settings, members) + + assert.Empty(t, syncManager.bulkActions) +} + +func TestProcessPartialPool_DelaysManualReviewUntilRecheckSettles(t *testing.T) { + t.Parallel() + + ctx := context.Background() + targetHash := 
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: targetHash, State: qbt.TorrentStateCheckingResumeData}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(targetHash): { + {Index: 0, Name: "disc/file1.mkv", Progress: 0.25, Size: 200}, + }, + }, + } + + svc := &Service{ + syncManager: syncManager, + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: map[string]partialPoolSelection{}, + } + settings := &models.CrossSeedAutomationSettings{MaxMissingBytesAfterRecheck: 100} + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: targetHash, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "disc/file1.mkv", Size: 200}, + }, + } + + svc.processPartialPool(ctx, settings, []*models.CrossSeedPartialPoolMember{member}) + assert.Empty(t, syncManager.bulkActions) + + syncManager.torrentsByInstance[1][0].State = qbt.TorrentStatePausedDl + svc.processPartialPool(ctx, settings, []*models.CrossSeedPartialPoolMember{member}) + + require.Equal(t, []string{ + fmt.Sprintf("1:pause:[%s]", targetHash), + }, syncManager.bulkActions) +} + +func TestRegisterPartialPoolMember_UsesTargetTorrentFiles(t *testing.T) { + t.Parallel() + + dbPath := filepath.Join(t.TempDir(), "partial-pool.db") + db, err := database.New(dbPath) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + targetHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + targetFiles := qbt.TorrentFiles{ + {Index: 0, Name: "Release/Release.mkv", Size: 100}, + } + syncManager := &partialPoolTestSyncManager{ + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(targetHash): targetFiles, + }, + torrentByAnyHash: map[string]qbt.Torrent{ + normalizeHash(targetHash): { + 
Hash: targetHash, + AddedOn: 12345, + }, + }, + } + store := models.NewCrossSeedPartialPoolMemberStore(db) + svc := &Service{ + syncManager: syncManager, + partialPoolStore: store, + partialPoolWake: make(chan struct{}, 1), + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + } + + err = svc.registerPartialPoolMember( + context.Background(), + 1, + "sourcehash", + 1, + targetHash, + "", + "Release", + models.CrossSeedPartialMemberModeReflink, + t.TempDir(), + 0, + 1024, + qbt.TorrentFiles{{Index: 0, Name: "fallback.mkv", Size: 100}}, + ) + require.NoError(t, err) + + stored, err := store.GetByAnyHash(context.Background(), 1, targetHash) + require.NoError(t, err) + require.NotNil(t, stored) + require.Len(t, stored.SourceFiles, 1) + assert.EqualValues(t, 12345, stored.TargetAddedOn) + assert.Equal(t, "Release/Release.mkv", stored.SourceFiles[0].Name) + assert.Equal(t, normalizeFileKey("Release/Release.mkv"), stored.SourceFiles[0].Key) + assert.Equal(t, 1, syncManager.forceRefreshCalls) + assert.True(t, syncManager.lastForceRefresh) +} + +func TestLoadPartialPoolStates_UsesFreshTorrentFiles(t *testing.T) { + t.Parallel() + + ctx := context.Background() + targetHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: targetHash, State: qbt.TorrentStatePausedDl}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(targetHash): { + {Index: 0, Name: "file.mkv", Progress: 1, Size: 100}, + }, + }, + } + + svc := &Service{ + syncManager: syncManager, + } + + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: targetHash, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "file.mkv", Size: 100}, + }, + CreatedAt: time.Now().UTC().Add(-time.Minute), + UpdatedAt: 
time.Now().UTC().Add(-time.Minute), + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + + states := svc.loadPartialPoolStates(ctx, models.DefaultCrossSeedAutomationSettings(), []*models.CrossSeedPartialPoolMember{member}) + require.Len(t, states, 1) + assert.Equal(t, 1, syncManager.forceRefreshCalls) + assert.True(t, syncManager.lastForceRefresh) +} + +func TestLoadPartialPoolStates_RemovesStaleMemberWhenTorrentWasReadded(t *testing.T) { + t.Parallel() + + ctx := context.Background() + targetHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: targetHash, State: qbt.TorrentStatePausedDl, AddedOn: 200}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(targetHash): { + {Index: 0, Name: "file.mkv", Progress: 1, Size: 100}, + }, + }, + } + + svc := &Service{ + syncManager: syncManager, + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), + } + + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: targetHash, + TargetAddedOn: 100, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "file.mkv", Size: 100}, + }, + CreatedAt: time.Now().UTC().Add(-time.Minute), + UpdatedAt: time.Now().UTC().Add(-time.Minute), + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + + svc.storePartialPoolMemberLocked(member) + + states := svc.loadPartialPoolStates(ctx, models.DefaultCrossSeedAutomationSettings(), []*models.CrossSeedPartialPoolMember{member}) + assert.Empty(t, states) + assert.False(t, svc.partialPoolOwnsTorrent(1, targetHash)) +} + +func TestLoadPartialPoolStates_KeepsMemberWhenStoredAddedOnUnknown(t *testing.T) { + t.Parallel() + + ctx := context.Background() + targetHash := 
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: targetHash, State: qbt.TorrentStatePausedDl, AddedOn: 200}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(targetHash): { + {Index: 0, Name: "file.mkv", Progress: 1, Size: 100}, + }, + }, + } + + svc := &Service{ + syncManager: syncManager, + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), + } + + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: targetHash, + TargetAddedOn: 0, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "file.mkv", Size: 100}, + }, + CreatedAt: time.Now().UTC().Add(-time.Minute), + UpdatedAt: time.Now().UTC().Add(-time.Minute), + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + + svc.storePartialPoolMemberLocked(member) + + states := svc.loadPartialPoolStates(ctx, models.DefaultCrossSeedAutomationSettings(), []*models.CrossSeedPartialPoolMember{member}) + require.Len(t, states, 1) + assert.True(t, svc.partialPoolOwnsTorrent(1, targetHash)) +} + +func TestTriggerPartialPoolRun_CoalescesPendingSignals(t *testing.T) { + t.Parallel() + + ctx := t.Context() + + svc := &Service{} + + started := make(chan struct{}, 1) + release := make(chan struct{}) + var runs atomic.Int32 + + process := func(context.Context) { + run := runs.Add(1) + if run != 1 { + return + } + + started <- struct{}{} + <-release + } + + svc.triggerPartialPoolRun(ctx, process) + + select { + case <-started: + case <-time.After(time.Second): + t.Fatal("timed out waiting for first pooled run to start") + } + + for range 5 { + svc.triggerPartialPoolRun(ctx, process) + } + + close(release) + + require.Eventually(t, func() bool { + return runs.Load() == 2 && 
!svc.partialPoolRunActive.Load() + }, time.Second, 10*time.Millisecond) + assert.EqualValues(t, 2, runs.Load()) +} + +func TestProcessPartialPool_KeepsFreshMissingMemberRegistered(t *testing.T) { + t.Parallel() + + ctx := context.Background() + targetHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: {}, + }, + } + + svc := &Service{ + syncManager: syncManager, + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: map[string]partialPoolSelection{}, + } + + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: targetHash, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{{Name: "file.mkv", Size: 100}}, + CreatedAt: time.Now().UTC(), + UpdatedAt: time.Now().UTC(), + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + + svc.storePartialPoolMemberLocked(member) + svc.processPartialPool(ctx, models.DefaultCrossSeedAutomationSettings(), []*models.CrossSeedPartialPoolMember{member}) + + assert.True(t, svc.partialPoolOwnsTorrent(1, targetHash)) + assert.Empty(t, syncManager.bulkActions) +} + +func TestProcessPartialPool_RemovesStaleMissingMember(t *testing.T) { + t.Parallel() + + ctx := context.Background() + targetHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: {}, + }, + } + + svc := &Service{ + syncManager: syncManager, + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: map[string]partialPoolSelection{}, + } + + stale := time.Now().UTC().Add(-partialPoolMissingGrace - time.Second) + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: targetHash, + 
ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{{Name: "file.mkv", Size: 100}}, + CreatedAt: stale, + UpdatedAt: stale, + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + + svc.storePartialPoolMemberLocked(member) + svc.processPartialPool(ctx, models.DefaultCrossSeedAutomationSettings(), []*models.CrossSeedPartialPoolMember{member}) + + assert.False(t, svc.partialPoolOwnsTorrent(1, targetHash)) + assert.Empty(t, syncManager.bulkActions) +} + +func TestPartialPoolOwnsTorrent_IgnoresAndRemovesExpiredMember(t *testing.T) { + t.Parallel() + + targetHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + expired := time.Now().UTC().Add(-time.Minute) + + svc := &Service{ + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), + } + + member := &models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: targetHash, + ExpiresAt: expired, + } + + svc.storePartialPoolMemberLocked(member) + + assert.False(t, svc.partialPoolOwnsTorrent(1, targetHash)) + assert.Empty(t, svc.listPartialPoolMembers()) +} + +func TestLoadPartialPoolStates_FallsBackToVariantAwareLookupWhenFilteredListMisses(t *testing.T) { + t.Parallel() + + ctx := context.Background() + targetHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: {}, + }, + torrentByAnyHash: map[string]qbt.Torrent{ + normalizeHash(targetHash): { + Hash: targetHash, + State: qbt.TorrentStatePausedDl, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(targetHash): { + {Index: 0, Name: "file.mkv", Progress: 0, Size: 100}, + }, + }, + } + + svc := &Service{ + syncManager: syncManager, + } + + stale := time.Now().UTC().Add(-partialPoolMissingGrace - time.Second) + member := &models.CrossSeedPartialPoolMember{ + 
SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: targetHash, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: "file.mkv", Size: 100}, + }, + CreatedAt: stale, + UpdatedAt: stale, + ExpiresAt: time.Now().UTC().Add(time.Hour), + } + + states := svc.loadPartialPoolStates(ctx, models.DefaultCrossSeedAutomationSettings(), []*models.CrossSeedPartialPoolMember{member}) + require.Len(t, states, 1) + assert.Equal(t, normalizeHash(targetHash), normalizeHash(states[0].torrent.Hash)) + assert.False(t, states[0].complete) +} + +func TestProcessPartialPool_RotatesPreferredDownloaderAfterTimeout(t *testing.T) { + t.Parallel() + + ctx := context.Background() + oldHash := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + nextHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + fileName := "shared.bin" + + syncManager := &partialPoolTestSyncManager{ + torrentsByInstance: map[int][]qbt.Torrent{ + 1: { + {Hash: oldHash, State: qbt.TorrentStatePausedDl}, + {Hash: nextHash, State: qbt.TorrentStatePausedDl}, + }, + }, + filesByHash: map[string]qbt.TorrentFiles{ + normalizeHash(oldHash): { + {Index: 0, Name: fileName, Progress: 0, Size: 100}, + }, + normalizeHash(nextHash): { + {Index: 0, Name: fileName, Progress: 0, Size: 100}, + }, + }, + } + + poolKey := partialPoolSourceKey(&models.CrossSeedPartialPoolMember{ + SourceInstanceID: 1, + SourceHash: "sourcehash", + }) + svc := &Service{ + syncManager: syncManager, + partialPoolBySource: map[string]partialPoolSelection{ + poolKey: { + MemberKey: partialPoolLookupKey(1, oldHash), + SelectedAt: time.Now().UTC().Add(-partialPoolSelectionLimit - time.Minute), + }, + }, + } + settings := &models.CrossSeedAutomationSettings{MaxMissingBytesAfterRecheck: 150} + members := []*models.CrossSeedPartialPoolMember{ + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: oldHash, + ManagedRoot: 
t.TempDir(), + Mode: models.CrossSeedPartialMemberModeHardlink, + SourcePieceLength: 1024, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 100}, + }, + }, + { + SourceInstanceID: 1, + SourceHash: "sourcehash", + TargetInstanceID: 1, + TargetHash: nextHash, + ManagedRoot: t.TempDir(), + Mode: models.CrossSeedPartialMemberModeReflink, + SourceFiles: []models.CrossSeedPartialFile{ + {Name: fileName, Size: 100}, + }, + }, + } + + svc.processPartialPool(ctx, settings, members) + + require.Equal(t, []string{ + fmt.Sprintf("1:resume:[%s]", nextHash), + }, syncManager.bulkActions) + require.Equal(t, partialPoolLookupKey(1, nextHash), svc.partialPoolBySource[poolKey].MemberKey) +} diff --git a/internal/services/crossseed/rootless_savepath_test.go b/internal/services/crossseed/rootless_savepath_test.go index 5d5850bc6..aef0a2996 100644 --- a/internal/services/crossseed/rootless_savepath_test.go +++ b/internal/services/crossseed/rootless_savepath_test.go @@ -7,6 +7,8 @@ import ( "context" "errors" "maps" + "os" + "path/filepath" "strings" "testing" @@ -15,6 +17,7 @@ import ( "github.com/autobrr/qui/internal/models" internalqb "github.com/autobrr/qui/internal/qbittorrent" + "github.com/autobrr/qui/pkg/pathutil" "github.com/autobrr/qui/pkg/stringutils" ) @@ -147,7 +150,7 @@ func TestProcessCrossSeedCandidate_RootlessContentDirOverridesSavePath(t *testin matchedName := "Show.S01E01.1080p.WEB-DL-GROUP" candidateFiles := qbt.TorrentFiles{ - {Name: "Show.S01E01.mkv", Size: 1024}, + {Name: "Show.S01E01.mkv", Size: 1024, Progress: 1}, } sourceFiles := qbt.TorrentFiles{ {Name: "Show.S01E01.mkv", Size: 1024}, @@ -223,8 +226,8 @@ func TestProcessCrossSeedCandidate_RootlessContentDirOverridesSavePath_MultiFile matchedName := "Show.S01E01.1080p.WEB-DL-GROUP" candidateFiles := qbt.TorrentFiles{ - {Name: "Show.S01E01.mkv", Size: 1024}, - {Name: "Show.S01E01.srt", Size: 128}, + {Name: "Show.S01E01.mkv", Size: 1024, Progress: 1}, + {Name: "Show.S01E01.srt", Size: 128, 
Progress: 1}, } sourceFiles := qbt.TorrentFiles{ {Name: "Show.S01E01.mkv", Size: 1024}, @@ -301,7 +304,7 @@ func TestProcessCrossSeedCandidate_RootlessContentDirNoopWhenSavePathMatches(t * matchedName := "Show.S01E01.1080p.WEB-DL-GROUP" candidateFiles := qbt.TorrentFiles{ - {Name: "Show.S01E01.mkv", Size: 1024}, + {Name: "Show.S01E01.mkv", Size: 1024, Progress: 1}, } sourceFiles := qbt.TorrentFiles{ {Name: "Show.S01E01.mkv", Size: 1024}, @@ -367,3 +370,307 @@ func TestProcessCrossSeedCandidate_RootlessContentDirNoopWhenSavePathMatches(t * require.Equal(t, "Original", sync.addedOptions["contentLayout"]) require.Equal(t, "true", sync.addedOptions["skip_checking"]) } + +func TestProcessCrossSeedCandidate_HardlinkFallbackKeepsMatchedSavePath(t *testing.T) { + t.Parallel() + + ctx := context.Background() + instanceID := 1 + matchedHash := "matchedhash" + newHash := "newhash12345678" + torrentName := "Movie.2024.1080p.WEB-DL-GROUP" + + tempDir := t.TempDir() + baseDir := filepath.Join(tempDir, "managed") + oldRoot := filepath.Join(baseDir, "Old Tracker", pathutil.IsolationFolderName("oldhash12345678", torrentName)) + managedRoot := filepath.Join(baseDir, "New Tracker", pathutil.IsolationFolderName(newHash, torrentName)) + expectedRoot := oldRoot + + require.NoError(t, os.MkdirAll(oldRoot, 0o755)) + require.NoError(t, os.MkdirAll(managedRoot, 0o755)) + require.NoError(t, os.WriteFile(filepath.Join(oldRoot, "Movie.mkv"), []byte("movie"), 0o600)) + require.NoError(t, os.WriteFile(filepath.Join(managedRoot, "Movie.mkv"), []byte("conflict"), 0o600)) + + candidateFiles := qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5, Progress: 1}, + } + sourceFiles := qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5}, + } + + matchedTorrent := qbt.Torrent{ + Hash: matchedHash, + Name: torrentName, + Progress: 1.0, + ContentPath: filepath.Join(oldRoot, "Movie.mkv"), + } + + sync := &rootlessSavePathSyncManager{ + files: map[string]qbt.TorrentFiles{ + normalizeHash(matchedHash): 
candidateFiles, + }, + props: map[string]*qbt.TorrentProperties{ + normalizeHash(matchedHash): {SavePath: oldRoot}, + }, + } + + instanceStore := &rootlessSavePathInstanceStore{ + instances: map[int]*models.Instance{ + instanceID: { + ID: instanceID, + UseHardlinks: true, + FallbackToRegularMode: true, + HasLocalFilesystemAccess: true, + HardlinkBaseDir: baseDir, + HardlinkDirPreset: "by-tracker", + }, + }, + } + + service := &Service{ + syncManager: sync, + instanceStore: instanceStore, + releaseCache: NewReleaseCache(), + stringNormalizer: stringutils.NewDefaultNormalizer(), + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + candidate := CrossSeedCandidate{ + InstanceID: instanceID, + InstanceName: "Test", + Torrents: []qbt.Torrent{matchedTorrent}, + } + + req := &CrossSeedRequest{IndexerName: "New Tracker"} + result := service.processCrossSeedCandidate(ctx, candidate, []byte("torrent"), newHash, "", torrentName, req, service.releaseCache.Parse(torrentName), sourceFiles, nil) + + require.True(t, result.Success) + require.Equal(t, "added", result.Status) + require.NotNil(t, sync.addedOptions) + require.Equal(t, "false", sync.addedOptions["autoTMM"]) + require.Equal(t, expectedRoot, sync.addedOptions["savepath"]) +} + +func TestProcessCrossSeedCandidate_ReflinkFallbackKeepsMatchedSavePath(t *testing.T) { + t.Parallel() + + ctx := context.Background() + instanceID := 1 + matchedHash := "matchedhash" + newHash := "newhash12345678" + torrentName := "Movie.2024.1080p.WEB-DL-GROUP" + + tempDir := t.TempDir() + baseDir := filepath.Join(tempDir, "managed") + oldRoot := filepath.Join(baseDir, "Old Tracker", pathutil.IsolationFolderName("oldhash12345678", torrentName)) + expectedRoot := oldRoot + + require.NoError(t, os.MkdirAll(oldRoot, 0o755)) + require.NoError(t, os.WriteFile(filepath.Join(oldRoot, "Movie.mkv"), []byte("movie"), 0o600)) + + candidateFiles 
:= qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5, Progress: 1}, + } + sourceFiles := qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5}, + } + + matchedTorrent := qbt.Torrent{ + Hash: matchedHash, + Name: torrentName, + Progress: 1.0, + ContentPath: filepath.Join(oldRoot, "Movie.mkv"), + } + + sync := &rootlessSavePathSyncManager{ + files: map[string]qbt.TorrentFiles{ + normalizeHash(matchedHash): candidateFiles, + }, + props: map[string]*qbt.TorrentProperties{ + normalizeHash(matchedHash): {SavePath: oldRoot}, + }, + } + + instanceStore := &rootlessSavePathInstanceStore{ + instances: map[int]*models.Instance{ + instanceID: { + ID: instanceID, + UseReflinks: true, + FallbackToRegularMode: true, + HasLocalFilesystemAccess: true, + HardlinkBaseDir: baseDir, + HardlinkDirPreset: "by-tracker", + }, + }, + } + + service := &Service{ + syncManager: sync, + instanceStore: instanceStore, + releaseCache: NewReleaseCache(), + stringNormalizer: stringutils.NewDefaultNormalizer(), + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + candidate := CrossSeedCandidate{ + InstanceID: instanceID, + InstanceName: "Test", + Torrents: []qbt.Torrent{matchedTorrent}, + } + + req := &CrossSeedRequest{IndexerName: "New Tracker"} + result := service.processCrossSeedCandidate(ctx, candidate, []byte("torrent"), newHash, "", torrentName, req, service.releaseCache.Parse(torrentName), sourceFiles, nil) + + require.True(t, result.Success) + require.Equal(t, "added", result.Status) + require.NotNil(t, sync.addedOptions) + require.Equal(t, "false", sync.addedOptions["autoTMM"]) + require.Equal(t, expectedRoot, sync.addedOptions["savepath"]) +} + +func TestProcessCrossSeedCandidate_HardlinkFallbackRootlessExtrasStillRequireLinkMode(t *testing.T) { + t.Parallel() + + ctx := context.Background() + instanceID := 1 + matchedHash := "matchedhash" + newHash := "newhash12345678" + torrentName 
:= "Movie.2024.1080p.WEB-DL-GROUP" + + tempDir := t.TempDir() + downloadsDir := filepath.Join(tempDir, "downloads") + baseDir := filepath.Join(tempDir, "managed") + expectedRoot := filepath.Join(baseDir, pathutil.IsolationFolderName(newHash, torrentName)) + + require.NoError(t, os.MkdirAll(downloadsDir, 0o755)) + require.NoError(t, os.MkdirAll(filepath.Join(expectedRoot, torrentName), 0o755)) + require.NoError(t, os.WriteFile(filepath.Join(downloadsDir, "Movie.mkv"), []byte("movie"), 0o600)) + require.NoError(t, os.WriteFile(filepath.Join(expectedRoot, torrentName, "Movie.mkv"), []byte("conflict"), 0o600)) + + candidateFiles := qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5, Progress: 1}, + } + sourceFiles := qbt.TorrentFiles{ + {Name: "Movie.2024.1080p.WEB-DL-GROUP/Movie.mkv", Size: 5}, + {Name: "Movie.2024.1080p.WEB-DL-GROUP/Sample/sample.mkv", Size: 1}, + } + + matchedTorrent := qbt.Torrent{ + Hash: matchedHash, + Name: torrentName, + Progress: 1.0, + ContentPath: filepath.Join(downloadsDir, "Movie.mkv"), + } + + sync := &rootlessSavePathSyncManager{ + files: map[string]qbt.TorrentFiles{ + normalizeHash(matchedHash): candidateFiles, + }, + props: map[string]*qbt.TorrentProperties{ + normalizeHash(matchedHash): {SavePath: downloadsDir}, + }, + } + + instanceStore := &rootlessSavePathInstanceStore{ + instances: map[int]*models.Instance{ + instanceID: { + ID: instanceID, + UseHardlinks: true, + FallbackToRegularMode: true, + HasLocalFilesystemAccess: true, + HardlinkBaseDir: baseDir, + HardlinkDirPreset: "flat", + }, + }, + } + + service := &Service{ + syncManager: sync, + instanceStore: instanceStore, + releaseCache: NewReleaseCache(), + stringNormalizer: stringutils.NewDefaultNormalizer(), + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + candidate := CrossSeedCandidate{ + InstanceID: instanceID, + InstanceName: "Test", + Torrents: 
[]qbt.Torrent{matchedTorrent}, + } + + result := service.processCrossSeedCandidate(ctx, candidate, []byte("torrent"), newHash, "", torrentName, &CrossSeedRequest{}, service.releaseCache.Parse(torrentName), sourceFiles, nil) + + require.False(t, result.Success) + require.Equal(t, "requires_hardlink_reflink", result.Status) + require.Nil(t, sync.addedOptions, "regular mode must not add when fallback would scatter extra files") +} + +func TestProcessCrossSeedCandidate_LinkModeDisabledKeepsMatchedSavePath(t *testing.T) { + t.Parallel() + + ctx := context.Background() + instanceID := 1 + matchedHash := "matchedhash" + newHash := "newhash" + torrentName := "Movie.2024.1080p.WEB-DL-GROUP" + + candidateFiles := qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5, Progress: 1}, + } + sourceFiles := qbt.TorrentFiles{ + {Name: "Movie.mkv", Size: 5}, + } + + matchedTorrent := qbt.Torrent{ + Hash: matchedHash, + Name: torrentName, + Progress: 1.0, + AutoManaged: false, + ContentPath: "/downloads/other-tracker/Movie.mkv", + } + + sync := &rootlessSavePathSyncManager{ + files: map[string]qbt.TorrentFiles{ + normalizeHash(matchedHash): candidateFiles, + }, + props: map[string]*qbt.TorrentProperties{ + normalizeHash(matchedHash): {SavePath: "/downloads/other-tracker"}, + }, + } + + instanceStore := &rootlessSavePathInstanceStore{ + instances: map[int]*models.Instance{ + instanceID: {ID: instanceID}, + }, + } + + service := &Service{ + syncManager: sync, + instanceStore: instanceStore, + releaseCache: NewReleaseCache(), + stringNormalizer: stringutils.NewDefaultNormalizer(), + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + } + + candidate := CrossSeedCandidate{ + InstanceID: instanceID, + InstanceName: "Test", + Torrents: []qbt.Torrent{matchedTorrent}, + } + + result := service.processCrossSeedCandidate(ctx, candidate, []byte("torrent"), newHash, "", torrentName, 
&CrossSeedRequest{IndexerName: "New Tracker"}, service.releaseCache.Parse(torrentName), sourceFiles, nil) + + require.True(t, result.Success) + require.Equal(t, "added", result.Status) + require.NotNil(t, sync.addedOptions) + require.Equal(t, "false", sync.addedOptions["autoTMM"]) + require.Equal(t, "/downloads/other-tracker", sync.addedOptions["savepath"]) +} diff --git a/internal/services/crossseed/service.go b/internal/services/crossseed/service.go index e49c9babb..68e6a1567 100644 --- a/internal/services/crossseed/service.go +++ b/internal/services/crossseed/service.go @@ -216,26 +216,36 @@ type automationSnapshots struct { type automationContext struct { snapshots *automationSnapshots candidateCache map[string]*FindCandidatesResponse + candidateOrder []string } const ( - searchResultCacheTTL = 5 * time.Minute - indexerDomainCacheTTL = 1 * time.Minute - contentFilteringWaitTimeout = 5 * time.Second - contentFilteringPollInterval = 150 * time.Millisecond - selectedIndexerContentSkipReason = "selected indexers were filtered out" - selectedIndexerCapabilitySkipReason = "selected indexers do not support required caps" - crossSeedRenameWaitTimeout = 15 * time.Second - crossSeedRenamePollInterval = 200 * time.Millisecond - automationSettingsQueryTimeout = 5 * time.Second - recheckPollInterval = 3 * time.Second // Batch API calls per instance - recheckAbsoluteTimeout = 60 * time.Minute // Allow time for large recheck queues - recheckAPITimeout = 30 * time.Second - minSearchIntervalSecondsTorznab = 60 - minSearchIntervalSecondsGazelleOnly = 5 - minSearchCooldownMinutes = 720 - maxCompletionSearchAttempts = 3 - defaultCompletionRetryDelay = 30 * time.Second + searchResultCacheTTL = 5 * time.Minute + indexerDomainCacheTTL = 1 * time.Minute + contentFilteringWaitTimeout = 5 * time.Second + contentFilteringPollInterval = 150 * time.Millisecond + selectedIndexerContentSkipReason = "selected indexers were filtered out" + selectedIndexerCapabilitySkipReason = "selected 
indexers do not support required caps" + crossSeedRenameWaitTimeout = 15 * time.Second + crossSeedRenamePollInterval = 200 * time.Millisecond + automationSettingsQueryTimeout = 5 * time.Second + recheckPollInterval = 3 * time.Second // Batch API calls per instance + recheckAbsoluteTimeout = 60 * time.Minute // Allow time for large recheck queues + recheckAPITimeout = 30 * time.Second + recheckConfirmPollInterval = 250 * time.Millisecond + recheckConfirmTimeout = 3 * time.Second + recheckConfirmMaxAttempts = 3 + minSearchIntervalSecondsTorznab = 60 + minSearchIntervalSecondsGazelleOnly = 5 + minSearchCooldownMinutes = 720 + maxCompletionSearchAttempts = 3 + maxCompletionCheckingAttempts = 3 + defaultCompletionRetryDelay = 30 * time.Second + defaultCompletionCheckingRetryDelay = 30 * time.Second + defaultCompletionCheckingPollInterval = 2 * time.Second + defaultCompletionCheckingTimeout = 5 * time.Minute + automationCandidateCacheMaxEntries = 512 + candidateFileBatchSize = 200 // User-facing message when cross-seed is skipped due to recheck requirement skippedRecheckMessage = "Skipped: requires recheck. Disable 'Skip recheck' in Cross-Seed settings to allow" @@ -297,7 +307,8 @@ type Service struct { externalProgramService *externalprograms.Service // Per-instance completion settings - completionStore *models.InstanceCrossSeedCompletionStore + completionStore *models.InstanceCrossSeedCompletionStore + partialPoolStore *models.CrossSeedPartialPoolMemberStore // recoverErroredTorrentsEnabled controls whether to attempt recovery of errored/missingFiles // torrents before candidate selection. When false (default), errored torrents are simply @@ -310,8 +321,10 @@ type Service struct { runActive atomic.Bool runCancel context.CancelFunc // cancel func for the current automation run - // categoryCreationGroup deduplicates concurrent category creation calls. 
- // Key format: "instanceID:categoryName" + // categoryCreationGroup deduplicates concurrent category preflight/creation work. + // Key formats: + // - "instanceID:categoryName" for direct ensureCrossCategory calls + // - "prepare:instanceID:categoryName" for processCrossSeedCandidate preflight categoryCreationGroup singleflight.Group // createdCategories tracks categories we've successfully created in this session // to avoid relying on potentially stale GetCategories responses. @@ -336,9 +349,15 @@ type Service struct { metrics *ServiceMetrics // Per-instance completion coordination. - // Ensures completion-triggered searches run serially per instance. + // Queue bookkeeping/polling and completion-triggered search serialization + // use separate mutexes so a slow search does not stall other waits. completionLaneMu sync.Mutex completionLanes map[int]*completionLane + // Completion polling timings are injectable for tests; zero values use package defaults. + completionPollInterval time.Duration + completionTimeout time.Duration + completionRetryDelay time.Duration + completionMaxAttempts int // test hooks crossSeedInvoker func(ctx context.Context, req *CrossSeedRequest) (*CrossSeedResponse, error) @@ -349,6 +368,18 @@ type Service struct { recheckResumeChan chan *pendingResume recheckResumeCtx context.Context recheckResumeCancel context.CancelFunc + recheckConfirmPoll time.Duration + recheckConfirmWait time.Duration + recheckConfirmTries int + + partialPoolMu sync.RWMutex + partialPoolWake chan struct{} + partialPoolStop <-chan struct{} + partialPoolCancel context.CancelFunc + partialPoolRunActive atomic.Bool + partialPoolRunPending atomic.Bool + partialPoolByHash map[string]*models.CrossSeedPartialPoolMember + partialPoolBySource map[string]partialPoolSelection } // pendingResume tracks a torrent waiting for recheck to complete before resuming. 
@@ -360,7 +391,44 @@ type pendingResume struct { } type completionLane struct { - mu sync.Mutex + mu sync.Mutex + searchMu sync.Mutex + waits map[string]*completionWaitState + polling bool +} + +type completionWaitState struct { + done chan struct{} + attempt int + retryAt time.Time + deadline time.Time + timeout time.Duration + eventTorrent qbt.Torrent + lastSeen *qbt.Torrent + result *qbt.Torrent + err error + checkingLogged bool +} + +type completionWaitSnapshot struct { + state *completionWaitState + retryAt time.Time +} + +type managedDestinationContext struct { + RootDir string + Isolated bool +} + +func newBackgroundStopSignal() (<-chan struct{}, context.CancelFunc) { + stop := make(chan struct{}) + var once sync.Once + + return stop, func() { + once.Do(func() { + close(stop) + }) + } } // NewService creates a new cross-seed service @@ -375,6 +443,7 @@ func NewService( externalProgramStore *models.ExternalProgramStore, externalProgramService *externalprograms.Service, completionStore *models.InstanceCrossSeedCompletionStore, + partialPoolStore *models.CrossSeedPartialPoolMemberStore, trackerCustomizationStore *models.TrackerCustomizationStore, notifier notifications.Notifier, recoverErroredTorrents bool, @@ -392,6 +461,7 @@ func NewService( SetDefaultTTL(5 * time.Minute)) recheckCtx, recheckCancel := context.WithCancel(context.Background()) + partialPoolStop, partialPoolCancel := newBackgroundStopSignal() svc := &Service{ instanceStore: instanceStore, @@ -411,6 +481,7 @@ func NewService( externalProgramStore: externalProgramStore, externalProgramService: externalProgramService, completionStore: completionStore, + partialPoolStore: partialPoolStore, recoverErroredTorrentsEnabled: recoverErroredTorrents, automationWake: make(chan struct{}, 1), domainMappings: initializeDomainMappings(), @@ -418,17 +489,59 @@ func NewService( dedupCache: dedupCache, metrics: NewServiceMetrics(), completionLanes: make(map[int]*completionLane), + completionPollInterval: 
defaultCompletionCheckingPollInterval, + completionTimeout: defaultCompletionCheckingTimeout, + completionRetryDelay: defaultCompletionCheckingRetryDelay, + completionMaxAttempts: maxCompletionCheckingAttempts, recheckResumeChan: make(chan *pendingResume, 100), recheckResumeCtx: recheckCtx, recheckResumeCancel: recheckCancel, + partialPoolWake: make(chan struct{}, 1), + partialPoolStop: partialPoolStop, + partialPoolCancel: partialPoolCancel, + partialPoolByHash: make(map[string]*models.CrossSeedPartialPoolMember), + partialPoolBySource: make(map[string]partialPoolSelection), } // Start the single worker goroutine for processing recheck resumes go svc.recheckResumeWorker() + go svc.partialPoolWorker() return svc } +func (s *Service) getCompletionPollInterval() time.Duration { + if s != nil && s.completionPollInterval > 0 { + return s.completionPollInterval + } + + return defaultCompletionCheckingPollInterval +} + +func (s *Service) getCompletionTimeout() time.Duration { + if s != nil && s.completionTimeout > 0 { + return s.completionTimeout + } + + return defaultCompletionCheckingTimeout +} + +func (s *Service) getCompletionRetryDelay() time.Duration { + if s != nil && s.completionRetryDelay > 0 { + return s.completionRetryDelay + } + + return defaultCompletionCheckingRetryDelay +} + +func (s *Service) getCompletionMaxAttempts() int { + if s != nil && s.completionMaxAttempts > 0 { + return s.completionMaxAttempts + } + + return maxCompletionCheckingAttempts +} + // HealthCheck performs comprehensive health checks on the cross-seed service func (s *Service) HealthCheck(ctx context.Context) error { // Check if we can list instances @@ -1056,6 +1169,9 @@ func (s *Service) validateAndNormalizeSettings(settings *models.CrossSeedAutomat if settings.SizeMismatchTolerancePercent > 100.0 { settings.SizeMismatchTolerancePercent = 100.0 } + if settings.MaxMissingBytesAfterRecheck <= 0 { + settings.MaxMissingBytesAfterRecheck = 
models.DefaultCrossSeedAutomationSettings().MaxMissingBytesAfterRecheck + } } func normalizeSearchTiming(intervalSeconds, cooldownMinutes int) (int, int) { @@ -1390,6 +1506,11 @@ func (s *Service) HandleTorrentCompletion(ctx context.Context, instanceID int, t ctx = context.WithoutCancel(ctx) } + if torrent.Hash != "" && s.partialPoolOwnsLiveTorrent(ctx, instanceID, torrent) { + s.signalPartialPoolWake() + return + } + // Load per-instance completion settings if s.completionStore == nil { log.Error(). @@ -1410,34 +1531,62 @@ func (s *Service) HandleTorrentCompletion(ctx context.Context, instanceID int, t } if !completionSettings.Enabled { - log.Debug(). - Int("instanceID", instanceID). - Str("hash", torrent.Hash). - Str("name", torrent.Name). - Msg("[CROSSSEED-COMPLETION] Completion search disabled for this instance") + logCompletionSkip(instanceID, &torrent, "[CROSSSEED-COMPLETION] Completion search disabled for this instance") return } - if torrent.CompletionOn <= 0 || torrent.Hash == "" { - // Safety check – the qbittorrent completion hook should only fire for completed torrents. + if shouldSkipCompletionTorrent(instanceID, &torrent, completionSettings) { return } - if hasCrossSeedTag(torrent.Tags) { - log.Debug(). + readyTorrent, err := s.waitForCompletionTorrentReady(ctx, instanceID, torrent) + if err != nil { + log.Warn(). + Err(err). Int("instanceID", instanceID). Str("hash", torrent.Hash). Str("name", torrent.Name). - Msg("[CROSSSEED-COMPLETION] Skipping already tagged cross-seed torrent") + Msg("[CROSSSEED-COMPLETION] Failed to execute completion search") return } - if !matchesCompletionFilters(&torrent, completionSettings) { - log.Debug(). + lane := s.getCompletionLane(instanceID) + lane.searchMu.Lock() + defer lane.searchMu.Unlock() + + readyTorrent, err = s.getCompletionTorrent(ctx, instanceID, readyTorrent.Hash) + if err != nil { + log.Warn(). + Err(err). Int("instanceID", instanceID). Str("hash", torrent.Hash). Str("name", torrent.Name). 
- Msg("[CROSSSEED-COMPLETION] Torrent does not match completion filters") + Msg("[CROSSSEED-COMPLETION] Failed to reload completion torrent") + return + } + if isCompletionCheckingState(readyTorrent.State) { + logCompletionSkip(instanceID, readyTorrent, "[CROSSSEED-COMPLETION] Torrent resumed checking before completion search") + return + } + if readyTorrent.Progress < 1.0 { + logCompletionSkip(instanceID, readyTorrent, "[CROSSSEED-COMPLETION] Torrent is no longer fully downloaded") + return + } + + completionSettings, err = s.completionStore.Get(ctx, instanceID) + if err != nil { + log.Warn(). + Err(err). + Int("instanceID", instanceID). + Str("hash", readyTorrent.Hash). + Msg("[CROSSSEED-COMPLETION] Failed to reload instance completion settings") + return + } + if !completionSettings.Enabled { + logCompletionSkip(instanceID, readyTorrent, "[CROSSSEED-COMPLETION] Completion search disabled for this instance") + return + } + if shouldSkipCompletionTorrent(instanceID, readyTorrent, completionSettings) { return } @@ -1447,7 +1596,7 @@ func (s *Service) HandleTorrentCompletion(ctx context.Context, instanceID int, t log.Warn(). Err(err). Int("instanceID", instanceID). - Str("hash", torrent.Hash). + Str("hash", readyTorrent.Hash). Msg("[CROSSSEED-COMPLETION] Failed to load automation settings") return } @@ -1455,21 +1604,64 @@ func (s *Service) HandleTorrentCompletion(ctx context.Context, instanceID int, t settings = models.DefaultCrossSeedAutomationSettings() } - lane := s.getCompletionLane(instanceID) - lane.mu.Lock() - defer lane.mu.Unlock() - - err = s.executeCompletionSearchWithRetry(ctx, instanceID, &torrent, settings, completionSettings) + err = s.executeCompletionSearchWithRetry(ctx, instanceID, readyTorrent, settings, completionSettings) if err != nil { log.Warn(). Err(err). Int("instanceID", instanceID). - Str("hash", torrent.Hash). - Str("name", torrent.Name). + Str("hash", readyTorrent.Hash). + Str("name", readyTorrent.Name). 
Msg("[CROSSSEED-COMPLETION] Failed to execute completion search") } } +// HandleTorrentAdded removes stale pooled ownership when a previously pooled torrent +// was deleted and later re-added with the same hash. +func (s *Service) HandleTorrentAdded(ctx context.Context, instanceID int, torrent qbt.Torrent) { + if s == nil || instanceID <= 0 || strings.TrimSpace(torrent.Hash) == "" { + return + } + + if ctx == nil { + ctx = context.Background() + } else { + ctx = context.WithoutCancel(ctx) + } + + _ = s.partialPoolOwnsLiveTorrent(ctx, instanceID, torrent) +} + +func shouldSkipCompletionTorrent(instanceID int, torrent *qbt.Torrent, completionSettings *models.InstanceCrossSeedCompletionSettings) bool { + if torrent == nil { + return true + } + + if torrent.CompletionOn <= 0 || torrent.Hash == "" { + // Safety check – the qbittorrent completion hook should only fire for completed torrents. + return true + } + + if hasCrossSeedTag(torrent.Tags) { + logCompletionSkip(instanceID, torrent, "[CROSSSEED-COMPLETION] Skipping already tagged cross-seed torrent") + return true + } + + if !matchesCompletionFilters(torrent, completionSettings) { + logCompletionSkip(instanceID, torrent, "[CROSSSEED-COMPLETION] Torrent does not match completion filters") + return true + } + + return false +} + +func logCompletionSkip(instanceID int, torrent *qbt.Torrent, message string) { + event := log.Debug().Int("instanceID", instanceID) + if torrent != nil { + event = event.Str("hash", torrent.Hash).Str("name", torrent.Name) + } + event.Msg(message) +} + func (s *Service) getCompletionLane(instanceID int) *completionLane { s.completionLaneMu.Lock() defer s.completionLaneMu.Unlock() @@ -1486,155 +1678,603 @@ func (s *Service) getCompletionLane(instanceID int) *completionLane { return lane } -func (s *Service) executeCompletionSearchWithRetry( +func (s *Service) waitForCompletionTorrentReady(ctx context.Context, instanceID int, eventTorrent qbt.Torrent) (*qbt.Torrent, error) { + lane := 
s.getCompletionLane(instanceID) + lane.mu.Lock() + defer lane.mu.Unlock() + + return s.waitForCompletionTorrentReadyLocked(ctx, instanceID, lane, eventTorrent) +} + +func (s *Service) waitForCompletionTorrentReadyLocked( ctx context.Context, instanceID int, - torrent *qbt.Torrent, - settings *models.CrossSeedAutomationSettings, - completionSettings *models.InstanceCrossSeedCompletionSettings, -) error { - var lastErr error - for attempt := 1; attempt <= maxCompletionSearchAttempts; attempt++ { - err := s.invokeCompletionSearch(ctx, instanceID, torrent, settings, completionSettings) - if err == nil { - return nil - } - lastErr = err + lane *completionLane, + eventTorrent qbt.Torrent, +) (*qbt.Torrent, error) { + wait := s.registerCompletionWaitLocked(instanceID, lane, eventTorrent) + done := wait.done - retryAfter, retry := completionRetryDelay(err) - if !retry || attempt == maxCompletionSearchAttempts { - break - } - if retryAfter <= 0 { - retryAfter = defaultCompletionRetryDelay - } + lane.mu.Unlock() - log.Warn(). - Err(err). - Int("instanceID", instanceID). - Str("hash", torrent.Hash). - Int("attempt", attempt). - Dur("retryAfter", retryAfter). 
- Msg("[CROSSSEED-COMPLETION] Rate-limited completion search, retrying") + var result *qbt.Torrent + var err error - timer := time.NewTimer(retryAfter) - select { - case <-ctx.Done(): - timer.Stop() - return ctx.Err() - case <-timer.C: + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + err = wait.err + if wait.result != nil { + torrent := *wait.result + result = &torrent } } - return lastErr -} -func (s *Service) invokeCompletionSearch( - ctx context.Context, - instanceID int, - torrent *qbt.Torrent, - settings *models.CrossSeedAutomationSettings, - completionSettings *models.InstanceCrossSeedCompletionSettings, -) error { - if s.completionSearchInvoker != nil { - return s.completionSearchInvoker(ctx, instanceID, torrent, settings, completionSettings) + lane.mu.Lock() + + if err != nil { + return nil, err } - return s.executeCompletionSearch(ctx, instanceID, torrent, settings, completionSettings) + + return result, nil } -func completionRetryDelay(err error) (time.Duration, bool) { - if err == nil { - return 0, false +func (s *Service) registerCompletionWaitLocked( + instanceID int, + lane *completionLane, + eventTorrent qbt.Torrent, +) *completionWaitState { + if lane.waits == nil { + lane.waits = make(map[string]*completionWaitState) } - var waitErr *jackett.RateLimitWaitError - if errors.As(err, &waitErr) { - if waitErr.Wait > 0 { - return waitErr.Wait, true + hash := normalizeHash(eventTorrent.Hash) + timeout := s.getCompletionTimeout() + now := time.Now() + deadline := now.Add(timeout) + + wait, ok := lane.waits[hash] + if ok { + base := now + if wait.retryAt.After(base) { + base = wait.retryAt } - return defaultCompletionRetryDelay, true + deadline = base.Add(timeout) + if deadline.After(wait.deadline) { + wait.deadline = deadline + wait.timeout = timeout + } + s.startCompletionLanePollerLocked(instanceID, lane) + return wait } - msg := strings.ToLower(strings.TrimSpace(err.Error())) - for _, token := range completionRateLimitTokens { - if 
strings.Contains(msg, token) { - return defaultCompletionRetryDelay, true - } + wait = &completionWaitState{ + done: make(chan struct{}), + attempt: 1, + deadline: deadline, + timeout: timeout, + eventTorrent: eventTorrent, } + lane.waits[hash] = wait - return 0, false + s.startCompletionLanePollerLocked(instanceID, lane) + + return wait } -// updateAutomationRunWithRetry attempts to update the automation run in the database with retries -func (s *Service) updateAutomationRunWithRetry(ctx context.Context, run *models.CrossSeedRun) (*models.CrossSeedRun, error) { - const maxRetries = 3 - var lastErr error +func (s *Service) startCompletionLanePollerLocked(instanceID int, lane *completionLane) { + if lane.polling { + return + } - for attempt := range maxRetries { - if attempt > 0 { - // Wait before retry - select { - case <-ctx.Done(): - return run, ctx.Err() - case <-time.After(time.Duration(attempt) * 100 * time.Millisecond): - } - } + lane.polling = true - updated, err := s.automationStore.UpdateRun(ctx, run) - if err == nil { - return updated, nil - } + go s.runCompletionLanePoller(instanceID, lane) +} - lastErr = err - log.Warn().Err(err).Int("attempt", attempt+1).Int64("runID", run.ID).Msg("Failed to update automation run, retrying") - } +func (s *Service) runCompletionLanePoller(instanceID int, lane *completionLane) { + timer := time.NewTimer(0) + defer timer.Stop() - log.Error().Err(lastErr).Int64("runID", run.ID).Msg("Failed to update automation run after retries") - return run, lastErr + for { + <-timer.C + nextDelay, ok := s.pollCompletionLane(instanceID, lane) + if !ok { + return + } + timer.Reset(nextDelay) + } } -// updateSearchRunWithRetry attempts to update the search run in the database with retries -func (s *Service) updateSearchRunWithRetry(ctx context.Context, run *models.CrossSeedSearchRun) (*models.CrossSeedSearchRun, error) { - const maxRetries = 3 - var lastErr error +func (s *Service) pollCompletionLane(instanceID int, lane *completionLane) 
(time.Duration, bool) { + waits := s.snapshotCompletionWaits(lane) + if len(waits) == 0 { + return 0, false + } - for attempt := range maxRetries { - if attempt > 0 { - // Wait before retry - select { - case <-ctx.Done(): - return run, ctx.Err() - case <-time.After(time.Duration(attempt) * 100 * time.Millisecond): - } + now := time.Now() + activeWaits := make(map[string]*completionWaitState, len(waits)) + hashes := make([]string, 0, len(waits)) + for hash, wait := range waits { + if wait.retryAt.After(now) { + continue } + activeWaits[hash] = wait.state + hashes = append(hashes, hash) + } - updated, err := s.automationStore.UpdateSearchRun(ctx, run) - if err == nil { - return updated, nil - } + if len(activeWaits) == 0 { + lane.mu.Lock() + defer lane.mu.Unlock() - lastErr = err - log.Warn().Err(err).Int("attempt", attempt+1).Int64("runID", run.ID).Msg("Failed to update search run, retrying") + return s.nextCompletionPollDelayLocked(lane, now) } - log.Error().Err(lastErr).Int64("runID", run.ID).Msg("Failed to update search run after retries") - return run, lastErr -} + torrents, err := s.getCompletionTorrents(context.Background(), instanceID, hashes) + now = time.Now() + + lane.mu.Lock() + defer lane.mu.Unlock() -// GetAutomationStatus returns scheduler information for the API. -func (s *Service) GetAutomationStatus(ctx context.Context) (*AutomationStatus, error) { - settings, err := s.GetAutomationSettings(ctx) if err != nil { - return nil, err + log.Warn(). + Err(err). + Int("instanceID", instanceID). + Int("torrents", len(hashes)). 
+ Msg("[CROSSSEED-COMPLETION] Failed to refresh completion torrents while waiting for checking to finish") + s.expireCompletionWaitsLocked(instanceID, lane, now) + return s.nextCompletionPollDelayLocked(lane, now) } - status := &AutomationStatus{ - Settings: settings, - Running: s.runActive.Load(), - } + s.applyCompletionPollResultsLocked(instanceID, lane, activeWaits, torrents, now) - if s.automationStore != nil { - lastRun, err := s.automationStore.GetLatestRun(ctx) - if err != nil { + return s.nextCompletionPollDelayLocked(lane, now) +} + +func (s *Service) snapshotCompletionWaits(lane *completionLane) map[string]completionWaitSnapshot { + lane.mu.Lock() + defer lane.mu.Unlock() + + if len(lane.waits) == 0 { + lane.polling = false + return nil + } + + waits := make(map[string]completionWaitSnapshot, len(lane.waits)) + for hash, wait := range lane.waits { + waits[hash] = completionWaitSnapshot{ + state: wait, + retryAt: wait.retryAt, + } + } + + return waits +} + +func (s *Service) applyCompletionPollResultsLocked( + instanceID int, + lane *completionLane, + waits map[string]*completionWaitState, + torrents map[string]qbt.Torrent, + now time.Time, +) { + for hash, wait := range waits { + currentWait, ok := lane.waits[hash] + if !ok || currentWait != wait { + continue + } + + torrent, ok := torrents[hash] + if !ok { + s.failMissingCompletionWaitLocked(instanceID, lane, hash, wait) + continue + } + + current := torrent + wait.lastSeen = ¤t + + if s.keepWaitingForCompletion(instanceID, lane, hash, wait, current, now) { + continue + } + + if current.Progress < 1.0 { + log.Warn(). + Int("instanceID", instanceID). + Str("hash", current.Hash). + Str("name", current.Name). + Str("state", string(current.State)). + Float64("progress", current.Progress). 
+ Msg("[CROSSSEED-COMPLETION] Torrent finished checking but is still incomplete") + s.completeCompletionWaitLocked( + lane, + hash, + wait, + nil, + fmt.Errorf("%w: torrent %s is not fully downloaded (progress %.2f)", ErrTorrentNotComplete, current.Name, current.Progress), + ) + continue + } + + s.completeCompletionWaitLocked(lane, hash, wait, ¤t, nil) + } +} + +func (s *Service) keepWaitingForCompletion( + instanceID int, + lane *completionLane, + hash string, + wait *completionWaitState, + current qbt.Torrent, + now time.Time, +) bool { + if !isCompletionCheckingState(current.State) { + return false + } + + if !wait.checkingLogged { + log.Debug(). + Int("instanceID", instanceID). + Str("hash", current.Hash). + Str("name", current.Name). + Str("state", string(current.State)). + Float64("progress", current.Progress). + Msg("[CROSSSEED-COMPLETION] Deferring completion search while torrent is checking") + wait.checkingLogged = true + } + + if now.Before(wait.deadline) { + return true + } + + if wait.attempt < s.getCompletionMaxAttempts() { + s.retryCompletionWaitLocked(instanceID, wait, current, now) + return true + } + + log.Warn(). + Int("instanceID", instanceID). + Str("hash", current.Hash). + Str("name", current.Name). + Str("state", string(current.State)). + Float64("progress", current.Progress). + Dur("timeout", wait.timeout). + Msg("[CROSSSEED-COMPLETION] Timed out waiting for torrent checking to finish") + s.completeCompletionWaitLocked( + lane, + hash, + wait, + nil, + fmt.Errorf("completion torrent %s still checking after %s", current.Name, wait.timeout), + ) + + return true +} + +func (s *Service) retryCompletionWaitLocked(instanceID int, wait *completionWaitState, current qbt.Torrent, now time.Time) { + retryAfter := s.getCompletionRetryDelay() + retryAt := now.Add(retryAfter) + nextAttempt := wait.attempt + 1 + + log.Warn(). + Int("instanceID", instanceID). + Str("hash", current.Hash). + Str("name", current.Name). + Str("state", string(current.State)). 
+ Float64("progress", current.Progress). + Int("attempt", wait.attempt). + Int("nextAttempt", nextAttempt). + Int("maxAttempts", s.getCompletionMaxAttempts()). + Dur("timeout", wait.timeout). + Dur("retryAfter", retryAfter). + Msg("[CROSSSEED-COMPLETION] Timed out waiting for torrent checking to finish, retrying") + + wait.attempt = nextAttempt + wait.retryAt = retryAt + wait.deadline = retryAt.Add(wait.timeout) + wait.lastSeen = ¤t + wait.checkingLogged = false +} + +func (s *Service) failMissingCompletionWaitLocked( + instanceID int, + lane *completionLane, + hash string, + wait *completionWaitState, +) { + err := fmt.Errorf("%w: torrent %s not found in instance %d", ErrTorrentNotFound, wait.eventTorrent.Hash, instanceID) + + log.Warn(). + Int("instanceID", instanceID). + Str("hash", wait.eventTorrent.Hash). + Str("name", wait.eventTorrent.Name). + Err(err). + Msg("[CROSSSEED-COMPLETION] Completion torrent disappeared while waiting for checking to finish") + + s.completeCompletionWaitLocked(lane, hash, wait, nil, err) +} + +func (s *Service) expireCompletionWaitsLocked(instanceID int, lane *completionLane, now time.Time) { + for hash, wait := range lane.waits { + if now.Before(wait.deadline) { + continue + } + + s.failTimedOutCompletionWaitLocked(instanceID, lane, hash, wait) + } +} + +func (s *Service) failTimedOutCompletionWaitLocked( + instanceID int, + lane *completionLane, + hash string, + wait *completionWaitState, +) { + name := wait.eventTorrent.Name + state := qbt.TorrentState("") + progress := 0.0 + + if wait.lastSeen != nil { + name = wait.lastSeen.Name + state = wait.lastSeen.State + progress = wait.lastSeen.Progress + } + + log.Warn(). + Int("instanceID", instanceID). + Str("hash", wait.eventTorrent.Hash). + Str("name", name). + Str("state", string(state)). + Float64("progress", progress). + Dur("timeout", wait.timeout). 
+ Msg("[CROSSSEED-COMPLETION] Timed out waiting for torrent checking to finish") + s.completeCompletionWaitLocked( + lane, + hash, + wait, + nil, + fmt.Errorf("completion torrent %s still checking after %s", name, wait.timeout), + ) +} + +func (s *Service) completeCompletionWaitLocked( + lane *completionLane, + hash string, + wait *completionWaitState, + result *qbt.Torrent, + err error, +) { + delete(lane.waits, hash) + + if result != nil { + torrent := *result + wait.result = &torrent + } + wait.err = err + + close(wait.done) +} + +func (s *Service) updateCompletionPollerStateLocked(lane *completionLane) bool { + if len(lane.waits) > 0 { + return true + } + + lane.polling = false + return false +} + +func (s *Service) nextCompletionPollDelayLocked(lane *completionLane, now time.Time) (time.Duration, bool) { + if !s.updateCompletionPollerStateLocked(lane) { + return 0, false + } + + pollInterval := s.getCompletionPollInterval() + nextDelay := pollInterval + + for _, wait := range lane.waits { + if !wait.retryAt.After(now) { + return pollInterval, true + } + + delay := wait.retryAt.Sub(now) + if delay < nextDelay { + nextDelay = delay + } + } + + return nextDelay, true +} + +func (s *Service) getCompletionTorrent(ctx context.Context, instanceID int, hash string) (*qbt.Torrent, error) { + torrents, err := s.getCompletionTorrents(ctx, instanceID, []string{hash}) + if err != nil { + return nil, err + } + + torrent, ok := torrents[normalizeHash(hash)] + if !ok { + return nil, fmt.Errorf("%w: torrent %s not found in instance %d", ErrTorrentNotFound, hash, instanceID) + } + + current := torrent + return ¤t, nil +} + +func (s *Service) getCompletionTorrents(ctx context.Context, instanceID int, hashes []string) (map[string]qbt.Torrent, error) { + apiCtx, cancel := context.WithTimeout(ctx, recheckAPITimeout) + defer cancel() + + torrents, err := s.syncManager.GetTorrents(apiCtx, instanceID, qbt.TorrentFilterOptions{ + Hashes: hashes, + }) + if err != nil { + return nil, 
fmt.Errorf("load torrents: %w", err) + } + + result := make(map[string]qbt.Torrent, len(torrents)) + for _, torrent := range torrents { + result[normalizeHash(torrent.Hash)] = torrent + } + + return result, nil +} + +func isCompletionCheckingState(state qbt.TorrentState) bool { + return state == qbt.TorrentStateCheckingDl || + state == qbt.TorrentStateCheckingUp || + state == qbt.TorrentStateCheckingResumeData +} + +func (s *Service) executeCompletionSearchWithRetry( + ctx context.Context, + instanceID int, + torrent *qbt.Torrent, + settings *models.CrossSeedAutomationSettings, + completionSettings *models.InstanceCrossSeedCompletionSettings, +) error { + var lastErr error + for attempt := 1; attempt <= maxCompletionSearchAttempts; attempt++ { + err := s.invokeCompletionSearch(ctx, instanceID, torrent, settings, completionSettings) + if err == nil { + return nil + } + lastErr = err + + retryAfter, retry := completionRetryDelay(err) + if !retry || attempt == maxCompletionSearchAttempts { + break + } + if retryAfter <= 0 { + retryAfter = defaultCompletionRetryDelay + } + + log.Warn(). + Err(err). + Int("instanceID", instanceID). + Str("hash", torrent.Hash). + Int("attempt", attempt). + Dur("retryAfter", retryAfter). 
+ Msg("[CROSSSEED-COMPLETION] Rate-limited completion search, retrying") + + timer := time.NewTimer(retryAfter) + select { + case <-ctx.Done(): + timer.Stop() + return ctx.Err() + case <-timer.C: + } + } + return lastErr +} + +func (s *Service) invokeCompletionSearch( + ctx context.Context, + instanceID int, + torrent *qbt.Torrent, + settings *models.CrossSeedAutomationSettings, + completionSettings *models.InstanceCrossSeedCompletionSettings, +) error { + if s.completionSearchInvoker != nil { + return s.completionSearchInvoker(ctx, instanceID, torrent, settings, completionSettings) + } + return s.executeCompletionSearch(ctx, instanceID, torrent, settings, completionSettings) +} + +func completionRetryDelay(err error) (time.Duration, bool) { + if err == nil { + return 0, false + } + + var waitErr *jackett.RateLimitWaitError + if errors.As(err, &waitErr) { + if waitErr.Wait > 0 { + return waitErr.Wait, true + } + return defaultCompletionRetryDelay, true + } + + msg := strings.ToLower(strings.TrimSpace(err.Error())) + for _, token := range completionRateLimitTokens { + if strings.Contains(msg, token) { + return defaultCompletionRetryDelay, true + } + } + + return 0, false +} + +// updateAutomationRunWithRetry attempts to update the automation run in the database with retries +func (s *Service) updateAutomationRunWithRetry(ctx context.Context, run *models.CrossSeedRun) (*models.CrossSeedRun, error) { + const maxRetries = 3 + var lastErr error + + for attempt := range maxRetries { + if attempt > 0 { + // Wait before retry + select { + case <-ctx.Done(): + return run, ctx.Err() + case <-time.After(time.Duration(attempt) * 100 * time.Millisecond): + } + } + + updated, err := s.automationStore.UpdateRun(ctx, run) + if err == nil { + return updated, nil + } + + lastErr = err + log.Warn().Err(err).Int("attempt", attempt+1).Int64("runID", run.ID).Msg("Failed to update automation run, retrying") + } + + log.Error().Err(lastErr).Int64("runID", run.ID).Msg("Failed to update 
automation run after retries") + return run, lastErr +} + +// updateSearchRunWithRetry attempts to update the search run in the database with retries +func (s *Service) updateSearchRunWithRetry(ctx context.Context, run *models.CrossSeedSearchRun) (*models.CrossSeedSearchRun, error) { + const maxRetries = 3 + var lastErr error + + for attempt := range maxRetries { + if attempt > 0 { + // Wait before retry + select { + case <-ctx.Done(): + return run, ctx.Err() + case <-time.After(time.Duration(attempt) * 100 * time.Millisecond): + } + } + + updated, err := s.automationStore.UpdateSearchRun(ctx, run) + if err == nil { + return updated, nil + } + + lastErr = err + log.Warn().Err(err).Int("attempt", attempt+1).Int64("runID", run.ID).Msg("Failed to update search run, retrying") + } + + log.Error().Err(lastErr).Int64("runID", run.ID).Msg("Failed to update search run after retries") + return run, lastErr +} + +// GetAutomationStatus returns scheduler information for the API. +func (s *Service) GetAutomationStatus(ctx context.Context) (*AutomationStatus, error) { + settings, err := s.GetAutomationSettings(ctx) + if err != nil { + return nil, err + } + + status := &AutomationStatus{ + Settings: settings, + Running: s.runActive.Load(), + } + + if s.automationStore != nil { + lastRun, err := s.automationStore.GetLatestRun(ctx) + if err != nil { return nil, fmt.Errorf("load latest automation run: %w", err) } @@ -2361,7 +3001,9 @@ func (s *Service) executeAutomationRun(ctx context.Context, run *models.CrossSee s.notifyAutomationRun(context.WithoutCancel(ctx), run, runErr) }() - searchCtx := jackett.WithSearchPriority(ctx, jackett.RateLimitPriorityRSS) + searchCtxBase := jackett.WithSearchPriority(ctx, jackett.RateLimitPriorityRSS) + recentCtx, recentCancel := context.WithTimeout(searchCtxBase, 10*time.Minute) + defer recentCancel() var searchResp *jackett.SearchResponse respCh := make(chan *jackett.SearchResponse, 1) @@ -2391,7 +3033,7 @@ func (s *Service) 
executeAutomationRun(ctx context.Context, run *models.CrossSee return run, ErrNoIndexersConfigured } - err := s.jackettService.Recent(searchCtx, 0, resolvedIndexerIDs, func(resp *jackett.SearchResponse, err error) { + err := s.jackettService.Recent(recentCtx, 0, resolvedIndexerIDs, func(resp *jackett.SearchResponse, err error) { if err != nil { errCh <- err } else { @@ -2425,29 +3067,30 @@ func (s *Service) executeAutomationRun(ctx context.Context, run *models.CrossSee } runErr = err return run, err - case <-time.After(10 * time.Minute): // generous timeout for RSS automation - msg := "Recent search timed out" - run.ErrorMessage = &msg + case <-recentCtx.Done(): + err := recentCtx.Err() run.Status = models.CrossSeedRunStatusFailed completed := time.Now().UTC() run.CompletedAt = &completed - if updated, updateErr := s.updateAutomationRunWithRetry(ctx, run); updateErr == nil { - run = updated + + if errors.Is(err, context.DeadlineExceeded) { + msg := "Recent search timed out" + run.ErrorMessage = &msg + if updated, updateErr := s.updateAutomationRunWithRetry(ctx, run); updateErr == nil { + run = updated + } + timeoutErr := errors.New("recent search timed out") + runErr = timeoutErr + return run, timeoutErr } - timeoutErr := errors.New("recent search timed out") - runErr = timeoutErr - return run, timeoutErr - case <-ctx.Done(): + msg := "canceled by user" run.ErrorMessage = &msg - run.Status = models.CrossSeedRunStatusFailed - completed := time.Now().UTC() - run.CompletedAt = &completed if updated, updateErr := s.updateAutomationRunWithRetry(context.WithoutCancel(ctx), run); updateErr == nil { run = updated } - runErr = ctx.Err() - return run, ctx.Err() + runErr = err + return run, err } // Pre-fetch all indexer info (names and domains) for performance @@ -2460,6 +3103,7 @@ func (s *Service) executeAutomationRun(ctx context.Context, run *models.CrossSee autoCtx := &automationContext{ snapshots: s.buildAutomationSnapshots(ctx, settings.TargetInstanceIDs, settings), 
candidateCache: make(map[string]*FindCandidatesResponse), + candidateOrder: make([]string, 0, automationCandidateCacheMaxEntries), } log.Debug(). @@ -2999,6 +3643,9 @@ func (s *Service) findCandidatesWithAutomationContext(ctx context.Context, req * if autoCtx.candidateCache == nil { autoCtx.candidateCache = make(map[string]*FindCandidatesResponse) } + if autoCtx.candidateOrder == nil { + autoCtx.candidateOrder = make([]string, 0, automationCandidateCacheMaxEntries) + } cacheKey := s.automationCacheKey(req.TorrentName, req.FindIndividualEpisodes) if cacheKey != "" { @@ -3013,11 +3660,46 @@ func (s *Service) findCandidatesWithAutomationContext(ctx context.Context, req * } if cacheKey != "" { - autoCtx.candidateCache[cacheKey] = resp + cacheAutomationCandidateResponse(autoCtx, cacheKey, resp) } return resp, nil } +func cacheAutomationCandidateResponse(autoCtx *automationContext, cacheKey string, resp *FindCandidatesResponse) { + if autoCtx == nil || cacheKey == "" || resp == nil { + return + } + if autoCtx.candidateCache == nil { + autoCtx.candidateCache = make(map[string]*FindCandidatesResponse) + } + + if _, ok := autoCtx.candidateCache[cacheKey]; ok { + autoCtx.candidateCache[cacheKey] = resp + return + } + + if automationCandidateCacheMaxEntries > 0 && len(autoCtx.candidateCache) >= automationCandidateCacheMaxEntries { + for len(autoCtx.candidateOrder) > 0 { + oldestKey := autoCtx.candidateOrder[0] + autoCtx.candidateOrder = autoCtx.candidateOrder[1:] + if _, ok := autoCtx.candidateCache[oldestKey]; ok { + delete(autoCtx.candidateCache, oldestKey) + break + } + } + + if len(autoCtx.candidateCache) >= automationCandidateCacheMaxEntries { + for existingKey := range autoCtx.candidateCache { + delete(autoCtx.candidateCache, existingKey) + break + } + } + } + + autoCtx.candidateCache[cacheKey] = resp + autoCtx.candidateOrder = append(autoCtx.candidateOrder, cacheKey) +} + // FindCandidates finds ALL existing torrents across instances that match a title string // Input: 
Just a torrent NAME (string) - the torrent doesn't exist yet // Output: All existing torrents that have related content based on release name parsing @@ -3171,39 +3853,53 @@ func (s *Service) findCandidates(ctx context.Context, req *FindCandidatesRequest continue } - candidates := make([]qbt.Torrent, 0, len(torrentByHash)) - for _, t := range torrentByHash { - candidates = append(candidates, t) - } - filesByHash := s.batchLoadCandidateFiles(ctx, instanceID, candidates) - var matchedTorrents []qbt.Torrent matchTypeCounts := make(map[string]int) - for _, hashKey := range candidateHashes { - torrent := torrentByHash[hashKey] - candidateFiles, ok := filesByHash[hashKey] - if !ok || len(candidateFiles) == 0 { - continue - } + for start := 0; start < len(candidateHashes); start += candidateFileBatchSize { + end := min(start+candidateFileBatchSize, len(candidateHashes)) - // Now check if this torrent actually has the files we need - // This handles: single episode in season pack, season pack containing episodes, etc. - candidateRelease := s.releaseCache.Parse(torrent.Name) - matchType := s.getMatchTypeFromTitle(req.TorrentName, torrent.Name, targetRelease, candidateRelease, candidateFiles) - if matchType == "" { + batchTorrents := make([]qbt.Torrent, 0, end-start) + for _, hashKey := range candidateHashes[start:end] { + if torrent, ok := torrentByHash[hashKey]; ok { + batchTorrents = append(batchTorrents, torrent) + } + } + if len(batchTorrents) == 0 { continue } - matchedTorrents = append(matchedTorrents, torrent) - matchTypeCounts[matchType]++ - log.Debug(). - Str("targetTitle", req.TorrentName). - Str("existingTorrent", torrent.Name). - Int("instanceID", instanceID). - Str("instanceName", instance.Name). - Str("matchType", matchType). 
- Msg("Found matching torrent with required files") + filesByHash := s.batchLoadCandidateFiles(ctx, instanceID, batchTorrents) + + for _, hashKey := range candidateHashes[start:end] { + torrent, ok := torrentByHash[hashKey] + if !ok { + continue + } + + candidateFiles, ok := filesByHash[hashKey] + if !ok || len(candidateFiles) == 0 { + continue + } + + // Now check if this torrent actually has the files we need. + // This handles: single episode in season pack, season pack containing episodes, etc. + candidateRelease := s.releaseCache.Parse(torrent.Name) + matchType := s.getMatchTypeFromTitle(req.TorrentName, torrent.Name, targetRelease, candidateRelease, candidateFiles) + if matchType == "" { + continue + } + + matchedTorrents = append(matchedTorrents, torrent) + matchTypeCounts[matchType]++ + log.Debug(). + Str("targetTitle", req.TorrentName). + Str("existingTorrent", torrent.Name). + Int("instanceID", instanceID). + Str("instanceName", instance.Name). + Str("matchType", matchType). + Msg("Found matching torrent with required files") + } } // Add all matches from this instance @@ -3667,6 +4363,18 @@ func (s *Service) processCrossSeedCandidate( instance, instanceErr := s.instanceStore.Get(ctx, candidate.InstanceID) useReflinkMode := instanceErr == nil && instance != nil && instance.UseReflinks useHardlinkMode := instanceErr == nil && instance != nil && instance.UseHardlinks && !instance.UseReflinks + managedDest, _, managedDestErr := s.resolveManagedDestinationContext( + ctx, + instance, + candidate, + torrentBytes, + torrentHash, + torrentName, + req, + matchedTorrent, + props, + sourceFiles, + ) runReuseSafetyChecks := func() bool { // SAFETY: Reject cross-seeds where main content file sizes don't match. 
@@ -3844,29 +4552,18 @@ func (s *Service) processCrossSeedCandidate( var actualCategorySavePath string var categoryCreationFailed bool if crossCategory != "" { - // Try to get SavePath from the base category definition in qBittorrent - categories, catErr := s.syncManager.GetCategories(ctx, candidate.InstanceID) - if catErr != nil { - log.Debug().Err(catErr).Int("instanceID", candidate.InstanceID). - Msg("[CROSSSEED] Failed to fetch categories, falling back to torrent SavePath") - } - if catErr == nil && categories != nil { - if cat, exists := categories[baseCategory]; exists && cat.SavePath != "" { - categorySavePath = cat.SavePath - actualCategorySavePath = cat.SavePath - } - } - - // Fallback to matched torrent's SavePath if category has no explicit SavePath - if categorySavePath == "" { - categorySavePath = props.SavePath - } - - // Ensure the cross-seed category exists with the correct SavePath - if err := s.ensureCrossCategory(ctx, candidate.InstanceID, crossCategory, categorySavePath); err != nil { - log.Warn().Err(err). + var prepErr error + categorySavePath, actualCategorySavePath, prepErr = s.prepareCrossCategory( + ctx, + candidate.InstanceID, + baseCategory, + crossCategory, + props.SavePath, + ) + if prepErr != nil { + log.Warn().Err(prepErr). Str("category", crossCategory). - Str("savePath", categorySavePath). + Str("savePath", props.SavePath). 
Msg("[CROSSSEED] Failed to ensure category exists, continuing without category") crossCategory = "" // Clear category to proceed without it categoryCreationFailed = true // Track for result message @@ -3877,7 +4574,7 @@ func (s *Service) processCrossSeedCandidate( if useReflinkMode { rlResult := s.processReflinkMode( ctx, candidate, torrentBytes, torrentHash, torrentHashV2, torrentName, req, - matchedTorrent, matchType, sourceFiles, candidateFiles, props, + matchedTorrent, matchType, sourceFiles, candidateFiles, torrentInfo, props, managedDest, managedDestErr, baseCategory, crossCategory, ) if rlResult.Used { @@ -3896,7 +4593,7 @@ func (s *Service) processCrossSeedCandidate( if useHardlinkMode { hlResult := s.processHardlinkMode( ctx, candidate, torrentBytes, torrentHash, torrentHashV2, torrentName, req, - matchedTorrent, matchType, sourceFiles, candidateFiles, props, + matchedTorrent, matchType, sourceFiles, candidateFiles, torrentInfo, props, managedDest, managedDestErr, baseCategory, crossCategory, ) if hlResult.Used { @@ -3916,6 +4613,8 @@ func (s *Service) processCrossSeedCandidate( // // Hardlink/reflink modes are safe because they use contentLayout=Original and preserve // the incoming torrent's layout exactly via hardlink/reflink tree creation. + // If either mode fell back to regular mode before creating that tree, regular mode must + // still reject this case rather than treating a computed managed destination as usable. 
if sourceRoot != "" && candidateRoot == "" && hasExtraFiles { result.Status = "requires_hardlink_reflink" result.Message = "Skipped: cross-seed with extra files and rootless content requires hardlink or reflink mode to avoid scattering files in base directory" @@ -4299,7 +4998,7 @@ func dedupeHashes(hashes ...string) []string { return nil } - out := make([]string, 0, len(hashes)) + var out []string seen := make(map[string]struct{}, len(hashes)) for _, hash := range hashes { @@ -4363,10 +5062,182 @@ func (s *Service) queueRecheckResumeWithThreshold(_ context.Context, instanceID default: log.Warn(). Int("instanceID", instanceID). - Str("hash", hash). - Msg("Recheck resume channel full, skipping queue") - return errors.New("recheck resume queue full") + Str("hash", hash). + Msg("Recheck resume channel full, skipping queue") + return errors.New("recheck resume queue full") + } +} + +func (s *Service) recheckConfirmationPollInterval() time.Duration { + if s != nil && s.recheckConfirmPoll > 0 { + return s.recheckConfirmPoll + } + return recheckConfirmPollInterval +} + +func (s *Service) recheckConfirmationTimeout() time.Duration { + if s != nil && s.recheckConfirmWait > 0 { + return s.recheckConfirmWait + } + return recheckConfirmTimeout +} + +func (s *Service) recheckConfirmationAttempts() int { + if s != nil && s.recheckConfirmTries > 0 { + return s.recheckConfirmTries + } + return recheckConfirmMaxAttempts +} + +func torrentMatchesAnyHash(torrent qbt.Torrent, hashes []string) bool { + torrentHash := normalizeHash(torrent.Hash) + torrentHashV1 := normalizeHash(torrent.InfohashV1) + torrentHashV2 := normalizeHash(torrent.InfohashV2) + + for _, hash := range hashes { + normalized := normalizeHash(hash) + if normalized == "" { + continue + } + if torrentHash == normalized || + torrentHashV1 == normalized || + torrentHashV2 == normalized { + return true + } + } + return false +} + +func (s *Service) syncTorrentStateAfterAdd(ctx context.Context, instanceID int) { + if s == 
nil || s.syncManager == nil { + return + } + + syncManager, err := s.syncManager.GetQBittorrentSyncManager(ctx, instanceID) + if err != nil || syncManager == nil { + return + } + + syncCtx, cancel := context.WithTimeout(ctx, recheckAPITimeout) + defer cancel() + if err := syncManager.Sync(syncCtx); err != nil { + log.Debug(). + Err(err). + Int("instanceID", instanceID). + Msg("[CROSSSEED] Failed to force sync while confirming injected torrent recheck") + } +} + +func (s *Service) waitForInjectedTorrentRecheck( + ctx context.Context, + instanceID int, + hashes []string, +) (qbt.TorrentState, float64, bool) { + pollInterval := s.recheckConfirmationPollInterval() + timeout := s.recheckConfirmationTimeout() + deadline := time.Now().Add(timeout) + lastState := qbt.TorrentState("") + lastProgress := 0.0 + + for { + s.syncTorrentStateAfterAdd(ctx, instanceID) + + apiCtx, cancel := context.WithTimeout(ctx, recheckAPITimeout) + torrents, err := s.syncManager.GetTorrents(apiCtx, instanceID, qbt.TorrentFilterOptions{Hashes: hashes}) + cancel() + if err == nil { + for _, torrent := range torrents { + if !torrentMatchesAnyHash(torrent, hashes) { + continue + } + lastState = torrent.State + lastProgress = torrent.Progress + + if isTorrentCheckingState(torrent.State) { + return lastState, lastProgress, true + } + if s.partialPoolTorrentRunning(torrent.State) { + return lastState, lastProgress, true + } + if s.partialPoolTorrentPaused(torrent.State) && torrent.Progress > 0 && torrent.Progress < 1 { + return lastState, lastProgress, true + } + } + } + + if time.Now().After(deadline) { + return lastState, lastProgress, false + } + + select { + case <-ctx.Done(): + return lastState, lastProgress, false + case <-time.After(pollInterval): + } + } +} + +func (s *Service) triggerAndConfirmInjectedTorrentRecheck( + ctx context.Context, + instanceID int, + hashes []string, + primaryHash string, + logPrefix string, +) (bool, error) { + attempts := s.recheckConfirmationAttempts() + 
attemptTimeout := s.recheckConfirmationTimeout() + var lastState qbt.TorrentState + var lastProgress float64 + + for attempt := 1; attempt <= attempts; attempt++ { + attemptCtx, cancel := context.WithTimeout(ctx, attemptTimeout) + + log.Debug(). + Int("instanceID", instanceID). + Str("torrentHash", primaryHash). + Int("attempt", attempt). + Msg(logPrefix + ": recheck requested after add") + + if err := s.syncManager.BulkAction(attemptCtx, instanceID, hashes, "recheck"); err != nil { + cancel() + attemptTimedOut := attemptCtx.Err() != nil && + ctx.Err() == nil && + (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) + if attemptTimedOut { + log.Debug(). + Err(err). + Int("instanceID", instanceID). + Str("torrentHash", primaryHash). + Int("attempt", attempt). + Msg(logPrefix + ": recheck trigger attempt timed out") + continue + } + return false, err + } + + state, progress, confirmed := s.waitForInjectedTorrentRecheck(attemptCtx, instanceID, hashes) + cancel() + lastState = state + lastProgress = progress + if confirmed { + log.Debug(). + Int("instanceID", instanceID). + Str("torrentHash", primaryHash). + Int("attempt", attempt). + Str("torrentState", string(state)). + Float64("progress", progress). + Msg(logPrefix + ": recheck confirmed after add") + return true, nil + } } + + log.Warn(). + Int("instanceID", instanceID). + Str("torrentHash", primaryHash). + Str("torrentState", string(lastState)). + Float64("progress", lastProgress). + Msg(logPrefix + ": recheck not confirmed after add; leaving torrent paused for manual intervention") + return false, nil } // recheckResumeWorker is a single goroutine that processes all pending recheck resumes. @@ -4499,6 +5370,20 @@ func (s *Service) recheckResumeWorker() { } case <-s.recheckResumeCtx.Done(): + if len(pending) > 0 { + for hash, req := range pending { + if req == nil { + continue + } + log.Debug(). + Int("instanceID", req.instanceID). + Str("hash", hash). 
+ Float64("threshold", req.threshold). + Dur("queuedFor", time.Since(req.addedAt)). + Msg("Recheck resume worker cancellation dropped pending item") + } + clear(pending) + } log.Debug(). Int("pendingCount", len(pending)). Msg("Recheck resume worker shutting down") @@ -4683,13 +5568,27 @@ func (s *Service) batchLoadCandidateFiles(ctx context.Context, instanceID int, t return nil } - filesByHash, err := s.syncManager.GetTorrentFilesBatch(ctx, instanceID, hashes) - if err != nil { - log.Warn(). - Int("instanceID", instanceID). - Int("hashCount", len(hashes)). - Err(err). - Msg("Failed to batch load torrent files for candidate selection") + filesByHash := make(map[string]qbt.TorrentFiles, len(hashes)) + for start := 0; start < len(hashes); start += candidateFileBatchSize { + end := min(start+candidateFileBatchSize, len(hashes)) + + batch := hashes[start:end] + batchFiles, err := s.syncManager.GetTorrentFilesBatch(ctx, instanceID, batch) + if err != nil { + log.Warn(). + Int("instanceID", instanceID). + Int("hashCount", len(batch)). + Err(err). + Msg("Failed to batch load torrent files for candidate selection") + continue + } + + for hash, files := range batchFiles { + filesByHash[normalizeHash(hash)] = files + } + } + + if len(filesByHash) == 0 { return nil } @@ -4709,6 +5608,7 @@ func (s *Service) findBestCandidateMatch( candidateFiles qbt.TorrentFiles matchType string bestScore int + bestIsCrossSeed bool bestHasRoot bool bestFileCount int bestRejectReason string // Track the most informative rejection reason @@ -4777,13 +5677,17 @@ func (s *Service) findBestCandidateMatch( hasRootFolder := detectCommonRoot(files) != "" fileCount := len(files) + isCrossSeed := hasCrossSeedTag(torrent.Tags) shouldPromote := matchedTorrent == nil || score > bestScore if !shouldPromote && score == bestScore { - // Prefer candidates with a top-level folder so cross-seeded torrents inherit cleaner layouts. 
- if hasRootFolder && !bestHasRoot { + switch { + case bestIsCrossSeed && !isCrossSeed: + shouldPromote = true + case bestIsCrossSeed == isCrossSeed && hasRootFolder && !bestHasRoot: + // Prefer candidates with a top-level folder so cross-seeded torrents inherit cleaner layouts. shouldPromote = true - } else if hasRootFolder == bestHasRoot && fileCount > bestFileCount { + case bestIsCrossSeed == isCrossSeed && hasRootFolder == bestHasRoot && fileCount > bestFileCount: // Fall back to the candidate that carries more files (season packs vs single files). shouldPromote = true } @@ -4795,6 +5699,7 @@ func (s *Service) findBestCandidateMatch( candidateFiles = files matchType = actualMatchType bestScore = score + bestIsCrossSeed = isCrossSeed bestHasRoot = hasRootFolder bestFileCount = fileCount } @@ -9483,6 +10388,60 @@ func (s *Service) ensureCrossCategory(ctx context.Context, instanceID int, cross return err } +type crossCategoryPreparation struct { + categorySavePath string + actualCategorySavePath string +} + +func (s *Service) prepareCrossCategory(ctx context.Context, instanceID int, baseCategory, crossCategory, fallbackSavePath string) (string, string, error) { + if crossCategory == "" { + return "", "", nil + } + + key := fmt.Sprintf("prepare:%d:%s", instanceID, crossCategory) + prepared, err, _ := s.categoryCreationGroup.Do(key, func() (any, error) { + categorySavePath := "" + actualCategorySavePath := "" + + // Try to get SavePath from the base category definition in qBittorrent. + categories, catErr := s.syncManager.GetCategories(ctx, instanceID) + if catErr != nil { + log.Debug().Err(catErr).Int("instanceID", instanceID). 
+ Msg("[CROSSSEED] Failed to fetch categories, falling back to torrent SavePath") + } + if catErr == nil && categories != nil { + if cat, exists := categories[baseCategory]; exists && cat.SavePath != "" { + categorySavePath = cat.SavePath + actualCategorySavePath = cat.SavePath + } + } + + // Fallback to matched torrent's SavePath if category has no explicit SavePath. + if categorySavePath == "" { + categorySavePath = fallbackSavePath + } + + if err := s.ensureCrossCategory(ctx, instanceID, crossCategory, categorySavePath); err != nil { + return nil, err + } + + return crossCategoryPreparation{ + categorySavePath: categorySavePath, + actualCategorySavePath: actualCategorySavePath, + }, nil + }) + if err != nil { + return "", "", err + } + + preparation, ok := prepared.(crossCategoryPreparation) + if !ok { + return fallbackSavePath, "", nil + } + + return preparation.categorySavePath, preparation.actualCategorySavePath, nil +} + // determineCrossSeedCategory selects the category to apply to a cross-seeded torrent. // Returns (baseCategory, crossCategory) where baseCategory is used to look up save_path // and crossCategory is the final category name (with .cross suffix if enabled). 
@@ -10336,6 +11295,155 @@ type hardlinkModeResult struct { Result InstanceCrossSeedResult } +func applyTreeModeAddState(options map[string]string, requiresRecheck bool, keepPaused bool) { + options["skip_checking"] = "true" + if requiresRecheck || keepPaused { + options["stopped"] = "true" + options["paused"] = "true" + return + } + + options["stopped"] = "false" + options["paused"] = "false" +} + +type treeModeAvailability struct { + existingFiles []hardlinktree.ExistingFile + availableByKey map[string]int + availableFileSet map[string]struct{} + unavailableFiles int +} + +type treeModeInitialFile struct { + absPath string + size int64 +} + +func buildTreeModeAvailability(basePath string, files qbt.TorrentFiles) treeModeAvailability { + availability := treeModeAvailability{ + existingFiles: make([]hardlinktree.ExistingFile, 0, len(files)), + availableByKey: make(map[string]int, len(files)), + availableFileSet: make(map[string]struct{}, len(files)), + } + + for _, file := range files { + initialFile, ok := treeModeInitialFileInfo(basePath, file.Name, float64(file.Progress)) + if !ok { + availability.unavailableFiles++ + continue + } + + availability.existingFiles = append(availability.existingFiles, hardlinktree.ExistingFile{ + AbsPath: initialFile.absPath, + RelPath: file.Name, + Size: initialFile.size, + }) + + key := treeModeFileKey(file.Name, initialFile.size) + availability.availableByKey[key]++ + availability.availableFileSet[key] = struct{}{} + } + + return availability +} + +func treeModeInitialFileInfo(basePath, relPath string, progress float64) (treeModeInitialFile, bool) { + if progress < partialPoolFileCompleteThreshold { + return treeModeInitialFile{}, false + } + + absPath := filepath.Join(basePath, filepath.FromSlash(relPath)) + info, err := os.Stat(absPath) + if err != nil { + return treeModeInitialFile{}, false + } + if !info.Mode().IsRegular() { + return treeModeInitialFile{}, false + } + + return treeModeInitialFile{ + absPath: absPath, + size: 
info.Size(), + }, true +} + +func treeModeFileAvailable(basePath, relPath string, expectedSize int64, progress float64) bool { + initialFile, ok := treeModeInitialFileInfo(basePath, relPath, progress) + if !ok { + return false + } + + return initialFile.size == expectedSize +} + +func treeModeFileKey(name string, size int64) string { + return normalizeFileKey(name) + "|" + strconv.FormatInt(size, 10) +} + +func (s *Service) resolveManagedDestinationContext( + ctx context.Context, + instance *models.Instance, + candidate CrossSeedCandidate, + torrentBytes []byte, + torrentHash string, + torrentName string, + req *CrossSeedRequest, + matchedTorrent *qbt.Torrent, + props *qbt.TorrentProperties, + sourceFiles qbt.TorrentFiles, +) (managedDestinationContext, bool, error) { + if instance == nil || (!instance.UseHardlinks && !instance.UseReflinks) { + return managedDestinationContext{}, false, nil + } + if !instance.HasLocalFilesystemAccess || strings.TrimSpace(instance.HardlinkBaseDir) == "" { + return managedDestinationContext{}, false, nil + } + if len(sourceFiles) == 0 { + return managedDestinationContext{}, false, errors.New("source torrent has no files") + } + + var existingFilePath string + switch { + case matchedTorrent != nil && matchedTorrent.ContentPath != "": + existingFilePath = matchedTorrent.ContentPath + case props != nil && props.SavePath != "": + existingFilePath = props.SavePath + default: + return managedDestinationContext{}, false, errors.New("matched torrent has no content path or save path") + } + + selectedBaseDir, err := findMatchingBaseDir(instance.HardlinkBaseDir, existingFilePath) + if err != nil { + return managedDestinationContext{}, false, err + } + + sourceTorrentFiles := make([]hardlinktree.TorrentFile, 0, len(sourceFiles)) + for _, f := range sourceFiles { + sourceTorrentFiles = append(sourceTorrentFiles, hardlinktree.TorrentFile{ + Path: f.Name, + Size: f.Size, + }) + } + + incomingTrackerDomain := 
ParseTorrentAnnounceDomain(torrentBytes) + dest := s.buildManagedDestination( + ctx, + instance, + selectedBaseDir, + torrentHash, + torrentName, + candidate, + incomingTrackerDomain, + req, + sourceTorrentFiles, + ) + if dest.RootDir == "" { + return managedDestinationContext{}, false, errors.New("managed destination root resolved empty") + } + + return dest, true, nil +} + // processHardlinkMode attempts to add a cross-seed torrent using hardlink mode. // This creates a hardlinked file tree matching the incoming torrent's layout, // eliminating the need for reuse+rename alignment. @@ -10354,7 +11462,10 @@ func (s *Service) processHardlinkMode( matchedTorrent *qbt.Torrent, matchType string, sourceFiles, candidateFiles qbt.TorrentFiles, + torrentInfo *metainfo.Info, props *qbt.TorrentProperties, + managedDest managedDestinationContext, + managedDestErr error, _, crossCategory string, // baseCategory unused, crossCategory used for torrent options ) hardlinkModeResult { notUsed := hardlinkModeResult{Used: false} @@ -10403,26 +11514,6 @@ func (s *Service) processHardlinkMode( return hardlinkError(message) } - // Check if source has extra files (files not present in candidate). - // If extras exist and piece-boundary check passed (checked earlier in processCrossSeedCandidate), - // we'll hardlink content files and let qBittorrent download the safe extras via recheck. - hasExtras := hasExtraSourceFiles(sourceFiles, candidateFiles) - - // Early guard: if SkipRecheck is enabled and we have extras, skip before any plan building - if req.SkipRecheck && hasExtras { - return hardlinkModeResult{ - Used: true, - Success: false, - Result: InstanceCrossSeedResult{ - InstanceID: candidate.InstanceID, - InstanceName: candidate.InstanceName, - Success: false, - Status: "skipped_recheck", - Message: skippedRecheckMessage, - }, - } - } - // Validate base directory is configured if instance.HardlinkBaseDir == "" { log.Warn(). 
@@ -10440,33 +11531,25 @@ func (s *Service) processHardlinkMode( return handleError(fmt.Sprintf("Instance '%s' does not have local filesystem access enabled", candidate.InstanceName)) } - // Need a valid file path from matched torrent to check filesystem + // Need candidate files to build the hardlink plan. if len(candidateFiles) == 0 { return handleError("No candidate files available for hardlink matching") } - - // Build path to existing file (matched torrent's content) - var existingFilePath string - if matchedTorrent.ContentPath != "" { - existingFilePath = matchedTorrent.ContentPath - } else if props.SavePath != "" { - existingFilePath = props.SavePath - } else { - log.Warn(). - Int("instanceID", candidate.InstanceID). - Str("matchedHash", matchedTorrent.Hash). - Msg("[CROSSSEED] Hardlink mode: no content path or save path available") - return handleError("No content path or save path available for matched torrent") + if props == nil || strings.TrimSpace(props.SavePath) == "" { + return handleError("Candidate save path is unavailable for hardlink mode") } - selectedBaseDir, err := findMatchingBaseDir(instance.HardlinkBaseDir, existingFilePath) - if err != nil { - log.Warn(). - Err(err). - Str("configuredDirs", instance.HardlinkBaseDir). - Str("existingPath", existingFilePath). - Msg("[CROSSSEED] Hardlink mode: no suitable base directory found") - return handleError(fmt.Sprintf("No suitable base directory: %v", err)) + destDir := managedDest.RootDir + if destDir == "" { + message := "Managed destination root is unavailable for hardlink mode" + logEvent := log.Warn(). 
+ Int("instanceID", candidate.InstanceID) + if managedDestErr != nil { + message = fmt.Sprintf("Managed destination root is unavailable for hardlink mode: %v", managedDestErr) + logEvent = logEvent.Err(managedDestErr) + } + logEvent.Msg("[CROSSSEED] Hardlink mode: managed destination root unavailable") + return handleError(message) } // Hardlink mode always uses Original layout to match the incoming torrent's structure exactly. @@ -10474,96 +11557,77 @@ func (s *Service) processHardlinkMode( // double-folder nesting issues when the instance default is Subfolder. layout := hardlinktree.LayoutOriginal - // Build ALL source files list (for destDir calculation - reflects full torrent structure) - candidateTorrentFilesAll := make([]hardlinktree.TorrentFile, 0, len(sourceFiles)) - for _, f := range sourceFiles { - candidateTorrentFilesAll = append(candidateTorrentFilesAll, hardlinktree.TorrentFile{ - Path: f.Name, - Size: f.Size, - }) - } - - // Extract incoming tracker domain from torrent bytes (for "by-tracker" preset) - incomingTrackerDomain := ParseTorrentAnnounceDomain(torrentBytes) - - // Build destination directory based on preset and torrent structure - destDir := s.buildHardlinkDestDir(ctx, instance, selectedBaseDir, torrentHash, torrentName, candidate, incomingTrackerDomain, req, candidateTorrentFilesAll) - - // Build existing files list (all files on disk from matched torrent). - // We pass all existing files to BuildPlan so it can use path/name matching - // to select the correct source file for each target. - existingFiles := make([]hardlinktree.ExistingFile, 0, len(candidateFiles)) - for _, f := range candidateFiles { - existingFiles = append(existingFiles, hardlinktree.ExistingFile{ - AbsPath: filepath.Join(props.SavePath, f.Name), - RelPath: f.Name, - Size: f.Size, - }) - } - - // Build LINKABLE source files list (only files that have matching (normalizedKey, size) in candidate). 
- // When hasExtras=true, some source files won't be linked; qBittorrent will download them. - // We use a (normalizedKey, size) multiset to determine which source files have matches, - // but let BuildPlan handle the actual pairing by path/name similarity. - type fileKeySize struct { - key string - size int64 - } - candidateKeyMultiset := make(map[fileKeySize]int) - for _, cf := range candidateFiles { - key := fileKeySize{key: normalizeFileKey(cf.Name), size: cf.Size} - candidateKeyMultiset[key]++ - } + hasMetadataExtras := hasExtraSourceFiles(sourceFiles, candidateFiles) + availability := buildTreeModeAvailability(props.SavePath, candidateFiles) var candidateTorrentFilesToLink []hardlinktree.TorrentFile - sourceKeyUsed := make(map[fileKeySize]int) + sourceKeyUsed := make(map[string]int) for _, f := range sourceFiles { - key := fileKeySize{key: normalizeFileKey(f.Name), size: f.Size} - if sourceKeyUsed[key] < candidateKeyMultiset[key] { - // This source file has a matching (normalizedKey, size) in candidate - include it for linking + key := treeModeFileKey(f.Name, f.Size) + if sourceKeyUsed[key] < availability.availableByKey[key] { candidateTorrentFilesToLink = append(candidateTorrentFilesToLink, hardlinktree.TorrentFile{ Path: f.Name, Size: f.Size, }) sourceKeyUsed[key]++ } - // else: no matching existing file - this is an extra that will be downloaded } - if len(candidateTorrentFilesToLink) == 0 { - return handleError("No linkable files found (all source files are extras)") - } + linkedFiles := len(candidateTorrentFilesToLink) + totalFiles := len(sourceFiles) + requiresRecheck := linkedFiles < totalFiles + addPolicy := PolicyForSourceFiles(sourceFiles) + settings, _ := s.GetAutomationSettings(ctx) + pooledAutomation := shouldUsePartialPool(settings, matchType, requiresRecheck, addPolicy.DiscLayout) - // Build hardlink tree plan with only the linkable files - plan, err := hardlinktree.BuildPlan(candidateTorrentFilesToLink, existingFiles, layout, torrentName, 
destDir) - if err != nil { - log.Error(). - Err(err). - Int("instanceID", candidate.InstanceID). - Str("torrentName", torrentName). - Str("destDir", destDir). - Msg("[CROSSSEED] Hardlink mode: failed to build plan, aborting") - return handleError(fmt.Sprintf("Failed to build hardlink plan: %v", err)) + if req.SkipRecheck && requiresRecheck { + return hardlinkModeResult{ + Used: true, + Success: false, + Result: InstanceCrossSeedResult{ + InstanceID: candidate.InstanceID, + InstanceName: candidate.InstanceName, + Success: false, + Status: "skipped_recheck", + Message: skippedRecheckMessage, + }, + } } - // Create hardlink tree on disk - if err := hardlinktree.Create(plan); err != nil { - log.Error(). - Err(err). + var plan *hardlinktree.TreePlan + if linkedFiles > 0 { + plan, err = hardlinktree.BuildPlan(candidateTorrentFilesToLink, availability.existingFiles, layout, torrentName, destDir) + if err != nil { + log.Error(). + Err(err). + Int("instanceID", candidate.InstanceID). + Str("torrentName", torrentName). + Str("destDir", destDir). + Msg("[CROSSSEED] Hardlink mode: failed to build plan, aborting") + return handleError(fmt.Sprintf("Failed to build hardlink plan: %v", err)) + } + + if err := hardlinktree.Create(plan); err != nil { + log.Error(). + Err(err). + Int("instanceID", candidate.InstanceID). + Str("torrentName", torrentName). + Str("destDir", destDir). + Msg("[CROSSSEED] Hardlink mode: failed to create hardlink tree, aborting") + return handleError(fmt.Sprintf("Failed to create hardlink tree: %v", err)) + } + + log.Info(). Int("instanceID", candidate.InstanceID). Str("torrentName", torrentName). Str("destDir", destDir). - Msg("[CROSSSEED] Hardlink mode: failed to create hardlink tree, aborting") - return handleError(fmt.Sprintf("Failed to create hardlink tree: %v", err)) + Int("fileCount", len(plan.Files)). + Int("unavailableFiles", availability.unavailableFiles). 
+ Msg("[CROSSSEED] Hardlink mode: created hardlink tree") + } else if err := os.MkdirAll(destDir, 0o755); err != nil { + return handleError(fmt.Sprintf("Failed to prepare managed destination root: %v", err)) } - log.Info(). - Int("instanceID", candidate.InstanceID). - Str("torrentName", torrentName). - Str("destDir", destDir). - Int("fileCount", len(plan.Files)). - Msg("[CROSSSEED] Hardlink mode: created hardlink tree") - // Build options for adding torrent options := make(map[string]string) @@ -10582,12 +11646,9 @@ func (s *Service) processHardlinkMode( // Force contentLayout=Original to match the hardlink tree layout exactly // and avoid double-folder nesting when instance default is Subfolder options["autoTMM"] = "false" - options["savepath"] = plan.RootDir + options["savepath"] = destDir options["contentLayout"] = "Original" - // Compute add policy from source files (e.g., disc layout detection) - addPolicy := PolicyForSourceFiles(sourceFiles) - if addPolicy.DiscLayout { log.Info(). Int("instanceID", candidate.InstanceID). 
@@ -10610,38 +11671,40 @@ func (s *Service) processHardlinkMode( }, } } - - // Handle skip_checking and pause behavior based on extras: - // - No extras: skip_checking=true, start immediately (100% complete) - // - With extras: skip_checking=true, add paused, then recheck to find missing pieces - // - Disc layout: policy will override to paused via ApplyToAddOptions - options["skip_checking"] = "true" - if hasExtras { - // With extras: add paused, we'll trigger recheck after add - options["stopped"] = "true" - options["paused"] = "true" - } else if req.SkipAutoResume { - // No extras but user wants paused - options["stopped"] = "true" - options["paused"] = "true" - } else { - // No extras: start immediately - options["stopped"] = "false" - options["paused"] = "false" + if pooledAutomation && req.SkipRecheck { + return hardlinkModeResult{ + Used: true, + Success: false, + Result: InstanceCrossSeedResult{ + InstanceID: candidate.InstanceID, + InstanceName: candidate.InstanceName, + Success: false, + Status: "skipped_recheck", + Message: skippedRecheckMessage, + }, + } } + // Handle skip_checking and pause behavior based on actual availability: + // - Fully available: skip_checking=true, start immediately (100% complete) + // - Missing files: skip_checking=true, add paused, then recheck to find missing pieces + // - Disc layout: policy will override to paused via ApplyToAddOptions + applyTreeModeAddState(options, requiresRecheck, partialPoolShouldKeepPaused(req, pooledAutomation)) + // Apply add policy (e.g., disc layout forces paused) addPolicy.ApplyToAddOptions(options) log.Debug(). Int("instanceID", candidate.InstanceID). Str("torrentName", torrentName). - Str("savepath", plan.RootDir). + Str("savepath", destDir). Str("category", crossCategory). - Bool("hasExtras", hasExtras). + Bool("hasExtras", hasMetadataExtras). + Bool("requiresRecheck", requiresRecheck). Bool("discLayout", addPolicy.DiscLayout). - Int("linkedFiles", len(candidateTorrentFilesToLink)). 
- Int("totalFiles", len(sourceFiles)). + Int("linkedFiles", linkedFiles). + Int("totalFiles", totalFiles). + Int("availableCandidateFiles", len(availability.existingFiles)). Msg("[CROSSSEED] Hardlink mode: adding torrent") // Add the torrent @@ -10662,55 +11725,93 @@ func (s *Service) processHardlinkMode( } // Build result message - statusMsg := fmt.Sprintf("Added via hardlink mode (match: %s, files: %d/%d)", matchType, len(candidateTorrentFilesToLink), len(sourceFiles)) + statusMsg := fmt.Sprintf("Added via hardlink mode (match: %s, files: %d/%d)", matchType, linkedFiles, totalFiles) if addPolicy.DiscLayout { statusMsg += addPolicy.StatusSuffix() } // Handle recheck and auto-resume when extras exist, or disc layout requires verification - if hasExtras || addPolicy.DiscLayout { + if requiresRecheck || addPolicy.DiscLayout || pooledAutomation { recheckHashes := []string{torrentHash} if torrentHashV2 != "" && !strings.EqualFold(torrentHash, torrentHashV2) { recheckHashes = append(recheckHashes, torrentHashV2) } // Trigger recheck so qBittorrent discovers which pieces are present (hardlinked) - // and which are missing (extras to download) - if err := s.syncManager.BulkAction(ctx, candidate.InstanceID, recheckHashes, "recheck"); err != nil { + // and which are missing (extras to download). Confirm it actually starts so + // stopped injections are not treated like completed rechecks. + confirmed, err := s.triggerAndConfirmInjectedTorrentRecheck( + ctx, + candidate.InstanceID, + recheckHashes, + torrentHash, + "[CROSSSEED] Hardlink mode", + ) + if err != nil { log.Warn(). Err(err). Int("instanceID", candidate.InstanceID). Str("torrentHash", torrentHash). Msg("[CROSSSEED] Hardlink mode: failed to trigger recheck after add") statusMsg += " - recheck failed, manual intervention required" - } else if addPolicy.ShouldSkipAutoResume() { - log.Debug(). - Int("instanceID", candidate.InstanceID). - Str("torrentHash", torrentHash). 
- Msg("[CROSSSEED] Hardlink mode: skipping auto-resume per add policy") - statusMsg += addPolicy.StatusSuffix() - } else if req.SkipAutoResume { - // User requested to skip auto-resume - leave paused after recheck - log.Debug(). - Int("instanceID", candidate.InstanceID). - Str("torrentHash", torrentHash). - Msg("[CROSSSEED] Hardlink mode: skipping auto-resume per user settings") - statusMsg += " - auto-resume skipped per settings" } else { - // Queue for background resume - worker will resume when recheck completes at threshold - log.Debug(). - Int("instanceID", candidate.InstanceID). - Str("torrentHash", torrentHash). - Int("extraFiles", len(sourceFiles)-len(candidateTorrentFilesToLink)). - Msg("[CROSSSEED] Hardlink mode: queuing torrent for recheck resume") - queueErr := error(nil) - if addPolicy.DiscLayout { - queueErr = s.queueRecheckResumeWithThreshold(ctx, candidate.InstanceID, torrentHash, 1.0) - } else { - queueErr = s.queueRecheckResume(ctx, candidate.InstanceID, torrentHash) + if !confirmed { + statusMsg += " - recheck start not yet confirmed" } - if queueErr != nil { - statusMsg += " - auto-resume queue full, manual resume required" + switch { + case addPolicy.ShouldSkipAutoResume(): + log.Debug(). + Int("instanceID", candidate.InstanceID). + Str("torrentHash", torrentHash). 
+ Msg("[CROSSSEED] Hardlink mode: skipping auto-resume per add policy") + statusMsg += addPolicy.StatusSuffix() + case pooledAutomation: + pieceLength := int64(0) + if torrentInfo != nil { + pieceLength = torrentInfo.PieceLength + } + if err := s.registerPartialPoolMember( + ctx, + candidate.InstanceID, + matchedTorrent.Hash, + candidate.InstanceID, + torrentHash, + torrentHashV2, + torrentName, + models.CrossSeedPartialMemberModeHardlink, + destDir, + pieceLength, + settings.MaxMissingBytesAfterRecheck, + sourceFiles, + ); err != nil { + statusMsg += " - pooled automation registration failed, manual handling required" + } else { + statusMsg += " - pooled completion active" + } + case req.SkipAutoResume: + // User requested to skip auto-resume - leave paused after recheck + log.Debug(). + Int("instanceID", candidate.InstanceID). + Str("torrentHash", torrentHash). + Msg("[CROSSSEED] Hardlink mode: skipping auto-resume per user settings") + statusMsg += " - auto-resume skipped per settings" + default: + // Queue for background resume - worker will resume when recheck completes at threshold + log.Debug(). + Int("instanceID", candidate.InstanceID). + Str("torrentHash", torrentHash). + Int("extraFiles", totalFiles-linkedFiles). + Msg("[CROSSSEED] Hardlink mode: queuing torrent for recheck resume") + queueErr := error(nil) + switch { + case addPolicy.DiscLayout: + queueErr = s.queueRecheckResumeWithThreshold(ctx, candidate.InstanceID, torrentHash, 1.0) + default: + queueErr = s.queueRecheckResume(ctx, candidate.InstanceID, torrentHash) + } + if queueErr != nil { + statusMsg += " - auto-resume queue full, manual resume required" + } } } } else if addPolicy.ShouldSkipAutoResume() { @@ -10747,7 +11848,7 @@ func (s *Service) processHardlinkMode( // // When isolation is needed, a human-readable folder name is used: -- // For "flat" preset, isolation is always used to keep torrents separated. 
-func (s *Service) buildHardlinkDestDir( +func (s *Service) buildManagedDestination( ctx context.Context, instance *models.Instance, baseDir string, @@ -10756,8 +11857,7 @@ func (s *Service) buildHardlinkDestDir( incomingTrackerDomain string, req *CrossSeedRequest, candidateFiles []hardlinktree.TorrentFile, -) string { - +) managedDestinationContext { // Determine if isolation folder is needed based on torrent structure. // Since hardlink mode always uses contentLayout=Original, we only need // an isolation folder when the torrent doesn't have a common root folder. @@ -10774,22 +11874,50 @@ func (s *Service) buildHardlinkDestDir( // Get tracker display name using incoming torrent's tracker domain trackerDisplayName := s.resolveTrackerDisplayName(ctx, incomingTrackerDomain, req) if isolationFolder != "" { - return filepath.Join(baseDir, pathutil.SanitizePathSegment(trackerDisplayName), isolationFolder) + return managedDestinationContext{ + RootDir: filepath.Join(baseDir, pathutil.SanitizePathSegment(trackerDisplayName), isolationFolder), + Isolated: true, + } + } + return managedDestinationContext{ + RootDir: filepath.Join(baseDir, pathutil.SanitizePathSegment(trackerDisplayName)), + Isolated: false, } - return filepath.Join(baseDir, pathutil.SanitizePathSegment(trackerDisplayName)) case "by-instance": if isolationFolder != "" { - return filepath.Join(baseDir, pathutil.SanitizePathSegment(candidate.InstanceName), isolationFolder) + return managedDestinationContext{ + RootDir: filepath.Join(baseDir, pathutil.SanitizePathSegment(candidate.InstanceName), isolationFolder), + Isolated: true, + } + } + return managedDestinationContext{ + RootDir: filepath.Join(baseDir, pathutil.SanitizePathSegment(candidate.InstanceName)), + Isolated: false, } - return filepath.Join(baseDir, pathutil.SanitizePathSegment(candidate.InstanceName)) default: // "flat" or unknown // For flat layout, always use isolation folder to keep torrents separated - return filepath.Join(baseDir, 
pathutil.IsolationFolderName(torrentHash, torrentName)) + return managedDestinationContext{ + RootDir: filepath.Join(baseDir, pathutil.IsolationFolderName(torrentHash, torrentName)), + Isolated: true, + } } } +func (s *Service) buildHardlinkDestDir( + ctx context.Context, + instance *models.Instance, + baseDir string, + torrentHash, torrentName string, + candidate CrossSeedCandidate, + incomingTrackerDomain string, + req *CrossSeedRequest, + candidateFiles []hardlinktree.TorrentFile, +) string { + return s.buildManagedDestination(ctx, instance, baseDir, torrentHash, torrentName, candidate, incomingTrackerDomain, req, candidateFiles).RootDir +} + // resolveTrackerDisplayName resolves the display name for the tracker. // Uses the incoming torrent's tracker domain (extracted from .torrent bytes) for the // "by-tracker" hardlink directory preset. Falls back to indexer name, then domain. @@ -10883,10 +12011,11 @@ func shouldWarnForReflinkCreateError(err error) bool { // Returns Used=true with the final result when reflink mode is attempted. 
// // Key differences from hardlink mode: -// - Reflinks allow qBittorrent to safely write/repair bytes without corrupting originals -// - Reflink mode never skips due to piece-boundary safety (that's the whole point) -// - Reflink mode triggers recheck only when extra files are present -// - If SkipRecheck is enabled and reflink mode would require recheck, it returns skipped_recheck instead of adding +// - Reflinks allow qBittorrent to safely write/repair bytes without corrupting originals +// - Reflink mode never skips due to piece-boundary safety (that's the whole point) +// - Reflink mode triggers recheck when extra files are present, and can optionally +// recheck single-file normalized-name size mismatches +// - If SkipRecheck is enabled and reflink mode would require recheck, it returns skipped_recheck instead of adding func (s *Service) processReflinkMode( ctx context.Context, candidate CrossSeedCandidate, @@ -10898,7 +12027,10 @@ func (s *Service) processReflinkMode( matchedTorrent *qbt.Torrent, matchType string, sourceFiles, candidateFiles qbt.TorrentFiles, + torrentInfo *metainfo.Info, props *qbt.TorrentProperties, + managedDest managedDestinationContext, + managedDestErr error, _, crossCategory string, // baseCategory unused, crossCategory used for torrent options ) reflinkModeResult { notUsed := reflinkModeResult{Used: false} @@ -10946,12 +12078,14 @@ func (s *Service) processReflinkMode( return reflinkError(message) } - // Reflink mode only requires recheck when the incoming torrent has extra files - // (files not present in the matched torrent). 
- hasExtras := hasExtraSourceFiles(sourceFiles, candidateFiles) + settings, _ := s.GetAutomationSettings(ctx) + if settings == nil { + settings = models.DefaultCrossSeedAutomationSettings() + } - // Early guard: if SkipRecheck is enabled and we have extras, skip before any plan building - if req.SkipRecheck && hasExtras { + singleFileSizeMismatchCandidate := isReflinkSingleFileSizeMismatchCandidate(sourceFiles, candidateFiles) + allowSingleFileSizeMismatch := s.shouldAllowReflinkSingleFileSizeMismatch(settings, sourceFiles, candidateFiles) + if singleFileSizeMismatchCandidate && settings.AllowReflinkSingleFileSizeMismatch && !allowSingleFileSizeMismatch { return reflinkModeResult{ Used: true, Success: false, @@ -10959,8 +12093,8 @@ func (s *Service) processReflinkMode( InstanceID: candidate.InstanceID, InstanceName: candidate.InstanceName, Success: false, - Status: "skipped_recheck", - Message: skippedRecheckMessage, + Status: "rejected", + Message: "Single-file size mismatch exceeds the 99% precheck threshold", }, } } @@ -10982,138 +12116,128 @@ func (s *Service) processReflinkMode( return handleError(fmt.Sprintf("Instance '%s' does not have local filesystem access enabled", candidate.InstanceName)) } - // Need a valid file path from matched torrent to check filesystem + // Need candidate files to build the reflink plan. if len(candidateFiles) == 0 { return handleError("No candidate files available for reflink matching") } - - // Build path to existing file (matched torrent's content) - var existingFilePath string - if matchedTorrent.ContentPath != "" { - existingFilePath = matchedTorrent.ContentPath - } else if props.SavePath != "" { - existingFilePath = props.SavePath - } else { - log.Warn(). - Int("instanceID", candidate.InstanceID). - Str("matchedHash", matchedTorrent.Hash). 
- Msg("[CROSSSEED] Reflink mode: no content path or save path available") - return handleError("No content path or save path available for matched torrent") - } - - selectedBaseDir, err := findMatchingBaseDir(instance.HardlinkBaseDir, existingFilePath) - if err != nil { - log.Warn(). - Err(err). - Str("configuredDirs", instance.HardlinkBaseDir). - Str("existingPath", existingFilePath). - Msg("[CROSSSEED] Reflink mode: no suitable base directory found") - return handleError(fmt.Sprintf("No suitable base directory: %v", err)) + if props == nil || strings.TrimSpace(props.SavePath) == "" { + return handleError("Candidate save path is unavailable for reflink mode") } - // Check reflink support - supported, reason := reflinktree.SupportsReflink(selectedBaseDir) - if !supported { - log.Warn(). - Str("reason", reason). - Str("baseDir", selectedBaseDir). - Msg("[CROSSSEED] Reflink mode: filesystem does not support reflinks") - return handleError("Reflink not supported: " + reason) + destDir := managedDest.RootDir + if destDir == "" { + message := "Managed destination root is unavailable for reflink mode" + logEvent := log.Warn(). + Int("instanceID", candidate.InstanceID) + if managedDestErr != nil { + message = fmt.Sprintf("Managed destination root is unavailable for reflink mode: %v", managedDestErr) + logEvent = logEvent.Err(managedDestErr) + } + logEvent.Msg("[CROSSSEED] Reflink mode: managed destination root unavailable") + return handleError(message) } // Reflink mode always uses Original layout to match the incoming torrent's structure exactly. 
layout := hardlinktree.LayoutOriginal + hasMetadataExtras := hasExtraSourceFiles(sourceFiles, candidateFiles) + availability := buildTreeModeAvailability(props.SavePath, candidateFiles) - // Build ALL source files list (for destDir calculation - reflects full torrent structure) - candidateTorrentFilesAll := make([]hardlinktree.TorrentFile, 0, len(sourceFiles)) - for _, f := range sourceFiles { - candidateTorrentFilesAll = append(candidateTorrentFilesAll, hardlinktree.TorrentFile{ - Path: f.Name, - Size: f.Size, - }) - } - - // Extract incoming tracker domain from torrent bytes (for "by-tracker" preset) - incomingTrackerDomain := ParseTorrentAnnounceDomain(torrentBytes) - - // Build destination directory based on preset and torrent structure - destDir := s.buildHardlinkDestDir(ctx, instance, selectedBaseDir, torrentHash, torrentName, candidate, incomingTrackerDomain, req, candidateTorrentFilesAll) - - // Build existing files list (all files on disk from matched torrent) - existingFiles := make([]hardlinktree.ExistingFile, 0, len(candidateFiles)) - for _, f := range candidateFiles { - existingFiles = append(existingFiles, hardlinktree.ExistingFile{ - AbsPath: filepath.Join(props.SavePath, f.Name), - RelPath: f.Name, - Size: f.Size, - }) - } - - // Build CLONEABLE source files list (only files that have matching (normalizedKey, size) in candidate). - // Files without matches will be downloaded by qBittorrent. 
- type fileKeySize struct { - key string - size int64 - } - candidateKeyMultiset := make(map[fileKeySize]int) - for _, cf := range candidateFiles { - key := fileKeySize{key: normalizeFileKey(cf.Name), size: cf.Size} - candidateKeyMultiset[key]++ - } + clonedFiles := 0 + totalFiles := len(sourceFiles) + var plan *hardlinktree.TreePlan + if allowSingleFileSizeMismatch { + if _, ok := treeModeInitialFileInfo(props.SavePath, candidateFiles[0].Name, float64(candidateFiles[0].Progress)); ok { + plan, err = buildReflinkSingleFileSizeMismatchPlan(sourceFiles[0].Name, candidateFiles[0].Name, props, destDir) + if err != nil { + log.Error(). + Err(err). + Int("instanceID", candidate.InstanceID). + Str("torrentName", torrentName). + Str("destDir", destDir). + Msg("[CROSSSEED] Reflink mode: failed to build single-file size-mismatch plan, aborting") + return handleError(fmt.Sprintf("Failed to build reflink plan: %v", err)) + } + clonedFiles = 1 + } + } else { + var candidateTorrentFilesToClone []hardlinktree.TorrentFile + sourceKeyUsed := make(map[string]int) + for _, f := range sourceFiles { + key := treeModeFileKey(f.Name, f.Size) + if sourceKeyUsed[key] < availability.availableByKey[key] { + candidateTorrentFilesToClone = append(candidateTorrentFilesToClone, hardlinktree.TorrentFile{ + Path: f.Name, + Size: f.Size, + }) + sourceKeyUsed[key]++ + } + } - var candidateTorrentFilesToClone []hardlinktree.TorrentFile - sourceKeyUsed := make(map[fileKeySize]int) - for _, f := range sourceFiles { - key := fileKeySize{key: normalizeFileKey(f.Name), size: f.Size} - if sourceKeyUsed[key] < candidateKeyMultiset[key] { - // This source file has a matching (normalizedKey, size) in candidate - include it for cloning - candidateTorrentFilesToClone = append(candidateTorrentFilesToClone, hardlinktree.TorrentFile{ - Path: f.Name, - Size: f.Size, - }) - sourceKeyUsed[key]++ + if len(candidateTorrentFilesToClone) > 0 { + plan, err = hardlinktree.BuildPlan(candidateTorrentFilesToClone, 
availability.existingFiles, layout, torrentName, destDir) + if err != nil { + log.Error(). + Err(err). + Int("instanceID", candidate.InstanceID). + Str("torrentName", torrentName). + Str("destDir", destDir). + Msg("[CROSSSEED] Reflink mode: failed to build plan, aborting") + return handleError(fmt.Sprintf("Failed to build reflink plan: %v", err)) + } + clonedFiles = len(candidateTorrentFilesToClone) } - // else: no matching existing file - this will be downloaded } - if len(candidateTorrentFilesToClone) == 0 { - return handleError("No cloneable files found (all source files would need to be downloaded)") + requiresRecheck := clonedFiles < totalFiles || allowSingleFileSizeMismatch + if req.SkipRecheck && requiresRecheck { + return reflinkModeResult{ + Used: true, + Success: false, + Result: InstanceCrossSeedResult{ + InstanceID: candidate.InstanceID, + InstanceName: candidate.InstanceName, + Success: false, + Status: "skipped_recheck", + Message: skippedRecheckMessage, + }, + } } - // Build reflink tree plan with only the cloneable files - plan, err := hardlinktree.BuildPlan(candidateTorrentFilesToClone, existingFiles, layout, torrentName, destDir) - if err != nil { - log.Error(). - Err(err). - Int("instanceID", candidate.InstanceID). - Str("torrentName", torrentName). - Str("destDir", destDir). - Msg("[CROSSSEED] Reflink mode: failed to build plan, aborting") - return handleError(fmt.Sprintf("Failed to build reflink plan: %v", err)) + supported, reason := reflinktree.SupportsReflink(destDir) + if !supported { + log.Warn(). + Str("reason", reason). + Str("baseDir", destDir). 
+ Msg("[CROSSSEED] Reflink mode: filesystem does not support reflinks") + return handleError("Reflink not supported: " + reason) } - // Create reflink tree on disk - if err := reflinktree.Create(plan); err != nil { - logEvent := log.Error() - if shouldWarnForReflinkCreateError(err) { - logEvent = log.Warn() + if plan != nil { + if err := reflinktree.Create(plan); err != nil { + logEvent := log.Error() + if shouldWarnForReflinkCreateError(err) { + logEvent = log.Warn() + } + logEvent. + Err(err). + Int("instanceID", candidate.InstanceID). + Str("torrentName", torrentName). + Str("destDir", destDir). + Msg("[CROSSSEED] Reflink mode: failed to create reflink tree, aborting") + return handleError(fmt.Sprintf("Failed to create reflink tree: %v", err)) } - logEvent. - Err(err). + + log.Info(). Int("instanceID", candidate.InstanceID). Str("torrentName", torrentName). Str("destDir", destDir). - Msg("[CROSSSEED] Reflink mode: failed to create reflink tree, aborting") - return handleError(fmt.Sprintf("Failed to create reflink tree: %v", err)) + Int("fileCount", len(plan.Files)). + Int("unavailableFiles", availability.unavailableFiles). + Msg("[CROSSSEED] Reflink mode: created reflink tree") + } else if err := os.MkdirAll(destDir, 0o755); err != nil { + return handleError(fmt.Sprintf("Failed to prepare managed destination root: %v", err)) } - log.Info(). - Int("instanceID", candidate.InstanceID). - Str("torrentName", torrentName). - Str("destDir", destDir). - Int("fileCount", len(plan.Files)). 
- Msg("[CROSSSEED] Reflink mode: created reflink tree") - // Build options for adding torrent options := make(map[string]string) @@ -11131,11 +12255,12 @@ func (s *Service) processReflinkMode( // Reflink mode: files are pre-created, so use savepath pointing to tree root // Force contentLayout=Original to match the reflink tree layout exactly options["autoTMM"] = "false" - options["savepath"] = plan.RootDir + options["savepath"] = destDir options["contentLayout"] = "Original" // Compute add policy from source files (e.g., disc layout detection) addPolicy := PolicyForSourceFiles(sourceFiles) + pooledAutomation := !allowSingleFileSizeMismatch && shouldUsePartialPool(settings, matchType, requiresRecheck, addPolicy.DiscLayout) if addPolicy.DiscLayout { log.Info(). @@ -11159,41 +12284,41 @@ func (s *Service) processReflinkMode( }, } } + if pooledAutomation && req.SkipRecheck { + return reflinkModeResult{ + Used: true, + Success: false, + Result: InstanceCrossSeedResult{ + InstanceID: candidate.InstanceID, + InstanceName: candidate.InstanceName, + Success: false, + Status: "skipped_recheck", + Message: skippedRecheckMessage, + }, + } + } - // Handle skip_checking and pause behavior based on extras: - // - No extras: skip_checking=true, start immediately (100% complete) - // - With extras: skip_checking=true, add paused, then recheck to find missing pieces + // Handle skip_checking and pause behavior based on recheck requirement: + // - No recheck required: skip_checking=true, start immediately (100% complete) + // - Recheck required: skip_checking=true, add paused, then recheck to find missing pieces // - Disc layout: policy will override to paused via ApplyToAddOptions - options["skip_checking"] = "true" - if hasExtras { - // With extras: add paused, we'll trigger recheck after add - options["stopped"] = "true" - options["paused"] = "true" - } else if req.SkipAutoResume { - // No extras but user wants paused - options["stopped"] = "true" - options["paused"] = "true" - } 
else { - // No extras: start immediately - options["stopped"] = "false" - options["paused"] = "false" - } + applyTreeModeAddState(options, requiresRecheck, partialPoolShouldKeepPaused(req, pooledAutomation)) // Apply add policy (e.g., disc layout forces paused) addPolicy.ApplyToAddOptions(options) - clonedFiles := len(candidateTorrentFilesToClone) - totalFiles := len(sourceFiles) - log.Debug(). Int("instanceID", candidate.InstanceID). Str("torrentName", torrentName). - Str("savepath", plan.RootDir). + Str("savepath", destDir). Str("category", crossCategory). - Bool("hasExtras", hasExtras). + Bool("hasExtras", hasMetadataExtras). + Bool("requiresRecheck", requiresRecheck). + Bool("singleFileSizeMismatch", allowSingleFileSizeMismatch). Bool("discLayout", addPolicy.DiscLayout). Int("clonedFiles", clonedFiles). Int("totalFiles", totalFiles). + Int("availableCandidateFiles", len(availability.existingFiles)). Msg("[CROSSSEED] Reflink mode: adding torrent") // Add the torrent @@ -11215,54 +12340,97 @@ func (s *Service) processReflinkMode( // Build result message statusMsg := fmt.Sprintf("Added via reflink mode (match: %s, files: %d/%d)", matchType, clonedFiles, totalFiles) + if allowSingleFileSizeMismatch { + statusMsg += " - single-file size mismatch allowed after normalized-name match" + } if addPolicy.DiscLayout { statusMsg += addPolicy.StatusSuffix() } - // Handle recheck and auto-resume when extras exist, or disc layout requires verification - if hasExtras || addPolicy.DiscLayout { + // Handle recheck and auto-resume when reflink mode needs verification, or disc layout requires it. 
+ if requiresRecheck || addPolicy.DiscLayout || pooledAutomation { recheckHashes := []string{torrentHash} if torrentHashV2 != "" && !strings.EqualFold(torrentHash, torrentHashV2) { recheckHashes = append(recheckHashes, torrentHashV2) } // Trigger recheck so qBittorrent discovers which pieces are present (cloned) - // and which are missing (extras to download) - if err := s.syncManager.BulkAction(ctx, candidate.InstanceID, recheckHashes, "recheck"); err != nil { + // and which are missing (extras or size-drift bytes to download). Confirm it + // actually starts so stopped injections are not treated like settled rechecks. + confirmed, err := s.triggerAndConfirmInjectedTorrentRecheck( + ctx, + candidate.InstanceID, + recheckHashes, + torrentHash, + "[CROSSSEED] Reflink mode", + ) + if err != nil { log.Warn(). Err(err). Int("instanceID", candidate.InstanceID). Str("torrentHash", torrentHash). Msg("[CROSSSEED] Reflink mode: failed to trigger recheck after add") statusMsg += " - recheck failed, manual intervention required" - } else if addPolicy.ShouldSkipAutoResume() { - log.Debug(). - Int("instanceID", candidate.InstanceID). - Str("torrentHash", torrentHash). - Msg("[CROSSSEED] Reflink mode: skipping auto-resume per add policy") - statusMsg += addPolicy.StatusSuffix() - } else if req.SkipAutoResume { - // User requested to skip auto-resume - leave paused after recheck - log.Debug(). - Int("instanceID", candidate.InstanceID). - Str("torrentHash", torrentHash). - Msg("[CROSSSEED] Reflink mode: skipping auto-resume per user settings") - statusMsg += " - auto-resume skipped per settings" } else { - // Queue for background resume - worker will resume when recheck completes at threshold - log.Debug(). - Int("instanceID", candidate.InstanceID). - Str("torrentHash", torrentHash). - Int("missingFiles", totalFiles-clonedFiles). 
- Msg("[CROSSSEED] Reflink mode: queuing torrent for recheck resume") - queueErr := error(nil) - if addPolicy.DiscLayout { - queueErr = s.queueRecheckResumeWithThreshold(ctx, candidate.InstanceID, torrentHash, 1.0) - } else { - queueErr = s.queueRecheckResume(ctx, candidate.InstanceID, torrentHash) + if !confirmed { + statusMsg += " - recheck start not yet confirmed" } - if queueErr != nil { - statusMsg += " - auto-resume queue full, manual resume required" + switch { + case addPolicy.ShouldSkipAutoResume(): + log.Debug(). + Int("instanceID", candidate.InstanceID). + Str("torrentHash", torrentHash). + Msg("[CROSSSEED] Reflink mode: skipping auto-resume per add policy") + statusMsg += addPolicy.StatusSuffix() + case pooledAutomation: + pieceLength := int64(0) + if torrentInfo != nil { + pieceLength = torrentInfo.PieceLength + } + if err := s.registerPartialPoolMember( + ctx, + candidate.InstanceID, + matchedTorrent.Hash, + candidate.InstanceID, + torrentHash, + torrentHashV2, + torrentName, + models.CrossSeedPartialMemberModeReflink, + destDir, + pieceLength, + settings.MaxMissingBytesAfterRecheck, + sourceFiles, + ); err != nil { + statusMsg += " - pooled automation registration failed, manual handling required" + } else { + statusMsg += " - pooled completion active" + } + case req.SkipAutoResume: + // User requested to skip auto-resume - leave paused after recheck + log.Debug(). + Int("instanceID", candidate.InstanceID). + Str("torrentHash", torrentHash). + Msg("[CROSSSEED] Reflink mode: skipping auto-resume per user settings") + statusMsg += " - auto-resume skipped per settings" + default: + // Queue for background resume - worker will resume when recheck completes at threshold + log.Debug(). + Int("instanceID", candidate.InstanceID). + Str("torrentHash", torrentHash). + Int("missingFiles", totalFiles-clonedFiles). 
+ Msg("[CROSSSEED] Reflink mode: queuing torrent for recheck resume") + queueErr := error(nil) + switch { + case addPolicy.DiscLayout: + queueErr = s.queueRecheckResumeWithThreshold(ctx, candidate.InstanceID, torrentHash, 1.0) + case allowSingleFileSizeMismatch: + queueErr = s.queueRecheckResumeWithThreshold(ctx, candidate.InstanceID, torrentHash, 0.99) + default: + queueErr = s.queueRecheckResume(ctx, candidate.InstanceID, torrentHash) + } + if queueErr != nil { + statusMsg += " - auto-resume queue full, manual resume required" + } } } } else if addPolicy.ShouldSkipAutoResume() { @@ -11271,7 +12439,7 @@ func (s *Service) processReflinkMode( } // Add note about low completion behavior - if hasExtras && clonedFiles < totalFiles { + if requiresRecheck && clonedFiles < totalFiles { statusMsg += " (below threshold = remains paused for manual review)" } @@ -11293,3 +12461,72 @@ func (s *Service) processReflinkMode( }, } } + +func isReflinkSingleFileSizeMismatchCandidate(sourceFiles, candidateFiles qbt.TorrentFiles) bool { + if len(sourceFiles) != 1 || len(candidateFiles) != 1 { + return false + } + + sourceFile := sourceFiles[0] + candidateFile := candidateFiles[0] + + if sourceFile.Size == candidateFile.Size { + return false + } + + sourceKey := normalizeFileKey(sourceFile.Name) + return sourceKey != "" && sourceKey == normalizeFileKey(candidateFile.Name) +} + +func (s *Service) shouldAllowReflinkSingleFileSizeMismatch( + settings *models.CrossSeedAutomationSettings, + sourceFiles, candidateFiles qbt.TorrentFiles, +) bool { + if settings == nil || !settings.AllowReflinkSingleFileSizeMismatch { + return false + } + if !isReflinkSingleFileSizeMismatchCandidate(sourceFiles, candidateFiles) { + return false + } + + return s.isSizeWithinTolerance(sourceFiles[0].Size, candidateFiles[0].Size, 1.0) +} + +func buildReflinkSingleFileSizeMismatchPlan( + sourceName string, + candidateName string, + props *qbt.TorrentProperties, + destDir string, +) (*hardlinktree.TreePlan, error) 
{ + if props == nil || strings.TrimSpace(props.SavePath) == "" { + return nil, errors.New("candidate save path is unavailable") + } + if destDir == "" { + return nil, errors.New("destination directory is required") + } + + targetPath := filepath.Join(destDir, filepath.FromSlash(sourceName)) + absTarget, err := filepath.Abs(targetPath) + if err != nil { + return nil, fmt.Errorf("resolve target path: %w", err) + } + absBase, err := filepath.Abs(destDir) + if err != nil { + return nil, fmt.Errorf("resolve destination directory: %w", err) + } + rel, err := filepath.Rel(absBase, absTarget) + if err != nil { + return nil, fmt.Errorf("compute target path: %w", err) + } + if strings.HasPrefix(rel, ".."+string(filepath.Separator)) || rel == ".." { + return nil, fmt.Errorf("target path escapes base directory: %s", targetPath) + } + + return &hardlinktree.TreePlan{ + RootDir: destDir, + Files: []hardlinktree.FilePlan{{ + SourcePath: filepath.Join(props.SavePath, candidateName), + TargetPath: targetPath, + }}, + }, nil +} diff --git a/internal/services/crossseed/service_candidate_cache_test.go b/internal/services/crossseed/service_candidate_cache_test.go new file mode 100644 index 000000000..18f039225 --- /dev/null +++ b/internal/services/crossseed/service_candidate_cache_test.go @@ -0,0 +1,47 @@ +// Copyright (c) 2025-2026, s0up and the autobrr contributors. 
+// SPDX-License-Identifier: GPL-2.0-or-later + +package crossseed + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCacheAutomationCandidateResponse_BoundedSize(t *testing.T) { + t.Parallel() + + autoCtx := &automationContext{ + candidateCache: make(map[string]*FindCandidatesResponse), + candidateOrder: make([]string, 0, automationCandidateCacheMaxEntries), + } + + total := automationCandidateCacheMaxEntries + 25 + for i := range total { + key := fmt.Sprintf("release-%d", i) + cacheAutomationCandidateResponse(autoCtx, key, &FindCandidatesResponse{}) + } + + require.Len(t, autoCtx.candidateCache, automationCandidateCacheMaxEntries) + assert.NotContains(t, autoCtx.candidateCache, "release-0") + assert.Contains(t, autoCtx.candidateCache, fmt.Sprintf("release-%d", total-1)) +} + +func TestCacheAutomationCandidateResponse_DuplicateKeyDoesNotGrowOrder(t *testing.T) { + t.Parallel() + + autoCtx := &automationContext{ + candidateCache: make(map[string]*FindCandidatesResponse), + candidateOrder: make([]string, 0, automationCandidateCacheMaxEntries), + } + + cacheAutomationCandidateResponse(autoCtx, "same-release", &FindCandidatesResponse{}) + cacheAutomationCandidateResponse(autoCtx, "same-release", &FindCandidatesResponse{}) + + require.Len(t, autoCtx.candidateCache, 1) + require.Len(t, autoCtx.candidateOrder, 1) + assert.Equal(t, "same-release", autoCtx.candidateOrder[0]) +} diff --git a/internal/services/crossseed/service_completion_queue_test.go b/internal/services/crossseed/service_completion_queue_test.go index 5fbe77ad7..592538fba 100644 --- a/internal/services/crossseed/service_completion_queue_test.go +++ b/internal/services/crossseed/service_completion_queue_test.go @@ -16,6 +16,7 @@ import ( _ "modernc.org/sqlite" "github.com/autobrr/qui/internal/models" + internalqb "github.com/autobrr/qui/internal/qbittorrent" "github.com/autobrr/qui/internal/services/jackett" ) @@ -58,11 +59,156 @@ 
func setupCompletionStoreForQueueTests(t *testing.T) *models.InstanceCrossSeedCo return models.NewInstanceCrossSeedCompletionStore(q) } +type completionPollingSyncMock struct { + mu sync.Mutex + sequences map[string][]qbt.Torrent + hits map[string]int + delay time.Duration +} + +func newCompletionPollingSyncMock(sequences map[string][]qbt.Torrent) *completionPollingSyncMock { + normalized := make(map[string][]qbt.Torrent, len(sequences)) + for hash, sequence := range sequences { + normalized[normalizeHash(hash)] = sequence + } + + return &completionPollingSyncMock{ + sequences: normalized, + hits: make(map[string]int), + } +} + +func (m *completionPollingSyncMock) GetTorrents(_ context.Context, _ int, filter qbt.TorrentFilterOptions) ([]qbt.Torrent, error) { + if len(filter.Hashes) == 0 { + return nil, nil + } + + if m.delay > 0 { + time.Sleep(m.delay) + } + + hash := normalizeHash(filter.Hashes[0]) + + m.mu.Lock() + defer m.mu.Unlock() + + sequence, ok := m.sequences[hash] + if !ok || len(sequence) == 0 { + return nil, nil + } + + index := m.hits[hash] + if index >= len(sequence) { + index = len(sequence) - 1 + } + m.hits[hash]++ + + torrent := sequence[index] + return []qbt.Torrent{torrent}, nil +} + +func (m *completionPollingSyncMock) hitCount(hash string) int { + m.mu.Lock() + defer m.mu.Unlock() + + return m.hits[normalizeHash(hash)] +} + +func (m *completionPollingSyncMock) GetTorrentFilesBatch(context.Context, int, []string) (map[string]qbt.TorrentFiles, error) { + return nil, nil +} + +func (*completionPollingSyncMock) ExportTorrent(context.Context, int, string) ([]byte, string, string, error) { + return nil, "", "", nil +} + +func (*completionPollingSyncMock) HasTorrentByAnyHash(context.Context, int, []string) (*qbt.Torrent, bool, error) { + return nil, false, nil +} + +func (*completionPollingSyncMock) GetTorrentProperties(context.Context, int, string) (*qbt.TorrentProperties, error) { + return &qbt.TorrentProperties{}, nil +} + +func 
(*completionPollingSyncMock) GetAppPreferences(context.Context, int) (qbt.AppPreferences, error) { + return qbt.AppPreferences{}, nil +} + +func (*completionPollingSyncMock) AddTorrent(context.Context, int, []byte, map[string]string) error { + return nil +} + +func (*completionPollingSyncMock) BulkAction(context.Context, int, []string, string) error { + return nil +} + +func (*completionPollingSyncMock) GetCachedInstanceTorrents(context.Context, int) ([]internalqb.CrossInstanceTorrentView, error) { + return nil, nil +} + +func (*completionPollingSyncMock) ExtractDomainFromURL(string) string { + return "" +} + +func (*completionPollingSyncMock) GetQBittorrentSyncManager(context.Context, int) (*qbt.SyncManager, error) { + return nil, nil +} + +func (*completionPollingSyncMock) RenameTorrent(context.Context, int, string, string) error { + return nil +} + +func (*completionPollingSyncMock) RenameTorrentFile(context.Context, int, string, string, string) error { + return nil +} + +func (*completionPollingSyncMock) RenameTorrentFolder(context.Context, int, string, string, string) error { + return nil +} + +func (*completionPollingSyncMock) SetTags(context.Context, int, []string, string) error { + return nil +} + +func (*completionPollingSyncMock) GetCategories(context.Context, int) (map[string]qbt.Category, error) { + return map[string]qbt.Category{}, nil +} + +func (*completionPollingSyncMock) CreateCategory(context.Context, int, string, string) error { + return nil +} + +func setCompletionCheckingTimings(svc *Service, pollInterval time.Duration, timeout time.Duration) { + svc.completionPollInterval = pollInterval + svc.completionTimeout = timeout +} + +func setCompletionCheckingRetryPolicy(svc *Service, retryDelay time.Duration, maxAttempts int) { + svc.completionRetryDelay = retryDelay + svc.completionMaxAttempts = maxAttempts +} + func TestHandleTorrentCompletion_QueuesPerInstance(t *testing.T) { completionStore := setupCompletionStoreForQueueTests(t) firstHash := 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" secondHash := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + syncMock := newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + firstHash: {{ + Hash: firstHash, + Name: "first", + Progress: 1.0, + State: qbt.TorrentStateUploading, + CompletionOn: 123, + }}, + secondHash: {{ + Hash: secondHash, + Name: "second", + Progress: 1.0, + State: qbt.TorrentStateUploading, + CompletionOn: 124, + }}, + }) firstStarted := make(chan struct{}) secondStarted := make(chan struct{}) releaseFirst := make(chan struct{}) @@ -73,6 +219,7 @@ func TestHandleTorrentCompletion_QueuesPerInstance(t *testing.T) { svc := &Service{ completionStore: completionStore, + syncManager: syncMock, automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { return models.DefaultCrossSeedAutomationSettings(), nil }, @@ -140,12 +287,153 @@ func TestHandleTorrentCompletion_QueuesPerInstance(t *testing.T) { require.Equal(t, []string{firstHash, secondHash}, invocationOrder) } +func TestHandleTorrentCompletion_ContinuesPollingWhileSearchIsSerialized(t *testing.T) { + completionStore := setupCompletionStoreForQueueTests(t) + + firstHash := "abababababababababababababababababababab" + secondHash := "cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd" + syncMock := newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + firstHash: {{ + Hash: firstHash, + Name: "first", + Progress: 1.0, + State: qbt.TorrentStateUploading, + CompletionOn: 123, + }}, + secondHash: { + { + Hash: secondHash, + Name: "second", + Progress: 0.42, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 124, + }, + { + Hash: secondHash, + Name: "second", + Progress: 1.0, + State: qbt.TorrentStateUploading, + CompletionOn: 124, + }, + }, + }) + + firstStarted := make(chan struct{}) + secondStarted := make(chan struct{}) + releaseFirst := make(chan struct{}) + var firstOnce sync.Once + var secondOnce sync.Once + + svc := &Service{ + completionStore: completionStore, + 
syncManager: syncMock, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + completionSearchInvoker: func(_ context.Context, _ int, torrent *qbt.Torrent, _ *models.CrossSeedAutomationSettings, _ *models.InstanceCrossSeedCompletionSettings) error { + switch torrent.Hash { + case firstHash: + firstOnce.Do(func() { close(firstStarted) }) + <-releaseFirst + case secondHash: + secondOnce.Do(func() { close(secondStarted) }) + } + return nil + }, + } + setCompletionCheckingTimings(svc, 5*time.Millisecond, 200*time.Millisecond) + + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + svc.HandleTorrentCompletion(context.Background(), 1, qbt.Torrent{ + Hash: firstHash, + Name: "first", + Progress: 1.0, + CompletionOn: 123, + }) + }() + + select { + case <-firstStarted: + case <-time.After(5 * time.Second): + t.Fatal("first completion search did not start") + } + + go func() { + defer wg.Done() + svc.HandleTorrentCompletion(context.Background(), 1, qbt.Torrent{ + Hash: secondHash, + Name: "second", + Progress: 1.0, + CompletionOn: 124, + }) + }() + + require.Eventually(t, func() bool { + return syncMock.hitCount(secondHash) >= 2 + }, time.Second, 10*time.Millisecond, "second wait was not polled while first search held the serialization lock") + + select { + case <-secondStarted: + t.Fatal("second completion search started before first released") + case <-time.After(50 * time.Millisecond): + } + + close(releaseFirst) + + select { + case <-secondStarted: + case <-time.After(5 * time.Second): + t.Fatal("second completion search did not start after first completed") + } + + wg.Wait() +} + +func TestSnapshotCompletionWaits_CopiesSchedulingFields(t *testing.T) { + lane := &completionLane{ + waits: make(map[string]*completionWaitState), + } + + initialRetryAt := time.Now().Add(15 * time.Second) + wait := &completionWaitState{ + done: make(chan struct{}), + 
retryAt: initialRetryAt, + } + lane.waits["abc"] = wait + + svc := &Service{} + snapshot := svc.snapshotCompletionWaits(lane) + + lane.mu.Lock() + updatedRetryAt := initialRetryAt.Add(30 * time.Second) + wait.retryAt = updatedRetryAt + lane.mu.Unlock() + + entry, ok := snapshot["abc"] + require.True(t, ok) + require.Same(t, wait, entry.state) + require.True(t, entry.retryAt.Equal(initialRetryAt)) + require.False(t, entry.retryAt.Equal(updatedRetryAt)) +} + func TestHandleTorrentCompletion_RetriesOnRateLimitError(t *testing.T) { completionStore := setupCompletionStoreForQueueTests(t) attempts := 0 svc := &Service{ completionStore: completionStore, + syncManager: newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + "cccccccccccccccccccccccccccccccccccccccc": {{ + Hash: "cccccccccccccccccccccccccccccccccccccccc", + Name: "retry-me", + Progress: 1.0, + State: qbt.TorrentStateUploading, + CompletionOn: 125, + }}, + }), automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { return models.DefaultCrossSeedAutomationSettings(), nil }, @@ -174,6 +462,376 @@ func TestHandleTorrentCompletion_RetriesOnRateLimitError(t *testing.T) { assert.Equal(t, 2, attempts) } +func TestHandleTorrentCompletion_DefersWhileChecking(t *testing.T) { + completionStore := setupCompletionStoreForQueueTests(t) + + hash := "dddddddddddddddddddddddddddddddddddddddd" + syncMock := newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + hash: { + { + Hash: hash, + Name: "checking", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 200, + }, + { + Hash: hash, + Name: "checking", + Progress: 1.0, + State: qbt.TorrentStateUploading, + CompletionOn: 200, + }, + }, + }) + + invoked := make(chan qbt.Torrent, 1) + svc := &Service{ + completionStore: completionStore, + syncManager: syncMock, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), 
nil + }, + completionSearchInvoker: func(_ context.Context, _ int, torrent *qbt.Torrent, _ *models.CrossSeedAutomationSettings, _ *models.InstanceCrossSeedCompletionSettings) error { + invoked <- *torrent + return nil + }, + } + setCompletionCheckingTimings(svc, 5*time.Millisecond, 50*time.Millisecond) + + svc.HandleTorrentCompletion(context.Background(), 1, qbt.Torrent{ + Hash: hash, + Name: "checking", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 200, + }) + + select { + case torrent := <-invoked: + require.InDelta(t, 1.0, torrent.Progress, 0.0001) + require.Equal(t, qbt.TorrentStateUploading, torrent.State) + case <-time.After(time.Second): + t.Fatal("completion search was not invoked after checking finished") + } +} + +func TestHandleTorrentCompletion_RetriesAfterCheckingTimeout(t *testing.T) { + completionStore := setupCompletionStoreForQueueTests(t) + + hash := "cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd" + syncMock := newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + hash: { + { + Hash: hash, + Name: "checking-retry", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 220, + }, + { + Hash: hash, + Name: "checking-retry", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 220, + }, + { + Hash: hash, + Name: "checking-retry", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 220, + }, + { + Hash: hash, + Name: "checking-retry", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 220, + }, + { + Hash: hash, + Name: "checking-retry", + Progress: 1.0, + State: qbt.TorrentStateUploading, + CompletionOn: 220, + }, + }, + }) + + invoked := make(chan qbt.Torrent, 1) + svc := &Service{ + completionStore: completionStore, + syncManager: syncMock, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + 
completionSearchInvoker: func(_ context.Context, _ int, torrent *qbt.Torrent, _ *models.CrossSeedAutomationSettings, _ *models.InstanceCrossSeedCompletionSettings) error { + invoked <- *torrent + return nil + }, + } + setCompletionCheckingTimings(svc, 5*time.Millisecond, 12*time.Millisecond) + setCompletionCheckingRetryPolicy(svc, 8*time.Millisecond, 3) + + svc.HandleTorrentCompletion(context.Background(), 1, qbt.Torrent{ + Hash: hash, + Name: "checking-retry", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 220, + }) + + select { + case torrent := <-invoked: + require.InDelta(t, 1.0, torrent.Progress, 0.0001) + require.Equal(t, qbt.TorrentStateUploading, torrent.State) + case <-time.After(time.Second): + t.Fatal("completion search was not invoked after checking retry") + } + + require.GreaterOrEqual(t, syncMock.hitCount(hash), 1) +} + +func TestHandleTorrentCompletion_RechecksSkipConditionsAfterWaiting(t *testing.T) { + completionStore := setupCompletionStoreForQueueTests(t) + + hash := "abababababababababababababababababababab" + syncMock := newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + hash: { + { + Hash: hash, + Name: "checking-then-tagged", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 300, + }, + { + Hash: hash, + Name: "checking-then-tagged", + Progress: 1.0, + State: qbt.TorrentStateUploading, + CompletionOn: 300, + Tags: "cross-seed", + }, + }, + }) + + invoked := make(chan struct{}, 1) + svc := &Service{ + completionStore: completionStore, + syncManager: syncMock, + automationSettingsLoader: func(context.Context) (*models.CrossSeedAutomationSettings, error) { + return models.DefaultCrossSeedAutomationSettings(), nil + }, + completionSearchInvoker: func(_ context.Context, _ int, _ *qbt.Torrent, _ *models.CrossSeedAutomationSettings, _ *models.InstanceCrossSeedCompletionSettings) error { + invoked <- struct{}{} + return nil + }, + } + setCompletionCheckingTimings(svc, 
5*time.Millisecond, 50*time.Millisecond) + + svc.HandleTorrentCompletion(context.Background(), 1, qbt.Torrent{ + Hash: hash, + Name: "checking-then-tagged", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + CompletionOn: 300, + }) + + select { + case <-invoked: + t.Fatal("completion search should be skipped after refreshed torrent gains cross-seed tag") + case <-time.After(100 * time.Millisecond): + } +} + +func TestWaitForCompletionTorrentReady_ReturnsNotCompleteAfterChecking(t *testing.T) { + hash := "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + svc := &Service{ + syncManager: newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + hash: { + { + Hash: hash, + Name: "partial", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + }, + { + Hash: hash, + Name: "partial", + Progress: 0.27, + State: qbt.TorrentStatePausedUp, + }, + }, + }), + } + setCompletionCheckingTimings(svc, 5*time.Millisecond, 50*time.Millisecond) + + _, err := svc.waitForCompletionTorrentReady(context.Background(), 1, qbt.Torrent{ + Hash: hash, + Name: "partial", + }) + require.ErrorIs(t, err, ErrTorrentNotComplete) + require.Contains(t, err.Error(), "progress 0.27") +} + +func TestWaitForCompletionTorrentReady_TimesOutWhileChecking(t *testing.T) { + hash := "ffffffffffffffffffffffffffffffffffffffff" + svc := &Service{ + syncManager: newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + hash: {{ + Hash: hash, + Name: "stuck-checking", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + }}, + }), + } + setCompletionCheckingTimings(svc, 5*time.Millisecond, 20*time.Millisecond) + setCompletionCheckingRetryPolicy(svc, 5*time.Millisecond, 1) + + _, err := svc.waitForCompletionTorrentReady(context.Background(), 1, qbt.Torrent{ + Hash: hash, + Name: "stuck-checking", + }) + require.EqualError(t, err, "completion torrent stuck-checking still checking after 20ms") +} + +func TestWaitForCompletionTorrentReady_DeduplicatesConcurrentWaiters(t *testing.T) { + hash 
:= "9999999999999999999999999999999999999999" + syncMock := newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + hash: { + { + Hash: hash, + Name: "shared-wait", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + }, + { + Hash: hash, + Name: "shared-wait", + Progress: 1.0, + State: qbt.TorrentStateUploading, + }, + }, + }) + syncMock.delay = 2 * time.Millisecond + + svc := &Service{ + syncManager: syncMock, + } + setCompletionCheckingTimings(svc, 5*time.Millisecond, 50*time.Millisecond) + + start := make(chan struct{}) + errs := make(chan error, 2) + + for range 2 { + go func() { + <-start + _, err := svc.waitForCompletionTorrentReady(context.Background(), 1, qbt.Torrent{ + Hash: hash, + Name: "shared-wait", + }) + errs <- err + }() + } + + close(start) + + for range 2 { + require.NoError(t, <-errs) + } + + require.Equal(t, 2, syncMock.hitCount(hash)) +} + +func TestWaitForCompletionTorrentReady_TimesOutAfterCheckingRetries(t *testing.T) { + hash := "1212121212121212121212121212121212121212" + syncMock := newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + hash: {{ + Hash: hash, + Name: "retry-timeout", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + }}, + }) + + svc := &Service{ + syncManager: syncMock, + } + setCompletionCheckingTimings(svc, 5*time.Millisecond, 10*time.Millisecond) + setCompletionCheckingRetryPolicy(svc, 8*time.Millisecond, 3) + + _, err := svc.waitForCompletionTorrentReady(context.Background(), 1, qbt.Torrent{ + Hash: hash, + Name: "retry-timeout", + }) + require.EqualError(t, err, "completion torrent retry-timeout still checking after 10ms") + require.GreaterOrEqual(t, syncMock.hitCount(hash), 5) +} + +func TestWaitForCompletionTorrentReady_DeduplicatesConcurrentWaitersDuringRetryBackoff(t *testing.T) { + hash := "3434343434343434343434343434343434343434" + syncMock := newCompletionPollingSyncMock(map[string][]qbt.Torrent{ + hash: { + { + Hash: hash, + Name: "shared-retry-wait", + Progress: 0.27, + State: 
qbt.TorrentStateCheckingResumeData, + }, + { + Hash: hash, + Name: "shared-retry-wait", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + }, + { + Hash: hash, + Name: "shared-retry-wait", + Progress: 0.27, + State: qbt.TorrentStateCheckingResumeData, + }, + { + Hash: hash, + Name: "shared-retry-wait", + Progress: 1.0, + State: qbt.TorrentStateUploading, + }, + }, + }) + + svc := &Service{ + syncManager: syncMock, + } + setCompletionCheckingTimings(svc, 5*time.Millisecond, 10*time.Millisecond) + setCompletionCheckingRetryPolicy(svc, 8*time.Millisecond, 3) + + start := make(chan struct{}) + errs := make(chan error, 2) + + for range 2 { + go func() { + <-start + _, err := svc.waitForCompletionTorrentReady(context.Background(), 1, qbt.Torrent{ + Hash: hash, + Name: "shared-retry-wait", + }) + errs <- err + }() + } + + close(start) + + for range 2 { + require.NoError(t, <-errs) + } + + require.Equal(t, 4, syncMock.hitCount(hash)) +} + func TestCompletionRetryDelay_FallbackRateLimitMessages(t *testing.T) { t.Parallel() diff --git a/internal/web/swagger/openapi.yaml b/internal/web/swagger/openapi.yaml index 2abafd62f..e483b03db 100644 --- a/internal/web/swagger/openapi.yaml +++ b/internal/web/swagger/openapi.yaml @@ -6218,6 +6218,17 @@ components: skipRecheck: type: boolean description: Skip cross-seed matches that would require a manual recheck (alignment, extra files, or disc layouts like BDMV/VIDEO_TS) + enablePooledPartialCompletion: + type: boolean + description: Coordinate related hardlink/reflink partial adds against the same matched local source torrent using a temporary in-memory pooled-completion state that can be restored only while the pool remains active + allowReflinkSingleFileSizeMismatch: + type: boolean + description: In reflink mode, allow single-file size mismatches when normalized filenames still match; add paused, recheck, and auto-resume once completion reaches 99% + maxMissingBytesAfterRecheck: + type: integer + format: int64 + 
minimum: 1048576 + description: Maximum missing bytes allowed after recheck for pooled reflink automation before the torrent is left paused for manual review. Minimum 1 MiB, default 100 MiB. useHardlinks: type: boolean description: Enable hardlink mode for cross-seeding (creates hardlinked file trees) @@ -6315,6 +6326,17 @@ components: skipRecheck: type: boolean description: Skip cross-seed matches that would require a manual recheck (alignment, extra files, or disc layouts like BDMV/VIDEO_TS) + enablePooledPartialCompletion: + type: boolean + description: Coordinate related hardlink/reflink partial adds against the same matched local source torrent using a temporary in-memory pooled-completion state that can be restored only while the pool remains active + allowReflinkSingleFileSizeMismatch: + type: boolean + description: In reflink mode, allow single-file size mismatches when normalized filenames still match; add paused, recheck, and auto-resume once completion reaches 99% + maxMissingBytesAfterRecheck: + type: integer + format: int64 + minimum: 1048576 + description: Maximum missing bytes allowed after recheck for pooled reflink automation before the torrent is left paused for manual review. Minimum 1 MiB, default 100 MiB. 
useHardlinks: type: boolean description: Enable hardlink mode for cross-seeding (creates hardlinked file trees) diff --git a/web/src/pages/CrossSeedPage.tsx b/web/src/pages/CrossSeedPage.tsx index fba7b8d06..0e1d6644d 100644 --- a/web/src/pages/CrossSeedPage.tsx +++ b/web/src/pages/CrossSeedPage.tsx @@ -64,7 +64,7 @@ import { XCircle, Zap } from "lucide-react" -import { useCallback, useEffect, useMemo, useState } from "react" +import { type Dispatch, type SetStateAction, useCallback, useEffect, useMemo, useState } from "react" import { toast } from "sonner" // RSS Automation settings @@ -108,6 +108,9 @@ interface GlobalCrossSeedSettings { skipAutoResumeWebhook: boolean skipRecheck: boolean skipPieceBoundarySafetyCheck: boolean + enablePooledPartialCompletion: boolean + allowReflinkSingleFileSizeMismatch: boolean + maxMissingBytesAfterRecheck: number // Webhook source filtering: filter which local torrents to search when checking webhook requests webhookSourceCategories: string[] webhookSourceTags: string[] @@ -125,6 +128,7 @@ const DEFAULT_RSS_INTERVAL_MINUTES = 120 // RSS: default interval (2 hours) const MIN_SEEDED_SEARCH_INTERVAL_SECONDS = 60 // Seeded Search: minimum interval between torrents const MIN_GAZELLE_ONLY_SEARCH_INTERVAL_SECONDS = 5 // Gazelle-only seeded search: still be polite; per-torrent work can trigger multiple API calls const MIN_SEEDED_SEARCH_COOLDOWN_MINUTES = 720 // Seeded Search: minimum cooldown (12 hours) +const BYTES_PER_MIB = 1024 * 1024 // RSS Automation defaults const DEFAULT_AUTOMATION_FORM: AutomationFormState = { @@ -164,6 +168,9 @@ const DEFAULT_GLOBAL_SETTINGS: GlobalCrossSeedSettings = { skipAutoResumeWebhook: false, skipRecheck: false, skipPieceBoundarySafetyCheck: true, + enablePooledPartialCompletion: false, + allowReflinkSingleFileSizeMismatch: false, + maxMissingBytesAfterRecheck: 100 * BYTES_PER_MIB, // Webhook source filtering defaults - empty means no filtering (all torrents) webhookSourceCategories: [], 
webhookSourceTags: [], @@ -304,8 +311,13 @@ function RSSRunItem({ run, formatDateValue }: RSSRunItemProps) { ) } +interface HardlinkModeSettingsProps { + globalSettings: GlobalCrossSeedSettings + setGlobalSettings: Dispatch> +} + /** Per-instance hardlink/reflink mode settings component */ -function HardlinkModeSettings() { +function HardlinkModeSettings({ globalSettings, setGlobalSettings }: HardlinkModeSettingsProps) { const { instances, updateInstance, isUpdating } = useInstances() const [expandedInstances, setExpandedInstances] = useState([]) const [dirtyMap, setDirtyMap] = useState>({}) @@ -323,6 +335,24 @@ function HardlinkModeSettings() { () => (instances ?? []).filter((inst) => inst.isActive), [instances] ) + const managedModeSummary = useMemo(() => { + let hasManagedMode = false + let hasReflinkMode = false + + for (const instance of activeInstances) { + const form = formMap[instance.id] + const useHardlinks = form?.useHardlinks ?? instance.useHardlinks + const useReflinks = form?.useReflinks ?? instance.useReflinks + if (useHardlinks || useReflinks) { + hasManagedMode = true + } + if (useReflinks) { + hasReflinkMode = true + } + } + + return { hasManagedMode, hasReflinkMode } + }, [activeInstances, formMap]) // Auto-expand when 3 or fewer instances (only on first load) useEffect(() => { @@ -612,6 +642,80 @@ function HardlinkModeSettings() { ) })} + +
+
+

Managed partial handling

+

+ These settings only apply when at least one instance is using hardlink or reflink mode. +

+ {!managedModeSummary.hasManagedMode && ( +

+ These settings are persisted but only applied when an instance enables hardlink/reflink; you can clear them here even if no instance currently uses managed mode. +

+ )} +
+ +
+
+ +

+ Applies only to hardlink/reflink adds that already passed the current acceptance rules. Related partial adds sharing the same matched local source torrent are coordinated as a temporary in-memory pool. +

+

+ Hardlink automation only continues when post-recheck missing data is limited to whole missing files. Reflink can continue with partial-file divergence if the missing bytes stay within the limit below. +

+
+ setGlobalSettings(prev => ({ ...prev, enablePooledPartialCompletion: !!value }))} + /> +
+ + {globalSettings.enablePooledPartialCompletion && ( +
+ + { + const nextMiB = Number(event.target.value) + setGlobalSettings(prev => ({ + ...prev, + maxMissingBytesAfterRecheck: Math.max(BYTES_PER_MIB, Math.round((Number.isFinite(nextMiB) ? nextMiB : 0) * BYTES_PER_MIB)), + })) + }} + /> +

+ Default is 100 MiB. Reflink pool members above this post-recheck gap stay paused for manual review; hardlink pool members still require whole-file-only gaps. +

+
+ )} + +
+
+ +

+ Reflink-only escape hatch for one-file torrents where normalized file names match and the size is already within 1%. qui adds paused, forces a recheck, and auto-resumes at 99%. +

+

+ This does not apply to multi-file torrents, rejects larger gaps before add, and does not use pooled partial completion. +

+
+ setGlobalSettings(prev => ({ ...prev, allowReflinkSingleFileSizeMismatch: !!value }))} + /> +
+
@@ -854,6 +958,9 @@ export function CrossSeedPage({ activeTab, onTabChange }: CrossSeedPageProps) { skipAutoResumeWebhook: settings.skipAutoResumeWebhook ?? false, skipRecheck: settings.skipRecheck ?? false, skipPieceBoundarySafetyCheck: settings.skipPieceBoundarySafetyCheck ?? true, + enablePooledPartialCompletion: settings.enablePooledPartialCompletion ?? false, + allowReflinkSingleFileSizeMismatch: settings.allowReflinkSingleFileSizeMismatch ?? false, + maxMissingBytesAfterRecheck: settings.maxMissingBytesAfterRecheck ?? (100 * BYTES_PER_MIB), // Webhook source filtering webhookSourceCategories: settings.webhookSourceCategories ?? [], webhookSourceTags: settings.webhookSourceTags ?? [], @@ -942,6 +1049,9 @@ export function CrossSeedPage({ activeTab, onTabChange }: CrossSeedPageProps) { skipAutoResumeWebhook: settings.skipAutoResumeWebhook ?? false, skipRecheck: settings.skipRecheck ?? false, skipPieceBoundarySafetyCheck: settings.skipPieceBoundarySafetyCheck ?? true, + enablePooledPartialCompletion: settings.enablePooledPartialCompletion ?? false, + allowReflinkSingleFileSizeMismatch: settings.allowReflinkSingleFileSizeMismatch ?? false, + maxMissingBytesAfterRecheck: settings.maxMissingBytesAfterRecheck ?? (100 * BYTES_PER_MIB), webhookSourceCategories: settings.webhookSourceCategories ?? [], webhookSourceTags: settings.webhookSourceTags ?? [], webhookSourceExcludeCategories: settings.webhookSourceExcludeCategories ?? 
[], @@ -975,6 +1085,9 @@ export function CrossSeedPage({ activeTab, onTabChange }: CrossSeedPageProps) { skipAutoResumeWebhook: globalSource.skipAutoResumeWebhook, skipRecheck: globalSource.skipRecheck, skipPieceBoundarySafetyCheck: globalSource.skipPieceBoundarySafetyCheck, + enablePooledPartialCompletion: globalSource.enablePooledPartialCompletion, + allowReflinkSingleFileSizeMismatch: globalSource.allowReflinkSingleFileSizeMismatch, + maxMissingBytesAfterRecheck: globalSource.maxMissingBytesAfterRecheck, // Webhook source filtering webhookSourceCategories: globalSource.webhookSourceCategories, webhookSourceTags: globalSource.webhookSourceTags, @@ -2468,7 +2581,10 @@ export function CrossSeedPage({ activeTab, onTabChange }: CrossSeedPageProps) { Settings that apply to all cross-seed operations. - + {/* Gazelle (OPS/RED) */}
diff --git a/web/src/types/index.ts b/web/src/types/index.ts index 382ad694d..bbd31ce2d 100644 --- a/web/src/types/index.ts +++ b/web/src/types/index.ts @@ -1993,6 +1993,9 @@ export interface CrossSeedAutomationSettings { skipAutoResumeWebhook: boolean skipRecheck: boolean skipPieceBoundarySafetyCheck: boolean + enablePooledPartialCompletion: boolean + allowReflinkSingleFileSizeMismatch: boolean + maxMissingBytesAfterRecheck: number // Hardlink mode settings useHardlinks: boolean hardlinkBaseDir: string @@ -2044,6 +2047,9 @@ export interface CrossSeedAutomationSettingsPatch { skipAutoResumeWebhook?: boolean skipRecheck?: boolean skipPieceBoundarySafetyCheck?: boolean + enablePooledPartialCompletion?: boolean + allowReflinkSingleFileSizeMismatch?: boolean + maxMissingBytesAfterRecheck?: number // Hardlink mode settings useHardlinks?: boolean hardlinkBaseDir?: string