@@ -488,3 +488,77 @@ func testStartWatcherFromCompactedRevision(t *testing.T, performCompactOnTombsto
 		}
 	}
 }
+
+// TestResumeCompactionOnTombstone verifies that a deletion event is preserved
+// when etcd restarts and resumes compaction on a key that only has a tombstone revision.
+func TestResumeCompactionOnTombstone(t *testing.T) {
+	e2e.BeforeTest(t)
+
+	ctx := context.Background()
+	compactBatchLimit := 5
+
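+	// A single-member cluster with gofail enabled allows the failpoint
+	// below to interrupt compaction partway through.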
+	cfg := e2e.DefaultConfig()
+	clus, err := e2e.NewEtcdProcessCluster(context.Background(),
+		t,
+		e2e.WithConfig(cfg),
+		e2e.WithClusterSize(1),
+		e2e.WithCompactionBatchLimit(compactBatchLimit),
+		e2e.WithGoFailEnabled(true),
+		e2e.WithWatchProcessNotifyInterval(100 * time.Millisecond),
+	)
+	require.NoError(t, err)
+	defer clus.Close()
+
+	c1 := newClient(t, clus.EndpointsGRPC(), cfg.Client)
+	defer c1.Close()
+
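+	// Seed the keyspace with compactBatchLimit keys, one revision per put.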
+	keyPrefix := "/key-"
+	for i := 0; i < compactBatchLimit; i++ {
+		key := fmt.Sprintf("%s%d", keyPrefix, i)
+		value := fmt.Sprintf("%d", i)
+
+		t.Logf("PUT key=%s, val=%s", key, value)
+		_, err = c1.KV.Put(ctx, key, value)
+		require.NoError(t, err)
+	}
+
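+	// Delete the first key so that its only remaining revision at
+	// compaction time is a tombstone.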
+	firstKey := keyPrefix + "0"
+	t.Logf("DELETE key=%s", firstKey)
+	deleteResp, err := c1.KV.Delete(ctx, firstKey)
+	require.NoError(t, err)
+
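+	// Capture the deletion event from a watch started at the delete
+	// revision; it serves as the expected payload for the post-restart watch.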
+	var deleteEvent *clientv3.Event
+	select {
+	case watchResp := <-c1.Watch(ctx, firstKey, clientv3.WithRev(deleteResp.Header.Revision)):
+		require.Len(t, watchResp.Events, 1)
+
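+		// The single event must be the tombstone for the deleted key.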
+		require.Equal(t, mvccpb.DELETE, watchResp.Events[0].Type)
+		deletedKey := string(watchResp.Events[0].Kv.Key)
+		require.Equal(t, firstKey, deletedKey)
+
+		deleteEvent = watchResp.Events[0]
+	case <-time.After(100 * time.Millisecond):
+		t.Fatal("timed out getting watch response")
+	}
+
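+	// Arm the gofail failpoint so the server panics during compaction,
+	// before it records the compaction as finished.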
+	require.NoError(t, clus.Procs[0].Failpoints().SetupHTTP(ctx, "compactBeforeSetFinishedCompact", `panic`))
+
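+	// The compaction request is expected to fail because the failpoint
+	// panics the server mid-compaction.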
+	t.Logf("COMPACT rev=%d", deleteResp.Header.Revision)
+	_, err = c1.KV.Compact(ctx, deleteResp.Header.Revision, clientv3.WithCompactPhysical())
+	require.Error(t, err)
+
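+	// Restart the member; on startup it resumes the interrupted compaction.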
+	require.NoError(t, clus.Restart(ctx))
+
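+	// Reconnect with a fresh client once the member is back up.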
+	c2 := newClient(t, clus.EndpointsGRPC(), cfg.Client)
+	defer c2.Close()
+
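+	// Watching from the delete revision again must replay exactly the
+	// deletion event captured before the restart, i.e. the tombstone
+	// survived the resumed compaction.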
+	watchChan := c2.Watch(ctx, firstKey, clientv3.WithRev(deleteResp.Header.Revision))
+	select {
+	case watchResp := <-watchChan:
+		require.Equal(t, []*clientv3.Event{deleteEvent}, watchResp.Events)
+	case <-time.After(100 * time.Millisecond):
+		// we care only about the first response, but have an
+		// escape hatch in case the watch response is delayed.
+		t.Fatal("timed out getting watch response")
+	}
+}