@@ -829,8 +829,9 @@ mod tests {
         super::*,
         rand::SeedableRng,
         rand_chacha::ChaChaRng,
-        solana_ledger::shred::{Shred, ShredFlags},
-        solana_sdk::signature::Keypair,
+        solana_entry::entry::create_ticks,
+        solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder},
+        solana_sdk::{hash::Hash, signature::Keypair},
     };
 
     fn get_keypair() -> Keypair {
@@ -845,74 +846,109 @@ mod tests {
     }
 
     #[test]
-    fn test_already_received() {
-        let slot = 1;
-        let index = 5;
-        let version = 0x40;
+    fn test_shred_deduper() {
         let keypair = get_keypair();
-        let mut shred = Shred::new_from_data(
-            slot,
-            index,
-            0,
-            &[],
-            ShredFlags::LAST_SHRED_IN_SLOT,
-            0,
-            version,
-            0,
-        );
-        shred.sign(&keypair);
+        let entries = create_ticks(10, 1, Hash::new_unique());
+        let rsc = ReedSolomonCache::default();
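+        // Helper: shred `entries` for the given (slot, parent) pair, signed with
+        // `keypair`. Varying `code_index` shifts where the coding shreds are
+        // indexed, so separate calls can yield coding shreds that collide on
+        // shred index while carrying different headers.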
+        let make_shreds_for_slot = |slot, parent, code_index| {
+            let shredder = Shredder::new(slot, parent, 1, 0).unwrap();
+            shredder.entries_to_shreds(
+                &keypair,
+                &entries,
+                true,
+                // chained_merkle_root
+                Some(Hash::new_from_array(rand::thread_rng().gen())),
+                0,
+                code_index,
+                true,
+                &rsc,
+                &mut ProcessShredsStats::default(),
+            )
+        };
+
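+        // The deduper filters on two signals: an exact payload match, and the
+        // shred ID (slot, index, type), which tolerates at most
+        // MAX_DUPLICATE_COUNT distinct payloads before being blocked outright.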
         let mut rng = ChaChaRng::from_seed([0xa5; 32]);
         let shred_deduper = ShredDeduper::<2>::new(&mut rng, /*num_bits:*/ 640_007);
-        // unique shred for (1, 5) should pass
-        assert!(!shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
-        // duplicate shred for (1, 5) blocked
-        assert!(shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
-        let mut shred = Shred::new_from_data(
-            slot,
-            index,
-            2,
-            &[],
-            ShredFlags::LAST_SHRED_IN_SLOT,
-            0,
-            version,
-            0,
+
+        // make a set of shreds for slot 5 with parent slot 4
+        let (shreds_data_5_4, shreds_code_5_4) = make_shreds_for_slot(5, 4, 0);
+        // make a set of shreds for slot 5 with parent slot 3
+        let (shreds_data_5_3, _shreds_code_5_3) = make_shreds_for_slot(5, 3, 0);
+        // make a set of shreds for slot 5 with parent slot 2
+        let (shreds_data_5_2, _shreds_code_5_2) = make_shreds_for_slot(5, 2, 0);
+        // pick a shred for the tests
+        let shred = shreds_data_5_4.last().unwrap().clone();
+        // unique shred should pass
+        assert!(
+            !shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT),
+            "First time seeing shred X => not dup: payload and shred ID are both new"
+        );
+        // duplicate shred blocked
+        assert!(
+            shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT),
+            "Second time seeing shred X => dup: identical payload already seen"
+        );
+        // Pick a shred with the same index as `shred` but a different parent offset
+        let shred_dup = shreds_data_5_3.last().unwrap().clone();
+        // first sighting passes through
+        assert!(
+            !shred_deduper.dedup(shred_dup.id(), shred_dup.payload(), MAX_DUPLICATE_COUNT),
+            "First time seeing shred X with a different parent slot (3 instead of 4) => not dup: payload is unique and shred ID seen only once"
         );
-        shred.sign(&keypair);
-        // first duplicate shred for (1, 5) passed
-        assert!(!shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
         // then blocked
-        assert!(shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
-
-        let mut shred = Shred::new_from_data(
-            slot,
-            index,
-            8,
-            &[],
-            ShredFlags::LAST_SHRED_IN_SLOT,
-            0,
-            version,
-            0,
+        assert!(
+            shred_deduper.dedup(shred_dup.id(), shred_dup.payload(), MAX_DUPLICATE_COUNT),
+            "Second time seeing shred X with parent slot 3 => dup: payload already seen and shred ID now seen twice"
+        );
+
+        let shred_dup2 = shreds_data_5_2.last().unwrap().clone();
+
+        assert!(
+            shred_deduper.dedup(shred_dup2.id(), shred_dup2.payload(), MAX_DUPLICATE_COUNT),
+            "First time seeing shred X with parent slot 2 => dup: payload is new but shred ID already seen twice"
+        );
+
+        /* Coding shreds */
+
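+        // Coding shreds carry their own shred type in their ID, so they are
+        // tracked independently of the data shreds above at the same (slot, index).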
+        // Pick a coding shred at index 4 based off FEC set index 0
+        let shred = shreds_code_5_4[4].clone();
+        // Coding shred passes
+        assert!(
+            !shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT),
+            "First time seeing coding shred Y => not dup: payload and shred ID are both new"
         );
-        shred.sign(&keypair);
-        // 2nd duplicate shred for (1, 5) blocked
-        assert!(shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
-        assert!(shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
-
-        let shred = Shred::new_from_parity_shard(slot, index, &[], 0, 1, 1, 0, version);
-        // Coding at (1, 5) passes
-        assert!(!shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
         // then blocked
-        assert!(shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
+        assert!(
+            shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT),
+            "Second time seeing coding shred Y => dup: identical payload already seen"
+        );
 
-        let shred = Shred::new_from_parity_shard(slot, index, &[], 2, 1, 1, 0, version);
-        // 2nd unique coding at (1, 5) passes
-        assert!(!shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
-        // same again is blocked
-        assert!(shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
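+        // Each shred picked below shares its index with `shred` but differs in
+        // its header: same shred ID over a new payload.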
+        // Make a coding shred at index 4 based off FEC set index 2
+        let (_, shreds_code_invalid) = make_shreds_for_slot(5, 4, 2);
 
-        let shred = Shred::new_from_parity_shard(slot, index, &[], 3, 1, 1, 0, version);
-        // Another unique coding at (1, 5) always blocked
-        assert!(shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
-        assert!(shred_deduper.dedup(shred.id(), shred.payload(), MAX_DUPLICATE_COUNT));
+        let shred_inv_code_1 = shreds_code_invalid[2].clone();
+        assert_eq!(
+            shred.index(),
+            shred_inv_code_1.index(),
+            "we want a shred with the same index but a different FEC set index"
+        );
+        // 2nd unique coding shred passes
+        assert!(
+            !shred_deduper.dedup(shred_inv_code_1.id(), shred_inv_code_1.payload(), MAX_DUPLICATE_COUNT),
+            "First time seeing shred Y with a changed header (FEC set index 2) => not dup: payload is unique and shred ID seen only once"
+        );
+        // same again is blocked
+        assert!(
+            shred_deduper.dedup(shred_inv_code_1.id(), shred_inv_code_1.payload(), MAX_DUPLICATE_COUNT),
+            "Second time seeing shred Y with a changed header (FEC set index 2) => dup: payload already seen and shred ID now seen twice"
+        );
+        // Make a coding shred at index 4 based off FEC set index 3
+        let (_, shreds_code_invalid) = make_shreds_for_slot(5, 4, 3);
+
+        let shred_inv_code_2 = shreds_code_invalid[1].clone();
+        assert_eq!(
+            shred.index(),
+            shred_inv_code_2.index(),
+            "we want a shred with the same index but a different FEC set index"
+        );
+        assert!(
+            shred_deduper.dedup(shred_inv_code_2.id(), shred_inv_code_2.payload(), MAX_DUPLICATE_COUNT),
+            "First time seeing shred Y with a changed header (FEC set index 3) => dup: payload is new but shred ID already seen twice"
+        );
     }
 }