@@ -30,15 +30,18 @@ use log::{debug, error, info, trace, warn};
 use p256::ecdsa::SigningKey as EcdsaSigningKey;
 use serde::{Deserialize, Serialize};
 use sha2::{Digest, Sha256};
-use static_ct_api::{LogEntry, PendingLogEntry, TileIterator, TreeWithTimestamp};
+use static_ct_api::{PendingLogEntry, TileIterator, TreeWithTimestamp};
 use std::collections::HashMap;
 use std::time::Duration;
 use std::{
     cmp::{Ord, Ordering},
     sync::LazyLock,
 };
 use thiserror::Error;
-use tlog_tiles::{Hash, HashReader, PathElem, Tile, TlogError, TlogTile, HASH_SIZE};
+use tlog_tiles::{
+    Hash, HashReader, LogEntryTrait, PathElem, PendingLogEntryTrait, Tile, TlogError,
+    TlogIteratorTrait, TlogTile, HASH_SIZE,
+};
 use tokio::sync::watch::{channel, Receiver, Sender};

 /// The maximum tile level is 63 (<c2sp.org/static-ct-api>), so safe to use [`u8::MAX`] as
@@ -354,6 +357,7 @@ impl SequenceState {
             edge_tiles.get(&DATA_TILE_KEY).unwrap().b.clone(),
             data_tile.tile.width() as usize,
         )
+        .into_entry_iter()
         .enumerate()
         {
             let got = tlog_tiles::record_hash(&entry?.merkle_tree_leaf());
@@ -596,17 +600,18 @@ async fn sequence_entries(
     }
     let mut overlay = HashMap::new();
     let mut n = old_size;
-    let mut sequenced_entries: Vec<LogEntry> = Vec::with_capacity(entries.len());
     let mut sequenced_metadata = Vec::with_capacity(entries.len());
+    let mut cache_metadata = Vec::with_capacity(entries.len());

     for (entry, sender) in entries {
-        let sequenced_entry = LogEntry {
-            inner: entry,
-            leaf_index: n,
-            timestamp,
-        };
+        let metadata = (n, timestamp);
+        cache_metadata.push((entry.lookup_key(), metadata));
+        sequenced_metadata.push((sender, metadata));
+
+        let sequenced_entry = entry.into_log_entry(metadata);
         let tile_leaf = sequenced_entry.tile_leaf();
         let merkle_tree_leaf = sequenced_entry.merkle_tree_leaf();
+
         metrics.seq_leaf_size.observe(tile_leaf.len().as_f64());
         data_tile.extend(tile_leaf);

@@ -623,7 +628,7 @@ async fn sequence_entries(
         )
         .map_err(|e| {
             SequenceError::NonFatal(format!(
-                "couldn't compute new hashes for leaf {sequenced_entry:?}: {e}",
+                "couldn't compute new hashes for leaf at index {n}: {e}",
             ))
         })?;
         for (i, h) in hashes.iter().enumerate() {
@@ -639,12 +644,6 @@ async fn sequence_entries(
             metrics.seq_data_tile_size.observe(data_tile.len().as_f64());
             data_tile.clear();
         }
-
-        sequenced_metadata.push((
-            sender,
-            (sequenced_entry.leaf_index, sequenced_entry.timestamp),
-        ));
-        sequenced_entries.push(sequenced_entry);
     }

     // Stage leftover partial data tile, if any.
@@ -768,23 +767,10 @@ async fn sequence_entries(
     // only consequence of cache false negatives are duplicated leaves anyway. In fact, an
     // error might cause the clients to resubmit, producing more cache false negatives and
     // duplicates.
-    if let Err(e) = cache
-        .put_entries(
-            &sequenced_entries
-                .iter()
-                .map(|entry| {
-                    (
-                        entry.inner.lookup_key(),
-                        (entry.leaf_index, entry.timestamp),
-                    )
-                })
-                .collect::<Vec<_>>(),
-        )
-        .await
-    {
+    if let Err(e) = cache.put_entries(&cache_metadata).await {
         warn!(
             "{name}: Cache put failed (entries={}): {e}",
-            sequenced_entries.len()
+            cache_metadata.len()
         );
     }

@@ -2097,23 +2083,24 @@ mod tests {
         {
             let entry = entry.unwrap();
             let idx = n * u64::from(TlogTile::FULL_WIDTH) + i as u64;
-            assert_eq!(entry.leaf_index, idx);
-            assert!(entry.timestamp <= sth_timestamp);
+            let (leaf_index, timestamp) = entry.metadata();
+            assert_eq!(leaf_index, idx);
+            assert!(timestamp <= sth_timestamp);
             assert_eq!(
                 leaf_hashes[usize::try_from(idx).unwrap()],
                 tlog_tiles::record_hash(&entry.merkle_tree_leaf())
             );

-            assert!(!entry.inner.certificate.is_empty());
-            if entry.inner.is_precert {
-                assert!(!entry.inner.pre_certificate.is_empty());
-                assert_ne!(entry.inner.issuer_key_hash, [0; 32]);
+            assert!(!entry.as_pending_entry().certificate.is_empty());
+            if entry.as_pending_entry().is_precert {
+                assert!(!entry.as_pending_entry().pre_certificate.is_empty());
+                assert_ne!(entry.as_pending_entry().issuer_key_hash, [0; 32]);
             } else {
-                assert!(entry.inner.pre_certificate.is_empty());
-                assert_eq!(entry.inner.issuer_key_hash, [0; 32]);
+                assert!(entry.as_pending_entry().pre_certificate.is_empty());
+                assert_eq!(entry.as_pending_entry().issuer_key_hash, [0; 32]);
             }

-            for fp in entry.inner.chain_fingerprints {
+            for fp in &entry.as_pending_entry().chain_fingerprints {
                 let b = block_on(self.object.fetch(&format!("issuer/{}", hex::encode(fp))))
                     .unwrap()
                     .unwrap();
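
For readers tracing the refactor, the sketch below shows the shape the new trait-based entry API appears to take. The method names (`lookup_key`, `into_log_entry`, `metadata`, `as_pending_entry`) come from the call sites in this diff, but the signatures, the `EntryMetadata` alias, and the toy `Pending`/`Sequenced` types are illustrative assumptions, not the actual `tlog_tiles` definitions.

```rust
// Illustrative sketch only: trait shapes inferred from the call sites in the
// diff above, not the real tlog_tiles definitions.

/// Metadata assigned at sequencing time: (leaf_index, timestamp).
/// (Hypothetical alias for this sketch.)
type EntryMetadata = (u64, u64);

trait PendingLogEntryTrait {
    type LogEntry: LogEntryTrait;

    /// Key under which this entry is stored in the deduplication cache.
    fn lookup_key(&self) -> [u8; 16];

    /// Attach sequencing metadata, turning a pending entry into a sequenced one.
    fn into_log_entry(self, metadata: EntryMetadata) -> Self::LogEntry;
}

trait LogEntryTrait {
    type Pending;

    /// The (leaf_index, timestamp) pair assigned when the entry was sequenced.
    fn metadata(&self) -> EntryMetadata;

    /// Borrow the original pending entry (certificate, chain fingerprints, ...).
    fn as_pending_entry(&self) -> &Self::Pending;
}

// Toy types standing in for the real pending/sequenced entry structs.
struct Pending {
    data: Vec<u8>,
}

struct Sequenced {
    inner: Pending,
    leaf_index: u64,
    timestamp: u64,
}

impl PendingLogEntryTrait for Pending {
    type LogEntry = Sequenced;

    fn lookup_key(&self) -> [u8; 16] {
        // Stand-in for a real hash-derived cache key.
        let mut key = [0u8; 16];
        for (slot, byte) in key.iter_mut().zip(self.data.iter()) {
            *slot = *byte;
        }
        key
    }

    fn into_log_entry(self, (leaf_index, timestamp): EntryMetadata) -> Sequenced {
        Sequenced {
            inner: self,
            leaf_index,
            timestamp,
        }
    }
}

impl LogEntryTrait for Sequenced {
    type Pending = Pending;

    fn metadata(&self) -> EntryMetadata {
        (self.leaf_index, self.timestamp)
    }

    fn as_pending_entry(&self) -> &Pending {
        &self.inner
    }
}

fn main() {
    // Mirrors the loop in sequence_entries: record the cache key and metadata
    // first, then convert the pending entry into a sequenced log entry.
    let pending = Pending {
        data: b"example leaf".to_vec(),
    };
    let metadata = (0u64, 1_700_000_000_000u64);
    let cache_entry = (pending.lookup_key(), metadata);
    let entry = pending.into_log_entry(metadata);
    assert_eq!(entry.metadata(), metadata);
    println!("cache key {:02x?} -> leaf {}", cache_entry.0, entry.metadata().0);
}
```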