@@ -13,6 +13,7 @@ use async_trait::async_trait;
 use diesel::{
     ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl,
     dsl::{max, min},
+    sql_types,
     upsert::excluded,
 };
 use downcast::Any;
@@ -48,11 +49,12 @@ use crate::{
         transactions::{OptimisticTransaction, StoredTransaction, TxInsertionOrder},
         tx_indices::OptimisticTxIndices,
     },
-    on_conflict_do_update, persist_chunk_into_table, read_only_blocking,
+    on_conflict_do_update, on_conflict_do_update_with_condition, persist_chunk_into_table,
+    read_only_blocking,
     schema::{
         chain_identifier, checkpoints, display, epochs, event_emit_module, event_emit_package,
         event_senders, event_struct_instantiation, event_struct_module, event_struct_name,
-        event_struct_package, events, feature_flags, objects, objects_history, objects_snapshot,
+        event_struct_package, events, feature_flags, objects_history, objects_snapshot,
         objects_version, optimistic_event_emit_module, optimistic_event_emit_package,
         optimistic_event_senders, optimistic_event_struct_instantiation,
         optimistic_event_struct_module, optimistic_event_struct_name,
@@ -339,7 +341,7 @@ impl PgIndexerStore {
         transactional_blocking_with_retry!(
             &self.blocking_cp,
             |conn| {
-                on_conflict_do_update!(
+                on_conflict_do_update_with_condition!(
                     display::table,
                     display_updates.values().collect::<Vec<_>>(),
                     display::object_type,
@@ -348,6 +350,7 @@ impl PgIndexerStore {
                         display::version.eq(excluded(display::version)),
                         display::bcs.eq(excluded(display::bcs)),
                     ),
+                    excluded(display::version).gt(display::version),
                     conn
                 );
                 Ok::<(), IndexerError>(())
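
Note: switching from on_conflict_do_update! to on_conflict_do_update_with_condition! adds a guard so an existing display row is only overwritten by a strictly newer version. As a minimal sketch (not taken from the change itself; only the conflict target, guard, and the two columns visible in this hunk are shown), the conditional upsert corresponds to a Postgres statement of roughly this shape:

    -- Sketch only: the real SQL is generated by diesel / the macro.
    INSERT INTO display (object_type, version, bcs)
    VALUES ($1, $2, $3)
    ON CONFLICT (object_type) DO UPDATE SET
        version = EXCLUDED.version,
        bcs     = EXCLUDED.bcs
    WHERE EXCLUDED.version > display.version;
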
@@ -362,42 +365,120 @@ impl PgIndexerStore {
         &self,
         mutated_object_mutation_chunk: Vec<StoredObject>,
     ) -> Result<(), IndexerError> {
+        let chunk_size = mutated_object_mutation_chunk.len();
+
+        let mut object_id_vec = Vec::with_capacity(chunk_size);
+        let mut object_version_vec = Vec::with_capacity(chunk_size);
+        let mut object_digest_vec = Vec::with_capacity(chunk_size);
+        let mut owner_type_vec = Vec::with_capacity(chunk_size);
+        let mut owner_id_vec = Vec::with_capacity(chunk_size);
+        let mut object_type_vec = Vec::with_capacity(chunk_size);
+        let mut serialized_object_vec = Vec::with_capacity(chunk_size);
+        let mut coin_type_vec = Vec::with_capacity(chunk_size);
+        let mut coin_balance_vec = Vec::with_capacity(chunk_size);
+        let mut df_kind_vec = Vec::with_capacity(chunk_size);
+
+        for obj in mutated_object_mutation_chunk.iter().cloned() {
+            object_id_vec.push(obj.object_id);
+            object_version_vec.push(obj.object_version);
+            object_digest_vec.push(obj.object_digest);
+            owner_type_vec.push(obj.owner_type);
+            owner_id_vec.push(obj.owner_id);
+            object_type_vec.push(obj.object_type);
+            serialized_object_vec.push(obj.serialized_object);
+            coin_type_vec.push(obj.coin_type);
+            coin_balance_vec.push(obj.coin_balance);
+            df_kind_vec.push(obj.df_kind);
+        }
+
+        let query = diesel::sql_query(
+            r#"
+            WITH new_data AS (
+                SELECT
+                    unnest($1::bytea[]) AS object_id,
+                    unnest($2::bigint[]) AS object_version,
+                    unnest($3::bytea[]) AS object_digest,
+                    unnest($4::smallint[]) AS owner_type,
+                    unnest($5::bytea[]) AS owner_id,
+                    unnest($6::text[]) AS object_type,
+                    unnest($7::bytea[]) AS serialized_object,
+                    unnest($8::text[]) AS coin_type,
+                    unnest($9::bigint[]) AS coin_balance,
+                    unnest($10::smallint[]) AS df_kind
+            ),
+            locked_objects AS (
+                SELECT o.*
+                FROM objects o
+                JOIN new_data nd ON o.object_id = nd.object_id
+                FOR UPDATE
+            ),
+            locked_deletes AS (
+                SELECT del.*
+                FROM optimistic_deleted_objects_versions del
+                JOIN new_data nd ON del.object_id = nd.object_id
+                FOR SHARE
+            )
+            INSERT INTO objects (
+                object_id,
+                object_version,
+                object_digest,
+                owner_type,
+                owner_id,
+                object_type,
+                serialized_object,
+                coin_type,
+                coin_balance,
+                df_kind
+            )
+            SELECT nd.*
+            FROM new_data nd
+            LEFT JOIN optimistic_deleted_objects_versions del
+                ON del.object_id = nd.object_id
+            WHERE COALESCE(del.object_version, -1) < nd.object_version
+            ON CONFLICT (object_id)
+            DO UPDATE SET
+                object_version = EXCLUDED.object_version,
+                object_digest = EXCLUDED.object_digest,
+                owner_type = EXCLUDED.owner_type,
+                owner_id = EXCLUDED.owner_id,
+                object_type = EXCLUDED.object_type,
+                serialized_object = EXCLUDED.serialized_object,
+                coin_type = EXCLUDED.coin_type,
+                coin_balance = EXCLUDED.coin_balance,
+                df_kind = EXCLUDED.df_kind
+            WHERE
+                EXCLUDED.object_version > objects.object_version;
+            "#,
+        )
+        .bind::<sql_types::Array<sql_types::Binary>, _>(object_id_vec)
+        .bind::<sql_types::Array<sql_types::BigInt>, _>(object_version_vec)
+        .bind::<sql_types::Array<sql_types::Binary>, _>(object_digest_vec)
+        .bind::<sql_types::Array<sql_types::SmallInt>, _>(owner_type_vec)
+        .bind::<sql_types::Array<sql_types::Nullable<sql_types::Binary>>, _>(owner_id_vec)
+        .bind::<sql_types::Array<sql_types::Nullable<sql_types::Text>>, _>(object_type_vec)
+        .bind::<sql_types::Array<sql_types::Binary>, _>(serialized_object_vec)
+        .bind::<sql_types::Array<sql_types::Nullable<sql_types::Text>>, _>(coin_type_vec)
+        .bind::<sql_types::Array<sql_types::Nullable<sql_types::BigInt>>, _>(coin_balance_vec)
+        .bind::<sql_types::Array<sql_types::Nullable<sql_types::SmallInt>>, _>(df_kind_vec);
+
         let guard = self
             .metrics
             .checkpoint_db_commit_latency_objects_chunks
             .start_timer();
-        let len = mutated_object_mutation_chunk.len();
         transactional_blocking_with_retry!(
             &self.blocking_cp,
             |conn| {
-                on_conflict_do_update!(
-                    objects::table,
-                    mutated_object_mutation_chunk.clone(),
-                    objects::object_id,
-                    (
-                        objects::object_id.eq(excluded(objects::object_id)),
-                        objects::object_version.eq(excluded(objects::object_version)),
-                        objects::object_digest.eq(excluded(objects::object_digest)),
-                        objects::owner_type.eq(excluded(objects::owner_type)),
-                        objects::owner_id.eq(excluded(objects::owner_id)),
-                        objects::object_type.eq(excluded(objects::object_type)),
-                        objects::serialized_object.eq(excluded(objects::serialized_object)),
-                        objects::coin_type.eq(excluded(objects::coin_type)),
-                        objects::coin_balance.eq(excluded(objects::coin_balance)),
-                        objects::df_kind.eq(excluded(objects::df_kind)),
-                    ),
-                    conn
-                );
+                query.clone().execute(conn)?;
                 Ok::<(), IndexerError>(())
             },
             PG_DB_COMMIT_SLEEP_DURATION
         )
         .tap_ok(|_| {
             let elapsed = guard.stop_and_record();
-            info!(elapsed, "Persisted {} chunked objects", len);
+            info!(elapsed, "Persisted {chunk_size} chunked objects");
         })
         .tap_err(|e| {
-            tracing::error!("Failed to persist object mutations with error: {}", e);
+            tracing::error!("Failed to persist object mutations with error: {e}");
         })
     }
 
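
Note: the new query binds one Postgres array per column and uses unnest inside the new_data CTE to zip the parallel arrays back into rows; locked_objects/locked_deletes take row locks so concurrent deletions cannot interleave, and the outer WHERE plus the ON CONFLICT ... WHERE guard drop any mutation that is not strictly newer than the stored object or its deletion tombstone. A standalone illustration of the unnest zipping behaviour (made-up values, not part of the change):

    -- Equal-length arrays in the SELECT list are expanded in lockstep (Postgres 10+):
    SELECT unnest(ARRAY[1, 2, 3]::bigint[])    AS object_version,
           unnest(ARRAY[10, 20, 30]::bigint[]) AS coin_balance;
    -- yields three rows: (1, 10), (2, 20), (3, 30)
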
@@ -409,34 +490,68 @@ impl PgIndexerStore {
             .metrics
             .checkpoint_db_commit_latency_objects_chunks
             .start_timer();
-        let len = deleted_objects_chunk.len();
+        let chunk_size = deleted_objects_chunk.len();
+
+        let (object_id_vec, object_version_vec): (Vec<_>, Vec<_>) = deleted_objects_chunk
+            .into_iter()
+            .map(|obj| (obj.object_id, obj.object_version))
+            .unzip();
+
+        let query = diesel::sql_query(
+            r#"
+            WITH new_data AS (
+                SELECT
+                    unnest($1::bytea[]) AS object_id,
+                    unnest($2::bigint[]) AS object_version
+            ),
+            locked_objects AS (
+                SELECT o.*
+                FROM objects o
+                JOIN new_data nd ON o.object_id = nd.object_id
+                FOR UPDATE
+            ),
+            locked_deletes AS (
+                SELECT del.*
+                FROM optimistic_deleted_objects_versions del
+                JOIN new_data nd ON del.object_id = nd.object_id
+                FOR UPDATE
+            ),
+            deleted AS (
+                DELETE FROM objects o
+                USING new_data nd
+                WHERE o.object_id = nd.object_id
+                    AND nd.object_version > o.object_version
+            )
+            INSERT INTO optimistic_deleted_objects_versions (object_id, object_version)
+            SELECT object_id, object_version
+            FROM new_data
+            ON CONFLICT (object_id)
+            DO UPDATE
+            SET object_version = EXCLUDED.object_version
+            WHERE EXCLUDED.object_version > optimistic_deleted_objects_versions.object_version;
+            "#,
+        )
+        .bind::<sql_types::Array<sql_types::Bytea>, _>(object_id_vec)
+        .bind::<sql_types::Array<sql_types::BigInt>, _>(object_version_vec);
+
         transactional_blocking_with_retry!(
             &self.blocking_cp,
             |conn| {
-                diesel::delete(
-                    objects::table.filter(
-                        objects::object_id.eq_any(
-                            deleted_objects_chunk
-                                .iter()
-                                .map(|o| o.object_id.clone())
-                                .collect::<Vec<_>>(),
-                        ),
-                    ),
-                )
-                .execute(conn)
-                .map_err(IndexerError::from)
-                .context("Failed to write object deletion to PostgresDB")?;
-
+                query
+                    .clone()
+                    .execute(conn)
+                    .map_err(IndexerError::from)
+                    .context("Failed to write object deletion to PostgresDB")?;
                 Ok::<(), IndexerError>(())
             },
             PG_DB_COMMIT_SLEEP_DURATION
         )
         .tap_ok(|_| {
             let elapsed = guard.stop_and_record();
-            info!(elapsed, "Deleted {} chunked objects", len);
+            info!(elapsed, "Deleted {chunk_size} chunked objects");
         })
         .tap_err(|e| {
-            tracing::error!("Failed to persist object deletions with error: {}", e);
+            tracing::error!("Failed to persist object deletions with error: {e}");
         })
     }
 
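
Note: the deletion path records a tombstone in optimistic_deleted_objects_versions and drops the live objects row only when the deletion is newer, while the mutation path above consults the same table via COALESCE(del.object_version, -1) < nd.object_version. A small standalone check of that guard (illustrative values only, not part of the change):

    -- A mutation at version 4 is filtered out by a tombstone at version 5,
    -- a mutation at version 6 passes, and a missing tombstone (NULL) never blocks.
    SELECT COALESCE(5, -1) < 4    AS stale_mutation_kept,   -- false
           COALESCE(5, -1) < 6    AS newer_mutation_kept,   -- true
           COALESCE(NULL, -1) < 1 AS no_tombstone_kept;     -- true
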
@@ -677,17 +792,17 @@ impl PgIndexerStore {
             },
             PG_DB_COMMIT_SLEEP_DURATION
         )
-        .tap_ok(|_| {
-            let elapsed = guard.stop_and_record();
-            info!(
+        .tap_ok(|_| {
+            let elapsed = guard.stop_and_record();
+            info!(
                 elapsed,
                 "Persisted {} checkpoints",
                 stored_checkpoints.len()
             );
-        })
-        .tap_err(|e| {
-            tracing::error!("Failed to persist checkpoints with error: {}", e);
-        })
+        })
+        .tap_err(|e| {
+            tracing::error!("Failed to persist checkpoints with error: {}", e);
+        })
     }
 
     fn persist_transactions_chunk(