@@ -369,66 +369,74 @@ function defineBatchTests(factory: StorageFactory) {
     expect(data.length).toEqual(11002 + deletedRowOps.length);
   }
 
-  test.only('chunked snapshot edge case', async () => {
+  test('chunked snapshot edge case', async () => {
     // 1. Start with 10k rows, one row with id = 10000, and a large TOAST value in another column.
     // 2. Replicate one batch of rows (id < 10000).
     // 3. `UPDATE table SET id = 0 WHERE id = 10000`
     // 4. Replicate the rest of the table.
     // 5. Logical replication picks up the UPDATE above, but it is missing the TOAST column.
     // 6. We end up with a row that has a missing TOAST column.
 
-    try {
-      await using context = await WalStreamTestContext.open(factory);
+    await using context = await WalStreamTestContext.open(factory, {
+      // We need to use a smaller chunk size here, so that we can run a query in between chunks
+      walStreamOptions: { snapshotChunkSize: 100 }
+    });
 
-      await context.updateSyncRules(`bucket_definitions:
+    await context.updateSyncRules(`bucket_definitions:
   global:
     data:
       - SELECT * FROM test_data`);
-      const { pool } = context;
+    const { pool } = context;
 
-      await pool.query(`CREATE TABLE test_data(id integer primary key, description text)`);
+    await pool.query(`CREATE TABLE test_data(id integer primary key, description text)`);
 
-      await pool.query({
-        statement: `INSERT INTO test_data(id, description) SELECT i, 'foo' FROM generate_series(1, 10000) i`
-      });
+    // 1. Start with 10k rows, one row with id = 10000...
+    await pool.query({
+      statement: `INSERT INTO test_data(id, description) SELECT i, 'foo' FROM generate_series(1, 10000) i`
+    });
 
-      // Toast value, must be > 8kb after compression
-      const largeDescription = crypto.randomBytes(20_000).toString('hex');
-      await pool.query({
-        statement: 'UPDATE test_data SET description = $1 WHERE id = 10000',
-        params: [{ type: 'varchar', value: largeDescription }]
-      });
+    // ...and a large TOAST value in another column.
+    // Toast value, must be > 8kb after compression
+    const largeDescription = crypto.randomBytes(20_000).toString('hex');
+    await pool.query({
+      statement: 'UPDATE test_data SET description = $1 WHERE id = 10000',
+      params: [{ type: 'varchar', value: largeDescription }]
+    });
 
-      const p = context.replicateSnapshot();
+    // 2. Replicate one batch of rows (id < 10000).
+    // Our "stopping point" here is not quite deterministic.
+    const p = context.replicateSnapshot();
 
-      const stopAfter = 1_000;
-      const startRowCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+    const stopAfter = 1_000;
+    const startRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
 
-      while (true) {
-        const count =
-          ((await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0) -
-          startRowCount;
+    while (true) {
+      const count =
+        ((await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0) - startRowCount;
 
-        if (count >= stopAfter) {
-          break;
-        }
-        await timers.setTimeout(1);
+      if (count >= stopAfter) {
+        break;
       }
-      await pool.query('UPDATE test_data SET id = 0 WHERE id = 10000');
-      await p;
-
-      context.startStreaming();
-      const data = await context.getBucketData('global[]', undefined, {});
-      const reduced = reduceBucket(data);
-      expect(reduced.length).toEqual(10_001);
-
-      const movedRow = reduced.find((row) => row.object_id === '0');
-      expect(movedRow?.data).toEqual(`{"id":0,"description":"${largeDescription}"}`);
-    } catch (e) {
-      console.error(e);
-      throw e;
+      await timers.setTimeout(1);
     }
+
+    // 3. `UPDATE table SET id = 0 WHERE id = 10000`
+    await pool.query('UPDATE test_data SET id = 0 WHERE id = 10000');
+
+    // 4. Replicate the rest of the table.
+    await p;
+
+    // 5. Logical replication picks up the UPDATE above, but it is missing the TOAST column.
+    context.startStreaming();
+
+    // 6. If all went well, the "resnapshot" process would take care of this.
+    const data = await context.getBucketData('global[]', undefined, {});
+    const reduced = reduceBucket(data);
+
+    const movedRow = reduced.find((row) => row.object_id === '0');
+    expect(movedRow?.data).toEqual(`{"id":0,"description":"${largeDescription}"}`);
+
+    expect(reduced.length).toEqual(10_001);
   });
 
   function printMemoryUsage() {