@@ -32,7 +32,7 @@ function prepare() {
3232 # record tso before we create tables to skip the system table DDLs
3333 start_ts=$( run_cdc_cli_tso_query ${UP_PD_HOST_1} ${UP_PD_PORT_1} )
3434
35- run_sql "CREATE TABLE test.iceberg_upsert_basic(id INT PRIMARY KEY, val INT);"
35+ do_retry 5 2 run_sql "CREATE TABLE test.iceberg_upsert_basic(id INT PRIMARY KEY, val INT);"
3636
3737 run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY
3838
@@ -57,17 +57,17 @@ function wait_file_exists() {
5757}
5858
5959function iceberg_check_upsert_basic() {
60- run_sql "INSERT INTO test.iceberg_upsert_basic(id, val) VALUES (1, 1);"
61- run_sql "INSERT INTO test.iceberg_upsert_basic(id, val) VALUES (2, 2);"
60+ do_retry 5 2 run_sql "INSERT INTO test.iceberg_upsert_basic(id, val) VALUES (1, 1);"
61+ do_retry 5 2 run_sql "INSERT INTO test.iceberg_upsert_basic(id, val) VALUES (2, 2);"
6262
6363 WAREHOUSE_DIR="$WORK_DIR/iceberg_warehouse"
6464 TABLE_ROOT="$WAREHOUSE_DIR/ns/test/iceberg_upsert_basic"
6565 METADATA_DIR="$TABLE_ROOT/metadata"
6666 DATA_DIR="$TABLE_ROOT/data"
6767
6868 # Wait for iceberg commit output files.
69- wait_file_exists "$METADATA_DIR/v*.metadata.json" 120
70- wait_file_exists "$DATA_DIR/snap-*.parquet" 120
69+ wait_file_exists "$METADATA_DIR/v*.metadata.json" 180
70+ wait_file_exists "$DATA_DIR/snap-*.parquet" 180
7171
7272 # Hint: Spark readback is disabled by default.
7373 # Enable it via:
@@ -83,16 +83,16 @@ function iceberg_check_upsert_basic() {
8383 # so equality delete files are required (otherwise they may be optimized away within the same batch).
8484 first_meta=$(ls -1 "$METADATA_DIR"/v*.metadata.json | sort -V | tail -n 1)
8585
86- run_sql "UPDATE test.iceberg_upsert_basic SET val = 22 WHERE id = 2;"
87- run_sql "DELETE FROM test.iceberg_upsert_basic WHERE id = 1;"
86+ do_retry 5 2 run_sql "UPDATE test.iceberg_upsert_basic SET val = 22 WHERE id = 2;"
87+ do_retry 5 2 run_sql "DELETE FROM test.iceberg_upsert_basic WHERE id = 1;"
8888
8989 # Upsert mode should produce equality delete files for UPDATE/DELETE events.
90- wait_file_exists "$DATA_DIR/delete-*.parquet" 120
90+ wait_file_exists "$DATA_DIR/delete-*.parquet" 180
9191
9292 # Wait for a new metadata file after the UPDATE/DELETE commit.
9393 i=0
9494 latest_meta="$first_meta"
95- while [ $i -lt 120 ]; do
95+ while [ $i -lt 180 ]; do
9696 latest_meta=$(ls -1 "$METADATA_DIR"/v*.metadata.json | sort -V | tail -n 1)
9797 if [ "$latest_meta" != "$first_meta" ]; then
9898 break
@@ -116,8 +116,8 @@ function iceberg_check_upsert_basic() {
116116 # Verify checkpoint table is created.
117117 CHECKPOINT_DIR="$WAREHOUSE_DIR/ns/__ticdc/__tidb_checkpoints/data"
118118 CHECKPOINT_METADATA_DIR="$WAREHOUSE_DIR/ns/__ticdc/__tidb_checkpoints/metadata"
119- wait_file_exists "$CHECKPOINT_DIR/snap-*.parquet" 120
120- wait_file_exists "$CHECKPOINT_METADATA_DIR/v*.metadata.json" 120
119+ wait_file_exists "$CHECKPOINT_DIR/snap-*.parquet" 180
120+ wait_file_exists "$CHECKPOINT_METADATA_DIR/v*.metadata.json" 180
121121
122122 # Optional: Spark readback verification (requires Spark + Iceberg Spark runtime).
123123 if [ "${ICEBERG_SPARK_READBACK:-0}" = "1" ]; then
0 commit comments