@@ -92,97 +92,124 @@ void testTiering() throws Exception {
         // then start tiering job
         JobClient jobClient = buildTieringJob(execEnv);
 
-        // check the status of replica after synced
-        assertReplicaStatus(t1Bucket, 3);
-        // check data in paimon
-        checkDataInPaimonPrimayKeyTable(t1, rows);
-        // check snapshot property in paimon
-        Map<String, String> properties =
-                new HashMap<String, String>() {
-                    {
-                        put(
-                                FLUSS_LAKE_SNAP_BUCKET_OFFSET_PROPERTY,
-                                "[{\"bucket_id\":0,\"log_offset\":3}]");
-                    }
-                };
-        checkSnapshotPropertyInPaimon(t1, properties);
-
-        // then, create another log table
-        TablePath t2 = TablePath.of(DEFAULT_DB, "logTable");
-        long t2Id = createLogTable(t2);
-        TableBucket t2Bucket = new TableBucket(t2Id, 0);
-        List<InternalRow> flussRows = new ArrayList<>();
-        // write records
-        for (int i = 0; i < 10; i++) {
-            rows = Arrays.asList(row(1, "v1"), row(2, "v2"), row(3, "v3"));
-            flussRows.addAll(rows);
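+        // run the checks in a try/finally so the tiering job is cancelled even if an assertion fails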
+        try {
+            // check the status of replica after synced
+            assertReplicaStatus(t1Bucket, 3);
+            // check data in paimon
+            checkDataInPaimonPrimaryKeyTable(t1, rows);
+            // check snapshot property in paimon
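+            // the property value is a JSON array holding the tiered log offset for each bucket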
+            Map<String, String> properties =
+                    new HashMap<String, String>() {
+                        {
+                            put(
+                                    FLUSS_LAKE_SNAP_BUCKET_OFFSET_PROPERTY,
+                                    "[{\"bucket_id\":0,\"log_offset\":3}]");
+                        }
+                    };
+            checkSnapshotPropertyInPaimon(t1, properties);
+
+            // then, create another log table
+            TablePath t2 = TablePath.of(DEFAULT_DB, "logTable");
+            long t2Id = createLogTable(t2);
+            TableBucket t2Bucket = new TableBucket(t2Id, 0);
+            List<InternalRow> flussRows = new ArrayList<>();
+            // write records
+            for (int i = 0; i < 10; i++) {
+                rows = Arrays.asList(row(1, "v1"), row(2, "v2"), row(3, "v3"));
+                flussRows.addAll(rows);
+                // write records
+                writeRows(t2, rows, true);
+            }
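+            // 10 iterations x 3 rows = 30 records written in total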
+            // check the status of replica after synced;
+            // note: we can't update the log start offset for an unaware-bucket-mode log table
+            assertReplicaStatus(t2Bucket, 30);
+
+            // check data in paimon
+            checkDataInPaimonAppendOnlyTable(t2, flussRows, 0);
+
+            // then write more data to the pk table
             // write records
-        writeRows(t2, rows, true);
+            rows = Arrays.asList(row(1, "v111"), row(2, "v222"), row(3, "v333"));
+            // write records
+            writeRows(t1, rows, false);
+
+            // check the status of the replica of t1 after synced;
+            // don't check the log start offset since we won't
+            // update the log start offset for a primary key table
+            assertReplicaStatus(t1Bucket, 9);
+
+            checkDataInPaimonPrimaryKeyTable(t1, rows);
+
+            // then create a partitioned table and wait until its partitions are ready
+            TablePath partitionedTablePath = TablePath.of(DEFAULT_DB, "partitionedTable");
+            Tuple2<Long, TableDescriptor> tableIdAndDescriptor =
+                    createPartitionedTable(partitionedTablePath);
+            Map<Long, String> partitionNameByIds = waitUntilPartitions(partitionedTablePath);
+
+            // now, write rows into the partitioned table
+            TableDescriptor partitionedTableDescriptor = tableIdAndDescriptor.f1;
+            Map<String, List<InternalRow>> writtenRowsByPartition =
+                    writeRowsIntoPartitionedTable(
+                            partitionedTablePath, partitionedTableDescriptor, partitionNameByIds);
+            long tableId = tableIdAndDescriptor.f0;
+
+            // wait until synced to paimon
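+            // each partition bucket is expected at log offset 3 after tiering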
+            for (Long partitionId : partitionNameByIds.keySet()) {
+                TableBucket tableBucket = new TableBucket(tableId, partitionId, 0);
+                assertReplicaStatus(tableBucket, 3);
+            }
+
+            // now, let's check data in paimon per partition
+            String partitionCol = partitionedTableDescriptor.getPartitionKeys().get(0);
+            for (String partitionName : partitionNameByIds.values()) {
+                checkDataInPaimonAppendOnlyPartitionedTable(
+                        partitionedTablePath,
+                        Collections.singletonMap(partitionCol, partitionName),
+                        writtenRowsByPartition.get(partitionName),
+                        0);
+            }
+
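+            // for a partitioned table, the snapshot property also records partition_id and partition_name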
+            properties =
+                    new HashMap<String, String>() {
+                        {
+                            put(
+                                    FLUSS_LAKE_SNAP_BUCKET_OFFSET_PROPERTY,
+                                    "["
+                                            + "{\"partition_id\":0,\"bucket_id\":0,\"partition_name\":\"date=2025\",\"log_offset\":3},"
+                                            + "{\"partition_id\":1,\"bucket_id\":0,\"partition_name\":\"date=2026\",\"log_offset\":3}"
+                                            + "]");
+                        }
+                    };
+            checkSnapshotPropertyInPaimon(partitionedTablePath, properties);
+        } finally {
+            jobClient.cancel().get();
         }
-        // check the status of replica after synced;
-        // note: we can't update log start offset for unaware bucket mode log table
-        assertReplicaStatus(t2Bucket, 30);
-
-        // check data in paimon
-        checkDataInPaimonAppendOnlyTable(t2, flussRows, 0);
+    }
 
-        // then write data to the pk tables
-        // write records
-        rows = Arrays.asList(row(1, "v111"), row(2, "v222"), row(3, "v333"));
+    @Test
+    void testTieringToDvEnabledTable() throws Exception {
+        TablePath t1 = TablePath.of(DEFAULT_DB, "pkTableWithDv");
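+        // the table enables datalake auto-compaction in Fluss and deletion vectors in Paimon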
+        long t1Id =
+                createPkTable(
+                        t1,
+                        Collections.singletonMap("table.datalake.auto-compaction", "true"),
+                        Collections.singletonMap("paimon.deletion-vectors.enabled", "true"));
         // write records
+        List<InternalRow> rows = Arrays.asList(row(1, "v1"), row(2, "v2"), row(3, "v3"));
         writeRows(t1, rows, false);
+        waitUntilSnapshot(t1Id, 1, 0);
 
-        // check the status of replica of t2 after synced
-        // not check start offset since we won't
-        // update start log offset for primary key table
-        assertReplicaStatus(t1Bucket, 9);
-
-        checkDataInPaimonPrimayKeyTable(t1, rows);
-
-        // then create partitioned table and wait partitions are ready
-        TablePath partitionedTablePath = TablePath.of(DEFAULT_DB, "partitionedTable");
-        Tuple2<Long, TableDescriptor> tableIdAndDescriptor =
-                createPartitionedTable(partitionedTablePath);
-        Map<Long, String> partitionNameByIds = waitUntilPartitions(partitionedTablePath);
-
-        // now, write rows into partitioned table
-        TableDescriptor partitionedTableDescriptor = tableIdAndDescriptor.f1;
-        Map<String, List<InternalRow>> writtenRowsByPartition =
-                writeRowsIntoPartitionedTable(
-                        partitionedTablePath, partitionedTableDescriptor, partitionNameByIds);
-        long tableId = tableIdAndDescriptor.f0;
-
-        // wait until synced to paimon
-        for (Long partitionId : partitionNameByIds.keySet()) {
-            TableBucket tableBucket = new TableBucket(tableId, partitionId, 0);
-            assertReplicaStatus(tableBucket, 3);
-        }
-
-        // now, let's check data in paimon per partition
-        // check data in paimon
-        String partitionCol = partitionedTableDescriptor.getPartitionKeys().get(0);
-        for (String partitionName : partitionNameByIds.values()) {
-            checkDataInPaimonAppendOnlyPartitionedTable(
-                    partitionedTablePath,
-                    Collections.singletonMap(partitionCol, partitionName),
-                    writtenRowsByPartition.get(partitionName),
-                    0);
+        // then start tiering job
+        JobClient jobClient = buildTieringJob(execEnv);
+        try {
+            // check the status of replica after synced
+            assertReplicaStatus(new TableBucket(t1Id, 0), 3);
+            // check data in paimon
+            checkDataInPaimonPrimaryKeyTable(t1, rows);
+        } finally {
+            jobClient.cancel().get();
         }
-
-        properties =
-                new HashMap<String, String>() {
-                    {
-                        put(
-                                FLUSS_LAKE_SNAP_BUCKET_OFFSET_PROPERTY,
-                                "["
-                                        + "{\"partition_id\":0,\"bucket_id\":0,\"partition_name\":\"date=2025\",\"log_offset\":3},"
-                                        + "{\"partition_id\":1,\"bucket_id\":0,\"partition_name\":\"date=2026\",\"log_offset\":3}"
-                                        + "]");
-                    }
-                };
-        checkSnapshotPropertyInPaimon(partitionedTablePath, properties);
-
-        jobClient.cancel().get();
     }
 
     private Tuple2<Long, TableDescriptor> createPartitionedTable(TablePath partitionedTablePath)