 import org.apache.fluss.lake.paimon.testutils.FlinkPaimonTieringTestBase;
 import org.apache.fluss.metadata.Schema;
 import org.apache.fluss.metadata.TableBucket;
+import org.apache.fluss.metadata.TableChange;
 import org.apache.fluss.metadata.TableDescriptor;
 import org.apache.fluss.metadata.TablePath;
 import org.apache.fluss.row.InternalRow;
@@ -175,10 +176,7 @@ void testTiering() throws Exception {
                         {
                             put(
                                     FLUSS_LAKE_SNAP_BUCKET_OFFSET_PROPERTY,
-                                    "["
-                                            + "{\"partition_id\":0,\"bucket_id\":0,\"partition_name\":\"date=2025\",\"log_offset\":3},"
-                                            + "{\"partition_id\":1,\"bucket_id\":0,\"partition_name\":\"date=2026\",\"log_offset\":3}"
-                                            + "]");
+                                    getPartitionOffsetStr(partitionNameByIds));
                         }
                     };
             checkSnapshotPropertyInPaimon(partitionedTablePath, properties);
@@ -187,6 +185,150 @@ void testTiering() throws Exception {
         }
     }
 
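+    /**
+     * Verifies tiering for tables that are created with datalake disabled and only enabled later
+     * via alterTable, covering a primary key table, a log table, and a partitioned log table.
+     */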
+    @Test
+    void testTieringForAlterTable() throws Exception {
+        TablePath t1 = TablePath.of(DEFAULT_DB, "pkTableAlter");
+        Map<String, String> tableProperties = new HashMap<>();
+        tableProperties.put(ConfigOptions.TABLE_DATALAKE_ENABLED.key(), "false");
+
+        long t1Id = createPkTable(t1, 1, tableProperties, Collections.emptyMap());
+
+        TableChange.SetOption setOption =
+                TableChange.set(ConfigOptions.TABLE_DATALAKE_ENABLED.key(), "true");
+        List<TableChange> changes = Collections.singletonList(setOption);
+        admin.alterTable(t1, changes, false).get();
+
+        TableBucket t1Bucket = new TableBucket(t1Id, 0);
+
+        // write records
+        List<InternalRow> rows = Arrays.asList(row(1, "v1"), row(2, "v2"), row(3, "v3"));
+        writeRows(t1, rows, false);
+        waitUntilSnapshot(t1Id, 1, 0);
+
+        // then start tiering job
+        JobClient jobClient = buildTieringJob(execEnv);
+
+        try {
+            // check the status of replica after synced
+            assertReplicaStatus(t1Bucket, 3);
+            // check data in paimon
+            checkDataInPaimonPrimaryKeyTable(t1, rows);
+            // check snapshot property in paimon
+            Map<String, String> properties =
+                    new HashMap<String, String>() {
+                        {
+                            put(
+                                    FLUSS_LAKE_SNAP_BUCKET_OFFSET_PROPERTY,
+                                    "[{\"bucket_id\":0,\"log_offset\":3}]");
+                        }
+                    };
+            checkSnapshotPropertyInPaimon(t1, properties);
+
+            // then, create another log table
+            TablePath t2 = TablePath.of(DEFAULT_DB, "logTableAlter");
+
+            Map<String, String> logTableProperties = new HashMap<>();
+            logTableProperties.put(ConfigOptions.TABLE_DATALAKE_ENABLED.key(), "false");
+            long t2Id = createLogTable(t2, 1, false, logTableProperties, Collections.emptyMap());
+            // enable lake
+            admin.alterTable(t2, changes, false).get();
+
+            TableBucket t2Bucket = new TableBucket(t2Id, 0);
+            List<InternalRow> flussRows = new ArrayList<>();
+            // write records in several batches
+            for (int i = 0; i < 10; i++) {
+                rows = Arrays.asList(row(1, "v1"), row(2, "v2"), row(3, "v3"));
+                flussRows.addAll(rows);
+                writeRows(t2, rows, true);
+            }
+            // check the status of the replica after synced;
+            // note: we can't update the log start offset for an unaware-bucket-mode log table
+            assertReplicaStatus(t2Bucket, 30);
+
+            // check data in paimon
+            checkDataInPaimonAppendOnlyTable(t2, flussRows, 0);
+
+            // then write records to the pk table again
+            rows = Arrays.asList(row(1, "v111"), row(2, "v222"), row(3, "v333"));
+            writeRows(t1, rows, false);
+
+            // check the status of the replica of t1 after synced;
+            // the start offset is not checked since we won't
+            // update the start log offset for a primary key table
+            assertReplicaStatus(t1Bucket, 9);
+
+            checkDataInPaimonPrimaryKeyTable(t1, rows);
+
+            // then create a partitioned table and wait until its partitions are ready
+            TablePath partitionedTablePath = TablePath.of(DEFAULT_DB, "partitionedTableAlter");
+            Map<String, String> partitionTableProperties = new HashMap<>();
+            partitionTableProperties.put(ConfigOptions.TABLE_DATALAKE_ENABLED.key(), "false");
+
+            Tuple2<Long, TableDescriptor> tableIdAndDescriptor =
+                    createPartitionedTable(
+                            partitionedTablePath, partitionTableProperties, Collections.emptyMap());
+
+            admin.alterTable(partitionedTablePath, changes, false).get();
+
+            Map<Long, String> partitionNameByIds = waitUntilPartitions(partitionedTablePath);
+
+            // now, write rows into partitioned table
+            TableDescriptor partitionedTableDescriptor = tableIdAndDescriptor.f1;
+            Map<String, List<InternalRow>> writtenRowsByPartition =
+                    writeRowsIntoPartitionedTable(
+                            partitionedTablePath, partitionedTableDescriptor, partitionNameByIds);
+            long tableId = tableIdAndDescriptor.f0;
+
+            // wait until synced to paimon
+            for (Long partitionId : partitionNameByIds.keySet()) {
+                TableBucket tableBucket = new TableBucket(tableId, partitionId, 0);
+                assertReplicaStatus(tableBucket, 3);
+            }
+
+            // now, let's check the data in paimon per partition
+            String partitionCol = partitionedTableDescriptor.getPartitionKeys().get(0);
+            for (String partitionName : partitionNameByIds.values()) {
+                checkDataInPaimonAppendOnlyPartitionedTable(
+                        partitionedTablePath,
+                        Collections.singletonMap(partitionCol, partitionName),
+                        writtenRowsByPartition.get(partitionName),
+                        0);
+            }
+
+            properties =
+                    new HashMap<String, String>() {
+                        {
+                            put(
+                                    FLUSS_LAKE_SNAP_BUCKET_OFFSET_PROPERTY,
+                                    getPartitionOffsetStr(partitionNameByIds));
+                        }
+                    };
+            checkSnapshotPropertyInPaimon(partitionedTablePath, properties);
+        } finally {
+            jobClient.cancel().get();
+        }
+    }
+
+    // builds the expected JSON array of per-partition bucket offsets, e.g.
+    // [{"partition_id":1,"bucket_id":0,"partition_name":"date=2025","log_offset":3}]
+    private String getPartitionOffsetStr(Map<Long, String> partitionNameByIds) {
+        String raw =
+                "{\"partition_id\":%s,\"bucket_id\":0,\"partition_name\":\"date=%s\",\"log_offset\":3}";
+        List<Long> partitionIds = new ArrayList<>(partitionNameByIds.keySet());
+        Collections.sort(partitionIds);
+        List<String> partitionOffsetStrs = new ArrayList<>();
+
+        for (Long partitionId : partitionIds) {
+            String partitionName = partitionNameByIds.get(partitionId);
+            String partitionOffsetStr = String.format(raw, partitionId, partitionName);
+            partitionOffsetStrs.add(partitionOffsetStr);
+        }
+
+        return "[" + String.join(",", partitionOffsetStrs) + "]";
+    }
+
     @Test
     void testTieringToDvEnabledTable() throws Exception {
         TablePath t1 = TablePath.of(DEFAULT_DB, "pkTableWithDv");
@@ -214,6 +356,15 @@ void testTieringToDvEnabledTable() throws Exception {
 
     private Tuple2<Long, TableDescriptor> createPartitionedTable(TablePath partitionedTablePath)
             throws Exception {
+        return createPartitionedTable(
+                partitionedTablePath, Collections.emptyMap(), Collections.emptyMap());
+    }
+
+    private Tuple2<Long, TableDescriptor> createPartitionedTable(
+            TablePath partitionedTablePath,
+            Map<String, String> properties,
+            Map<String, String> customProperties)
+            throws Exception {
         TableDescriptor partitionedTableDescriptor =
                 TableDescriptor.builder()
                         .schema(
@@ -229,6 +380,8 @@ private Tuple2<Long, TableDescriptor> createPartitionedTable(TablePath partition
                                 AutoPartitionTimeUnit.YEAR)
                         .property(ConfigOptions.TABLE_DATALAKE_ENABLED, true)
                         .property(ConfigOptions.TABLE_DATALAKE_FRESHNESS, Duration.ofMillis(500))
+                        .properties(properties)
+                        .customProperties(customProperties)
                        .build();
         return Tuple2.of(
                 createTable(partitionedTablePath, partitionedTableDescriptor),