 import org.apache.fluss.flink.source.split.LogSplit;
 import org.apache.fluss.flink.source.split.SourceSplitBase;
 import org.apache.fluss.flink.source.state.SourceEnumeratorState;
-import org.apache.fluss.flink.utils.PushdownUtils.FieldEqual;
 import org.apache.fluss.lake.source.LakeSource;
 import org.apache.fluss.lake.source.LakeSplit;
 import org.apache.fluss.metadata.PartitionInfo;
 import org.apache.fluss.metadata.TableBucket;
 import org.apache.fluss.metadata.TableInfo;
 import org.apache.fluss.metadata.TablePath;
-import org.apache.fluss.types.DataField;
+import org.apache.fluss.predicate.Predicate;
+import org.apache.fluss.row.BinaryString;
+import org.apache.fluss.row.GenericRow;
+import org.apache.fluss.row.InternalRow;
 import org.apache.fluss.utils.ExceptionUtils;

 import org.apache.flink.annotation.VisibleForTesting;
@@ -133,7 +135,7 @@ public class FlinkSourceEnumerator

     private volatile boolean closed = false;

-    private Predicate predicate;
+    private Predicate partitionFilters;

     @Nullable private final LakeSource<LakeSplit> lakeSource;

@@ -146,7 +148,7 @@ public FlinkSourceEnumerator(
             OffsetsInitializer startingOffsetsInitializer,
             long scanPartitionDiscoveryIntervalMs,
             boolean streaming,
-            Predicate predicate) {
+            Predicate partitionFilters) {
         this(
                 tablePath,
                 flussConf,
@@ -169,7 +171,7 @@ public FlinkSourceEnumerator(
             OffsetsInitializer startingOffsetsInitializer,
             long scanPartitionDiscoveryIntervalMs,
             boolean streaming,
-            List<FieldEqual> partitionFilters,
+            Predicate partitionFilters,
             @Nullable LakeSource<LakeSplit> lakeSource) {
         this(
                 tablePath,
@@ -199,7 +201,7 @@ public FlinkSourceEnumerator(
             OffsetsInitializer startingOffsetsInitializer,
             long scanPartitionDiscoveryIntervalMs,
             boolean streaming,
-            List<FieldEqual> partitionFilters,
+            Predicate partitionFilters,
             @Nullable LakeSource<LakeSplit> lakeSource) {
         this.tablePath = checkNotNull(tablePath);
         this.flussConf = checkNotNull(flussConf);
@@ -216,7 +218,7 @@ public FlinkSourceEnumerator(
                         : new LinkedList<>(pendingHybridLakeFlussSplits);
         this.scanPartitionDiscoveryIntervalMs = scanPartitionDiscoveryIntervalMs;
         this.streaming = streaming;
-        this.partitionFilters = checkNotNull(partitionFilters);
+        this.partitionFilters = partitionFilters;
         this.stoppingOffsetsInitializer =
                 streaming ? new NoStoppingOffsetsInitializer() : OffsetsInitializer.latest();
         this.lakeSource = lakeSource;
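
Note the dropped checkNotNull: with the Predicate-typed parameter, an absent filter is now represented by null rather than by an empty List<FieldEqual>, and applyPartitionFilter (next hunk) short-circuits on null. A minimal, dependency-free sketch of that convention, with java.util.function.Predicate standing in for the Fluss Predicate:

```java
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

final class NullMeansNoFilter {
    // Mirrors the enumerator's convention: a null filter keeps everything.
    static <T> List<T> apply(List<T> items, Predicate<T> filter) {
        if (filter == null) {
            return items;
        }
        return items.stream().filter(filter).collect(Collectors.toList());
    }
}
```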
@@ -354,22 +356,37 @@ private Set<PartitionInfo> listPartitions() {

     /** Apply partition filter. */
     private List<PartitionInfo> applyPartitionFilter(List<PartitionInfo> partitionInfos) {
-        if (predicate == null) {
+        if (partitionFilters == null) {
             return partitionInfos;
         } else {
+            int originalSize = partitionInfos.size();
             List<PartitionInfo> filteredPartitionInfos =
                     partitionInfos.stream()
                             .filter(
                                     partitionInfo ->
-                                            predicate.test(
+                                            partitionFilters.test(
                                                     convertPartitionInfoToInternalRow(
                                                             partitionInfo)))
                             .collect(Collectors.toList());
-            LOG.info(
-                    "Filtered partitions {} for table {} with predicate: {}",
-                    filteredPartitionInfos,
-                    tablePath,
-                    predicate);
+
+            int filteredSize = filteredPartitionInfos.size();
+            // Only log at INFO level when the filter actually removed partitions.
+            if (originalSize != filteredSize) {
+                LOG.info(
+                        "Applied partition filter for table {}: {} partitions filtered to {} partitions with predicate: {}",
+                        tablePath,
+                        originalSize,
+                        filteredSize,
+                        partitionFilters);
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("Filtered partitions: {}", filteredPartitionInfos);
+                }
+            } else if (LOG.isDebugEnabled()) {
+                LOG.debug(
+                        "Partition filter applied for table {} but no partitions were filtered out (total: {})",
+                        tablePath,
+                        originalSize);
+            }
             return filteredPartitionInfos;
         }
     }
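
The filter above evaluates the predicate against a row built from the partition's values, which is presumably what the newly imported GenericRow, BinaryString, and InternalRow are for. The real convertPartitionInfoToInternalRow is not shown in this diff; the helper below is a hypothetical reconstruction that assumes the partition values arrive as strings, ordered like the table's partition keys:

```java
import org.apache.fluss.row.BinaryString;
import org.apache.fluss.row.GenericRow;
import org.apache.fluss.row.InternalRow;

import java.util.List;

final class PartitionRowSketch {
    // Hypothetical stand-in for convertPartitionInfoToInternalRow: packs the
    // partition's string values into a GenericRow so Predicate.test can read it.
    static InternalRow toInternalRow(List<String> partitionValues) {
        GenericRow row = new GenericRow(partitionValues.size());
        for (int i = 0; i < partitionValues.size(); i++) {
            // BinaryString is Fluss's internal string representation.
            row.setField(i, BinaryString.fromString(partitionValues.get(i)));
        }
        return row;
    }
}
```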
@@ -394,17 +411,34 @@ private void checkPartitionChanges(Set<PartitionInfo> partitionInfos, Throwable
             LOG.error("Failed to list partitions for {}", tablePath, t);
             return;
         }
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Checking partition changes for table {}, found {} partitions",
+                    tablePath, partitionInfos.size());
+        }
+
         final PartitionChange partitionChange = getPartitionChange(partitionInfos);
         if (partitionChange.isEmpty()) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("No partition changes detected for table {}", tablePath);
+            }
             return;
         }

         // handle removed partitions
-        handlePartitionsRemoved(partitionChange.removedPartitions);
+        if (!partitionChange.removedPartitions.isEmpty()) {
+            LOG.info("Handling {} removed partitions for table {}: {}",
+                    partitionChange.removedPartitions.size(), tablePath, partitionChange.removedPartitions);
+            handlePartitionsRemoved(partitionChange.removedPartitions);
+        }

         // handle new partitions
-        context.callAsync(
-                () -> initPartitionedSplits(partitionChange.newPartitions), this::handleSplitsAdd);
+        if (!partitionChange.newPartitions.isEmpty()) {
+            LOG.info("Handling {} new partitions for table {}: {}",
+                    partitionChange.newPartitions.size(), tablePath, partitionChange.newPartitions);
+            context.callAsync(
+                    () -> initPartitionedSplits(partitionChange.newPartitions), this::handleSplitsAdd);
+        }
     }

     private PartitionChange getPartitionChange(Set<PartitionInfo> fetchedPartitionInfos) {
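
For reference, the guards above rely on PartitionChange exposing the two deltas plus an emptiness check. The field and method names below are confirmed by the call sites in this hunk; the rest of the shape is illustrative:

```java
import org.apache.fluss.metadata.PartitionInfo;

import java.util.Collection;

// Illustrative shape of the PartitionChange value object used above.
final class PartitionChange {
    final Collection<PartitionInfo> newPartitions;
    final Collection<PartitionInfo> removedPartitions;

    PartitionChange(
            Collection<PartitionInfo> newPartitions,
            Collection<PartitionInfo> removedPartitions) {
        this.newPartitions = newPartitions;
        this.removedPartitions = removedPartitions;
    }

    // Empty means this discovery pass found nothing to add or remove.
    boolean isEmpty() {
        return newPartitions.isEmpty() && removedPartitions.isEmpty();
    }
}
```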