Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
import org.apache.fluss.annotation.Internal;
import org.apache.fluss.annotation.PublicEvolving;
import org.apache.fluss.compression.ArrowCompressionType;
import org.apache.fluss.metadata.ChangelogImage;
import org.apache.fluss.metadata.DataLakeFormat;
import org.apache.fluss.metadata.DeleteBehavior;
import org.apache.fluss.metadata.KvFormat;
Expand Down Expand Up @@ -1423,6 +1424,21 @@ public class ConfigOptions {
+ "The auto increment column can only be used in primary-key table. The data type of the auto increment column must be INT or BIGINT."
+ "Currently a table can have only one auto-increment column.");

// Changelog image mode for primary-key tables. FULL (the default) emits UPDATE_BEFORE +
// UPDATE_AFTER pairs for updates; WAL suppresses UPDATE_BEFORE (see ChangelogImage).
// The option is read via TableConfig#getChangelogImage.
public static final ConfigOption<ChangelogImage> TABLE_CHANGELOG_IMAGE =
key("table.changelog.image")
.enumType(ChangelogImage.class)
.defaultValue(ChangelogImage.FULL)
.withDescription(
"Defines the changelog image mode for the primary key table. "
+ "This configuration is inspired by similar settings in database systems like MySQL's binlog_row_image and PostgreSQL's replica identity. "
+ "The supported modes are `FULL` (default) and `WAL`. "
+ "The `FULL` mode produces both UPDATE_BEFORE and UPDATE_AFTER records for update operations, capturing complete information about updates and allowing tracking of previous values. "
+ "The `WAL` mode does not produce UPDATE_BEFORE records. Only INSERT, UPDATE_AFTER (and DELETE if allowed) records are emitted. "
+ "When WAL mode is enabled with default merge engine (no merge engine configured) and full row updates (not partial update), an optimization is applied to skip looking up old values, "
+ "and in this case INSERT operations are converted to UPDATE_AFTER events. "
+ "This mode reduces storage and transmission costs but loses the ability to track previous values. "
+ "This option only affects primary key tables.");

// ------------------------------------------------------------------------
// ConfigOptions for Kv
// ------------------------------------------------------------------------
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

import org.apache.fluss.annotation.PublicEvolving;
import org.apache.fluss.compression.ArrowCompressionInfo;
import org.apache.fluss.metadata.ChangelogImage;
import org.apache.fluss.metadata.DataLakeFormat;
import org.apache.fluss.metadata.DeleteBehavior;
import org.apache.fluss.metadata.KvFormat;
Expand Down Expand Up @@ -117,6 +118,14 @@ public Optional<DeleteBehavior> getDeleteBehavior() {
return config.getOptional(ConfigOptions.TABLE_DELETE_BEHAVIOR);
}

/**
 * Returns the changelog image mode configured for this table.
 *
 * <p>The mode controls what information the changelog carries for update operations; see
 * {@link ConfigOptions#TABLE_CHANGELOG_IMAGE} for the available modes and the default.
 */
public ChangelogImage getChangelogImage() {
    final ChangelogImage mode = config.get(ConfigOptions.TABLE_CHANGELOG_IMAGE);
    return mode;
}

/** Gets the Arrow compression type and compression level of the table. */
public ArrowCompressionInfo getArrowCompressionInfo() {
return ArrowCompressionInfo.fromConf(config);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.fluss.metadata;

/**
 * The changelog image mode for the primary key table.
 *
 * <p>This enum defines what information is included in the changelog for update operations. It is
 * inspired by similar configurations in database systems like MySQL's binlog_row_image and
 * PostgreSQL's replica identity.
 *
 * @since 0.9
 */
public enum ChangelogImage {

    /**
     * Full changelog with both UPDATE_BEFORE and UPDATE_AFTER records. This is the default behavior
     * that captures complete information about updates, allowing tracking of previous values.
     */
    FULL,

    /**
     * WAL mode does not produce UPDATE_BEFORE records. Only INSERT, UPDATE_AFTER (and DELETE if
     * allowed) records are emitted. When WAL mode is enabled with default merge engine (no merge
     * engine configured) and full row updates (not partial update), an optimization is applied to
     * skip looking up old values, and in this case INSERT operations are converted to UPDATE_AFTER
     * events, similar to database WAL (Write-Ahead Log) behavior. This mode reduces storage and
     * transmission costs but loses the ability to track previous values.
     */
    WAL;

    /**
     * Creates a {@link ChangelogImage} from the given string, matching the enum constant names
     * case-insensitively (e.g. {@code "full"}, {@code "WAL"}).
     *
     * @param image the textual mode name
     * @return the matching changelog image mode
     * @throws IllegalArgumentException if the given string matches no mode
     */
    public static ChangelogImage fromString(String image) {
        // Match by constant name ignoring case. String#equalsIgnoreCase is locale-independent,
        // unlike the previous toUpperCase() switch, which is locale-sensitive (e.g. the Turkish
        // dotted/dotless 'i' pitfall). Iterating over values() also keeps this method in sync
        // automatically if new constants are ever added.
        for (ChangelogImage value : values()) {
            if (value.name().equalsIgnoreCase(image)) {
                return value;
            }
        }
        throw new IllegalArgumentException("Unsupported changelog image: " + image);
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
import org.apache.fluss.flink.utils.PushdownUtils.FieldEqual;
import org.apache.fluss.lake.source.LakeSource;
import org.apache.fluss.lake.source.LakeSplit;
import org.apache.fluss.metadata.ChangelogImage;
import org.apache.fluss.metadata.DeleteBehavior;
import org.apache.fluss.metadata.MergeEngineType;
import org.apache.fluss.metadata.TablePath;
Expand Down Expand Up @@ -206,10 +207,32 @@ public ChangelogMode getChangelogMode() {
if (mergeEngineType == MergeEngineType.FIRST_ROW) {
return ChangelogMode.insertOnly();
} else {
// Check delete behavior configuration
Configuration tableConf = Configuration.fromMap(tableOptions);
DeleteBehavior deleteBehavior =
tableConf.get(ConfigOptions.TABLE_DELETE_BEHAVIOR);
ChangelogImage changelogImage =
tableConf.get(ConfigOptions.TABLE_CHANGELOG_IMAGE);
if (changelogImage == ChangelogImage.WAL) {
// When using WAL mode, produce INSERT and UPDATE_AFTER (and DELETE if
// allowed), without UPDATE_BEFORE. Note: with default merge engine and full
// row updates, an optimization converts INSERT to UPDATE_AFTER.
if (deleteBehavior == DeleteBehavior.ALLOW) {
// DELETE is still produced when delete behavior is allowed
return ChangelogMode.newBuilder()
.addContainedKind(RowKind.INSERT)
.addContainedKind(RowKind.UPDATE_AFTER)
.addContainedKind(RowKind.DELETE)
.build();
} else {
// No DELETE when delete operations are ignored or disabled
return ChangelogMode.newBuilder()
.addContainedKind(RowKind.INSERT)
.addContainedKind(RowKind.UPDATE_AFTER)
.build();
}
}

// Using FULL mode, produce full changelog
if (deleteBehavior == DeleteBehavior.ALLOW) {
return ChangelogMode.all();
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1375,4 +1375,74 @@ void testDeleteBehaviorForInsertStmt(String deleteBehavior) throws Exception {
assertResultsIgnoreOrder(rowIter, expectedRows, true);
}
}

/**
 * Verifies that a primary-key table created with {@code 'table.changelog.image' = 'wal'} still
 * supports downstream streaming aggregation: the planner inserts a ChangelogNormalize operator
 * to reconstruct the UPDATE_BEFORE records that WAL mode omits at the source, and the
 * aggregation converges to the correct per-category sums after updates and a delete.
 */
@Test
void testWalModeWithDefaultMergeEngineAndAggregation() throws Exception {
String tableName = "wal_mode_pk_table";
// Create a table with WAL mode and default merge engine
tEnv.executeSql(
String.format(
"create table %s ("
+ " id int not null,"
+ " category string,"
+ " amount bigint,"
+ " primary key (id) not enforced"
+ ") with ('table.changelog.image' = 'wal')",
tableName));

// Insert initial data
tEnv.executeSql(
String.format(
"INSERT INTO %s VALUES "
+ "(1, 'A', 100), "
+ "(2, 'B', 200), "
+ "(3, 'A', 150), "
+ "(4, 'B', 250)",
tableName))
.await();

// Use batch mode to update and delete records
tBatchEnv.executeSql("UPDATE " + tableName + " SET amount = 120 WHERE id = 1").await();
tBatchEnv.executeSql("UPDATE " + tableName + " SET amount = 180 WHERE id = 3").await();
tBatchEnv.executeSql("DELETE FROM " + tableName + " WHERE id = 4").await();

// Do aggregation on the table and verify ChangelogNormalize node is generated.
// Scan from 'earliest' so the full changelog (inserts + updates + delete) is replayed.
String aggQuery =
String.format(
"SELECT category, SUM(amount) as total_amount FROM %s /*+ OPTIONS('scan.startup.mode' = 'earliest') */ GROUP BY category",
tableName);

// Explain the aggregation query to check for ChangelogNormalize
String aggPlan = tEnv.explainSql(aggQuery);
// ChangelogNormalize should be present to normalize the changelog for aggregation
// In Flink, when the source produces changelog with primary key semantics (I, UA, D),
// a ChangelogNormalize operator is inserted before aggregation
assertThat(aggPlan).contains("ChangelogNormalize");

// Execute the aggregation and verify the result
CloseableIterator<Row> aggIter = tEnv.executeSql(aggQuery).collect();

// Expected aggregation results:
// Category A: 120 (id=1) + 180 (id=3) = 300
// Category B: 200 (id=2) = 200 (id=4 was deleted)
// The -U/+U pairs below are the aggregation's retract/accumulate steps: each normalized
// update is applied as retract-old-row then add-new-row, and each step re-emits the sum.
// NOTE(review): the B-group sequence (+I[B, 250] then -D[B, 250] then +I[B, 200]) implies
// id=2's insert was observed after id=4's delete — presumably bucket interleaving; confirm
// this expectation is stable across runs.
List<String> expectedAggResults =
Arrays.asList(
"+I[A, 100]",
"-U[A, 100]",
"+U[A, 250]",
"-U[A, 250]",
"+U[A, 150]",
"-U[A, 150]",
"+U[A, 270]",
"-U[A, 270]",
"+U[A, 120]",
"-U[A, 120]",
"+U[A, 300]",
"+I[B, 250]",
"-D[B, 250]",
"+I[B, 200]");

// Collect results with timeout
assertResultsIgnoreOrder(aggIter, expectedAggResults, true);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,8 @@ public KvTablet getOrCreateKv(
kvFormat,
merger,
arrowCompressionInfo,
schemaGetter);
schemaGetter,
tableConfig.getChangelogImage());
currentKvs.put(tableBucket, tablet);

LOG.info(
Expand Down Expand Up @@ -277,9 +278,8 @@ public KvTablet loadKv(File tabletDir, SchemaGetter schemaGetter) throws Excepti
TablePath tablePath = physicalTablePath.getTablePath();
TableInfo tableInfo = getTableInfo(zkClient, tablePath);

RowMerger rowMerger =
RowMerger.create(
tableInfo.getTableConfig(), tableInfo.getTableConfig().getKvFormat());
TableConfig tableConfig = tableInfo.getTableConfig();
RowMerger rowMerger = RowMerger.create(tableConfig, tableConfig.getKvFormat());
KvTablet kvTablet =
KvTablet.create(
physicalTablePath,
Expand All @@ -290,10 +290,11 @@ public KvTablet loadKv(File tabletDir, SchemaGetter schemaGetter) throws Excepti
serverMetricGroup,
arrowBufferAllocator,
memorySegmentPool,
tableInfo.getTableConfig().getKvFormat(),
tableConfig.getKvFormat(),
rowMerger,
tableInfo.getTableConfig().getArrowCompressionInfo(),
schemaGetter);
tableConfig.getArrowCompressionInfo(),
schemaGetter,
tableConfig.getChangelogImage());
if (this.currentKvs.containsKey(tableBucket)) {
throw new IllegalStateException(
String.format(
Expand Down
Loading