Commit fb8d803

remove override of testCountPushDown
1 parent 63ab6b4 commit fb8d803

2 files changed: +2 −63 lines changed

fluss-flink/fluss-flink-2.2/src/test/java/org/apache/fluss/flink/source/Flink22TableSourceBatchITCase.java

Lines changed: 1 addition & 58 deletions

@@ -17,62 +17,5 @@
 
 package org.apache.fluss.flink.source;
 
-import org.apache.flink.types.Row;
-import org.apache.flink.util.CloseableIterator;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.ValueSource;
-
-import java.util.Collections;
-import java.util.List;
-
-import static org.apache.fluss.flink.source.testutils.FlinkRowAssertionsUtils.collectRowsWithTimeout;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-
 /** IT case for batch source in Flink 2.2. */
-public class Flink22TableSourceBatchITCase extends FlinkTableSourceBatchITCase {
-
-    @ParameterizedTest
-    @ValueSource(booleans = {true, false})
-    @Override
-    void testCountPushDown(boolean partitionTable) throws Exception {
-        String tableName = partitionTable ? preparePartitionedLogTable() : prepareLogTable();
-        int expectedRows = partitionTable ? 10 : 5;
-        // normal scan
-        String query = String.format("SELECT COUNT(*) FROM %s", tableName);
-        assertThat(tEnv.explainSql(query))
-                .contains(
-                        String.format(
-                                "TableSourceScan(table=[[testcatalog, defaultdb, %s, "
-                                        + "aggregates=[grouping=[], aggFunctions=[Count1AggFunction()]]]], "
-                                        + "fields=[count1$0])",
-                                tableName));
-        CloseableIterator<Row> iterRows = tEnv.executeSql(query).collect();
-        List<String> collected = collectRowsWithTimeout(iterRows, 1);
-        List<String> expected = Collections.singletonList(String.format("+I[%s]", expectedRows));
-        assertThat(collected).isEqualTo(expected);
-
-        // test not push down grouping count.
-        assertThatThrownBy(
-                        () ->
-                                tEnv.explainSql(
-                                                String.format(
-                                                        "SELECT COUNT(*) FROM %s group by id",
-                                                        tableName))
-                                        .wait())
-                .hasMessageContaining(
-                        "Currently, Fluss only support queries on table with datalake enabled or point queries on primary key when it's in batch execution mode.");
-
-        // test not support primary key now
-        String primaryTableName = prepareSourceTable(new String[] {"id"}, null);
-        assertThatThrownBy(
-                        () ->
-                                tEnv.explainSql(
-                                                String.format(
-                                                        "SELECT COUNT(*) FROM %s ",
-                                                        primaryTableName))
-                                        .wait())
-                .hasMessageContaining(
-                        "Currently, Fluss only support queries on table with datalake enabled or point queries on primary key when it's in batch execution mode.");
-    }
-}
+public class Flink22TableSourceBatchITCase extends FlinkTableSourceBatchITCase {}

fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/source/FlinkTableSourceBatchITCase.java

Lines changed: 1 addition & 5 deletions

@@ -281,11 +281,7 @@ void testCountPushDown(boolean partitionTable) throws Exception {
         String query = String.format("SELECT COUNT(*) FROM %s", tableName);
         assertThat(tEnv.explainSql(query))
                 .contains(
-                        String.format(
-                                "TableSourceScan(table=[[testcatalog, defaultdb, %s, project=[id], "
-                                        + "aggregates=[grouping=[], aggFunctions=[Count1AggFunction()]]]], "
-                                        + "fields=[count1$0])",
-                                tableName));
+                        "aggregates=[grouping=[], aggFunctions=[Count1AggFunction()]]]], fields=[count1$0]");
         CloseableIterator<Row> iterRows = tEnv.executeSql(query).collect();
         List<String> collected = collectRowsWithTimeout(iterRows, 1);
         List<String> expected = Collections.singletonList(String.format("+I[%s]", expectedRows));