8 changes: 4 additions & 4 deletions .github/workflows/ci.yml
@@ -24,15 +24,15 @@ jobs:
strategy:
fail-fast: false
matrix:
jdk: [ "17", "21" ]
jdk: [ "21", "25" ]
pattern: [ "C*", "N*", "D*,H*,L*", "I*,A*,U*", "K*,E*,W*,Z*,Y*,X*", "M*,P*,O*", "R*,B*,Q*,V*", "S*", "T*,F*,G*,J*" ]
uses: ./.github/workflows/worker.yml
with:
script: .github/scripts/run_unit-tests -Dtest=!QTest,'${{ matrix.pattern }}' -Dmaven.test.failure.ignore=true
artifact_prefix: "unit-test-reports-jdk${{ matrix.jdk }}"
jdk: ${{ matrix.jdk }}
key: "test-jdk${{ matrix.jdk }}-[${{ matrix.pattern }}]"
execute: ${{ matrix.jdk == '21' || github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-jdk17') }}
execute: ${{ matrix.jdk == '25' || github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-jdk21') }}

validate-dist:
uses: ./.github/workflows/worker.yml
@@ -47,7 +47,7 @@ jobs:
if: ${{ success() && !contains( github.event.pull_request.labels.*.name, 'jacoco:skip') && github.event_name != 'push' }}
with:
script: .github/scripts/create-jacoco-coverage-report.sh
artifacts_to_download: "unit-test-reports-jdk21*"
artifacts_to_download: "unit-test-reports-jdk25*"
key: "coverage-jacoco"

run-separated-tests:
@@ -67,4 +67,4 @@ jobs:
with:
script: .github/scripts/run_unit-tests ${{ matrix.args }} -Dmaven.test.failure.ignore=true
key: ${{ matrix.name }}
artifact_prefix: "unit-test-reports-jdk21"
artifact_prefix: "unit-test-reports-jdk25"
2 changes: 1 addition & 1 deletion .github/workflows/codeql.yml
@@ -44,7 +44,7 @@ jobs:
- uses: actions/setup-java@v5
with:
distribution: 'zulu'
java-version: '17'
java-version: '21'
cache: 'maven'

# Initializes the CodeQL tools for scanning.
2 changes: 1 addition & 1 deletion .github/workflows/cron-job-its.yml
@@ -41,7 +41,7 @@ jobs:
- name: setup java
uses: actions/setup-java@v5
with:
java-version: '17'
java-version: '21'
distribution: 'zulu'
cache: maven

2 changes: 1 addition & 1 deletion .github/workflows/docker-tests.yml
@@ -44,7 +44,7 @@ jobs:
uses: actions/setup-java@v5
with:
distribution: 'zulu'
java-version: 17
java-version: 21
cache: 'maven'
- name: Run Docker tests
id: run-it
2 changes: 1 addition & 1 deletion .github/workflows/static-checks.yml
@@ -35,7 +35,7 @@ jobs:
strategy:
fail-fast: false
matrix:
jdk: [ '17', '21' ]
jdk: [ '21', '25' ]
uses: ./.github/workflows/worker.yml
with:
script: .github/scripts/packaging-check.sh
2 changes: 1 addition & 1 deletion .github/workflows/worker.yml
@@ -21,7 +21,7 @@ on:
jdk:
required: false
type: string
default: '21'
default: '25'
script:
required: true
type: string
2 changes: 1 addition & 1 deletion README.md
@@ -102,7 +102,7 @@ Find articles written by community members and a calendar of upcoming events on

### Building from source

Please note that JDK 17 or JDK 21 is required to build Druid.
Please note that JDK 21 or JDK 25 is required to build Druid.

See the latest [build guide](https://druid.apache.org/docs/latest/development/build.html) for instructions on building Apache Druid from source.

2 changes: 1 addition & 1 deletion distribution/docker/Dockerfile
@@ -17,7 +17,7 @@
# under the License.
#

ARG JDK_VERSION=17
ARG JDK_VERSION=21

# The platform is explicitly specified as x64 to build the Druid distribution.
# This is because it's not able to build the distribution on arm64 due to dependency problem of web-console. See: https://github.com/apache/druid/issues/13012
2 changes: 1 addition & 1 deletion docs/design/zookeeper.md
@@ -40,7 +40,7 @@ The operations that happen over ZK are

## Coordinator Leader Election

We use the Curator [LeaderLatch](https://curator.apache.org/curator-recipes/leader-latch.html) recipe to perform leader election at path
We use the Curator [LeaderLatch](https://curator.apache.org/docs/recipes-leader-latch) recipe to perform leader election at path

```
${druid.zk.paths.coordinatorPath}/_COORDINATOR
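For context on the recipe whose documentation link is updated above: LeaderLatch has every participant register on the same ZooKeeper path, and Curator keeps exactly one of them as leader until that participant's latch is closed. A minimal, illustrative sketch follows; it is not Druid's Coordinator election code, and the connect string and latch path below are placeholders standing in for the configured `${druid.zk.paths.coordinatorPath}/_COORDINATOR` value.

```java
// Minimal LeaderLatch sketch; illustrative only, not Druid's Coordinator election code.
// The ZooKeeper address and latch path are placeholders.
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.leader.LeaderLatch;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class LeaderLatchSketch
{
  public static void main(String[] args) throws Exception
  {
    try (CuratorFramework client = CuratorFrameworkFactory.newClient(
        "localhost:2181", new ExponentialBackoffRetry(1000, 3))) {
      client.start();

      // Every candidate creates a latch on the same path; Curator elects exactly one leader.
      try (LeaderLatch latch = new LeaderLatch(client, "/druid/coordinator/_COORDINATOR")) {
        latch.start();
        latch.await();                    // blocks until this participant acquires leadership
        if (latch.hasLeadership()) {
          System.out.println("Acquired leadership; acting as the leader until the latch closes.");
        }
      } // closing the latch releases leadership so another participant can take over
    }
  }
}
```

Closing the latch (or losing the ZooKeeper session) is what hands leadership to the next waiting participant, which is why the recipe suits long-lived control processes such as the Coordinator.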
2 changes: 1 addition & 1 deletion docs/development/extensions-core/test-stats.md
@@ -30,7 +30,7 @@ Make sure to include `druid-stats` extension in order to use these aggregators.

## Z-Score for two sample ztests post aggregator

Please refer to [Making Sense of the Two-Proportions Test](https://www.isixsigma.com/tools-templates/hypothesis-testing/making-sense-two-proportions-test/) and [An Introduction to Statistics: Comparing Two Means](https://userweb.ucs.louisiana.edu/~jcb0773/Berry_statbook/427bookall-August2024.pdf) for more details.
Please refer to [Making Sense of the Two-Proportions Test](https://www.isixsigma.com/hypothesis-testing/making-sense-two-proportions-test/) and [An Introduction to Statistics: Comparing Two Means](https://userweb.ucs.louisiana.edu/~jcb0773/Berry_statbook/427bookall-August2024.pdf) for more details.

```
z = (p1 - p2) / S.E. (assuming null hypothesis is true)
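To make the statistic above concrete, here is a small worked example of the pooled two-proportions z-score. The sample counts are invented for illustration; the two-sample z-score post-aggregator described on this page derives the same quantities from the configured success-count and sample-size fields at query time.

```java
// Worked example of z = (p1 - p2) / S.E. using a pooled standard error.
// The counts are made-up illustration data, not output from Druid.
public class TwoProportionZExample
{
  public static void main(String[] args)
  {
    long successes1 = 520, trials1 = 10_000;   // e.g. conversions observed for variant A
    long successes2 = 480, trials2 = 10_000;   // e.g. conversions observed for variant B

    double p1 = (double) successes1 / trials1;
    double p2 = (double) successes2 / trials2;

    // Pooled proportion under the null hypothesis that both populations share one rate.
    double pooled = (double) (successes1 + successes2) / (trials1 + trials2);

    // S.E. of the difference in proportions.
    double standardError = Math.sqrt(pooled * (1 - pooled) * (1.0 / trials1 + 1.0 / trials2));

    double z = (p1 - p2) / standardError;
    System.out.printf("p1=%.4f p2=%.4f z=%.3f%n", p1, p2, z);   // z is roughly 1.3 for these counts
  }
}
```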
2 changes: 1 addition & 1 deletion docs/multi-stage-query/examples.md
@@ -29,7 +29,7 @@ This page describes SQL-based batch ingestion using the [multi-stage query task
These example queries show you some of the things you can do when modifying queries for your use case. Copy the example queries into the **Query** view of the web console and run them to see what they do.

:::tip
When you insert or replace data with SQL-based ingestion, set the context parameter `finalizeAggregations` to `false`. This context parameter is automatically set for you if you use the Druid console. If you use the API, you must explicitly set it. For an example, see [SQL-based ingestion API](../api-reference/sql-ingestion-api#sample-request). For details on aggregations, see [Rollup](./concepts.md#rollup).
When you insert or replace data with SQL-based ingestion, set the context parameter `finalizeAggregations` to `false`. This context parameter is automatically set for you if you use the Druid console. If you use the API, you must explicitly set it. For an example, see [SQL-based ingestion API](../api-reference/sql-ingestion-api.md#sample-request). For details on aggregations, see [Rollup](./concepts.md#rollup).
:::
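To illustrate setting the flag explicitly when you submit through the API rather than the console, the sketch below posts to the `/druid/v2/sql/task` endpoint described in the linked API reference with `finalizeAggregations` set to `false` in the request context. The Router address and the SQL text are placeholders, and the API reference above remains the authoritative description of the request shape.

```java
// Hedged sketch: submit an MSQ ingestion query with finalizeAggregations=false.
// The Router URL and the SQL text are placeholders; a real query needs a complete
// INSERT or REPLACE statement (for example, a full EXTERN clause and PARTITIONED BY).
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SubmitMsqIngestion
{
  public static void main(String[] args) throws Exception
  {
    String body =
        "{\n"
        + "  \"query\": \"INSERT INTO example_datasource SELECT ... PARTITIONED BY DAY\",\n"
        + "  \"context\": {\n"
        + "    \"finalizeAggregations\": false\n"
        + "  }\n"
        + "}";

    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:8888/druid/v2/sql/task"))   // placeholder Router address
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build();

    HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode() + " " + response.body());
  }
}
```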

## INSERT with no rollup
2 changes: 1 addition & 1 deletion docs/operations/java.md
@@ -27,7 +27,7 @@ a Java runtime for Druid.

## Selecting a Java runtime

Druid officially supports Java 17 and 21.
Druid officially supports Java 21 and 25.

The project team recommends using an OpenJDK-based Java distribution. There are many free and actively-supported
distributions available, including
52 changes: 26 additions & 26 deletions docs/querying/projections.md
@@ -249,36 +249,36 @@ To use compaction on a datasource that includes projections, you need to set the
<Tabs>
<TabItem value="Coordinator duties">

```json
{
"type": "catalog",
"dataSource": YOUR_DATASOURCE,
"engine": "native",
"skipOffsetFromLatest": "PT0H",
"taskPriority": 25,
"inputSegmentSizeBytes": 100000000000000,
"taskContext": null
```json
{
"type": "catalog",
"dataSource": YOUR_DATASOURCE,
"engine": "native",
"skipOffsetFromLatest": "PT0H",
"taskPriority": 25,
"inputSegmentSizeBytes": 100000000000000,
"taskContext": null
}
```
```

</TabItem>
<TabItem value="Supervisors">

</TabItem>
<TabItem value="Supervisors">

```json
{
"type": "autocompact",
"spec": {
"type": "catalog",
"dataSource": YOUR_DATASOURCE,
"engine": "native",
"skipOffsetFromLatest": "PT0H",
"taskPriority": 25,
"inputSegmentSizeBytes": 100000000000000,
"taskContext": null
},
"suspended": true
}
```
"type": "autocompact",
"spec": {
"type": "catalog",
"dataSource": YOUR_DATASOURCE,
"engine": "native",
"skipOffsetFromLatest": "PT0H",
"taskPriority": 25,
"inputSegmentSizeBytes": 100000000000000,
"taskContext": null
},
"suspended": true
}
```

</TabItem>
</Tabs>
2 changes: 1 addition & 1 deletion docs/querying/query-context-reference.md
@@ -34,7 +34,7 @@ This reference contains context parameters organized by their scope:
To learn how to set the query context, see [Set query context](./query-context.md).

For reference on query context parameters specific to Druid SQL, visit [SQL query context](sql-query-context.md).
For context parameters related to SQL-based ingestion, see the [SQL-based ingestion reference](../multi-stage-query/reference/#context-parameters).
For context parameters related to SQL-based ingestion, see the [SQL-based ingestion reference](../multi-stage-query/reference.md#context-parameters).


## General parameters
5 changes: 5 additions & 0 deletions extensions-contrib/redis-cache/pom.xml
@@ -50,6 +50,11 @@
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-pool2</artifactId>
<version>2.12.1</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
10 changes: 10 additions & 0 deletions extensions-core/kinesis-indexing-service/pom.xml
@@ -72,6 +72,16 @@
<artifactId>regions</artifactId>
<version>${aws.sdk.v2.version}</version>
</dependency>
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>aws-core</artifactId>
<version>${aws.sdk.v2.version}</version>
</dependency>
<dependency>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
5 changes: 5 additions & 0 deletions indexing-service/pom.xml
@@ -278,6 +278,11 @@
<version>2.48.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mozilla</groupId>
<artifactId>rhino-engine</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

<build>
@@ -22,6 +22,7 @@
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.annotations.VisibleForTesting;
import org.apache.druid.client.indexing.ClientCompactionRunnerInfo;
import org.apache.druid.data.input.impl.AggregateProjectionSpec;
import org.apache.druid.error.InvalidInput;
import org.apache.druid.indexer.CompactionEngine;
@@ -38,6 +39,8 @@
import org.apache.druid.server.compaction.ReindexingPartitioningRule;
import org.apache.druid.server.compaction.ReindexingRule;
import org.apache.druid.server.compaction.ReindexingRuleProvider;
import org.apache.druid.server.coordinator.ClusterCompactionConfig;
import org.apache.druid.server.coordinator.CompactionConfigValidationResult;
import org.apache.druid.server.coordinator.DataSourceCompactionConfig;
import org.apache.druid.server.coordinator.InlineSchemaDataSourceCompactionConfig;
import org.apache.druid.server.coordinator.UserCompactionTaskDimensionsConfig;
@@ -249,6 +252,53 @@ public UserCompactionTaskQueryTuningConfig getTuningConfig()
return tuningConfig;
}

/**
* Validates this template using a subset of the standard MSQ compaction checks.
* The standard path in {@link ClientCompactionRunnerInfo#validateCompactionConfig}
* assumes partitioning is controlled by {@code tuningConfig.partitionsSpec}, but
* this template forbids that field and uses {@code defaultPartitionsSpec} instead.
*
* <p>Checks performed:
* <ul>
* <li>partitionsSpec type and options — validated against {@code defaultPartitionsSpec}.
* Range partition dimension type checking passes {@code null} for dimensionSchemas
* since those are not known at template level.</li>
* <li>maxNumTasks >= 2 in taskContext.</li>
* </ul>
*
* <p>Standard MSQ checks skipped (not applicable at template level):
* <ul>
* <li>rollup vs metricsSpec consistency — {@code granularitySpec} is always null on the
* template; rollup is configured per-rule at job generation time.</li>
* <li>metricsSpec aggregator combining factory — there is no metricsSpec on the template;
* metrics come from per-rule data schema rules resolved at job generation time.</li>
* </ul>
*
* <p>Per-rule overrides (partitionsSpec, metricsSpec, rollup) are validated at task
* runtime by {@code MSQCompactionRunner.validateCompactionTask()} once the full config
* is resolved against actual data schemas.
*/
@Override
public CompactionConfigValidationResult validate(ClusterCompactionConfig clusterCompactionConfig)
{
List<CompactionConfigValidationResult> results = new ArrayList<>();

results.add(ClientCompactionRunnerInfo.validatePartitionsSpecForMSQ(
this.getDefaultPartitionsSpec(),
null,
this.getDefaultPartitioningVirtualColumns() != null
? this.getDefaultPartitioningVirtualColumns()
: VirtualColumns.EMPTY
));

results.add(ClientCompactionRunnerInfo.validateMaxNumTasksForMSQ(this.getTaskContext()));

return results.stream()
.filter(result -> !result.isValid())
.findFirst()
.orElse(CompactionConfigValidationResult.success());
}

/**
* Checks if the given interval's end time is after the specified boundary.
* Used to determine if intervals should be skipped based on skip offset configuration.
@@ -161,6 +161,17 @@ public void createAndEnqueueJobs(
final String supervisorId = supervisor.getSpec().getId();
try {
if (supervisor.shouldCreateJobs() && !activeSupervisors.contains(supervisorId)) {
final CompactionConfigValidationResult validationResult =
supervisor.getSpec().getSpec().validate(clusterCompactionConfig);
if (!validationResult.isValid()) {
log.warn(
"Skipping job creation for invalid supervisor[%s]: %s",
supervisorId,
validationResult.getReason()
);
return;
}

// Queue fresh jobs
final List<CompactionJob> jobs = supervisor.createJobs(source, jobParams);
jobs.forEach(job -> snapshotBuilder.addToPending(job.getCandidate()));
@@ -28,6 +28,7 @@
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.server.coordinator.AutoCompactionSnapshot;
import org.apache.druid.server.coordinator.CompactionConfigValidationResult;

import javax.annotation.Nullable;
import java.util.List;
@@ -84,12 +85,17 @@ public void start()
if (supervisorSpec.isSuspended()) {
log.info("Suspending compaction for dataSource[%s].", dataSource);
scheduler.stopCompaction(dataSource);
} else if (!supervisorSpec.getValidationResult().isValid()) {
return;
}

final CompactionConfigValidationResult validationResult =
scheduler.validateCompactionConfig(supervisorSpec.getSpec());
if (!validationResult.isValid()) {
log.warn(
"Cannot start compaction supervisor for datasource[%s] since the compaction supervisor spec is invalid. "
+ "Reason[%s].",
dataSource,
supervisorSpec.getValidationResult().getReason()
validationResult.getReason()
);
} else {
log.info("Starting compaction for dataSource[%s].", dataSource);
@@ -112,15 +118,19 @@ public SupervisorReport<AutoCompactionSnapshot> getStatus()
snapshot = AutoCompactionSnapshot.builder(dataSource)
.withStatus(AutoCompactionSnapshot.ScheduleStatus.NOT_ENABLED)
.build();
} else if (!supervisorSpec.getValidationResult().isValid()) {
snapshot = AutoCompactionSnapshot.builder(dataSource)
.withMessage(StringUtils.format(
"Compaction supervisor spec is invalid. Reason[%s].",
supervisorSpec.getValidationResult().getReason()
))
.build();
} else {
snapshot = scheduler.getCompactionSnapshot(dataSource);
final CompactionConfigValidationResult validationResult =
scheduler.validateCompactionConfig(supervisorSpec.getSpec());
if (!validationResult.isValid()) {
snapshot = AutoCompactionSnapshot.builder(dataSource)
.withMessage(StringUtils.format(
"Compaction supervisor spec is invalid. Reason[%s].",
validationResult.getReason()
))
.build();
} else {
snapshot = scheduler.getCompactionSnapshot(dataSource);
}
}

return new SupervisorReport<>(supervisorSpec.getId(), DateTimes.nowUtc(), snapshot);
@@ -133,7 +143,7 @@ public SupervisorStateManager.State getState()
return State.SCHEDULER_STOPPED;
} else if (supervisorSpec.isSuspended()) {
return State.SUSPENDED;
} else if (!supervisorSpec.getValidationResult().isValid()) {
} else if (!scheduler.validateCompactionConfig(supervisorSpec.getSpec()).isValid()) {
return State.INVALID_SPEC;
} else {
return State.RUNNING;