diff --git a/CHANGELOG.md b/CHANGELOG.md index cdec070b8e..f5a4780034 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Make security plugin aware of FIPS build param (-Pcrypto.standard=FIPS-140-3) ([#5952](https://github.com/opensearch-project/security/pull/5952)) ### Bug Fixes +- Fix audit log writing errors for rollover-enabled alias indices ([#5878](https://github.com/opensearch-project/security/pull/5878)) + - Fix the issue of unprocessed X-Request-Id ([#5954](https://github.com/opensearch-project/security/pull/5954)) ### Refactoring diff --git a/src/integrationTest/java/org/opensearch/security/auditlog/sink/InternalOpenSearchSinkIntegrationTestAuditAlias.java b/src/integrationTest/java/org/opensearch/security/auditlog/sink/InternalOpenSearchSinkIntegrationTestAuditAlias.java new file mode 100644 index 0000000000..1be04b23c7 --- /dev/null +++ b/src/integrationTest/java/org/opensearch/security/auditlog/sink/InternalOpenSearchSinkIntegrationTestAuditAlias.java @@ -0,0 +1,202 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ +package org.opensearch.security.auditlog.sink; + +import java.util.Map; + +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; + +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.framework.AuditConfiguration; +import org.opensearch.test.framework.AuditFilters; +import org.opensearch.test.framework.cluster.ClusterManager; +import org.opensearch.test.framework.cluster.LocalCluster; +import org.opensearch.test.framework.cluster.TestRestClient; +import org.opensearch.transport.client.Client; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.collection.IsMapContaining.hasKey; +import static org.awaitility.Awaitility.await; + +/** + * Integration tests for {@link InternalOpenSearchSink} with write-alias–based configuration. + * + *

These tests validate the audit sink behavior when a preconfigured write alias + * is used instead of a concrete index name. In this setup, the audit sink must + * detect that the target name is an alias and avoid attempting index creation.

+ * + *
Tested Code Path:
+ *

These tests focus on the {@code metadata.hasAlias(indexName)} branch in + * {@code createIndexIfAbsent()}, ensuring that alias targets are correctly + * recognized and accepted.

+ * + *

This behavior is required to support Index Lifecycle Management (ILM) + * patterns where a write alias points to a rolling series of indices.

+ * + * @see InternalOpenSearchSinkIntegrationTest for regular index-based tests + */ +public class InternalOpenSearchSinkIntegrationTestAuditAlias { + + private static final String AUDIT_ALIAS = "security-audit-write-alias"; + private static final String BACKING_INDEX = "security-audit-backend-000001"; + + @ClassRule + public static final LocalCluster cluster = new LocalCluster.Builder().clusterManager(ClusterManager.SINGLENODE) + .nodeSettings(Map.of("plugins.security.audit.config.index", AUDIT_ALIAS)) + .internalAudit(new AuditConfiguration(true).filters(new AuditFilters().enabledRest(true).enabledTransport(false))) + .build(); + + @BeforeClass + public static void setupAuditAlias() { + try (Client client = cluster.getInternalNodeClient()) { + client.admin().indices().create(new CreateIndexRequest(BACKING_INDEX)).actionGet(); + client.admin() + .indices() + .aliases( + new IndicesAliasesRequest().addAliasAction( + IndicesAliasesRequest.AliasActions.add().index(BACKING_INDEX).alias(AUDIT_ALIAS).writeIndex(true) + ) + ) + .actionGet(); + } + } + + private long countAuditDocs(Client client) { + SearchResponse response = client.search( + new SearchRequest(AUDIT_ALIAS).source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(0)) + ).actionGet(); + + return response.getHits().getTotalHits().value(); + } + + private void generateAuditEvent(String path) { + try (TestRestClient restClient = cluster.getRestClient(cluster.getAdminCertificate())) { + restClient.get(path); + } + } + + /** + * Tests the core functionality of the alias detection change. + * + *

Validates: The {@code metadata.hasAlias(indexName)} branch correctly + * identifies the audit target as an alias and returns true without attempting + * index creation.

+ * + *

Without this change: The sink would try to create an index named + * {@code security-audit-write-alias}, which would fail because an alias with + * that name already exists.

+ */ + @Test + public void testRecognizesAuditTargetAsWriteAlias() { + try (Client client = cluster.getInternalNodeClient()) { + generateAuditEvent("_cluster/health"); + + await().until(() -> countAuditDocs(client) > 0); + + GetAliasesResponse aliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest(AUDIT_ALIAS)).actionGet(); + + assertThat("Write alias must exist in cluster metadata", aliasesResponse.getAliases().isEmpty(), is(false)); + + String concreteIndex = aliasesResponse.getAliases().keySet().iterator().next(); + assertThat("Alias must resolve to a backing index", concreteIndex, not(equalTo(AUDIT_ALIAS))); + + boolean backendIndexExists = client.admin().indices().exists(new IndicesExistsRequest(concreteIndex)).actionGet().isExists(); + + assertThat("Backing index must exist physically", backendIndexExists, is(true)); + } + } + + /** + * Tests that audit events are successfully written through the alias. + * + *

Validates: The sink writes events to the alias, which OpenSearch + * routes to the write index. Multiple events accumulate correctly.

+ */ + @Test + public void testWritesEventsToAliasSuccessfully() { + try (Client client = cluster.getInternalNodeClient()) { + long before = countAuditDocs(client); + + generateAuditEvent("_cluster/health"); + generateAuditEvent("_cluster/stats"); + generateAuditEvent("_nodes/info"); + + await().until(() -> countAuditDocs(client) - before >= 3); + + long after = countAuditDocs(client); + assertThat("Multiple events must be written through alias", after - before, greaterThan(2L)); + } + } + + /** + * Verifies that audit documents written via a write alias contain all mandatory fields, + * correct values, and do not include irrelevant transport-layer fields. + * + *

Validates: When an audit event is generated and routed through a write alias, + * the resulting document contains the required fields: + *

+ * and does not include {@code audit_transport_request_type}, + * confirming that transport-specific fields are absent for REST events.

+ * + *

Why it's important: This ensures that using a write alias does not alter + * the content or structure of audit documents, preserving compliance and correctness.

+ */ + @Test + public void testAuditDocumentsViaAliasContainMandatoryFields() { + try (Client client = cluster.getInternalNodeClient()) { + long before = countAuditDocs(client); + generateAuditEvent("_cluster/health"); + + await().until(() -> countAuditDocs(client) > before); + + SearchResponse response = client.search( + new SearchRequest(AUDIT_ALIAS).source( + new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + .size(1) + .sort("@timestamp", org.opensearch.search.sort.SortOrder.DESC) + ) + ).actionGet(); + + Map doc = response.getHits().getAt(0).getSourceAsMap(); + + assertThat(doc, hasKey("audit_category")); + assertThat(doc, hasKey("audit_request_origin")); + assertThat(doc, hasKey("@timestamp")); + assertThat(doc, hasKey("audit_rest_request_method")); + assertThat(doc, hasKey("audit_rest_request_path")); + assertThat(doc, hasKey("audit_request_layer")); + assertThat(doc.get("audit_request_layer"), is("REST")); + assertThat(doc.get("audit_request_origin"), is("REST")); + assertThat(doc, not(hasKey("audit_transport_request_type"))); + } + } +} diff --git a/src/main/java/org/opensearch/security/auditlog/sink/InternalOpenSearchSink.java b/src/main/java/org/opensearch/security/auditlog/sink/InternalOpenSearchSink.java index 79c6514138..609cf9c8ac 100644 --- a/src/main/java/org/opensearch/security/auditlog/sink/InternalOpenSearchSink.java +++ b/src/main/java/org/opensearch/security/auditlog/sink/InternalOpenSearchSink.java @@ -16,6 +16,7 @@ import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.security.auditlog.impl.AuditMessage; @@ -52,7 +53,7 @@ public InternalOpenSearchSink( this.indexPattern = DateTimeFormat.forPattern(index); } catch (IllegalArgumentException e) { log.debug( - "Unable to parse index pattern 
due to {}. " + "If you have no date pattern configured you can safely ignore this message", + "Unable to parse index pattern due to {}. If you have no date pattern configured you can safely ignore this message", e.getMessage() ); } @@ -60,18 +61,32 @@ public InternalOpenSearchSink( @Override public boolean createIndexIfAbsent(String indexName) { - if (clusterService.state().metadata().hasIndex(indexName)) { + final Metadata metadata = clusterService.state().metadata(); + + if (metadata.hasAlias(indexName)) { + log.debug("Audit log target '{}' is an alias. Audit events will be written to the associated write index.", indexName); + return true; + } + if (metadata.hasIndex(indexName)) { + log.debug("Audit log index '{}' already exists.", indexName); return true; } - try { final CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName).settings(indexSettings); - final boolean ok = clientProvider.admin().indices().create(createIndexRequest).actionGet().isAcknowledged(); - log.info("Index {} created?: {}", indexName, ok); - return ok; - } catch (ResourceAlreadyExistsException resourceAlreadyExistsException) { - log.info("Index {} already exists", indexName); + final boolean acknowledged = clientProvider.admin().indices().create(createIndexRequest).actionGet().isAcknowledged(); + if (acknowledged) { + log.info("Created audit log index '{}'", indexName); + } else { + log.error("Failed to create audit log index '{}'. 
Index creation was not acknowledged.", indexName); + } + return acknowledged; + } catch (ResourceAlreadyExistsException e) { + // Race condition: another node created the index between our check and creation attempt + log.debug("Audit log index '{}' was created by another node", indexName); return true; + } catch (Exception e) { + log.error("Error creating audit log index '{}'", indexName, e); + return false; } } @@ -80,6 +95,7 @@ public void close() throws IOException { } + @Override public boolean doStore(final AuditMessage msg) { return super.doStore(msg, getExpandedIndexName(this.indexPattern, this.index)); }