Log stack traces on data nodes before they are cleared for transport #125732

Open · wants to merge 9 commits into base: main
5 changes: 5 additions & 0 deletions docs/changelog/125732.yaml
@@ -0,0 +1,5 @@
pr: 125732
summary: Log stack traces on data nodes before they are cleared for transport
area: Search
type: bug
issues: []
@@ -11,16 +11,23 @@

import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NByteArrayEntity;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Request;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.ErrorTraceHelper;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.MockLog;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.xcontent.XContentType;
import org.junit.Before;
import org.junit.BeforeClass;

import java.io.IOException;
import java.nio.charset.Charset;
@@ -37,19 +44,27 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), MockTransportService.TestPlugin.class);
}

@BeforeClass
public static void setDebugLogLevel() {
Configurator.setLevel(SearchService.class, Level.DEBUG);
}

@Before
public void setupMessageListener() {
hasStackTrace = ErrorTraceHelper.setupErrorTraceListener(internalCluster());
}

private void setupIndexWithDocs() {
createIndex("test1", "test2");
private int setupIndexWithDocs() {
int numShards = numberOfShards();
createIndex("test1", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards).build());
createIndex("test2", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards).build());
indexRandom(
true,
prepareIndex("test1").setId("1").setSource("field", "foo"),
prepareIndex("test2").setId("10").setSource("field", 5)
);
refresh();
return numShards;
}

public void testSearchFailingQueryErrorTraceDefault() throws IOException {
@@ -108,6 +123,59 @@ public void testSearchFailingQueryErrorTraceFalse() throws IOException {
assertFalse(hasStackTrace.getAsBoolean());
}

public void testNoLoggingInSearchFailingQueryErrorTraceTrue() throws IOException {
int numShards = setupIndexWithDocs();

Request searchRequest = new Request("POST", "/_search");
searchRequest.setJsonEntity("""
{
"query": {
"simple_query_string" : {
"query": "foo",
"fields": ["field"]
}
}
}
""");

String errorTriggeringIndex = "test2";
try (var mockLog = MockLog.capture(SearchService.class)) {
ErrorTraceHelper.addUnseenLoggingExpectations(numShards, mockLog, errorTriggeringIndex);

searchRequest.addParameter("error_trace", "true");
getRestClient().performRequest(searchRequest);
mockLog.assertAllExpectationsMatched();
}
}

public void testLoggingInSearchFailingQueryErrorTraceFalse() throws IOException {
int numShards = setupIndexWithDocs();

Request searchRequest = new Request("POST", "/_search");
searchRequest.setJsonEntity("""
{
"query": {
"simple_query_string" : {
"query": "foo",
"fields": ["field"]
}
}
}
""");

String errorTriggeringIndex = "test2";
try (var mockLog = MockLog.capture(SearchService.class)) {
ErrorTraceHelper.addSeenLoggingExpectations(numShards, mockLog, errorTriggeringIndex);

// error_trace defaults to false so we can test both cases with some randomization
if (randomBoolean()) {
searchRequest.addParameter("error_trace", "false");
}
getRestClient().performRequest(searchRequest);
mockLog.assertAllExpectationsMatched();
}
}

public void testMultiSearchFailingQueryErrorTraceDefault() throws IOException {
setupIndexWithDocs();

@@ -158,4 +226,55 @@ public void testMultiSearchFailingQueryErrorTraceFalse() throws IOException {

assertFalse(hasStackTrace.getAsBoolean());
}

public void testLoggingInMultiSearchFailingQueryErrorTraceTrue() throws IOException {
int numShards = setupIndexWithDocs();

XContentType contentType = XContentType.JSON;
MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add(
new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field")))
);
Request searchRequest = new Request("POST", "/_msearch");
byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent());
searchRequest.setEntity(
new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null))
);

searchRequest.addParameter("error_trace", "true");

String errorTriggeringIndex = "test2";
try (var mockLog = MockLog.capture(SearchService.class)) {
ErrorTraceHelper.addUnseenLoggingExpectations(numShards, mockLog, errorTriggeringIndex);

getRestClient().performRequest(searchRequest);
mockLog.assertAllExpectationsMatched();
}
}

public void testLoggingInMultiSearchFailingQueryErrorTraceFalse() throws IOException {
int numShards = setupIndexWithDocs();

XContentType contentType = XContentType.JSON;
MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add(
new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field")))
);
Request searchRequest = new Request("POST", "/_msearch");
byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent());
searchRequest.setEntity(
new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null))
);

// error_trace defaults to false so we can test both cases with some randomization
if (randomBoolean()) {
searchRequest.addParameter("error_trace", "false");
}

String errorTriggeringIndex = "test2";
try (var mockLog = MockLog.capture(SearchService.class)) {
ErrorTraceHelper.addSeenLoggingExpectations(numShards, mockLog, errorTriggeringIndex);

getRestClient().performRequest(searchRequest);
mockLog.assertAllExpectationsMatched();
}
}
}
46 changes: 41 additions & 5 deletions server/src/main/java/org/elasticsearch/search/SearchService.java
@@ -156,6 +156,7 @@
import java.util.function.Supplier;

import static org.elasticsearch.TransportVersions.ERROR_TRACE_IN_TRANSPORT_HEADER;
import static org.elasticsearch.common.Strings.format;
import static org.elasticsearch.core.TimeValue.timeValueHours;
import static org.elasticsearch.core.TimeValue.timeValueMillis;
import static org.elasticsearch.core.TimeValue.timeValueMinutes;
@@ -538,12 +539,16 @@ protected void doClose() {
* @param <T> the type of the response
* @param listener the action listener to be wrapped
* @param version channel version of the request
* @param nodeId id of the current node
* @param shardId id of the shard being searched
* @param threadPool with context where to write the new header
* @return the wrapped action listener
*/
static <T> ActionListener<T> maybeWrapListenerForStackTrace(
ActionListener<T> listener,
TransportVersion version,
String nodeId,
ShardId shardId,
ThreadPool threadPool
) {
boolean header = true;
@@ -552,6 +557,7 @@
}
if (header == false) {
Member:

I do not see where we are logging the trace? It seems we are only logging that it's actually removed.

I am thinking we should log the exception as a WARN if:

  • header == false (meaning the trace wouldn't be provided user side)
  • the exception isn't a user exception (e.g. should be considered a 5xx)

However, I defer to @javanna here. He has much better context around what we are trying to achieve.

Contributor Author:

Passing e to the logger here outputs the stack trace. Maybe my log message isn't clear... think something like "search failed with exception:" would be better?

Interesting point on WARN - do you think a WARN log per shard for the same underlying error is acceptable here? Luca and I decided it would be difficult to dedupe these at the moment. It might become easier with batched query execution.

Member:

Ah, I see.

I think that this shouldn't be debug. Maybe debug for all exceptions. But we should log WARN for things that are 5xx, indicating the failure.

I think adding a "Clearing stack trace before...." is unnecessary. Logging isn't just for our debugging, but also for users.

I am not sure indicating that the trace is being removed is overly useful here.

Contributor Author:

I've updated the log message to be clearer for users, and raised the level to WARN for 5xx. Thanks for the idea there - I think it makes sense for this new message to log at the same level as rest.suppressed.
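
A minimal sketch of the DEBUG/WARN split discussed in this thread. This is illustrative only, not the code merged in this PR: the helper name wrapWithPreClearLogging is made up, and it assumes the wrapper lives in SearchService (so logger, format and EMPTY_STACK_TRACE_ARRAY are in scope) and that ExceptionsHelper.status is used to decide whether the failure would surface as a 5xx.

    // Sketch (not the PR's final code): log the exception, with its stack trace, before the
    // trace is stripped for transport. WARN for server-side (5xx) failures, DEBUG otherwise.
    static <T> ActionListener<T> wrapWithPreClearLogging(ActionListener<T> listener, String nodeId, ShardId shardId) {
        return listener.delegateResponse((l, e) -> {
            if (ExceptionsHelper.status(e).getStatus() >= 500) {
                logger.warn(() -> format("[%s]%s: failed to execute search request", nodeId, shardId), e);
            } else {
                logger.debug(() -> format("[%s]%s: failed to execute search request", nodeId, shardId), e);
            }
            // Only after logging is the stack trace removed from the exception and its causes.
            ExceptionsHelper.unwrapCausesAndSuppressed(e, err -> {
                err.setStackTrace(EMPTY_STACK_TRACE_ARRAY);
                return false;
            });
            l.onFailure(e);
        });
    }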

return listener.delegateResponse((l, e) -> {
logger.debug(() -> format("[%s]%s: failed to execute search request", nodeId, shardId), e);
Contributor:

Do we need to correlate the request log on the coordinating node with the error reported on the data node? If so, is the logging information here enough?

Contributor Author:

Good question, I modeled the nodeId/shardId prefix to lead us right back to the coord node's logs. If you take a look at the example below: [CrhugeEAQNGHtZ14Y6Apjg][test][2] could be taken from the r.suppressed log and grepped across data nodes to find the new log.
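
A hypothetical illustration of that correlation key, assuming the prefix is built from the node id and the shard as in the log message above; the node id is taken from the example in this comment and the index UUID is made up, so this shows only how the "[nodeId][index][shard]" key is assembled, not real output.

    // Hypothetical: the same "[nodeId][index][shard]" triplet reported with the coordinating
    // node's r.suppressed shard failure can be used to grep the data node's logs for the new line.
    String nodeId = "CrhugeEAQNGHtZ14Y6Apjg";                     // node id from the shard failure
    ShardId shardId = new ShardId("test", "test-index-uuid", 2);  // ShardId.toString() renders as "[test][2]"
    String correlationKey = "[" + nodeId + "]" + shardId;         // -> "[CrhugeEAQNGHtZ14Y6Apjg][test][2]"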

Contributor:

I understand the nodeId/shardId combination, which associates a coord with a data node. What if there are multiple failing requests on the coord node? How can we identify the error of a single request? Is it a use case we must consider?

ExceptionsHelper.unwrapCausesAndSuppressed(e, err -> {
err.setStackTrace(EMPTY_STACK_TRACE_ARRAY);
return false;
@@ -563,7 +569,13 @@
}

public void executeDfsPhase(ShardSearchRequest request, SearchShardTask task, ActionListener<SearchPhaseResult> listener) {
listener = maybeWrapListenerForStackTrace(listener, request.getChannelVersion(), threadPool);
listener = maybeWrapListenerForStackTrace(
listener,
request.getChannelVersion(),
clusterService.localNode().getId(),
request.shardId(),
threadPool
);
final IndexShard shard = getShard(request);
rewriteAndFetchShardRequest(shard, request, listener.delegateFailure((l, rewritten) -> {
// fork the execution in the search thread pool
@@ -607,7 +619,13 @@ public void executeQueryPhase(ShardSearchRequest request, CancellableTask task,
rewriteAndFetchShardRequest(
shard,
request,
maybeWrapListenerForStackTrace(listener, request.getChannelVersion(), threadPool).delegateFailure((l, orig) -> {
maybeWrapListenerForStackTrace(
listener,
request.getChannelVersion(),
clusterService.localNode().getId(),
request.shardId(),
threadPool
).delegateFailure((l, orig) -> {
// check if we can shortcut the query phase entirely.
if (orig.canReturnNullResponseIfMatchNoDocs()) {
assert orig.scroll() == null;
@@ -805,9 +823,15 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, Cancella
}

public void executeRankFeaturePhase(RankFeatureShardRequest request, SearchShardTask task, ActionListener<RankFeatureResult> listener) {
listener = maybeWrapListenerForStackTrace(listener, request.getShardSearchRequest().getChannelVersion(), threadPool);
final ReaderContext readerContext = findReaderContext(request.contextId(), request);
final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.getShardSearchRequest());
listener = maybeWrapListenerForStackTrace(
listener,
shardSearchRequest.getChannelVersion(),
clusterService.localNode().getId(),
shardSearchRequest.shardId(),
threadPool
);
final Releasable markAsUsed = readerContext.markAsUsed(getKeepAlive(shardSearchRequest));
runAsync(getExecutor(readerContext.indexShard()), () -> {
try (SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, ResultsType.RANK_FEATURE, false)) {
@@ -856,8 +880,14 @@ public void executeQueryPhase(
ActionListener<ScrollQuerySearchResult> listener,
TransportVersion version
) {
listener = maybeWrapListenerForStackTrace(listener, version, threadPool);
final LegacyReaderContext readerContext = (LegacyReaderContext) findReaderContext(request.contextId(), request);
listener = maybeWrapListenerForStackTrace(
listener,
version,
clusterService.localNode().getId(),
readerContext.indexShard().shardId(),
threadPool
);
final Releasable markAsUsed;
try {
markAsUsed = readerContext.markAsUsed(getScrollKeepAlive(request.scroll()));
@@ -905,9 +935,15 @@ public void executeQueryPhase(
ActionListener<QuerySearchResult> listener,
TransportVersion version
) {
listener = maybeWrapListenerForStackTrace(listener, version, threadPool);
final ReaderContext readerContext = findReaderContext(request.contextId(), request.shardSearchRequest());
final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.shardSearchRequest());
listener = maybeWrapListenerForStackTrace(
listener,
version,
clusterService.localNode().getId(),
shardSearchRequest.shardId(),
threadPool
);
final Releasable markAsUsed = readerContext.markAsUsed(getKeepAlive(shardSearchRequest));
rewriteAndFetchShardRequest(readerContext.indexShard(), shardSearchRequest, listener.delegateFailure((l, rewritten) -> {
// fork the execution in the search thread pool
@@ -9,6 +9,8 @@

package org.elasticsearch.search;

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SortField;
@@ -51,6 +53,7 @@
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.MinAndMax;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.MockLog;
import org.elasticsearch.xcontent.XContentParserConfiguration;

import java.io.IOException;
@@ -59,6 +62,7 @@
import java.util.function.BiFunction;
import java.util.function.Predicate;

import static org.elasticsearch.common.Strings.format;
import static org.elasticsearch.search.SearchService.maybeWrapListenerForStackTrace;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.not;
@@ -125,6 +129,7 @@ public Type getType() {
}

public void testMaybeWrapListenerForStackTrace() {
ShardId shardId = new ShardId("index", "index", 0);
// Tests that the same listener has a stack trace if it is not wrapped, or does not have a stack trace if it is wrapped.
AtomicBoolean isWrapped = new AtomicBoolean(false);
ActionListener<SearchPhaseResult> listener = new ActionListener<>() {
@@ -146,11 +151,48 @@ public void onFailure(Exception e) {
e.fillInStackTrace();
assertThat(e.getStackTrace().length, is(not(0)));
listener.onFailure(e);
listener = maybeWrapListenerForStackTrace(listener, TransportVersion.current(), threadPool);
listener = maybeWrapListenerForStackTrace(listener, TransportVersion.current(), "node", shardId, threadPool);
isWrapped.set(true);
listener.onFailure(e);
}

public void testMaybeWrapListenerForStackTraceLogs() {
final String nodeId = "node";
final String index = "index";
ShardId shardId = new ShardId(index, index, 0);

try (var mockLog = MockLog.capture(SearchService.class)) {
Configurator.setLevel("org.elasticsearch.search.SearchService", Level.DEBUG);
final String exceptionMessage = "test exception message";
mockLog.addExpectation(
new MockLog.PatternAndExceptionSeenEventExpectation(
format("Tracking information ([%s]%s) and exception logged before stack trace cleared", nodeId, shardId),
SearchService.class.getCanonicalName(),
Level.DEBUG,
format("\\[%s\\]%s: failed to execute search request", nodeId, shardId),
Exception.class,
exceptionMessage
)
);

// Tests the listener has logged if it is wrapped
ActionListener<SearchPhaseResult> listener = new ActionListener<>() {
@Override
public void onResponse(SearchPhaseResult searchPhaseResult) {
// noop - we only care about failure scenarios
}

@Override
public void onFailure(Exception e) {
mockLog.assertAllExpectationsMatched();
}
};
Exception e = new Exception(exceptionMessage);
listener = maybeWrapListenerForStackTrace(listener, TransportVersion.current(), nodeId, shardId, threadPool);
listener.onFailure(e);
}
}

private void doTestCanMatch(
SearchRequest searchRequest,
SortField sortField,