Skip to content

Commit 132eb02

Browse files
committed
Add trace log of the request for the query and fetch phases (#34479)
This change adds a logger for the query and fetch phases that prints all requests before their execution at the trace level. This will help debug cases where an issue occurs during the execution, since only completed queries are logged by the slow logs.
1 parent 022ab2a commit 132eb02

File tree

4 files changed

+49
-1
lines changed

4 files changed

+49
-1
lines changed

server/src/main/java/org/elasticsearch/search/SearchPhase.java

+29
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
package org.elasticsearch.search;
2121

2222
import org.elasticsearch.search.internal.SearchContext;
23+
import org.elasticsearch.tasks.Task;
2324

2425
/**
2526
* Represents a phase of a search request e.g. query, fetch etc.
@@ -35,4 +36,32 @@ public interface SearchPhase {
3536
* Executes the search phase
3637
*/
3738
void execute(SearchContext context);
39+
40+
class SearchContextSourcePrinter {
41+
private final SearchContext searchContext;
42+
43+
public SearchContextSourcePrinter(SearchContext searchContext) {
44+
this.searchContext = searchContext;
45+
}
46+
47+
@Override
48+
public String toString() {
49+
StringBuilder builder = new StringBuilder();
50+
builder.append(searchContext.indexShard().shardId());
51+
builder.append(" ");
52+
if (searchContext.request() != null &&
53+
searchContext.request().source() != null) {
54+
builder.append("source[").append(searchContext.request().source().toString()).append("], ");
55+
} else {
56+
builder.append("source[], ");
57+
}
58+
if (searchContext.getTask() != null &&
59+
searchContext.getTask().getHeader(Task.X_OPAQUE_ID) != null) {
60+
builder.append("id[").append(searchContext.getTask().getHeader(Task.X_OPAQUE_ID)).append("], ");
61+
} else {
62+
builder.append("id[], ");
63+
}
64+
return builder.toString();
65+
}
66+
}
3867
}

server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java

+8
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919

2020
package org.elasticsearch.search.fetch;
2121

22+
import org.apache.logging.log4j.LogManager;
23+
import org.apache.logging.log4j.Logger;
2224
import org.apache.lucene.index.LeafReaderContext;
2325
import org.apache.lucene.index.ReaderUtil;
2426
import org.apache.lucene.search.DocIdSetIterator;
@@ -70,6 +72,7 @@
7072
* after reducing all of the matches returned by the query phase
7173
*/
7274
public class FetchPhase implements SearchPhase {
75+
private static final Logger LOGGER = LogManager.getLogger(FetchPhase.class);
7376

7477
private final FetchSubPhase[] fetchSubPhases;
7578

@@ -84,6 +87,11 @@ public void preProcess(SearchContext context) {
8487

8588
@Override
8689
public void execute(SearchContext context) {
90+
91+
if (LOGGER.isTraceEnabled()) {
92+
LOGGER.trace("{}", new SearchContextSourcePrinter(context));
93+
}
94+
8795
final FieldsVisitor fieldsVisitor;
8896
Map<String, Set<String>> storedToRequestedFields = new HashMap<>();
8997
StoredFieldsContext storedFieldsContext = context.storedFieldsContext();

server/src/main/java/org/elasticsearch/search/query/QueryPhase.java

+8
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919

2020
package org.elasticsearch.search.query;
2121

22+
import org.apache.logging.log4j.LogManager;
23+
import org.apache.logging.log4j.Logger;
2224
import org.apache.lucene.index.IndexReader;
2325
import org.apache.lucene.index.LeafReaderContext;
2426
import org.apache.lucene.queries.MinDocQuery;
@@ -72,6 +74,7 @@
7274
* (document ids and score or sort criteria) so that matches can be reduced on the coordinating node
7375
*/
7476
public class QueryPhase implements SearchPhase {
77+
private static final Logger LOGGER = LogManager.getLogger(QueryPhase.class);
7578

7679
private final AggregationPhase aggregationPhase;
7780
private final SuggestPhase suggestPhase;
@@ -98,6 +101,11 @@ public void execute(SearchContext searchContext) throws QueryPhaseExecutionExcep
98101
new DocValueFormat[0]);
99102
return;
100103
}
104+
105+
if (LOGGER.isTraceEnabled()) {
106+
LOGGER.trace("{}", new SearchContextSourcePrinter(searchContext));
107+
}
108+
101109
// Pre-process aggregations as late as possible. In the case of a DFS_Q_T_F
102110
// request, preProcess is called on the DFS phase; this is why we pre-process them
103111
// here to make sure it happens during the QUERY phase

test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java

+4-1
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@
5555
import org.elasticsearch.index.mapper.ObjectMapper.Nested;
5656
import org.elasticsearch.index.query.QueryShardContext;
5757
import org.elasticsearch.index.query.support.NestedScope;
58+
import org.elasticsearch.index.shard.IndexShard;
5859
import org.elasticsearch.index.shard.ShardId;
5960
import org.elasticsearch.indices.breaker.CircuitBreakerService;
6061
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
@@ -127,6 +128,9 @@ protected AggregatorFactory<?> createAggregatorFactory(Query query,
127128
MappedFieldType... fieldTypes) throws IOException {
128129
SearchContext searchContext = createSearchContext(indexSearcher, indexSettings);
129130
CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
131+
IndexShard indexShard = mock(IndexShard.class);
132+
when(indexShard.shardId()).thenReturn(new ShardId("test", "test", 0));
133+
when(searchContext.indexShard()).thenReturn(indexShard);
130134
when(searchContext.aggregations())
131135
.thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer));
132136
when(searchContext.bigArrays()).thenReturn(
@@ -151,7 +155,6 @@ protected AggregatorFactory<?> createAggregatorFactory(Query query,
151155
QueryShardContext queryShardContext = queryShardContextMock(mapperService);
152156
when(queryShardContext.getIndexSettings()).thenReturn(indexSettings);
153157
when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);
154-
155158
Map<String, MappedFieldType> fieldNameToType = new HashMap<>();
156159
fieldNameToType.putAll(Arrays.stream(fieldTypes)
157160
.collect(Collectors.toMap(MappedFieldType::name, Function.identity())));

0 commit comments

Comments
 (0)