@@ -13,15 +13,12 @@
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.junit.After;
-import org.junit.Before;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -53,18 +50,6 @@ public static String randomExecutionHint() {

     private static int numRoutingValues;
 
-    @Before
-    public void disableBatchedExecution() {
-        // TODO: it's practically impossible to get a 100% deterministic test with batched execution unfortunately, adjust this test to
-        // still do something useful with batched execution (i.e. use somewhat relaxed assertions)
-        updateClusterSettings(Settings.builder().put(SearchService.BATCHED_QUERY_PHASE.getKey(), false));
-    }
-
-    @After
-    public void resetSettings() {
-        updateClusterSettings(Settings.builder().putNull(SearchService.BATCHED_QUERY_PHASE.getKey()));
-    }
-
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("idx").setMapping(STRING_FIELD_NAME, "type=keyword").get());
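
The TODO being removed above argues that batched execution makes fully deterministic assertions impractical and suggests relaxed assertions instead. As a sketch of what that could look like, assuming a hypothetical expectedDocCounts fixture in the test (getKeyAsString() and getDocCountError() are real Terms.Bucket accessors):

    // Relaxed check: accept any doc count within the bucket's reported
    // worst-case error instead of demanding an exact match.
    Terms terms = response.getAggregations().get("terms");
    for (Terms.Bucket bucket : terms.getBuckets()) {
        long expected = expectedDocCounts.get(bucket.getKeyAsString()); // hypothetical fixture
        long error = bucket.getDocCountError(); // upper bound on how far off the count can be
        assertThat(bucket.getDocCount(), greaterThanOrEqualTo(expected - error));
        assertThat(bucket.getDocCount(), lessThanOrEqualTo(expected + error));
    }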
=== next file ===
@@ -220,6 +220,7 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception {
             batchedResults = this.batchedResults;
         }
         final int resultSize = buffer.size() + (mergeResult == null ? 0 : 1) + batchedResults.size();
+        final boolean hasBatchedResults = batchedResults.isEmpty() == false;
         final List<TopDocs> topDocsList = hasTopDocs ? new ArrayList<>(resultSize) : null;
         final Deque<DelayableWriteable<InternalAggregations>> aggsList = hasAggs ? new ArrayDeque<>(resultSize) : null;
         // consume partial merge result from the un-batched execution path that is used for BwC, shard-level retries, and shard level
@@ -247,6 +248,10 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception {
         if (aggsList != null) {
             // Add an estimate of the final reduce size
             breakerSize = addEstimateAndMaybeBreak(estimateRamBytesUsedForReduce(breakerSize));
+            AggregationReduceContext aggReduceContext = performFinalReduce
+                ? aggReduceContextBuilder.forFinalReduction()
+                : aggReduceContextBuilder.forPartialReduction();
+            aggReduceContext.setFinalReduceHasBatchedResult(hasBatchedResults);
             aggs = aggregate(buffer.iterator(), new Iterator<>() {
                 @Override
                 public boolean hasNext() {
@@ -257,10 +262,7 @@ public boolean hasNext() {
                 public DelayableWriteable<InternalAggregations> next() {
                     return aggsList.pollFirst();
                 }
-            },
-                resultSize,
-                performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction()
-            );
+            }, resultSize, aggReduceContext);
         } else {
             aggs = null;
         }
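
Read together with the hunk above, the change moves the reduce-context construction out of the aggregate(...) call so the batched-result flag can be set before any reduction runs. A condensed sketch of the resulting flow, with batchedAggsIterator standing in for the anonymous Iterator in the diff:

    // Build one context for the whole reduction and flag it when batched
    // per-data-node results are part of the final reduce.
    final boolean hasBatchedResults = batchedResults.isEmpty() == false;
    AggregationReduceContext aggReduceContext = performFinalReduce
        ? aggReduceContextBuilder.forFinalReduction()
        : aggReduceContextBuilder.forPartialReduction();
    aggReduceContext.setFinalReduceHasBatchedResult(hasBatchedResults);
    // The same context now reaches every per-aggregation reduction.
    aggs = aggregate(buffer.iterator(), batchedAggsIterator, resultSize, aggReduceContext);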
=== next file (AggregationReduceContext.java) ===
@@ -47,6 +47,7 @@ public interface Builder {
     @Nullable
     private final AggregationBuilder builder;
     private final AggregatorFactories.Builder subBuilders;
+    private boolean finalReduceHasBatchedResult;
 
     private AggregationReduceContext(
         BigArrays bigArrays,
@@ -136,6 +137,14 @@ public final AggregationReduceContext forAgg(String name) {

     protected abstract AggregationReduceContext forSubAgg(AggregationBuilder sub);
 
+    public boolean doesFinalReduceHaveBatchedResult() {

Review comment (Contributor): nit: that feels weird, since it depends on isFinalReduce. Maybe rename it to hasBatchedResult, so that callers have to check both isFinalReduce and hasBatchedResult?

+        return finalReduceHasBatchedResult;
+    }
+
+    public void setFinalReduceHasBatchedResult(boolean finalReduceHasBatchedResult) {

Review comment (Contributor): Should this be final and set in the ForFinal constructor?

+        this.finalReduceHasBatchedResult = finalReduceHasBatchedResult;
+    }
+
     /**
      * A {@linkplain AggregationReduceContext} to perform a partial reduction.
      */
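
A sketch of the direction the second comment points at, independent of the surrounding Elasticsearch types: carrying the flag as a final field fixed at construction time, so a final-reduce context cannot be mutated afterwards:

    // Sketch only: constructor-injected, immutable variant of the flag.
    abstract class ReduceContextSketch {
        private final boolean hasBatchedResult; // final, as the reviewer suggests

        ReduceContextSketch(boolean hasBatchedResult) {
            this.hasBatchedResult = hasBatchedResult;
        }

        // Renamed per the first nit: callers pair this with isFinalReduce().
        final boolean hasBatchedResult() {
            return hasBatchedResult;
        }
    }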
@@ -234,7 +243,9 @@ public PipelineTree pipelineTreeRoot() {

         @Override
         protected AggregationReduceContext forSubAgg(AggregationBuilder sub) {
-            return new ForFinal(bigArrays(), scriptService(), isCanceled(), sub, multiBucketConsumer, pipelineTreeRoot);
+            ForFinal subContext = new ForFinal(bigArrays(), scriptService(), isCanceled(), sub, multiBucketConsumer, pipelineTreeRoot);
+            subContext.setFinalReduceHasBatchedResult(doesFinalReduceHaveBatchedResult());
+            return subContext;
         }
     }
 }
=== next file ===
@@ -332,7 +332,10 @@ public InternalAggregation get() {
         }
         long docCountError = -1;
         if (sumDocCountError != -1) {
-            docCountError = size == 1 ? 0 : sumDocCountError;
+            // If we are reducing only one aggregation (size == 1), the doc count error should be 0.
+            // However, the presence of a batched query result implies this is a final reduction and a partial reduction
+            // with size > 1 has already occurred on a data node. The doc count error should not be 0 in this case.
+            docCountError = size == 1 && reduceContext.doesFinalReduceHaveBatchedResult() == false ? 0 : sumDocCountError;

Review comment (Contributor): Does this also handle the case where the partial reduction happens on the coordinating node (when the reduce batch size is reached)?

         }
         return create(name, result, reduceContext.isFinalReduce() ? getOrder() : thisReduceOrder, docCountError, otherDocCount);
     }
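
A worked illustration of why the old size == 1 shortcut breaks under batched execution; the numbers are hypothetical:

    // One data node batch-reduces three shards whose per-shard doc count errors
    // are 2, 3 and 5, and ships a single merged result with sumDocCountError = 10.
    long sumDocCountError = 10;                   // accumulated on the data node
    int size = 1;                                 // the coordinator sees just one result
    boolean finalReduceHasBatchedResult = true;   // set by the consumer earlier in this PR

    // Old logic: size == 1 ? 0 : sumDocCountError  ->  0, wrongly claiming exact counts.
    // New logic preserves the accumulated error:
    long docCountError = size == 1 && finalReduceHasBatchedResult == false ? 0 : sumDocCountError;
    assert docCountError == 10;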