|
56 | 56 |
|
57 | 57 | import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
|
58 | 58 | import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet;
|
| 59 | +import static org.hamcrest.Matchers.greaterThanOrEqualTo; |
59 | 60 |
|
60 | 61 | public class SearchAsyncActionTests extends ESTestCase {
|
61 | 62 |
|
@@ -371,6 +372,102 @@ protected void executeNext(Runnable runnable, Thread originalThread) {
|
371 | 372 | executor.shutdown();
|
372 | 373 | }
|
373 | 374 |
|
    /**
     * Verifies that when partial results are NOT allowed ({@code allowPartialSearchResults(false)})
     * the async action keeps retrying a failed shard copy on the remaining copies instead of
     * failing the whole search: every shard must eventually produce a result (here the replica
     * succeeds after the primary copy fails), the next phase must run exactly once, and at least
     * one retry must have been observed.
     */
    public void testAllowPartialResults() throws InterruptedException {
        SearchRequest request = new SearchRequest();
        request.allowPartialSearchResults(false);
        int numConcurrent = randomIntBetween(1, 5);
        request.setMaxConcurrentShardRequests(numConcurrent);
        int numShards = randomIntBetween(5, 10);
        AtomicBoolean searchPhaseDidRun = new AtomicBoolean(false);
        // the response listener must never be invoked with a failure: with partial results
        // disallowed, retries on the replica keep the search alive
        ActionListener<SearchResponse> responseListener = ActionListener.wrap(response -> {},
            (e) -> { throw new AssertionError("unexpected", e);} );
        DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT);
        // for the sake of this test we place the replica on the same node. ie. this is not a mistake since we limit per node now
        DiscoveryNode replicaNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT);

        AtomicInteger contextIdGenerator = new AtomicInteger(0);
        GroupShardsIterator<SearchShardIterator> shardsIter = getShardsIter("idx",
            new OriginalIndices(new String[]{"idx"}, SearchRequest.DEFAULT_INDICES_OPTIONS),
            numShards, true, primaryNode, replicaNode);
        // total number of shard-copy attempts the action can make (primary + replica per shard);
        // the latch below is counted down once per executed shard request, so awaiting it means
        // every attempt — including retries — has completed
        int numShardAttempts = 0;
        for (SearchShardIterator it : shardsIter) {
            numShardAttempts += it.remaining();
        }
        CountDownLatch latch = new CountDownLatch(numShardAttempts);

        SearchTransportService transportService = new SearchTransportService(null, null);
        Map<String, Transport.Connection> lookup = new HashMap<>();
        // tracks which shard ids have already been counted so numRequests is per-shard, not per-copy
        Map<ShardId, Boolean> seenShard = new ConcurrentHashMap<>();
        lookup.put(primaryNode.getId(), new MockConnection(primaryNode));
        lookup.put(replicaNode.getId(), new MockConnection(replicaNode));
        Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
        AtomicInteger numRequests = new AtomicInteger(0);
        AtomicInteger numFailReplicas = new AtomicInteger(0);
        AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction =
            new AbstractSearchAsyncAction<>(
                "test",
                logger,
                transportService,
                (cluster, node) -> {
                    assert cluster == null : "cluster was not null: " + cluster;
                    return lookup.get(node); },
                aliasFilters,
                Collections.emptyMap(),
                Collections.emptyMap(),
                null,
                request,
                responseListener,
                shardsIter,
                new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0),
                0,
                null,
                new InitialSearchPhase.ArraySearchPhaseResults<>(shardsIter.size()),
                request.getMaxConcurrentShardRequests(),
                SearchResponse.Clusters.EMPTY) {

                @Override
                protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard,
                                                   SearchActionListener<TestSearchPhaseResult> listener) {
                    seenShard.computeIfAbsent(shard.shardId(), (i) -> {
                        numRequests.incrementAndGet(); // only count this once per shard copy
                        return Boolean.TRUE;
                    });
                    // respond asynchronously on a separate thread, mimicking a real transport call
                    new Thread(() -> {
                        Transport.Connection connection = getConnection(null, shard.currentNodeId());
                        TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult(contextIdGenerator.incrementAndGet(),
                            connection.getNode());
                        // fail every copy that still has a fallback remaining (the primary attempt),
                        // forcing the action to retry; the last copy (the replica) succeeds
                        if (shardIt.remaining() > 0) {
                            numFailReplicas.incrementAndGet();
                            listener.onFailure(new RuntimeException());
                        } else {
                            listener.onResponse(testSearchPhaseResult);
                        }
                    }).start();
                }

                @Override
                protected SearchPhase getNextPhase(SearchPhaseResults<TestSearchPhaseResult> results, SearchPhaseContext context) {
                    return new SearchPhase("test") {
                        @Override
                        public void run() {
                            // must run exactly once despite the per-shard failures/retries
                            assertTrue(searchPhaseDidRun.compareAndSet(false, true));
                        }
                    };
                }

                @Override
                protected void executeNext(Runnable runnable, Thread originalThread) {
                    super.executeNext(runnable, originalThread);
                    latch.countDown(); // one count-down per completed shard attempt
                }
            };
        asyncAction.start();
        latch.await();
        assertTrue(searchPhaseDidRun.get());
        // every shard was reached (counted once per shard id, regardless of retries)
        assertEquals(numShards, numRequests.get());
        // at least one primary attempt failed and was retried on its replica
        assertThat(numFailReplicas.get(), greaterThanOrEqualTo(1));
    }
| 470 | + |
374 | 471 | static GroupShardsIterator<SearchShardIterator> getShardsIter(String index, OriginalIndices originalIndices, int numShards,
|
375 | 472 | boolean doReplicas, DiscoveryNode primaryNode, DiscoveryNode replicaNode) {
|
376 | 473 | ArrayList<SearchShardIterator> list = new ArrayList<>();
|
|
0 commit comments