
Commit 0d9b788

LLClient: Support host selection (#30523)
Allows users of the Low Level REST client to specify which hosts a request should be run on. They implement the `NodeSelector` interface or reuse a built-in selector like `NOT_MASTER_ONLY` to choose which nodes are valid. Using it looks like:

```
Request request = new Request("POST", "/foo/_search");
RequestOptions options = request.getOptions().toBuilder();
options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
request.setOptions(options);
...
```

This introduces a new `Node` object which contains an `HttpHost` and metadata about the host. At this point that metadata is just `version` and `roles`, but I plan to add node attributes in a follow-up. The canonical way to **get** this metadata is to use the `Sniffer` to pull the information from the Elasticsearch cluster.

I've marked this as "breaking-java" because it breaks custom implementations of `HostsSniffer` by renaming the interface to `NodesSniffer` and by changing it from returning a `List<HttpHost>` to a `List<Node>`. It *shouldn't* break anyone else, though.

Because we expect to find it useful, this also adds `host_selector` support to `do` statements in the yaml tests. Using it looks a little like:

```
---
"example test":
  - skip:
      features: host_selector
  - do:
      host_selector:
          version: " - 7.0.0" # same syntax as skip
      apiname:
          something: true
```

The `do` section parses the `version` string into a host selector that uses the same version comparison logic as the `skip` section. When the `do` section is executed it passes the selector off to the `RestClient`, using the `ElasticsearchHostsSniffer` to sniff the required metadata.

The idea is to use this in mixed version tests to target a specific version of Elasticsearch so we can be sure about the deprecation logging, though we don't currently have any examples that need it. We do, however, have at least one open pull request that requires something like this to properly test it.

Closes #21888
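As a concrete illustration of implementing the `NodeSelector` interface described above, here is a hedged sketch of a custom selector that keeps only ingest-capable nodes. It uses only the `Node` and `NodeSelector` APIs introduced in this commit; the class name `IngestOnlyNodeSelector` is made up for the example and is not part of the change:

```java
import java.util.Iterator;

import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

/**
 * Hypothetical custom selector: drop every node that is known not to run
 * ingest pipelines, keeping nodes whose roles are unknown.
 */
public class IngestOnlyNodeSelector implements NodeSelector {
    @Override
    public void select(Iterable<Node> nodes) {
        for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
            Node node = itr.next();
            if (node.getRoles() != null && false == node.getRoles().isIngest()) {
                itr.remove();
            }
        }
    }

    @Override
    public String toString() {
        return "INGEST_ONLY";
    }
}
```

An instance of this selector would then be passed to `RequestOptions.Builder#setNodeSelector`, exactly like `NodeSelector.NOT_MASTER_ONLY` in the snippet above.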
1 parent 563141c commit 0d9b788


57 files changed, +2431 -464 lines changed


client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java

Lines changed: 14 additions & 5 deletions

@@ -29,7 +29,7 @@
 final class DeadHostState implements Comparable<DeadHostState> {
 
     private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1);
-    private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
+    static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
 
     private final int failedAttempts;
     private final long deadUntilNanos;
@@ -55,12 +55,12 @@ final class DeadHostState implements Comparable<DeadHostState> {
      *
      * @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt
      */
-    DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) {
+    DeadHostState(DeadHostState previousDeadHostState) {
         long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1),
                 MAX_CONNECTION_TIMEOUT_NANOS);
-        this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos;
+        this.deadUntilNanos = previousDeadHostState.timeSupplier.nanoTime() + timeoutNanos;
         this.failedAttempts = previousDeadHostState.failedAttempts + 1;
-        this.timeSupplier = timeSupplier;
+        this.timeSupplier = previousDeadHostState.timeSupplier;
     }
 
     /**
@@ -86,6 +86,10 @@ int getFailedAttempts() {
 
     @Override
     public int compareTo(DeadHostState other) {
+        if (timeSupplier != other.timeSupplier) {
+            throw new IllegalArgumentException("can't compare DeadHostStates with different clocks ["
+                    + timeSupplier + " != " + other.timeSupplier + "]");
+        }
         return Long.compare(deadUntilNanos, other.deadUntilNanos);
     }
 
@@ -94,19 +98,24 @@ public String toString() {
         return "DeadHostState{" +
                 "failedAttempts=" + failedAttempts +
                 ", deadUntilNanos=" + deadUntilNanos +
+                ", timeSupplier=" + timeSupplier +
                 '}';
     }
 
     /**
      * Time supplier that makes timing aspects pluggable to ease testing
      */
     interface TimeSupplier {
-
         TimeSupplier DEFAULT = new TimeSupplier() {
             @Override
             public long nanoTime() {
                 return System.nanoTime();
             }
+
+            @Override
+            public String toString() {
+                return "nanoTime";
+            }
         };
 
         long nanoTime();
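For reference, the backoff formula left unchanged in the constructor above grows roughly exponentially with the number of previous failed attempts and is capped at `MAX_CONNECTION_TIMEOUT_NANOS`. The following standalone sketch is not part of the commit; it simply copies the two constants and the formula from the file above and prints the progression:

```java
import java.util.concurrent.TimeUnit;

/**
 * Standalone sketch: evaluates the (unchanged) backoff formula from the
 * DeadHostState constructor above to show how the retry timeout grows with
 * the number of previous failed attempts, capped at 30 minutes.
 */
public class DeadHostBackoffSketch {
    static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1);
    static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);

    public static void main(String[] args) {
        for (int previousFailedAttempts = 1; previousFailedAttempts <= 10; previousFailedAttempts++) {
            long timeoutNanos = (long) Math.min(
                    MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousFailedAttempts * 0.5 - 1),
                    MAX_CONNECTION_TIMEOUT_NANOS);
            // e.g. 1 -> ~84s, 2 -> 120s, 3 -> ~169s, ... 10 -> 1800s (the cap)
            System.out.println(previousFailedAttempts + " previous failed attempt(s) -> retry in "
                    + TimeUnit.NANOSECONDS.toSeconds(timeoutNanos) + "s");
        }
    }
}
```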
client/rest/src/main/java/org/elasticsearch/client/Node.java

Lines changed: 213 additions & 0 deletions (new file)

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import java.util.Objects;
import java.util.Set;

import org.apache.http.HttpHost;

/**
 * Metadata about an {@link HttpHost} running Elasticsearch.
 */
public class Node {
    /**
     * Address that this host claims is its primary contact point.
     */
    private final HttpHost host;
    /**
     * Addresses on which the host is listening. These are useful to have
     * around because they allow you to find a host based on any address it
     * is listening on.
     */
    private final Set<HttpHost> boundHosts;
    /**
     * Name of the node as configured by the {@code node.name} attribute.
     */
    private final String name;
    /**
     * Version of Elasticsearch that the node is running or {@code null}
     * if we don't know the version.
     */
    private final String version;
    /**
     * Roles that the Elasticsearch process on the host has or {@code null}
     * if we don't know what roles the node has.
     */
    private final Roles roles;

    /**
     * Create a {@linkplain Node} with metadata. All parameters except
     * {@code host} are nullable and implementations of {@link NodeSelector}
     * need to decide what to do in their absence.
     */
    public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles) {
        if (host == null) {
            throw new IllegalArgumentException("host cannot be null");
        }
        this.host = host;
        this.boundHosts = boundHosts;
        this.name = name;
        this.version = version;
        this.roles = roles;
    }

    /**
     * Create a {@linkplain Node} without any metadata.
     */
    public Node(HttpHost host) {
        this(host, null, null, null, null);
    }

    /**
     * Contact information for the host.
     */
    public HttpHost getHost() {
        return host;
    }

    /**
     * Addresses on which the host is listening. These are useful to have
     * around because they allow you to find a host based on any address it
     * is listening on.
     */
    public Set<HttpHost> getBoundHosts() {
        return boundHosts;
    }

    /**
     * The {@code node.name} of the node.
     */
    public String getName() {
        return name;
    }

    /**
     * Version of Elasticsearch that the node is running or {@code null}
     * if we don't know the version.
     */
    public String getVersion() {
        return version;
    }

    /**
     * Roles that the Elasticsearch process on the host has or {@code null}
     * if we don't know what roles the node has.
     */
    public Roles getRoles() {
        return roles;
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append("[host=").append(host);
        if (boundHosts != null) {
            b.append(", bound=").append(boundHosts);
        }
        if (name != null) {
            b.append(", name=").append(name);
        }
        if (version != null) {
            b.append(", version=").append(version);
        }
        if (roles != null) {
            b.append(", roles=").append(roles);
        }
        return b.append(']').toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        Node other = (Node) obj;
        return host.equals(other.host)
            && Objects.equals(boundHosts, other.boundHosts)
            && Objects.equals(name, other.name)
            && Objects.equals(version, other.version)
            && Objects.equals(roles, other.roles);
    }

    @Override
    public int hashCode() {
        return Objects.hash(host, boundHosts, name, version, roles);
    }

    /**
     * Role information about an Elasticsearch process.
     */
    public static final class Roles {
        private final boolean masterEligible;
        private final boolean data;
        private final boolean ingest;

        public Roles(boolean masterEligible, boolean data, boolean ingest) {
            this.masterEligible = masterEligible;
            this.data = data;
            this.ingest = ingest;
        }

        /**
         * Returns whether or not the node <strong>could</strong> be elected master.
         */
        public boolean isMasterEligible() {
            return masterEligible;
        }
        /**
         * Returns whether or not the node stores data.
         */
        public boolean isData() {
            return data;
        }
        /**
         * Returns whether or not the node runs ingest pipelines.
         */
        public boolean isIngest() {
            return ingest;
        }

        @Override
        public String toString() {
            StringBuilder result = new StringBuilder(3);
            if (masterEligible) result.append('m');
            if (data) result.append('d');
            if (ingest) result.append('i');
            return result.toString();
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Roles other = (Roles) obj;
            return masterEligible == other.masterEligible
                && data == other.data
                && ingest == other.ingest;
        }

        @Override
        public int hashCode() {
            return Objects.hash(masterEligible, data, ingest);
        }
    }
}
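A hedged sketch of how this metadata might be assembled by hand. Per the commit message, the canonical source is the `Sniffer`; the address, node name, and version below are made up for illustration:

```java
import java.util.Collections;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;

public class NodeMetadataSketch {
    public static void main(String[] args) {
        HttpHost host = new HttpHost("192.168.1.10", 9200, "http");
        // masterEligible=false, data=true, ingest=true -> roles print as "di"
        Node node = new Node(host, Collections.singleton(host), "node-1", "7.0.0",
                new Node.Roles(false, true, true));

        System.out.println(node);
        // [host=http://192.168.1.10:9200, bound=[http://192.168.1.10:9200], name=node-1, version=7.0.0, roles=di]
        System.out.println(node.getRoles().isMasterEligible()); // false

        // The no-metadata constructor leaves everything but the host null.
        Node bare = new Node(new HttpHost("localhost", 9200));
        System.out.println(bare.getVersion()); // null
    }
}
```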
client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java

Lines changed: 90 additions & 0 deletions (new file)

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import java.util.Iterator;

/**
 * Selects nodes that can receive requests. Used to keep requests away
 * from master nodes or to send them to nodes with a particular attribute.
 * Use with {@link RequestOptions.Builder#setNodeSelector(NodeSelector)}.
 */
public interface NodeSelector {
    /**
     * Select the {@link Node}s to which to send requests. This is called with
     * a mutable {@link Iterable} of {@linkplain Node}s in the order that the
     * rest client would prefer to use them and implementers should remove
     * nodes that should not receive the request. Implementers may
     * iterate the nodes as many times as they need.
     * <p>
     * This may be called twice per request: first for "living" nodes that
     * have not been blacklisted by previous errors. If the selector removes
     * all nodes from the list or if there aren't any living nodes then the
     * {@link RestClient} will call this method with a list of "dead" nodes.
     * <p>
     * Implementers should not rely on the ordering of the nodes.
     */
    void select(Iterable<Node> nodes);
    /*
     * We were fairly careful with our choice of Iterable here. The caller has
     * a List but reordering the list is likely to break round robin. Luckily
     * Iterable doesn't allow any reordering.
     */

    /**
     * Selector that matches any node.
     */
    NodeSelector ANY = new NodeSelector() {
        @Override
        public void select(Iterable<Node> nodes) {
            // Intentionally does nothing
        }

        @Override
        public String toString() {
            return "ANY";
        }
    };

    /**
     * Selector that matches any node that has metadata and doesn't
     * have the {@code master} role OR it has the {@code data} role.
     */
    NodeSelector NOT_MASTER_ONLY = new NodeSelector() {
        @Override
        public void select(Iterable<Node> nodes) {
            for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
                Node node = itr.next();
                if (node.getRoles() == null) continue;
                if (node.getRoles().isMasterEligible()
                        && false == node.getRoles().isData()
                        && false == node.getRoles().isIngest()) {
                    itr.remove();
                }
            }
        }

        @Override
        public String toString() {
            return "NOT_MASTER_ONLY";
        }
    };
}
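A hedged sketch (not part of the commit) of what `NOT_MASTER_ONLY` does to a candidate list; the hosts and role assignments below are made up:

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

public class NotMasterOnlySketch {
    public static void main(String[] args) {
        // Made-up cluster: a dedicated master, a data+ingest node, and a node with unknown roles.
        Node dedicatedMaster = new Node(new HttpHost("master0", 9200), null, "master0", "7.0.0",
                new Node.Roles(true, false, false));
        Node dataNode = new Node(new HttpHost("data0", 9200), null, "data0", "7.0.0",
                new Node.Roles(true, true, true));
        Node unknownRoles = new Node(new HttpHost("unknown0", 9200));

        List<Node> candidates = new ArrayList<>();
        candidates.add(dedicatedMaster);
        candidates.add(dataNode);
        candidates.add(unknownRoles);

        // Removes only the dedicated master; a List works because it is a mutable Iterable.
        NodeSelector.NOT_MASTER_ONLY.select(candidates);
        System.out.println(candidates); // the data node and the unknown-roles node remain
    }
}
```

Note that nodes without role metadata are left in place, matching the `continue` in the implementation above.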

client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java

Lines changed: 3 additions & 3 deletions

@@ -87,14 +87,14 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR
     /**
      * Logs a request that failed
      */
-    static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) {
+    static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) {
         if (logger.isDebugEnabled()) {
-            logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
+            logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e);
         }
         if (tracer.isTraceEnabled()) {
             String traceRequest;
             try {
-                traceRequest = buildTraceRequest(request, host);
+                traceRequest = buildTraceRequest(request, node.getHost());
             } catch (IOException e1) {
                 tracer.trace("error while reading request for trace purposes", e);
                 traceRequest = "";