Commit 8d87ab7

fix and enable repository-hdfs secure tests (elastic#44044)
After the recent conversion of `repository-hdfs` to test clusters (elastic#41252), the `integTestSecure*` tasks no longer depended on `secureHdfsFixture`, so running them failed because the fixture was never started. This commit adds that fixture dependency to the tasks. `secureHdfsFixture` is an `AntFixture`, which is spawned as a separate process; internally it waits 30 seconds for its resources to become available. On my local machine it took almost 45 seconds, so the wait time is now an input on `AntFixture` (`maxWaitInSeconds`, defaulting to 30 seconds) and is set to 60 seconds for the secure HDFS fixtures. The secure HDFS integ tests had been disabled for a long time, so the test fixes from elastic#42090 are applied in this commit as well.
1 parent af512f9 commit 8d87ab7
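
For illustration, here is a minimal sketch of how a fixture definition picks up the new input. The task name and args are hypothetical; maxWaitInSeconds, waitCondition, and portsFile are the AntFixture properties this commit actually touches:

    // Hypothetical fixture task; only the AntFixture properties shown in this
    // commit (maxWaitInSeconds, waitCondition, portsFile) are real.
    task exampleHdfsFixture(type: org.elasticsearch.gradle.test.AntFixture) {
        executable = new File(project.runtimeJavaHome, 'bin/java')
        args 'hdfs.MiniHDFS', baseDir                 // assumed main-class arguments
        maxWaitInSeconds 60                           // new @Input, defaults to 30
        waitCondition = { fixture, ant ->
            // the fixture writes its ports file once it is ready
            return fixture.portsFile.exists()
        }
    }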

File tree: 6 files changed (+64 −25)

buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy (+4 −1)

@@ -58,6 +58,9 @@ public class AntFixture extends AntTask implements Fixture {
     @Input
     boolean useShell = false
 
+    @Input
+    int maxWaitInSeconds = 30
+
     /**
      * A flag to indicate whether the fixture should be run in the foreground, or spawned.
      * It is protected so subclasses can override (eg RunTask).
@@ -128,7 +131,7 @@ public class AntFixture extends AntTask implements Fixture {
 
         String failedProp = "failed${name}"
         // first wait for resources, or the failure marker from the wrapper script
-        ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
+        ant.waitfor(maxwait: maxWaitInSeconds, maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
             or {
                 resourceexists {
                     file(file: failureMarker.toString())

plugins/repository-hdfs/build.gradle (+28 −3)

@@ -91,13 +91,13 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
     dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
     executable = new File(project.runtimeJavaHome, 'bin/java')
     env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }"
+    maxWaitInSeconds 60
     onlyIf { project(':test:fixtures:krb5kdc-fixture').buildFixture.enabled }
     waitCondition = { fixture, ant ->
       // the hdfs.MiniHDFS fixture writes the ports file when
       // it's ready, so we can just wait for the file to exist
       return fixture.portsFile.exists()
     }
-
     final List<String> miniHDFSArgs = []
 
     // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options
@@ -125,7 +125,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
     }
   }
 
-Set disabledIntegTestTaskNames = ['integTestSecure', 'integTestSecureHa']
+Set disabledIntegTestTaskNames = []
 
 for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
   task "${integTestTaskName}"(type: RestIntegTestTask) {
@@ -136,10 +136,35 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
       enabled = false;
     }
 
+    if (integTestTaskName.contains("Secure")) {
+      if (integTestTaskName.contains("Ha")) {
+        dependsOn secureHaHdfsFixture
+      } else {
+        dependsOn secureHdfsFixture
+      }
+    }
+
     runner {
+      if (integTestTaskName.contains("Ha")) {
+        if (integTestTaskName.contains("Secure")) {
+          Path path = buildDir.toPath()
+            .resolve("fixtures")
+            .resolve("secureHaHdfsFixture")
+            .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        } else {
+          Path path = buildDir.toPath()
+            .resolve("fixtures")
+            .resolve("haHdfsFixture")
+            .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        }
+      }
+
       if (integTestTaskName.contains("Secure")) {
         if (disabledIntegTestTaskNames.contains(integTestTaskName) == false) {
-          dependsOn secureHdfsFixture
           nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
           nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
           jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
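
A note on the runner wiring above: the path to the fixture's ports file reaches the test JVM through the test.hdfs-fixture.ports system property, and the file is also added to the runner classpath. Under the test security manager, that is presumably what grants the test permission to read it; the test itself still wraps the read in AccessController.doPrivileged, as the next file shows.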

plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java (+25 −14)

@@ -19,16 +19,6 @@
 
 package org.elasticsearch.repositories.hdfs;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.security.AccessController;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.BadFencingConfigurationException;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -46,6 +36,16 @@
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.Assert;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Integration test that runs against an HA-Enabled HDFS instance
  */
@@ -57,13 +57,24 @@ public void testHAFailoverWithRepository() throws Exception {
         String esKerberosPrincipal = System.getProperty("test.krb5.principal.es");
         String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs");
         String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs");
+        String ports = System.getProperty("test.hdfs-fixture.ports");
+        String nn1Port = "10001";
+        String nn2Port = "10002";
+        if (ports.length() > 0) {
+            final Path path = PathUtils.get(ports);
+            final List<String> lines = AccessController.doPrivileged((PrivilegedExceptionAction<List<String>>) () -> {
+                return Files.readAllLines(path);
+            });
+            nn1Port = lines.get(0);
+            nn2Port = lines.get(1);
+        }
         boolean securityEnabled = hdfsKerberosPrincipal != null;
 
         Configuration hdfsConfiguration = new Configuration();
         hdfsConfiguration.set("dfs.nameservices", "ha-hdfs");
         hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2");
-        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001");
-        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002");
+        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + nn1Port);
+        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + nn2Port);
         hdfsConfiguration.set(
             "dfs.client.failover.proxy.provider.ha-hdfs",
             "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
@@ -110,8 +121,8 @@ public void testHAFailoverWithRepository() throws Exception {
             securityCredentials(securityEnabled, esKerberosPrincipal) +
             "\"conf.dfs.nameservices\": \"ha-hdfs\"," +
             "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\"," +
-            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\"," +
-            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\"," +
+            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:"+nn1Port+"\"," +
+            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:"+nn2Port+"\"," +
             "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": " +
             "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" +
             "}" +

plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml (+4 −4)

@@ -48,17 +48,17 @@
         repository: test_snapshot_get_repository
         snapshot: test_snapshot_get
 
-  - length: { snapshots: 1 }
-  - match: { snapshots.0.snapshot : test_snapshot_get }
+  - length: { responses.0.snapshots: 1 }
+  - match: { responses.0.snapshots.0.snapshot : test_snapshot_get }
 
   # List snapshot info
   - do:
       snapshot.get:
         repository: test_snapshot_get_repository
        snapshot: "*"
 
-  - length: { snapshots: 1 }
-  - match: { snapshots.0.snapshot : test_snapshot_get }
+  - length: { responses.0.snapshots: 1 }
+  - match: { responses.0.snapshots.0.snapshot : test_snapshot_get }
 
   # Remove our snapshot
   - do:
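
For context on the assertion changes here and in the next file: elastic#42090 changed the get-snapshots response so that results are grouped per repository. Assuming the shape these assertions imply, the body now looks like {"responses": [{"repository": ..., "snapshots": [...]}]} instead of a top-level {"snapshots": [...]}, so the tests index through responses.0.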

plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml (+1 −1)

@@ -23,7 +23,7 @@
         repository: test_snapshot_repository_ro
         snapshot: "_all"
 
-  - length: { snapshots: 1 }
+  - length: { responses.0.snapshots: 1 }
 
   # Remove our repository
   - do:

test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java (+2 −2)

@@ -109,8 +109,8 @@ public static void main(String[] args) throws Exception {
         String haNameService = System.getProperty("ha-nameservice");
         boolean haEnabled = haNameService != null;
         if (haEnabled) {
-            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001);
-            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002);
+            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0);
+            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0);
             MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2);
             MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice);
             builder.nnTopology(namenodeTopology);
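
Binding both NameNodes to port 0 makes the OS assign free ephemeral ports, which is what forces the ports-file indirection above: the fixture must ask the running cluster for the ports it actually got and publish them. A minimal sketch of that pattern, assuming a builder and a portsFile Path exist in the surrounding fixture code:

    import org.apache.hadoop.hdfs.MiniDFSCluster
    import java.nio.charset.StandardCharsets
    import java.nio.file.Files

    // Sketch: after builder.build(), recover the ports the OS assigned
    // (port 0 means "pick any free port") and publish them for the tests.
    // 'builder' and 'portsFile' are assumed from the surrounding code.
    MiniDFSCluster cluster = builder.build()
    List<String> boundPorts = [
        String.valueOf(cluster.getNameNodePort(0)),   // nn1
        String.valueOf(cluster.getNameNodePort(1)),   // nn2
    ]
    Files.write(portsFile, boundPorts, StandardCharsets.UTF_8)

The HA failover test then reads these two lines back via the test.hdfs-fixture.ports system property, as shown in HaHdfsFailoverTestSuiteIT.java above.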
