Commit b439b1d

Merge remote-tracking branch 'origin/7.0.x'
* origin/7.0.x: Remove ZooKeeperClient/KafkaZkClient usage from tests (#893)
2 parents 277963b + f0595b9 commit b439b1d

File tree: 6 files changed, +10 -69 lines

kafka-rest/src/test/java/io/confluent/kafkarest/integration/AuthorizationErrorTest.java (+1, -3)

@@ -44,7 +44,6 @@
 import org.junit.Before;
 import org.junit.Test;
 import scala.Option;
-import scala.collection.JavaConverters;
 
 public class AuthorizationErrorTest
     extends AbstractProducerTest<BinaryTopicProduceRequest, BinaryPartitionProduceRequest> {
@@ -71,8 +70,7 @@ public class AuthorizationErrorTest
   @Before
   public void setUp() throws Exception {
     super.setUp();
-    kafka.utils.TestUtils.createTopic(
-        zkClient, TOPIC_NAME, 1, 1, JavaConverters.asScalaBuffer(this.servers), new Properties());
+    createTopic(TOPIC_NAME, 1, (short) 1);
   }
 
   @Override

kafka-rest/src/test/java/io/confluent/kafkarest/integration/AvroProducerTest.java (+2, -9)

@@ -38,7 +38,6 @@
 import org.apache.avro.Schema;
 import org.junit.Before;
 import org.junit.Test;
-import scala.collection.JavaConverters;
 
 // This test is much lighter than the Binary one which exercises all variants. Since binary
 // covers most code paths well, this just tries to exercise Schema-specific parts.
@@ -112,14 +111,8 @@ public AvroProducerTest() {
   public void setUp() throws Exception {
     super.setUp();
     final int numPartitions = 3;
-    final int replicationFactor = 1;
-    kafka.utils.TestUtils.createTopic(
-        zkClient,
-        topicName,
-        numPartitions,
-        replicationFactor,
-        JavaConverters.asScalaBuffer(this.servers),
-        new Properties());
+    final short replicationFactor = 1;
+    createTopic(topicName, numPartitions, replicationFactor);
 
     deserializerProps = new Properties();
     deserializerProps.setProperty("schema.registry.url", schemaRegConnect);

kafka-rest/src/test/java/io/confluent/kafkarest/integration/ClusterTestHarness.java (-21)

@@ -54,8 +54,6 @@
 import kafka.utils.MockTime;
 import kafka.utils.TestUtils;
 import kafka.zk.EmbeddedZookeeper;
-import kafka.zk.KafkaZkClient;
-import kafka.zookeeper.ZooKeeperClient;
 import org.apache.kafka.clients.admin.AdminClient;
 import org.apache.kafka.clients.admin.AdminClientConfig;
 import org.apache.kafka.clients.admin.AlterConfigOp;
@@ -72,10 +70,8 @@
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.config.ConfigResource;
-import org.apache.kafka.common.security.JaasUtils;
 import org.apache.kafka.common.security.auth.SecurityProtocol;
 import org.apache.kafka.common.serialization.ByteArraySerializer;
-import org.apache.kafka.common.utils.Time;
 import org.eclipse.jetty.server.Server;
 import org.junit.After;
 import org.junit.Assert;
@@ -122,7 +118,6 @@ public static int choosePort() {
   // ZK Config
   protected String zkConnect;
   protected EmbeddedZookeeper zookeeper;
-  protected KafkaZkClient zkClient;
   protected int zkConnectionTimeout = 10000;
   protected int zkSessionTimeout = 6000;
 
@@ -169,21 +164,6 @@ public Properties overrideSchemaRegistryProps(Properties props) {
   @Before
   public void setUp() throws Exception {
     zookeeper = new EmbeddedZookeeper();
-    zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
-    Time time = Time.SYSTEM;
-    zkClient =
-        new KafkaZkClient(
-            new ZooKeeperClient(
-                zkConnect,
-                zkSessionTimeout,
-                zkConnectionTimeout,
-                Integer.MAX_VALUE,
-                time,
-                "testMetricGroup",
-                "testMetricGroupType"),
-            JaasUtils.isZkSaslEnabled(),
-            time);
-
     // start brokers concurrently
     startBrokersConcurrently(numBrokers);
 
@@ -327,7 +307,6 @@ public void tearDown() throws Exception {
      CoreUtils.delete(server.config().logDirs());
    }
 
-    zkClient.close();
    zookeeper.shutdown();
  }
 
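Note on the replacement helper: the tests in this commit now call a createTopic(topicName, numPartitions, replicationFactor) method from ClusterTestHarness instead of going through kafka.utils.TestUtils with a KafkaZkClient. The commit only shows the call sites, not the helper itself; the sketch below is an assumption of how such a helper can be written on top of the AdminClient that ClusterTestHarness already imports (the brokerList field and the exact placement of the method are assumed, not taken from the repository):

  // Hypothetical sketch of an AdminClient-based topic-creation helper.
  // Assumes a brokerList bootstrap string exists in the harness; needs
  // org.apache.kafka.clients.admin.NewTopic and java.util.Collections imports.
  protected void createTopic(String topicName, int numPartitions, short replicationFactor)
      throws Exception {
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); // assumed field
    try (AdminClient adminClient = AdminClient.create(props)) {
      // Create the topic and block until the request completes.
      adminClient
          .createTopics(
              Collections.singletonList(new NewTopic(topicName, numPartitions, replicationFactor)))
          .all()
          .get();
    }
  }

Creating topics through the AdminClient keeps the integration tests independent of ZooKeeper internals, which is what allows the KafkaZkClient/ZooKeeperClient plumbing above to be deleted.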

kafka-rest/src/test/java/io/confluent/kafkarest/integration/JsonProducerTest.java (+2, -10)

@@ -26,10 +26,8 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 import org.junit.Before;
 import org.junit.Test;
-import scala.collection.JavaConverters;
 
 public class JsonProducerTest
     extends AbstractProducerTest<JsonTopicProduceRequest, JsonPartitionProduceRequest> {
@@ -41,14 +39,8 @@ public class JsonProducerTest
   public void setUp() throws Exception {
     super.setUp();
     final int numPartitions = 3;
-    final int replicationFactor = 1;
-    kafka.utils.TestUtils.createTopic(
-        zkClient,
-        topicName,
-        numPartitions,
-        replicationFactor,
-        JavaConverters.asScalaBuffer(this.servers),
-        new Properties());
+    final short replicationFactor = 1;
+    createTopic(topicName, numPartitions, replicationFactor);
   }
 
   @Override

kafka-rest/src/test/java/io/confluent/kafkarest/integration/MetadataAPITest.java (+3, -16)

@@ -36,7 +36,6 @@
 import javax.ws.rs.core.Response;
 import org.junit.Before;
 import org.junit.Test;
-import scala.collection.JavaConverters;
 
 /**
  * Tests metadata access against a real cluster. This isn't exhaustive since the unit tests cover
@@ -80,7 +79,7 @@ public class MetadataAPITest extends ClusterTestHarness {
     topic2Configs.setProperty("cleanup.policy", "delete");
   }
 
-  private static final int numReplicas = 2;
+  private static final short numReplicas = 2;
 
   public MetadataAPITest() {
     super(2, false);
@@ -90,20 +89,8 @@ public MetadataAPITest() {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    kafka.utils.TestUtils.createTopic(
-        zkClient,
-        topic1Name,
-        topic1Partitions.size(),
-        numReplicas,
-        JavaConverters.asScalaBuffer(this.servers),
-        new Properties());
-    kafka.utils.TestUtils.createTopic(
-        zkClient,
-        topic2Name,
-        topic2Partitions.size(),
-        numReplicas,
-        JavaConverters.asScalaBuffer(this.servers),
-        topic2Configs);
+    createTopic(topic1Name, topic1Partitions.size(), numReplicas);
+    createTopic(topic2Name, topic2Partitions.size(), numReplicas);
   }
 
   @Test

kafka-rest/src/test/java/io/confluent/kafkarest/integration/ProducerTest.java (+2, -10)

@@ -24,13 +24,11 @@
 import io.confluent.kafkarest.entities.v2.PartitionOffset;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Properties;
 import javax.ws.rs.client.Entity;
 import javax.ws.rs.core.Response;
 import org.apache.kafka.common.serialization.ByteArrayDeserializer;
 import org.junit.Before;
 import org.junit.Test;
-import scala.collection.JavaConverters;
 
 public class ProducerTest
     extends AbstractProducerTest<BinaryTopicProduceRequest, BinaryPartitionProduceRequest> {
@@ -108,14 +106,8 @@ public class ProducerTest
   public void setUp() throws Exception {
     super.setUp();
     final int numPartitions = 3;
-    final int replicationFactor = 1;
-    kafka.utils.TestUtils.createTopic(
-        zkClient,
-        topicName,
-        numPartitions,
-        replicationFactor,
-        JavaConverters.asScalaBuffer(this.servers),
-        new Properties());
+    final short replicationFactor = 1;
+    createTopic(topicName, numPartitions, replicationFactor);
   }
 
   @Test
