
Commit ec3bc09

MINOR: remove some obsolete zk tests (#17913)
Reviewers: David Arthur <[email protected]>, Chia-Ping Tsai <[email protected]>
1 parent acd92be commit ec3bc09

6 files changed: +17 -448 lines

core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala

+4-72
@@ -25,7 +25,6 @@ import org.apache.kafka.clients.admin._
 import org.apache.kafka.clients.producer.ProducerRecord
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.config.TopicConfig
-import org.apache.kafka.common.record.RecordBatch
 import org.apache.kafka.common.requests.ListOffsetsResponse
 import org.apache.kafka.common.utils.{MockTime, Time, Utils}
 import org.apache.kafka.server.config.ServerLogConfigs
@@ -45,7 +44,6 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness {
   private val topicNameWithCustomConfigs = "foo2"
   private var adminClient: Admin = _
   private val mockTime: Time = new MockTime(1)
-  private var version = RecordBatch.MAGIC_VALUE_V2
   private val dataFolder = Seq(tempDir().getAbsolutePath, tempDir().getAbsolutePath)

   @BeforeEach
@@ -73,20 +71,6 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness {
     assertEquals(ListOffsetsResponse.UNKNOWN_TIMESTAMP, maxTimestampOffset.timestamp())
   }

-  @ParameterizedTest
-  @ValueSource(strings = Array("zk"))
-  def testListVersion0(quorum: String): Unit = {
-    // create records for version 0
-    createMessageFormatBrokers(RecordBatch.MAGIC_VALUE_V0)
-    produceMessagesInSeparateBatch()
-
-    // update version to version 1 to list offset for max timestamp
-    createMessageFormatBrokers(RecordBatch.MAGIC_VALUE_V1)
-    // the offset of max timestamp is always -1 if the batch version is 0
-    verifyListOffsets(expectedMaxTimestampOffset = -1)
-  }
-
-
   @ParameterizedTest
   @ValueSource(strings = Array("kraft"))
   def testThreeCompressedRecordsInOneBatch(quorum: String): Unit = {
@@ -129,38 +113,6 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness {
     verifyListOffsets(topic = topicNameWithCustomConfigs, 2)
   }

-  // The message conversion test only run in ZK mode because KRaft mode doesn't support "inter.broker.protocol.version" < 3.0
-  @ParameterizedTest
-  @ValueSource(strings = Array("zk"))
-  def testThreeRecordsInOneBatchWithMessageConversion(quorum: String): Unit = {
-    createMessageFormatBrokers(RecordBatch.MAGIC_VALUE_V1)
-    produceMessagesInOneBatch()
-    verifyListOffsets()
-
-    // test LogAppendTime case
-    setUpForLogAppendTimeCase()
-    produceMessagesInOneBatch(topic = topicNameWithCustomConfigs)
-    // In LogAppendTime's case, the maxTimestampOffset should be the first message of the batch.
-    // So in this one batch test, it'll be the first offset 0
-    verifyListOffsets(topic = topicNameWithCustomConfigs, 0)
-  }
-
-  // The message conversion test only run in ZK mode because KRaft mode doesn't support "inter.broker.protocol.version" < 3.0
-  @ParameterizedTest
-  @ValueSource(strings = Array("zk"))
-  def testThreeRecordsInSeparateBatchWithMessageConversion(quorum: String): Unit = {
-    createMessageFormatBrokers(RecordBatch.MAGIC_VALUE_V1)
-    produceMessagesInSeparateBatch()
-    verifyListOffsets()
-
-    // test LogAppendTime case
-    setUpForLogAppendTimeCase()
-    produceMessagesInSeparateBatch(topic = topicNameWithCustomConfigs)
-    // In LogAppendTime's case, the maxTimestampOffset is the message in the last batch since we advance the time
-    // for each batch, So it'll be the last offset 2
-    verifyListOffsets(topic = topicNameWithCustomConfigs, 2)
-  }
-
   @ParameterizedTest
   @ValueSource(strings = Array("kraft"))
   def testThreeRecordsInOneBatchHavingDifferentCompressionTypeWithServer(quorum: String): Unit = {
@@ -201,15 +153,6 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness {
     createTopicWithConfig(topicNameWithCustomConfigs, props)
   }

-  private def createMessageFormatBrokers(recordVersion: Byte): Unit = {
-    version = recordVersion
-    recreateBrokers(reconfigure = true, startup = true)
-    Utils.closeQuietly(adminClient, "ListOffsetsAdminClient")
-    adminClient = Admin.create(Map[String, Object](
-      AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers()
-    ).asJava)
-  }
-
   private def createTopicWithConfig(topic: String, props: Properties): Unit = {
     createTopic(topic, 1, 1.toShort, topicConfig = props)
   }
@@ -224,12 +167,9 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness {

     val maxTimestampOffset = runFetchOffsets(adminClient, OffsetSpec.maxTimestamp(), topic)
     assertEquals(expectedMaxTimestampOffset, maxTimestampOffset.offset())
-    if (version >= RecordBatch.MAGIC_VALUE_V2)
-      // the epoch is related to the returned offset.
-      // Hence, it should be zero (the earliest leader epoch), regardless of new leader election
-      assertEquals(Optional.of(0), maxTimestampOffset.leaderEpoch())
-    else
-      assertEquals(Optional.empty(), maxTimestampOffset.leaderEpoch())
+    // the epoch is related to the returned offset.
+    // Hence, it should be zero (the earliest leader epoch), regardless of new leader election
+    assertEquals(Optional.of(0), maxTimestampOffset.leaderEpoch())
   }

   // case 0: test the offsets from leader's append path
@@ -336,15 +276,7 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness {
   }

   def generateConfigs: Seq[KafkaConfig] = {
-    TestUtils.createBrokerConfigs(2, zkConnectOrNull).zipWithIndex.map{ case (props, index) =>
-      if (version == RecordBatch.MAGIC_VALUE_V0) {
-        props.setProperty("log.message.format.version", "0.9.0")
-        props.setProperty("inter.broker.protocol.version", "0.9.0")
-      }
-      if (version == RecordBatch.MAGIC_VALUE_V1) {
-        props.setProperty("log.message.format.version", "0.10.0")
-        props.setProperty("inter.broker.protocol.version", "0.10.0")
-      }
+    TestUtils.createBrokerConfigs(2, null).zipWithIndex.map{ case (props, index) =>
       // We use mock timer so the records can get removed if the test env is too busy to complete
       // tests before kafka-log-retention. Hence, we disable the retention to avoid failed tests
       props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, "-1")
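
For reference, the max-timestamp lookup that the simplified assertion exercises is plain Admin-client functionality. Below is a minimal standalone sketch (not part of this commit) of querying the offset of the record with the largest timestamp; the topic name `foo` and the `localhost:9092` bootstrap address are placeholder assumptions.

```scala
import java.util.{Collections, Properties}
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, OffsetSpec}
import org.apache.kafka.common.TopicPartition

object ListMaxTimestampOffsetSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    // Placeholder bootstrap address; point this at a real broker.
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
    val admin = Admin.create(props)
    try {
      val tp = new TopicPartition("foo", 0) // placeholder topic/partition
      // OffsetSpec.maxTimestamp() asks for the offset of the record with the
      // largest timestamp, which need not be the last offset in the partition.
      val info = admin
        .listOffsets(Collections.singletonMap(tp, OffsetSpec.maxTimestamp()))
        .partitionResult(tp)
        .get()
      // With v2 record batches the leader epoch accompanies the offset,
      // which is what the test now asserts unconditionally.
      println(s"offset=${info.offset()} timestamp=${info.timestamp()} epoch=${info.leaderEpoch()}")
    } finally admin.close()
  }
}
```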

core/src/test/scala/unit/kafka/metrics/MetricsTest.scala

+1-24
@@ -52,7 +52,7 @@ class MetricsTest extends KafkaServerTestHarness with Logging {
   overridingProps.put(JmxReporter.EXCLUDE_CONFIG, s"$requiredKafkaServerPrefix=ClusterId")

   def generateConfigs: Seq[KafkaConfig] =
-    TestUtils.createBrokerConfigs(numNodes, zkConnectOrNull, enableControlledShutdown = false).
+    TestUtils.createBrokerConfigs(numNodes, null, enableControlledShutdown = false).
      map(KafkaConfig.fromProps(_, overridingProps))

   val nMessages = 2
@@ -214,29 +214,6 @@ class MetricsTest extends KafkaServerTestHarness with Logging {
     assertTrue(TestUtils.meterCount(bytesOut) > initialBytesOut)
   }

-  @ParameterizedTest
-  @ValueSource(strings = Array("zk"))
-  def testZkControllerMetrics(quorum: String): Unit = {
-    val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics
-
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ActiveControllerCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=OfflinePartitionsCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=PreferredReplicaImbalanceCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=GlobalTopicCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=GlobalPartitionCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=TopicsToDeleteCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ReplicasToDeleteCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=TopicsIneligibleToDeleteCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ReplicasIneligibleToDeleteCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ActiveBrokerCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=FencedBrokerCount"), 1)
-    assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ZkMigrationState"), 1)
-
-    val zkStateMetricName = metrics.keySet.asScala.filter(_.getMBeanName == "kafka.controller:type=KafkaController,name=ZkMigrationState").head
-    val zkStateGauge = metrics.get(zkStateMetricName).asInstanceOf[Gauge[Int]]
-    assertEquals(ZkMigrationState.ZK.value().intValue(), zkStateGauge.value())
-  }
-
   @ParameterizedTest
   @ValueSource(strings = Array("kraft"))
   def testKRaftControllerMetrics(quorum: String): Unit = {
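
The surviving testKRaftControllerMetrics follows the same Yammer-registry lookup pattern as the deleted ZK test. A rough standalone sketch of that pattern, assuming a broker or controller is running in-process so the KafkaController MBeans are registered (the metric name is taken from the diff above):

```scala
import com.yammer.metrics.core.Gauge
import org.apache.kafka.server.metrics.KafkaYammerMetrics
import scala.jdk.CollectionConverters._

object ControllerMetricSketch {
  def main(args: Array[String]): Unit = {
    // Look up a controller gauge by MBean name in the default Yammer registry,
    // mirroring how the deleted test inspected controller metrics.
    val mBeanName = "kafka.controller:type=KafkaController,name=ActiveControllerCount"
    val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics

    // Exactly one MBean with this name should be registered on a healthy node.
    println(s"registered: ${metrics.keySet.asScala.count(_.getMBeanName == mBeanName)}")

    // Read the gauge if present (expected to be 1 on the active controller, 0 elsewhere).
    metrics.asScala.find(_._1.getMBeanName == mBeanName).foreach { case (_, metric) =>
      println(s"value = ${metric.asInstanceOf[Gauge[Int]].value()}")
    }
  }
}
```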

core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala

+2-46
@@ -17,7 +17,6 @@

 package kafka.server

-import kafka.utils._
 import org.apache.kafka.common.Uuid
 import org.apache.kafka.common.internals.Topic
 import org.apache.kafka.common.message.CreateTopicsRequestData
@@ -103,36 +102,6 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest {
     validateTopicExists("partial-none")
   }

-  @ParameterizedTest
-  @ValueSource(strings = Array("zk"))
-  def testCreateTopicsWithVeryShortTimeouts(quorum: String): Unit = {
-    // When using ZooKeeper, we don't expect a request to ever complete within 1ms.
-    // A timeout of 1 ms allows us to test the purgatory timeout logic.
-    //
-    // Note: we do not test KRaft here because its behavior is different. Server-side
-    // timeouts are much less likely to happen with KRaft since the operation is much
-    // faster. Additionally, if a server side timeout does happen, the operation is
-    // usually not performed.
-    validateErrorCreateTopicsRequests(topicsReq(Seq(
-      topicReq("error-timeout", numPartitions = 10, replicationFactor = 3)), timeout = 1),
-      Map("error-timeout" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false)
-    validateErrorCreateTopicsRequests(topicsReq(Seq(
-      topicReq("error-timeout-zero", numPartitions = 10, replicationFactor = 3)), timeout = 0),
-      Map("error-timeout-zero" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false)
-    // Negative timeouts are treated the same as 0
-    validateErrorCreateTopicsRequests(topicsReq(Seq(
-      topicReq("error-timeout-negative", numPartitions = 10, replicationFactor = 3)), timeout = -1),
-      Map("error-timeout-negative" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false)
-    // The topics should still get created eventually
-    TestUtils.waitForPartitionMetadata(servers, "error-timeout", 0)
-    TestUtils.waitForPartitionMetadata(servers, "error-timeout-zero", 0)
-    TestUtils.waitForPartitionMetadata(servers, "error-timeout-negative", 0)
-    validateTopicExists("error-timeout")
-    validateTopicExists("error-timeout-zero")
-    validateTopicExists("error-timeout-negative")
-  }
-
-
   @ParameterizedTest
   @ValueSource(strings = Array("kraft"))
   def testInvalidCreateTopicsRequests(quorum: String): Unit = {
@@ -149,21 +118,8 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest {
   }

   @ParameterizedTest
-  @ValueSource(strings = Array("zk", "zkMigration"))
-  def testNotController(quorum: String): Unit = {
-    // Note: we don't run this test when in KRaft mode, because KRaft doesn't have this
-    // behavior of returning NOT_CONTROLLER. Instead, the request is forwarded.
-    val req = topicsReq(Seq(topicReq("topic1")))
-    val response = sendCreateTopicRequest(req, notControllerSocketServer)
-    val error = if (isZkMigrationTest()) Errors.NONE else Errors.NOT_CONTROLLER
-    assertEquals(1, response.errorCounts().get(error))
-  }
-
-  @ParameterizedTest
-  @ValueSource(strings = Array("zk"))
+  @ValueSource(strings = Array("kraft"))
   def testCreateTopicsRequestVersions(quorum: String): Unit = {
-    // Note: we don't run this test when in KRaft mode, because kraft does not yet support returning topic
-    // configs from CreateTopics.
     for (version <- ApiKeys.CREATE_TOPICS.oldestVersion to ApiKeys.CREATE_TOPICS.latestVersion) {
       val topic = s"topic_$version"
       val data = new CreateTopicsRequestData()
@@ -175,7 +131,7 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest {
       ).asJava.iterator()))

     val request = new CreateTopicsRequest.Builder(data).build(version.asInstanceOf[Short])
-    val response = sendCreateTopicRequest(request)
+    val response = sendCreateTopicRequest(request, adminSocketServer)

     val topicResponse = response.data.topics.find(topic)
     assertNotNull(topicResponse)
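
With the switch to KRaft, testCreateTopicsRequestVersions still iterates over every supported CREATE_TOPICS version. A simplified sketch of just the request-construction loop, outside the test harness (sendCreateTopicRequest and adminSocketServer are harness helpers and are not reproduced here; the timeout value is arbitrary):

```scala
import org.apache.kafka.common.message.CreateTopicsRequestData
import org.apache.kafka.common.message.CreateTopicsRequestData.{CreatableTopic, CreatableTopicCollection}
import org.apache.kafka.common.protocol.ApiKeys
import org.apache.kafka.common.requests.CreateTopicsRequest
import scala.jdk.CollectionConverters._

object CreateTopicsVersionsSketch {
  def main(args: Array[String]): Unit = {
    // Build one CreateTopicsRequest per protocol version the client supports,
    // covering the same version range as the test's loop.
    for (version <- ApiKeys.CREATE_TOPICS.oldestVersion to ApiKeys.CREATE_TOPICS.latestVersion) {
      val data = new CreateTopicsRequestData()
        .setTimeoutMs(5000) // arbitrary timeout, for illustration only
        .setTopics(new CreatableTopicCollection(Seq(
          new CreatableTopic()
            .setName(s"topic_$version")
            .setNumPartitions(1)
            .setReplicationFactor(1.toShort)
        ).asJava.iterator()))
      val request = new CreateTopicsRequest.Builder(data).build(version.asInstanceOf[Short])
      println(s"built CreateTopicsRequest v${request.version}")
    }
  }
}
```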
