From f36de0744b915335de6b636e6bd6b5f1276f34f6 Mon Sep 17 00:00:00 2001
From: bozhao12 <102274736+bozhao12@users.noreply.github.com>
Date: Wed, 18 May 2022 06:40:05 +0800
Subject: [PATCH] MINOR: Remove redundant metric reset in KafkaController
 (#12158)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The following variables in `KafkaController` are used for metrics:
```
offlinePartitionCount
preferredReplicaImbalanceCount
globalTopicCount
globalPartitionCount
topicsToDeleteCount
replicasToDeleteCount
ineligibleTopicsToDeleteCount
ineligibleReplicasToDeleteCount
```
When the controller goes from active to non-active, these variables are reset to 0. Currently, this is done explicitly in `KafkaController.onControllerResignation()` and again in `KafkaController.updateMetrics()`, which runs after every iteration of the controller event loop. The explicit reset in `onControllerResignation()` is therefore redundant. This patch removes it and also simplifies `updateMetrics`.

Reviewers: Jason Gustafson
---
 .../kafka/controller/KafkaController.scala   | 76 ++++++++-----------
 1 file changed, 31 insertions(+), 45 deletions(-)

diff --git a/core/src/main/scala/kafka/controller/KafkaController.scala b/core/src/main/scala/kafka/controller/KafkaController.scala
index 45c22435e68d2..f4a8569b80367 100644
--- a/core/src/main/scala/kafka/controller/KafkaController.scala
+++ b/core/src/main/scala/kafka/controller/KafkaController.scala
@@ -468,14 +468,6 @@ class KafkaController(val config: KafkaConfig,
 
     // shutdown leader rebalance scheduler
     kafkaScheduler.shutdown()
-    offlinePartitionCount = 0
-    preferredReplicaImbalanceCount = 0
-    globalTopicCount = 0
-    globalPartitionCount = 0
-    topicsToDeleteCount = 0
-    replicasToDeleteCount = 0
-    ineligibleTopicsToDeleteCount = 0
-    ineligibleReplicasToDeleteCount = 0
 
     // stop token expiry check scheduler
     if (tokenCleanScheduler.isStarted)
@@ -1435,43 +1427,37 @@ class KafkaController(val config: KafkaConfig,
   }
 
   private def updateMetrics(): Unit = {
-    offlinePartitionCount =
-      if (!isActive) {
-        0
-      } else {
-        controllerContext.offlinePartitionCount
-      }
-
-    preferredReplicaImbalanceCount =
-      if (!isActive) {
-        0
-      } else {
-        controllerContext.preferredReplicaImbalanceCount
-      }
-
-    globalTopicCount = if (!isActive) 0 else controllerContext.allTopics.size
-
-    globalPartitionCount = if (!isActive) 0 else controllerContext.partitionWithLeadersCount
-
-    topicsToDeleteCount = if (!isActive) 0 else controllerContext.topicsToBeDeleted.size
-
-    replicasToDeleteCount = if (!isActive) 0 else controllerContext.topicsToBeDeleted.map { topic =>
-      // For each enqueued topic, count the number of replicas that are not yet deleted
-      controllerContext.replicasForTopic(topic).count { replica =>
-        controllerContext.replicaState(replica) != ReplicaDeletionSuccessful
-      }
-    }.sum
-
-    ineligibleTopicsToDeleteCount = if (!isActive) 0 else controllerContext.topicsIneligibleForDeletion.size
-
-    ineligibleReplicasToDeleteCount = if (!isActive) 0 else controllerContext.topicsToBeDeleted.map { topic =>
-      // For each enqueued topic, count the number of replicas that are ineligible
-      controllerContext.replicasForTopic(topic).count { replica =>
-        controllerContext.replicaState(replica) == ReplicaDeletionIneligible
-      }
-    }.sum
-
-    activeBrokerCount = if (isActive) controllerContext.liveOrShuttingDownBrokerIds.size else 0
+    if (isActive) {
+      offlinePartitionCount = controllerContext.offlinePartitionCount
+      preferredReplicaImbalanceCount = controllerContext.preferredReplicaImbalanceCount
+      globalTopicCount = controllerContext.allTopics.size
+      globalPartitionCount = controllerContext.partitionWithLeadersCount
+      topicsToDeleteCount = controllerContext.topicsToBeDeleted.size
+      replicasToDeleteCount = controllerContext.topicsToBeDeleted.map { topic =>
+        // For each enqueued topic, count the number of replicas that are not yet deleted
+        controllerContext.replicasForTopic(topic).count { replica =>
+          controllerContext.replicaState(replica) != ReplicaDeletionSuccessful
+        }
+      }.sum
+      ineligibleTopicsToDeleteCount = controllerContext.topicsIneligibleForDeletion.size
+      ineligibleReplicasToDeleteCount = controllerContext.topicsToBeDeleted.map { topic =>
+        // For each enqueued topic, count the number of replicas that are ineligible
+        controllerContext.replicasForTopic(topic).count { replica =>
+          controllerContext.replicaState(replica) == ReplicaDeletionIneligible
+        }
+      }.sum
+      activeBrokerCount = controllerContext.liveOrShuttingDownBrokerIds.size
+    } else {
+      offlinePartitionCount = 0
+      preferredReplicaImbalanceCount = 0
+      globalTopicCount = 0
+      globalPartitionCount = 0
+      topicsToDeleteCount = 0
+      replicasToDeleteCount = 0
+      ineligibleTopicsToDeleteCount = 0
+      ineligibleReplicasToDeleteCount = 0
+      activeBrokerCount = 0
+    }
   }
 
   // visible for testing
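For illustration, here is a minimal, self-contained Scala sketch of the pattern the patch relies on. The names below (`ControllerMetricsSketch`, `processEvent`, the stand-in lookup methods) are hypothetical and are not part of the Kafka codebase; the sketch only assumes what the commit message states: the metric fields are refreshed after every pass of the controller event loop, so once `isActive` is false the next `updateMetrics()` call zeroes them without any explicit reset in the resignation path.

```scala
// Hypothetical, self-contained sketch -- not the upstream KafkaController code.
object ControllerMetricsSketch {
  private var isActive = false

  // Stand-ins for the gauge-backing fields in KafkaController.
  private var offlinePartitionCount = 0
  private var globalTopicCount = 0

  // Stand-ins for the controllerContext lookups used while active.
  private def currentOfflinePartitions: Int = 3
  private def currentTopicCount: Int = 12

  // Mirrors the consolidated shape of updateMetrics() after this patch:
  // compute every value when active, zero every value otherwise.
  private def updateMetrics(): Unit = {
    if (isActive) {
      offlinePartitionCount = currentOfflinePartitions
      globalTopicCount = currentTopicCount
    } else {
      offlinePartitionCount = 0
      globalTopicCount = 0
    }
  }

  // The event loop refreshes the gauges after every event, including the
  // resignation itself, which is why no per-field reset is needed there.
  private def processEvent(event: String): Unit = {
    try {
      event match {
        case "elect"  => isActive = true
        case "resign" => isActive = false // no metric reset here
        case _        => // other events leave isActive unchanged
      }
    } finally {
      updateMetrics()
    }
  }

  def main(args: Array[String]): Unit = {
    processEvent("elect")
    println(s"active:   offline=$offlinePartitionCount topics=$globalTopicCount")
    processEvent("resign")
    println(s"resigned: offline=$offlinePartitionCount topics=$globalTopicCount")
  }
}
```

Running the sketch prints live values after `elect` and zeros after `resign`, even though the resignation branch never touches the metric fields, which is the behaviour the removed block in `onControllerResignation()` was duplicating.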