
Commit 343f6fa

Author: Andrew Choi

Xinfra Monitor - Performance, Optimization, Refactoring, Upgrades, Removal of Deprecations

Signed-off-by: Andrew Choi <[email protected]>
1 parent 59fbcf6 commit 343f6fa

13 files changed, +26 -27 lines changed

src/main/java/com/linkedin/xinfra/monitor/services/ClusterTopicManipulationService.java

Lines changed: 1 addition & 0 deletions
@@ -90,6 +90,7 @@ public ClusterTopicManipulationService(String name, AdminClient adminClient, Map
     tags.put("name", name);
     TopicManagementServiceConfig config = new TopicManagementServiceConfig(props);
     String topicFactoryClassName = config.getString(TopicManagementServiceConfig.TOPIC_FACTORY_CLASS_CONFIG);
+    @SuppressWarnings("rawtypes")
     Map topicFactoryConfig =
         props.containsKey(TopicManagementServiceConfig.TOPIC_FACTORY_PROPS_CONFIG) ? (Map) props.get(
             TopicManagementServiceConfig.TOPIC_FACTORY_PROPS_CONFIG) : new HashMap();
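For context on the warning being silenced: the topic-factory properties are held in a raw Map, and the commit suppresses the resulting rawtypes warning at the declaration. A standalone sketch below contrasts that with the parameterized alternative not taken here; the class name and config key are illustrative, not the monitor's own.

import java.util.HashMap;
import java.util.Map;

// Standalone sketch only: compares the raw-Map approach kept by the commit
// with a parameterized map, which trades a rawtypes warning for an
// unchecked-cast warning.
public class RawVsTypedConfigDemo {
  public static void main(String[] args) {
    Map<String, Object> props = new HashMap<>();
    props.put("topic.factory.props", new HashMap<String, Object>());

    // As in the diff: a raw Map, warning suppressed at the declaration.
    @SuppressWarnings("rawtypes")
    Map rawConfig = props.containsKey("topic.factory.props")
        ? (Map) props.get("topic.factory.props") : new HashMap();

    // Alternative not taken: keep the type parameters, suppress only the cast.
    @SuppressWarnings("unchecked")
    Map<String, Object> typedConfig = props.containsKey("topic.factory.props")
        ? (Map<String, Object>) props.get("topic.factory.props") : new HashMap<String, Object>();

    System.out.println(rawConfig.size() + " / " + typedConfig.size());
  }
}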

src/main/java/com/linkedin/xinfra/monitor/services/ConsumeService.java

Lines changed: 1 addition & 1 deletion
@@ -204,7 +204,7 @@ public void onComplete(Map<TopicPartition, OffsetAndMetadata> topicPartitionOffs

       } else if (index < nextIndex) {
         _sensors._recordsDuplicated.record();
-      } else if (index > nextIndex) {
+      } else { // this will equate to the case where index > nextIndex...
         nextIndexes.put(partition, index + 1);
         long numLostRecords = index - nextIndex;
         _sensors._recordsLost.record(numLostRecords);
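The renamed branch is the tail of a per-partition sequence check: once the equal and less-than cases are handled, the final else can only mean index > nextIndex. A standalone sketch of that check follows; the class, counters, and the in-order branch are assumptions (the real ConsumeService records to Kafka Metrics sensors, and the earlier branches fall outside this hunk).

import java.util.HashMap;
import java.util.Map;

// Hypothetical standalone sketch of the sequence-index bookkeeping.
public class RecordIndexTracker {
  private final Map<Integer, Long> nextIndexes = new HashMap<>(); // partition -> next expected index
  long consumed, duplicated, lost;

  public void onRecord(int partition, long index) {
    long nextIndex = nextIndexes.getOrDefault(partition, index);
    if (index == nextIndex) {
      consumed++;                        // in-order record (assumed branch)
      nextIndexes.put(partition, index + 1);
    } else if (index < nextIndex) {
      duplicated++;                      // this index was already seen
    } else {                             // index > nextIndex: the gap is the number of lost records
      lost += index - nextIndex;
      nextIndexes.put(partition, index + 1);
    }
  }
}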

src/main/java/com/linkedin/xinfra/monitor/services/DefaultMetricsReporterServiceFactory.java

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ public DefaultMetricsReporterServiceFactory(Map properties, String serviceName)

   @SuppressWarnings("unchecked")
   @Override
-  public Service createService() throws Exception {
+  public Service createService() {
     return new DefaultMetricsReporterService(_properties, _serviceName);
   }
 }

src/main/java/com/linkedin/xinfra/monitor/services/MultiClusterTopicManagementService.java

Lines changed: 2 additions & 3 deletions
@@ -383,10 +383,9 @@ private static BrokerMetadata randomBroker(Set<BrokerMetadata> brokers) {
     // Using Set enforces the usage of loop which is O(n).
     // As the list of brokers does not change in newPartitionAssignments,
     // the acceptance of a List argument instead of a Set will be faster which is (O(1))
-    List<BrokerMetadata> brokerMetadataList = new ArrayList<>();
-
+    List<BrokerMetadata> brokerMetadataList = new ArrayList<>(brokers);
     // convert to a list so there's no need to create a index and iterate through this set
-    brokerMetadataList.addAll(brokers);
+    //addAll() is replaced with parameterized constructor call for better performance..

     int brokerSetSize = brokers.size();
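The swap only changes how the Set of brokers is copied into a List: the copy constructor sizes the backing array once from brokers.size(), while an empty ArrayList followed by addAll() may allocate and then grow. A standalone sketch, with String standing in for BrokerMetadata to stay self-contained:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Standalone sketch: the list contents are identical either way; only the
// number of allocations/copies differs.
public class BrokerListCopyDemo {
  public static void main(String[] args) {
    Set<String> brokers = new HashSet<>(Arrays.asList("broker-0", "broker-1", "broker-2"));

    // Before this commit: construct empty, then copy.
    List<String> before = new ArrayList<>();
    before.addAll(brokers);

    // After this commit: copy (and presize) in the constructor.
    List<String> after = new ArrayList<>(brokers);

    System.out.println(before.size() + " == " + after.size());
  }
}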

src/main/java/com/linkedin/xinfra/monitor/services/OffsetCommitService.java

Lines changed: 0 additions & 1 deletion
@@ -58,7 +58,6 @@
 /**
  * Service that monitors the commit offset availability of a particular Consumer Group.
  */
-@SuppressWarnings("NullableProblems")
 public class OffsetCommitService implements Service {

   public static final String METRIC_GRP_PREFIX = "xm-offset-commit-service";

src/main/java/com/linkedin/xinfra/monitor/services/ProduceService.java

Lines changed: 2 additions & 2 deletions
@@ -308,7 +308,7 @@ public void run() {
       }
     }

-  @SuppressWarnings("NullableProblems")
+
   private class ProduceServiceThreadFactory implements ThreadFactory {

     private final AtomicInteger _threadId = new AtomicInteger();
@@ -318,7 +318,7 @@ public Thread newThread(Runnable r) {
   }

   private class HandleNewPartitionsThreadFactory implements ThreadFactory {
-    public Thread newThread(@SuppressWarnings("NullableProblems") Runnable r) {
+    public Thread newThread(Runnable r) {
       return new Thread(r, _name + "-produce-service-new-partition-handler");
     }
   }

src/main/java/com/linkedin/xinfra/monitor/services/StatsdMetricsReporterServiceFactory.java

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ public StatsdMetricsReporterServiceFactory(Map properties, String name) {
   }

   @Override
-  public Service createService() throws Exception {
+  public Service createService() {

     //noinspection unchecked
     return new StatsdMetricsReporterService(_properties, _name);

src/main/java/com/linkedin/xinfra/monitor/services/metrics/CommitAvailabilityMetrics.java

Lines changed: 3 additions & 3 deletions
@@ -15,8 +15,8 @@
 import org.apache.kafka.common.metrics.MetricConfig;
 import org.apache.kafka.common.metrics.Metrics;
 import org.apache.kafka.common.metrics.Sensor;
+import org.apache.kafka.common.metrics.stats.CumulativeSum;
 import org.apache.kafka.common.metrics.stats.Rate;
-import org.apache.kafka.common.metrics.stats.Total;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -37,13 +37,13 @@ public CommitAvailabilityMetrics(final Metrics metrics, final Map<String, String
     LOG.info("{} called.", this.getClass().getSimpleName());
     _offsetsCommitted = metrics.sensor("offsets-committed");
     _offsetsCommitted.add(new MetricName("offsets-committed-total", METRIC_GROUP_NAME,
-        "The total number of offsets per second that are committed.", tags), new Total());
+        "The total number of offsets per second that are committed.", tags), new CumulativeSum());

     _failedCommitOffsets = metrics.sensor("failed-commit-offsets");
     _failedCommitOffsets.add(new MetricName("failed-commit-offsets-avg", METRIC_GROUP_NAME,
         "The average number of offsets per second that have failed.", tags), new Rate());
     _failedCommitOffsets.add(new MetricName("failed-commit-offsets-total", METRIC_GROUP_NAME,
-        "The total number of offsets per second that have failed.", tags), new Total());
+        "The total number of offsets per second that have failed.", tags), new CumulativeSum());

     metrics.addMetric(new MetricName("offsets-committed-avg", METRIC_GROUP_NAME, "The average offset commits availability.", tags),
         (MetricConfig config, long now) -> {
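Total is deprecated in recent kafka-clients releases in favor of CumulativeSum, which records the same monotonically increasing sum, so the swap is a drop-in replacement. A standalone sketch of the registration pattern after this change; the metric group, tags, and recorded values are illustrative, not the monitor's own.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.CumulativeSum;

// Standalone sketch: register a cumulative counter with CumulativeSum and read it back.
public class CumulativeSumDemo {
  public static void main(String[] args) {
    Map<String, String> tags = Collections.singletonMap("name", "demo");
    try (Metrics metrics = new Metrics()) {
      Sensor committed = metrics.sensor("offsets-committed");
      MetricName total = new MetricName("offsets-committed-total", "demo-group",
          "The total number of offsets that are committed.", tags);
      committed.add(total, new CumulativeSum());

      committed.record();     // adds 1
      committed.record(4.0);  // adds 4

      KafkaMetric metric = metrics.metric(total);
      System.out.println(metric.metricValue()); // 5.0
    }
  }
}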

src/main/java/com/linkedin/xinfra/monitor/services/metrics/CommitLatencyMetrics.java

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ public CommitLatencyMetrics(Metrics metrics, Map<String, String> tags, int laten
    * start the recording of consumer offset commit
    * @throws Exception if the offset commit is already in progress.
    */
-  public void recordCommitStart() throws Exception {
+  public void recordCommitStart() {
     if (!_inProgressCommit) {
       this.setCommitStartTimeMs(System.currentTimeMillis());
       _inProgressCommit = true;

src/main/java/com/linkedin/xinfra/monitor/services/metrics/ConsumeMetrics.java

Lines changed: 6 additions & 6 deletions
@@ -15,11 +15,11 @@
 import org.apache.kafka.common.metrics.Metrics;
 import org.apache.kafka.common.metrics.Sensor;
 import org.apache.kafka.common.metrics.stats.Avg;
+import org.apache.kafka.common.metrics.stats.CumulativeSum;
 import org.apache.kafka.common.metrics.stats.Max;
 import org.apache.kafka.common.metrics.stats.Percentile;
 import org.apache.kafka.common.metrics.stats.Percentiles;
 import org.apache.kafka.common.metrics.stats.Rate;
-import org.apache.kafka.common.metrics.stats.Total;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -43,23 +43,23 @@ public ConsumeMetrics(final Metrics metrics, Map<String, String> tags, int laten

     _consumeError = metrics.sensor("consume-error");
     _consumeError.add(new MetricName("consume-error-rate", METRIC_GROUP_NAME, "The average number of errors per second", tags), new Rate());
-    _consumeError.add(new MetricName("consume-error-total", METRIC_GROUP_NAME, "The total number of errors", tags), new Total());
+    _consumeError.add(new MetricName("consume-error-total", METRIC_GROUP_NAME, "The total number of errors", tags), new CumulativeSum());

     _recordsConsumed = metrics.sensor("records-consumed");
     _recordsConsumed.add(new MetricName("records-consumed-rate", METRIC_GROUP_NAME, "The average number of records per second that are consumed", tags), new Rate());
-    _recordsConsumed.add(new MetricName("records-consumed-total", METRIC_GROUP_NAME, "The total number of records that are consumed", tags), new Total());
+    _recordsConsumed.add(new MetricName("records-consumed-total", METRIC_GROUP_NAME, "The total number of records that are consumed", tags), new CumulativeSum());

     _recordsDuplicated = metrics.sensor("records-duplicated");
     _recordsDuplicated.add(new MetricName("records-duplicated-rate", METRIC_GROUP_NAME, "The average number of records per second that are duplicated", tags), new Rate());
-    _recordsDuplicated.add(new MetricName("records-duplicated-total", METRIC_GROUP_NAME, "The total number of records that are duplicated", tags), new Total());
+    _recordsDuplicated.add(new MetricName("records-duplicated-total", METRIC_GROUP_NAME, "The total number of records that are duplicated", tags), new CumulativeSum());

     _recordsLost = metrics.sensor("records-lost");
     _recordsLost.add(new MetricName("records-lost-rate", METRIC_GROUP_NAME, "The average number of records per second that are lost", tags), new Rate());
-    _recordsLost.add(new MetricName("records-lost-total", METRIC_GROUP_NAME, "The total number of records that are lost", tags), new Total());
+    _recordsLost.add(new MetricName("records-lost-total", METRIC_GROUP_NAME, "The total number of records that are lost", tags), new CumulativeSum());

     _recordsDelayed = metrics.sensor("records-delayed");
     _recordsDelayed.add(new MetricName("records-delayed-rate", METRIC_GROUP_NAME, "The average number of records per second that are either lost or arrive after maximum allowed latency under SLA", tags), new Rate());
-    _recordsDelayed.add(new MetricName("records-delayed-total", METRIC_GROUP_NAME, "The total number of records that are either lost or arrive after maximum allowed latency under SLA", tags), new Total());
+    _recordsDelayed.add(new MetricName("records-delayed-total", METRIC_GROUP_NAME, "The total number of records that are either lost or arrive after maximum allowed latency under SLA", tags), new CumulativeSum());

     _recordsDelay = metrics.sensor("records-delay");
     _recordsDelay.add(new MetricName("records-delay-ms-avg", METRIC_GROUP_NAME, "The average latency of records from producer to consumer", tags), new Avg());
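Each sensor in ConsumeMetrics pairs a windowed Rate ("-rate") with a cumulative total ("-total"); only the stat backing the total changes in this hunk. A standalone sketch of that pairing with CumulativeSum, using an illustrative group name and tags rather than the monitor's own:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.CumulativeSum;
import org.apache.kafka.common.metrics.stats.Rate;

// Standalone sketch: one sensor, two stats; a single record() call updates both.
public class RateAndTotalDemo {
  public static void main(String[] args) {
    Map<String, String> tags = Collections.singletonMap("name", "demo");
    try (Metrics metrics = new Metrics()) {
      Sensor recordsConsumed = metrics.sensor("records-consumed");
      recordsConsumed.add(new MetricName("records-consumed-rate", "demo-group",
          "The average number of records per second that are consumed", tags), new Rate());
      recordsConsumed.add(new MetricName("records-consumed-total", "demo-group",
          "The total number of records that are consumed", tags), new CumulativeSum());

      for (int i = 0; i < 10; i++) {
        recordsConsumed.record(); // feeds both the rate and the cumulative total
      }

      // Prints every registered metric, including the two above.
      metrics.metrics().forEach((name, metric) ->
          System.out.println(name.name() + " = " + metric.metricValue()));
    }
  }
}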
