@@ -39,6 +39,7 @@
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
+
 import org.apache.commons.lang.Validate;
 import org.apache.kafka.clients.admin.Admin;
 import org.apache.kafka.clients.consumer.Consumer;
@@ -154,7 +155,7 @@ public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
 
         tupleListener.open(conf, context);
         this.kafkaOffsetMetricManager
-            = new KafkaOffsetMetricManager<>(() -> Collections.unmodifiableMap(offsetManagers), () -> admin, context);
+                = new KafkaOffsetMetricManager<>(() -> Collections.unmodifiableMap(offsetManagers), () -> admin, context);
 
         LOG.info("Kafka Spout opened with the following configuration: {}", kafkaSpoutConfig);
     }
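
A note on the constructor call above: KafkaOffsetMetricManager receives Supplier arguments rather than direct references, so every metric read goes through a fresh, read-only view of the spout's current offset managers and Admin client (which may be re-created during the spout's lifecycle) instead of a stale snapshot. A minimal sketch of the idiom, with hypothetical class and field names:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

class OffsetStateHolder {
    private final Map<String, Long> offsets = new HashMap<>();

    // The supplier re-wraps the live map on every call, so consumers always
    // see current values but can never mutate the holder's internal state.
    Supplier<Map<String, Long>> offsetsView() {
        return () -> Collections.unmodifiableMap(offsets);
    }
}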
@@ -183,7 +184,7 @@ public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
             previousAssignment = partitions;
 
             LOG.info("Partitions revoked. [consumer-group={}, consumer={}, topic-partitions={}]",
-                kafkaSpoutConfig.getConsumerGroupId(), consumer, partitions);
+                    kafkaSpoutConfig.getConsumerGroupId(), consumer, partitions);
 
             if (isAtLeastOnceProcessing()) {
                 commitOffsetsForAckedTuples();
@@ -193,7 +194,7 @@ public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
 
         @Override
         public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
             LOG.info("Partitions reassignment. [task-ID={}, consumer-group={}, consumer={}, topic-partitions={}]",
-                context.getThisTaskId(), kafkaSpoutConfig.getConsumerGroupId(), consumer, partitions);
+                    context.getThisTaskId(), kafkaSpoutConfig.getConsumerGroupId(), consumer, partitions);
 
             initialize(partitions);
             tupleListener.onPartitionsReassigned(partitions);
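
For readers unfamiliar with the callback pair being edited here: it mirrors Kafka's ConsumerRebalanceListener contract, where revoked partitions get one last chance to commit before ownership moves, and assigned partitions are re-initialized afterwards. A bare-bones standalone implementation of that interface:

import java.util.Collection;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

class LoggingRebalanceListener implements ConsumerRebalanceListener {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Invoked before the rebalance completes: commit or flush here.
        System.out.println("Revoked: " + partitions);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Invoked after reassignment: seek to the desired offsets here.
        System.out.println("Assigned: " + partitions);
    }
}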
@@ -221,7 +222,7 @@ private void initialize(Collection<TopicPartition> partitions) {
                 final OffsetAndMetadata committedOffset = consumer.committed(newTp);
                 final long fetchOffset = doSeek(newTp, committedOffset);
                 LOG.debug("Set consumer position to [{}] for topic-partition [{}] with [{}] and committed offset [{}]",
-                    fetchOffset, newTp, firstPollOffsetStrategy, committedOffset);
+                        fetchOffset, newTp, firstPollOffsetStrategy, committedOffset);
                 if (isAtLeastOnceProcessing() && !offsetManagers.containsKey(newTp)) {
                     offsetManagers.put(newTp, new OffsetManager(newTp, fetchOffset));
                 }
@@ -234,13 +235,13 @@ private void initialize(Collection<TopicPartition> partitions) {
         */
        private long doSeek(TopicPartition newTp, OffsetAndMetadata committedOffset) {
            LOG.trace("Seeking offset for topic-partition [{}] with [{}] and committed offset [{}]",
-                newTp, firstPollOffsetStrategy, committedOffset);
+                    newTp, firstPollOffsetStrategy, committedOffset);
 
            if (committedOffset != null) {
                // offset was previously committed for this consumer group and topic-partition, either by this or another topology.
                if (commitMetadataManager.isOffsetCommittedByThisTopology(newTp,
-                    committedOffset,
-                    Collections.unmodifiableMap(offsetManagers))) {
+                        committedOffset,
+                        Collections.unmodifiableMap(offsetManagers))) {
                    // Another KafkaSpout instance (of this topology) already committed, therefore FirstPollOffsetStrategy does not apply.
                    consumer.seek(newTp, committedOffset.offset());
                } else {
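
doSeek boils down to: trust a committed offset when it is usable, otherwise fall back to the configured FirstPollOffsetStrategy. The full method also asks commitMetadataManager whether the commit came from this topology; the reduced sketch below keeps only the committed-or-strategy decision and hard-codes an earliest-style fallback, so treat it as illustrative rather than the actual logic:

import java.util.Collections;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

class SeekSketch {
    // Positions the consumer on tp and returns the resulting fetch offset.
    static long seekToCommittedOrBeginning(KafkaConsumer<?, ?> consumer, TopicPartition tp) {
        OffsetAndMetadata committed = consumer.committed(tp);
        if (committed != null) {
            // A previous run of this group already made progress: resume there.
            consumer.seek(tp, committed.offset());
        } else {
            // No committed offset: apply a first-poll strategy (EARLIEST here).
            consumer.seekToBeginning(Collections.singleton(tp));
        }
        return consumer.position(tp);
    }
}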
@@ -281,7 +282,7 @@ public void nextTuple() {
                 commitOffsetsForAckedTuples();
             } else if (kafkaSpoutConfig.getProcessingGuarantee() == ProcessingGuarantee.NO_GUARANTEE) {
                 Map<TopicPartition, OffsetAndMetadata> offsetsToCommit =
-                    createFetchedOffsetsMetadata(consumer.assignment());
+                        createFetchedOffsetsMetadata(consumer.assignment());
                 consumer.commitAsync(offsetsToCommit, null);
                 LOG.debug("Committed offsets {} to Kafka", offsetsToCommit);
             }
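
createFetchedOffsetsMetadata is not part of this diff. Judging from its call sites here and in pollKafkaBroker, it maps every assigned partition to the consumer's current fetch position (the real method presumably also attaches commit metadata identifying the topology). Under that assumption, the NO_GUARANTEE branch amounts to a fire-and-forget commit of current positions:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

class FireAndForgetCommit {
    // Asynchronously commits the consumer's current fetch positions.
    // The null callback means failures are simply ignored, which is
    // acceptable only when no delivery guarantee is required.
    static void commitCurrentPositions(KafkaConsumer<?, ?> consumer) {
        Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
        for (TopicPartition tp : consumer.assignment()) {
            toCommit.put(tp, new OffsetAndMetadata(consumer.position(tp)));
        }
        consumer.commitAsync(toCommit, null);
    }
}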
@@ -336,7 +337,7 @@ private PollablePartitionsInfo getPollablePartitionsInfo() {
                     pollablePartitions.add(tp);
                 } else {
                     LOG.debug("Not polling on partition [{}]. It has [{}] uncommitted offsets, which exceeds the limit of [{}]. ", tp,
-                        numUncommittedOffsets, maxUncommittedOffsets);
+                            numUncommittedOffsets, maxUncommittedOffsets);
                 }
             }
         }
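
This branch is the spout's poll-side backpressure: once a partition accumulates maxUncommittedOffsets in-flight offsets, it is left out of the next poll until commits catch up, which bounds the memory held by pending tuples. A reduced sketch of that gate, with hypothetical names:

import java.util.HashMap;
import java.util.Map;

class PollGate {
    private final Map<String, Long> uncommittedByPartition = new HashMap<>();
    private final long maxUncommittedOffsets;

    PollGate(long maxUncommittedOffsets) {
        this.maxUncommittedOffsets = maxUncommittedOffsets;
    }

    // A partition stays pollable only while its uncommitted-offset count
    // is below the cap.
    boolean isPollable(String partition) {
        return uncommittedByPartition.getOrDefault(partition, 0L) < maxUncommittedOffsets;
    }
}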
@@ -345,7 +346,7 @@ private PollablePartitionsInfo getPollablePartitionsInfo() {
 
     private boolean isWaitingToEmit() {
         return waitingToEmit.values().stream()
-            .anyMatch(list -> !list.isEmpty());
+                .anyMatch(list -> !list.isEmpty());
     }
 
     private void setWaitingToEmit(ConsumerRecords<K, V> consumerRecords) {
@@ -365,11 +366,11 @@ private ConsumerRecords<K, V> pollKafkaBroker(PollablePartitionsInfo pollablePartitionsInfo) {
         ackRetriableOffsetsIfCompactedAway(pollablePartitionsInfo.pollableEarliestRetriableOffsets, consumerRecords);
         final int numPolledRecords = consumerRecords.count();
         LOG.debug("Polled [{}] records from Kafka",
-            numPolledRecords);
+                numPolledRecords);
         if (kafkaSpoutConfig.getProcessingGuarantee() == KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE) {
             //Commit polled records immediately to ensure delivery is at-most-once.
             Map<TopicPartition, OffsetAndMetadata> offsetsToCommit =
-                createFetchedOffsetsMetadata(consumer.assignment());
+                    createFetchedOffsetsMetadata(consumer.assignment());
             consumer.commitSync(offsetsToCommit);
             LOG.debug("Committed offsets {} to Kafka", offsetsToCommit);
         }
@@ -387,7 +388,7 @@ private void doSeekRetriableTopicPartitions(Map<TopicPartition, Long> pollableEarliestRetriableOffsets) {
     }
 
     private void ackRetriableOffsetsIfCompactedAway(Map<TopicPartition, Long> earliestRetriableOffsets,
-        ConsumerRecords<K, V> consumerRecords) {
+            ConsumerRecords<K, V> consumerRecords) {
         for (Entry<TopicPartition, Long> entry : earliestRetriableOffsets.entrySet()) {
             TopicPartition tp = entry.getKey();
             List<ConsumerRecord<K, V>> records = consumerRecords.records(tp);
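
Context for this method: on a compacted (or aged-out) topic, an offset scheduled for retry may no longer exist, so seeking to it yields records that start at some later offset. Those vanished offsets must be acked, otherwise the spout would retry them forever. A simplified predicate for spotting such a gap, using plain offsets in place of ConsumerRecords:

import java.util.List;

class CompactionGap {
    // True when 'offset' was requested but the fetch came back starting at
    // a later offset: the record can never be re-read and should be acked.
    static boolean compactedAway(long offset, List<Long> fetchedOffsets) {
        return !fetchedOffsets.isEmpty() && offset < fetchedOffsets.get(0);
    }
}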
@@ -529,7 +530,7 @@ private void commitOffsetsForAckedTuples() {
                  * to the committed offset.
                  */
                 LOG.debug("Consumer fell behind committed offset. Catching up. Position was [{}], skipping to [{}]",
-                    position, committedOffset);
+                        position, committedOffset);
                 consumer.seek(tp, committedOffset);
             }
             /**
@@ -539,8 +540,8 @@ private void commitOffsetsForAckedTuples() {
             if (waitingToEmitForTp != null) {
                 //Discard the pending records that are already committed
                 waitingToEmit.put(tp, waitingToEmitForTp.stream()
-                    .filter(record -> record.offset() >= committedOffset)
-                    .collect(Collectors.toCollection(LinkedList::new)));
+                        .filter(record -> record.offset() >= committedOffset)
+                        .collect(Collectors.toCollection(LinkedList::new)));
             }
 
             final OffsetManager offsetManager = offsetManagers.get(tp);
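
Worth noting in the pruning above: Collectors.toCollection(LinkedList::new) keeps waitingToEmit's values as LinkedLists (cheap removal from the head while emitting), where a plain toList() would not guarantee the concrete type. The same pattern in isolation:

import java.util.LinkedList;
import java.util.List;
import java.util.stream.Collectors;

class PruneCommitted {
    // Drops every pending offset below the committed offset, preserving a
    // LinkedList as the concrete collection type.
    static List<Long> discardBelow(List<Long> pending, long committedOffset) {
        return pending.stream()
                .filter(offset -> offset >= committedOffset)
                .collect(Collectors.toCollection(LinkedList::new));
    }
}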
@@ -573,11 +574,11 @@ public void ack(Object messageId) {
 
         if (!emitted.contains(msgId)) {
             LOG.debug("Received ack for message [{}], associated with tuple emitted for a ConsumerRecord that "
-                + "came from a topic-partition that this consumer group instance is no longer tracking "
-                + "due to rebalance/partition reassignment. No action taken.", msgId);
+                    + "came from a topic-partition that this consumer group instance is no longer tracking "
+                    + "due to rebalance/partition reassignment. No action taken.", msgId);
         } else {
             Validate.isTrue(!retryService.isScheduled(msgId), "The message id " + msgId + " is queued for retry while being acked."
-                + " This should never occur barring errors in the RetryService implementation or the spout code.");
+                    + " This should never occur barring errors in the RetryService implementation or the spout code.");
             offsetManagers.get(msgId.getTopicPartition()).addToAckMsgs(msgId);
             emitted.remove(msgId);
         }
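
The Validate.isTrue call enforces the invariant that a message cannot be acked while still scheduled for retry. commons-lang's Validate.isTrue throws IllegalArgumentException with the supplied message when the condition is false, so a broken invariant fails fast instead of silently corrupting offset bookkeeping:

import org.apache.commons.lang.Validate;

class InvariantCheck {
    public static void main(String[] args) {
        int numFails = 0;
        // Throws IllegalArgumentException if the condition is false.
        Validate.isTrue(numFails >= 0, "numFails must be non-negative: " + numFails);
    }
}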
@@ -595,11 +596,11 @@ public void fail(Object messageId) {
         final KafkaSpoutMessageId msgId = (KafkaSpoutMessageId) messageId;
         if (!emitted.contains(msgId)) {
             LOG.debug("Received fail for tuple this spout is no longer tracking."
-                + " Partitions may have been reassigned. Ignoring message [{}]", msgId);
+                    + " Partitions may have been reassigned. Ignoring message [{}]", msgId);
             return;
         }
         Validate.isTrue(!retryService.isScheduled(msgId), "The message id " + msgId + " is queued for retry while being failed."
-            + " This should never occur barring errors in the RetryService implementation or the spout code.");
+                + " This should never occur barring errors in the RetryService implementation or the spout code.");
 
         msgId.incrementNumFails();
 
@@ -630,7 +631,7 @@ private void refreshAssignment() {
         List<TopicPartition> allPartitionsSorted = new ArrayList<>(allPartitions);
         Collections.sort(allPartitionsSorted, TopicPartitionComparator.INSTANCE);
         Set<TopicPartition> assignedPartitions = kafkaSpoutConfig.getTopicPartitioner()
-            .getPartitionsForThisTask(allPartitionsSorted, context);
+                .getPartitionsForThisTask(allPartitionsSorted, context);
         boolean partitionChanged = topicAssigner.assignPartitions(consumer, assignedPartitions, rebalanceListener);
         if (partitionChanged && canRegisterMetrics()) {
             LOG.info("Partitions assignments has changed, updating metrics.");
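
The sort before getPartitionsForThisTask is load-bearing: each spout task computes its own share independently, so all tasks must iterate the partitions in the same order for the shares to come out disjoint and complete. One plausible round-robin split over a sorted list (a sketch, not the actual partitioner implementation):

import java.util.ArrayList;
import java.util.List;

class RoundRobinSplit {
    // Gives task 'taskIndex' every numTasks-th partition from the sorted
    // list; run with the same inputs, every task derives the same answer.
    static List<Integer> partitionsForTask(List<Integer> sortedPartitions, int taskIndex, int numTasks) {
        List<Integer> mine = new ArrayList<>();
        for (int i = taskIndex; i < sortedPartitions.size(); i += numTasks) {
            mine.add(sortedPartitions.get(i));
        }
        return mine;
    }
}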
@@ -683,9 +684,9 @@ public void declareOutputFields(OutputFieldsDeclarer declarer) {
     @Override
     public String toString() {
         return "KafkaSpout{"
-            + "offsetManagers =" + offsetManagers
-            + ", emitted=" + emitted
-            + "}";
+                + "offsetManagers =" + offsetManagers
+                + ", emitted=" + emitted
+                + "}";
     }
 
     @Override
@@ -718,8 +719,8 @@ private boolean isPrimitiveOrWrapper(Class<?> type) {
 
     private boolean isWrapper(Class<?> type) {
         return type == Double.class || type == Float.class || type == Long.class
-            || type == Integer.class || type == Short.class || type == Character.class
-            || type == Byte.class || type == Boolean.class || type == String.class;
+                || type == Integer.class || type == Short.class || type == Character.class
+                || type == Byte.class || type == Boolean.class || type == String.class;
     }
 
     private String getTopicsString() {
@@ -735,8 +736,8 @@ private static class PollablePartitionsInfo {
         PollablePartitionsInfo(Set<TopicPartition> pollablePartitions, Map<TopicPartition, Long> earliestRetriableOffsets) {
             this.pollablePartitions = pollablePartitions;
             this.pollableEarliestRetriableOffsets = earliestRetriableOffsets.entrySet().stream()
-                .filter(entry -> pollablePartitions.contains(entry.getKey()))
-                .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue()));
+                    .filter(entry -> pollablePartitions.contains(entry.getKey()))
+                    .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue()));
         }
 
         public boolean shouldPoll() {
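
A small style note on the collector in this constructor: entry -> entry.getKey() works, but the method references Map.Entry::getKey and Map.Entry::getValue are the more idiomatic spelling of the same filter-then-toMap pattern:

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

class RetainPollable {
    // Restricts the retriable-offsets map to currently pollable partitions.
    static Map<String, Long> retain(Map<String, Long> earliestRetriable, Set<String> pollable) {
        return earliestRetriable.entrySet().stream()
                .filter(entry -> pollable.contains(entry.getKey()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }
}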