forked from vert-x3/vertx-kafka-client
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request vert-x3#113 from emasab/bugfix/112-lost-messages
Bugfix/112 lost messages
- Loading branch information
Showing 2 changed files with 151 additions and 17 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
120 changes: 120 additions & 0 deletions
src/test/java/io/vertx/kafka/client/tests/KafkaReadStreamMockTest.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,120 @@ | ||
package io.vertx.kafka.client.tests; | ||
|
||
import java.util.HashMap; | ||
import java.util.LinkedHashSet; | ||
import java.util.LinkedList; | ||
import java.util.Map; | ||
import java.util.Set; | ||
import java.util.concurrent.atomic.AtomicLong; | ||
|
||
import org.apache.kafka.clients.consumer.ConsumerRecord; | ||
import org.apache.kafka.clients.consumer.MockConsumer; | ||
import org.apache.kafka.clients.consumer.OffsetResetStrategy; | ||
import org.junit.Test; | ||
import org.junit.runner.RunWith; | ||
|
||
import io.vertx.core.Vertx; | ||
import io.vertx.ext.unit.Async; | ||
import io.vertx.ext.unit.TestContext; | ||
import io.vertx.ext.unit.junit.VertxUnitRunner; | ||
import io.vertx.kafka.client.common.TopicPartition; | ||
import io.vertx.kafka.client.consumer.KafkaConsumer; | ||
import io.vertx.kafka.client.consumer.KafkaReadStream; | ||
import io.vertx.kafka.client.consumer.impl.KafkaConsumerImpl; | ||
import io.vertx.kafka.client.producer.KafkaProducerRecord; | ||
import io.vertx.kafka.client.producer.impl.KafkaProducerRecordImpl; | ||
|
||
@RunWith(VertxUnitRunner.class) | ||
public class KafkaReadStreamMockTest extends KafkaTestBase { | ||
|
||
private LinkedList<ConsumerRecord<String,String>> recordsMock = new LinkedList<>(); | ||
|
||
private int SEND_BATCH = 5; | ||
private int TOTAL_MESSAGES = 400; | ||
private final String TOPIC = "topic"; | ||
private Long timer = null; | ||
|
||
private void initRecords(){ | ||
int numMessages = TOTAL_MESSAGES; | ||
for (int i = 0;i < numMessages;i++) { | ||
String key = "key-" + i; | ||
String value = "value-" + i; | ||
recordsMock.add(new ConsumerRecord<String,String>(TOPIC, 0, i, key, value)); | ||
} | ||
} | ||
|
||
private MockConsumer<String, String> createMockConsumer(){ | ||
MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); | ||
|
||
Map<org.apache.kafka.common.TopicPartition, Long> beginningOffsets = new HashMap<>(); | ||
beginningOffsets.put(new org.apache.kafka.common.TopicPartition(TOPIC, 0), 0L); | ||
consumer.updateBeginningOffsets(beginningOffsets); | ||
return consumer; | ||
} | ||
|
||
private void sendNextBatch(MockConsumer<String, String> consumer){ | ||
for(int i=0;i<SEND_BATCH && recordsMock.size()>0;i++) | ||
consumer.addRecord(recordsMock.pop()); | ||
|
||
} | ||
|
||
@Test | ||
public void shouldNotLoseMessages(TestContext ctx){ | ||
Vertx vertx = Vertx.vertx(); | ||
|
||
Async done = ctx.async(); | ||
|
||
initRecords(); | ||
|
||
MockConsumer<String, String> consumer = createMockConsumer(); | ||
KafkaReadStream<String, String> readStream = KafkaReadStream.create(vertx, consumer); | ||
KafkaConsumer<String, String> consumerVertx = new KafkaConsumerImpl<>(readStream); | ||
|
||
|
||
AtomicLong partitionOffset = new AtomicLong(-1); | ||
|
||
|
||
consumerVertx.handler((r)->{ | ||
long offset = r.offset(); | ||
|
||
partitionOffset.addAndGet(1); | ||
ctx.assertEquals(partitionOffset.get(), offset); | ||
|
||
System.out.println("offset " + offset); | ||
|
||
if(offset == TOTAL_MESSAGES-1){ | ||
consumerVertx.close(); | ||
done.complete(); | ||
} else { | ||
|
||
if(timer!=null) vertx.cancelTimer(timer); | ||
timer = vertx.setTimer(5, (t)->{ | ||
consumerVertx.pause(); | ||
vertx.getOrCreateContext().runOnContext((t1)->{ | ||
consumerVertx.commit(); | ||
consumerVertx.resume(); | ||
sendNextBatch(consumer); | ||
// sends two batches of messages | ||
vertx.getOrCreateContext().runOnContext((t2)->{ | ||
sendNextBatch(consumer); | ||
}); | ||
}); | ||
}); | ||
|
||
} | ||
|
||
}); | ||
|
||
consumerVertx.exceptionHandler(t->ctx.fail(t)); | ||
|
||
|
||
Set<TopicPartition> partitions = new LinkedHashSet<>(); | ||
partitions.add(new TopicPartition(TOPIC, 0)); | ||
|
||
consumerVertx.assign(partitions, (h)->{ | ||
sendNextBatch(consumer); | ||
}); | ||
|
||
} | ||
|
||
} |