Spring Kafka 请求回复分区模式:处理消息后无法提交偏移量

问题描述

我正在使用 Spring Kafka 实现同步请求回复模式。 堆栈:

org.springframework.cloud:spring-cloud-dependencies:2020.0.2

org.springframework.kafka:spring-kafka

io.confluent:kafka-avro-serializer:6.2.0

Java 11


我有一个包含 5 个分区的请求主题和一个包含 8 个分区的响应主题。

我的响应消费者端配置如下。为简洁起见,我没有展示生产者配置:

  @Bean
    public ReplyingKafkaTemplate<String,PhmsPatientSearchRequest,PhmsPatientSearchResponse> replyKafkaTemplate(ProducerFactory<String,PhmsPatientSearchRequest> pf,KafkaMessageListenerContainer<String,PhmsPatientSearchResponse> container) {
        // Request/reply template: sends PhmsPatientSearchRequest records and correlates
        // PhmsPatientSearchResponse replies received by the supplied listener container.
        // Fix: ReplyingKafkaTemplate takes THREE type parameters <K, V, R>; the original
        // local variable declared only two, which does not compile.
        final ReplyingKafkaTemplate<String,PhmsPatientSearchRequest,PhmsPatientSearchResponse> repl = new ReplyingKafkaTemplate<>(pf,container);
        // Convert JSON payloads for the String-keyed messages.
        repl.setMessageConverter(new StringJsonMessageConverter());
        return repl;
    }

 @Bean
    public KafkaMessageListenerContainer<String,PhmsPatientSearchResponse> replyContainer(ConsumerFactory<String,PhmsPatientSearchResponse> replyConsumerFactory) {
        // Reply-side listener container pinned to a single fixed partition of the reply
        // topic (manual assignment — no consumer-group partition balancing for this container).
        // Fix: parameterize the previously raw KafkaMessageListenerContainer return type
        // so the replyKafkaTemplate bean's injection point type-checks without unchecked warnings.
        TopicPartitionOffset topicPartitionOffset = new TopicPartitionOffset(replyTopic,replyPartition);
        ContainerProperties containerProperties = new ContainerProperties(topicPartitionOffset);
        final KafkaMessageListenerContainer<String,PhmsPatientSearchResponse> msgListenerContainer = new KafkaMessageListenerContainer<>(replyConsumerFactory,containerProperties);
        return msgListenerContainer;
    }

 @Bean
    public ConsumerFactory<String,PhmsPatientSearchResponse> replyConsumerFactory() {
        // Consumer factory for the reply container, built from the shared consumer configuration bean.
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

 @Bean
    public Map<String,Object> consumerConfigs() {
        // Consumer properties for the reply-side Avro consumer.
        // Fix 1: the ConsumerConfig constant names were misspelled "..._CONfig";
        //        the real constants are "..._CONFIG" — the original did not compile.
        // Fix 2: heartbeat.interval.ms must be well below session.timeout.ms
        //        (recommended <= 1/3). The original 30000 vs 40000 leaves almost no
        //        margin, so a single delayed heartbeat triggers the group rebalance
        //        seen in the CommitFailedException stack trace.
        Map<String,Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG,"ResponseConsumer");
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG,30000);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG,40000);
        // 1/4 of session.timeout.ms — within the broker-recommended 1/3 bound.
        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG,10000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,KafkaAvroDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,KafkaAvroDeserializer.class);
        props.put(JsonDeserializer.TRUSTED_PACKAGES,replyDeserializerTrustedPkg);
        props.put(SCHEMA_REGISTRY_URL,schemRegistryUrl);
        props.put(SPECIFIC_AVRO_READER,true);
        return props;
    }

我的请求回复代码


  // Build the request record. Fix: use the diamond operator instead of a raw
  // ProducerRecord, which previously compiled only with an unchecked warning.
  ProducerRecord<String,PhmsPatientSearchRequest> patientSearchRequestRecord = new ProducerRecord<>(requestTopic,phmsPatientSearchRequest);

        // Carry the message key as a header; ReplyingKafkaTemplate fills in the
        // reply-topic/partition and correlation-id headers itself (hence the two
        // commented-out manual headers are unnecessary).
        patientSearchRequestRecord.headers().add(new RecordHeader(KafkaHeaders.MESSAGE_KEY,messageKey.getBytes()));
        //patientSearchRequestRecord.headers().add(new RecordHeader(KafkaHeaders.REPLY_TOPIC,replyTopic.getBytes()));
        //patientSearchRequestRecord.headers().add(new RecordHeader(KafkaHeaders.REPLY_PARTITION,intToBytesBigEndian(replyPartition)));

        // Publish the request and obtain a future that completes when the
        // correlated reply arrives on the reply container's partition.
        RequestReplyFuture<String,PhmsPatientSearchResponse> sendAndReceive = replyingKafkaTemplate.sendAndReceive(patientSearchRequestRecord);

        // Block for the reply. NOTE(review): prefer the timed overload
        // get(timeout, unit) so a lost reply cannot block the caller forever.
        ConsumerRecord<String,PhmsPatientSearchResponse> consumerRecord = sendAndReceive.get();

我在正确的分区上收到了响应消息,但偏移量未被提交。每次我的响应消费者读取消息时都会观察到以下堆栈跟踪。我不认为这是由于轮询(poll)延迟造成的。


org.springframework.kafka.KafkaException: Seek to current after exception; nested exception is org.apache.kafka.clients.consumer.CommitFailedException: Commit cannot be completed since the group has already rebalanced and assigned the partitions to another member. This means that the time between subsequent calls to poll() was longer than the configured max.poll.interval.ms,which typically implies that the poll loop is spending too much time message processing. You can address this either by increasing max.poll.interval.ms or by reducing the maximum size of batches returned in poll() with max.poll.records.
    at org.springframework.kafka.listener.SeekToCurrentBatchErrorHandler.handle(SeekToCurrentBatchErrorHandler.java:79) ~[spring-kafka-2.7.4.jar:2.7.4]
    at org.springframework.kafka.listener.RecoveringBatchErrorHandler.handle(RecoveringBatchErrorHandler.java:124) ~[spring-kafka-2.7.4.jar:2.7.4]
    at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.handleConsumerException(KafkaMessageListenerContainer.java:1606) ~[spring-kafka-2.7.4.jar:2.7.4]
    at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:1210) ~[spring-kafka-2.7.4.jar:2.7.4]
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) ~[na:na]
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[na:na]
    at java.base/java.lang.Thread.run(Thread.java:829) ~[na:na]
Caused by: org.apache.kafka.clients.consumer.CommitFailedException: Commit cannot be completed since the group has already rebalanced and assigned the partitions to another member. This means that the time between subsequent calls to poll() was longer than the configured max.poll.interval.ms,which typically implies that the poll loop is spending too much time message processing. You can address this either by increasing max.poll.interval.ms or by reducing the maximum size of batches returned in poll() with max.poll.records.
    at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator$OffsetCommitResponseHandler.handle(ConsumerCoordinator.java:1256) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator$OffsetCommitResponseHandler.handle(ConsumerCoordinator.java:1163) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$CoordinatorResponseHandler.onSuccess(AbstractCoordinator.java:1173) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.AbstractCoordinator$CoordinatorResponseHandler.onSuccess(AbstractCoordinator.java:1148) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.RequestFuture$1.onSuccess(RequestFuture.java:206) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.RequestFuture.fireSuccess(RequestFuture.java:169) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.RequestFuture.complete(RequestFuture.java:129) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient$RequestFutureCompletionHandler.fireCompletion(ConsumerNetworkClient.java:602) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.firePendingCompletedRequests(ConsumerNetworkClient.java:412) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:297) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:236) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:215) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.commitOffsetsSync(ConsumerCoordinator.java:1005) ~[kafka-clients-2.7.1.jar:na]
    at org.apache.kafka.clients.consumer.KafkaConsumer.commitSync(KafkaConsumer.java:1495) ~[kafka-clients-2.7.1.jar:na]
    at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doCommitSync(KafkaMessageListenerContainer.java:2656) ~[spring-kafka-2.7.4.jar:2.7.4]
    at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.commitSync(KafkaMessageListenerContainer.java:2651) ~[spring-kafka-2.7.4.jar:2.7.4]
    at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.commitIfNecessary(KafkaMessageListenerContainer.java:2637) ~[spring-kafka-2.7.4.jar:2.7.4]
    at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.processCommits(KafkaMessageListenerContainer.java:2451) ~[spring-kafka-2.7.4.jar:2.7.4]
    at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1235) ~[spring-kafka-2.7.4.jar:2.7.4]
    at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:1161) ~[spring-kafka-2.7.4.jar:2.7.4]
    ... 3 common frames omitted


如果我不使用 TopicPartitionOffset,那么我的消费者会监听响应主题中的所有分区并且没有问题。

就此事请求帮助。

解决方法

我刚刚复制了您的代码(但使用了 Strings),它按预期工作...

@SpringBootApplication
public class So68461640Application {

    public static void main(String[] args) {
        SpringApplication.run(So68461640Application.class,args);
    }

    /** Request topic with 5 partitions, mirroring the question's setup. */
    @Bean
    public NewTopic topic() {
        return TopicBuilder.name("so68461640").partitions(5).replicas(1).build();
    }

    /** Reply topic with 8 partitions, mirroring the question's setup. */
    @Bean
    public NewTopic reply() {
        return TopicBuilder.name("so68461640.replies").partitions(8).replicas(1).build();
    }

    /**
     * Request/reply template. Fix: the local variable (like the question's code) was
     * declared with two type parameters; ReplyingKafkaTemplate takes three {@code <K, V, R>}.
     */
    @Bean
    public ReplyingKafkaTemplate<String,String,String> replyKafkaTemplate(ProducerFactory<String,String> pf,KafkaMessageListenerContainer<String,String> container) {
        final ReplyingKafkaTemplate<String,String,String> repl = new ReplyingKafkaTemplate<>(
                pf,container);
//      repl.setMessageConverter(new StringJsonMessageConverter());
        return repl;
    }

    /**
     * Reply container pinned to partition 3 of the reply topic via TopicPartitionOffset
     * (manual assignment). Fix: parameterized the previously raw return type.
     */
    @Bean
    public KafkaMessageListenerContainer<String,String> replyContainer(
            ConsumerFactory<String,String> replyConsumerFactory) {

        TopicPartitionOffset topicPartitionOffset = new TopicPartitionOffset("so68461640.replies",3);
        ContainerProperties containerProperties = new ContainerProperties(topicPartitionOffset);
        final KafkaMessageListenerContainer<String,String> msgListenerContainer = new KafkaMessageListenerContainer<>(
                replyConsumerFactory,containerProperties);
        return msgListenerContainer;
    }

    /** Consumer factory for the reply container, using plain String deserialization. */
    @Bean
    public ConsumerFactory<String,String> replyConsumerFactory() {
        final DefaultKafkaConsumerFactory<String,String> consumerFactory = new DefaultKafkaConsumerFactory<>(
                consumerConfigs());
        return consumerFactory;
    }

    /** Consumer properties matching the question's, but with String deserializers. */
    @Bean
    public Map<String,Object> consumerConfigs() {
        Map<String,Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG,"ResponseConsumer");
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG,30000);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG,40000);
        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG,30000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");
        return props;
    }


    /** Server side: echoes the request upper-cased back to the reply topic. */
    @KafkaListener(id = "so68461640",topics = "so68461640")
    @SendTo
    public String listen(String in) {
        System.out.println(in);
        return in.toUpperCase();
    }

    /** Template the listener container factory uses to send @SendTo replies. */
    @Bean
    KafkaTemplate<String,String> replyTemplate(ProducerFactory<String,String> pf) {
        return new KafkaTemplate<>(pf);
    }

    /**
     * Demo runner: sends one request and waits (bounded) for the reply.
     * Fix: the replyKafkaTemplate parameter was declared with two type parameters
     * (needs three), and ProducerRecord is now created with the diamond operator
     * instead of a raw type.
     */
    @Bean
    public ApplicationRunner runner(ReplyingKafkaTemplate<String,String,String> replyKafkaTemplate,KafkaTemplate<String,String> replyTemplate,ConcurrentKafkaListenerContainerFactory<?,?> factory) {

        factory.setReplyTemplate(replyTemplate);

        return args -> {
            RequestReplyFuture<String,String> future =
                    replyKafkaTemplate.sendAndReceive(new ProducerRecord<>("so68461640",null,"test"));
            future.getSendFuture().get(10,TimeUnit.SECONDS);
            ConsumerRecord<String,String> reply = future.get(10,TimeUnit.SECONDS);
            System.out.println(reply.value());
        };
    }

}
test
TEST
% kafka-consumer-groups --bootstrap-server localhost:9092 --describe --group ResponseConsumer

Consumer group 'ResponseConsumer' has no active members.

GROUP            TOPIC              PARTITION  CURRENT-OFFSET  LOG-END-OFFSET  LAG             CONSUMER-ID     HOST            CLIENT-ID
ResponseConsumer so68461640.replies 3          1               1               0               -               -               -