背景说明

业务中需要同一个应用连接2套Kafka做消息的收发,于是引入以下配置实现

环境配置

spring:
  kafka:
    default:
      bootstrap-servers: kafka_server
      producer:
        key-serializer: org.apache.kafka.common.serialization.StringSerializer
        value-serializer: org.apache.kafka.common.serialization.StringSerializer
        type: sync
        acks: all
        retries: 3
      consumer:
        auto-commit-interval-ms: 1000
        auto-offset-reset: earliest
        enable-auto-commit: true
        group-id: kafka_group_id
        key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
        value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
        max-poll-records: 500
      listener:
        type: BATCH
        concurrency: 4
    big-data:
      bootstrap-servers: kafka.server.url:port
      consumer:
        auto-commit-interval-ms: 1000
        auto-offset-reset: earliest
        enable-auto-commit: true
        group-id: kafka_group_id
        key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
        value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
        max-poll-records: 500
      listener:
        type: BATCH
        concurrency: 4

配置类

第一套Kafka配置

@EnableKafka
@Configuration
public class DefaultKafkaConfig {

    // Each setting of the "default" Kafka cluster is bound individually from
    // spring.kafka.default.* so this cluster's beans stay fully independent of
    // any other cluster configured in the same application.
    @Value("${spring.kafka.default.bootstrap-servers}")
    private String server;
    @Value("${spring.kafka.default.consumer.auto-commit-interval-ms}")
    private String autoCommitIntervalMs;
    @Value("${spring.kafka.default.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${spring.kafka.default.consumer.enable-auto-commit}")
    private String enableAutoCommit;
    @Value("${spring.kafka.default.consumer.group-id}")
    private String groupId;
    @Value("${spring.kafka.default.consumer.key-deserializer}")
    private String keyDeserializer;
    @Value("${spring.kafka.default.consumer.value-deserializer}")
    private String valueDeserializer;
    @Value("${spring.kafka.default.consumer.max-poll-records}")
    private String maxPollRecords;
    @Value("${spring.kafka.default.listener.concurrency}")
    private Integer concurrency;
    @Value("${spring.kafka.default.producer.key-serializer}")
    private String keySerializer;
    @Value("${spring.kafka.default.producer.value-serializer}")
    private String valueSerializer;
    @Value("${spring.kafka.default.producer.acks}")
    private String acks;
    @Value("${spring.kafka.default.producer.retries}")
    private String retries;

    /**
     * Template for sending messages to the default cluster. Inject it with
     * {@code @Qualifier("defaultKafkaTemplate")} so it is not confused with
     * templates targeting other clusters.
     */
    @Bean
    public KafkaTemplate<String, Object> defaultKafkaTemplate() {
        ProducerFactory<String, Object> factory = producerFactory();
        return new KafkaTemplate<>(factory);
    }

    /**
     * Listener container factory for the default cluster. Consumers select it
     * via {@code @KafkaListener(containerFactory = "defaultKafkaListenerContainerFactory")}.
     * Configured for batch consumption (matches listener.type: BATCH).
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> defaultKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        // Max time (ms) a poll() blocks waiting for records before returning.
        factory.getContainerProperties().setPollTimeout(4000);
        // Deliver records to the listener as a List per poll rather than one by one.
        factory.setBatchListener(true);
        return factory;
    }

    /** Builds the consumer factory from the injected spring.kafka.default.consumer.* values. */
    private ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> properties = new HashMap<>();
        // Plain put(): the map is freshly created here, so putIfAbsent adds nothing.
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitIntervalMs);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        return new DefaultKafkaConsumerFactory<>(properties);
    }

    /** Builds the producer factory from the injected spring.kafka.default.producer.* values. */
    private DefaultKafkaProducerFactory<String, Object> producerFactory() {
        Map<String, Object> properties = new HashMap<>();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializer);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer);
        properties.put(ProducerConfig.ACKS_CONFIG, acks);
        properties.put(ProducerConfig.RETRIES_CONFIG, retries);
        return new DefaultKafkaProducerFactory<>(properties);
    }
}

第二套Kafka配置

@EnableKafka
@Configuration
public class BigDataKafkaConfig {

    // Settings for the second ("big-data") Kafka cluster, bound from
    // spring.kafka.big-data.* so they never collide with the default cluster.
    @Value("${spring.kafka.big-data.bootstrap-servers}")
    private String server;
    @Value("${spring.kafka.big-data.consumer.auto-commit-interval-ms}")
    private String autoCommitIntervalMs;
    @Value("${spring.kafka.big-data.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${spring.kafka.big-data.consumer.enable-auto-commit}")
    private String enableAutoCommit;
    @Value("${spring.kafka.big-data.consumer.group-id}")
    private String groupId;
    @Value("${spring.kafka.big-data.consumer.key-deserializer}")
    private String keyDeserializer;
    @Value("${spring.kafka.big-data.consumer.value-deserializer}")
    private String valueDeserializer;
    @Value("${spring.kafka.big-data.consumer.max-poll-records}")
    private String maxPollRecords;
    // FIX: previously read spring.kafka.default.listener.concurrency (copy-paste
    // error), which silently tied this cluster's listener concurrency to the
    // default cluster's configuration. Read it from this cluster's own property.
    @Value("${spring.kafka.big-data.listener.concurrency}")
    private Integer concurrency;

    /**
     * Listener container factory for the big-data cluster. Consumers select it
     * via {@code @KafkaListener(containerFactory = "bigDataKafkaListenerContainerFactory")}.
     * Configured for batch consumption (matches listener.type: BATCH).
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> bigDataKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        // Max time (ms) a poll() blocks waiting for records before returning.
        factory.getContainerProperties().setPollTimeout(4000);
        // Deliver records to the listener as a List per poll rather than one by one.
        factory.setBatchListener(true);
        return factory;
    }

    /** Builds the consumer factory from the injected spring.kafka.big-data.consumer.* values. */
    private ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> properties = new HashMap<>();
        // Plain put(): the map is freshly created here, so putIfAbsent adds nothing.
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitIntervalMs);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        return new DefaultKafkaConsumerFactory<>(properties);
    }
}

具体使用

消费方使用containerFactory区分具体Kafka环境

@KafkaListener(topics = {"my_kafka_topic"}, containerFactory = "defaultKafkaListenerContainerFactory")

发送方使用

@Autowired
@Qualifier("defaultKafkaTemplate")
private KafkaTemplate defaultKafkaTemplate;
defaultKafkaTemplate.send("my_kafka_topic", "message");

参考资料:
https://zhuanlan.zhihu.com/p/374546956


老污的猫
30 声望5 粉丝