Preface

I recently installed Kafka and RabbitMQ with Helm 3 and wanted to integrate them into Spring. It turned out not to be that simple: there are official samples, but they are missing some of the necessary code, so I worked it out step by step and am sharing the result here.

Sending and receiving messages

First, set up the Kafka and RabbitMQ installations. Once both are running, we can configure Spring.

1. First, add the relevant dependencies:

<properties>
    <java.version>1.8</java.version>
    <spring-boot.version>2.4.2</spring-boot.version>
    <spring-cloud.version>2020.0.1</spring-cloud.version>
    <spring-cloud-alibaba.version>2021.1</spring-cloud-alibaba.version>
</properties>
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-rabbit</artifactId>
</dependency>

2. Configure application.yml

Here is my configuration:

spring:
  cloud:
    stream:
      function:
        definition: testKafkaOut;testKafkaIn;testRabbitOut;testRabbitIn
      bindings:
        testKafkaOut-out-0: 
          binder: kafka-binder
          destination: test
          # Message content type: JSON here; use "text/plain" for plain text
          content-type: application/json 
        testKafkaIn-in-0:
          binder: kafka-binder
          destination: test
          content-type: application/json 
          group: log_group
        testRabbitOut-out-0:
          binder: rabbit-binder
          destination: dev
          content-type: application/json
        testRabbitIn-in-0:
          binder: rabbit-binder
          destination: dev
          content-type: application/json
          group: dev-group
      binders:
        kafka-binder:
          type: kafka
          environment:
            spring:
              cloud:
                stream:
                  kafka:
                    binder:
                      brokers: xxx.xxx.xxx.xxx:xxxx
                      auto-create-topics: true
        rabbit-binder:
          type: rabbit # messaging middleware type
          environment: # RabbitMQ connection settings for this binder
            spring:
              rabbitmq:
                host: xxx.xxx.xxx.xxx
                port: xxxx
                username: user
                password: password
                virtual-host: dev

That completes the Spring Cloud Stream configuration for both Kafka and RabbitMQ. Note that each binding name follows the <functionName>-<in|out>-<index> convention, so it must match the names listed in function.definition.

3. Sending and receiving messages

Create a Java class to send and receive the messages:

import org.springframework.context.annotation.Bean;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.stereotype.Component;
import java.util.function.Consumer;
import java.util.function.Supplier;

@Component
public class MessageProcessor {
    // Supplier beans are polled automatically by Spring Cloud Stream (once per second by default)
    @Bean
    public Supplier<Message<String>> testKafkaOut() {
        return () -> MessageBuilder.withPayload("Hello from Kafka!").build();
    }

    @Bean
    public Consumer<Message<String>> testKafkaIn() {
        return message -> System.out.println("Received from Kafka: " + message.getPayload());
    }

    @Bean
    public Supplier<Message<String>> testRabbitOut() {
        return () -> MessageBuilder.withPayload("Hello from RabbitMQ!").build();
    }

    @Bean
    public Consumer<Message<String>> testRabbitIn() {
        return message -> System.out.println("Received from RabbitMQ: " + message.getPayload());
    }
}

Alternatively, use the simpler approach below:

Sending messages:

@Autowired
private StreamBridge streamBridge;
...
streamBridge.send("testRabbitOut-out-0", "hello rabbitmq");
streamBridge.send("testKafkaOut-out-0", "hello kafka");
...
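
For completeness, here is what the sending side could look like as a self-contained component. This is only a minimal sketch; the MessageSender class name is mine rather than from any official sample:

import org.springframework.cloud.stream.function.StreamBridge;
import org.springframework.stereotype.Component;

@Component
public class MessageSender {

    private final StreamBridge streamBridge;

    public MessageSender(StreamBridge streamBridge) {
        this.streamBridge = streamBridge;
    }

    public void sendAll() {
        // The first argument is the output binding name defined in application.yml
        streamBridge.send("testRabbitOut-out-0", "hello rabbitmq");
        streamBridge.send("testKafkaOut-out-0", "hello kafka");
    }
}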

Receiving messages:

import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;
import java.util.function.Consumer;

@Component
public class ConsumersHandler {
    @Bean
    public Consumer<String> testKafkaIn() {
        return str -> {
            System.out.println("Successfully received message from kafka: " + str);
        };
    }

    @Bean
    public Consumer<String> testRabbitIn() {
        return str -> {
            System.out.println("Successfully received message from rabbitmq: " + str);
        };
    }
}

Both approaches work; which one to use is up to you. Here is the console output when the messages are received:

Successfully received message from rabbitmq: hello rabbitmq
Successfully received message from kafka: hello kafka

Example: configuring the RabbitMQ exchange type, queues, and routing keys

Here is the producer-side YAML:

spring:
  cloud:
    stream:
      function:
        definition: logConsumer 
      bindings:
        logSupplier-out-0:
          binder: kafka-binder
          destination: ${server.kafka.exchange}
          content-type: application/json
        logConsumer-in-0:
          binder: kafka-binder
          destination: ${server.kafka.exchange}
          content-type: application/json 
          group: log_group
        addUserEvent-out-0:
          binder: rabbit-binder
          destination: ${server.rabbitmq.exchange}
          content-type: application/json
          producer:
            required-groups: addUserEvent-queue
        addUserPoints-out-0:
          binder: rabbit-binder
          destination: ${server.rabbitmq.exchange}
          content-type: application/json
          producer:
            required-groups: addUserPoints-queue
        copyUrlRemind-out-0:
          binder: rabbit-binder
          destination: ${server.rabbitmq.exchange}
          content-type: application/json
          producer:
            required-groups: copyUrlRemind-queue
      rabbit:
        bindings:
          addUserEvent-out-0:
            producer:
              declare-exchange: true
              exchange-type: 'direct'
              # The routing key must be set here; setting it under spring.cloud.stream.bindings has no effect
              routing-key-expression: '''addUserEvent-routing-key'''
          addUserPoints-out-0:
            producer:
              declare-exchange: true
              exchange-type: 'direct'
              # The routing key must be set here; setting it under spring.cloud.stream.bindings has no effect
              routing-key-expression: '''addUserPoints-routing-key'''
          copyUrlRemind-out-0:
            producer:
              declare-exchange: true
              exchange-type: 'direct'
              # The routing key must be set here; setting it under spring.cloud.stream.bindings has no effect
              routing-key-expression: '''copyUrlRemind-routing-key'''
      binders:
        kafka-binder:
          type: kafka
          environment:
            spring:
              cloud:
                stream:
                  kafka:
                    binder:
                      brokers: xxx.xxx.xxx.xxx:xxxx
                      auto-create-topics: true
        rabbit-binder:
          type: rabbit # messaging middleware type
          environment: # RabbitMQ connection settings for this binder
            spring:
              rabbitmq:
                host: xxx.xxx.xxx.xxx
                port: xxxx
                username: user
                password: password
                virtual-host: dev
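
On the producer side, the addUserEvent-out-0, addUserPoints-out-0, and copyUrlRemind-out-0 bindings above are meant to be driven through StreamBridge rather than Supplier beans (which is why they do not appear in function.definition). Below is a minimal sketch of such a publisher; the UserEventPublisher class name is mine and the payload is left as a plain Object:

import org.springframework.cloud.stream.function.StreamBridge;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.stereotype.Component;

@Component
public class UserEventPublisher {

    private final StreamBridge streamBridge;

    public UserEventPublisher(StreamBridge streamBridge) {
        this.streamBridge = streamBridge;
    }

    public void publishAddUserEvent(Object userEvent) {
        // The exchange (destination), queue (required-groups) and routing key all come
        // from the binding configuration above, so the code only needs the binding name.
        streamBridge.send("addUserEvent-out-0", MessageBuilder.withPayload(userEvent).build());
    }
}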

And here is the consumer-side YAML:

spring:
  cloud:
    stream:
      function:
        definition: logSupplier;logConsumer;addUserEvent;addUserPoints;copyUrlRemind
      bindings:
        logSupplier-out-0:
          binder: kafka-binder
          destination: ${server.kafka.exchange}
          content-type: application/json 
        logConsumer-in-0:
          binder: kafka-binder
          destination: ${server.kafka.exchange}
          content-type: application/json
          group: log_group
        addUserEvent-in-0:
          binder: rabbit-binder
          destination: ${server.rabbitmq.exchange}
          content-type: application/json
          group: addUserEvent-queue
        addUserPoints-in-0:
          binder: rabbit-binder
          destination: ${server.rabbitmq.exchange}
          content-type: application/json
          group: addUserPoints-queue
        copyUrlRemind-in-0:
          binder: rabbit-binder
          destination: ${server.rabbitmq.exchange}
          content-type: application/json
          group: copyUrlRemind-queue
      rabbit:
        bindings:
          addUserEvent-in-0:
            consumer:
              declare-exchange: true
              exchange-type: 'direct'
              # The routing key must be set here; setting it under spring.cloud.stream.bindings has no effect
              binding-routing-key: addUserEvent-routing-key
          addUserPoints-in-0:
            consumer:
              declare-exchange: true
              exchange-type: 'direct'
              # The routing key must be set here; setting it under spring.cloud.stream.bindings has no effect
              binding-routing-key: addUserPoints-routing-key
          copyUrlRemind-in-0:
            consumer:
              declare-exchange: true
              exchange-type: 'direct'
              # The routing key must be set here; setting it under spring.cloud.stream.bindings has no effect
              binding-routing-key: copyUrlRemind-routing-key
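
Each name listed in function.definition above must resolve to a function bean, otherwise the application fails to start. Below is a partial sketch of the consumer side covering two of the bindings; logSupplier, logConsumer, and copyUrlRemind would follow the same pattern, and the class name and log messages are only illustrative:

import java.util.function.Consumer;

import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;

@Component
public class RabbitEventConsumers {

    @Bean
    public Consumer<String> addUserEvent() {
        // Bound to addUserEvent-in-0; with group addUserEvent-queue, the queue is bound
        // to the exchange with routing key addUserEvent-routing-key as configured above
        return payload -> System.out.println("addUserEvent received: " + payload);
    }

    @Bean
    public Consumer<String> addUserPoints() {
        // Bound to addUserPoints-in-0
        return payload -> System.out.println("addUserPoints received: " + payload);
    }
}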

That completes the configuration. After starting the application, check the RabbitMQ management console to confirm the exchanges, queues, and bindings were created.

Problems

Problem 1:
After setting this up in one microservice, it kept connecting to the local RabbitMQ even though I had configured a remote one:

Attempting to connect to: [localhost:5672]

I ran it past ChatGPT-4, which gave the following solution:

I understand your situation. From the configuration you provided, you have already set the RabbitMQ server address, port, username, password, and virtual host, yet the application still tries to connect to the local address (localhost:5672).

The problem may be related to the nested environment settings used in the binder configuration. To work around it, adjust the configuration slightly and move the rabbit-binder connection settings to the top-level spring.rabbitmq section:

spring:
  rabbitmq:
    host: xxx.xxx.xxx.xxx
    port: xxxx
    username: user
    password: password
    virtual-host: dev
  cloud:
    stream:
      bindings:
        # your other bindings go here
      binders:
        rabbit-binder:
          type: rabbit

This way the application should use the RabbitMQ server address you provided. After restarting, it should no longer try to connect to localhost:5672 but to the address you configured.

That fixed it, but strangely my other projects never had this problem; I may come back to it at some point.

Problem 2:
streamBridge.send fails with "Payload must not be null" when passed a string produced by JSON.toJSONString.
I asked ChatGPT, and it suggested the following:

I understand your problem. In this case, you can try passing a Message<?> object as the argument to streamBridge.send() instead of sending the JSON string directly. This will help you avoid the "Payload must not be null" error.

For example, suppose you have already converted a UserEvent object to a JSON string with JSON.toJSONString():

UserEvent userEvent = getUserEvent(); // 获取一个非空的UserEvent实例
String userEventStr = JSON.toJSONString(userEvent);

Now create a Message<?> object with the JSON string as its payload and pass it to send():

import org.springframework.messaging.Message;
import org.springframework.messaging.support.MessageBuilder;

// ...

Message<String> message = MessageBuilder.withPayload(userEventStr).build();
streamBridge.send("addUserEvent-out-0", message);

By using a Message<?> object, you ensure that streamBridge.send() does not raise the "Payload must not be null" error even when sending a JSON string. Also make sure the userEventStr variable is not null when send() is called.

If the problem persists, check the rest of your configuration and code to make sure nothing is implicitly affecting the message payload.
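
As an aside, since the binding declares content-type: application/json, the binder's message converter can usually perform the JSON serialization itself, so another option is to wrap the POJO directly instead of pre-serializing it with JSON.toJSONString. A small sketch, reusing the hypothetical getUserEvent() from above:

UserEvent userEvent = getUserEvent();
// Let the application/json message converter serialize the POJO to JSON
streamBridge.send("addUserEvent-out-0", MessageBuilder.withPayload(userEvent).build());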

Summary

1. When configuring application.yml, pay attention to how function.definition is written:

function:
  definition: testKafkaOut;testKafkaIn;testRabbitOut;testRabbitIn

whereas I initially wrote:

testKafkaOut,testKafkaIn,testRabbitOut,testRabbitIn

Using commas composes the functions into a single function instead of declaring them separately, which caused this error:

kafka-binder,rabbit-binder, and no default binder has been set.

Don't underestimate this: my carelessness here cost me a week to track down.
2. Also in application.yml, default-binder does not need to be set, because each entry under bindings already specifies its binder.
3. With the configuration below, the application reports at startup that the RabbitMQ binder connection settings cannot be found and falls back to localhost:5672:

spring:
  cloud:
    stream:
      kafka:
        binder:
          auto-create-topics: true
          brokers: xxx.xxx.xxx.xxx:xxxx
      rabbit:
        binder:
          host: xxx.xxx.xxx.xxx
          port: xxxx
          username: user
          password: password
          virtual-host: dev

This layout is wrong, so use the binders style shown above instead.
4. I started out with ChatGPT 3.5, which sometimes produced code targeting Spring Cloud Stream versions before 3.1; after switching to GPT-4 the suggested code was current, so GPT-4 is worth trying.

5. In practice, almost any mistake in the YAML surfaces as the same error: kafka-binder,rabbit-binder, and no default binder has been set. When you see it, hunt down the misconfigured part and fix it.

6. binding-routing-key takes a plain value with no quotes, whereas routing-key-expression is a SpEL expression, so a literal key must be wrapped in single quotes inside the YAML string (hence the triple quotes shown above).

7. According to the Spring reference documentation, the spring.cloud.stream.rabbit.bindings properties complement the spring.cloud.stream.bindings ones, for example:

spring.cloud.stream.bindings.<binding name>.destination=myExchange
spring.cloud.stream.bindings.<binding name>.group=myQueue
spring.cloud.stream.rabbit.bindings.<binding name>.consumer.bindQueue=false
spring.cloud.stream.rabbit.bindings.<binding name>.consumer.declareExchange=false
spring.cloud.stream.rabbit.bindings.<binding name>.consumer.queueNameGroupOnly=true
spring.cloud.stream.rabbit.bindings.<binding name>.consumer.bindingRoutingKey=myRoutingKey
spring.cloud.stream.rabbit.bindings.<binding name>.consumer.exchangeType=<type>
spring.cloud.stream.rabbit.bindings.<binding name>.producer.routingKeyExpression='myRoutingKey'

References

Spring Cloud Stream functional programming integration with kafka/rabbit
spring-cloud-stream-samples
Spring Cloud Stream integration with Kafka
Getting started with Spring Cloud Stream Rabbit 3.1.3
Official rabbit binder properties documentation

