Spring Boot + Redis cluster: errors during master/replica failover?

New to this, please bear with me.

Looking for advice:
The Redis cluster has 3 masters and 3 replicas, and the setup works: when a master goes down, its replica is promoted automatically.
Problem: the application connects to this Redis cluster through Spring Boot. When a master goes down, requests fail for roughly 2 seconds while the replica is being promoted; once the failover completes, everything is normal again. It seems to be something about the connection to that node, but I can't find the cause. Any advice would be appreciated.
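
To make the "roughly 2 seconds" concrete, a minimal probe along these lines can measure how long commands keep failing during a manual failover (a sketch written for this question only; the key name and 100 ms interval are arbitrary):

import org.springframework.dao.DataAccessException;
import org.springframework.data.redis.core.RedisTemplate;

public class FailoverProbe {

    private final RedisTemplate<String, Object> redisTemplate;

    public FailoverProbe(RedisTemplate<String, Object> redisTemplate) {
        this.redisTemplate = redisTemplate;
    }

    /** Writes every 100 ms and logs how long commands are rejected after a master is killed. */
    public void run() throws InterruptedException {
        long firstErrorAt = 0;
        while (true) {
            try {
                redisTemplate.opsForValue().set("failover-probe", System.currentTimeMillis());
                if (firstErrorAt != 0) {
                    System.out.println("recovered after " + (System.currentTimeMillis() - firstErrorAt) + " ms");
                    firstErrorAt = 0;
                }
            } catch (DataAccessException e) { // CLUSTERDOWN arrives wrapped in RedisSystemException
                if (firstErrorAt == 0) {
                    firstErrorAt = System.currentTimeMillis();
                }
            }
            Thread.sleep(100);
        }
    }
}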

Error message

2022-10-24 13:46:34.974 ERROR 4425 --- [nio-8080-exec-1] o.a.c.c.C.[.[.[/].[dispatcherServlet]    : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed; nested exception is org.springframework.data.redis.RedisSystemException: Error in execution; nested exception is io.lettuce.core.RedisCommandExecutionException: CLUSTERDOWN The cluster is down] with root cause

io.lettuce.core.RedisCommandExecutionException: CLUSTERDOWN The cluster is down
    at io.lettuce.core.ExceptionFactory.createExecutionException(ExceptionFactory.java:135) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.lettuce.core.ExceptionFactory.createExecutionException(ExceptionFactory.java:108) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.lettuce.core.protocol.AsyncCommand.completeResult(AsyncCommand.java:118) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.lettuce.core.protocol.AsyncCommand.complete(AsyncCommand.java:109) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.lettuce.core.protocol.CommandWrapper.complete(CommandWrapper.java:59) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.lettuce.core.cluster.ClusterCommand.complete(ClusterCommand.java:66) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.lettuce.core.protocol.CommandHandler.complete(CommandHandler.java:680) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.lettuce.core.protocol.CommandHandler.decode(CommandHandler.java:640) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.lettuce.core.protocol.CommandHandler.channelRead(CommandHandler.java:591) ~[lettuce-core-5.3.5.RELEASE.jar:5.3.5.RELEASE]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:719) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:655) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:581) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) ~[netty-transport-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) ~[netty-common-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[netty-common-4.1.55.Final.jar:4.1.55.Final]
    at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[netty-common-4.1.55.Final.jar:4.1.55.Final]
    at java.lang.Thread.run(Thread.java:748) [na:1.8.0_302]
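
An obvious stopgap is to retry commands that fail during the failover window. A rough sketch of what I mean (a plain retry loop written for this question, not code already in the project, and not the root-cause fix I'm hoping for):

import java.util.function.Supplier;

import org.springframework.data.redis.RedisSystemException;

public final class RedisRetry {

    private RedisRetry() {
    }

    /**
     * Runs a Redis operation and retries it a few times when the cluster briefly
     * reports CLUSTERDOWN during a failover. Other transient exceptions may need
     * the same treatment.
     */
    public static <T> T withRetry(Supplier<T> operation, int maxAttempts, long backoffMillis) {
        RedisSystemException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return operation.get();
            } catch (RedisSystemException e) {
                last = e;
                try {
                    Thread.sleep(backoffMillis);
                } catch (InterruptedException interrupted) {
                    Thread.currentThread().interrupt();
                    throw e;
                }
            }
        }
        throw last;
    }
}

// usage: RedisRetry.withRetry(() -> redisTemplate.opsForValue().get("someKey"), 5, 500);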

Configuration class

@EnableCaching
@Configuration
public class RedisConfig extends CachingConfigurerSupport {

    @Autowired
    private RedisProperties redisProperties;

    public GenericObjectPoolConfig<?> genericObjectPoolConfig(RedisProperties.Pool properties) {
        GenericObjectPoolConfig<?> config = new GenericObjectPoolConfig<>();
        config.setMaxTotal(properties.getMaxActive());
        config.setMaxIdle(properties.getMaxIdle());
        config.setMinIdle(properties.getMinIdle());
        if (properties.getTimeBetweenEvictionRuns() != null) {
            config.setTimeBetweenEvictionRunsMillis(properties.getTimeBetweenEvictionRuns().toMillis());
        }
        if (properties.getMaxWait() != null) {
            config.setMaxWaitMillis(properties.getMaxWait().toMillis());
        }
        return config;
    }

    @Bean(destroyMethod = "destroy")
    public LettuceConnectionFactory lettuceConnectionFactory() {

        // Enable adaptive and periodic cluster topology refresh
        ClusterTopologyRefreshOptions clusterTopologyRefreshOptions = ClusterTopologyRefreshOptions.builder()
                // Enable all adaptive refresh triggers; without them a cluster topology change
                // (such as a failover) leads to connection errors
                .enableAllAdaptiveRefreshTriggers()
                // Rate-limit window between adaptive refreshes (default 30 seconds)
                .adaptiveRefreshTriggersTimeout(Duration.ofSeconds(10))
                // Periodic refresh is disabled by default; when enabled the default period is
                // 60 seconds (ClusterTopologyRefreshOptions.DEFAULT_REFRESH_PERIOD)
                .enablePeriodicRefresh(Duration.ofSeconds(10))
                .build();

        // https://github.com/lettuce-io/lettuce-core/wiki/Client-Options
        ClientOptions clientOptions = ClusterClientOptions.builder()
                .autoReconnect(true)
                .maxRedirects(6)
                .topologyRefreshOptions(clusterTopologyRefreshOptions)
                .build();

        LettuceClientConfiguration clientConfig = LettucePoolingClientConfiguration.builder()
                .poolConfig(genericObjectPoolConfig(redisProperties.getLettuce().getPool()))
                .readFrom(ReadFrom.MASTER_PREFERRED)
                .clientOptions(clientOptions)
                // Default is RedisURI.DEFAULT_TIMEOUT (60 seconds)
                .commandTimeout(redisProperties.getTimeout())
                .build();

        Set<RedisNode> nodes = new HashSet<RedisNode>();
        List<String> clusterNodes = redisProperties.getCluster().getNodes();
        clusterNodes.forEach(address -> nodes.add(new RedisNode(address.split(":")[0].trim(),
                Integer.valueOf(address.split(":")[1]))));
        RedisClusterConfiguration clusterConfiguration = new RedisClusterConfiguration();
        clusterConfiguration.setClusterNodes(nodes);
        clusterConfiguration.setPassword(RedisPassword.of(redisProperties.getPassword()));
        clusterConfiguration.setMaxRedirects(redisProperties.getCluster().getMaxRedirects());

        LettuceConnectionFactory lettuceConnectionFactory = new LettuceConnectionFactory(clusterConfiguration,
                clientConfig);
        // Whether multiple threads may share one native connection (default true);
        // with false, every operation takes its own connection from the pool
        lettuceConnectionFactory.setShareNativeConnection(false);
        // Reset the underlying shared connection so it is re-initialized on the next access
        lettuceConnectionFactory.resetConnection();
        return lettuceConnectionFactory;
    }

    @Bean
    public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory factory) {
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        // Explicit serializers avoid garbled keys and values
        RedisSerializer<String> stringSerializer = new StringRedisSerializer();
        Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer =
                new Jackson2JsonRedisSerializer<>(Object.class);
        ObjectMapper om = new ObjectMapper();
        om.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
        om.activateDefaultTyping(LaissezFaireSubTypeValidator.instance,
                ObjectMapper.DefaultTyping.NON_FINAL, JsonTypeInfo.As.PROPERTY);
        jackson2JsonRedisSerializer.setObjectMapper(om);
        redisTemplate.setConnectionFactory(factory);
        // Serialize keys as plain strings
        redisTemplate.setKeySerializer(stringSerializer);
        // Hash keys are also serialized as plain strings
        redisTemplate.setHashKeySerializer(stringSerializer);
        // Serialize values with Jackson
        redisTemplate.setValueSerializer(jackson2JsonRedisSerializer);
        // Hash values are also serialized with Jackson
        redisTemplate.setHashValueSerializer(jackson2JsonRedisSerializer);
        redisTemplate.afterPropertiesSet();
        return redisTemplate;
    }



    @Bean
    public CacheManager cacheManager(RedisConnectionFactory factory) {
        RedisSerializer<String> redisSerializer = new StringRedisSerializer();
        Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer =
                new Jackson2JsonRedisSerializer<>(Object.class);
        // Configure the ObjectMapper so cached values deserialize back to their original types without cast errors
        ObjectMapper om = new ObjectMapper();
        om.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
        om.activateDefaultTyping(LaissezFaireSubTypeValidator.instance,
                ObjectMapper.DefaultTyping.NON_FINAL, JsonTypeInfo.As.PROPERTY);
        jackson2JsonRedisSerializer.setObjectMapper(om);
        // Configure serialization (avoids garbled entries); cache entries expire after 600 seconds
        RedisCacheConfiguration config = RedisCacheConfiguration.defaultCacheConfig()
                .entryTtl(Duration.ofSeconds(600))
                .serializeKeysWith(RedisSerializationContext.SerializationPair.fromSerializer(redisSerializer))
                .serializeValuesWith(RedisSerializationContext.SerializationPair.fromSerializer(jackson2JsonRedisSerializer))
                .disableCachingNullValues();
        RedisCacheManager cacheManager = RedisCacheManager.builder(factory)
                .cacheDefaults(config)
                .build();
        return cacheManager;
    }


}
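
For reference, a variant of the client options that fails fast during the window instead of letting commands queue up behind a dead node might look like the sketch below. TimeoutOptions and DisconnectedBehavior are standard Lettuce options, but the 2-second timeout is a guess, and this is not the configuration that produced the error above:

import java.time.Duration;

import io.lettuce.core.ClientOptions;
import io.lettuce.core.TimeoutOptions;
import io.lettuce.core.cluster.ClusterClientOptions;
import io.lettuce.core.cluster.ClusterTopologyRefreshOptions;

public class FailFastClientOptions {

    /** Sketch only: client options that reject commands quickly while a node is down. */
    public static ClientOptions build() {
        ClusterTopologyRefreshOptions refreshOptions = ClusterTopologyRefreshOptions.builder()
                .enableAllAdaptiveRefreshTriggers()
                .adaptiveRefreshTriggersTimeout(Duration.ofSeconds(10))
                .enablePeriodicRefresh(Duration.ofSeconds(10))
                .build();

        return ClusterClientOptions.builder()
                .autoReconnect(true)
                .maxRedirects(6)
                .topologyRefreshOptions(refreshOptions)
                // Hard per-command timeout so callers are not blocked for the whole failover window
                .timeoutOptions(TimeoutOptions.enabled(Duration.ofSeconds(2)))
                // Reject commands immediately while disconnected instead of buffering them
                .disconnectedBehavior(ClientOptions.DisconnectedBehavior.REJECT_COMMANDS)
                .build();
    }
}

// The result would replace the clientOptions built in lettuceConnectionFactory() above.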

YAML file

spring:
  # Redis configuration
  redis:
    lettuce:
      pool:
        max-active: 1024 # Maximum number of connections in the pool (negative value means no limit)
        max-wait: 10000  # Maximum time (ms) to block waiting for a pooled connection
        max-idle: 10   # Maximum number of idle connections in the pool
        min-idle: 5   # Minimum number of idle connections in the pool
    database: 0
    # Command/connection timeout (ms), not a key expiration time
    timeout: 15000
    # Password
    password: 123456
    cluster:
      refresh:
        adaptive: true
        period: 10000
      nodes:
        - 192.168.0.250:6381
        - 192.168.0.250:6382
        - 192.168.0.250:6383
        - 192.168.0.251:6386
        - 192.168.0.251:6387
        - 192.168.0.251:6388
      max-redirects: 3

pom.xml

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-redis</artifactId>
    <version>2.7.0</version>
</dependency>
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-pool2</artifactId>
    <version>2.9.0</version>
</dependency>