Using RedisCacheWriter#clean to scan and batch-delete cached data online and asynchronously - spring-data-redis
2023-12-18 06:02:53
Background
Goal
Conclusion first
Begin with the end in mind, be result-oriented
Technical solution
Implementation
Spring Boot
Versions
- spring-boot-starter-data-redis-2.7.16
- spring-data-redis-2.7.16
Source code
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import cn.hutool.core.thread.ThreadFactoryBuilder;
import com.spring.boot.redis.example.model.CacheKey;
import com.spring.boot.redis.example.service.CacheService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.cache.BatchStrategies;
import org.springframework.data.redis.cache.RedisCacheWriter;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.stereotype.Service;
import org.springframework.util.StopWatch;
/**
* Cache service implementation
*
* @author guang.yi
* @since 2023/7/30
*/
@Slf4j
@Service("cacheService")
public class CacheServiceImpl implements CacheService {
/**
 * Concurrency switch: prevents the same key pattern from being submitted more than once
 * (the flag is never reset, so each pattern is cleaned at most once per process lifetime)
 */
private final ConcurrentMap<String, Boolean> concurrentSwitch = new ConcurrentHashMap<>(16);
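/**
 * Single-threaded executor with a bounded queue of one task; cleanup runs on a daemon
 * thread. With the default rejection policy (AbortPolicy), a submission made while one
 * task is running and another is queued is rejected with a RejectedExecutionException.
 */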
private final ExecutorService executorService = new ThreadPoolExecutor(
1, 1, 5L, TimeUnit.MINUTES,
new ArrayBlockingQueue<>(1),
new ThreadFactoryBuilder().setNamePrefix("cache-clean-")
.setDaemon(true).build()
);
private final RedisConnectionFactory redisConnectionFactory;
public CacheServiceImpl(
RedisConnectionFactory redisConnectionFactory
) {
this.redisConnectionFactory = redisConnectionFactory;
log.info("create CacheServiceImpl");
}
@Override
public boolean cleanCache(CacheKey cacheKey) {
String keyPattern = cacheKey.getKeyPattern();
// Avoid submitting duplicate cleanup tasks for the same key pattern
if (concurrentSwitch.putIfAbsent(keyPattern, Boolean.TRUE) == null) {
// Execute asynchronously
executorService.execute(() -> this.clean(cacheKey));
return true;
}
return false;
}
private void clean(CacheKey cacheKey) {
log.info("cleanCache start, cacheKey={}", cacheKey);
StopWatch stopWatch = new StopWatch("cleanCache");
stopWatch.start();
this.clean(cacheKey.getCacheName(), cacheKey.getKeyPattern());
stopWatch.stop();
log.info("cleanCache end, cacheKey={}, stopWatch={}", cacheKey, stopWatch);
}
/**
* Clean up historical Redis cache data
* <pre>
* [Batch strategy] Online, asynchronous scan-and-delete in batches, 10 keys per batch:
* SCAN first, then batch DEL
* [Execution strategy] Run in the pre-production environment during off-peak business hours
* </pre>
*
* @see org.springframework.data.redis.cache.RedisCacheWriter#clean
* @see org.springframework.data.redis.cache.DefaultRedisCacheWriter#clean
*/
private void clean(String cacheName, String keyPattern) {
// [Batch strategy] SCAN, 10 keys per batch
RedisCacheWriter redisCacheWriter = RedisCacheWriter.nonLockingRedisCacheWriter(
redisConnectionFactory, BatchStrategies.scan(10));
// SCAN first, then batch DEL
redisCacheWriter.clean(cacheName, keyPattern.getBytes(StandardCharsets.UTF_8));
}
}
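The CacheKey model and CacheService interface imported above are project-specific and were not included in the original post. Below is a minimal sketch of what they might look like, assuming CacheKey simply carries the cache name and the SCAN key pattern; the packages, fields, and method shapes are assumptions for illustration, not the author's actual code.

package com.spring.boot.redis.example.model;
/**
 * Cache key descriptor (hypothetical sketch; the original class is not shown).
 */
public class CacheKey {
    /** Cache name passed to RedisCacheWriter#clean, e.g. "userCache" */
    private String cacheName;
    /** Key pattern used by SCAN MATCH, e.g. "userCache::*" */
    private String keyPattern;

    public String getCacheName() {
        return cacheName;
    }
    public void setCacheName(String cacheName) {
        this.cacheName = cacheName;
    }
    public String getKeyPattern() {
        return keyPattern;
    }
    public void setKeyPattern(String keyPattern) {
        this.keyPattern = keyPattern;
    }
    @Override
    public String toString() {
        return "CacheKey(cacheName=" + cacheName + ", keyPattern=" + keyPattern + ")";
    }
}

package com.spring.boot.redis.example.service;
import com.spring.boot.redis.example.model.CacheKey;
/**
 * Cache service interface (hypothetical sketch matching the implementation above).
 */
public interface CacheService {
    /**
     * Asynchronously cleans all cache entries matching the given key pattern.
     *
     * @return true if a cleanup task was submitted, false if one was already submitted for this pattern
     */
    boolean cleanCache(CacheKey cacheKey);
}

With BatchStrategies.scan(10), RedisCacheWriter#clean issues SCAN with the given MATCH pattern and deletes the matched keys in batches. By contrast, the default BatchStrategies.keys() collects all matching keys with KEYS and deletes them in a single DEL, which is why the scan strategy is the safer choice for an online cleanup.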
Configuration (application.yml)
# .A.1. Core Properties
spring:
application:
name: redis-spring-boot-starter-example
# RedisProperties
redis:
database: 0
host: "localhost"
port: 6379
timeout: 1s
connect-timeout: 300ms
client-name: "user-cache-example"
# client-type: lettuce
# sentinel:
# master: ""
# nodes: "host:port"
# cluster:
# nodes: "host:port"
# max-redirects: 3
# jedis:
# pool:
# enabled: true
# max-idle: 8
# min-idle: 0
# max-active: 8
# max-wait: 300ms
# time-between-eviction-runs: 5m
lettuce:
shutdown-timeout: 100ms
pool:
enabled: true
max-idle: 8
min-idle: 0
max-active: 8
max-wait: -1
time-between-eviction-runs: 5m
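With spring-boot-starter-data-redis on the classpath, the properties above are enough for Spring Boot to auto-configure a Lettuce-based RedisConnectionFactory, which is what the CacheServiceImpl constructor receives. A hypothetical one-shot trigger for the cleanup, for example in the pre-production environment during off-peak hours as the Javadoc suggests, could look like the sketch below; the CleanupRunner class and the "userCache" values are illustrative assumptions, not part of the original post.

import com.spring.boot.redis.example.model.CacheKey;
import com.spring.boot.redis.example.service.CacheService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;
/**
 * Hypothetical one-shot trigger that submits the asynchronous cache cleanup at startup.
 */
@Slf4j
@Component
public class CleanupRunner implements CommandLineRunner {
    private final CacheService cacheService;

    public CleanupRunner(CacheService cacheService) {
        this.cacheService = cacheService;
    }

    @Override
    public void run(String... args) {
        CacheKey cacheKey = new CacheKey();
        // Illustrative cache name and key pattern; adjust to the real cache layout
        cacheKey.setCacheName("userCache");
        cacheKey.setKeyPattern("userCache::*");
        boolean submitted = cacheService.cleanCache(cacheKey);
        log.info("cache cleanup submitted={}", submitted);
        // The cleanup itself runs on a daemon thread, so the application must stay
        // alive (e.g. as a web application) long enough for it to finish.
    }
}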
References
Article source: https://blog.csdn.net/shupili141005/article/details/135053243