Integrating Ehcache3 with Spring Boot
Preface
The department's legacy project was being migrated to a newer Java version and needed caching support that the original framework did not provide. A survey of Java caching options came down to two candidates, Ehcache and Redis. Redis caps a single value at 512 MB, and values over roughly 1 MB already hurt read/write performance; since the business system has to cache list queries, large amounts of data inevitably have to be stored and filtered. Ehcache supports tiered memory-plus-disk caching, so cache capacity is not a concern. The initial version of the framework therefore integrates Ehcache3; the design is laid out in the sections below.
Cache Configuration
Maven dependencies
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-cache</artifactId>
</dependency>
<dependency>
    <groupId>org.ehcache</groupId>
    <artifactId>ehcache</artifactId>
</dependency>
Custom configuration
# cache configuration
frame:
  cache:
    ehcache:
      heap: 1000
      offheap: 100
      disk: 500
      diskDir: tempfiles/cache/
@Component
@ConfigurationProperties("frame.cache.ehcache")
public class EhcacheConfiguration {
    /**
     * ehcache heap size:
     * number of entries cached in JVM heap memory
     */
    private int heap;
    /**
     * ehcache offheap size:
     * off-heap memory size, unit: MB
     */
    private int offheap;
    /**
     * disk persistence directory
     */
    private String diskDir;
    /**
     * ehcache disk:
     * size persisted to disk, unit: MB;
     * only takes effect when diskDir is set
     */
    private int disk;

    public EhcacheConfiguration() {
        heap = 1000;
        offheap = 100;
        disk = 500;
        diskDir = "tempfiles/cache/";
}
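    // getters and setters for all four fields are omitted for brevity; they are required
    // for @ConfigurationProperties binding and are used by the EhcacheConfig bean below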
}
Registering the cache manager bean
Because Spring Boot's cache auto-configuration prefers Redis when it is on the classpath, the Ehcache CacheManager bean has to be declared manually. In addition, Ehcache values must implement Serializable, so a plain Object cannot be used as the value type; a cache base class is declared here and every cached value object must extend it.
public class BaseSystemObject implements Serializable {
}
@Configuration
@EnableCaching
public class EhcacheConfig {
    @Autowired
    private EhcacheConfiguration ehcacheConfiguration;
    @Autowired
    private ApplicationContext context;

    @Bean(name = "ehCacheManager")
    public CacheManager getCacheManager() {
        // resource pools: heap + off-heap + disk persistence
        ResourcePoolsBuilder resourcePoolsBuilder = ResourcePoolsBuilder.newResourcePoolsBuilder()
                // number of entries kept on the JVM heap
                .heap(ehcacheConfiguration.getHeap(), EntryUnit.ENTRIES)
                // off-heap memory size
                .offheap(ehcacheConfiguration.getOffheap(), MemoryUnit.MB)
                // disk cache size
                .disk(ehcacheConfiguration.getDisk(), MemoryUnit.MB);
        // cache configuration: String keys, BaseSystemObject values, entries never expire
        ExpiryPolicy<Object, Object> expiryPolicy = ExpiryPolicyBuilder.noExpiration();
        CacheConfiguration<String, BaseSystemObject> config = CacheConfigurationBuilder
                .newCacheConfigurationBuilder(String.class, BaseSystemObject.class, resourcePoolsBuilder)
                .withExpiry(expiryPolicy)
                .build();
        CacheManagerBuilder<PersistentCacheManager> cacheManagerBuilder = CacheManagerBuilder.newCacheManagerBuilder()
                .with(CacheManagerBuilder.persistence(ehcacheConfiguration.getDiskDir()));
        // every cache name must be registered on cacheManagerBuilder here, before build(true);
        // the registration code is shown in the cache warm-up section below
        return cacheManagerBuilder.build(true);
    }
}
Cache Operations
Cache warm-up
The caching framework uses a double-write strategy, i.e. the database and the cache are written together, so at system startup the database data has to be preloaded into the cache.
A custom marker annotation is declared for single-table caches, and a custom interface is defined for personalized caches.
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Inherited
public @interface HPCache {
}
public interface IHPCacheInitService {
String getCacheName();
void initCache();
}
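As an illustration, a cached single-table entity and a personalized cache initializer might look like the following sketch; the entity, its fields and the cache name are hypothetical examples, not part of the framework.
@HPCache
@TableName("sys_user")
public class SysUser extends BaseSystemObject {
    // UnitGuid is the primary key column used as the cache key during warm-up
    private String unitGuid;
    private String userName;
    // getters and setters omitted
}

@Service
public class UserCacheInitService implements IHPCacheInitService {
    @Override
    public String getCacheName() {
        return "UserPermissionCache";
    }

    @Override
    public void initCache() {
        // load whatever personalized data is needed and put it into the cache named above
    }
}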
Alongside system initialization, the cache is warmed up asynchronously by scanning for annotated entity classes and for beans implementing the initializer interface.
@Async
public void initCache(Class<?> runtimeClass, List<String> extraPackageNameList) {
    List<Class<?>> cacheEntityList = new ArrayList<>();
    if (!runtimeClass.getPackage().getName().equals(Application.class.getPackage().getName())) {
        cacheEntityList.addAll(ScanUtil.getAllClassByPackageName_Annotation(runtimeClass.getPackage(), HPCache.class));
    }
    for (String packageName : extraPackageNameList) {
        cacheEntityList.addAll(ScanUtil.getAllClassByPackageName_Annotation(packageName, HPCache.class));
    }
    for (Class<?> clazz : cacheEntityList) {
        TableName tableName = (TableName) clazz.getAnnotation(TableName.class);
        List<LinkedHashMap<String, Object>> resultList = commonDTO.selectList(tableName.value(), "*", "1=1", "", new HashMap<>(), false);
        for (LinkedHashMap<String, Object> map : resultList) {
            Cache<String, BaseSystemObject> cache = cacheManager.getCache(clazz.getName(), String.class, BaseSystemObject.class);
            String unitguid = ConvertOp.convert2String(map.get("UnitGuid"));
            try {
                Object obj = clazz.newInstance();
                obj = ConvertOp.convertLinkHashMapToBean(map, obj);
                cache.put(unitguid, (BaseSystemObject) obj);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
    // personalized caches registered through IHPCacheInitService beans
    Map<String, IHPCacheInitService> res = context.getBeansOfType(IHPCacheInitService.class);
    for (Map.Entry<String, IHPCacheInitService> en : res.entrySet()) {
        IHPCacheInitService service = en.getValue();
        service.initCache();
    }
    System.out.println("cache initialization finished");
}
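The warm-up can be triggered once the application context is ready, for example from a CommandLineRunner. The following is only a sketch; CacheInitService is a hypothetical name for the bean that holds the @Async initCache method above.
@Component
public class CacheWarmUpRunner implements CommandLineRunner {
    @Autowired
    private CacheInitService cacheInitService; // hypothetical bean containing the initCache method above

    @Override
    public void run(String... args) {
        List<String> extraPackageNameList = new ArrayList<>();
        extraPackageNameList.add(Application.class.getPackage().getName());
        cacheInitService.initCache(Application.class, extraPackageNameList);
    }
}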
Note that the cache names have to be registered up front in the EhcacheConfig configuration class; otherwise getCache returns null and operating on the cache throws a NullPointerException.
Map<String, Object> annotatedBeans = context.getBeansWithAnnotation(SpringBootApplication.class);
Class<?> runtimeClass = annotatedBeans.values().toArray()[0].getClass();
// scan the do/dao packages
List<String> extraPackageNameList = new ArrayList<>();
extraPackageNameList.add(Application.class.getPackage().getName());
List<Class<?>> cacheEntityList = new ArrayList<>();
if (!runtimeClass.getPackage().getName().equals(Application.class.getPackage().getName())) {
    cacheEntityList.addAll(ScanUtil.getAllClassByPackageName_Annotation(runtimeClass.getPackage(), HPCache.class));
}
for (String packageName : extraPackageNameList) {
    cacheEntityList.addAll(ScanUtil.getAllClassByPackageName_Annotation(packageName, HPCache.class));
}
for (Class<?> clazz : cacheEntityList) {
    cacheManagerBuilder = cacheManagerBuilder.withCache(clazz.getName(), config);
}
// personalized caches
Map<String, IHPCacheInitService> res = context.getBeansOfType(IHPCacheInitService.class);
for (Map.Entry<String, IHPCacheInitService> en : res.entrySet()) {
    IHPCacheInitService service = en.getValue();
    cacheManagerBuilder = cacheManagerBuilder.withCache(service.getCacheName(), config);
}
Update operations
Obtain the Ehcache CacheManager bean manually and call the cache's put, replace and remove methods.
private CacheManager cacheManager = (CacheManager) SpringBootBeanUtil.getBean("ehCacheManager");

public void executeUpdateOperation(String cacheName, String key, BaseSystemObject value) {
    Cache<String, BaseSystemObject> cache = cacheManager.getCache(cacheName, String.class, BaseSystemObject.class);
    if (cache.containsKey(key)) {
        cache.replace(key, value);
    } else {
        cache.put(key, value);
    }
}

public void executeDeleteOperation(String cacheName, String key) {
    Cache<String, BaseSystemObject> cache = cacheManager.getCache(cacheName, String.class, BaseSystemObject.class);
    cache.remove(key);
}
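With these helpers, the double-write mentioned above is simply a database write followed by a cache write. A sketch only: it assumes a MyBatis-style sysUserMapper, the SysUser entity from the earlier example, and that the two methods above live in a bean named cacheOperationService.
@Transactional
public void updateUser(SysUser user) {
    // 1. write the database first
    sysUserMapper.updateById(user);
    // 2. then write the cache (cache name = entity class name, key = primary key)
    cacheOperationService.executeUpdateOperation(SysUser.class.getName(), user.getUnitGuid(), user);
}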
Query operations
Single-table caches store entries as primary key -> object; personalized caches store them as key -> object. A single record can be read with getCache; a list query has to pull the whole cache and filter it by the query conditions.
public Object getCache(String cacheName, String key) {
    Cache<String, BaseSystemObject> cache = cacheManager.getCache(cacheName, String.class, BaseSystemObject.class);
    return cache.get(key);
}
// list variant (the method name is a sketch): iterate every entry and let callers filter the returned values
public List<Object> getCacheList(String cacheName) {
    List<Object> resultList = new ArrayList<>();
    cacheManager.getCache(cacheName, String.class, BaseSystemObject.class).forEach(entry -> resultList.add(entry.getValue()));
    return resultList;
}
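A list query then takes the returned values and filters them in memory, for example with the Stream API (SysUser and the filter condition are again just examples):
List<Object> cachedUsers = getCacheList(SysUser.class.getName());
List<SysUser> admins = cachedUsers.stream()
        .map(obj -> (SysUser) obj)
        .filter(u -> u.getUserName() != null && u.getUserName().startsWith("admin"))
        .collect(Collectors.toList());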
Consistency between the cache and the database
Database writes are executed before the corresponding cache writes. With a database transaction open, this is fine for a single operation on a single row; for a composite operation, however, a rollback of the database does not roll back the cache, which leaves the two inconsistent. For example, with the execution order dbop1 => cacheop1 => dbop2 => cacheop2, if dbop2 fails and the transaction rolls back, cacheop1 has already modified the cache.
The approach chosen here is to apply all cache operations in one batch after every database statement has completed. Its weakness is that an exception during the cache phase still causes the same inconsistency; in practice, cache operations are pure in-memory work and rarely fail, so we stay optimistic about them, and a manual cache-reset function is provided as a fallback. It is a compromise, and its implementation is outlined below.
Declare a custom cache-transaction annotation
@Target({ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Inherited
public @interface CacheTransactional {
}
Declare an aspect that intercepts methods annotated with CacheTransactional: before the method body runs, a marker is written to Redis; after the method body completes, the queued cache operations are executed in one go.
@Aspect
@Component
@Order(value = 101)
public class CacheExecuteAspect {
@Autowired
private CacheExecuteUtil cacheExecuteUtil;
/**
 * Pointcut: methods or classes annotated with CacheTransactional
 */
@Pointcut("@annotation(com.haopan.frame.common.annotation.CacheTransactional) " +
"|| @within(com.haopan.frame.common.annotation.CacheTransactional)")
public void cacheExecuteAspect() {
}
/**
 * Around advice bound to the cacheExecuteAspect() pointcut
 */
@Around("cacheExecuteAspect()")
public Object around(ProceedingJoinPoint point) throws Throwable {
MethodSignature signature = (MethodSignature) point.getSignature();
Method method = signature.getMethod();
CacheTransactional cacheTransactional = method.getAnnotation(CacheTransactional.class);
if (cacheTransactional != null) {
cacheExecuteUtil.putCacheIntoTransition();
try{
Object obj = point.proceed();
cacheExecuteUtil.executeOperation();
return obj;
}catch (Exception e){
e.printStackTrace();
throw e;
}
} else {
return point.proceed();
}
}
}
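A business method grouping several database and cache writes then looks like the sketch below. It relies on the CacheExecuteUtil queue methods shown next; the service, the mappers and the SysUserRole entity are assumptions used only for illustration.
@CacheTransactional
@Transactional
public void saveUserWithRole(SysUser user, SysUserRole role) {
    sysUserMapper.insert(user);       // dbop1
    cacheExecuteUtil.putCache(SysUser.class.getName(), user.getUnitGuid(), user);         // cacheop1, queued in Redis
    sysUserRoleMapper.insert(role);   // dbop2
    cacheExecuteUtil.putCache(SysUserRole.class.getName(), role.getUnitGuid(), role);     // cacheop2, queued in Redis
    // when the method returns, the aspect's executeOperation() applies the queued cache writes;
    // if dbop2 throws, the transaction rolls back and no cache write has been applied yet
}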
Cache operations are queued per thread (keyed by the current thread name), serialized into Redis, and a single method applies them all.
public class CacheExecuteModel implements Serializable {
private String obejctClazzName;
private String cacheName;
private String key;
private BaseSystemObject value;
private String executeType;
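    // constructors (the no-arg one and the multi-argument ones used below), getters and setters are omitted for brevity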
}
private CacheManager cacheManager = (CacheManager) SpringBootBeanUtil.getBean("ehCacheManager");
@Autowired
private RedisUtil redisUtil;
public void putCacheIntoTransition(){
String threadID = Thread.currentThread().getName();
System.out.println("init threadid:"+threadID);
CacheExecuteModel cacheExecuteModel = new CacheExecuteModel();
cacheExecuteModel.setExecuteType("option");
redisUtil.redisTemplateSetForCollection(threadID,cacheExecuteModel, GlobalEnum.RedisDBNum.Cache.get_value());
redisUtil.setExpire(threadID,5, TimeUnit.MINUTES, GlobalEnum.RedisDBNum.Cache.get_value());
}
public void putCache(String cacheName, String key, BaseSystemObject value) {
if(checkCacheOptinionInTransition()){
String threadID = Thread.currentThread().getName();
CacheExecuteModel cacheExecuteModel = new CacheExecuteModel("update", cacheName, key, value.getClass().getName(),value);
redisUtil.redisTemplateSetForCollection(threadID,cacheExecuteModel, GlobalEnum.RedisDBNum.Cache.get_value());
redisUtil.setExpire(threadID,5, TimeUnit.MINUTES, GlobalEnum.RedisDBNum.Cache.get_value());
}else{
executeUpdateOperation(cacheName,key,value);
}
}
public void deleteCache(String cacheName, String key) {
if(checkCacheOptinionInTransition()){
String threadID = Thread.currentThread().getName();
CacheExecuteModel cacheExecuteModel = new CacheExecuteModel("delete", cacheName, key);
redisUtil.redisTemplateSetForCollection(threadID,cacheExecuteModel, GlobalEnum.RedisDBNum.Cache.get_value());
redisUtil.setExpire(threadID,5, TimeUnit.MINUTES, GlobalEnum.RedisDBNum.Cache.get_value());
}else{
executeDeleteOperation(cacheName,key);
}
}
public void executeOperation(){
String threadID = Thread.currentThread().getName();
if(checkCacheOptinionInTransition()){
List<LinkedHashMap> executeList = redisUtil.redisTemplateGetForCollectionAll(threadID, GlobalEnum.RedisDBNum.Cache.get_value());
for (LinkedHashMap obj:executeList) {
String executeType = ConvertOp.convert2String(obj.get("executeType"));
if(executeType.contains("option")){
continue;
}
String obejctClazzName = ConvertOp.convert2String(obj.get("obejctClazzName"));
String cacheName = ConvertOp.convert2String(obj.get("cacheName"));
String key = ConvertOp.convert2String(obj.get("key"));
LinkedHashMap valueMap = (LinkedHashMap)obj.get("value");
String valueMapJson = JSON.toJSONString(valueMap);
try{
Object valueInstance = JSON.parseObject(valueMapJson,Class.forName(obejctClazzName));
if(executeType.equals("update")){
executeUpdateOperation(cacheName,key,(BaseSystemObject)valueInstance);
}else if(executeType.equals("delete")){
executeDeleteOperation(cacheName,key);
}
}catch (Exception e){
e.printStackTrace();
}
}
redisUtil.redisTemplateRemove(threadID,GlobalEnum.RedisDBNum.Cache.get_value());
}
}
public boolean checkCacheOptinionInTransition(){
String threadID = Thread.currentThread().getName();
System.out.println("check threadid:"+threadID);
return redisUtil.isValid(threadID, GlobalEnum.RedisDBNum.Cache.get_value());
}