像Mybatis、Hibernate這樣的ORM框架,封裝了JDBC的大部分操做,極大的簡化了咱們對數據庫的操做。
在實際項目中,咱們發如今一個事務中查詢一樣的語句兩次的時候,第二次沒有進行數據庫查詢,直接返回告終果,實際這種狀況咱們就能夠稱爲緩存。redis
用下面這張圖描述一級緩存和二級緩存的關係。
在 MyBatis 中,引入緩存的目的是爲提升查詢效率,下降數據庫壓力。既然 MyBatis 引入了緩存,那麼你們思考過緩存中的 key 和 value 的值分別是什麼嗎?你們可能很容易能回答出 value 的內容,不就是 SQL 的查詢結果嗎。那 key 是什麼呢?是字符串,仍是其餘什麼對象?若是是字符串的話,那麼你們首先能想到的是用 SQL 語句做爲 key。但這是不對的,好比:
SELECT * FROM user where id > ?
id > 1 和 id > 10 查出來的結果多是不一樣的,因此咱們不能簡單的使用 SQL 語句做爲 key。從這裏能夠看出來,運行時參數將會影響查詢結果,所以咱們的 key 應該涵蓋運行時參數。除此以外呢,若是進行分頁查詢也會致使查詢結果不一樣,所以 key 也應該涵蓋分頁參數。綜上,咱們不能使用簡單的 SQL 語句做爲 key。應該考慮使用一種複合對象,能涵蓋可影響查詢結果的因子。在 MyBatis 中,這種複合對象就是 CacheKey。下面來看一下它的定義。
public class CacheKey implements Cloneable, Serializable { private static final int DEFAULT_MULTIPLYER = 37; private static final int DEFAULT_HASHCODE = 17; // 乘子,默認爲37 private final int multiplier; // CacheKey 的 hashCode,綜合了各類影響因子 private int hashcode; // 校驗和 private long checksum; // 影響因子個數 private int count; // 影響因子集合 private List<Object> updateList; public CacheKey() { this.hashcode = DEFAULT_HASHCODE; this.multiplier = DEFAULT_MULTIPLYER; this.count = 0; this.updateList = new ArrayList<Object>(); } /** 每當執行更新操做時,表示有新的影響因子參與計算 * 當不斷有新的影響因子參與計算時,hashcode 和 checksum 將會變得愈發複雜和隨機。這樣可下降衝突率,使 CacheKey 可在緩存中更均勻的分佈。 */ public void update(Object object) { int baseHashCode = object == null ? 1 : ArrayUtil.hashCode(object); // 自增 count count++; // 計算校驗和 checksum += baseHashCode; // 更新 baseHashCode baseHashCode *= count; // 計算 hashCode hashcode = multiplier * hashcode + baseHashCode; // 保存影響因子 updateList.add(object); } /** * CacheKey 最終要做爲鍵存入 HashMap,所以它須要覆蓋 equals 和 hashCode 方法 */ public boolean equals(Object object) { // 檢測是否爲同一個對象 if (this == object) { return true; } // 檢測 object 是否爲 CacheKey if (!(object instanceof CacheKey)) { return false; } final CacheKey cacheKey = (CacheKey) object; // 檢測 hashCode 是否相等 if (hashcode != cacheKey.hashcode) { return false; } // 檢測校驗和是否相同 if (checksum != cacheKey.checksum) { return false; } // 檢測 coutn 是否相同 if (count != cacheKey.count) { return false; } // 若是上面的檢測都經過了,下面分別對每一個影響因子進行比較 for (int i = 0; i < updateList.size(); i++) { Object thisObject = updateList.get(i); Object thatObject = cacheKey.updateList.get(i); if (!ArrayUtil.equals(thisObject, thatObject)) { return false; } } return true; } public int hashCode() { // 返回 hashcode 變量 return hashcode; } }
當不斷有新的影響因子參與計算時,hashcode 和 checksum 將會變得愈發複雜和隨機。這樣可下降衝突率,使 CacheKey 可在緩存中更均勻的分佈。CacheKey 最終要做爲鍵存入 HashMap,所以它須要覆蓋 equals 和 hashCode 方法。
同一個session查詢
// Demo 1: two identical queries on the SAME SqlSession.
// Result (conclusion below): only one DB query is issued — the second call
// is answered from the session's first-level cache.
public static void main(String[] args) {
    SqlSession session = sqlSessionFactory.openSession();
    try {
        Blog blog = (Blog)session.selectOne("queryById",1);
        // Same statement + same parameter => same CacheKey => cache hit.
        Blog blog2 = (Blog)session.selectOne("queryById",1);
    } finally {
        session.close();
    }
}
結論:只有一個DB查詢
兩個session分別查詢
// Demo 2: the same query issued on TWO different SqlSessions.
// Result: two DB queries — the first-level cache is per-SqlSession, so
// session1 cannot see what session cached.
public static void main(String[] args) {
    SqlSession session = sqlSessionFactory.openSession();
    SqlSession session1 = sqlSessionFactory.openSession();
    try {
        Blog blog = (Blog)session.selectOne("queryById",17);
        Blog blog2 = (Blog)session1.selectOne("queryById",17);
    } finally {
        // Close BOTH sessions — the original snippet leaked session1.
        session.close();
        session1.close();
    }
}
結論:進行了兩次DB查詢
同一個session,進行update以後再次查詢
// Demo 3: same SqlSession, but an update runs between the two queries.
// Result: two DB queries — every insert/update/delete clears the session's
// first-level cache (see BaseExecutor.clearLocalCache() below).
public static void main(String[] args) {
    SqlSession session = sqlSessionFactory.openSession();
    try {
        Blog blog = (Blog)session.selectOne("queryById",17);
        blog.setName("llll");
        // The update wipes the local cache ...
        session.update("updateBlog",blog);
        // ... so this query goes back to the DB.
        Blog blog2 = (Blog)session.selectOne("queryById",17);
    } finally {
        session.close();
    }
}
結論:進行了兩次DB查詢
總結:在一級緩存中,同一個SqlSession下,查詢語句相同的SQL會被緩存,若是執行增刪改操做以後,該緩存就會被刪除
咱們來回顧一下建立SqlSession的過程
SqlSession session = sessionFactory.openSession(); public SqlSession openSession() { return this.openSessionFromDataSource(this.configuration.getDefaultExecutorType(), (TransactionIsolationLevel)null, false); } private SqlSession openSessionFromDataSource(ExecutorType execType, TransactionIsolationLevel level, boolean autoCommit) { Transaction tx = null; DefaultSqlSession var8; try { Environment environment = this.configuration.getEnvironment(); TransactionFactory transactionFactory = this.getTransactionFactoryFromEnvironment(environment); tx = transactionFactory.newTransaction(environment.getDataSource(), level, autoCommit); //建立SQL執行器 Executor executor = this.configuration.newExecutor(tx, execType); var8 = new DefaultSqlSession(this.configuration, executor, autoCommit); } catch (Exception var12) { this.closeTransaction(tx); throw ExceptionFactory.wrapException("Error opening session. Cause: " + var12, var12); } finally { ErrorContext.instance().reset(); } return var8; } public Executor newExecutor(Transaction transaction, ExecutorType executorType) { executorType = executorType == null ? this.defaultExecutorType : executorType; executorType = executorType == null ? 
ExecutorType.SIMPLE : executorType; Object executor; if (ExecutorType.BATCH == executorType) { executor = new BatchExecutor(this, transaction); } else if (ExecutorType.REUSE == executorType) { executor = new ReuseExecutor(this, transaction); } else { //默認建立SimpleExecutor executor = new SimpleExecutor(this, transaction); } if (this.cacheEnabled) { //開啓二級緩存就會用CachingExecutor裝飾SimpleExecutor executor = new CachingExecutor((Executor)executor); } Executor executor = (Executor)this.interceptorChain.pluginAll(executor); return executor; } public SimpleExecutor(Configuration configuration, Transaction transaction) { super(configuration, transaction); } protected BaseExecutor(Configuration configuration, Transaction transaction) { this.transaction = transaction; this.deferredLoads = new ConcurrentLinkedQueue(); //建立一個緩存對象,PerpetualCache並非線程安全的 //但SqlSession和Executor對象在一般狀況下只能有一個線程訪問,並且訪問完成以後立刻銷燬。也就是session.close(); this.localCache = new PerpetualCache("LocalCache"); this.localOutputParameterCache = new PerpetualCache("LocalOutputParameterCache"); this.closed = false; this.configuration = configuration; this.wrapper = this; }
我只是簡單的貼了代碼,你們能夠看我以前的博客,咱們能夠看到DefaultSqlSession中有SimpleExecutor對象,SimpleExecutor對象中有一個PerpetualCache,一級緩存的數據就是存儲在PerpetualCache對象中,SqlSession關閉的時候會清空PerpetualCache
再來看BaseExecutor中的query方法是怎麼實現一級緩存的,executor默認實現爲CachingExecutor
CachingExecutor
/**
 * CachingExecutor entry point: builds the CacheKey from the statement and its
 * runtime parameters, then delegates to the key-aware overload below.
 */
public <E> List<E> query(MappedStatement ms, Object parameter, RowBounds rowBounds, ResultHandler resultHandler) throws SQLException {
    BoundSql boundSql = ms.getBoundSql(parameter);
    // Build a key from the SQL plus its execution parameters; the same SQL
    // with different parameters yields a different key.
    CacheKey key = createCacheKey(ms, parameter, rowBounds, boundSql);
    return query(ms, parameter, rowBounds, resultHandler, key, boundSql);
}

@Override
public <E> List<E> query(MappedStatement ms, Object parameterObject, RowBounds rowBounds, ResultHandler resultHandler, CacheKey key, BoundSql boundSql) throws SQLException {
    // This is the second-level cache lookup — ignored for now, detailed later.
    Cache cache = ms.getCache();
    if (cache != null) {
        flushCacheIfRequired(ms);
        if (ms.isUseCache() && resultHandler == null) {
            ensureNoOutParams(ms, parameterObject, boundSql);
            @SuppressWarnings("unchecked")
            List<E> list = (List<E>) tcm.getObject(cache, key);
            if (list == null) {
                list = delegate.<E> query(ms, parameterObject, rowBounds, resultHandler, key, boundSql);
                tcm.putObject(cache, key, list); // issue #578 and #116
            }
            return list;
        }
    }
    // With no <cache> configured we come straight here: delegate to the
    // first-level cache logic, i.e. BaseExecutor.query().
    return delegate.<E> query(ms, parameterObject, rowBounds, resultHandler, key, boundSql);
}
如上,在訪問一級緩存以前,MyBatis 首先會調用 createCacheKey 方法建立 CacheKey。下面咱們來看一下 createCacheKey 方法的邏輯:
/**
 * Builds the CacheKey for a query. Every factor that can change the result
 * participates: the statement id, the paging offset/limit, the SQL text,
 * each runtime parameter value, and the environment id.
 */
public CacheKey createCacheKey(MappedStatement ms, Object parameterObject, RowBounds rowBounds, BoundSql boundSql) {
    if (closed) {
        throw new ExecutorException("Executor was closed.");
    }
    // Create the CacheKey object.
    CacheKey cacheKey = new CacheKey();
    // Fold in the MappedStatement's id.
    cacheKey.update(ms.getId());
    // RowBounds drives paging; fold in both of its fields.
    cacheKey.update(rowBounds.getOffset());
    cacheKey.update(rowBounds.getLimit());
    // Fold in the SQL text.
    cacheKey.update(boundSql.getSql());
    List<ParameterMapping> parameterMappings = boundSql.getParameterMappings();
    TypeHandlerRegistry typeHandlerRegistry = ms.getConfiguration().getTypeHandlerRegistry();
    for (ParameterMapping parameterMapping : parameterMappings) {
        if (parameterMapping.getMode() != ParameterMode.OUT) {
            // Runtime parameter value.
            Object value;
            // This section resolves the runtime value behind each #{xxx}
            // placeholder: an additional parameter, null, the parameter object
            // itself, or a property read via a MetaObject.
            String propertyName = parameterMapping.getProperty();
            if (boundSql.hasAdditionalParameter(propertyName)) {
                value = boundSql.getAdditionalParameter(propertyName);
            } else if (parameterObject == null) {
                value = null;
            } else if (typeHandlerRegistry.hasTypeHandler(parameterObject.getClass())) {
                value = parameterObject;
            } else {
                MetaObject metaObject = configuration.newMetaObject(parameterObject);
                value = metaObject.getValue(propertyName);
            }
            // Fold the runtime parameter into the key.
            cacheKey.update(value);
        }
    }
    if (configuration.getEnvironment() != null) {
        // Fold in the Environment id.
        cacheKey.update(configuration.getEnvironment().getId());
    }
    return cacheKey;
}
如上,在計算 CacheKey 的過程當中,有不少影響因子參與了計算。好比 MappedStatement 的 id 字段,SQL 語句,分頁參數,運行時變量,Environment 的 id 字段等。經過讓這些影響因子參與計算,能夠很好的區分不一樣查詢請求。因此,咱們能夠簡單的把 CacheKey 看作是一個查詢請求的 id。有了 CacheKey,咱們就可使用它讀寫緩存了。
SimpleExecutor(BaseExecutor)
/**
 * BaseExecutor.query: the first-level cache lookup. Tries localCache for the
 * given CacheKey; on a miss, falls through to the database.
 */
@SuppressWarnings("unchecked")
@Override
public <E> List<E> query(MappedStatement ms, Object parameter, RowBounds rowBounds, ResultHandler resultHandler, CacheKey key, BoundSql boundSql) throws SQLException {
    ErrorContext.instance().resource(ms.getResource()).activity("executing a query").object(ms.getId());
    if (closed) {
        throw new ExecutorException("Executor was closed.");
    }
    if (queryStack == 0 && ms.isFlushCacheRequired()) {
        clearLocalCache();
    }
    List<E> list;
    try {
        queryStack++;
        // Here: first try the local (first-level) cache for this CacheKey.
        list = resultHandler == null ? (List<E>) localCache.getObject(key) : null;
        if (list != null) {
            handleLocallyCachedOutputParameters(ms, key, parameter, boundSql);
        } else {
            // Cache miss: query the database.
            list = queryFromDatabase(ms, parameter, rowBounds, resultHandler, key, boundSql);
        }
    } finally {
        queryStack--;
    }
    if (queryStack == 0) {
        for (DeferredLoad deferredLoad : deferredLoads) {
            deferredLoad.load();
        }
        deferredLoads.clear();
        if (configuration.getLocalCacheScope() == LocalCacheScope.STATEMENT) {
            // With STATEMENT scope the local cache never outlives one statement.
            clearLocalCache();
        }
    }
    return list;
}
BaseExecutor.queryFromDatabase()
咱們先來看下這種緩存中沒有值的狀況,看一下查詢後的結果是如何被放置到緩存中的
/**
 * Runs the query against the DB and stores the result in localCache under
 * the given CacheKey.
 */
private <E> List<E> queryFromDatabase(MappedStatement ms, Object parameter, RowBounds rowBounds, ResultHandler resultHandler, CacheKey key, BoundSql boundSql) throws SQLException {
    List<E> list;
    // Park a placeholder under the key while the query is in flight
    // (presumably to detect re-entrant lookups of the same key — TODO confirm).
    localCache.putObject(key, EXECUTION_PLACEHOLDER);
    try {
        // 1. Execute the query and obtain the result list.
        list = doQuery(ms, parameter, rowBounds, resultHandler, boundSql);
    } finally {
        localCache.removeObject(key);
    }
    // 2. Store the result in localCache: the key is the CacheKey built
    //    earlier, the value is the list fetched from the DB.
    localCache.putObject(key, list);
    if (ms.getStatementType() == StatementType.CALLABLE) {
        localOutputParameterCache.putObject(key, parameter);
    }
    return list;
}
咱們來看看 localCache.putObject(key, list);
PerpetualCache 是一級緩存使用的緩存類,內部使用了 HashMap 實現緩存功能。它的源碼以下:
/**
 * PerpetualCache: the Cache implementation used by the first-level cache.
 * Internally it is nothing more than a HashMap of key/value pairs.
 * Not thread-safe — relies on single-threaded SqlSession usage.
 */
public class PerpetualCache implements Cache {

    private final String id;

    private Map<Object, Object> cache = new HashMap<Object, Object>();

    public PerpetualCache(String id) {
        this.id = id;
    }

    @Override
    public String getId() {
        return id;
    }

    @Override
    public int getSize() {
        return cache.size();
    }

    @Override
    public void putObject(Object key, Object value) {
        // Store the key/value pair in the HashMap.
        cache.put(key, value);
    }

    @Override
    public Object getObject(Object key) {
        // Look up a cache entry.
        return cache.get(key);
    }

    @Override
    public Object removeObject(Object key) {
        // Remove a cache entry.
        return cache.remove(key);
    }

    @Override
    public void clear() {
        cache.clear();
    }

    // Some code omitted.
}
總結:能夠看到localCache本質上就是一個Map,key爲咱們的CacheKey,value爲咱們的結果值,是否是很簡單,只是封裝了一個Map而已。
SqlSession.update()
當咱們進行更新操做時,會執行以下代碼
/**
 * BaseExecutor.update: every insert/update/delete first clears the
 * first-level cache, then executes the statement.
 */
@Override
public int update(MappedStatement ms, Object parameter) throws SQLException {
    ErrorContext.instance().resource(ms.getResource()).activity("executing an update").object(ms.getId());
    if (closed) {
        throw new ExecutorException("Executor was closed.");
    }
    // The first-level cache is cleared on every update/insert/delete.
    clearLocalCache();
    // Then perform the actual update.
    return doUpdate(ms, parameter);
}

@Override
public void clearLocalCache() {
    if (!closed) {
        // Simply empty both backing maps.
        localCache.clear();
        localOutputParameterCache.clear();
    }
}
session.close();
//DefaultSqlSession public void close() { try { this.executor.close(this.isCommitOrRollbackRequired(false)); this.closeCursors(); this.dirty = false; } finally { ErrorContext.instance().reset(); } } //BaseExecutor public void close(boolean forceRollback) { try { try { this.rollback(forceRollback); } finally { if (this.transaction != null) { this.transaction.close(); } } } catch (SQLException var11) { log.warn("Unexpected exception on closing transaction. Cause: " + var11); } finally { this.transaction = null; this.deferredLoads = null; this.localCache = null; this.localOutputParameterCache = null; this.closed = true; } } public void rollback(boolean required) throws SQLException { if (!this.closed) { try { this.clearLocalCache(); this.flushStatements(true); } finally { if (required) { this.transaction.rollback(); } } } } public void clearLocalCache() { if (!this.closed) { // 直接將Map清空 this.localCache.clear(); this.localOutputParameterCache.clear(); } }
當關閉SqlSession時,也會清除SqlSession中的一級緩存
二級緩存構建在一級緩存之上,在收到查詢請求時,MyBatis 首先會查詢二級緩存。若二級緩存未命中,再去查詢一級緩存。與一級緩存不一樣,二級緩存和具體的命名空間綁定,一個Mapper中有一個Cache,相同Mapper中的MappedStatement共用一個Cache,一級緩存則是和 SqlSession 綁定。一級緩存不存在併發問題。二級緩存可在多個命名空間間共享,這種狀況下,會存在併發問題,例如多個不一樣的SqlSession 會同時執行相同的SQL語句,參數也相同,那麼CacheKey是相同的,就會形成多個線程併發訪問相同CacheKey的值,下面首先來看一下訪問二級緩存的邏輯。
二級緩存須要在Mapper.xml中配置<cache/>標籤
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="mybatis.BlogMapper">
    <select id="queryById" parameterType="int" resultType="jdbc.Blog">
        select * from blog where id = #{id}
    </select>
    <update id="updateBlog" parameterType="jdbc.Blog">
        update Blog set name = #{name},url = #{url} where id=#{id}
    </update>
    <!-- Enable the second-level cache for BlogMapper -->
    <cache/>
</mapper>
不一樣的session進行相同的查詢
// Demo: the same query on two different sessions with the second-level cache
// enabled but NO commit in between. Result: two DB queries — uncommitted
// results live only in the TransactionalCache staging area, not in the
// shared cache.
public static void main(String[] args) {
    SqlSession session = sqlSessionFactory.openSession();
    SqlSession session1 = sqlSessionFactory.openSession();
    try {
        Blog blog = (Blog)session.selectOne("queryById",17);
        Blog blog2 = (Blog)session1.selectOne("queryById",17);
    } finally {
        // Close BOTH sessions — the original snippet leaked session1.
        session.close();
        session1.close();
    }
}
結論:執行兩次DB查詢
第一個session查詢完成以後,手動提交,在執行第二個session查詢
// Demo: the first session commits before the second session queries.
// Result: one DB query — commit() flushes the staged entries into the shared
// second-level cache, where session1 can see them.
public static void main(String[] args) {
    SqlSession session = sqlSessionFactory.openSession();
    SqlSession session1 = sqlSessionFactory.openSession();
    try {
        Blog blog = (Blog)session.selectOne("queryById",17);
        // Commit publishes the staged result to the shared cache.
        session.commit();
        Blog blog2 = (Blog)session1.selectOne("queryById",17);
    } finally {
        // Close BOTH sessions — the original snippet leaked session1.
        session.close();
        session1.close();
    }
}
結論:執行一次DB查詢
第一個session查詢完成以後,手動關閉,在執行第二個session查詢
// Demo: the first session is closed before the second session queries.
// Result: one DB query — closing the session also publishes its staged
// second-level cache entries.
public static void main(String[] args) {
    SqlSession session = sqlSessionFactory.openSession();
    SqlSession session1 = sqlSessionFactory.openSession();
    try {
        Blog blog = (Blog)session.selectOne("queryById",17);
        // Deliberately closed here — that is the point of the demo.
        session.close();
        Blog blog2 = (Blog)session1.selectOne("queryById",17);
    } finally {
        // session is already closed above; session1 was leaked in the
        // original snippet and must be closed here.
        session1.close();
    }
}
結論:執行一次DB查詢
總結:二級緩存的生效必須在session提交或關閉以後纔會生效
按照以前的對Mybatis的分析,對blog.xml的解析工做主要交給XMLConfigBuilder.parse()方法來實現的
1 // XMLConfigBuilder.parse() 2 public Configuration parse() { 3 if (parsed) { 4 throw new BuilderException("Each XMLConfigBuilder can only be used once."); 5 } 6 parsed = true; 7 parseConfiguration(parser.evalNode("/configuration"));// 在這裏 8 return configuration; 9 } 10 11 // parseConfiguration() 12 // 既然是在blog.xml中添加的,那麼咱們就直接看關於mappers標籤的解析 13 private void parseConfiguration(XNode root) { 14 try { 15 Properties settings = settingsAsPropertiess(root.evalNode("settings")); 16 propertiesElement(root.evalNode("properties")); 17 loadCustomVfs(settings); 18 typeAliasesElement(root.evalNode("typeAliases")); 19 pluginElement(root.evalNode("plugins")); 20 objectFactoryElement(root.evalNode("objectFactory")); 21 objectWrapperFactoryElement(root.evalNode("objectWrapperFactory")); 22 reflectionFactoryElement(root.evalNode("reflectionFactory")); 23 settingsElement(settings); 24 // read it after objectFactory and objectWrapperFactory issue #631 25 environmentsElement(root.evalNode("environments")); 26 databaseIdProviderElement(root.evalNode("databaseIdProvider")); 27 typeHandlerElement(root.evalNode("typeHandlers")); 28 // 就是這裏 29 mapperElement(root.evalNode("mappers")); 30 } catch (Exception e) { 31 throw new BuilderException("Error parsing SQL Mapper Configuration. 
Cause: " + e, e); 32 } 33 } 34 35 36 // mapperElement() 37 private void mapperElement(XNode parent) throws Exception { 38 if (parent != null) { 39 for (XNode child : parent.getChildren()) { 40 if ("package".equals(child.getName())) { 41 String mapperPackage = child.getStringAttribute("name"); 42 configuration.addMappers(mapperPackage); 43 } else { 44 String resource = child.getStringAttribute("resource"); 45 String url = child.getStringAttribute("url"); 46 String mapperClass = child.getStringAttribute("class"); 47 // 按照咱們本例的配置,則直接走該if判斷 48 if (resource != null && url == null && mapperClass == null) { 49 ErrorContext.instance().resource(resource); 50 InputStream inputStream = Resources.getResourceAsStream(resource); 51 XMLMapperBuilder mapperParser = new XMLMapperBuilder(inputStream, configuration, resource, configuration.getSqlFragments()); 52 // 生成XMLMapperBuilder,並執行其parse方法 53 mapperParser.parse(); 54 } else if (resource == null && url != null && mapperClass == null) { 55 ErrorContext.instance().resource(url); 56 InputStream inputStream = Resources.getUrlAsStream(url); 57 XMLMapperBuilder mapperParser = new XMLMapperBuilder(inputStream, configuration, url, configuration.getSqlFragments()); 58 mapperParser.parse(); 59 } else if (resource == null && url == null && mapperClass != null) { 60 Class<?> mapperInterface = Resources.classForName(mapperClass); 61 configuration.addMapper(mapperInterface); 62 } else { 63 throw new BuilderException("A mapper element may only specify a url, resource or class, but not more than one."); 64 } 65 } 66 } 67 } 68 }
咱們來看看解析Mapper.xml
// XMLMapperBuilder.parse()
public void parse() {
    if (!configuration.isResourceLoaded(resource)) {
        // Parse the <mapper> element.
        configurationElement(parser.evalNode("/mapper"));
        configuration.addLoadedResource(resource);
        bindMapperForNamespace();
    }
    parsePendingResultMaps();
    parsePendingChacheRefs();
    parsePendingStatements();
}

// configurationElement()
private void configurationElement(XNode context) {
    try {
        String namespace = context.getStringAttribute("namespace");
        if (namespace == null || namespace.equals("")) {
            throw new BuilderException("Mapper's namespace cannot be empty");
        }
        builderAssistant.setCurrentNamespace(namespace);
        cacheRefElement(context.evalNode("cache-ref"));
        // Here, finally, the <cache/> element is handled.
        cacheElement(context.evalNode("cache"));
        parameterMapElement(context.evalNodes("/mapper/parameterMap"));
        resultMapElements(context.evalNodes("/mapper/resultMap"));
        sqlElement(context.evalNodes("/mapper/sql"));
        // This wires the Cache built above into each MappedStatement.
        buildStatementFromContext(context.evalNodes("select|insert|update|delete"));
    } catch (Exception e) {
        throw new BuilderException("Error parsing Mapper XML. Cause: " + e, e);
    }
}

// cacheElement()
private void cacheElement(XNode context) throws Exception {
    if (context != null) {
        // The type attribute of <cache/> lets you plug in a custom Cache
        // implementation (e.g. a Redis-backed one). Default is PERPETUAL —
        // the same PerpetualCache the first-level cache uses.
        String type = context.getStringAttribute("type", "PERPETUAL");
        Class<? extends Cache> typeClass = typeAliasRegistry.resolveAlias(type);
        String eviction = context.getStringAttribute("eviction", "LRU");
        Class<? extends Cache> evictionClass = typeAliasRegistry.resolveAlias(eviction);
        Long flushInterval = context.getLongAttribute("flushInterval");
        Integer size = context.getIntAttribute("size");
        boolean readWrite = !context.getBooleanAttribute("readOnly", false);
        boolean blocking = context.getBooleanAttribute("blocking", false);
        Properties props = context.getChildrenAsProperties();
        // Build the Cache object.
        builderAssistant.useNewCache(typeClass, evictionClass, flushInterval, size, readWrite, blocking, props);
    }
}
先來看看是如何構建Cache對象的
MapperBuilderAssistant.useNewCache()
/**
 * MapperBuilderAssistant.useNewCache: builds the namespace's Cache object,
 * registers it in the Configuration, and remembers it as currentCache so
 * every MappedStatement built afterwards can reference the same instance.
 */
public Cache useNewCache(Class<? extends Cache> typeClass,
        Class<? extends Cache> evictionClass,
        Long flushInterval,
        Integer size,
        boolean readWrite,
        boolean blocking,
        Properties props) {
    // 1. Build the Cache object.
    Cache cache = new CacheBuilder(currentNamespace)
        // Use the custom type from <cache/> if one was configured, otherwise
        // the same PerpetualCache the first-level cache uses.
        .implementation(valueOrDefault(typeClass, PerpetualCache.class))
        .addDecorator(valueOrDefault(evictionClass, LruCache.class))
        .clearInterval(flushInterval)
        .size(size)
        .readWrite(readWrite)
        .blocking(blocking)
        .properties(props)
        .build();
    // 2. Register it in the Configuration.
    configuration.addCache(cache);
    // 3. Remember it as MapperBuilderAssistant.currentCache.
    currentCache = cache;
    return cache;
}
咱們看到一個Mapper.xml只會解析一次<cache/>標籤,也就是隻建立一次Cache對象,放進configuration中,並將cache賦值給MapperBuilderAssistant.currentCache
// buildStatementFromContext()
private void buildStatementFromContext(List<XNode> list) {
    if (configuration.getDatabaseId() != null) {
        buildStatementFromContext(list, configuration.getDatabaseId());
    }
    buildStatementFromContext(list, null);
}

// buildStatementFromContext()
private void buildStatementFromContext(List<XNode> list, String requiredDatabaseId) {
    for (XNode context : list) {
        final XMLStatementBuilder statementParser = new XMLStatementBuilder(configuration, builderAssistant, context, requiredDatabaseId);
        try {
            // Each executable statement becomes one MappedStatement.
            statementParser.parseStatementNode();
        } catch (IncompleteElementException e) {
            configuration.addIncompleteStatement(statementParser);
        }
    }
}

// XMLStatementBuilder.parseStatementNode(); (excerpt — elisions kept as "...")
public void parseStatementNode() {
    String id = context.getStringAttribute("id");
    String databaseId = context.getStringAttribute("databaseId");
    ...
    Integer fetchSize = context.getIntAttribute("fetchSize");
    Integer timeout = context.getIntAttribute("timeout");
    String parameterMap = context.getStringAttribute("parameterMap");
    String parameterType = context.getStringAttribute("parameterType");
    Class<?> parameterTypeClass = resolveClass(parameterType);
    String resultMap = context.getStringAttribute("resultMap");
    String resultType = context.getStringAttribute("resultType");
    String lang = context.getStringAttribute("lang");
    LanguageDriver langDriver = getLanguageDriver(lang);
    ...
    // Create the MappedStatement object.
    builderAssistant.addMappedStatement(id, sqlSource, statementType, sqlCommandType, fetchSize, timeout, parameterMap, parameterTypeClass, resultMap, resultTypeClass, resultSetTypeEnum, flushCache, useCache, resultOrdered, keyGenerator, keyProperty, keyColumn, databaseId, langDriver, resultSets);
}

// builderAssistant.addMappedStatement() (excerpt)
public MappedStatement addMappedStatement( String id, ...) {
    if (unresolvedCacheRef) {
        throw new IncompleteElementException("Cache-ref not yet resolved");
    }
    id = applyCurrentNamespace(id, false);
    boolean isSelect = sqlCommandType == SqlCommandType.SELECT;
    // Create the MappedStatement object.
    MappedStatement.Builder statementBuilder = new MappedStatement.Builder(configuration, id, sqlSource, sqlCommandType)
        ...
        .flushCacheRequired(valueOrDefault(flushCache, !isSelect))
        .useCache(valueOrDefault(useCache, isSelect))
        .cache(currentCache); // here the previously built Cache is attached to the MappedStatement
    ParameterMap statementParameterMap = getStatementParameterMap(parameterMap, parameterType, id);
    if (statementParameterMap != null) {
        statementBuilder.parameterMap(statementParameterMap);
    }
    MappedStatement statement = statementBuilder.build();
    configuration.addMappedStatement(statement);
    return statement;
}
咱們看到將Mapper中建立的Cache對象,加入到了每一個MappedStatement對象中,也就是同一個Mapper中全部的MappedStatement 中的cache屬性引用是同一個
有關於<cache/>標籤的解析就到這了。
CachingExecutor
// CachingExecutor public <E> List<E> query(MappedStatement ms, Object parameterObject, RowBounds rowBounds, ResultHandler resultHandler) throws SQLException { BoundSql boundSql = ms.getBoundSql(parameterObject); // 建立 CacheKey CacheKey key = createCacheKey(ms, parameterObject, rowBounds, boundSql); return query(ms, parameterObject, rowBounds, resultHandler, key, boundSql); } public <E> List<E> query(MappedStatement ms, Object parameterObject, RowBounds rowBounds, ResultHandler resultHandler, CacheKey key, BoundSql boundSql) throws SQLException { // 從 MappedStatement 中獲取 Cache,注意這裏的 Cache 是從MappedStatement中獲取的 // 也就是咱們上面解析Mapper中<cache/>標籤中建立的,它保存在Configration中 // 咱們在上面解析blog.xml時分析過每個MappedStatement都有一個Cache對象,就是這裏 Cache cache = ms.getCache(); // 若是配置文件中沒有配置 <cache>,則 cache 爲空 if (cache != null) { //若是須要刷新緩存的話就刷新:flushCache="true" flushCacheIfRequired(ms); if (ms.isUseCache() && resultHandler == null) { ensureNoOutParams(ms, boundSql); // 訪問二級緩存 List<E> list = (List<E>) tcm.getObject(cache, key); // 緩存未命中 if (list == null) { // 若是沒有值,則執行查詢,這個查詢實際也是先走一級緩存查詢,一級緩存也沒有的話,則進行DB查詢 list = delegate.<E>query(ms, parameterObject, rowBounds, resultHandler, key, boundSql); // 緩存查詢結果 tcm.putObject(cache, key, list); } return list; } } return delegate.<E>query(ms, parameterObject, rowBounds, resultHandler, key, boundSql); }
若是設置了flushCache="true",則每次查詢都會刷新緩存
<!-- With flushCache="true" every execution of this statement clears the cache -->
<select id="getAll" resultType="entity.TDemo" useCache="true" flushCache="true" >
    select * from t_demo
</select>
如上,注意二級緩存是從 MappedStatement 中獲取的。因爲 MappedStatement 存在於全局配置中,能夠多個 CachingExecutor 獲取到,這樣就會出現線程安全問題。除此以外,若不加以控制,多個事務共用一個緩存實例,會致使髒讀問題。至於髒讀問題,須要藉助其餘類來處理,也就是上面代碼中 tcm 變量對應的類型。下面分析一下。
TransactionalCacheManager
/** Transactional cache manager: maps each shared Cache to its per-session TransactionalCache. */
public class TransactionalCacheManager {

    // Mapping from the real Cache to its TransactionalCache decorator.
    private final Map<Cache, TransactionalCache> transactionalCaches = new HashMap<Cache, TransactionalCache>();

    public void clear(Cache cache) {
        // Fetch the TransactionalCache and call its clear method (same pattern below).
        getTransactionalCache(cache).clear();
    }

    public Object getObject(Cache cache, CacheKey key) {
        // Read through the TransactionalCache.
        return getTransactionalCache(cache).getObject(key);
    }

    public void putObject(Cache cache, CacheKey key, Object value) {
        // Write into the TransactionalCache's staging area.
        getTransactionalCache(cache).putObject(key, value);
    }

    public void commit() {
        for (TransactionalCache txCache : transactionalCaches.values()) {
            txCache.commit();
        }
    }

    public void rollback() {
        for (TransactionalCache txCache : transactionalCaches.values()) {
            txCache.rollback();
        }
    }

    private TransactionalCache getTransactionalCache(Cache cache) {
        // Look up the TransactionalCache for this Cache.
        TransactionalCache txCache = transactionalCaches.get(cache);
        if (txCache == null) {
            // TransactionalCache is a decorator that adds transactional
            // behaviour to a Cache: wrap the real Cache and remember it.
            txCache = new TransactionalCache(cache);
            transactionalCaches.put(cache, txCache);
        }
        return txCache;
    }
}
TransactionalCacheManager 內部維護了 Cache 實例與 TransactionalCache 實例間的映射關係,該類也僅負責維護二者的映射關係,真正作事的仍是 TransactionalCache。TransactionalCache 是一種緩存裝飾器,能夠爲 Cache 實例增長事務功能。我在以前提到的髒讀問題正是由該類進行處理的。下面分析一下該類的邏輯。
TransactionalCache
public class TransactionalCache implements Cache {

    // The real cache object — the same Cache used as the key in the
    // manager's Map<Cache, TransactionalCache>.
    private final Cache delegate;
    // When true, the delegate is wiped on commit.
    private boolean clearOnCommit;
    // Results fetched from the DB are staged here until the transaction commits.
    private final Map<Object, Object> entriesToAddOnCommit;
    // CacheKeys that missed the cache during this transaction.
    private final Set<Object> entriesMissedInCache;

    @Override
    public Object getObject(Object key) {
        // Reads go straight to the delegate, i.e. the real shared cache.
        Object object = delegate.getObject(key);
        if (object == null) {
            // Miss: remember the key in entriesMissedInCache.
            entriesMissedInCache.add(key);
        }
        if (clearOnCommit) {
            return null;
        } else {
            return object;
        }
    }

    @Override
    public void putObject(Object key, Object object) {
        // Writes go into the entriesToAddOnCommit staging map,
        // NOT into the real cache object (delegate).
        entriesToAddOnCommit.put(key, object);
    }

    @Override
    public Object removeObject(Object key) {
        return null;
    }

    @Override
    public void clear() {
        clearOnCommit = true;
        // Empty the staging map, but leave the delegate untouched.
        entriesToAddOnCommit.clear();
    }

    public void commit() {
        // clearOnCommit decides whether the delegate is wiped first.
        if (clearOnCommit) {
            delegate.clear();
        }
        // Flush the staged entries into the delegate cache.
        flushPendingEntries();
        // Reset entriesToAddOnCommit and entriesMissedInCache.
        reset();
    }

    public void rollback() {
        unlockMissedEntries();
        reset();
    }

    private void reset() {
        clearOnCommit = false;
        // Empty both collections.
        entriesToAddOnCommit.clear();
        entriesMissedInCache.clear();
    }

    private void flushPendingEntries() {
        for (Map.Entry<Object, Object> entry : entriesToAddOnCommit.entrySet()) {
            // Copy the staged entries into the delegate.
            delegate.putObject(entry.getKey(), entry.getValue());
        }
        for (Object entry : entriesMissedInCache) {
            if (!entriesToAddOnCommit.containsKey(entry)) {
                // Record misses as null values.
                delegate.putObject(entry, null);
            }
        }
    }

    private void unlockMissedEntries() {
        for (Object entry : entriesMissedInCache) {
            try {
                // removeObject releases the key (relevant when the delegate
                // is a BlockingCache — presumably; confirm against decorator).
                delegate.removeObject(entry);
            } catch (Exception e) {
                log.warn("...");
            }
        }
    }
}
存儲二級緩存對象的時候是放到了TransactionalCache.entriesToAddOnCommit這個map中,可是每次查詢的時候是直接從TransactionalCache.delegate中去查詢的,因此這個二級緩存查詢數據庫後,設置緩存值是沒有馬上生效的,主要是由於直接存到 delegate 會致使髒數據問題。
那咱們來看下SqlSession.commit()方法作了什麼
SqlSession
// DefaultSqlSession.commit(boolean)
@Override
public void commit(boolean force) {
    try {
        // The interesting call:
        executor.commit(isCommitOrRollbackRequired(force));
        dirty = false;
    } catch (Exception e) {
        throw ExceptionFactory.wrapException("Error committing transaction. Cause: " + e, e);
    } finally {
        ErrorContext.instance().reset();
    }
}

// CachingExecutor.commit()
@Override
public void commit(boolean required) throws SQLException {
    delegate.commit(required);
    tcm.commit(); // here
}

// TransactionalCacheManager.commit()
public void commit() {
    for (TransactionalCache txCache : transactionalCaches.values()) {
        txCache.commit(); // here
    }
}

// TransactionalCache.commit()
public void commit() {
    if (clearOnCommit) {
        delegate.clear();
    }
    flushPendingEntries(); // this line
    reset();
}

// TransactionalCache.flushPendingEntries()
private void flushPendingEntries() {
    for (Map.Entry<Object, Object> entry : entriesToAddOnCommit.entrySet()) {
        // Only now are the staged entries copied one by one into the
        // delegate — the moment the second-level cache actually takes effect.
        delegate.putObject(entry.getKey(), entry.getValue());
    }
    for (Object entry : entriesMissedInCache) {
        if (!entriesToAddOnCommit.containsKey(entry)) {
            delegate.putObject(entry, null);
        }
    }
}
若是從數據庫查詢到的數據直接存到 delegate 會致使髒數據問題。下面經過一張圖演示一下髒數據問題發生的過程,假設兩個線程開啓兩個不一樣的事務,它們的執行過程以下:
如上圖,時刻2,事務 A 對記錄 A 進行了更新。時刻3,事務 A 從數據庫查詢記錄 A,並將記錄 A 寫入緩存中。時刻4,事務 B 查詢記錄 A,因爲緩存中存在記錄 A,事務 B 直接從緩存中取數據。這個時候,髒數據問題就發生了。事務 B 在事務 A 未提交狀況下,讀取到了事務 A 所修改的記錄。爲了解決這個問題,咱們能夠爲每一個事務引入一個獨立的緩存。查詢數據時,仍從 delegate 緩存(如下統稱爲共享緩存)中查詢。若緩存未命中,則查詢數據庫。存儲查詢結果時,並不直接存儲查詢結果到共享緩存中,而是先存儲到事務緩存中,也就是 entriesToAddOnCommit 集合。當事務提交時,再將事務緩存中的緩存項轉存到共享緩存中。這樣,事務 B 只能在事務 A 提交後,才能讀取到事務 A 所作的修改,解決了髒讀問題。
咱們來看看SqlSession的更新操做
// DefaultSqlSession.update
public int update(String statement, Object parameter) {
    int var4;
    try {
        this.dirty = true;
        MappedStatement ms = this.configuration.getMappedStatement(statement);
        var4 = this.executor.update(ms, this.wrapCollection(parameter));
    } catch (Exception var8) {
        throw ExceptionFactory.wrapException("Error updating database. Cause: " + var8, var8);
    } finally {
        ErrorContext.instance().reset();
    }
    return var4;
}

// CachingExecutor.update
public int update(MappedStatement ms, Object parameterObject) throws SQLException {
    this.flushCacheIfRequired(ms);
    return this.delegate.update(ms, parameterObject);
}

private void flushCacheIfRequired(MappedStatement ms) {
    // Fetch the MappedStatement's Cache so it can be cleared.
    Cache cache = ms.getCache();
    // Cleared only when the statement has flushCache="true" set.
    if (cache != null && ms.isFlushCacheRequired()) {
        this.tcm.clear(cache);
    }
}
MyBatis二級緩存只適用於不常進行增、刪、改的數據,好比國家行政區省市區街道數據。一但數據變動,MyBatis會清空緩存。所以二級緩存不適用於常常進行更新的數據。
經過上面代碼分析,咱們知道二級緩存默認和一級緩存都是使用的PerpetualCache存儲結果,一級緩存只要SQLSession關閉就會清空,其內部使用HashMap實現,因此二級緩存沒法實現分佈式,而且服務器重啓後就沒有緩存了。此時就須要引入第三方緩存中間件,將緩存的值存到外部,如redis和ehcache
修改mapper.xml中的配置。
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="com.tyb.saas.common.dal.dao.AreaDefaultMapper">
    <!--
        flushInterval (cache flush interval): in milliseconds, any positive integer.
        By default it is unset — there is no periodic flush; the cache is only
        flushed when statements invoke it.
        size (number of references): any positive integer; keep in mind how many
        objects you cache and the memory available. Default is 1024.
        readOnly: true or false. A read-only cache returns the SAME instance of a
        cached object to all callers, so those objects must not be modified —
        this gives a significant performance advantage. A read-write cache
        returns a copy (via serialization): slower but safe, hence the
        default is false.
        eviction (eviction policy), default LRU:
          1. LRU  - least recently used: evict objects unused for the longest time.
          2. FIFO - first in, first out: evict in insertion order.
          3. SOFT - soft references: evict based on GC state and soft-reference rules.
          4. WEAK - weak references: evict more aggressively based on GC state and
             weak-reference rules.
        blocking: default false. When true the cache is wrapped in a BlockingCache,
        which locks the key on lookup; the lock is released on a hit, otherwise
        only after the database has been queried — preventing multiple threads
        from querying the same data concurrently (see the BlockingCache source).
        type (cache class): the Cache implementation to use. MyBatis caches with a
        HashMap by default; here a third-party store is plugged in instead.
    -->
    <cache type="org.mybatis.caches.redis.RedisCache"
           blocking="false" flushInterval="0" readOnly="true" size="1024" eviction="FIFO"/>

    <!-- useCache: defaults to true (use the cache) -->
    <select id="find" parameterType="map" resultType="com.chenhao.model.User" useCache="true">
        SELECT * FROM user
    </select>

</mapper>
依然很簡單, RedisCache 在保存緩存數據和獲取緩存數據時,使用了Java的序列化和反序列化,所以須要保證被緩存的對象必須實現Serializable接口。
也能夠本身實現cache
package com.chenhao.mybatis.cache;

import org.apache.ibatis.cache.Cache;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.ValueOperations;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Custom Redis-backed MyBatis second-level cache.
 *
 * NOTE(review): valueOs and template are static fields that must be injected
 * (via the setters) before MyBatis instantiates this cache, otherwise every
 * operation throws a NullPointerException — confirm the wiring.
 *
 * @author chenhao
 * @date 2019/10/31.
 */
public class RedisCache implements Cache {

    // Cache id = the mapper namespace this cache serves.
    private final String id;

    private static ValueOperations<String, Object> valueOs;

    private static RedisTemplate<String, String> template;

    public static void setValueOs(ValueOperations<String, Object> valueOs) {
        RedisCache.valueOs = valueOs;
    }

    public static void setTemplate(RedisTemplate<String, String> template) {
        RedisCache.template = template;
    }

    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();

    public RedisCache(String id) {
        if (id == null) {
            throw new IllegalArgumentException("Cache instances require an ID");
        }
        this.id = id;
    }

    @Override
    public String getId() {
        return this.id;
    }

    @Override
    public void putObject(Object key, Object value) {
        // Entries expire after 10 minutes.
        valueOs.set(key.toString(), value, 10, TimeUnit.MINUTES);
    }

    @Override
    public Object getObject(Object key) {
        return valueOs.get(key.toString());
    }

    @Override
    public Object removeObject(Object key) {
        // NOTE(review): this overwrites the entry with "" and a 0-minute
        // timeout instead of deleting the key — template.delete(key.toString())
        // looks like the intended operation; confirm RedisTemplate semantics
        // for a zero timeout.
        valueOs.set(key.toString(), "", 0, TimeUnit.MINUTES);
        return key;
    }

    @Override
    public void clear() {
        // NOTE(review): flushDb wipes the ENTIRE Redis database, not just this
        // mapper's namespace — any other data in the same DB is lost too.
        template.getConnectionFactory().getConnection().flushDb();
    }

    @Override
    public int getSize() {
        // NOTE(review): dbSize counts all keys in the Redis DB, not only the
        // keys written by this cache.
        return template.getConnectionFactory().getConnection().dbSize().intValue();
    }

    @Override
    public ReadWriteLock getReadWriteLock() {
        return this.readWriteLock;
    }
}
Mapper中配置本身實現的Cache
<!-- Point this mapper's second-level cache at the custom RedisCache implementation -->
<cache type="com.chenhao.mybatis.cache.RedisCache"/>