This article shows how to persist the SQL execution statistics and connection pool resource status collected by the Druid connection pool's monitoring, so that operations staff can analyze and optimize the system. The example below passed an initial test.
Step 1:
Create a MyDruidStatLogger class that extends DruidDataSourceStatLoggerAdapter and implements DruidDataSourceStatLogger. The full code is below. This example only receives each stats snapshot and prints it to the console; a real application needs to implement an actual storage scheme (a hedged persistence sketch follows the class).
package com.andaily.web.context;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

public class MyDruidStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {

    private static final Log LOG = LogFactory.getLog(MyDruidStatLogger.class);

    private Log logger = LOG;

    public MyDruidStatLogger() {
        this.configFromProperties(System.getProperties());
    }

    @Override
    public void configFromProperties(Properties properties) {
        String property = properties.getProperty("druid.stat.loggerName");
        if (property != null && property.length() > 0) {
            setLoggerName(property);
        }
    }

    public Log getLogger() {
        return logger;
    }

    @Override
    public void setLoggerName(String loggerName) {
        logger = LogFactory.getLog(loggerName);
    }

    @Override
    public void setLogger(Log logger) {
        if (logger == null) {
            throw new IllegalArgumentException("logger can not be null");
        }
        this.logger = logger;
    }

    public boolean isLogEnable() {
        return true;
    }

    public void log(String value) {
        logger.info(value);
    }

    @Override
    public void log(DruidDataSourceStatValue statValue) {
        // Pool-level metrics; optional metrics are skipped when zero.
        Map<String, Object> map = new LinkedHashMap<String, Object>();

        map.put("url", statValue.getUrl());
        map.put("dbType", statValue.getDbType());
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());

        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }

        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }

        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());

        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }
        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }
        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }
        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }
        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }
        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }
        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }
        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }
        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }
        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }
        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }
        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }
        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }
        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }
        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
        }
        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }
        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }
        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }

        // Per-SQL metrics: execution counts, latency histograms, errors, etc.
        ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                sqlStatMap.put("sql", sqlStat.getSql());

                if (sqlStat.getExecuteCount() > 0) {
                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                    sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
                }

                long executeErrorCount = sqlStat.getExecuteErrorCount();
                if (executeErrorCount > 0) {
                    sqlStatMap.put("executeErrorCount", executeErrorCount);
                }

                int runningCount = sqlStat.getRunningCount();
                if (runningCount > 0) {
                    sqlStatMap.put("runningCount", runningCount);
                }

                int concurrentMax = sqlStat.getConcurrentMax();
                if (concurrentMax > 0) {
                    sqlStatMap.put("concurrentMax", concurrentMax);
                }

                if (sqlStat.getFetchRowCount() > 0) {
                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                    sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
                    sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
                }

                if (sqlStat.getUpdateCount() > 0) {
                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                    sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
                    sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
                }

                if (sqlStat.getInTransactionCount() > 0) {
                    sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
                }
                if (sqlStat.getClobOpenCount() > 0) {
                    sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
                }
                if (sqlStat.getBlobOpenCount() > 0) {
                    sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
                }

                sqlList.add(sqlStatMap);
            }
            map.put("sqlList", sqlList);
        }

        if (statValue.getKeepAliveCheckCount() > 0) {
            map.put("keepAliveCheckCount", statValue.getKeepAliveCheckCount());
        }

        // Serialize the snapshot to JSON; a real implementation would persist it here.
        String text = JSONUtils.toJSONString(map);
        System.out.println("==============:" + text);
    }
}
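The class above only prints each snapshot. As one possible storage scheme, here is a minimal persistence sketch, assuming a hypothetical table druid_stat_log(create_time TIMESTAMP, stat_json TEXT) and a plain DriverManager connection to a separate monitoring database (so the writes do not distort the monitored pool's own statistics):

package com.andaily.web.context;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;

/**
 * Minimal persistence sketch: appends each stats snapshot (as JSON) to a
 * hypothetical druid_stat_log table. Connection details are illustrative.
 */
public class JdbcStatWriter {

    private final String url;
    private final String user;
    private final String password;

    public JdbcStatWriter(String url, String user, String password) {
        this.url = url;
        this.user = user;
        this.password = password;
    }

    public void write(String statJson) {
        String sql = "INSERT INTO druid_stat_log (create_time, stat_json) VALUES (?, ?)";
        try (Connection conn = DriverManager.getConnection(url, user, password);
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            ps.setString(2, statJson);
            ps.executeUpdate();
        } catch (SQLException e) {
            // Persisting monitoring data must never break the application,
            // so failures are only logged here.
            e.printStackTrace();
        }
    }
}

With this in place, the System.out.println call at the end of MyDruidStatLogger.log(DruidDataSourceStatValue) would become statWriter.write(text).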
Step 2: Configure the Spring bean
<bean id="myStatLogger" class="com.andaily.web.context.MyDruidStatLogger"> </bean> <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close"> <!-- 基本屬性 url、user、password --> <property name="url" value="${jdbc.url}" /> <property name="username" value="${jdbc.username}" /> <property name="password" value="${jdbc.password}" /> <!-- 配置初始化大小、最小、最大 --> <property name="initialSize" value="1" /> <property name="minIdle" value="1" /> <property name="maxActive" value="20" /> <!-- 配置獲取鏈接等待超時的時間 --> <property name="maxWait" value="60000" /> <!-- 配置間隔多久才進行一次檢測,檢測須要關閉的空閒鏈接,單位是毫秒 --> <property name="timeBetweenEvictionRunsMillis" value="60000" /> <!-- 配置一個鏈接在池中最小生存的時間,單位是毫秒 --> <property name="minEvictableIdleTimeMillis" value="300000" /> <property name="validationQuery" value="SELECT 'x'" /> <property name="testWhileIdle" value="true" /> <property name="testOnBorrow" value="false" /> <property name="testOnReturn" value="false" /> <!-- 打開PSCache,而且指定每一個鏈接上PSCache的大小 --> <property name="poolPreparedStatements" value="true" /> <property name="maxPoolPreparedStatementPerConnectionSize" value="20" /> <!-- 配置監控統計攔截的filters,去掉後監控界面sql沒法統計 --> <property name="filters" value="stat" /> <property name="timeBetweenLogStatsMillis" value="1000" /> <property name="statLogger" ref="myStatLogger"/> </bean>
After startup you will see the Druid monitoring information printed to the console.
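For reference, one snapshot line might look like the following. This is a hand-constructed illustration based on the fields the logger always emits, not captured output:

==============:{"url":"jdbc:mysql://localhost:3306/test","dbType":"mysql","name":"DataSource-1139237199","activeCount":0,"poolingCount":1,"connectCount":3,"closeCount":3}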