This article picks up where the previous one, "Collecting IDC cluster metrics", left off.
Once the metrics of the IDC machines themselves have been collected, we still need to collect the HDFS and YARN metrics of the Hadoop cluster. Broadly there are two approaches. The first, of course, is to keep using the CM API, since Cloudera Manager's tsquery exposes a very rich set of monitoring metrics.
The second is to fetch the data via JMX. In essence this means requesting the relevant URLs (listed below), parsing the JSON that comes back to extract the values we need, merging the results together, and running the collection job on a timer.
In practice I went with the JMX approach; the URL requests involved are as follows:
http://localhost:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo
http://localhost:50070/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState
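Before the code, it helps to know the shape of what these endpoints return. Abridged, and with illustrative values (the exact fields vary slightly across Hadoop versions), the NameNodeInfo query yields roughly:

{
  "beans" : [ {
    "name" : "Hadoop:service=NameNode,name=NameNodeInfo",
    "Total" : 52844687196160,
    "Used" : 12413456789504,
    "Free" : 38412345678848,
    "PercentUsed" : 23.49,
    "Safemode" : "",
    "TotalBlocks" : 182342,
    "TotalFiles" : 193284,
    "NumberOfMissingBlocks" : 0,
    "LiveNodes" : "{\"dn01...\":{...}}",
    "DeadNodes" : "{}"
  } ]
}

Note that LiveNodes and DeadNodes are JSON strings embedded inside the outer document, which is why the code below has to parse them a second time.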
The concrete implementation goes as follows. First, we need an HTTP client to send requests to the server and hand back the JSON responses; I wrote my own StatefulHttpClient for this (sketched below).
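The repo's actual implementation is not reproduced in this post; as a rough sketch only, a client with the get(type, url, headers, params) signature used later could look like this, assuming Jackson for JSON binding:

// A rough sketch -- the real StatefulHttpClient in the repo may differ.
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Map;

public class StatefulHttpClient {

    private final ObjectMapper mapper = new ObjectMapper();

    public StatefulHttpClient(Object config) {
        // the article constructs it as new StatefulHttpClient(null),
        // so the config parameter is unused in this sketch
    }

    public <T> T get(Class<T> type, String url,
                     Map<String, String> headers,
                     Map<String, String> params) throws IOException {
        // query parameters are ignored here because the article always
        // passes null; a fuller client would append them to the URL
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestMethod("GET");
        if (headers != null) {
            headers.forEach(conn::setRequestProperty);
        }
        try (InputStream in = conn.getInputStream()) {
            // bind the JSON response body straight onto the target type
            return mapper.readValue(in, type);
        } finally {
            conn.disconnect();
        }
    }
}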
Next, the utility class JsonUtil handles conversion between JSON data and Java objects.
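JsonUtil is not shown in the post either; a plausible minimal version, again assuming Jackson, that supports the fromJsonMap(keyClass, valueClass, json) call used in HadoopUtil below:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.MapType;

import java.io.IOException;
import java.util.Map;

public class JsonUtil {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    public static <K, V> Map<K, V> fromJsonMap(Class<K> keyClass, Class<V> valueClass,
                                               String json) throws IOException {
        // build a Map<K, V> type so Jackson returns a typed map rather
        // than a raw map of Objects
        MapType type = MAPPER.getTypeFactory().constructMapType(Map.class, keyClass, valueClass);
        return MAPPER.readValue(json, type);
    }
}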
Of course, we also need to work out which monitoring metrics we want to collect and write entity classes for them. Taking HDFS as the example here, these are mainly HdfsSummary and DataNodeInfo, sketched below.
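Abridged sketches of the two entities, with fields inferred from the setters called in HadoopUtil; the repo versions are separate public classes with full getters/setters and HdfsSummary's printInfo() method, all omitted here:

import java.util.List;

class HdfsSummary {
    private double total;            // configured capacity, in GB
    private double dfsUsed;
    private double dfsFree;
    private double nonDfsUsed;
    private double percentUsed;
    private String safeMode;
    private int totalBlocks;
    private int totalFiles;
    private int missingBlocks;
    private int numLiveDataNodes;
    private int numDeadDataNodes;
    private int volumeFailuresTotal;
    private List<DataNodeInfo> liveDataNodeInfos;
    private List<DataNodeInfo> deadDataNodeInfos;
    // plus blockPoolUsedSpace, percentBlockPoolUsed, percentRemaining, ...
}

class DataNodeInfo {
    private String nodeName;
    private String nodeAddr;
    private int lastContact;         // seconds since the last heartbeat
    private String adminState;
    private double capacity;         // GB
    private double usedSpace;
    private double nonDfsUsedSpace;
    private double remaining;
    private int numBlocks;
    private double blockPoolUsed;
    private double blockPoolUsedPercent;
}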
The complete code for this example is available on GitHub, at:
Here I will only show the core code. MonitorMetrics.java:
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class MonitorMetrics {
    // "beans" is the outermost key in the JSON string returned over JMX,
    // with the structure {"beans":[{"":"","":"",...}]}
    List<Map<String, Object>> beans = new ArrayList<>();

    public List<Map<String, Object>> getBeans() {
        return beans;
    }

    public void setBeans(List<Map<String, Object>> beans) {
        this.beans = beans;
    }

    public Object getMetricsValue(String name) {
        if (beans.isEmpty()) {
            return null;
        }
        // each qry above matches a single bean, so the first element is the one we want
        return beans.get(0).getOrDefault(name, null);
    }
}
HadoopUtil.java:
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HadoopUtil {
    public static long gbLength = 1073741824L; // bytes per GB, for unit conversion
    public static final String hadoopJmxServerUrl = "http://localhost:50070";
    public static final String jmxServerUrlFormat = "%s/jmx?qry=%s";
    public static final String nameNodeInfo = "Hadoop:service=NameNode,name=NameNodeInfo";
    public static final String fsNameSystemState = "Hadoop:service=NameNode,name=FSNamesystemState";

    public static HdfsSummary getHdfsSummary(StatefulHttpClient client) throws IOException {
        HdfsSummary hdfsSummary = new HdfsSummary();
        String namenodeUrl = String.format(jmxServerUrlFormat, hadoopJmxServerUrl, nameNodeInfo);
        MonitorMetrics monitorMetrics = client.get(MonitorMetrics.class, namenodeUrl, null, null);
        hdfsSummary.setTotal(doubleFormat(monitorMetrics.getMetricsValue("Total"), gbLength));
        hdfsSummary.setDfsFree(doubleFormat(monitorMetrics.getMetricsValue("Free"), gbLength));
        hdfsSummary.setDfsUsed(doubleFormat(monitorMetrics.getMetricsValue("Used"), gbLength));
        hdfsSummary.setPercentUsed(doubleFormat(monitorMetrics.getMetricsValue("PercentUsed")));
        hdfsSummary.setSafeMode(monitorMetrics.getMetricsValue("Safemode").toString());
        hdfsSummary.setNonDfsUsed(doubleFormat(monitorMetrics.getMetricsValue("NonDfsUsedSpace"), gbLength));
        hdfsSummary.setBlockPoolUsedSpace(doubleFormat(monitorMetrics.getMetricsValue("BlockPoolUsedSpace"), gbLength));
        hdfsSummary.setPercentBlockPoolUsed(doubleFormat(monitorMetrics.getMetricsValue("PercentBlockPoolUsed")));
        hdfsSummary.setPercentRemaining(doubleFormat(monitorMetrics.getMetricsValue("PercentRemaining")));
        hdfsSummary.setTotalBlocks((int) monitorMetrics.getMetricsValue("TotalBlocks"));
        hdfsSummary.setTotalFiles((int) monitorMetrics.getMetricsValue("TotalFiles"));
        hdfsSummary.setMissingBlocks((int) monitorMetrics.getMetricsValue("NumberOfMissingBlocks"));
        // LiveNodes and DeadNodes are themselves JSON strings and need a second parse
        String liveNodesJson = monitorMetrics.getMetricsValue("LiveNodes").toString();
        String deadNodesJson = monitorMetrics.getMetricsValue("DeadNodes").toString();
        List<DataNodeInfo> liveNodes = dataNodeInfoReader(liveNodesJson);
        List<DataNodeInfo> deadNodes = dataNodeInfoReader(deadNodesJson);
        hdfsSummary.setLiveDataNodeInfos(liveNodes);
        hdfsSummary.setDeadDataNodeInfos(deadNodes);

        String fsNameSystemStateUrl = String.format(jmxServerUrlFormat, hadoopJmxServerUrl, fsNameSystemState);
        MonitorMetrics hadoopMetrics = client.get(MonitorMetrics.class, fsNameSystemStateUrl, null, null);
        hdfsSummary.setNumLiveDataNodes((int) hadoopMetrics.getMetricsValue("NumLiveDataNodes"));
        hdfsSummary.setNumDeadDataNodes((int) hadoopMetrics.getMetricsValue("NumDeadDataNodes"));
        hdfsSummary.setVolumeFailuresTotal((int) hadoopMetrics.getMetricsValue("VolumeFailuresTotal"));
        return hdfsSummary;
    }

    public static List<DataNodeInfo> dataNodeInfoReader(String jsonData) throws IOException {
        List<DataNodeInfo> dataNodeInfos = new ArrayList<DataNodeInfo>();
        Map<String, Object> nodes = JsonUtil.fromJsonMap(String.class, Object.class, jsonData);
        for (Map.Entry<String, Object> node : nodes.entrySet()) {
            Map<String, Object> info = (HashMap<String, Object>) node.getValue();
            String nodeName = node.getKey().split(":")[0];
            DataNodeInfo dataNodeInfo = new DataNodeInfo();
            dataNodeInfo.setNodeName(nodeName);
            dataNodeInfo.setNodeAddr(info.get("infoAddr").toString().split(":")[0]);
            dataNodeInfo.setLastContact((int) info.get("lastContact"));
            dataNodeInfo.setUsedSpace(doubleFormat(info.get("usedSpace"), gbLength));
            dataNodeInfo.setAdminState(info.get("adminState").toString());
            dataNodeInfo.setNonDfsUsedSpace(doubleFormat(info.get("nonDfsUsedSpace"), gbLength));
            dataNodeInfo.setCapacity(doubleFormat(info.get("capacity"), gbLength));
            dataNodeInfo.setNumBlocks((int) info.get("numBlocks"));
            dataNodeInfo.setRemaining(doubleFormat(info.get("remaining"), gbLength));
            dataNodeInfo.setBlockPoolUsed(doubleFormat(info.get("blockPoolUsed"), gbLength));
            dataNodeInfo.setBlockPoolUsedPercent(doubleFormat(info.get("blockPoolUsedPercent")));
            dataNodeInfos.add(dataNodeInfo);
        }
        return dataNodeInfos;
    }

    public static DecimalFormat df = new DecimalFormat("#.##");

    // convert a raw byte count to GB and round to two decimal places
    public static double doubleFormat(Object num, long unit) {
        double result = Double.parseDouble(String.valueOf(num)) / unit;
        return Double.parseDouble(df.format(result));
    }

    public static double doubleFormat(Object num) {
        double result = Double.parseDouble(String.valueOf(num));
        return Double.parseDouble(df.format(result));
    }

    public static void main(String[] args) {
        String res = String.format(jmxServerUrlFormat, hadoopJmxServerUrl, nameNodeInfo);
        System.out.println(res);
    }
}
MonitorApp.java:
import java.io.IOException;

public class MonitorApp {
    public static void main(String[] args) throws IOException {
        StatefulHttpClient client = new StatefulHttpClient(null);
        HadoopUtil.getHdfsSummary(client).printInfo();
    }
}
最終展現結果以下:
工具
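The post mentions earlier that collection should run on a timer, but does not show that part; a minimal sketch with ScheduledExecutorService, where the 60-second interval and the catch-all error handling are my own assumptions:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduledMonitorApp {
    public static void main(String[] args) {
        StatefulHttpClient client = new StatefulHttpClient(null);
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(() -> {
            try {
                HadoopUtil.getHdfsSummary(client).printInfo();
            } catch (Exception e) {
                // swallow per-run failures so one bad poll does not
                // cancel the whole schedule
                e.printStackTrace();
            }
        }, 0, 60, TimeUnit.SECONDS);
    }
}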
Collecting YARN metrics follows the same approach, so I won't walk through it here.
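For reference, the ResourceManager exposes the same /jmx servlet on its web UI port (8088 by default), so an analogous query for cluster-level YARN metrics, such as active NodeManager counts, would look something like:

http://localhost:8088/jmx?qry=Hadoop:service=ResourceManager,name=ClusterMetrics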