I previously wrote about getting the latest Kafka offsets using Scala, but in most cases we need to do it in Java.

For the Scala approach, see: http://www.cnblogs.com/weishao-lsv/p/8159396.html

Below is Java code that fetches the latest offset of every partition of a topic.
GetOffsetShellWrap
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;

public class GetOffsetShellWrap {

    private static Logger log = LoggerFactory.getLogger(GetOffsetShellWrap.class);

    private String topic;
    private int port;
    private String host;
    private int time;

    public GetOffsetShellWrap(String topic, int port, String host, int time) {
        this.topic = topic;
        this.port = port;
        this.host = host;
        this.time = time;
    }

    public Map<String, String> getEveryPartitionMaxOffset() {
        // 1. Get all partitions of the topic and each partition's metadata
        //    => returns Map<partitionId, PartitionMetadata>
        TreeMap<Integer, PartitionMetadata> partitionIdAndMeta = findTopicEveryPartition();
        Map<String, String> map = new HashMap<String, String>();
        for (Entry<Integer, PartitionMetadata> entry : partitionIdAndMeta.entrySet()) {
            int leaderPartitionId = entry.getKey();
            // 2. From each partition's metadata, get the host of the leader broker
            String leadBroker = entry.getValue().leader().host();
            String clientName = "Client_" + topic + "_" + leaderPartitionId;
            SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
            // 3. Fetch the partition's offset from the leader broker
            long readOffset = getLastOffset(consumer, topic, leaderPartitionId, clientName);
            map.put(String.valueOf(leaderPartitionId), String.valueOf(readOffset));
            if (consumer != null)
                consumer.close();
        }
        return map;
    }

    private TreeMap<Integer, PartitionMetadata> findTopicEveryPartition() {
        TreeMap<Integer, PartitionMetadata> map = new TreeMap<Integer, PartitionMetadata>();
        SimpleConsumer consumer = null;
        try {
            consumer = new SimpleConsumer(host, port, 100000, 64 * 1024, "leaderLookup" + new Date().getTime());
            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
            List<TopicMetadata> metaData = resp.topicsMetadata();
            if (metaData != null && !metaData.isEmpty()) {
                TopicMetadata item = metaData.get(0);
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    map.put(part.partitionId(), part);
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (consumer != null)
                consumer.close();
        }
        return map;
    }

    private long getLastOffset(SimpleConsumer consumer, String topic, int leaderPartitionId, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, leaderPartitionId);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(time, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            log.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, leaderPartitionId));
            return 0;
        }
        long[] offsets = response.offsets(topic, leaderPartitionId);
        return offsets[0];
    }
}
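A note on the `time` parameter passed to `PartitionOffsetRequestInfo`: -1 requests the latest offset (the same value as `kafka.api.OffsetRequest.LatestTime()`), while -2 requests the earliest (`kafka.api.OffsetRequest.EarliestTime()`). The second argument, `1`, limits the response to a single offset per partition, which is why the code reads `offsets[0]`.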
GetOffsetShellWrapJavaTest
import java.util.Map;

public class GetOffsetShellWrapJavaTest {
    public static void main(String[] args) {
        int port = 9092;
        String topic = "2017-11-6-test";
        int time = -1; // -1 = latest offset
        GetOffsetShellWrap offsetSearch = new GetOffsetShellWrap(topic, port, "hadoop-01", time);
        Map<String, String> map = offsetSearch.getEveryPartitionMaxOffset();
        for (String key : map.keySet()) {
            System.out.println(key + "---" + map.get(key));
        }
    }
}
Output:
0---16096
1---15930
2---16099
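The `SimpleConsumer` API used above belongs to the old Kafka 0.8-era client and was removed in later releases. For clusters running Kafka 0.10.1 or newer, the same result can be obtained with `KafkaConsumer#endOffsets`. The following is a minimal sketch, not part of the original post, assuming a 0.10.1+ client on the classpath; the broker address `hadoop-01:9092` and topic name are carried over from the test above.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class GetOffsetWithNewConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "hadoop-01:9092"); // assumed broker address, as in the test above
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Discover all partitions of the topic, then ask the brokers for their end offsets
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo info : consumer.partitionsFor("2017-11-6-test")) {
                partitions.add(new TopicPartition(info.topic(), info.partition()));
            }
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            for (Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) {
                System.out.println(entry.getKey().partition() + "---" + entry.getValue());
            }
        }
    }
}

Unlike the `SimpleConsumer` version, this approach needs no manual leader lookup: the new consumer handles metadata discovery and broker routing internally.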