This article takes a look at the execute and finishBatch methods of Storm's AggregateProcessor.
TridentTopology topology = new TridentTopology();
topology.newStream("spout1", spout)
        .groupBy(new Fields("user"))
        .aggregate(new Fields("user", "score"), new UserCountAggregator(), new Fields("val"))
        .toStream()
        .parallelismHint(1)
        .each(new Fields("val"), new PrintEachFunc(), new Fields());
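UserCountAggregator and PrintEachFunc are user-defined classes not shown in this article. As a point of reference, here is a minimal hypothetical sketch of what such an aggregator could look like (only the class name comes from the topology above; everything else is an assumption):

import org.apache.storm.trident.operation.BaseAggregator;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

// Hypothetical sketch: counts the tuples of each group (user) in a batch
// and emits the count as the "val" field declared in the topology.
public class UserCountAggregator extends BaseAggregator<UserCountAggregator.State> {

    public static class State {
        long count = 0;
    }

    @Override
    public State init(Object batchId, TridentCollector collector) {
        // called at the start of each batch (per group, since groupBy is in front)
        return new State();
    }

    @Override
    public void aggregate(State state, TridentTuple tuple, TridentCollector collector) {
        state.count++;
    }

    @Override
    public void complete(State state, TridentCollector collector) {
        // called from finishBatch; emits the aggregated value downstream
        collector.emit(new Values(state.count));
    }
}

Because of the groupBy, Trident wraps this user aggregator in a GroupedAggregator, so init/aggregate/complete run once per group per batch, as the source walkthrough below shows.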
storm-core-1.2.2-sources.jar!/org/apache/storm/trident/topology/TridentBoltExecutor.java
private void checkFinish(TrackedBatch tracked, Tuple tuple, TupleType type) {
    if(tracked.failed) {
        failBatch(tracked);
        _collector.fail(tuple);
        return;
    }
    CoordCondition cond = tracked.condition;
    boolean delayed = tracked.delayedAck==null &&
                          (cond.commitStream!=null && type==TupleType.COMMIT
                           || cond.commitStream==null);
    if(delayed) {
        tracked.delayedAck = tuple;
    }
    boolean failed = false;
    if(tracked.receivedCommit && tracked.reportedTasks == cond.expectedTaskReports) {
        if(tracked.receivedTuples == tracked.expectedTupleCount) {
            finishBatch(tracked, tuple);
        } else {
            //TODO: add logging that not all tuples were received
            failBatch(tracked);
            _collector.fail(tuple);
            failed = true;
        }
    }
    if(!delayed && !failed) {
        _collector.ack(tuple);
    }
}

private boolean finishBatch(TrackedBatch tracked, Tuple finishTuple) {
    boolean success = true;
    try {
        _bolt.finishBatch(tracked.info);
        String stream = COORD_STREAM(tracked.info.batchGroup);
        for(Integer task: tracked.condition.targetTasks) {
            _collector.emitDirect(task, stream, finishTuple,
                    new Values(tracked.info.batchId, Utils.get(tracked.taskEmittedTuples, task, 0)));
        }
        if(tracked.delayedAck!=null) {
            _collector.ack(tracked.delayedAck);
            tracked.delayedAck = null;
        }
    } catch(FailedException e) {
        failBatch(tracked, e);
        success = false;
    }
    _batches.remove(tracked.info.batchId.getId());
    return success;
}

public static class TrackedBatch {
    int attemptId;
    BatchInfo info;
    CoordCondition condition;
    int reportedTasks = 0;
    int expectedTupleCount = 0;
    int receivedTuples = 0;
    Map<Integer, Integer> taskEmittedTuples = new HashMap<>();
    //......
}
In addition, when a REGULAR-type tuple is received and tracked.condition.expectedTaskReports == 0, the finishBatch operation is also invoked (this is the spout's case: tracked.condition.expectedTaskReports is 0 for the spout because it is the data source, so it never receives COORD_STREAM tuples to update expectedTaskReports and expectedTupleCount). finishBatch emits new Values(tracked.info.batchId, Utils.get(tracked.taskEmittedTuples, task, 0)), that is, new Fields("id", "count"), on the COORD_STREAM stream: the batchId plus the number of tuples sent to that target task, telling each downstream task how many tuples were sent to it (taskEmittedTuples is maintained in the emit and emitDirect methods of CoordinatedOutputCollector).
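This protocol is easier to see stripped of Storm's plumbing. The following is a plain-Java sketch (not Storm code; the class and method names are made up) of the bookkeeping just described: regular tuples bump receivedTuples, each [id, count] COORD_STREAM tuple adds to expectedTupleCount, and the batch is ready to finish once all expected task reports are in and the counts match.

import java.util.HashMap;
import java.util.Map;

// Sketch of TridentBoltExecutor-style batch bookkeeping: upstream tasks report
// [batchId, emittedCount] on COORD_STREAM; the downstream executor sums those
// counts into expectedTupleCount and finishes the batch once every report has
// arrived and it has received exactly that many regular tuples.
public class CoordBookkeepingSketch {
    static class Batch {
        int reportedTasks = 0;
        int expectedTupleCount = 0;
        int receivedTuples = 0;
    }

    final Map<Object, Batch> batches = new HashMap<>();
    final int expectedTaskReports; // number of upstream tasks that must report

    CoordBookkeepingSketch(int expectedTaskReports) {
        this.expectedTaskReports = expectedTaskReports;
    }

    void onRegularTuple(Object batchId) {
        batches.computeIfAbsent(batchId, id -> new Batch()).receivedTuples++;
    }

    // a [batchId, count] tuple arriving on COORD_STREAM
    boolean onCoordTuple(Object batchId, int count) {
        Batch b = batches.computeIfAbsent(batchId, id -> new Batch());
        b.reportedTasks++;
        b.expectedTupleCount += count;
        return b.reportedTasks == expectedTaskReports
                && b.receivedTuples == b.expectedTupleCount; // ready to finishBatch
    }

    public static void main(String[] args) {
        CoordBookkeepingSketch sketch = new CoordBookkeepingSketch(1);
        sketch.onRegularTuple("b1");
        sketch.onRegularTuple("b1");
        System.out.println(sketch.onCoordTuple("b1", 2)); // true -> finishBatch
    }
}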
storm-core-1.2.2-sources.jar!/org/apache/storm/trident/planner/SubtopologyBolt.java
public class SubtopologyBolt implements ITridentBatchBolt {
    //......

    @Override
    public void execute(BatchInfo batchInfo, Tuple tuple) {
        String sourceStream = tuple.getSourceStreamId();
        InitialReceiver ir = _roots.get(sourceStream);
        if(ir==null) {
            throw new RuntimeException("Received unexpected tuple " + tuple.toString());
        }
        ir.receive((ProcessorContext) batchInfo.state, tuple);
    }

    @Override
    public void finishBatch(BatchInfo batchInfo) {
        for(TridentProcessor p: _myTopologicallyOrdered.get(batchInfo.batchGroup)) {
            p.finishBatch((ProcessorContext) batchInfo.state);
        }
    }

    @Override
    public Object initBatchState(String batchGroup, Object batchId) {
        ProcessorContext ret = new ProcessorContext(batchId, new Object[_nodes.size()]);
        for(TridentProcessor p: _myTopologicallyOrdered.get(batchGroup)) {
            p.startBatch(ret);
        }
        return ret;
    }

    @Override
    public void cleanup() {
        for(String bg: _myTopologicallyOrdered.keySet()) {
            for(TridentProcessor p: _myTopologicallyOrdered.get(bg)) {
                p.cleanup();
            }
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        for(Node n: _nodes) {
            declarer.declareStream(n.streamId,
                    TridentUtils.fieldsConcat(new Fields("$batchId"), n.allOutputFields));
        }
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }

    protected static class InitialReceiver {
        List<TridentProcessor> _receivers = new ArrayList<>();
        RootFactory _factory;
        ProjectionFactory _project;
        String _stream;

        public InitialReceiver(String stream, Fields allFields) {
            // TODO: don't want to project for non-batch bolts...???
            // how to distinguish "batch" streams from non-batch streams?
            _stream = stream;
            _factory = new RootFactory(allFields);
            List<String> projected = new ArrayList<>(allFields.toList());
            projected.remove(0);
            _project = new ProjectionFactory(_factory, new Fields(projected));
        }

        public void receive(ProcessorContext context, Tuple tuple) {
            TridentTuple t = _project.create(_factory.create(tuple));
            for(TridentProcessor r: _receivers) {
                r.execute(context, _stream, t);
            }
        }

        public void addReceiver(TridentProcessor p) {
            _receivers.add(p);
        }

        public Factory getOutputFactory() {
            return _project;
        }
    }
}
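As an aside, note that declareOutputFields prepends a "$batchId" field to every declared stream, while InitialReceiver's projection drops the first field again before tuples reach the processors. A tiny plain-Java illustration of the two field lists involved (the field names are just examples):

import java.util.ArrayList;
import java.util.List;

// Illustration of the field handling above: streams are declared with a leading
// "$batchId" field, and InitialReceiver projects it away before processors run.
public class BatchIdFieldsDemo {
    public static void main(String[] args) {
        List<String> declared = new ArrayList<>(List.of("$batchId", "user", "score"));
        List<String> projected = new ArrayList<>(declared);
        projected.remove(0); // what processors actually see
        System.out.println(declared);  // [$batchId, user, score]
        System.out.println(projected); // [user, score]
    }
}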
Here SubtopologyBolt.finishBatch passes the ProcessorContext (which contains the TransactionAttempt-typed batchId and the Object[] state; for this topology the state holds the GroupCollector, the aggregate accumulation results, and so on) to the finishBatch method of each TridentProcessor.
storm-core-1.2.2-sources.jar!/org/apache/storm/trident/planner/processor/AggregateProcessor.java
public class AggregateProcessor implements TridentProcessor {
    Aggregator _agg;
    TridentContext _context;
    FreshCollector _collector;
    Fields _inputFields;
    ProjectionFactory _projection;

    public AggregateProcessor(Fields inputFields, Aggregator agg) {
        _agg = agg;
        _inputFields = inputFields;
    }

    @Override
    public void prepare(Map conf, TopologyContext context, TridentContext tridentContext) {
        List<Factory> parents = tridentContext.getParentTupleFactories();
        if(parents.size()!=1) {
            throw new RuntimeException("Aggregate operation can only have one parent");
        }
        _context = tridentContext;
        _collector = new FreshCollector(tridentContext);
        _projection = new ProjectionFactory(parents.get(0), _inputFields);
        _agg.prepare(conf, new TridentOperationContext(context, _projection));
    }

    @Override
    public void cleanup() {
        _agg.cleanup();
    }

    @Override
    public void startBatch(ProcessorContext processorContext) {
        _collector.setContext(processorContext);
        processorContext.state[_context.getStateIndex()] = _agg.init(processorContext.batchId, _collector);
    }

    @Override
    public void execute(ProcessorContext processorContext, String streamId, TridentTuple tuple) {
        _collector.setContext(processorContext);
        _agg.aggregate(processorContext.state[_context.getStateIndex()], _projection.create(tuple), _collector);
    }

    @Override
    public void finishBatch(ProcessorContext processorContext) {
        _collector.setContext(processorContext);
        _agg.complete(processorContext.state[_context.getStateIndex()], _collector);
    }

    @Override
    public Factory getOutputFactory() {
        return _collector.getOutputFactory();
    }
}
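AggregateProcessor keeps the aggregator's per-batch state in processorContext.state at its own state index: set up in startBatch, folded in execute, flushed in finishBatch. A plain-Java analogue of that lifecycle (not the Storm API; all names here are illustrative):

import java.util.List;

// Plain-Java analogue of the lifecycle AggregateProcessor drives:
// init() at startBatch, aggregate() per execute(), complete() at finishBatch.
public class AggregateLifecycleDemo {
    interface SimpleAggregator<T> {
        T init(Object batchId);
        void aggregate(T state, String tuple);
        void complete(T state);
    }

    static class CountAgg implements SimpleAggregator<long[]> {
        public long[] init(Object batchId) { return new long[]{0L}; }
        public void aggregate(long[] state, String tuple) { state[0]++; }
        public void complete(long[] state) { System.out.println("count=" + state[0]); }
    }

    public static void main(String[] args) {
        Object[] state = new Object[1];   // analogue of ProcessorContext.state
        int stateIndex = 0;               // analogue of _context.getStateIndex()
        SimpleAggregator<long[]> agg = new CountAgg();

        state[stateIndex] = agg.init("batch-1");                 // startBatch
        for (String t : List.of("a", "b", "c")) {
            agg.aggregate((long[]) state[stateIndex], t);        // execute, per tuple
        }
        agg.complete((long[]) state[stateIndex]);                // finishBatch -> count=3
    }
}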
storm-core-1.2.2-sources.jar!/org/apache/storm/trident/operation/impl/GroupedAggregator.java
public class GroupedAggregator implements Aggregator<Object[]> {
    ProjectionFactory _groupFactory;
    ProjectionFactory _inputFactory;
    Aggregator _agg;
    ComboList.Factory _fact;
    Fields _inFields;
    Fields _groupFields;

    public GroupedAggregator(Aggregator agg, Fields group, Fields input, int outSize) {
        _groupFields = group;
        _inFields = input;
        _agg = agg;
        int[] sizes = new int[2];
        sizes[0] = _groupFields.size();
        sizes[1] = outSize;
        _fact = new ComboList.Factory(sizes);
    }

    @Override
    public void prepare(Map conf, TridentOperationContext context) {
        _inputFactory = context.makeProjectionFactory(_inFields);
        _groupFactory = context.makeProjectionFactory(_groupFields);
        _agg.prepare(conf, new TridentOperationContext(context, _inputFactory));
    }

    @Override
    public Object[] init(Object batchId, TridentCollector collector) {
        return new Object[] {new GroupCollector(collector, _fact), new HashMap(), batchId};
    }

    @Override
    public void aggregate(Object[] arr, TridentTuple tuple, TridentCollector collector) {
        GroupCollector groupColl = (GroupCollector) arr[0];
        Map<List, Object> val = (Map) arr[1];
        TridentTuple group = _groupFactory.create((TridentTupleView) tuple);
        TridentTuple input = _inputFactory.create((TridentTupleView) tuple);
        Object curr;
        if(!val.containsKey(group)) {
            curr = _agg.init(arr[2], groupColl);
            val.put((List) group, curr);
        } else {
            curr = val.get(group);
        }
        groupColl.currGroup = group;
        _agg.aggregate(curr, input, groupColl);
    }

    @Override
    public void complete(Object[] arr, TridentCollector collector) {
        Map<List, Object> val = (Map) arr[1];
        GroupCollector groupColl = (GroupCollector) arr[0];
        for(Entry<List, Object> e: val.entrySet()) {
            groupColl.currGroup = e.getKey();
            _agg.complete(e.getValue(), groupColl);
        }
    }

    @Override
    public void cleanup() {
        _agg.cleanup();
    }
}
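The Object[] state that GroupedAggregator's init returns is {GroupCollector, HashMap, batchId}: the HashMap lazily holds one inner-aggregator state per distinct group key, and complete replays every entry through the inner aggregator. A plain-Java analogue of that map-per-group pattern (names and data are illustrative):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Plain-Java analogue of GroupedAggregator's aggregate/complete: one running
// state per group key, lazily initialized, then flushed for every group.
public class GroupedCountSketch {
    public static void main(String[] args) {
        Map<List<Object>, long[]> perGroup = new HashMap<>();

        // aggregate(): init state on first sight of a group, then fold the tuple in
        for (String user : List.of("alice", "bob", "alice")) {
            List<Object> group = List.of(user);                  // the groupBy key
            long[] state = perGroup.computeIfAbsent(group, g -> new long[]{0L});
            state[0]++;
        }

        // complete(): iterate all groups and "emit" each final result
        perGroup.forEach((group, state) ->
                System.out.println(group + " -> " + state[0])); // [alice] -> 2, [bob] -> 1
    }
}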
storm-core-1.2.2-sources.jar!/org/apache/storm/trident/operation/impl/ChainedAggregatorImpl.java
public class ChainedAggregatorImpl implements Aggregator<ChainedResult> {
    Aggregator[] _aggs;
    ProjectionFactory[] _inputFactories;
    ComboList.Factory _fact;
    Fields[] _inputFields;

    public ChainedAggregatorImpl(Aggregator[] aggs, Fields[] inputFields, ComboList.Factory fact) {
        _aggs = aggs;
        _inputFields = inputFields;
        _fact = fact;
        if(_aggs.length!=_inputFields.length) {
            throw new IllegalArgumentException("Require input fields for each aggregator");
        }
    }

    public void prepare(Map conf, TridentOperationContext context) {
        _inputFactories = new ProjectionFactory[_inputFields.length];
        for(int i=0; i<_inputFields.length; i++) {
            _inputFactories[i] = context.makeProjectionFactory(_inputFields[i]);
            _aggs[i].prepare(conf, new TridentOperationContext(context, _inputFactories[i]));
        }
    }

    public ChainedResult init(Object batchId, TridentCollector collector) {
        ChainedResult initted = new ChainedResult(collector, _aggs.length);
        for(int i=0; i<_aggs.length; i++) {
            initted.objs[i] = _aggs[i].init(batchId, initted.collectors[i]);
        }
        return initted;
    }

    public void aggregate(ChainedResult val, TridentTuple tuple, TridentCollector collector) {
        val.setFollowThroughCollector(collector);
        for(int i=0; i<_aggs.length; i++) {
            TridentTuple projected = _inputFactories[i].create((TridentTupleView) tuple);
            _aggs[i].aggregate(val.objs[i], projected, val.collectors[i]);
        }
    }

    public void complete(ChainedResult val, TridentCollector collector) {
        val.setFollowThroughCollector(collector);
        for(int i=0; i<_aggs.length; i++) {
            _aggs[i].complete(val.objs[i], val.collectors[i]);
        }
        if(_aggs.length > 1) { // otherwise, tuples were emitted directly
            int[] indices = new int[val.collectors.length];
            for(int i=0; i<indices.length; i++) {
                indices[i] = 0;
            }
            boolean keepGoing = true;
            //emit cross-join of all emitted tuples
            while(keepGoing) {
                List[] combined = new List[_aggs.length];
                for(int i=0; i< _aggs.length; i++) {
                    CaptureCollector capturer = (CaptureCollector) val.collectors[i];
                    combined[i] = capturer.captured.get(indices[i]);
                }
                collector.emit(_fact.create(combined));
                keepGoing = increment(val.collectors, indices, indices.length - 1);
            }
        }
    }

    //return false if can't increment anymore
    private boolean increment(TridentCollector[] lengths, int[] indices, int j) {
        if(j==-1) return false;
        indices[j]++;
        CaptureCollector capturer = (CaptureCollector) lengths[j];
        if(indices[j] >= capturer.captured.size()) {
            indices[j] = 0;
            return increment(lengths, indices, j-1);
        }
        return true;
    }

    public void cleanup() {
        for(Aggregator a: _aggs) {
            a.cleanup();
        }
    }
}
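When more than one aggregator is chained (for example via chainedAgg), each aggregator's outputs are captured by a CaptureCollector, and complete emits the cross-join of all captured tuples. A plain-Java sketch of that odometer-style combination logic (the sample data is made up):

import java.util.List;

// Sketch of the cross-join in ChainedAggregatorImpl.complete(): with several
// chained aggregators, every combination of their emitted tuples is produced
// by an odometer-style index increment over the captured output lists.
public class ChainedCrossJoinSketch {
    public static void main(String[] args) {
        // captured outputs of two hypothetical chained aggregators
        List<List<Object>> aggA = List.of(List.of("sum", 10), List.of("sum", 20));
        List<List<Object>> aggB = List.of(List.of("count", 3));

        List<List<List<Object>>> captured = List.of(aggA, aggB);
        int[] indices = new int[captured.size()];
        boolean keepGoing = true;
        while (keepGoing) {
            StringBuilder row = new StringBuilder();
            for (int i = 0; i < captured.size(); i++) {
                row.append(captured.get(i).get(indices[i]));
            }
            System.out.println(row); // one combined output tuple per combination
            keepGoing = increment(captured, indices, indices.length - 1);
        }
    }

    // mirror of ChainedAggregatorImpl.increment: false when all combinations are done
    static boolean increment(List<List<List<Object>>> captured, int[] indices, int j) {
        if (j == -1) return false;
        indices[j]++;
        if (indices[j] >= captured.get(j).size()) {
            indices[j] = 0;
            return increment(captured, indices, j - 1);
        }
        return true;
    }
}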
To summarize:

- The downstream TridentBoltExecutor (the one wrapping SubtopologyBolt) updates expectedTupleCount when it receives [id, count] data on COORD_STREAM. When receivedTuples (updated each time one tuple of the spout's batch arrives) equals expectedTupleCount, the finishBatch operation is triggered; it calls SubtopologyBolt.finishBatch, which calls AggregateProcessor.finishBatch, which calls GroupedAggregator.complete, which calls ChainedAggregatorImpl.complete, which finally calls the complete method of the user's aggregator.

- When the TridentBoltExecutor that wraps TridentSpoutExecutor receives a tuple on the $batch stream (MasterBatchCoordinator.BATCH_STREAM_ID), it calls TridentSpoutExecutor's execute method. Since its tracked.condition.expectedTaskReports is 0 (and in this example condition.commitStream is null for the TrackedBatch of both TridentBoltExecutors, so receivedCommit is true), it immediately calls finishBatch. That call runs TridentSpoutExecutor's finishBatch method and then sends the batchId and the taskEmittedTuples count over COORD_STREAM to the tasks of the downstream TridentBoltExecutor. The downstream TridentBoltExecutor, whose expectedTaskReports is not 0, must wait for the COORD_STREAM tuple before it can run checkFinish and decide whether the batch can be finished.

- TridentSpoutExecutor's execute method finally calls the user's spout (via its emitter) to emit one batch, while its finishBatch method is currently empty and does nothing. In other words, for the TridentBoltExecutor wrapping TridentSpoutExecutor, once it receives the instruction to emit a batch, it calls TridentSpoutExecutor.execute to emit the batch through the emitter and then immediately performs finishBatch (emitting [id, count] to the downstream TridentBoltExecutor; on receiving the [id, count] data the downstream executor updates expectedTupleCount, then runs the checkFinish check, and if receivedTuples equals expectedTupleCount it triggers finishBatch, which in turn triggers AggregateProcessor's finishBatch).