Initializing the state classes
//org.apache.flink.streaming.runtime.tasks.StreamTask#initializeState
initializeState();
private void initializeState() throws Exception {
    StreamOperator<?>[] allOperators = operatorChain.getAllOperators();
    for (StreamOperator<?> operator : allOperators) {
        if (null != operator) {
            operator.initializeState();
        }
    }
}
operator.initializeState() resolves to org.apache.flink.streaming.api.operators.AbstractStreamOperator#initializeState(). All stream operator classes inherit from this class, and none of them override this method.
public final void initializeState() throws Exception {
    // This is where the state backends come into play -- the important part.
    final StreamOperatorStateContext context =
        streamTaskStateManager.streamOperatorStateContext(
            getOperatorID(),
            getClass().getSimpleName(),
            this,
            keySerializer,
            streamTaskCloseableRegistry,
            metrics);
    ......
streamTaskStateManager.streamOperatorStateContext(......) resolves to org.apache.flink.streaming.api.operators.StreamTaskStateInitializerImpl#streamOperatorStateContext.
......
// -------------- Keyed State Backend: the key part for checkpointing --------------
keyedStatedBackend = keyedStatedBackend(
    keySerializer,
    operatorIdentifierText,
    prioritizedOperatorSubtaskStates,
    streamTaskCloseableRegistry,
    metricGroup);

// -------------- Operator State Backend: the key part for checkpointing --------------
operatorStateBackend = operatorStateBackend(
    operatorIdentifierText,
    prioritizedOperatorSubtaskStates,
    streamTaskCloseableRegistry);
......
At the bottom of the keyedStatedBackend() method, the call ends up in org.apache.flink.streaming.api.operators.BackendRestorerProcedure#attemptCreateAndRestore.
private T attemptCreateAndRestore(Collection<S> restoreState) throws Exception {
    ......
    // create a new, empty backend.
    final T backendInstance = instanceSupplier.get();

    // attempt to restore from snapshot (or null if no state was checkpointed).
    backendInstance.restore(restoreState);
    ......
}
backendInstance.restore(restoreState) resolves to org.apache.flink.runtime.state.DefaultOperatorStateBackend#restore.
// The registeredOperatorStates map is the core object here.
...
PartitionableListState<?> listState = registeredOperatorStates.get(restoredSnapshot.getName());

if (null == listState) {
    listState = new PartitionableListState<>(restoredMetaInfo);

    // Key point: the restored snapshot state object is stored in registeredOperatorStates here.
    //********************************************************************
    registeredOperatorStates.put(listState.getStateMetaInfo().getName(), listState);
    //********************************************************************
} else {
    // TODO with eager state registration in place, check here for serializer migration strategies
}
...
triggerCheckpoint is invoked periodically to perform checkpoints, whereas everything above is the one-time initialization logic.
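Note that this periodic trigger only exists when checkpointing has been enabled on the job. A minimal sketch (the class name and interval value are just illustrative):

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class EnableCheckpointingSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // This is what makes the CheckpointCoordinator schedule triggerCheckpoint periodically.
        env.enableCheckpointing(10_000); // checkpoint interval in milliseconds (illustrative value)
        // ... define sources/operators here and call env.execute(...)
    }
}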
Periodically snapshotting the state classes
org.apache.flink.runtime.checkpoint.CheckpointCoordinator#triggerCheckpoint(long, boolean)
......
// send the messages to the tasks that trigger their checkpoint
// (presumably this is where the trigger is sent remotely to the tasks, which then produce the state data files)
for (Execution execution : executions) {
    execution.triggerCheckpoint(checkpointID, timestamp, checkpointOptions);
}
......
execution.triggerCheckpoint resolves to org.apache.flink.runtime.executiongraph.Execution#triggerCheckpoint.
/**
 * Trigger a new checkpoint on the task of this execution.
 *
 * @param checkpointId of the checkpoint to trigger
 * @param timestamp of the checkpoint to trigger
 * @param checkpointOptions of the checkpoint to trigger
 */
public void triggerCheckpoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions) {
    ......
    final LogicalSlot slot = assignedResource;

    if (slot != null) {
        final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();

        taskManagerGateway.triggerCheckpoint(attemptId, getVertex().getJobId(), checkpointId, timestamp, checkpointOptions);
    }
    ......
}
taskManagerGateway.triggerCheckpoint(......) ultimately ends up in org.apache.flink.runtime.taskexecutor.TaskExecutor#triggerCheckpoint.
@Override
public CompletableFuture<Acknowledge> triggerCheckpoint(
        ExecutionAttemptID executionAttemptID,
        long checkpointId,
        long checkpointTimestamp,
        CheckpointOptions checkpointOptions) {
    ......
    final Task task = taskSlotTable.getTask(executionAttemptID);

    if (task != null) {
        task.triggerCheckpointBarrier(checkpointId, checkpointTimestamp, checkpointOptions);

        return CompletableFuture.completedFuture(Acknowledge.get());
    }
    ......
}
task.triggerCheckpointBarrier(......) resolves to org.apache.flink.runtime.taskmanager.Task#triggerCheckpointBarrier, which forwards the trigger to the task's invokable (the StreamTask); inside the StreamTask the checkpoint eventually reaches checkpointStreamOperator for every operator in the chain:
private void checkpointStreamOperator(StreamOperator<?> op) throws Exception {
    if (null != op) {
        // This call is the core of the per-operator snapshot.
        OperatorSnapshotFutures snapshotInProgress = op.snapshotState(
            checkpointMetaData.getCheckpointId(),
            checkpointMetaData.getTimestamp(),
            checkpointOptions,
            storageLocation);

        operatorSnapshotsInProgress.put(op.getOperatorID(), snapshotInProgress);
    }
}
op.snapshotState() is the core; it calls org.apache.flink.streaming.api.operators.AbstractStreamOperator#snapshotState(long, long, org.apache.flink.runtime.checkpoint.CheckpointOptions, org.apache.flink.runtime.state.CheckpointStreamFactory).
Note that op is always a concrete subclass: some operator classes extend AbstractStreamOperator directly, while others extend AbstractUdfStreamOperator. So when snapshotState(snapshotContext) is called below, the implementation that runs is either org.apache.flink.streaming.api.operators.AbstractStreamOperator#snapshotState(org.apache.flink.runtime.state.StateSnapshotContext) or org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator#snapshotState, depending on the subclass.
AbstractStreamOperator has 94 subclasses, AbstractUdfStreamOperator has 42, and AbstractUdfStreamOperator itself extends AbstractStreamOperator.
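To make the dispatch concrete, here is a minimal sketch of a user function implementing org.apache.flink.streaming.api.checkpoint.CheckpointedFunction (the class and field names are my own, not from the Flink source). For operators built on AbstractUdfStreamOperator, the snapshotState(snapshotContext) call in the method below is what ends up invoking this function's snapshotState(), and its initializeState() is where the ListState discussed later is obtained:

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.util.Collector;

public class BufferingFlatMap extends RichFlatMapFunction<Long, Long>
        implements CheckpointedFunction {

    // Illustrative names: an in-memory buffer plus the operator ListState that backs it up.
    private transient ListState<Long> checkpointedState;
    private final List<Long> buffer = new ArrayList<>();

    @Override
    public void flatMap(Long value, Collector<Long> out) {
        buffer.add(value);
        out.collect(value);
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {
        // Reached via AbstractUdfStreamOperator#snapshotState during a checkpoint:
        // copy the current buffer into the registered operator state.
        checkpointedState.clear();
        for (Long v : buffer) {
            checkpointedState.add(v);
        }
    }

    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        // The ListState handed out here is the PartitionableListState that
        // DefaultOperatorStateBackend keeps in its registeredOperatorStates map.
        checkpointedState = context.getOperatorStateStore()
                .getListState(new ListStateDescriptor<>("buffer", Long.class));
        for (Long v : checkpointedState.get()) {
            buffer.add(v);
        }
    }
}

Now back to AbstractStreamOperator#snapshotState: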
@Override
public final OperatorSnapshotFutures snapshotState(long checkpointId, long timestamp, CheckpointOptions checkpointOptions,
        CheckpointStreamFactory factory) throws Exception {

    OperatorSnapshotFutures snapshotInProgress = new OperatorSnapshotFutures();

    try (StateSnapshotContextSynchronousImpl snapshotContext = new StateSnapshotContextSynchronousImpl(
            checkpointId,
            timestamp,
            factory,
            keyGroupRange,
            getContainingTask().getCancelables())) {

        // Operators that extend AbstractUdfStreamOperator invoke the user's snapshot method here;
        // operators that extend AbstractStreamOperator run the base implementation, which does essentially nothing.
        snapshotState(snapshotContext);

        // At this point the user's snapshot method has run, so the data currently held in the state objects is fixed.
        // What follows is how those state objects are accessed and their data written to disk.
        snapshotInProgress.setKeyedStateRawFuture(snapshotContext.getKeyedStateStreamFuture());
        snapshotInProgress.setOperatorStateRawFuture(snapshotContext.getOperatorStateStreamFuture());

        // This is where the operator state data file is produced.
        if (null != operatorStateBackend) {
            System.out.println(Thread.currentThread().getName() + "::writing the state data to a file here");
            snapshotInProgress.setOperatorStateManagedFuture(
                operatorStateBackend.snapshot(checkpointId, timestamp, factory, checkpointOptions));
        }

        // This is where the keyed state data file is produced.
        if (null != keyedStateBackend) {
            snapshotInProgress.setKeyedStateManagedFuture(
                keyedStateBackend.snapshot(checkpointId, timestamp, factory, checkpointOptions));
        }
    }

    return snapshotInProgress;
}
operatorStateBackend.snapshot(checkpointId, timestamp, factory, checkpointOptions) resolves to org.apache.flink.runtime.state.DefaultOperatorStateBackend#snapshot.
The answer lies below.
public RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshot(
        long checkpointId,
        long timestamp,
        @Nonnull CheckpointStreamFactory streamFactory,
        @Nonnull CheckpointOptions checkpointOptions) throws Exception {

    long syncStartTime = System.currentTimeMillis();

    // This is the crucial spot: if you want to know how the state objects inside the user
    // function are reached, it happens here.
    RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshotRunner =
        snapshotStrategy.snapshot(checkpointId, timestamp, streamFactory, checkpointOptions);

    snapshotStrategy.logSyncCompleted(streamFactory, syncStartTime);
    return snapshotRunner;
}
Which implementation snapshotStrategy.snapshot(checkpointId, timestamp, streamFactory, checkpointOptions) dispatches to depends on the state backend the user configured; by default it is org.apache.flink.runtime.state.DefaultOperatorStateBackend.DefaultOperatorStateBackendSnapshotStrategy#snapshot.
DefaultOperatorStateBackendSnapshotStrategy is an inner class of DefaultOperatorStateBackend.
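Which backend (and therefore which snapshot strategy) is in play follows from the state backend configured on the job. A minimal configuration sketch (the class name and HDFS path are just examples):

import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class StateBackendSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // With the default MemoryStateBackend, or an FsStateBackend as below, operator state is
        // handled by DefaultOperatorStateBackend, so the snapshot path shown here applies.
        env.setStateBackend(new FsStateBackend("hdfs:///flink/checkpoints"));
    }
}

Back to the default strategy's snapshot method: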
public RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshot(
        long checkpointId,
        long timestamp,
        @Nonnull CheckpointStreamFactory streamFactory,
        @Nonnull CheckpointOptions checkpointOptions) throws IOException {

    // The data appears to live in the registeredOperatorStates object. The steps below do not
    // really need close study -- they just write the state data to a file; the main question is
    // how registeredOperatorStates was populated in the first place.
    //************ Key object: registeredOperatorStates
    final Map<String, PartitionableListState<?>> registeredOperatorStatesDeepCopies =
        new HashMap<>(registeredOperatorStates.size());
    final Map<String, BackendWritableBroadcastState<?, ?>> registeredBroadcastStatesDeepCopies =
        new HashMap<>(registeredBroadcastStates.size());

    ClassLoader snapshotClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(userClassLoader);
    try {
        // eagerly create deep copies of the list and the broadcast states (if any)
        // in the synchronous phase, so that we can use them in the async writing.
        // entry.getValue() is the state object; its deep copy is stored in the newly created map.
        if (!registeredOperatorStates.isEmpty()) {
            for (Map.Entry<String, PartitionableListState<?>> entry : registeredOperatorStates.entrySet()) {
                PartitionableListState<?> listState = entry.getValue();
                if (null != listState) {
                    listState = listState.deepCopy();
                }
                registeredOperatorStatesDeepCopies.put(entry.getKey(), listState);
            }
        }

        // broadcast state
        if (!registeredBroadcastStates.isEmpty()) {
            for (Map.Entry<String, BackendWritableBroadcastState<?, ?>> entry : registeredBroadcastStates.entrySet()) {
                BackendWritableBroadcastState<?, ?> broadcastState = entry.getValue();
                if (null != broadcastState) {
                    broadcastState = broadcastState.deepCopy();
                }
                registeredBroadcastStatesDeepCopies.put(entry.getKey(), broadcastState);
            }
        }
    } finally {
        Thread.currentThread().setContextClassLoader(snapshotClassLoader);
    }

    // The state data file is produced inside this callable.
    AsyncSnapshotCallable<SnapshotResult<OperatorStateHandle>> snapshotCallable =
        new AsyncSnapshotCallable<SnapshotResult<OperatorStateHandle>>() {

        @Override
        protected SnapshotResult<OperatorStateHandle> callInternal() throws Exception {
            ......
            // get the registered operator state infos ...
            List<StateMetaInfoSnapshot> operatorMetaInfoSnapshots =
                new ArrayList<>(registeredOperatorStatesDeepCopies.size());

            for (Map.Entry<String, PartitionableListState<?>> entry :
                    registeredOperatorStatesDeepCopies.entrySet()) {
                operatorMetaInfoSnapshots.add(entry.getValue().getStateMetaInfo().snapshot());
            }

            // ... get the registered broadcast operator state infos ...
            List<StateMetaInfoSnapshot> broadcastMetaInfoSnapshots =
                new ArrayList<>(registeredBroadcastStatesDeepCopies.size());

            for (Map.Entry<String, BackendWritableBroadcastState<?, ?>> entry :
                    registeredBroadcastStatesDeepCopies.entrySet()) {
                broadcastMetaInfoSnapshots.add(entry.getValue().getStateMetaInfo().snapshot());
            }

            // ... write them all in the checkpoint stream ...
            DataOutputView dov = new DataOutputViewStreamWrapper(localOut);
            OperatorBackendSerializationProxy backendSerializationProxy =
                new OperatorBackendSerializationProxy(operatorMetaInfoSnapshots, broadcastMetaInfoSnapshots);
            backendSerializationProxy.write(dov);

            // ... and then go for the states ...
            ......
        }
    };

    final FutureTask<SnapshotResult<OperatorStateHandle>> task =
        snapshotCallable.toAsyncSnapshotFutureTask(closeStreamOnCancelRegistry);

    if (!asynchronousSnapshots) {
        task.run();
    }

    return task;
}
From the above we can see that the state objects all end up in the registeredOperatorStatesDeepCopies map (deep copies taken from registeredOperatorStates).
User code is able to update the data in those state objects because it obtains a reference to them like this:
public void initializeState(FunctionInitializationContext context) throws Exception {
    ......
    checkpointedState = context.getOperatorStateStore().getListState(descriptor);
    ......
}
This calls org.apache.flink.runtime.state.DefaultOperatorStateBackend#getListState(org.apache.flink.api.common.state.ListStateDescriptor).
/**
 * @Description: When the state object is returned, it is also stored in the map that the
 *               snapshot later writes to the checkpoint file.
 * @Param:
 * @return:
 * @Author: intsmaze
 * @Date: 2019/1/18
 */
private <S> ListState<S> getListState(
        ListStateDescriptor<S> stateDescriptor,
        OperatorStateHandle.Mode mode) throws StateMigrationException {

    String name = Preconditions.checkNotNull(stateDescriptor.getName());

    @SuppressWarnings("unchecked")
    PartitionableListState<S> previous = (PartitionableListState<S>) accessedStatesByName.get(name);
    if (previous != null) {
        checkStateNameAndMode(
                previous.getStateMetaInfo().getName(),
                name,
                previous.getStateMetaInfo().getAssignmentMode(),
                mode);
        return previous;
    }
    ......
    PartitionableListState<S> partitionableListState = (PartitionableListState<S>) registeredOperatorStates.get(name);

    if (null == partitionableListState) {
        // no restored state for the state name; simply create new state holder

        partitionableListState = new PartitionableListState<>(
            new RegisteredOperatorStateBackendMetaInfo<>(
                name,
                partitionStateSerializer,
                mode));

        // The state object is also registered here: this registeredOperatorStates map is the very
        // object that the snapshot method of DefaultOperatorStateBackendSnapshotStrategy later reads.
        //**********************************************************
        registeredOperatorStates.put(name, partitionableListState);
    }