My notes from reading the WebRTC source code
-----------------------------------------------------------------------------------------------------------------------------------------
Set up an rtmpd server in one minute: https://blog.csdn.net/freeabc/article/details/102880984
Software download: http://www.qiyicc.com/download/rtmpd.rar
GitHub: https://github.com/superconvert/smart_rtmpd
-----------------------------------------------------------------------------------------------------------------------------------------
A detailed analysis of the WebRTC video capture, encoding, and send pipeline
I usually structure these write-ups around two questions:
1. Which object's method is called next?
2. How does the object at this step get linked to the object at the next step?
This article focuses on the second question — how each step is wired to the next. What each step does internally cannot be shown in full detail (it would be far too large), so readers will need to study the parts that matter to them on their own.
//-----------------------------------------------------------------------------
//
// Creating the video encoder and associating it with the source
//
//-----------------------------------------------------------------------------
1. Java
PeerConnectionClient::createPeerConnectionInternal
peerConnection.addTrack(createVideoTrack(videoCapturer), mediaStreamLabels);
1.1 Creating the video track — PeerConnectionClient::createVideoTrack(VideoCapturer capturer)
    localVideoTrack = factory.createVideoTrack(VIDEO_TRACK_ID, videoSource);
1.2 PeerConnectionFactory::createVideoTrack(String id, VideoSource source)
    checkPeerConnectionFactoryExists();
    return new VideoTrack(nativeCreateVideoTrack(nativeFactory, id, source.getNativeVideoTrackSource()));
1.3 nativeCreateVideoTrack
1.4 Java_org_webrtc_PeerConnectionFactory_nativeCreateVideoTrack(JNIEnv* env, jclass jcaller,
        jlong factory, jstring id, jlong nativeVideoSource) {
      return JNI_PeerConnectionFactory_CreateVideoTrack(env, factory,
          base::android::JavaParamRef<jstring>(env, id), nativeVideoSource);
    }
1.5 ./sdk/android/src/jni/pc/peer_connection_factory.cc
    JNI_PeerConnectionFactory_CreateVideoTrack(JNIEnv* jni, jlong native_factory,
        const JavaParamRef<jstring>& id, jlong native_source) {
      rtc::scoped_refptr<VideoTrackInterface> track =
          PeerConnectionFactoryFromJava(native_factory)->CreateVideoTrack(
              JavaToStdString(jni, id),
              reinterpret_cast<VideoTrackSourceInterface*>(native_source));
      return jlongFromPointer(track.release());
    }
    native_source is the AndroidVideoTrackSource.
1.6 ./pc/peer_connection_factory.cc
    rtc::scoped_refptr<VideoTrackInterface> PeerConnectionFactory::CreateVideoTrack(
        const std::string& id, VideoTrackSourceInterface* source) {
      RTC_DCHECK(signaling_thread_->IsCurrent());
      rtc::scoped_refptr<VideoTrackInterface> track(VideoTrack::Create(id, source, worker_thread_));
      return VideoTrackProxy::Create(signaling_thread_, worker_thread_, track);
    }
1.7 ./pc/video_track.cc
    rtc::scoped_refptr<VideoTrack> VideoTrack::Create(const std::string& id,
        VideoTrackSourceInterface* source, rtc::Thread* worker_thread) {
      rtc::RefCountedObject<VideoTrack>* track =
          new rtc::RefCountedObject<VideoTrack>(id, source, worker_thread);
      return track;
    }
From the flow above we can see that VideoTrack holds the AndroidVideoTrackSource.
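As a side note, here is a minimal C++ sketch of the same two calls against the native API, mirroring the Java path above. AddCameraTrack, "video0" and "stream0" are made-up names for illustration; factory, pc and source are assumed to have been created during session setup.

#include "api/peer_connection_interface.h"
#include "rtc_base/checks.h"

// Hypothetical helper, for illustration only — not part of the flow above.
void AddCameraTrack(webrtc::PeerConnectionFactoryInterface* factory,
                    webrtc::PeerConnectionInterface* pc,
                    webrtc::VideoTrackSourceInterface* source) {
  // Mirrors Java createVideoTrack(): the track is created holding the source.
  rtc::scoped_refptr<webrtc::VideoTrackInterface> track =
      factory->CreateVideoTrack("video0", source);
  // Mirrors Java addTrack(): hands the track to the PeerConnection for sending.
  auto result = pc->AddTrack(track, {"stream0"});
  RTC_DCHECK(result.ok());
}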
2. Java
PeerConnection::addTrack
RtpSender newSender = nativeAddTrack(track.getNativeMediaStreamTrack(), streamIds);
3.
Java_org_webrtc_PeerConnection_nativeAddTrack
JNI_PeerConnection_AddTrack
4. ./sdk/android/src/jni/pc/peer_connection.cc
JNI_PeerConnection_AddTrack
    ExtractNativePC(jni, j_pc)->AddTrack(
        reinterpret_cast<MediaStreamTrackInterface*>(native_track),
        JavaListToNativeVector<std::string, jstring>(jni, j_stream_labels, &JavaToNativeString));
5.
PeerConnection::AddTrack(rtc::scoped_refptr<MediaStreamTrackInterface> track,
                         const std::vector<std::string>& stream_ids)
    auto sender_or_error = (IsUnifiedPlan() ? AddTrackUnifiedPlan(track, stream_ids)
                                            : AddTrackPlanB(track, stream_ids));
6.
PeerConnection::AddTrackPlanB(rtc::scoped_refptr<MediaStreamTrackInterface> track,
                              const std::vector<std::string>& stream_ids)
    auto new_sender = CreateSender(media_type, track->id(), track, adjusted_stream_ids, {});
    if (track->kind() == MediaStreamTrackInterface::kAudioKind) {
      new_sender->internal()->SetMediaChannel(voice_media_channel());
      GetAudioTransceiver()->internal()->AddSender(new_sender);
      const RtpSenderInfo* sender_info =
          FindSenderInfo(local_audio_sender_infos_,
                         new_sender->internal()->stream_ids()[0], track->id());
      if (sender_info) {
        new_sender->internal()->SetSsrc(sender_info->first_ssrc);
      }
    } else {
      RTC_DCHECK_EQ(MediaStreamTrackInterface::kVideoKind, track->kind());
      new_sender->internal()->SetMediaChannel(video_media_channel());
      GetVideoTransceiver()->internal()->AddSender(new_sender);
      const RtpSenderInfo* sender_info =
          FindSenderInfo(local_video_sender_infos_,
                         new_sender->internal()->stream_ids()[0], track->id());
      if (sender_info) {
        new_sender->internal()->SetSsrc(sender_info->first_ssrc);
      }
    }
    return rtc::scoped_refptr<RtpSenderInterface>(new_sender);
7.
PeerConnection::CreateSender(cricket::MediaType media_type, const std::string& id,
                             rtc::scoped_refptr<MediaStreamTrackInterface> track,
                             const std::vector<std::string>& stream_ids,
                             const std::vector<RtpEncodingParameters>& send_encodings)
    sender = RtpSenderProxyWithInternal<RtpSenderInternal>::Create(
        signaling_thread(), VideoRtpSender::Create(worker_thread(), id, this));
    NoteUsageEvent(UsageEvent::VIDEO_ADDED);
    bool set_track_succeeded = sender->SetTrack(track);  --->
    RTC_DCHECK(set_track_succeeded);
    sender->internal()->set_stream_ids(stream_ids);
    sender->internal()->set_init_send_encodings(send_encodings);
8.
VideoRtpSender::SetTrack(MediaStreamTrackInterface* track)
    RtpSenderBase::SetTrack(MediaStreamTrackInterface* track)
        SetSend();
9.
VideoRtpSender::SetSend()
    cricket::VideoOptions options;
    VideoTrackSourceInterface* source = video_track()->GetSource();
    if (source) {
      options.is_screencast = source->is_screencast();
      options.video_noise_reduction = source->needs_denoising();
    }
    switch (cached_track_content_hint_) {
      case VideoTrackInterface::ContentHint::kNone:
        break;
      case VideoTrackInterface::ContentHint::kFluid:
        options.is_screencast = false;
        break;
      case VideoTrackInterface::ContentHint::kDetailed:
      case VideoTrackInterface::ContentHint::kText:
        options.is_screencast = true;
        break;
    }
    bool success = worker_thread_->Invoke<bool>(RTC_FROM_HERE, [&] {
      return video_media_channel()->SetVideoSend(ssrc_, &options, video_track());
10.
WebRtcVideoChannel::SetVideoSend(uint32_t ssrc, const VideoOptions* options,
                                 rtc::VideoSourceInterface<webrtc::VideoFrame>* source)
    const auto& kv = send_streams_.find(ssrc);
    if (kv == send_streams_.end()) {
      // Allow unknown ssrc only if source is null.
      RTC_CHECK(source == nullptr);
      RTC_LOG(LS_ERROR) << "No sending stream on ssrc " << ssrc;
      return false;
    }
    return kv->second->SetVideoSend(options, source);
send_streams_ holds WebRtcVideoChannel::WebRtcVideoSendStream objects — see
"How WebRtcVideoChannel's AddSendStream populates send_streams_ (WebRtcVideoSendStream)" below:
WebRtcVideoChannel::AddSendStream(const StreamParams& sp)
11.
WebRtcVideoChannel::WebRtcVideoSendStream::SetVideoSend(const VideoOptions* options,
    rtc::VideoSourceInterface<webrtc::VideoFrame>* source)
    ReconfigureEncoder();  ---> flow 12
    source_ = source;
    if (source && stream_) {
      !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      This statement is what ties the source and the encoder object together!
      !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      stream_->SetSource(this, GetDegradationPreference());
    }
    stream_ is the VideoSendStream object; for how it is created see
    void WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream(), where
    stream_ = call_->CreateVideoSendStream(std::move(config), parameters_.encoder_config.Copy());
Continuing down through the stream_->SetSource interface:
11.
11.1 void VideoSendStream::SetSource(rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
         const DegradationPreference& degradation_preference) {
       RTC_DCHECK_RUN_ON(&thread_checker_);
       video_stream_encoder_->SetSource(source, degradation_preference);
     }
11.2 void VideoStreamEncoder::SetSource(rtc::VideoSourceInterface<VideoFrame>* source,
         const DegradationPreference& degradation_preference)
       // this is where the source gets associated with the encoder
       source_proxy_->SetSource(source, degradation_preference);
     source_proxy_ is created in the VideoStreamEncoder constructor:
     ./video/video_stream_encoder.cc
     VideoStreamEncoder::VideoStreamEncoder(Clock* clock, uint32_t number_of_cores,
         VideoStreamEncoderObserver* encoder_stats_observer,
         const VideoStreamEncoderSettings& settings,
         std::unique_ptr<OveruseFrameDetector> overuse_detector,
         TaskQueueFactory* task_queue_factory)
         : source_proxy_(new VideoSourceProxy(this)),
     and VideoSourceProxy's video_stream_encoder_ is the VideoStreamEncoder itself:
     ./video/video_stream_encoder.cc
     explicit VideoSourceProxy(VideoStreamEncoder* video_stream_encoder)
         : video_stream_encoder_(video_stream_encoder),
           degradation_preference_(DegradationPreference::DISABLED),
           source_(nullptr),
           max_framerate_(std::numeric_limits<int>::max()) {}
11.3 This is where VideoStreamEncoder and the source are tied together:
     void VideoSourceProxy::SetSource(rtc::VideoSourceInterface<VideoFrame>* source,
         const DegradationPreference& degradation_preference) {
       // Called on libjingle's worker thread.
       RTC_DCHECK_RUN_ON(&main_checker_);
       rtc::VideoSourceInterface<VideoFrame>* old_source = nullptr;
       rtc::VideoSinkWants wants;
       {
         rtc::CritScope lock(&crit_);
         degradation_preference_ = degradation_preference;
         old_source = source_;
         source_ = source;
         wants = GetActiveSinkWantsInternal();
       }
       if (old_source != source && old_source != nullptr) {
         old_source->RemoveSink(video_stream_encoder_);
       }
       if (!source) {
         return;
       }
       // source is the WebRtcVideoChannel::WebRtcVideoSendStream object set above
       source->AddOrUpdateSink(video_stream_encoder_, wants);
     }
11.4 void WebRtcVideoChannel::WebRtcVideoSendStream::AddOrUpdateSink(
         rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
         const rtc::VideoSinkWants& wants)
       encoder_sink_ = sink;
       source_->AddOrUpdateSink(encoder_sink_, wants);
11.5 void VideoTrack::AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
         const rtc::VideoSinkWants& wants) {
       RTC_DCHECK(worker_thread_->IsCurrent());
       VideoSourceBase::AddOrUpdateSink(sink, wants);
       rtc::VideoSinkWants modified_wants = wants;
       // video_source_ is the AndroidVideoTrackSource (AdaptedVideoTrackSource) object
       modified_wants.black_frames = !enabled();
       video_source_->AddOrUpdateSink(sink, modified_wants);
     }
     video_source_ is the AndroidVideoTrackSource (see void AndroidVideoTrackSource::AddOrUpdateSink).
11.6 void AdaptedVideoTrackSource::AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
         const rtc::VideoSinkWants& wants) {
       !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
       Right here the video source object (AndroidVideoTrackSource) and the video
       encoder object (VideoStreamEncoder) are hooked together:
       broadcaster_.AddOrUpdateSink(sink, wants);
       !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
       OnSinkWantsChanged(broadcaster_.wants());
     }
12.
WebRtcVideoChannel::WebRtcVideoSendStream::ReconfigureEncoder()
    webrtc::VideoEncoderConfig encoder_config = CreateVideoEncoderConfig(codec_settings.codec);
    encoder_config.encoder_specific_settings = ConfigureVideoEncoderSettings(codec_settings.codec);
    stream_->ReconfigureVideoEncoder(encoder_config.Copy());
    //-----------------------------------------------------------
    // stream_ is a VideoSendStream object, produced by this flow:
    //-----------------------------------------------------------
    1. WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream()
       stream_ = call_->CreateVideoSendStream(std::move(config), parameters_.encoder_config.Copy());
    2. webrtc::VideoSendStream* Call::CreateVideoSendStream(webrtc::VideoSendStream::Config config,
           VideoEncoderConfig encoder_config)
       std::unique_ptr<FecController> fec_controller =
           config_.fec_controller_factory
               ? config_.fec_controller_factory->CreateFecController()
               : std::make_unique<FecControllerDefault>(clock_);
       return CreateVideoSendStream(std::move(config), std::move(encoder_config),
                                    std::move(fec_controller));
    3. webrtc::VideoSendStream* Call::CreateVideoSendStream(webrtc::VideoSendStream::Config config,
           VideoEncoderConfig encoder_config, std::unique_ptr<FecController> fec_controller)
       VideoSendStream* send_stream = new VideoSendStream(
           clock_, num_cpu_cores_, module_process_thread_.get(), task_queue_factory_,
           call_stats_.get(), transport_send_ptr_, bitrate_allocator_.get(),
           video_send_delay_stats_.get(), event_log_, std::move(config), std::move(encoder_config),
           suspended_video_send_ssrcs_, suspended_video_payload_states_, std::move(fec_controller));
13.
VideoSendStream::ReconfigureVideoEncoder
    video_stream_encoder_->ConfigureEncoder(std::move(config),
        config_.rtp.max_packet_size - CalculateMaxHeaderSize(config_.rtp));  --->
    //----------------------------------------------------------------
    // video_stream_encoder_ is the VideoStreamEncoder object, created here:
    //----------------------------------------------------------------
    VideoSendStream::VideoSendStream(Clock* clock, int num_cpu_cores,
        ProcessThread* module_process_thread, TaskQueueFactory* task_queue_factory,
        CallStats* call_stats, RtpTransportControllerSendInterface* transport,
        BitrateAllocatorInterface* bitrate_allocator, SendDelayStats* send_delay_stats,
        RtcEventLog* event_log, VideoSendStream::Config config, VideoEncoderConfig encoder_config,
        const std::map<uint32_t, RtpState>& suspended_ssrcs,
        const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
        std::unique_ptr<FecController> fec_controller)
      video_stream_encoder_ = CreateVideoStreamEncoder(clock, task_queue_factory,
          num_cpu_cores, &stats_proxy_, config_.encoder_settings);
14.
VideoStreamEncoder::ConfigureEncoder(VideoEncoderConfig config, size_t max_data_payload_length)
ReconfigureEncoder(); --->
15.
VideoStreamEncoder::ReconfigureEncoder()
    std::vector<VideoStream> streams =
        encoder_config_.video_stream_factory->CreateEncoderStreams(
            last_frame_info_->width, last_frame_info_->height, encoder_config_);
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    This is where the encoder object is created:
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    encoder_ = settings_.encoder_factory->CreateVideoEncoder(encoder_config_.video_format);  --->
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    This function also initializes encoder_ — see "Initializing the video encoder" below.
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    if (encoder_->InitEncode(&send_codec_,
                             VideoEncoder::Settings(settings_.capabilities, number_of_cores_,
                                                    max_data_payload_length)) != 0)
16.
VideoEncoderFactoryWrapper::CreateVideoEncoder(const SdpVideoFormat& format)
    ScopedJavaLocalRef<jobject> encoder =
        Java_VideoEncoderFactory_createEncoder(jni, encoder_factory_, j_codec_info);  --->
    return JavaToNativeVideoEncoder(jni, encoder);
17.
17.1 static base::android::ScopedJavaLocalRef<jobject> Java_VideoEncoderFactory_createEncoder(
         JNIEnv* env, const base::android::JavaRef<jobject>& obj,
         const base::android::JavaRef<jobject>& info) {
       jclass clazz = org_webrtc_VideoEncoderFactory_clazz(env);
       CHECK_CLAZZ(env, obj.obj(), org_webrtc_VideoEncoderFactory_clazz(env), NULL);
       jni_generator::JniJavaCallContextChecked call_context;
       call_context.Init<base::android::MethodID::TYPE_INSTANCE>(env, clazz, "createEncoder",
           "(Lorg/webrtc/VideoCodecInfo;)Lorg/webrtc/VideoEncoder;",
           &g_org_webrtc_VideoEncoderFactory_createEncoder);
       jobject ret = env->CallObjectMethod(obj.obj(), call_context.base.method_id, info.obj());
       return base::android::ScopedJavaLocalRef<jobject>(env, ret);
     }
17.2 JavaToNativeVideoEncoder(JNIEnv* jni, const JavaRef<jobject>& j_encoder) {
       const jlong native_encoder = Java_VideoEncoder_createNativeVideoEncoder(jni, j_encoder);
       VideoEncoder* encoder;
       if (native_encoder == 0) {
         encoder = new VideoEncoderWrapper(jni, j_encoder);
       } else {
         encoder = reinterpret_cast<VideoEncoder*>(native_encoder);
       }
       return std::unique_ptr<VideoEncoder>(encoder);
     }
This is where VideoEncoderWrapper gets created!
//-----------------------------------------------------------------------------
//
// Initializing the video encoder
//
//-----------------------------------------------------------------------------
1.
For this flow, see step 15 above:
VideoStreamEncoder::ReconfigureEncoder()
    if (encoder_->InitEncode(&send_codec_,
                             VideoEncoder::Settings(settings_.capabilities, number_of_cores_,
                                                    max_data_payload_length)) != 0)
2.
VideoEncoderWrapper::InitEncode(const VideoCodec* codec_settings, const Settings& settings)
    JNIEnv* jni = AttachCurrentThreadIfNeeded();
    codec_settings_ = *codec_settings;
    capabilities_ = settings.capabilities;
    number_of_cores_ = settings.number_of_cores;
    num_resets_ = 0;
    {
      rtc::CritScope lock(&encoder_queue_crit_);
      encoder_queue_ = TaskQueueBase::Current();
    }
    return InitEncodeInternal(jni);
3.
VideoEncoderWrapper::InitEncodeInternal(JNIEnv* jni)
    ScopedJavaLocalRef<jobject> capabilities =
        Java_Capabilities_Constructor(jni, capabilities_->loss_notification);
    ScopedJavaLocalRef<jobject> settings = Java_Settings_Constructor(
        jni, number_of_cores_, codec_settings_.width, codec_settings_.height,
        static_cast<int>(codec_settings_.startBitrate),
        static_cast<int>(codec_settings_.maxFramerate),
        static_cast<int>(codec_settings_.numberOfSimulcastStreams),
        automatic_resize_on, capabilities);
    ScopedJavaLocalRef<jobject> callback =
        Java_VideoEncoderWrapper_createEncoderCallback(jni, jlongFromPointer(this));
    int32_t status = JavaToNativeVideoCodecStatus(
        jni, Java_VideoEncoder_initEncode(jni, encoder_, settings, callback));
    RTC_LOG(LS_INFO) << "initEncode: " << status;
    encoder_info_.supports_native_handle = true;
    encoder_info_.implementation_name = GetImplementationName(jni);
    encoder_info_.scaling_settings = GetScalingSettingsInternal(jni);
    encoder_info_.is_hardware_accelerated = IsHardwareVideoEncoder(jni, encoder_);
    encoder_info_.has_internal_source = false;
    if (status == WEBRTC_VIDEO_CODEC_OK) {
      initialized_ = true;
    }
    return status;
4.
Java_VideoEncoder_initEncode(JNIEnv* env, const base::android::JavaRef<jobject>& obj,
    const base::android::JavaRef<jobject>& settings,
    const base::android::JavaRef<jobject>& encodeCallback)
    jclass clazz = org_webrtc_VideoEncoder_clazz(env);
    CHECK_CLAZZ(env, obj.obj(), org_webrtc_VideoEncoder_clazz(env), NULL);
    jni_generator::JniJavaCallContextChecked call_context;
    call_context.Init<base::android::MethodID::TYPE_INSTANCE>(env, clazz, "initEncode",
        "(Lorg/webrtc/VideoEncoder$Settings;Lorg/webrtc/VideoEncoder$Callback;)Lorg/webrtc/VideoCodecStatus;",
        &g_org_webrtc_VideoEncoder_initEncode);
    jobject ret = env->CallObjectMethod(obj.obj(), call_context.base.method_id,
                                        settings.obj(), encodeCallback.obj());
5. Java
HardwareVideoEncoder::initEncode(Settings settings, Callback callback)
    initEncodeInternal
6. Java
HardwareVideoEncoder::initEncodeInternal
    // create the encoder and set its parameters
    codec = mediaCodecWrapperFactory.createByCodecName(codecName);
    // start the thread that delivers encoded output
    outputThread = createOutputThread();
    outputThread.start();
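For comparison, a rough C++ sketch of the same MediaCodec setup using the Android NDK's AMediaCodec API — the NDK analogue of what HardwareVideoEncoder does in Java, not WebRTC's actual code; the mime, bitrate, and color-format values are just example assumptions:

#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>

// Create, configure, and start an H.264 hardware encoder (illustrative only).
AMediaCodec* CreateH264Encoder(int width, int height, int bitrate_bps) {
  AMediaCodec* codec = AMediaCodec_createEncoderByType("video/avc");
  if (!codec) return nullptr;
  AMediaFormat* format = AMediaFormat_new();
  AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "video/avc");
  AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_WIDTH, width);
  AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_HEIGHT, height);
  AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, bitrate_bps);
  AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_FRAME_RATE, 30);
  AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, 2);
  AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_COLOR_FORMAT,
                        21 /* COLOR_FormatYUV420SemiPlanar */);
  media_status_t status = AMediaCodec_configure(
      codec, format, nullptr /* surface */, nullptr /* crypto */,
      AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
  AMediaFormat_delete(format);
  if (status != AMEDIA_OK || AMediaCodec_start(codec) != AMEDIA_OK) {
    AMediaCodec_delete(codec);
    return nullptr;
  }
  // Output would then be drained on a dedicated thread, like outputThread above.
  return codec;
}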
//-----------------------------------------------------------------------------------------
// The encoding flow
//-----------------------------------------------------------------------------------------
1. Java
ScreenCapturerAndroid::onFrame(VideoFrame frame)
    capturerObserver.onFrameCaptured(frame);
2. Java
VideoSource::CapturerObserver::onFrameCaptured(VideoFrame frame)
    final VideoProcessor.FrameAdaptationParameters parameters =
        nativeAndroidVideoTrackSource.adaptFrame(frame);
    VideoFrame adaptedFrame = VideoProcessor.applyFrameAdaptationParameters(frame, parameters);
    if (adaptedFrame != null) {
      nativeAndroidVideoTrackSource.onFrameCaptured(adaptedFrame);
      adaptedFrame.release();
    }
Here nativeAndroidVideoTrackSource is the AndroidVideoTrackSource:
2.1 nativeCreateVideoSource(nativeFactory, isScreencast, alignTimestamps)
2.2 Java_org_webrtc_PeerConnectionFactory_nativeCreateVideoSource
2.3 ./sdk/android/src/jni/pc/peer_connection_factory.cc
    JNI_PeerConnectionFactory_CreateVideoSource
    return jlongFromPointer(CreateVideoSource(jni, factory->signaling_thread(),
        factory->worker_thread(), is_screencast, align_timestamps));
2.4 ./jni/pc/video.cc
    void* CreateVideoSource(JNIEnv* env, rtc::Thread* signaling_thread,
        rtc::Thread* worker_thread, jboolean is_screencast, jboolean align_timestamps)
    rtc::scoped_refptr<AndroidVideoTrackSource> source(
        new rtc::RefCountedObject<AndroidVideoTrackSource>(signaling_thread, env,
                                                           is_screencast, align_timestamps));
3.
NativeAndroidVideoTrackSource::onFrameCaptured(VideoFrame frame)
    nativeOnFrameCaptured(nativeAndroidVideoTrackSource, frame.getRotation(),
                          frame.getTimestampNs(), frame.getBuffer());
4.
Java_org_webrtc_NativeAndroidVideoTrackSource_nativeOnFrameCaptured
    AndroidVideoTrackSource* native =
        reinterpret_cast<AndroidVideoTrackSource*>(nativeAndroidVideoTrackSource);
    CHECK_NATIVE_PTR(env, jcaller, native, "OnFrameCaptured");
    return native->OnFrameCaptured(env, rotation, timestampNs,
                                   base::android::JavaParamRef<jobject>(env, buffer));
5.
./sdk/android/src/jni/android_video_track_source.cc
AndroidVideoTrackSource::OnFrameCaptured
    OnFrame(VideoFrame::Builder()
                .set_video_frame_buffer(buffer)
                .set_rotation(rotation)
                .set_timestamp_us(j_timestamp_ns / rtc::kNumNanosecsPerMicrosec)
                .build());
6.
This calls into the base class:
./media/base/adapted_video_track_source.cc
AdaptedVideoTrackSource::OnFrame
    broadcaster_.OnFrame(frame);
The sink inside broadcaster_ was added through AddOrUpdateSink — it is the VideoStreamEncoder; see step 11.5 of "Creating the video encoder and associating it with the source".
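Because the broadcaster fans frames out to any rtc::VideoSinkInterface, a custom sink can ride alongside the encoder on the same path. A minimal sketch (FrameCounterSink is a made-up class for illustration, not part of WebRTC):

#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"

// Counts frames delivered on the same OnFrame path that feeds VideoStreamEncoder.
class FrameCounterSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  void OnFrame(const webrtc::VideoFrame& frame) override {
    ++frames_;  // invoked by VideoBroadcaster::OnFrame for every delivered frame
  }
  int frames() const { return frames_; }

 private:
  int frames_ = 0;
};

// Usage sketch: video_track->AddOrUpdateSink(&sink, rtc::VideoSinkWants());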
7.
VideoBroadcaster::OnFrame(const webrtc::VideoFrame& frame)
    if (sink_pair.wants.black_frames) {
      webrtc::VideoFrame black_frame =
          webrtc::VideoFrame::Builder()
              .set_video_frame_buffer(GetBlackFrameBuffer(frame.width(), frame.height()))
              .set_rotation(frame.rotation())
              .set_timestamp_us(frame.timestamp_us())
              .set_id(frame.id())
              .build();
      sink_pair.sink->OnFrame(black_frame);
    } else if (!previous_frame_sent_to_all_sinks_ && frame.has_update_rect()) {
      // Since last frame was not sent to some sinks, no reliable update
      // information is available, so we need to clear the update rect.
      webrtc::VideoFrame copy = frame;
      copy.clear_update_rect();
      sink_pair.sink->OnFrame(copy);
    } else {
      sink_pair.sink->OnFrame(frame);
    }
  }
7.1 The sinks_ behind sink_pairs() were added via AddOrUpdateSink — the VideoStreamEncoder;
    see step 11.5 of "Creating the video encoder and associating it with the source".
7.2 AdaptedVideoTrackSource::AddOrUpdateSink
    broadcaster_.AddOrUpdateSink(sink, wants);
So the sink was added through AndroidVideoTrackSource (AdaptedVideoTrackSource); from here the frame lands in the sink, the VideoStreamEncoder:
8. ./video/video_stream_encoder.cc
void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame)
    MaybeEncodeVideoFrame(incoming_frame, post_time_us);
9. ./video/video_stream_encoder.cc
void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame,
                                               int64_t time_when_posted_us)
    EncodeVideoFrame(video_frame, time_when_posted_us);
10. ./video/video_stream_encoder.cc
void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
                                          int64_t time_when_posted_us)
    const int32_t encode_status = encoder_->Encode(out_frame, &next_frame_types_);
11.
int32_t VideoEncoderWrapper::Encode(const VideoFrame& frame,
                                    const std::vector<VideoFrameType>* frame_types)
    JNIEnv* jni = AttachCurrentThreadIfNeeded();
    // Construct encode info.
    ScopedJavaLocalRef<jobjectArray> j_frame_types = NativeToJavaFrameTypeArray(jni, *frame_types);
    ScopedJavaLocalRef<jobject> encode_info = Java_EncodeInfo_Constructor(jni, j_frame_types);
    FrameExtraInfo info;
    info.capture_time_ns = frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec;
    info.timestamp_rtp = frame.timestamp();
    frame_extra_infos_.push_back(info);
    ScopedJavaLocalRef<jobject> j_frame = NativeToJavaVideoFrame(jni, frame);
    ScopedJavaLocalRef<jobject> ret = Java_VideoEncoder_encode(jni, encoder_, j_frame, encode_info);
    ReleaseJavaVideoFrame(jni, j_frame);
    return HandleReturnCode(jni, ret, "encode");
12.
static base::android::ScopedJavaLocalRef<jobject> Java_VideoEncoder_encode(JNIEnv* env,
    const base::android::JavaRef<jobject>& obj, const base::android::JavaRef<jobject>& frame,
    const base::android::JavaRef<jobject>& info) {
  jclass clazz = org_webrtc_VideoEncoder_clazz(env);
  CHECK_CLAZZ(env, obj.obj(), org_webrtc_VideoEncoder_clazz(env), NULL);
  jni_generator::JniJavaCallContextChecked call_context;
  call_context.Init<base::android::MethodID::TYPE_INSTANCE>(env, clazz, "encode",
      "(Lorg/webrtc/VideoFrame;Lorg/webrtc/VideoEncoder$EncodeInfo;)Lorg/webrtc/VideoCodecStatus;",
      &g_org_webrtc_VideoEncoder_encode);
  jobject ret = env->CallObjectMethod(obj.obj(), call_context.base.method_id,
                                      frame.obj(), info.obj());
  return base::android::ScopedJavaLocalRef<jobject>(env, ret);
}
13. Java
public VideoCodecStatus HardwareVideoEncoder::encode(VideoFrame videoFrame, EncodeInfo encodeInfo)
This is where the video frame actually gets encoded.
//--------------------------------------------------------------------------------------------
//
// Sending the video
//
//--------------------------------------------------------------------------------------------
1. Java
In step 6 of the video encoder initialization, outputThread.start() started the encoded-data delivery thread:
void HardwareVideoEncoder::deliverEncodedImage()
    callback.onEncodedFrame(encodedImage, new CodecSpecificInfo());
This callback was passed in when the encoder was initialized; see step 3 of the encoder initialization:
ScopedJavaLocalRef<jobject> callback =
    Java_VideoEncoderWrapper_createEncoderCallback(jni, jlongFromPointer(this));
static base::android::ScopedJavaLocalRef<jobject> Java_VideoEncoderWrapper_createEncoderCallback(
    JNIEnv* env, jlong nativeEncoder) {
  jclass clazz = org_webrtc_VideoEncoderWrapper_clazz(env);
  CHECK_CLAZZ(env, clazz, org_webrtc_VideoEncoderWrapper_clazz(env), NULL);
  jni_generator::JniJavaCallContextChecked call_context;
  call_context.Init<base::android::MethodID::TYPE_STATIC>(env, clazz, "createEncoderCallback",
      "(J)Lorg/webrtc/VideoEncoder$Callback;",
      &g_org_webrtc_VideoEncoderWrapper_createEncoderCallback);
  jobject ret = env->CallStaticObjectMethod(clazz, call_context.base.method_id, nativeEncoder);
  return base::android::ScopedJavaLocalRef<jobject>(env, ret);
}
On the Java side this is just a lambda:
static VideoEncoder.Callback VideoEncoderWrapper::createEncoderCallback(final long nativeEncoder) {
  return (EncodedImage frame, VideoEncoder.CodecSpecificInfo info)
      -> nativeOnEncodedFrame(nativeEncoder, frame);
}
2.
nativeOnEncodedFrame
3.
JNI_GENERATOR_EXPORT void Java_org_webrtc_VideoEncoderWrapper_nativeOnEncodedFrame(
    JNIEnv* env, jclass jcaller, jlong nativeVideoEncoderWrapper, jobject frame) {
  VideoEncoderWrapper* native =
      reinterpret_cast<VideoEncoderWrapper*>(nativeVideoEncoderWrapper);
  CHECK_NATIVE_PTR(env, jcaller, native, "OnEncodedFrame");
  return native->OnEncodedFrame(env, base::android::JavaParamRef<jobject>(env, frame));
}
4.
void VideoEncoderWrapper::OnEncodedFrame(JNIEnv* jni, const JavaRef<jobject>& j_encoded_image)
    callback_->OnEncodedImage(frame_copy, &info, &header);
callback_ is registered via RegisterEncodeCompleteCallback: in
VideoStreamEncoder::ReconfigureEncoder(), encoder_ (the VideoEncoderWrapper) gets
encoder_->RegisterEncodeCompleteCallback(this);
From this we conclude that the next stop is VideoStreamEncoder::OnEncodedImage.
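The shape of that callback is the public webrtc::EncodedImageCallback interface. A minimal sketch of a consumer that could be registered the same way (BitstreamTap is a made-up name; at this code vintage the signature still carries an RTPFragmentationHeader parameter):

#include "api/video_codecs/video_encoder.h"

// Receives every encoded frame the encoder produces, like VideoStreamEncoder does.
class BitstreamTap : public webrtc::EncodedImageCallback {
 public:
  Result OnEncodedImage(const webrtc::EncodedImage& encoded_image,
                        const webrtc::CodecSpecificInfo* codec_specific_info,
                        const webrtc::RTPFragmentationHeader* fragmentation) override {
    // encoded_image holds the encoded access unit at this point.
    return Result(Result::OK);
  }
};

// Usage sketch: encoder->RegisterEncodeCompleteCallback(&tap);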
5.
EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
    const EncodedImage& encoded_image, const CodecSpecificInfo* codec_specific_info,
    const RTPFragmentationHeader* fragmentation)
    EncodedImageCallback::Result result = sink_->OnEncodedImage(
        image_copy, codec_info_copy ? codec_info_copy.get() : codec_specific_info,
        fragmentation_copy ? fragmentation_copy.get() : fragmentation);
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
sink_ is the VideoSendStreamImpl set via VideoStreamEncoder::SetSink; the flow is below.
This is how the encoder is associated with the sending object!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
5.1 WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream
      webrtc::VideoSendStream::Config config = parameters_.config.Copy();
      stream_ = call_->CreateVideoSendStream(std::move(config), parameters_.encoder_config.Copy());
    This produces a VideoSendStream; each WebRtcVideoSendStream holds one VideoSendStream.
5.2 ./call/call.cc
    webrtc::VideoSendStream* Call::CreateVideoSendStream
      VideoSendStream* send_stream = new VideoSendStream(
          clock_, num_cpu_cores_, module_process_thread_.get(), task_queue_factory_,
          call_stats_.get(), transport_send_ptr_, bitrate_allocator_.get(),
          video_send_delay_stats_.get(), event_log_, std::move(config), std::move(encoder_config),
          suspended_video_send_ssrcs_, suspended_video_payload_states_, std::move(fec_controller));
    transport_send_ptr_ is the RtpTransportControllerSend from Call::Create; see below.
5.3 ./video/video_send_stream.cc
    VideoSendStream::VideoSendStream(Clock* clock, int num_cpu_cores,
        ProcessThread* module_process_thread, TaskQueueFactory* task_queue_factory,
        CallStats* call_stats, RtpTransportControllerSendInterface* transport,
        BitrateAllocatorInterface* bitrate_allocator, SendDelayStats* send_delay_stats,
        RtcEventLog* event_log, VideoSendStream::Config config, VideoEncoderConfig encoder_config,
        const std::map<uint32_t, RtpState>& suspended_ssrcs,
        const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
        std::unique_ptr<FecController> fec_controller)
        : worker_queue_(transport->GetWorkerQueue()),
          stats_proxy_(clock, config, encoder_config.content_type),
          config_(std::move(config)),
          content_type_(encoder_config.content_type)
      video_stream_encoder_ = CreateVideoStreamEncoder(clock, task_queue_factory,
          num_cpu_cores, &stats_proxy_, config_.encoder_settings);
      worker_queue_->PostTask(ToQueuedTask(
          [this, clock, call_stats, transport, bitrate_allocator, send_delay_stats, event_log,
           &suspended_ssrcs, &encoder_config, &suspended_payload_states, &fec_controller]() {
            send_stream_.reset(new VideoSendStreamImpl(
                clock, &stats_proxy_, worker_queue_, call_stats, transport, bitrate_allocator,
                send_delay_stats, video_stream_encoder_.get(), event_log, &config_,
                encoder_config.max_bitrate_bps, encoder_config.bitrate_priority, suspended_ssrcs,
                suspended_payload_states, encoder_config.content_type, std::move(fec_controller)));
          },
          [this]() { thread_sync_event_.Set(); }));
      // CreateVideoStreamEncoder produces the VideoStreamEncoder object video_stream_encoder_:
      // std::unique_ptr<VideoStreamEncoderInterface> CreateVideoStreamEncoder(Clock* clock,
      //     TaskQueueFactory* task_queue_factory, uint32_t number_of_cores,
      //     VideoStreamEncoderObserver* encoder_stats_observer,
      //     const VideoStreamEncoderSettings& settings) {
      //   return std::make_unique<VideoStreamEncoder>(clock, number_of_cores,
      //       encoder_stats_observer, settings,
      //       std::make_unique<OveruseFrameDetector>(encoder_stats_observer), task_queue_factory);
      // }
5.4 This is where the encoder object (VideoStreamEncoder) and the VideoSendStreamImpl are associated:
    VideoSendStreamImpl::VideoSendStreamImpl(Clock* clock, SendStatisticsProxy* stats_proxy,
        rtc::TaskQueue* worker_queue, CallStats* call_stats,
        RtpTransportControllerSendInterface* transport, BitrateAllocatorInterface* bitrate_allocator,
        SendDelayStats* send_delay_stats, VideoStreamEncoderInterface* video_stream_encoder,
        RtcEventLog* event_log, const VideoSendStream::Config* config,
        int initial_encoder_max_bitrate, double initial_encoder_bitrate_priority,
        std::map<uint32_t, RtpState> suspended_ssrcs,
        std::map<uint32_t, RtpPayloadState> suspended_payload_states,
        VideoEncoderConfig::ContentType content_type,
        std::unique_ptr<FecController> fec_controller)
      // This call to VideoStreamEncoder::SetSink is what links VideoStreamEncoder
      // with VideoSendStreamImpl:
      video_stream_encoder_->SetSink(this, rotation_applied);
From this we conclude that the next stop is VideoSendStreamImpl::OnEncodedImage.
6.
EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
    const EncodedImage& encoded_image, const CodecSpecificInfo* codec_specific_info,
    const RTPFragmentationHeader* fragmentation)
    EncodedImageCallback::Result result(EncodedImageCallback::Result::OK);
    // we need to figure out which class rtp_video_sender_ actually is
    result = rtp_video_sender_->OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
rtp_video_sender_ is created during initialization:
rtp_video_sender_(transport_->CreateRtpVideoSender(
suspended_ssrcs,
suspended_payload_states,
config_->rtp,
config_->rtcp_report_interval_ms,
config_->send_transport,
CreateObservers(call_stats,
&encoder_feedback_,
stats_proxy_,
send_delay_stats),
event_log,
std::move(fec_controller),
CreateFrameEncryptionConfig(config_)))
Here transport is the RtpTransportControllerSend, and this call produces an RtpVideoSender object:
RtpTransportControllerSend::CreateRtpVideoSender
    video_rtp_senders_.push_back(std::make_unique<RtpVideoSender>(
        clock_, suspended_ssrcs, states, rtp_config, rtcp_report_interval_ms, send_transport,
        observers,
        // TODO(holmer): Remove this circular dependency by injecting
        // the parts of RtpTransportControllerSendInterface that are really used.
        this, event_log, &retransmission_rate_limiter_, std::move(fec_controller),
        frame_encryption_config.frame_encryptor, frame_encryption_config.crypto_options));
From this we conclude that the next stop is RtpVideoSender::OnEncodedImage.
7.
EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
    const EncodedImage& encoded_image, const CodecSpecificInfo* codec_specific_info,
    const RTPFragmentationHeader* fragmentation)
    // we need to figure out which class rtp_streams_[stream_index].sender_video actually is
    bool send_result = rtp_streams_[stream_index].sender_video->SendVideo(
        rtp_config_.payload_type, codec_type_, rtp_timestamp, encoded_image.capture_time_ms_,
        encoded_image, fragmentation,
        params_[stream_index].GetRtpVideoHeader(encoded_image, codec_specific_info,
                                                shared_frame_id_),
        expected_retransmission_time_ms);
7.1 Creating rtp_streams_
RtpVideoSender::RtpVideoSender(Clock* clock, std::map<uint32_t, RtpState> suspended_ssrcs,
    const std::map<uint32_t, RtpPayloadState>& states, const RtpConfig& rtp_config,
    int rtcp_report_interval_ms, Transport* send_transport, const RtpSenderObservers& observers,
    RtpTransportControllerSendInterface* transport, RtcEventLog* event_log,
    RateLimiter* retransmission_limiter, std::unique_ptr<FecController> fec_controller,
    FrameEncryptorInterface* frame_encryptor, const CryptoOptions& crypto_options)
    rtp_streams_(CreateRtpStreamSenders(clock, rtp_config, observers, rtcp_report_interval_ms,
                                        send_transport, transport->GetBandwidthObserver(),
                                        transport, flexfec_sender_.get(), event_log,
                                        retransmission_limiter, this, frame_encryptor,
                                        crypto_options))
7.2 The CreateRtpStreamSenders function
std::vector<RtpStreamSender> CreateRtpStreamSenders(Clock* clock, const RtpConfig& rtp_config,
    const RtpSenderObservers& observers, int rtcp_report_interval_ms, Transport* send_transport,
    RtcpBandwidthObserver* bandwidth_callback, RtpTransportControllerSendInterface* transport,
    FlexfecSender* flexfec_sender, RtcEventLog* event_log,
    RateLimiter* retransmission_rate_limiter, OverheadObserver* overhead_observer,
    FrameEncryptorInterface* frame_encryptor, const CryptoOptions& crypto_options) {
  RTC_DCHECK_GT(rtp_config.ssrcs.size(), 0);
  RtpRtcp::Configuration configuration;
  configuration.clock = clock;
  configuration.audio = false;
  configuration.receiver_only = false;
  configuration.outgoing_transport = send_transport;
  configuration.intra_frame_callback = observers.intra_frame_callback;
  configuration.rtcp_loss_notification_observer = observers.rtcp_loss_notification_observer;
  configuration.bandwidth_callback = bandwidth_callback;
  configuration.network_state_estimate_observer = transport->network_state_estimate_observer();
  configuration.transport_feedback_callback = transport->transport_feedback_observer();
  configuration.rtt_stats = observers.rtcp_rtt_stats;
  configuration.rtcp_packet_type_counter_observer = observers.rtcp_type_observer;
  // ---------------------------------------------------
  // presumably this is RtpPacketSender* RtpTransportControllerSend::packet_sender() ???
  configuration.paced_sender = transport->packet_sender();
  // ---------------------------------------------------
  configuration.send_bitrate_observer = observers.bitrate_observer;
  configuration.send_side_delay_observer = observers.send_delay_observer;
  configuration.send_packet_observer = observers.send_packet_observer;
  configuration.event_log = event_log;
  configuration.retransmission_rate_limiter = retransmission_rate_limiter;
  configuration.overhead_observer = overhead_observer;
  configuration.rtp_stats_callback = observers.rtp_stats;
  configuration.frame_encryptor = frame_encryptor;
  configuration.require_frame_encryption = crypto_options.sframe.require_frame_encryption;
  configuration.extmap_allow_mixed = rtp_config.extmap_allow_mixed;
  configuration.rtcp_report_interval_ms = rtcp_report_interval_ms;
  std::vector<RtpStreamSender> rtp_streams;
  const std::vector<uint32_t>& flexfec_protected_ssrcs = rtp_config.flexfec.protected_media_ssrcs;
  RTC_DCHECK(rtp_config.rtx.ssrcs.empty() ||
             rtp_config.rtx.ssrcs.size() == rtp_config.rtx.ssrcs.size());
  // create one sender per source ssrc
  for (size_t i = 0; i < rtp_config.ssrcs.size(); ++i) {
    configuration.local_media_ssrc = rtp_config.ssrcs[i];
    bool enable_flexfec = flexfec_sender != nullptr &&
        std::find(flexfec_protected_ssrcs.begin(), flexfec_protected_ssrcs.end(),
                  configuration.local_media_ssrc) != flexfec_protected_ssrcs.end();
    configuration.flexfec_sender = enable_flexfec ? flexfec_sender : nullptr;
    auto playout_delay_oracle = std::make_unique<PlayoutDelayOracle>();
    configuration.ack_observer = playout_delay_oracle.get();
    if (rtp_config.rtx.ssrcs.size() > i) {
      configuration.rtx_send_ssrc = rtp_config.rtx.ssrcs[i];
    }
    // this creates a ModuleRtpRtcpImpl object
    auto rtp_rtcp = RtpRtcp::Create(configuration);
    rtp_rtcp->SetSendingStatus(false);
    rtp_rtcp->SetSendingMediaStatus(false);
    rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
    // Set NACK.
    rtp_rtcp->SetStorePacketsStatus(true, kMinSendSidePacketHistorySize);
    FieldTrialBasedConfig field_trial_config;
    RTPSenderVideo::Config video_config;
    video_config.clock = configuration.clock;
    video_config.rtp_sender = rtp_rtcp->RtpSender();
    video_config.flexfec_sender = configuration.flexfec_sender;
    video_config.playout_delay_oracle = playout_delay_oracle.get();
    video_config.frame_encryptor = frame_encryptor;
    video_config.require_frame_encryption = crypto_options.sframe.require_frame_encryption;
    video_config.need_rtp_packet_infos = rtp_config.lntf.enabled;
    video_config.enable_retransmit_all_layers = false;
    video_config.field_trials = &field_trial_config;
    const bool should_disable_red_and_ulpfec = ShouldDisableRedAndUlpfec(enable_flexfec, rtp_config);
    if (rtp_config.ulpfec.red_payload_type != -1 && !should_disable_red_and_ulpfec) {
      video_config.red_payload_type = rtp_config.ulpfec.red_payload_type;
    }
    if (rtp_config.ulpfec.ulpfec_payload_type != -1 && !should_disable_red_and_ulpfec) {
      video_config.ulpfec_payload_type = rtp_config.ulpfec.ulpfec_payload_type;
    }
    // create the RTPSenderVideo object
    auto sender_video = std::make_unique<RTPSenderVideo>(video_config);
    rtp_streams.emplace_back(std::move(playout_delay_oracle), std::move(rtp_rtcp),
                             std::move(sender_video));
  }
  return rtp_streams;
}
From this we conclude that the next stop is RTPSenderVideo::SendVideo.
8. This function does a great deal of per-packet processing
bool RTPSenderVideo::SendVideo(int payload_type, absl::optional<VideoCodecType> codec_type,
    uint32_t rtp_timestamp, int64_t capture_time_ms, rtc::ArrayView<const uint8_t> payload,
    const RTPFragmentationHeader* fragmentation, RTPVideoHeader video_header,
    absl::optional<int64_t> expected_retransmission_time_ms)
    LogAndSendToNetwork(std::move(rtp_packets), unpacketized_payload_size);
9.
void RTPSenderVideo::LogAndSendToNetwork(std::vector<std::unique_ptr<RtpPacketToSend>> packets,
                                         size_t unpacketized_payload_size)
    rtp_sender_->EnqueuePackets(std::move(packets));
rtp_sender_ is a constructor argument:
RTPSenderVideo::RTPSenderVideo(const Config& config) : rtp_sender_(config.rtp_sender)
The chain is as follows:
./call/rtp_video_sender.cc
RtpRtcp::Configuration configuration;
configuration.paced_sender = transport->packet_sender();
auto rtp_rtcp = RtpRtcp::Create(configuration);
// this is the rtp_sender in question
video_config.rtp_sender = rtp_rtcp->RtpSender();
auto sender_video = std::make_unique<RTPSenderVideo>(video_config);
Now look at RtpRtcp::Create and rtp_rtcp->RtpSender():
./modules/rtp_rtcp/source/rtp_rtcp_impl.cc
std::unique_ptr<RtpRtcp> RtpRtcp::Create(const Configuration& configuration) {
  RTC_DCHECK(configuration.clock);
  return std::make_unique<ModuleRtpRtcpImpl>(configuration);
}
ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
    : rtcp_sender_(configuration),
      rtcp_receiver_(configuration, this),
      clock_(configuration.clock),
      last_bitrate_process_time_(clock_->TimeInMilliseconds()),
      last_rtt_process_time_(clock_->TimeInMilliseconds()),
      next_process_time_(clock_->TimeInMilliseconds() + kRtpRtcpMaxIdleTimeProcessMs),
      packet_overhead_(28),  // IPV4 UDP.
      nack_last_time_sent_full_ms_(0),
      nack_last_seq_number_sent_(0),
      remote_bitrate_(configuration.remote_bitrate_estimator),
      ack_observer_(configuration.ack_observer),
      rtt_stats_(configuration.rtt_stats),
      rtt_ms_(0) {
  if (!configuration.receiver_only) {
    // this is where rtp_sender_ is created
    rtp_sender_ = std::make_unique<RtpSenderContext>(configuration);
    // Make sure rtcp sender use same timestamp offset as rtp sender.
    rtcp_sender_.SetTimestampOffset(rtp_sender_->packet_generator.TimestampOffset());
  }
  // Set default packet size limit.
  // TODO(nisse): Kind-of duplicates
  // webrtc::VideoSendStream::Config::Rtp::kDefaultMaxPacketSize.
  const size_t kTcpOverIpv4HeaderSize = 40;
  SetMaxRtpPacketSize(IP_PACKET_SIZE - kTcpOverIpv4HeaderSize);
}
It comes from ModuleRtpRtcpImpl::RtpSender(), i.e. from ModuleRtpRtcpImpl's rtp_sender_ member:
RTPSender* ModuleRtpRtcpImpl::RtpSender() {
  return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr;
}
packet_generator is created in the following flow:
ModuleRtpRtcpImpl::RtpSenderContext::RtpSenderContext(
const RtpRtcp::Configuration& config)
: packet_history(config.clock),
packet_sender(config, &packet_history),
non_paced_sender(&packet_sender),
packet_generator(
config,
&packet_history,
config.paced_sender ? config.paced_sender : &non_paced_sender) {}
packet_generator is an RTPSender object; the containing struct is defined as follows:
struct RtpSenderContext {
  explicit RtpSenderContext(const RtpRtcp::Configuration& config);
  // Storage of packets, for retransmissions and padding, if applicable.
  RtpPacketHistory packet_history;
  // Handles final time timestamping/stats/etc and handover to Transport.
  RtpSenderEgress packet_sender;
  // If no paced sender configured, this class will be used to pass packets
  // from |packet_generator_| to |packet_sender_|.
  RtpSenderEgress::NonPacedPacketSender non_paced_sender;
  // Handles creation of RTP packets to be sent.
  RTPSender packet_generator;
};
From this we conclude that the next stop is RTPSender::EnqueuePackets.
10.
./modules/rtp_rtcp/source/rtp_sender.cc
This step puts the outgoing packets into RTPSender's queue:
void RTPSender::EnqueuePackets(std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
  RTC_DCHECK(!packets.empty());
  int64_t now_ms = clock_->TimeInMilliseconds();
  for (auto& packet : packets) {
    RTC_DCHECK(packet);
    RTC_CHECK(packet->packet_type().has_value()) << "Packet type must be set before sending.";
    if (packet->capture_time_ms() <= 0) {
      packet->set_capture_time_ms(now_ms);
    }
  }
  paced_sender_->EnqueuePackets(std::move(packets));
}
paced_sender_ is passed in at construction:
RTPSender::RTPSender(const RtpRtcp::Configuration& config, RtpPacketHistory* packet_history,
                     RtpPacketSender* packet_sender)
    : clock_(config.clock),
      random_(clock_->TimeInMicroseconds()),
      audio_configured_(config.audio),
      ssrc_(config.local_media_ssrc),
      rtx_ssrc_(config.rtx_send_ssrc),
      flexfec_ssrc_(config.flexfec_sender
                        ? absl::make_optional(config.flexfec_sender->ssrc())
                        : absl::nullopt),
      packet_history_(packet_history),
      paced_sender_(packet_sender),
      sending_media_(true),  // Default to sending media.
      max_packet_size_(IP_PACKET_SIZE - 28),  // Default is IP-v4/UDP.
      last_payload_type_(-1),
      rtp_header_extension_map_(config.extmap_allow_mixed),
      // RTP variables
      sequence_number_forced_(false),
      ssrc_has_acked_(false),
      rtx_ssrc_has_acked_(false),
      last_rtp_timestamp_(0),
      capture_time_ms_(0),
      last_timestamp_time_ms_(0),
      last_packet_marker_bit_(false),
      csrcs_(),
      rtx_(kRtxOff),
      supports_bwe_extension_(false),
      retransmission_rate_limiter_(config.retransmission_rate_limiter)
And this paced_sender_ is the configuration.paced_sender = transport->packet_sender(); from step 9, i.e.
RtpPacketSender* RtpTransportControllerSend::packet_sender();
./call/rtp_transport_controller_send.cc
RtpPacketSender* RtpTransportControllerSend::packet_sender() {
  if (use_task_queue_pacer_) {
    return task_queue_pacer_.get();
  }
  return process_thread_pacer_.get();
}
So it is either process_thread_pacer_ or task_queue_pacer_:
RtpTransportControllerSend::RtpTransportControllerSend(Clock* clock,
    webrtc::RtcEventLog* event_log, NetworkStatePredictorFactoryInterface* predictor_factory,
    NetworkControllerFactoryInterface* controller_factory, const BitrateConstraints& bitrate_config,
    std::unique_ptr<ProcessThread> process_thread, TaskQueueFactory* task_queue_factory,
    const WebRtcKeyValueConfig* trials)
    : clock_(clock),
      event_log_(event_log),
      bitrate_configurator_(bitrate_config),
      process_thread_(std::move(process_thread)),
      use_task_queue_pacer_(IsEnabled(trials, "WebRTC-TaskQueuePacer")),
      process_thread_pacer_(use_task_queue_pacer_
                                ? nullptr
                                : new PacedSender(clock, &packet_router_, event_log, trials,
                                                  process_thread_.get())),
      task_queue_pacer_(use_task_queue_pacer_
                            ? new TaskQueuePacedSender(clock, &packet_router_, event_log, trials,
                                                       task_queue_factory)
Either way, it is effectively a PacedSender object.
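Conceptually, the pacer is a leaky bucket: packets queue up and are released only as the send budget refills at the configured pacing rate, which smooths encoder bursts before they hit the network. A toy, standalone sketch of that idea (not WebRTC code):

#include <cstddef>
#include <queue>

// Toy pacer: Enqueue() buffers packet sizes; Process() releases as much data
// as the elapsed-time budget allows, mirroring PacedSender::Process() in spirit.
struct ToyPacer {
  double budget_bytes = 0;
  double pacing_bytes_per_ms;
  std::queue<size_t> queue;  // queued packet sizes, in bytes

  explicit ToyPacer(double rate_bytes_per_ms) : pacing_bytes_per_ms(rate_bytes_per_ms) {}

  void Enqueue(size_t bytes) { queue.push(bytes); }

  // Called periodically by a process thread, like PacedSender::Process().
  template <typename SendFn>
  void Process(double elapsed_ms, SendFn send) {
    budget_bytes += elapsed_ms * pacing_bytes_per_ms;
    while (!queue.empty() && budget_bytes >= queue.front()) {
      budget_bytes -= queue.front();
      send(queue.front());  // hand the packet onward (PacketRouter in the real code)
      queue.pop();
    }
  }
};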
11.
./modules/pacing/paced_sender.cc
void PacedSender::EnqueuePackets(std::vector<std::unique_ptr<RtpPacketToSend>> packets) {
  {
    rtc::CritScope cs(&critsect_);
    for (auto& packet : packets) {
      pacing_controller_.EnqueuePacket(std::move(packet));
    }
  }
  MaybeWakupProcessThread();
}
12.
void PacedSender::MaybeWakupProcessThread() {
  // Tell the process thread to call our TimeUntilNextProcess() method to get
  // a new time for when to call Process().
  if (process_thread_ && process_mode_ == PacingController::ProcessMode::kDynamic) {
    process_thread_->WakeUp(&module_proxy_);
  }
}
13.
void PacedSender::Process() {
  rtc::CritScope cs(&critsect_);
  pacing_controller_.ProcessPackets();
}
pacing_controller_ is the member:
PacingController pacing_controller_ RTC_GUARDED_BY(critsect_);
./modules/pacing/paced_sender.cc
PacedSender::PacedSender(Clock* clock, PacketRouter* packet_router, RtcEventLog* event_log,
                         const WebRtcKeyValueConfig* field_trials, ProcessThread* process_thread)
    : process_mode_((field_trials != nullptr &&
                     field_trials->Lookup("WebRTC-Pacer-DynamicProcess").find("Enabled") == 0)
                        ? PacingController::ProcessMode::kDynamic
                        : PacingController::ProcessMode::kPeriodic),
      pacing_controller_(clock, static_cast<PacingController::PacketSender*>(this), event_log,
                         field_trials, process_mode_),
      clock_(clock),
      packet_router_(packet_router),
      process_thread_(process_thread) {
  if (process_thread_)
    process_thread_->RegisterModule(&module_proxy_, RTC_FROM_HERE);
}
14.
./modules/pacing/pacing_controller.cc
void PacingController::ProcessPackets()
    packet_sender_->SendRtpPacket(std::move(rtp_packet), pacing_info);
packet_sender_ is passed in during initialization:
PacingController::PacingController(Clock* clock,
PacketSender* packet_sender,
RtcEventLog* event_log,
const WebRtcKeyValueConfig* field_trials,
ProcessMode mode)
: mode_(mode),
clock_(clock),
packet_sender_(packet_sender)
15.
./modules/pacing/paced_sender.cc
void PacedSender::SendRtpPacket(std::unique_ptr<RtpPacketToSend> packet,
                                const PacedPacketInfo& cluster_info) {
  critsect_.Leave();
  packet_router_->SendPacket(std::move(packet), cluster_info);
  critsect_.Enter();
}
packet_router_ is passed in at construction:
PacedSender::PacedSender(Clock* clock, PacketRouter* packet_router, RtcEventLog* event_log,
                         const WebRtcKeyValueConfig* field_trials, ProcessThread* process_thread)
    : process_mode_((field_trials != nullptr &&
                     field_trials->Lookup("WebRTC-Pacer-DynamicProcess").find("Enabled") == 0)
                        ? PacingController::ProcessMode::kDynamic
                        : PacingController::ProcessMode::kPeriodic),
      pacing_controller_(clock, static_cast<PacingController::PacketSender*>(this), event_log,
                         field_trials, process_mode_),
      clock_(clock),
      packet_router_(packet_router)
That is RtpTransportControllerSend's packet_router_:
PacketRouter packet_router_;
16.
./modules/pacing/packet_router.cc
void PacketRouter::SendPacket(std::unique_ptr<RtpPacketToSend> packet,
                              const PacedPacketInfo& cluster_info) {
  rtc::CritScope cs(&modules_crit_);
  // With the new pacer code path, transport sequence numbers are only set here,
  // on the pacer thread. Therefore we don't need atomics/synchronization.
  if (packet->IsExtensionReserved<TransportSequenceNumber>()) {
    packet->SetExtension<TransportSequenceNumber>((++transport_seq_) & 0xFFFF);
  }
  uint32_t ssrc = packet->Ssrc();
  auto kv = send_modules_map_.find(ssrc);
  if (kv == send_modules_map_.end()) {
    RTC_LOG(LS_WARNING) << "Failed to send packet, matching RTP module not found "
                           "or transport error. SSRC = "
                        << packet->Ssrc() << ", sequence number " << packet->SequenceNumber();
    return;
  }
  RtpRtcp* rtp_module = kv->second.first;
  if (!rtp_module->TrySendPacket(packet.get(), cluster_info)) {
    RTC_LOG(LS_WARNING) << "Failed to send packet, rejected by RTP module.";
    return;
  }
  if (rtp_module->SupportsRtxPayloadPadding()) {
    // This is now the last module to send media, and has the desired
    // properties needed for payload based padding. Cache it for later use.
    last_send_module_ = rtp_module;
  }
}
send_modules_map_ is populated like this:
./call/rtp_video_sender.cc
RtpVideoSender::RtpVideoSender(Clock* clock, std::map<uint32_t, RtpState> suspended_ssrcs,
    const std::map<uint32_t, RtpPayloadState>& states, const RtpConfig& rtp_config,
    int rtcp_report_interval_ms, Transport* send_transport, const RtpSenderObservers& observers,
    RtpTransportControllerSendInterface* transport, RtcEventLog* event_log,
    RateLimiter* retransmission_limiter, std::unique_ptr<FecController> fec_controller,
    FrameEncryptorInterface* frame_encryptor, const CryptoOptions& crypto_options)
    : send_side_bwe_with_overhead_(
          webrtc::field_trial::IsEnabled("WebRTC-SendSideBwe-WithOverhead")),
      account_for_packetization_overhead_(
          !webrtc::field_trial::IsDisabled("WebRTC-SubtractPacketizationOverhead")),
      use_early_loss_detection_(
          !webrtc::field_trial::IsDisabled("WebRTC-UseEarlyLossDetection")),
      active_(false),
      module_process_thread_(nullptr),
      suspended_ssrcs_(std::move(suspended_ssrcs)),
      flexfec_sender_(MaybeCreateFlexfecSender(clock, rtp_config, suspended_ssrcs_)),
      fec_controller_(std::move(fec_controller)),
      fec_allowed_(true),
      rtp_streams_(CreateRtpStreamSenders(clock, rtp_config, observers, rtcp_report_interval_ms,
                                          send_transport, transport->GetBandwidthObserver(),
                                          transport, flexfec_sender_.get(), event_log,
                                          retransmission_limiter, this, frame_encryptor,
                                          crypto_options))
  for (const RtpStreamSender& stream : rtp_streams_) {
    constexpr bool remb_candidate = true;
    transport->packet_router()->AddSendRtpModule(stream.rtp_rtcp.get(), remb_candidate);
  }
std::vector<RtpStreamSender> CreateRtpStreamSenders(Clock* clock, const RtpConfig& rtp_config,
    const RtpSenderObservers& observers, int rtcp_report_interval_ms, Transport* send_transport,
    RtcpBandwidthObserver* bandwidth_callback, RtpTransportControllerSendInterface* transport,
    FlexfecSender* flexfec_sender, RtcEventLog* event_log,
    RateLimiter* retransmission_rate_limiter, OverheadObserver* overhead_observer,
    FrameEncryptorInterface* frame_encryptor, const CryptoOptions& crypto_options)
    rtp_streams.emplace_back(std::move(playout_delay_oracle), std::move(rtp_rtcp),
                             std::move(sender_video));
    return rtp_streams;
So the modules are the ModuleRtpRtcpImpl objects produced by RtpRtcp::Create.
17.
./modules/rtp_rtcp/source/rtp_rtcp_impl.cc
bool ModuleRtpRtcpImpl::TrySendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info) {
  RTC_DCHECK(rtp_sender_);
  // TODO(sprang): Consider if we can remove this check.
  if (!rtp_sender_->packet_generator.SendingMedia()) {
    return false;
  }
  rtp_sender_->packet_sender.SendPacket(packet, pacing_info);
  return true;
}
That is the RtpSenderEgress packet_sender; member inside struct RtpSenderContext.
18.
./modules/rtp_rtcp/source/rtp_sender_egress.cc
void RtpSenderEgress::SendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info)
    const bool send_success = SendPacketToNetwork(*packet, options, pacing_info);
19.
bool RtpSenderEgress::SendPacketToNetwork(const RtpPacketToSend& packet,
                                          const PacketOptions& options,
                                          const PacedPacketInfo& pacing_info) {
  int bytes_sent = -1;
  if (transport_) {
    UpdateRtpOverhead(packet);
    bytes_sent = transport_->SendRtp(packet.data(), packet.size(), options)
                     ? static_cast<int>(packet.size())
                     : -1;
    if (event_log_ && bytes_sent > 0) {
      event_log_->Log(std::make_unique<RtcEventRtpPacketOutgoing>(
          packet, pacing_info.probe_cluster_id));
    }
  }
  if (bytes_sent <= 0) {
    RTC_LOG(LS_WARNING) << "Transport failed to send packet.";
    return false;
  }
  return true;
}
transport_ is passed in at construction:
RtpSenderEgress::RtpSenderEgress(const RtpRtcp::Configuration& config,
                                 RtpPacketHistory* packet_history)
    : ssrc_(config.local_media_ssrc),
      rtx_ssrc_(config.rtx_send_ssrc),
      flexfec_ssrc_(config.flexfec_sender
                        ? absl::make_optional(config.flexfec_sender->ssrc())
                        : absl::nullopt),
      populate_network2_timestamp_(config.populate_network2_timestamp),
      send_side_bwe_with_overhead_(
          IsEnabled("WebRTC-SendSideBwe-WithOverhead", config.field_trials)),
      clock_(config.clock),
      packet_history_(packet_history),
      transport_(config.outgoing_transport),
That takes us back to std::vector<RtpStreamSender> CreateRtpStreamSenders:
RtpRtcp::Configuration configuration;
configuration.outgoing_transport = send_transport;
Tracing further back, this is config_->send_transport inside VideoSendStreamImpl:
rtp_video_sender_(transport_->CreateRtpVideoSender(
suspended_ssrcs,
suspended_payload_states,
config_->rtp,
config_->rtcp_report_interval_ms,
config_->send_transport,
VideoSendStream::VideoSendStream
webrtc::VideoSendStream* Call::CreateVideoSendStream
WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream
webrtc::VideoSendStream::Config config = parameters_.config.Copy();
WebRtcVideoChannel::WebRtcVideoSendStream::WebRtcVideoSendStream(
parameters_(std::move(config), options, max_bitrate_bps, codec_settings)
VideoSendStreamParameters parameters_
bool WebRtcVideoChannel::AddSendStream(const StreamParams& sp) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_LOG(LS_INFO) << "AddSendStream: " << sp.ToString();
  if (!ValidateStreamParams(sp))
    return false;
  if (!ValidateSendSsrcAvailability(sp))
    return false;
  for (uint32_t used_ssrc : sp.ssrcs)
    send_ssrcs_.insert(used_ssrc);
  webrtc::VideoSendStream::Config config(this);
VideoSendStream::Config::Config(Transport* send_transport)
So send_transport is in fact the WebRtcVideoChannel object itself.
Everything up to this point has been the pacer, FEC, and RTP packetization; what follows is essentially the actual send path.
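Since send_transport is just a webrtc::Transport, any injection point implements the same two methods. A minimal sketch (LoggingTransport is a made-up class, shown only to illustrate the interface the WebRtcVideoChannel fulfills):

#include "api/call/transport.h"

// Receives serialized RTP/RTCP packets from RtpSenderEgress::SendPacketToNetwork.
class LoggingTransport : public webrtc::Transport {
 public:
  bool SendRtp(const uint8_t* packet, size_t length,
               const webrtc::PacketOptions& options) override {
    // Hand the serialized RTP packet to the real socket / SRTP layer here.
    return true;
  }
  bool SendRtcp(const uint8_t* packet, size_t length) override {
    // Same for RTCP.
    return true;
  }
};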
20.
./media/engine/webrtc_video_engine.cc
bool WebRtcVideoChannel::SendRtp(const uint8_t* data, size_t len,
                                 const webrtc::PacketOptions& options) {
  rtc::CopyOnWriteBuffer packet(data, len, kMaxRtpPacketLen);
  rtc::PacketOptions rtc_options;
  rtc_options.packet_id = options.packet_id;
  if (DscpEnabled()) {
    rtc_options.dscp = PreferredDscp();
  }
  rtc_options.info_signaled_after_sent.included_in_feedback = options.included_in_feedback;
  rtc_options.info_signaled_after_sent.included_in_allocation = options.included_in_allocation;
  return MediaChannel::SendPacket(&packet, rtc_options);
}
21.
./media/base/media_channel.h
bool SendPacket(rtc::CopyOnWriteBuffer* packet, const rtc::PacketOptions& options) {
  return DoSendPacket(packet, false, options);
}
22. ./media/base/media_channel.h
bool DoSendPacket(rtc::CopyOnWriteBuffer* packet, bool rtcp, const rtc::PacketOptions& options) {
  rtc::CritScope cs(&network_interface_crit_);
  if (!network_interface_)
    return false;
  return (!rtcp) ? network_interface_->SendPacket(packet, options)
                 : network_interface_->SendRtcp(packet, options);
}
network_interface_ is set via void MediaChannel::SetInterface:
./pc/channel.cc
void BaseChannel::Init_w(webrtc::RtpTransportInternal* rtp_transport,
                         const webrtc::MediaTransportConfig& media_transport_config) {
  RTC_DCHECK_RUN_ON(worker_thread_);
  media_transport_config_ = media_transport_config;
  network_thread_->Invoke<void>(RTC_FROM_HERE,
                                [this, rtp_transport] { SetRtpTransport(rtp_transport); });
  // Both RTP and RTCP channels should be set, we can call SetInterface on
  // the media channel and it can set network options.
  media_channel_->SetInterface(this, media_transport_config);
}
23.
It is in fact the VideoChannel:
bool BaseChannel::SendPacket(rtc::CopyOnWriteBuffer* packet, const rtc::PacketOptions& options) {
  return SendPacket(false, packet, options);
}
bool BaseChannel::SendPacket(bool rtcp, rtc::CopyOnWriteBuffer* packet,
                             const rtc::PacketOptions& options)
    return rtcp ? rtp_transport_->SendRtcpPacket(packet, options, PF_SRTP_BYPASS)
                : rtp_transport_->SendRtpPacket(packet, options, PF_SRTP_BYPASS);
rtp_transport_ is set via the interface
bool BaseChannel::SetRtpTransport(webrtc::RtpTransportInternal* rtp_transport)
./pc/peer_connection.cc
cricket::VideoChannel* PeerConnection::CreateVideoChannel(const std::string& mid)
    RtpTransportInternal* rtp_transport = GetRtpTransport(mid);
    video_channel->SetRtpTransport(rtp_transport);
./pc/peer_connection.h
RtpTransportInternal* GetRtpTransport(const std::string& mid) RTC_RUN_ON(signaling_thread()) {
  auto rtp_transport = transport_controller_->GetRtpTransport(mid);
  RTC_DCHECK(rtp_transport);
  return rtp_transport;
}
bool PeerConnection::Initialize
    transport_controller_.reset(new JsepTransportController(
        signaling_thread(), network_thread(), port_allocator_.get(),
        async_resolver_factory_.get(), config));
./pc/jsep_transport_controller.cc
RtpTransportInternal* JsepTransportController::GetRtpTransport( const std::string& mid) const { auto jsep_transport = GetJsepTransportForMid(mid); if (!jsep_transport) { return nullptr; } return jsep_transport->rtp_transport(); }
./pc/jsep_transport_controller.cc
const cricket::JsepTransport* JsepTransportController::GetJsepTransportForMid( const std::string& mid) const { auto it = mid_to_transport_.find(mid); return it == mid_to_transport_.end() ? nullptr : it->second; }
mid_to_transport_ is populated by:
bool JsepTransportController::SetTransportForMid
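The mid-to-transport bookkeeping is a plain map lookup that returns nullptr on a miss, exactly the pattern of the two functions above. A toy version under assumed type names:
#include <cassert>
#include <map>
#include <string>

struct TransportSketch { std::string name; };

class TransportMapSketch {
 public:
  void SetTransportForMid(const std::string& mid, TransportSketch* t) {
    mid_to_transport_[mid] = t;
  }
  TransportSketch* GetTransportForMid(const std::string& mid) const {
    auto it = mid_to_transport_.find(mid);
    return it == mid_to_transport_.end() ? nullptr : it->second;
  }

 private:
  std::map<std::string, TransportSketch*> mid_to_transport_;
};

int main() {
  TransportSketch video{"video-transport"};
  TransportMapSketch controller;
  controller.SetTransportForMid("video", &video);
  assert(controller.GetTransportForMid("video") == &video);
  assert(controller.GetTransportForMid("audio") == nullptr);  // unknown mid
}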
RTCError JsepTransportController::MaybeCreateJsepTransport(
    bool local,
    const cricket::ContentInfo& content_info,
    const cricket::SessionDescription& description)
  rtc::scoped_refptr<webrtc::IceTransportInterface> ice =
      CreateIceTransport(content_info.name, /*rtcp=*/false);
  std::unique_ptr<cricket::DtlsTransportInternal> rtp_dtls_transport =
      CreateDtlsTransport(content_info, ice->internal(), nullptr);
//--------------- The CreateIceTransport flow begins here -----------------------
rtc::scoped_refptr<webrtc::IceTransportInterface>
JsepTransportController::CreateIceTransport(const std::string& transport_name,
                                            bool rtcp) {
  int component = rtcp ? cricket::ICE_CANDIDATE_COMPONENT_RTCP
                       : cricket::ICE_CANDIDATE_COMPONENT_RTP;
  IceTransportInit init;
  init.set_port_allocator(port_allocator_);
  init.set_async_resolver_factory(async_resolver_factory_);
  init.set_event_log(config_.event_log);
  return config_.ice_transport_factory->CreateIceTransport(
      transport_name, component, std::move(init));
}
./api/ice_transport_factory.cc
rtc::scoped_refptr<IceTransportInterface> CreateIceTransport(
    cricket::PortAllocator* port_allocator) {
  IceTransportInit init;
  init.set_port_allocator(port_allocator);
  return CreateIceTransport(std::move(init));
}
rtc::scoped_refptr<IceTransportInterface> CreateIceTransport(
    IceTransportInit init) {
  return new rtc::RefCountedObject<IceTransportWithTransportChannel>(
      std::make_unique<cricket::P2PTransportChannel>(
          "", 0, init.port_allocator(), init.async_resolver_factory(),
          init.event_log()));
}
//------------------------ CreateIceTransport end -------------------------------
// This is in fact the DtlsTransport object
std::unique_ptr<cricket::DtlsTransportInternal>
JsepTransportController::CreateDtlsTransport(
    const cricket::ContentInfo& content_info,
    cricket::IceTransportInternal* ice,
    DatagramTransportInterface* datagram_transport) {
  RTC_DCHECK(network_thread_->IsCurrent());
  std::unique_ptr<cricket::DtlsTransportInternal> dtls;
  if (datagram_transport) {
    RTC_DCHECK(config_.use_datagram_transport ||
               config_.use_datagram_transport_for_data_channels);
  } else if (config_.dtls_transport_factory) {
    dtls = config_.dtls_transport_factory->CreateDtlsTransport(
        ice, config_.crypto_options);
  } else {
    // This is the branch that runs: a cricket::DtlsTransport is created.
    dtls = std::make_unique<cricket::DtlsTransport>(ice, config_.crypto_options,
                                                    config_.event_log);
  }
//-------------------------------------------------------------------
// ice is the underlying transport object
//-------------------------------------------------------------------
dtls_srtp_transport = CreateDtlsSrtpTransport(
    content_info.name, rtp_dtls_transport.get(), rtcp_dtls_transport.get());
std::unique_ptr<webrtc::DtlsSrtpTransport>
JsepTransportController::CreateDtlsSrtpTransport(
    const std::string& transport_name,
    cricket::DtlsTransportInternal* rtp_dtls_transport,
    cricket::DtlsTransportInternal* rtcp_dtls_transport) {
  RTC_DCHECK(network_thread_->IsCurrent());
  auto dtls_srtp_transport =
      std::make_unique<webrtc::DtlsSrtpTransport>(rtcp_dtls_transport == nullptr);
  if (config_.enable_external_auth) {
    dtls_srtp_transport->EnableExternalAuth();
  }
  dtls_srtp_transport->SetDtlsTransports(rtp_dtls_transport, rtcp_dtls_transport);
  dtls_srtp_transport->SetActiveResetSrtpParams(config_.active_reset_srtp_params);
  dtls_srtp_transport->SignalDtlsStateChange.connect(
      this, &JsepTransportController::UpdateAggregateStates_n);
  return dtls_srtp_transport;
}
Now let's look at how the JsepTransport itself is created:
std::unique_ptr<cricket::JsepTransport> jsep_transport = std::make_unique<cricket::JsepTransport>( content_info.name, certificate_, std::move(ice), std::move(rtcp_ice), std::move(unencrypted_rtp_transport), std::move(sdes_transport), std::move(dtls_srtp_transport), std::move(datagram_rtp_transport), std::move(rtp_dtls_transport), std::move(rtcp_dtls_transport), std::move(sctp_transport), std::move(datagram_transport), data_channel_transport);
JsepTransport construction:
./pc/jsep_transport.cc
JsepTransport::JsepTransport( const std::string& mid, const rtc::scoped_refptr<rtc::RTCCertificate>& local_certificate, rtc::scoped_refptr<webrtc::IceTransportInterface> ice_transport, rtc::scoped_refptr<webrtc::IceTransportInterface> rtcp_ice_transport, std::unique_ptr<webrtc::RtpTransport> unencrypted_rtp_transport, std::unique_ptr<webrtc::SrtpTransport> sdes_transport, std::unique_ptr<webrtc::DtlsSrtpTransport> dtls_srtp_transport, std::unique_ptr<webrtc::RtpTransportInternal> datagram_rtp_transport, std::unique_ptr<DtlsTransportInternal> rtp_dtls_transport, std::unique_ptr<DtlsTransportInternal> rtcp_dtls_transport, std::unique_ptr<SctpTransportInternal> sctp_transport, std::unique_ptr<webrtc::DatagramTransportInterface> datagram_transport, webrtc::DataChannelTransportInterface* data_channel_transport)
jsep_transport->rtp_transport() then resolves as follows:
webrtc::RtpTransportInternal* JsepTransport::rtp_transport() const {
  rtc::CritScope scope(&accessor_lock_);
  if (composite_rtp_transport_) {
    return composite_rtp_transport_.get();
  } else if (datagram_rtp_transport_) {
    return datagram_rtp_transport_.get();
  } else {
    // This is the branch taken.
    return default_rtp_transport();
  }
}
// Returns the default (non-datagram) rtp transport, if any.
webrtc::RtpTransportInternal* JsepTransport::default_rtp_transport() const
    RTC_EXCLUSIVE_LOCKS_REQUIRED(accessor_lock_) {
  if (dtls_srtp_transport_) {
    // This is the branch taken: the DtlsSrtpTransport.
    return dtls_srtp_transport_.get();
  } else if (sdes_transport_) {
    return sdes_transport_.get();
  } else if (unencrypted_rtp_transport_) {
    return unencrypted_rtp_transport_.get();
  } else {
    return nullptr;
  }
}
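Both functions above are plain priority chains over whichever transports were negotiated. A minimal sketch of that idiom, with invented names:
#include <cstdio>

struct RtpTransportSketch { const char* kind; };

RtpTransportSketch* PickTransport(RtpTransportSketch* composite,
                                  RtpTransportSketch* datagram,
                                  RtpTransportSketch* dtls_srtp,
                                  RtpTransportSketch* sdes,
                                  RtpTransportSketch* unencrypted) {
  if (composite) return composite;
  if (datagram) return datagram;
  if (dtls_srtp) return dtls_srtp;  // the usual case: DtlsSrtpTransport
  if (sdes) return sdes;
  return unencrypted;               // may be nullptr if nothing was negotiated
}

int main() {
  RtpTransportSketch dtls_srtp{"dtls-srtp"};
  RtpTransportSketch* chosen =
      PickTransport(nullptr, nullptr, &dtls_srtp, nullptr, nullptr);
  std::printf("selected: %s\n", chosen->kind);  // prints "dtls-srtp"
}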
24.
./pc/dtls_srtp_transport.cc
./pc/srtp_transport.cc
This calls into DtlsSrtpTransport's base class (SrtpTransport):
bool SrtpTransport::SendRtpPacket(rtc::CopyOnWriteBuffer* packet, const rtc::PacketOptions& options, int flags) return SendPacket(/*rtcp=*/false, packet, updated_options, flags);
25.
./pc/rtp_transport.cc
This calls into SrtpTransport's base class (RtpTransport):
bool RtpTransport::SendPacket(bool rtcp, rtc::CopyOnWriteBuffer* packet,
                              const rtc::PacketOptions& options, int flags) {
  rtc::PacketTransportInternal* transport =
      rtcp && !rtcp_mux_enabled_ ? rtcp_packet_transport_ : rtp_packet_transport_;
  int ret = transport->SendPacket(packet->cdata<char>(), packet->size(),
                                  options, flags);
  if (ret != static_cast<int>(packet->size())) {
    if (transport->GetError() == ENOTCONN) {
      RTC_LOG(LS_WARNING) << "Got ENOTCONN from transport.";
      SetReadyToSend(rtcp, false);
    }
    return false;
  }
  return true;
}
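Note the error handling in step 25: a send only counts as successful when the full packet size was written, and ENOTCONN clears the ready-to-send flag. The same logic in a runnable, self-contained form; FakeWire and the other names are invented for illustration.
#include <cerrno>
#include <cstdio>

struct FakeWire {
  bool connected = false;
  int error = 0;
  int Send(const char* /*data*/, int len) {
    if (!connected) { error = ENOTCONN; return -1; }
    return len;  // pretend the whole datagram went out
  }
};

bool SendPacketSketch(FakeWire& wire, const char* data, int len, bool& ready) {
  int ret = wire.Send(data, len);
  if (ret != len) {  // anything short of a full write is a failure
    if (wire.error == ENOTCONN) {
      std::puts("Got ENOTCONN from transport.");
      ready = false;  // mirrors SetReadyToSend(rtcp, false)
    }
    return false;
  }
  return true;
}

int main() {
  FakeWire wire;
  bool ready = true;
  SendPacketSketch(wire, "abc", 3, ready);  // fails: not connected yet
  wire.connected = true;
  SendPacketSketch(wire, "abc", 3, ready);  // succeeds
}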
Next, let's trace where rtp_packet_transport_ comes from.
In the call flow above, we already met this function:
JsepTransportController::CreateDtlsSrtpTransport dtls_srtp_transport->SetDtlsTransports(rtp_dtls_transport, rtcp_dtls_transport);
./pc/dtls_srtp_transport.cc
void DtlsSrtpTransport::SetDtlsTransports(
    cricket::DtlsTransportInternal* rtp_dtls_transport,
    cricket::DtlsTransportInternal* rtcp_dtls_transport)
  SetRtpPacketTransport(rtp_dtls_transport);
void RtpTransport::SetRtpPacketTransport(
    rtc::PacketTransportInternal* new_packet_transport)
  rtp_packet_transport_ = new_packet_transport;
So rtp_packet_transport_ is in fact the DtlsTransport.
26.
./p2p/base/dtls_transport.cc
int DtlsTransport::SendPacket(const char* data, size_t size,
                              const rtc::PacketOptions& options, int flags) {
  if (!dtls_active_) {
    // Not doing DTLS.
    return ice_transport_->SendPacket(data, size, options);
  }
  switch (dtls_state()) {
    case DTLS_TRANSPORT_NEW:
      // Can't send data until the connection is active.
      // TODO(ekr@rtfm.com): assert here if dtls_ is NULL?
      return -1;
    case DTLS_TRANSPORT_CONNECTING:
      // Can't send data until the connection is active.
      return -1;
    case DTLS_TRANSPORT_CONNECTED:
      if (flags & PF_SRTP_BYPASS) {
        RTC_DCHECK(!srtp_ciphers_.empty());
        if (!IsRtpPacket(data, size)) {
          return -1;
        }
        return ice_transport_->SendPacket(data, size, options);
      } else {
        return (dtls_->WriteAll(data, size, NULL, NULL) == rtc::SR_SUCCESS)
                   ? static_cast<int>(size)
                   : -1;
      }
    case DTLS_TRANSPORT_FAILED:
    case DTLS_TRANSPORT_CLOSED:
      // Can't send anything when we're closed.
      return -1;
    default:
      RTC_NOTREACHED();
      return -1;
  }
}
ice_transport_ is the object handed in at construction time:
DtlsTransport::DtlsTransport(IceTransportInternal* ice_transport, const webrtc::CryptoOptions& crypto_options, webrtc::RtcEventLog* event_log) : transport_name_(ice_transport->transport_name()), component_(ice_transport->component()), ice_transport_(ice_transport), downward_(NULL), srtp_ciphers_(crypto_options.GetSupportedDtlsSrtpCryptoSuites()), ssl_max_version_(rtc::SSL_PROTOCOL_DTLS_12), crypto_options_(crypto_options), event_log_(event_log) { RTC_DCHECK(ice_transport_); ConnectToIceTransport(); }
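Before dropping to the ICE layer, note the PF_SRTP_BYPASS branch in step 26: a packet that bypasses the DTLS stack must first look like RTP/RTCP. The customary first-byte demultiplexing rule (RFC 5764/7983: 0-3 STUN, 20-63 DTLS, 128-191 RTP/RTCP) in a standalone sketch; this is a simplification of the real IsRtpPacket check, with invented names.
#include <cstddef>
#include <cstdint>
#include <cstdio>

// First-byte ranges: 0-3 STUN, 20-63 DTLS, 128-191 RTP/RTCP.
bool LooksLikeRtpPacket(const uint8_t* data, size_t size) {
  if (size < 12) return false;              // a minimal RTP header is 12 bytes
  return data[0] >= 128 && data[0] <= 191;  // RTP version 2 in the top two bits
}

int main() {
  const uint8_t rtp[12] = {0x80, 96, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0};
  const uint8_t dtls[13] = {22, 254, 253, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  std::printf("rtp? %d, dtls-as-rtp? %d\n",
              LooksLikeRtpPacket(rtp, sizeof(rtp)),
              LooksLikeRtpPacket(dtls, sizeof(dtls)));  // prints "rtp? 1, dtls-as-rtp? 0"
}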
From the flow above, ice_transport_ is the P2PTransportChannel object.
27.
./p2p/base/p2p_transport_channel.cc
int P2PTransportChannel::SendPacket(const char* data, size_t len,
                                    const rtc::PacketOptions& options, int flags) {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (flags != 0) {
    error_ = EINVAL;
    return -1;
  }
  // If we don't think the connection is working yet, return ENOTCONN
  // instead of sending a packet that will probably be dropped.
  if (!ReadyToSend(selected_connection_)) {
    error_ = ENOTCONN;
    return -1;
  }
  last_sent_packet_id_ = options.packet_id;
  rtc::PacketOptions modified_options(options);
  modified_options.info_signaled_after_sent.packet_type = rtc::PacketType::kData;
  int sent = selected_connection_->Send(data, len, modified_options);
  if (sent <= 0) {
    RTC_DCHECK(sent < 0);
    error_ = selected_connection_->GetError();
  }
  return sent;
}
Data arriving on the underlying socket ultimately drives which connection gets selected:
P2PTransportChannel::OnReadPacket
    MaybeSwitchSelectedConnection(connection, IceControllerEvent::DATA_RECEIVED);
bool P2PTransportChannel::MaybeSwitchSelectedConnection(
    IceControllerEvent reason, IceControllerInterface::SwitchResult result)
    SwitchSelectedConnection(const_cast<Connection*>(*result.connection), reason);
void P2PTransportChannel::SwitchSelectedConnection(Connection* conn,
                                                   IceControllerEvent reason)
    selected_connection_ = conn;
// The Connection itself is created here:
bool P2PTransportChannel::CreateConnections(const Candidate& remote_candidate, PortInterface* origin_port) Connection* connection = port->CreateConnection(remote_candidate, origin);
./p2p/base/turn_port.cc
Connection* TurnPort::CreateConnection(const Candidate& remote_candidate,
                                       CandidateOrigin origin)
  // This creates the TurnEntry that step 29 below will use.
  if (CreateOrRefreshEntry(remote_candidate.address(), next_channel_number_,
                           remote_candidate.username())) {
    // An entry was created.
    next_channel_number_++;
  }
  // This creates the ProxyConnection that step 28 below will use.
  ProxyConnection* conn = new ProxyConnection(this, index, remote_candidate);
  AddOrReplaceConnection(conn);
28.
./p2p/base/connection.cc
int ProxyConnection::Send(const void* data, size_t size, const rtc::PacketOptions& options) port_->SendTo(data, size, remote_candidate_.address(), options, true);
port_ is passed in via the constructor; see TurnPort::CreateConnection above.
// ProxyConnection constructor
ProxyConnection::ProxyConnection(Port* port, size_t index,
                                 const Candidate& remote_candidate)
    : Connection(port, index, remote_candidate) {}
// Connection constructor
Connection::Connection(Port* port, size_t index, const Candidate& remote_candidate)
    : id_(rtc::CreateRandomId()), port_(port)
29.
./p2p/base/turn_port.cc
int TurnPort::SendTo(const void* data, size_t size, const rtc::SocketAddress& addr, const rtc::PacketOptions& options, bool payload) TurnEntry* entry = FindEntry(addr); int sent = entry->Send(data, size, payload, modified_options);
The entry is in fact one of the values in entries_; it was created in TurnPort::CreateConnection above, like so:
bool TurnPort::CreateOrRefreshEntry(const rtc::SocketAddress& addr, int channel_number, const std::string& remote_ufrag) entry = new TurnEntry(this, channel_number, addr, remote_ufrag); entries_.push_back(entry); return true;
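CreateOrRefreshEntry and FindEntry are one-entry-per-remote-address bookkeeping over entries_. A compact model with invented types; the channel numbers starting at 0x4000 follow the TURN RFC's channel-number range.
#include <cstdio>
#include <list>
#include <string>

struct TurnEntrySketch {
  std::string address;
  int channel_number;
};

class TurnPortSketch {
 public:
  bool CreateOrRefreshEntry(const std::string& addr) {
    if (FindEntry(addr)) return false;  // already known: refresh only
    entries_.push_back({addr, next_channel_number_++});
    return true;                        // a new entry was created
  }
  TurnEntrySketch* FindEntry(const std::string& addr) {
    for (auto& e : entries_)
      if (e.address == addr) return &e;
    return nullptr;
  }

 private:
  std::list<TurnEntrySketch> entries_;
  int next_channel_number_ = 0x4000;  // TURN channel numbers start at 0x4000
};

int main() {
  TurnPortSketch port;
  port.CreateOrRefreshEntry("1.2.3.4:5678");
  TurnEntrySketch* entry = port.FindEntry("1.2.3.4:5678");  // used at send time
  std::printf("channel number %d\n", entry->channel_number);
}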
30.
./p2p/base/turn_port.cc
int TurnEntry::Send(const void* data, size_t size, bool payload,
                    const rtc::PacketOptions& options)
  // This port_ is in fact the TurnPort.
  return port_->Send(buf.Data(), buf.Length(), modified_options);
31.
./p2p/base/turn_port.cc
int TurnPort::Send(const void* data, size_t len, const rtc::PacketOptions& options) { return socket_->SendTo(data, len, server_address_.address, options); }
How socket_ gets created is fairly involved; for the details see the post https://blog.csdn.net/freeabc/article/details/105659223
<<Analysis of the WebRTC initialization flow, covering creation of the PeerConnection object, wiring the capturer to the encoder, and sending the SDP to the remote side>>
./p2p/client/basic_port_allocator.cc
void AllocationSequence::OnMessage(rtc::Message* msg)
    CreateRelayPorts();
void AllocationSequence::CreateRelayPorts()
    CreateTurnPort(relay);
void AllocationSequence::CreateTurnPort(const RelayServerConfig& config)
  CreateRelayPortArgs args;
  args.network_thread = session_->network_thread();
  args.socket_factory = session_->socket_factory();
  args.network = network_;
  args.username = session_->username();
  args.password = session_->password();
  args.server_address = &(*relay_port);
  args.config = &config;
  args.origin = session_->allocator()->origin();
  args.turn_customizer = session_->allocator()->turn_customizer();
  // session_ is the PortAllocatorSession
  // allocator() is the BasicPortAllocator
  // relay_port_factory() is analyzed in 31.1 below
  port = session_->allocator()->relay_port_factory()->Create(
      args, session_->allocator()->min_port(), session_->allocator()->max_port());
// 31.1 The allocator comes about as follows; its relay port factory is in fact a TurnPortFactory
31.1.1 ./pc/peer_connection_factory.cc
PeerConnectionFactory::CreatePeerConnection
    dependencies.allocator = std::make_unique<cricket::BasicPortAllocator>(
        default_network_manager_.get(), packet_socket_factory,
        configuration.turn_customizer);
31.1.2 ./p2p/client/basic_port_allocator.cc
BasicPortAllocator::BasicPortAllocator(
    rtc::NetworkManager* network_manager,
    rtc::PacketSocketFactory* socket_factory,
    webrtc::TurnCustomizer* customizer,
    RelayPortFactoryInterface* relay_port_factory)
    : network_manager_(network_manager), socket_factory_(socket_factory)
  InitRelayPortFactory(relay_port_factory);
31.1.3 ./p2p/client/basic_port_allocator.cc
void BasicPortAllocator::InitRelayPortFactory(
    RelayPortFactoryInterface* relay_port_factory) {
  if (relay_port_factory != nullptr) {
    relay_port_factory_ = relay_port_factory;
  } else {
    // No factory injected: fall back to the default TurnPortFactory.
    default_relay_port_factory_.reset(new TurnPortFactory());
    relay_port_factory_ = default_relay_port_factory_.get();
  }
}
// 31.2 How the TurnPort is created
./p2p/client/turn_port_factory.cc
std::unique_ptr<Port> TurnPortFactory::Create(const CreateRelayPortArgs& args,
                                              int min_port, int max_port) {
  auto port = TurnPort::CreateUnique(
      args.network_thread, args.socket_factory, args.network, min_port, max_port,
      args.username, args.password, *args.server_address,
      args.config->credentials, args.config->priority, args.origin,
      args.config->tls_alpn_protocols, args.config->tls_elliptic_curves,
      args.turn_customizer, args.config->tls_cert_verifier);
  port->SetTlsCertPolicy(args.config->tls_cert_policy);
  port->SetTurnLoggingId(args.config->turn_logging_id);
  return std::move(port);
}
./p2p/base/turn_port.h
static std::unique_ptr<TurnPort> Create(
    rtc::Thread* thread, rtc::PacketSocketFactory* factory, rtc::Network* network,
    uint16_t min_port, uint16_t max_port,
    const std::string& username,  // ice username.
    const std::string& password,  // ice password.
    const ProtocolAddress& server_address, const RelayCredentials& credentials,
    int server_priority, const std::string& origin,
    const std::vector<std::string>& tls_alpn_protocols,
    const std::vector<std::string>& tls_elliptic_curves,
    webrtc::TurnCustomizer* customizer,
    rtc::SSLCertificateVerifier* tls_cert_verifier = nullptr) {
  // Using `new` to access a non-public constructor.
  return absl::WrapUnique(new TurnPort(
      thread, factory, network, min_port, max_port, username, password,
      server_address, credentials, server_priority, origin, tls_alpn_protocols,
      tls_elliptic_curves, customizer, tls_cert_verifier));
}
At this point the TurnPort is fully built. While the program runs, the interface below is invoked and creates the TurnPort's socket_;
see the post https://blog.csdn.net/freeabc/article/details/106000923, flow 7, section 7.6.
void TurnPort::PrepareAddress()
  if (!CreateTurnClientSocket()) {
    RTC_LOG(LS_ERROR) << "Failed to create TURN client socket";
    OnAllocateError(STUN_ERROR_GLOBAL_FAILURE,
                    "Failed to create TURN client socket.");
    return;
  }
bool TurnPort::CreateTurnClientSocket()
  socket_ = socket_factory()->CreateUdpSocket(
      rtc::SocketAddress(Network()->GetBestIP(), 0), min_port(), max_port());
This is in fact an AsyncUDPSocket; see the post https://blog.csdn.net/freeabc/article/details/106000923
AsyncPacketSocket* BasicPacketSocketFactory::CreateUdpSocket
32.
./rtc_base/async_udp_socket.cc
int AsyncUDPSocket::Send(const void* pv, size_t cb,
                         const rtc::PacketOptions& options) {
  rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis(),
                              options.info_signaled_after_sent);
  CopySocketInformationToPacketInfo(cb, *this, false, &sent_packet.info);
  int ret = socket_->Send(pv, cb);
  SignalSentPacket(this, sent_packet);
  return ret;
}
socket_ here is the one AsyncUDPSocket was initialized with; per the post https://blog.csdn.net/freeabc/article/details/106000923
AsyncPacketSocket* BasicPacketSocketFactory::CreateUdpSocket
AsyncSocket* socket = socket_factory()->CreateAsyncSocket(address.family(), SOCK_DGRAM);
this socket is the SocketDispatcher object.
33.
./rtc_base/physical_socket_server.cc
This calls SocketDispatcher's base-class function:
int PhysicalSocket::Send(const void* pv, size_t cb)
  int sent = DoSend(
      s_, reinterpret_cast<const char*>(pv), static_cast<int>(cb),
#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
      // Suppress SIGPIPE. Without this, attempting to send on a socket whose
      // other end is closed will result in a SIGPIPE signal being raised to
      // our process, which by default will terminate the process, which we
      // don't want. By specifying this flag, we'll just get the error EPIPE
      // instead and can handle the error gracefully.
      MSG_NOSIGNAL
#else
      0
#endif
  );
34. At this point the packet has actually left the process: the system send() is invoked.
int PhysicalSocket::DoSend(SOCKET socket, const char* buf, int len, int flags) { return ::send(socket, buf, len, flags); }
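As a standalone illustration of the same MSG_NOSIGNAL idea at the end of the chain, here is a plain POSIX UDP send (ordinary BSD-socket code, not WebRTC's; the flag is guarded for Linux just as above).
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  if (fd < 0) return 1;
  sockaddr_in dest{};
  dest.sin_family = AF_INET;
  dest.sin_port = htons(9);  // discard port, purely for illustration
  inet_pton(AF_INET, "127.0.0.1", &dest.sin_addr);
#if defined(__linux__)
  int flags = MSG_NOSIGNAL;  // avoid SIGPIPE, as PhysicalSocket::DoSend does
#else
  int flags = 0;
#endif
  const char payload[] = "rtp-bytes";
  ssize_t sent = sendto(fd, payload, sizeof(payload), flags,
                        reinterpret_cast<sockaddr*>(&dest), sizeof(dest));
  std::printf("sent %zd bytes\n", sent);
  close(fd);
}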
//----------------------------------------------------------------------------------------------
//
// Video encoder rate adaptation
//
//----------------------------------------------------------------------------------------------
1. Register the VideoSendStreamImpl with the Call object's bitrate_allocator_.
bitrate_allocator_ is the member created in Call::Call as bitrate_allocator_(new BitrateAllocator(this)).
VideoSendStreamImpl::StartupVideoSendStream()
bitrate_allocator_->AddObserver(this, GetAllocationConfig());
2. Whenever bitrate_allocator_ has an update, it calls the function below,
which is where the encoding rate is changed:
VideoSendStreamImpl::OnBitrateUpdated(BitrateAllocationUpdate update) video_stream_encoder_->OnBitrateUpdated( encoder_target_rate, encoder_stable_target_rate, link_allocation, rtc::dchecked_cast<uint8_t>(update.packet_loss_ratio * 256), update.round_trip_time.ms()); stats_proxy_->OnSetEncoderTargetRate(encoder_target_rate_bps_)
3. Updates to bitrate_allocator_ originate from the flow below.
First, the Call object registers itself with RtpTransportControllerSend:
void Call::RegisterRateObserver() transport_send_ptr_->RegisterTargetTransferRateObserver(this);
4. Then, inside RtpTransportControllerSend:
./call/rtp_transport_controller_send.cc
RtpTransportControllerSend::UpdateControlState() --->
when there is a transfer-rate update, it calls:
void Call::OnTargetTransferRate(TargetTransferRate msg)
bitrate_allocator_->OnNetworkEstimateChanged(msg);
5.
BitrateAllocator::OnNetworkEstimateChanged(TargetTransferRate msg) uint32_t protection_bitrate = config.observer->OnBitrateUpdated(update);
This is the very place where the encoder's rate gets changed.
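Steps 1 through 5 form a textbook observer chain: the stream registers with the allocator, and network estimates fan out as OnBitrateUpdated callbacks. A minimal sketch of the pattern; all names are invented stand-ins, and the real allocator splits the link capacity across streams rather than forwarding it verbatim.
#include <cstdint>
#include <cstdio>
#include <vector>

struct BitrateObserver {
  virtual ~BitrateObserver() = default;
  virtual void OnBitrateUpdated(uint32_t target_bps) = 0;
};

class BitrateAllocatorSketch {
 public:
  void AddObserver(BitrateObserver* obs) { observers_.push_back(obs); }
  void OnNetworkEstimateChanged(uint32_t link_bps) {
    // Simplified: forward the estimate to every registered stream.
    for (BitrateObserver* obs : observers_) obs->OnBitrateUpdated(link_bps);
  }

 private:
  std::vector<BitrateObserver*> observers_;
};

struct SendStreamSketch : BitrateObserver {
  void OnBitrateUpdated(uint32_t target_bps) override {
    std::printf("reconfigure encoder to %u bps\n", target_bps);  // -> SetRates
  }
};

int main() {
  BitrateAllocatorSketch allocator;
  SendStreamSketch stream;
  allocator.AddObserver(&stream);                 // StartupVideoSendStream
  allocator.OnNetworkEstimateChanged(1'200'000);  // congestion-controller event
}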
6. The video_stream_encoder_ used in step 2 comes from VideoSendStream::VideoSendStream:
video_stream_encoder_ = CreateVideoStreamEncoder(clock, task_queue_factory, num_cpu_cores, &stats_proxy_, config_.encoder_settings);
It is the VideoStreamEncoder object.
./api/video/video_stream_encoder_create.cc
7. Step 2's call, VideoStreamEncoder::OnBitrateUpdated:
if (encoder_) {
  encoder_->OnPacketLossRateUpdate(static_cast<float>(fraction_lost) / 256.f);
  encoder_->OnRttUpdate(round_trip_time_ms);
}
SetEncoderRates(UpdateBitrateAllocationAndNotifyObserver(new_rate_settings));
encoder_ is created inside VideoStreamEncoder::ReconfigureEncoder().
Here encoder_ is a VideoEncoderWrapper, a proxy for the Java-layer hardware encoder; it does not override the following
interface methods:
// Inform the encoder when the packet loss rate changes.
//
// Input: - packet_loss_rate : The packet loss rate (0.0 to 1.0).
virtual void OnPacketLossRateUpdate(float packet_loss_rate);
// Inform the encoder when the round trip time changes.
//
// Input: - rtt_ms : The new RTT, in milliseconds.
virtual void OnRttUpdate(int64_t rtt_ms);
8.
void VideoStreamEncoder::SetEncoderRates( const EncoderRateSettings& rate_settings) encoder_->SetRates(rate_settings.rate_control);
9.
void VideoEncoderWrapper::SetRates(const RateControlParameters& parameters)
  ScopedJavaLocalRef<jobject> j_bitrate_allocation =
      ToJavaBitrateAllocation(jni, parameters.bitrate);
  ScopedJavaLocalRef<jobject> ret = Java_VideoEncoder_setRateAllocation(
      jni, encoder_, j_bitrate_allocation, (jint)(parameters.framerate_fps + 0.5));
  HandleReturnCode(jni, ret, "setRateAllocation");
10. Java
// At this point the rate adjustment has taken effect.
public VideoCodecStatus HardwareVideoEncoder::setRateAllocation(
    BitrateAllocation bitrateAllocation, int framerate) {
  encodeThreadChecker.checkIsOnValidThread();
  if (framerate > MAX_VIDEO_FRAMERATE) {
    framerate = MAX_VIDEO_FRAMERATE;
  }
  bitrateAdjuster.setTargets(bitrateAllocation.getSum(), framerate);
  return VideoCodecStatus.OK;
}
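Step 10 reduces to clamping the framerate and handing both targets to a bitrate adjuster. The same logic as a small C++ sketch; the names are invented, and the 30 fps cap is an assumption standing in for MAX_VIDEO_FRAMERATE.
#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr int kMaxVideoFramerate = 30;  // assumed cap, mirrors MAX_VIDEO_FRAMERATE

struct BitrateAdjusterSketch {
  uint32_t target_bps = 0;
  int target_fps = 0;
  void SetTargets(uint32_t bps, int fps) { target_bps = bps; target_fps = fps; }
};

void SetRateAllocationSketch(BitrateAdjusterSketch& adjuster,
                             uint32_t sum_bps, int framerate) {
  framerate = std::min(framerate, kMaxVideoFramerate);  // the clamp in step 10
  adjuster.SetTargets(sum_bps, framerate);
}

int main() {
  BitrateAdjusterSketch adjuster;
  SetRateAllocationSketch(adjuster, 800'000, 60);  // 60 fps gets clamped to 30
  std::printf("%u bps @ %d fps\n", adjuster.target_bps, adjuster.target_fps);
}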
//----------------------------------------------------------------------------------------------
//
// How WebRtcVideoChannel::AddSendStream populates send_streams_ (WebRtcVideoSendStream)
//
//----------------------------------------------------------------------------------------------
1. Java
CallActivity::onConnectedToRoomInternal peerConnectionClient.createOffer()
2. Java
public void PeerConnectionClient::createOffer() {
  executor.execute(() -> {
    if (peerConnection != null && !isError) {
      Log.d(TAG, "PC Create OFFER");
      isInitiator = true;
      peerConnection.createOffer(sdpObserver, sdpMediaConstraints);
    }
  });
}
3. Java
public void PeerConnection::createOffer(SdpObserver observer, MediaConstraints constraints) { nativeCreateOffer(observer, constraints); }
4. C++
void Java_org_webrtc_PeerConnection_nativeCreateOffer(JNIEnv* env, jobject jcaller, jobject observer, jobject constraints) { return JNI_PeerConnection_CreateOffer(env, base::android::JavaParamRef<jobject>(env, jcaller), base::android::JavaParamRef<jobject>(env, observer), base::android::JavaParamRef<jobject>(env, constraints)); }
5. ./sdk/android/src/jni/pc/peer_connection.cc
static void JNI_PeerConnection_CreateOffer(
    JNIEnv* jni, const JavaParamRef<jobject>& j_pc,
    const JavaParamRef<jobject>& j_observer,
    const JavaParamRef<jobject>& j_constraints) {
  std::unique_ptr<MediaConstraints> constraints =
      JavaToNativeMediaConstraints(jni, j_constraints);
  rtc::scoped_refptr<CreateSdpObserverJni> observer(
      new rtc::RefCountedObject<CreateSdpObserverJni>(jni, j_observer,
                                                      std::move(constraints)));
  PeerConnectionInterface::RTCOfferAnswerOptions options;
  CopyConstraintsIntoOfferAnswerOptions(observer->constraints(), &options);
  ExtractNativePC(jni, j_pc)->CreateOffer(observer, options);
}
6. ./pc/peer_connection.cc
void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer, const RTCOfferAnswerOptions& options) this_weak_ptr->DoCreateOffer(options, observer_wrapper);
7. ./pc/peer_connection.cc
void PeerConnection::DoCreateOffer(const RTCOfferAnswerOptions& options, rtc::scoped_refptr<CreateSessionDescriptionObserver> observer) cricket::MediaSessionOptions session_options; GetOptionsForOffer(options, &session_options); webrtc_session_desc_factory_->CreateOffer(observer, options, session_options);
See PeerConnection::Initialize:
webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory ...)
8. ./pc/webrtc_session_description_factory.cc
void WebRtcSessionDescriptionFactory::CreateOffer(CreateSessionDescriptionObserver* observer, const PeerConnectionInterface::RTCOfferAnswerOptions& options, const cricket::MediaSessionOptions& session_options) InternalCreateOffer(request);
9. ./pc/webrtc_session_description_factory.cc
void WebRtcSessionDescriptionFactory::InternalCreateOffer(CreateSessionDescriptionRequest request) std::unique_ptr<cricket::SessionDescription> desc = session_desc_factory_.CreateOffer( request.options, pc_->local_description() ? pc_->local_description()->description() : nullptr);
9.1. ./pc/media_session.cc
MediaSessionDescriptionFactory::CreateOffer
This is where the SDP is actually created, and it is a very important spot. The call here is asynchronous; on success it then invokes:
auto offer = std::make_unique<JsepSessionDescription>(
    SdpType::kOffer, std::move(desc), session_id_,
    rtc::ToString(session_version_++));
CopyCandidatesFromSessionDescription(pc_->local_description(), options.mid,
                                     offer.get());
PostCreateSessionDescriptionSucceeded(request.observer, std::move(offer));
10. ./pc/webrtc_session_description_factory.cc
WebRtcSessionDescriptionFactory::OnMessage(rtc::Message* msg)
10.1 On failure: param->observer->OnFailure(std::move(param->error))
10.2 On success: param->observer->OnSuccess(param->description.release());
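Steps 9 and 10 follow a post-a-message callback contract: the offer is built, then success or failure is delivered to the observer later on the signaling thread. A condensed standalone sketch of that contract, with invented names and a plain queue standing in for the message loop.
#include <cstdio>
#include <functional>
#include <queue>
#include <string>

struct CreateSdpObserverSketch {
  void OnSuccess(const std::string& sdp) { std::printf("offer:\n%s", sdp.c_str()); }
  void OnFailure(const std::string& err) { std::printf("error: %s\n", err.c_str()); }
};

int main() {
  std::queue<std::function<void()>> message_queue;  // stands in for OnMessage
  CreateSdpObserverSketch observer;

  // "InternalCreateOffer": build the description, then post the result.
  std::string offer = "v=0\r\no=- 0 0 IN IP4 127.0.0.1\r\ns=-\r\n";
  bool ok = !offer.empty();
  message_queue.push([&observer, offer, ok] {
    if (ok) observer.OnSuccess(offer);
    else observer.OnFailure("failed to create session description");
  });

  // "OnMessage": drained later, on the signaling thread in the real code.
  while (!message_queue.empty()) {
    message_queue.front()();
    message_queue.pop();
  }
}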
11. Java
SDPObserver::onCreateSuccess(final SessionDescription origSdp)
  String sdpDescription = origSdp.description;
  if (preferIsac) {
    sdpDescription = preferCodec(sdpDescription, AUDIO_CODEC_ISAC, true);
  }
  if (isVideoCallEnabled()) {
    sdpDescription = preferCodec(sdpDescription,
        getSdpVideoCodecName(peerConnectionParameters), false);
  }
  final SessionDescription sdp = new SessionDescription(origSdp.type, sdpDescription);
  localSdp = sdp;
  executor.execute(() -> {
    if (peerConnection != null && !isError) {
      Log.d(TAG, "Set local SDP from " + sdp.type);
      peerConnection.setLocalDescription(sdpObserver, sdp);
    }
  });
This is the SDPObserver handed in back at step 2.
12. Java
PeerConnection::setLocalDescription(SdpObserver observer, SessionDescription sdp) { nativeSetLocalDescription(observer, sdp); }
13. C++
Java_org_webrtc_PeerConnection_nativeSetLocalDescription
14. ./sdk/android/src/jni/pc/peer_connection.cc
JNI_PeerConnection_SetLocalDescription
15. ./pc/peer_connection.cc
PeerConnection::SetLocalDescription(SetSessionDescriptionObserver* observer, SessionDescriptionInterface* desc_ptr) this_weak_ptr->DoSetLocalDescription(std::move(desc), std::move(observer_refptr));
16.
void PeerConnection::DoSetLocalDescription(std::unique_ptr<SessionDescriptionInterface> desc, rtc::scoped_refptr<SetSessionDescriptionObserver> observer) error = ApplyLocalDescription(std::move(desc));
17.
PeerConnection::ApplyLocalDescription(std::unique_ptr<SessionDescriptionInterface> desc) error = UpdateSessionState(type, cricket::CS_LOCAL, local_description()->description());
18.
RTCError PeerConnection::UpdateSessionState(SdpType type, cricket::ContentSource source,
const cricket::SessionDescription* description)
error = PushdownMediaDescription(type, source);
19.
RTCError PeerConnection::PushdownMediaDescription(SdpType type,
                                                  cricket::ContentSource source)
  for (const auto& transceiver : transceivers_) {
    const ContentInfo* content_info =
        FindMediaSectionForTransceiver(transceiver, sdesc);
    cricket::ChannelInterface* channel = transceiver->internal()->channel();
    const MediaContentDescription* content_desc =
        content_info->media_description();
    bool success = (source == cricket::CS_LOCAL)
                       ? channel->SetLocalContent(content_desc, type, &error)
                       : channel->SetRemoteContent(content_desc, type, &error);
Where does RtpTransceiver come from? See the analysis below; this is the Plan B flow.
transceivers_ is initialized during PeerConnection::Initialize; each element is an RtpTransceiver:
bool PeerConnection::Initialize(
    const PeerConnectionInterface::RTCConfiguration& configuration,
    PeerConnectionDependencies dependencies)
  transceivers_.push_back(RtpTransceiverProxyWithInternal<RtpTransceiver>::Create(
      signaling_thread(), new RtpTransceiver(cricket::MEDIA_TYPE_AUDIO)));
  transceivers_.push_back(RtpTransceiverProxyWithInternal<RtpTransceiver>::Create(
      signaling_thread(), new RtpTransceiver(cricket::MEDIA_TYPE_VIDEO)));
In CreateChannels, the transceiver is then given its VideoChannel object:
RTCError PeerConnection::CreateChannels(const SessionDescription& desc)
  cricket::VideoChannel* video_channel = CreateVideoChannel(video->name);
  if (!video_channel) {
    LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR,
                         "Failed to create video channel.");
  }
  GetVideoTransceiver()->internal()->SetChannel(video_channel);
20.
This is a parent-class (BaseChannel) method of VideoChannel:
bool BaseChannel::SetLocalContent(const MediaContentDescription* content, SdpType type, std::string* error_desc) { TRACE_EVENT0("webrtc", "BaseChannel::SetLocalContent"); return InvokeOnWorker<bool>(RTC_FROM_HERE, Bind(&BaseChannel::SetLocalContent_w, this, content, type, error_desc)); }
21.
VideoChannel::SetLocalContent_w(const MediaContentDescription* content, SdpType type, std::string* error_desc) const VideoContentDescription* video = content->as_video(); if (!UpdateLocalStreams_w(video->streams(), type, error_desc))
22.
This is a parent-class (BaseChannel) method of VideoChannel:
bool BaseChannel::UpdateLocalStreams_w(const std::vector<StreamParams>& streams, SdpType type, std::string* error_desc) if (media_channel()->AddSendStream(new_stream))
media_channel() is the WebRtcVideoChannel object; see the VideoChannel creation flow below.
22.1 PeerConnection creates the VideoChannel object:
cricket::VideoChannel* PeerConnection::CreateVideoChannel(const std::string& mid) {
  RtpTransportInternal* rtp_transport = GetRtpTransport(mid);
  MediaTransportConfig media_transport_config =
      transport_controller_->GetMediaTransportConfig(mid);
  cricket::VideoChannel* video_channel = channel_manager()->CreateVideoChannel(
      call_ptr_, configuration_.media_config, rtp_transport,
      media_transport_config, signaling_thread(), mid, SrtpRequired(),
      GetCryptoOptions(), &ssrc_generator_, video_options_,
      video_bitrate_allocator_factory_.get());
  if (!video_channel) {
    return nullptr;
  }
  video_channel->SignalDtlsSrtpSetupFailure.connect(
      this, &PeerConnection::OnDtlsSrtpSetupFailure);
  video_channel->SignalSentPacket.connect(this, &PeerConnection::OnSentPacket_w);
  video_channel->SetRtpTransport(rtp_transport);
  return video_channel;
}
22.2 ChannelManager creates the VideoChannel object:
VideoChannel* ChannelManager::CreateVideoChannel(
    webrtc::Call* call, const cricket::MediaConfig& media_config,
    webrtc::RtpTransportInternal* rtp_transport,
    const webrtc::MediaTransportConfig& media_transport_config,
    rtc::Thread* signaling_thread, const std::string& content_name,
    bool srtp_required, const webrtc::CryptoOptions& crypto_options,
    rtc::UniqueRandomIdGenerator* ssrc_generator, const VideoOptions& options,
    webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
  if (!worker_thread_->IsCurrent()) {
    return worker_thread_->Invoke<VideoChannel*>(RTC_FROM_HERE, [&] {
      return CreateVideoChannel(call, media_config, rtp_transport,
                                media_transport_config, signaling_thread,
                                content_name, srtp_required, crypto_options,
                                ssrc_generator, options,
                                video_bitrate_allocator_factory);
    });
  }
  RTC_DCHECK_RUN_ON(worker_thread_);
  RTC_DCHECK(initialized_);
  RTC_DCHECK(call);
  if (!media_engine_) {
    return nullptr;
  }
  // This is where the WebRtcVideoChannel object gets created!
  VideoMediaChannel* media_channel = media_engine_->video().CreateMediaChannel(
      call, media_config, options, crypto_options,
      video_bitrate_allocator_factory);
  if (!media_channel) {
    return nullptr;
  }
  auto video_channel = std::make_unique<VideoChannel>(
      worker_thread_, network_thread_, signaling_thread,
      absl::WrapUnique(media_channel), content_name, srtp_required,
      crypto_options, ssrc_generator);
  video_channel->Init_w(rtp_transport, media_transport_config);
  VideoChannel* video_channel_ptr = video_channel.get();
  video_channels_.push_back(std::move(video_channel));
  return video_channel_ptr;
}
22.3 media_engine_->video().CreateMediaChannel is what actually builds the WebRtcVideoChannel:
VideoMediaChannel* WebRtcVideoEngine::CreateMediaChannel(
    webrtc::Call* call, const MediaConfig& config, const VideoOptions& options,
    const webrtc::CryptoOptions& crypto_options,
    webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
  RTC_LOG(LS_INFO) << "CreateMediaChannel. Options: " << options.ToString();
  return new WebRtcVideoChannel(call, config, options, crypto_options,
                                encoder_factory_.get(), decoder_factory_.get(),
                                video_bitrate_allocator_factory);
}
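Note the thread hop at the top of ChannelManager::CreateVideoChannel in 22.2: if the caller is not on the worker thread, the function re-invokes itself there and blocks for the result. A standalone approximation of that Invoke idiom using std::packaged_task; the real code posts to an existing rtc::Thread rather than spawning a fresh one, so treat this as a sketch of the pattern only.
#include <cstdio>
#include <future>
#include <thread>

// InvokeOnWorker: run `fn` on another thread and wait for its result,
// like worker_thread_->Invoke<VideoChannel*>(RTC_FROM_HERE, ...).
template <typename Fn>
auto InvokeOnWorker(Fn fn) -> decltype(fn()) {
  std::packaged_task<decltype(fn())()> task(fn);
  auto result = task.get_future();
  std::thread worker(std::move(task));
  worker.join();       // the calling thread blocks until the work is done
  return result.get();
}

int main() {
  int* channel = InvokeOnWorker([] {
    static int the_channel = 42;  // stands in for the created VideoChannel
    std::puts("creating channel on worker thread");
    return &the_channel;
  });
  std::printf("channel=%d\n", *channel);
}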
23.
bool WebRtcVideoChannel::AddSendStream(const StreamParams& sp) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_LOG(LS_INFO) << "AddSendStream: " << sp.ToString();
  if (!ValidateStreamParams(sp))
    return false;
  if (!ValidateSendSsrcAvailability(sp))
    return false;
  for (uint32_t used_ssrc : sp.ssrcs)
    send_ssrcs_.insert(used_ssrc);
  webrtc::VideoSendStream::Config config(this);
  for (const RidDescription& rid : sp.rids()) {
    config.rtp.rids.push_back(rid.rid);
  }
  config.suspend_below_min_bitrate = video_config_.suspend_below_min_bitrate;
  config.periodic_alr_bandwidth_probing =
      video_config_.periodic_alr_bandwidth_probing;
  config.encoder_settings.experiment_cpu_load_estimator =
      video_config_.experiment_cpu_load_estimator;
  config.encoder_settings.encoder_factory = encoder_factory_;
  config.encoder_settings.bitrate_allocator_factory = bitrate_allocator_factory_;
  config.encoder_settings.encoder_switch_request_callback = this;
  config.crypto_options = crypto_options_;
  config.rtp.extmap_allow_mixed = ExtmapAllowMixed();
  config.rtcp_report_interval_ms = video_config_.rtcp_report_interval_ms;
  WebRtcVideoSendStream* stream = new WebRtcVideoSendStream(
      call_, sp, std::move(config), default_send_options_,
      video_config_.enable_cpu_adaptation, bitrate_config_.max_bitrate_bps,
      send_codec_, send_rtp_extensions_, send_params_);
  send_streams_[ssrc] = stream;
At this point the send_streams_ map inside WebRtcVideoChannel is populated; each entry is a WebRtcVideoSendStream.
24.
void WebRtcVideoChannel::WebRtcVideoSendStream::SetSend(bool send) { RTC_DCHECK_RUN_ON(&thread_checker_); sending_ = send; UpdateSendState(); }

