----------------------------------------------------------------------------------------------------------------------------------------
Set up an rtmpd server in one minute: https://blog.csdn.net/freeabc/article/details/102880984
Software download: http://www.qiyicc.com/download/rtmpd.rar
GitHub: https://github.com/superconvert/smart_rtmpd
-----------------------------------------------------------------------------------------------------------------------------------------
Analysis of the flow after WebRTC receives the offer, and of the path from the jitterbuffer to the decoder
// JsepTransportController::SetRemoteDescription ends by calling
return ApplyDescription_n(/*local=*/ false, type, description);
./pc/jsep_transport_controller.cc
RTCError JsepTransportController::ApplyDescription_n(
bool local, SdpType type, const cricket::SessionDescription* description)
for (const cricket::ContentInfo& content_info : description->contents()) {
// Don't create transports for rejected m-lines and bundled m-lines.
if (content_info.rejected ||
(IsBundled(content_info.name) && content_info.name != *bundled_mid())) {
continue;
}
// See the analysis of JsepTransportController::MaybeCreateJsepTransport in the post
// https://blog.csdn.net/freeabc/article/details/106287318
// It essentially creates the underlying transport object and wires up the transport relationships
error = MaybeCreateJsepTransport(local, content_info, *description);
if (!error.ok()) {
return error;
}
}
for (size_t i = 0; i < description->contents().size(); ++i) {
SetIceRole_n(DetermineIceRole(transport, transport_info, type, local));
transport->SetRemoteJsepTransportDescription(jsep_description, type);
}
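To make the skip condition above concrete, here is a minimal, self-contained sketch (not WebRTC code; ContentInfo and the bundle lookup below are simplified stand-ins) of how rejected m-lines, and bundled m-lines other than the bundle's first mid, are filtered out before any transport is created:

#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Simplified stand-ins for cricket::ContentInfo and the bundle lookup.
struct ContentInfo {
  std::string name;      // the mid of this m-line
  bool rejected = false;
};

int main() {
  std::vector<ContentInfo> contents = {{"audio"}, {"video"}, {"data", true}};
  // With BUNDLE, every bundled mid shares the transport of the bundle's first mid.
  std::optional<std::string> bundled_mid = "audio";

  auto is_bundled = [&](const std::string& mid) {
    (void)mid;                          // toy: assume every mid is in the BUNDLE group
    return bundled_mid.has_value();
  };

  for (const ContentInfo& content : contents) {
    if (content.rejected ||
        (is_bundled(content.name) && content.name != *bundled_mid)) {
      continue;                         // no transport for rejected / non-first bundled m-lines
    }
    std::cout << "create transport for mid=" << content.name << "\n";
  }
  // Prints only: create transport for mid=audio
}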
3.4.2 CreateChannels: create the audio and video channels
// See the post https://blog.csdn.net/freeabc/article/details/106287318
RTCError PeerConnection::CreateChannels(const SessionDescription& desc)
// This creates a VideoChannel; the VideoChannel's media_channel is the WebRtcVideoChannel
cricket::VideoChannel* video_channel = CreateVideoChannel(video->name);
if (!video_channel) {
LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, "Failed to create video channel.");
}
// Bind the RtpTransceiver to the VideoChannel
GetVideoTransceiver()->internal()->SetChannel(video_channel);
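The ownership chain this sets up is RtpTransceiver -> VideoChannel -> WebRtcVideoChannel. A toy sketch of that layering (the three structs below are simplified stand-ins, not the real classes):

#include <iostream>
#include <memory>

// Toy stand-ins showing the layering CreateChannels sets up:
// RtpTransceiver --> VideoChannel (BaseChannel) --> WebRtcVideoChannel (media channel).
struct MediaChannel {                   // plays the role of WebRtcVideoChannel
  void AddRecvStream() { std::cout << "WebRtcVideoChannel::AddRecvStream\n"; }
};

struct VideoChannel {                   // plays the role of cricket::VideoChannel
  explicit VideoChannel(std::unique_ptr<MediaChannel> mc) : media_channel(std::move(mc)) {}
  std::unique_ptr<MediaChannel> media_channel;
};

struct RtpTransceiver {                 // plays the role of the transceiver's internal()
  void SetChannel(VideoChannel* c) { channel = c; }
  VideoChannel* channel = nullptr;
};

int main() {
  VideoChannel video_channel(std::make_unique<MediaChannel>());
  RtpTransceiver transceiver;
  transceiver.SetChannel(&video_channel);            // GetVideoTransceiver()->internal()->SetChannel(...)
  transceiver.channel->media_channel->AddRecvStream();
}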
3.4.3 UpdateSessionState: update the channels and hook up each stage of the pipeline
// See the post https://blog.csdn.net/freeabc/article/details/106287318
RTCError PeerConnection::UpdateSessionState(SdpType type, cricket::ContentSource source,
const cricket::SessionDescription* description)
error = PushdownMediaDescription(type, source);
RTCError PeerConnection::PushdownMediaDescription(SdpType type, cricket::ContentSource source)
for (const auto& transceiver : transceivers_) {
const ContentInfo* content_info = FindMediaSectionForTransceiver(transceiver, sdesc);
cricket::ChannelInterface* channel = transceiver->internal()->channel();
const MediaContentDescription* content_desc = content_info->media_description();
bool success = (source == cricket::CS_LOCAL)
? channel->SetLocalContent(content_desc, type, &error)
: channel->SetRemoteContent(content_desc, type, &error);
./pc/channel.cc
bool BaseChannel::SetRemoteContent(const MediaContentDescription* content,
SdpType type, std::string* error_desc) {
TRACE_EVENT0("webrtc", "BaseChannel::SetRemoteContent");
return InvokeOnWorker<bool>(RTC_FROM_HERE,
Bind(&BaseChannel::SetRemoteContent_w, this, content, type, error_desc));
}
./pc/channel.cc
bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
SdpType type, std::string* error_desc)
if (!UpdateRemoteStreams_w(video->streams(), type, error_desc)) {
SafeSetError("Failed to set remote video description streams.", error_desc);
return false;
}
./pc/channel.cc
bool BaseChannel::UpdateRemoteStreams_w(const std::vector<StreamParams>& streams,
SdpType type, std::string* error_desc)
for (const StreamParams& new_stream : streams) {
// We allow a StreamParams with an empty list of SSRCs, in which case the
// MediaChannel will cache the parameters and use them for any unsignaled
// stream received later.
if ((!new_stream.has_ssrcs() && !HasStreamWithNoSsrcs(remote_streams_)) ||
!GetStreamBySsrc(remote_streams_, new_stream.first_ssrc())) {
// See step 3.4.3.1
if (AddRecvStream_w(new_stream)) {
}
}
}
// See step 3.4.3.2: this is what routes RTP packets received by the lower layers into this channel
RegisterRtpDemuxerSink();
remote_streams_ = streams;
3.4.3.1
./pc/channel.cc
bool BaseChannel::AddRecvStream_w(const StreamParams& sp) {
RTC_DCHECK(worker_thread() == rtc::Thread::Current());
// media_channel() is the WebRtcVideoChannel object
// See step 22 in the post https://blog.csdn.net/freeabc/article/details/106287318
return media_channel()->AddRecvStream(sp);
}
./media/engine/webrtc_video_engine.cc
bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp) {
return AddRecvStream(sp, false);
}
./media/engine/webrtc_video_engine.cc
bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp,
bool default_stream)
receive_streams_[ssrc] = new WebRtcVideoReceiveStream(
this, call_, sp, std::move(config), decoder_factory_, default_stream,
recv_codecs_, flexfec_config);
For a deeper analysis of WebRtcVideoReceiveStream, see the section below,
<<From jitterbuffer to decoder after video data is received>>
At this point the receive_streams_ entries of WebRtcVideoChannel have been created; each one is a WebRtcVideoReceiveStream
3.4.3.2
See step 10.3.3 in the post https://blog.csdn.net/freeabc/article/details/106142951
The key point: the VideoChannel is added as a sink to the rtp_demuxer_ inside RtpTransport, so RtpDemuxer::OnRtpPacket
ends up calling VideoChannel::OnRtpPacket; this links the components of the pipeline ( socket ---> jitterbuffer ---> decoder ---> render )
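A minimal sketch of that demuxing idea (toy types, not the real webrtc::RtpDemuxer, which also matches on MID, RSID and payload type): the channel registers itself as a sink, and incoming RTP is dispatched to it by SSRC.

#include <cstdint>
#include <iostream>
#include <map>

// Toy model of the demuxing step.
struct RtpPacket { uint32_t ssrc; };

struct RtpPacketSink {                 // role of VideoChannel / BaseChannel
  virtual ~RtpPacketSink() = default;
  virtual void OnRtpPacket(const RtpPacket& p) = 0;
};

struct RtpDemuxer {                    // role of the rtp_demuxer_ inside RtpTransport
  void AddSinkBySsrc(uint32_t ssrc, RtpPacketSink* sink) { sinks_[ssrc] = sink; }
  void OnRtpPacket(const RtpPacket& p) {
    auto it = sinks_.find(p.ssrc);
    if (it != sinks_.end()) it->second->OnRtpPacket(p);  // --> VideoChannel::OnRtpPacket
  }
  std::map<uint32_t, RtpPacketSink*> sinks_;
};

struct VideoChannel : RtpPacketSink {
  void OnRtpPacket(const RtpPacket& p) override {
    std::cout << "VideoChannel got RTP for ssrc " << p.ssrc << "\n";
  }
};

int main() {
  RtpDemuxer demuxer;
  VideoChannel channel;
  demuxer.AddSinkBySsrc(1234, &channel);   // what RegisterRtpDemuxerSink() amounts to
  demuxer.OnRtpPacket({1234});
}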
4. Flow of peerConnectionClient.createAnswer
void PeerConnectionClient::createAnswer() {
peerConnection.createAnswer(sdpObserver, sdpMediaConstraints);
}
void PeerConnection::createAnswer(SdpObserver observer, MediaConstraints constraints) {
nativeCreateAnswer(observer, constraints);
}
JNI_GENERATOR_EXPORT void Java_org_webrtc_PeerConnection_nativeCreateAnswer(
JNIEnv* env,
jobject jcaller,
jobject observer,
jobject constraints) {
return JNI_PeerConnection_CreateAnswer(env, base::android::JavaParamRef<jobject>(env, jcaller),
base::android::JavaParamRef<jobject>(env, observer), base::android::JavaParamRef<jobject>(env, constraints));
}
./sdk/android/src/jni/pc/peer_connection.cc
void JNI_PeerConnection_CreateAnswer(
JNIEnv* jni,
const JavaParamRef<jobject>& j_pc,
const JavaParamRef<jobject>& j_observer,
const JavaParamRef<jobject>& j_constraints) {
std::unique_ptr<MediaConstraints> constraints =
JavaToNativeMediaConstraints(jni, j_constraints);
rtc::scoped_refptr<CreateSdpObserverJni> observer(
new rtc::RefCountedObject<CreateSdpObserverJni>(jni, j_observer, std::move(constraints)));
PeerConnectionInterface::RTCOfferAnswerOptions options;
CopyConstraintsIntoOfferAnswerOptions(observer->constraints(), &options);
ExtractNativePC(jni, j_pc)->CreateAnswer(observer, options);
}
./pc/peer_connection.cc
void PeerConnection::CreateAnswer(CreateSessionDescriptionObserver* observer,
const RTCOfferAnswerOptions& options)
this_weak_ptr->DoCreateAnswer(options, observer_wrapper);
./pc/peer_connection.cc
void PeerConnection::DoCreateAnswer(
const RTCOfferAnswerOptions& options,
rtc::scoped_refptr<CreateSessionDescriptionObserver> observer)
webrtc_session_desc_factory_->CreateAnswer(observer, session_options);
./pc/webrtc_session_description_factory.cc
void WebRtcSessionDescriptionFactory::CreateAnswer(
CreateSessionDescriptionObserver* observer,
const cricket::MediaSessionOptions& session_options)
InternalCreateAnswer(request);
./pc/webrtc_session_description_factory.cc
void WebRtcSessionDescriptionFactory::InternalCreateAnswer(
CreateSessionDescriptionRequest request)
PostCreateSessionDescriptionSucceeded(request.observer, std::move(answer));
// Generate the corresponding SDP and notify the upper layers so they can prepare accordingly.
// For the subsequent flow see https://blog.csdn.net/freeabc/article/details/106287318
// That flow is mostly initialization: the network, and creation of the mid-layer channel, receiver and sender objects.
<<From jitterbuffer to decoder after video data is received>>
5. VideoReceiveStream::StartNextDecode registers the frame handler below with the jitterbuffer
./video/video_receive_stream.cc
void VideoReceiveStream::StartNextDecode() {
frame_buffer_->NextFrame(
    // max wait time, keyframe_required_, &decode_queue_, and then the encoded-frame handler lambda:
[this](std::unique_ptr<EncodedFrame> frame, ReturnReason res) {
RTC_DCHECK_EQ(frame == nullptr, res == ReturnReason::kTimeout);
RTC_DCHECK_EQ(frame != nullptr, res == ReturnReason::kFrameFound);
decode_queue_.PostTask([this, frame = std::move(frame)]() mutable {
RTC_DCHECK_RUN_ON(&decode_queue_);
if (decoder_stopped_)
return;
if (frame) {
// Start decoding; see step 10 below, where the frame is handed to the decoder
HandleEncodedFrame(std::move(frame));
} else {
HandleFrameBufferTimeout();
}
StartNextDecode();
});
}
);
}
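The shape of this loop is worth spelling out: every delivered frame (or timeout) posts a task onto the decode queue, and that task ends by calling StartNextDecode() again, so the pipeline keeps pulling frames out of the jitterbuffer. A self-contained toy model of that pattern (single-threaded; the types below are stand-ins, not WebRTC classes):

#include <deque>
#include <functional>
#include <iostream>
#include <optional>

struct EncodedFrame { int id; };

struct Pipeline {
  std::deque<std::function<void()>> decode_queue;          // stands in for decode_queue_
  std::deque<std::optional<EncodedFrame>> jitter_buffer;   // stands in for frame_buffer_

  void StartNextDecode() {
    if (jitter_buffer.empty()) return;                     // the real code waits with a timeout
    std::optional<EncodedFrame> frame = jitter_buffer.front();
    jitter_buffer.pop_front();
    decode_queue.push_back([this, frame] {
      if (frame) std::cout << "decode frame " << frame->id << "\n";   // HandleEncodedFrame
      else       std::cout << "timeout, maybe request keyframe\n";    // HandleFrameBufferTimeout
      StartNextDecode();                                   // re-arm, exactly like the lambda above
    });
  }

  void RunQueue() {
    while (!decode_queue.empty()) {
      auto task = decode_queue.front();
      decode_queue.pop_front();
      task();
    }
  }
};

int main() {
  Pipeline p;
  p.jitter_buffer = {EncodedFrame{1}, EncodedFrame{2}, std::nullopt};
  p.StartNextDecode();
  p.RunQueue();
}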
6. Definition of the jitterbuffer's NextFrame function
./modules/video_coding/frame_buffer2.cc
void FrameBuffer::NextFrame(
int64_t max_wait_time_ms,
bool keyframe_required,
rtc::TaskQueue* callback_queue,
std::function<void(std::unique_ptr<EncodedFrame>, ReturnReason)> handler) {
RTC_DCHECK_RUN_ON(callback_queue);
TRACE_EVENT0("webrtc", "FrameBuffer::NextFrame");
int64_t latest_return_time_ms =
clock_->TimeInMilliseconds() + max_wait_time_ms;
rtc::CritScope lock(&crit_);
if (stopped_) {
return;
}
latest_return_time_ms_ = latest_return_time_ms;
keyframe_required_ = keyframe_required;
// handler is the lambda from step 5 above
frame_handler_ = handler;
callback_queue_ = callback_queue;
// See step 7 below
StartWaitForNextFrameOnQueue();
}
7.
./modules/video_coding/frame_buffer2.cc
void FrameBuffer::StartWaitForNextFrameOnQueue() {
RTC_DCHECK(callback_queue_);
RTC_DCHECK(!callback_task_.Running());
// This takes the next decodable frame(s) out of the receive map frames_ and puts them into the to-be-decoded list; see step 8
int64_t wait_ms = FindNextFrame(clock_->TimeInMilliseconds());
callback_task_ = RepeatingTaskHandle::DelayedStart(
callback_queue_->Get(), TimeDelta::ms(wait_ms), [this] {
// If this task has not been cancelled, we did not get any new frames
// while waiting. Continue with frame delivery.
rtc::CritScope lock(&crit_);
if (!frames_to_decode_.empty()) {
// This first calls GetNextFrame (step 9) and then invokes the lambda from step 5 (step 10)
// We have frames, deliver!
frame_handler_(absl::WrapUnique(GetNextFrame()), kFrameFound);
CancelCallback();
return TimeDelta::Zero(); // Ignored.
} else if (clock_->TimeInMilliseconds() >= latest_return_time_ms_) {
// We have timed out, signal this and stop repeating.
frame_handler_(nullptr, kTimeout);
CancelCallback();
return TimeDelta::Zero(); // Ignored.
} else {
// If there's no frames to decode and there is still time left, it
// means that the frame buffer was cleared between creation and
// execution of this task. Continue waiting for the remaining time.
int64_t wait_ms = FindNextFrame(clock_->TimeInMilliseconds());
return TimeDelta::ms(wait_ms);
}
}
);
}
./rtc_base/task_utils/repeating_task.h
template <class Closure>
static RepeatingTaskHandle DelayedStart(TaskQueueBase* task_queue,
TimeDelta first_delay,
Closure&& closure) {
auto repeating_task = std::make_unique<webrtc_repeating_task_impl::RepeatingTaskImpl<Closure>>(
task_queue, first_delay, std::forward<Closure>(closure));
auto* repeating_task_ptr = repeating_task.get();
task_queue->PostDelayedTask(std::move(repeating_task), first_delay.ms());
return RepeatingTaskHandle(repeating_task_ptr);
}
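The contract used above is that the closure runs after first_delay and each invocation returns the delay until the next run, until the handle is stopped (CancelCallback in FrameBuffer). A simplified single-threaded model of that contract (FakeRepeatingTask and the toy TimeDelta below are illustrative, not the real TaskQueue machinery):

#include <functional>
#include <iostream>

struct TimeDelta {
  long long ms_ = 0;
  static TimeDelta ms(long long v) { return {v}; }
  static TimeDelta Zero() { return {0}; }
};

struct FakeRepeatingTask {
  bool running = true;
  void Stop() { running = false; }
  // Run the closure repeatedly, re-scheduling by the delay it returns.
  void Loop(TimeDelta first_delay, std::function<TimeDelta()> closure) {
    long long now = 0, next = first_delay.ms_;
    while (running && now < 100) {           // bounded so the toy terminates
      now = next;
      TimeDelta d = closure();               // in FrameBuffer this either delivers a
      next = now + d.ms_;                    // frame (and cancels) or keeps waiting
    }
  }
};

int main() {
  FakeRepeatingTask task;
  int runs = 0;
  task.Loop(TimeDelta::ms(10), [&] {
    ++runs;
    std::cout << "closure run " << runs << "\n";
    if (runs == 3) { task.Stop(); return TimeDelta::Zero(); }  // like CancelCallback()
    return TimeDelta::ms(10);
  });
}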
8.
int64_t FrameBuffer::FindNextFrame(int64_t now_ms) {
// frames_ is the receive map of frames, ordered by frame id
for (auto frame_it = frames_.begin();
frame_it != frames_.end() && frame_it->first <= last_continuous_frame_;
++frame_it) {
EncodedFrame* frame = frame_it->second.frame.get();
std::vector<FrameMap::iterator> current_superframe;
current_superframe.push_back(frame_it);
bool last_layer_completed = frame_it->second.frame->is_last_spatial_layer;
FrameMap::iterator next_frame_it = frame_it;
while (true) {
++next_frame_it;
if (next_frame_it == frames_.end() ||
next_frame_it->first.picture_id != frame->id.picture_id ||
!next_frame_it->second.continuous) {
break;
}
// Check if the next frame has some undecoded references other than
// the previous frame in the same superframe.
size_t num_allowed_undecoded_refs =
(next_frame_it->second.frame->inter_layer_predicted) ? 1 : 0;
if (next_frame_it->second.num_missing_decodable >
num_allowed_undecoded_refs) {
break;
}
// All frames in the superframe should have the same timestamp.
if (frame->Timestamp() != next_frame_it->second.frame->Timestamp()) {
RTC_LOG(LS_WARNING) << "Frames in a single superframe have different"
" timestamps. Skipping undecodable superframe.";
break;
}
// Collect this frame into the current superframe
current_superframe.push_back(next_frame_it);
last_layer_completed = next_frame_it->second.frame->is_last_spatial_layer;
}
}
// The decode list: hand the collected superframe over (in the full source this
// assignment happens inside the for loop, once the superframe is complete)
frames_to_decode_ = std::move(current_superframe);
}
// Where do the frames in frames_ come from?
// From the post https://blog.csdn.net/freeabc/article/details/106142951 we know that received video streams ultimately call
int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame)
// Insert the frame into the frames_ map
auto info = frames_.emplace(id, FrameInfo()).first;
// The received (encoded) video data
info->second.frame = std::move(frame);
// Signal the new-continuous-frame event
new_continuous_frame_event_.Set();
// Post another task that runs StartWaitForNextFrameOnQueue again
if (callback_queue_) {
callback_queue_->PostTask([this] {
rtc::CritScope lock(&crit_);
if (!callback_task_.Running())
return;
RTC_CHECK(frame_handler_);
callback_task_.Stop();
StartWaitForNextFrameOnQueue();
});
}
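A toy model of the interplay between InsertFrame and FindNextFrame (simplified names; the real FrameBuffer tracks continuity through reference counts rather than the naive scan below): frames live in a map ordered by picture id, and only frames up to last_continuous_frame_ are considered for decoding, so a gap (a lost frame) blocks delivery until it is filled.

#include <cstdint>
#include <iostream>
#include <map>
#include <optional>

struct FrameInfo { bool continuous = false; };

int main() {
  std::map<int64_t, FrameInfo> frames;                   // role of frames_
  std::optional<int64_t> last_continuous;                // role of last_continuous_frame_

  auto insert_frame = [&](int64_t picture_id) {          // role of FrameBuffer::InsertFrame
    frames[picture_id] = FrameInfo{};
    // A frame becomes continuous once its predecessor is; propagate in order.
    for (auto& [id, info] : frames) {
      bool prev_ok = (id == frames.begin()->first) ||
                     (frames.count(id - 1) && frames[id - 1].continuous);
      if (prev_ok) { info.continuous = true; last_continuous = id; }
    }
  };

  insert_frame(1);
  insert_frame(3);          // gap: frame 2 is missing, so 3 is not continuous yet
  std::cout << "last continuous: " << *last_continuous << "\n";   // 1
  insert_frame(2);          // gap filled -> 2 and 3 become continuous
  std::cout << "last continuous: " << *last_continuous << "\n";   // 3
}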
9.
EncodedFrame* FrameBuffer::GetNextFrame() {
std::vector<EncodedFrame*> frames_out;
for (FrameMap::iterator& frame_it : frames_to_decode_) {
EncodedFrame* frame = frame_it->second.frame.release();
frames_out.push_back(frame);
}
UpdateJitterDelay();
UpdateTimingFrameInfo();
return CombineAndDeleteFrames(frames_out);
}
10.
./video/video_receive_stream.cc
void VideoReceiveStream::HandleEncodedFrame(std::unique_ptr<EncodedFrame> frame) {
int64_t now_ms = clock_->TimeInMilliseconds();
// Current OnPreDecode only cares about QP for VP8.
int qp = -1;
if (frame->CodecSpecific()->codecType == kVideoCodecVP8) {
if (!vp8::GetQp(frame->data(), frame->size(), &qp)) {
RTC_LOG(LS_WARNING) << "Failed to extract QP from VP8 video frame";
}
}
stats_proxy_.OnPreDecode(frame->CodecSpecific()->codecType, qp);
HandleKeyFrameGeneration(frame->FrameType() == VideoFrameType::kVideoFrameKey,
now_ms);
// See the analysis in step 10.1
int decode_result = video_receiver_.Decode(frame.get());
if (decode_result == WEBRTC_VIDEO_CODEC_OK ||
decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) {
keyframe_required_ = false;
frame_decoded_ = true;
rtp_video_stream_receiver_.FrameDecoded(frame->id.picture_id);
if (decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME)
RequestKeyFrame(now_ms);
} else if (!frame_decoded_ || !keyframe_required_ ||
(last_keyframe_request_ms_ + max_wait_for_keyframe_ms_ < now_ms)) {
keyframe_required_ = true;
// TODO(philipel): Remove this keyframe request when downstream project
// has been fixed.
RequestKeyFrame(now_ms);
}
if (encoded_frame_buffer_function_) {
frame->Retain();
encoded_frame_buffer_function_(WebRtcRecordableEncodedFrame(*frame));
}
}
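The keyframe re-request condition at the end of HandleEncodedFrame is easy to misread, so here is a toy restatement of it (names and the constant are illustrative; the real code updates last_keyframe_request_ms_ inside RequestKeyFrame): after a failed decode we ask for a keyframe unless we are already waiting for one and the wait has not yet exceeded max_wait_for_keyframe_ms.

#include <cstdint>
#include <iostream>

struct KeyframePolicy {
  bool frame_decoded = false;
  bool keyframe_required = false;
  int64_t last_keyframe_request_ms = 0;
  int64_t max_wait_for_keyframe_ms = 200;

  bool ShouldRequestKeyframe(bool decode_ok, int64_t now_ms) {
    if (decode_ok) {
      keyframe_required = false;
      frame_decoded = true;
      return false;
    }
    // Request unless we are already waiting for a keyframe and the wait
    // has not yet exceeded max_wait_for_keyframe_ms.
    if (!frame_decoded || !keyframe_required ||
        last_keyframe_request_ms + max_wait_for_keyframe_ms < now_ms) {
      keyframe_required = true;
      last_keyframe_request_ms = now_ms;   // the real code does this in RequestKeyFrame
      return true;
    }
    return false;
  }
};

int main() {
  KeyframePolicy p;
  std::cout << p.ShouldRequestKeyframe(false, 1000) << "\n";  // 1: decode failed -> request
  std::cout << p.ShouldRequestKeyframe(false, 1050) << "\n";  // 1: nothing decoded yet -> request again
  std::cout << p.ShouldRequestKeyframe(true, 1100) << "\n";   // 0: decoded fine, clear the flag
}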
10.1 VideoReceiver2 video_receiver_;
./modules/video_coding/video_receiver2.cc
int32_t VideoReceiver2::Decode(const VCMEncodedFrame* frame) {
RTC_DCHECK_RUN_ON(&decoder_thread_checker_);
TRACE_EVENT0("webrtc", "VideoReceiver2::Decode");
// Change decoder if payload type has changed
VCMGenericDecoder* decoder =
codecDataBase_.GetDecoder(*frame, &decodedFrameCallback_);
if (decoder == nullptr) {
return VCM_NO_CODEC_REGISTERED;
}
// That covers locating the decoder; for the actual decode flow see step 10.2
return decoder->Decode(*frame, clock_->TimeInMilliseconds());
}
Now let's trace where codecDataBase_ gets its decoders. Looking at the function below, they are the video_decoders_ of VideoReceiveStream
./video/video_receive_stream.cc
void VideoReceiveStream::Start()
// Normally all three codecs are registered here: VP8, VP9 and H.264 (AVC)
for (const Decoder& decoder : config_.decoders) {
// See the LegacyCreateVideoDecoder analysis below
std::unique_ptr<VideoDecoder> video_decoder =
decoder.decoder_factory->LegacyCreateVideoDecoder(decoder.video_format,
config_.stream_id);
if (!decoded_output_file.empty()) {
char filename_buffer[256];
rtc::SimpleStringBuilder ssb(filename_buffer);
ssb << decoded_output_file << "/webrtc_receive_stream_"
<< this->config_.rtp.remote_ssrc << "-" << rtc::TimeMicros()
<< ".ivf";
video_decoder = CreateFrameDumpingDecoderWrapper(std::move(video_decoder), FileWrapper::OpenWriteOnly(ssb.str()));
}
video_decoders_.push_back(std::move(video_decoder));
video_receiver_.RegisterExternalDecoder(video_decoders_.back().get(),
decoder.payload_type);
}
// Now look at RegisterExternalDecoder
./modules/video_coding/decoder_database.h
void VCMDecoderDataBase::RegisterExternalDecoder(VideoDecoder* external_decoder, uint8_t payload_type) {
// If payload value already exists, erase old and insert new.
VCMExtDecoderMapItem* ext_decoder = new VCMExtDecoderMapItem(external_decoder, payload_type);
DeregisterExternalDecoder(payload_type);
dec_external_map_[payload_type] = ext_decoder;
}
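A toy illustration of what this database amounts to (simplified stand-in types): external decoders are kept in a map keyed by RTP payload type, and registering a payload type again replaces the old entry, which is exactly what the DeregisterExternalDecoder call before the map insert achieves.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct FakeDecoder { std::string name; };

int main() {
  std::map<uint8_t, FakeDecoder*> dec_external_map;   // role of dec_external_map_

  FakeDecoder vp8{"VP8"}, h264{"H264"}, h264_hw{"H264-hw"};
  dec_external_map[96] = &vp8;
  dec_external_map[102] = &h264;
  dec_external_map[102] = &h264_hw;                   // replaces the old entry for payload type 102

  for (const auto& [pt, dec] : dec_external_map)
    std::cout << int(pt) << " -> " << dec->name << "\n";
}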
// LegacyCreateVideoDecoder analysis
Next we follow how LegacyCreateVideoDecoder creates the decoder: first the decoder factory, then the CreateVideoDecoder function
./api/video_codecs/video_decoder_factory.cc
std::unique_ptr<VideoDecoder> VideoDecoderFactory::LegacyCreateVideoDecoder(
const SdpVideoFormat& format,
const std::string& receive_stream_id) {
return CreateVideoDecoder(format);
}
//------------------------------------------------
// Creating the decoder factory
//------------------------------------------------
./sdk/android/src/jni/pc/peer_connection_factory.cc
ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava(
JNIEnv* jni,
const JavaParamRef<jobject>& jcontext,
const JavaParamRef<jobject>& joptions,
rtc::scoped_refptr<AudioDeviceModule> audio_device_module,
rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
const JavaParamRef<jobject>& jencoder_factory,
const JavaParamRef<jobject>& jdecoder_factory,
rtc::scoped_refptr<AudioProcessing> audio_processor,
std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
std::unique_ptr<NetworkControllerFactoryInterface>
network_controller_factory,
std::unique_ptr<NetworkStatePredictorFactoryInterface>
network_state_predictor_factory,
std::unique_ptr<MediaTransportFactory> media_transport_factory,
std::unique_ptr<NetEqFactory> neteq_factory)
media_dependencies.video_decoder_factory =
absl::WrapUnique(CreateVideoDecoderFactory(jni, jdecoder_factory));
./sdk/android/src/jni/pc/video.cc
VideoDecoderFactory* CreateVideoDecoderFactory(
JNIEnv* jni,
const JavaRef<jobject>& j_decoder_factory) {
return IsNull(jni, j_decoder_factory)
? nullptr
: new VideoDecoderFactoryWrapper(jni, j_decoder_factory);
}
// The factory is now created: it is a VideoDecoderFactoryWrapper object
// Its CreateVideoDecoder function looks like this:
./sdk/android/src/jni/video_decoder_factory_wrapper.cc
std::unique_ptr<VideoDecoder> VideoDecoderFactoryWrapper::CreateVideoDecoder(
const SdpVideoFormat& format) {
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedJavaLocalRef<jobject> j_codec_info =
SdpVideoFormatToVideoCodecInfo(jni, format);
// See step 1
ScopedJavaLocalRef<jobject> decoder = Java_VideoDecoderFactory_createDecoder(
jni, decoder_factory_, j_codec_info);
if (!decoder.obj())
return nullptr;
// Wrap the Java-layer video decoder for use from native code; see step 2
return JavaToNativeVideoDecoder(jni, decoder);
}
1.
// This calls into the Java layer, which creates the (hardware) decoder
static base::android::ScopedJavaLocalRef<jobject> Java_VideoDecoderFactory_createDecoder(JNIEnv*
env, const base::android::JavaRef<jobject>& obj, const base::android::JavaRef<jobject>& info) {
jclass clazz = org_webrtc_VideoDecoderFactory_clazz(env);
CHECK_CLAZZ(env, obj.obj(),
org_webrtc_VideoDecoderFactory_clazz(env), NULL);
jni_generator::JniJavaCallContextChecked call_context;
call_context.Init<base::android::MethodID::TYPE_INSTANCE>(
env,
clazz,
"createDecoder",
"(Lorg/webrtc/VideoCodecInfo;)Lorg/webrtc/VideoDecoder;",
&g_org_webrtc_VideoDecoderFactory_createDecoder);
jobject ret =
env->CallObjectMethod(obj.obj(), call_context.base.method_id, info.obj());
return base::android::ScopedJavaLocalRef<jobject>(env, ret);
}
// Android Java
public VideoDecoder DefaultVideoDecoderFactory::createDecoder(VideoCodecInfo codecType)
VideoDecoder softwareDecoder = softwareVideoDecoderFactory.createDecoder(codecType);
final VideoDecoder hardwareDecoder = hardwareVideoDecoderFactory.createDecoder(codecType);
if (softwareDecoder == null && platformSoftwareVideoDecoderFactory != null) {
softwareDecoder = platformSoftwareVideoDecoderFactory.createDecoder(codecType);
}
if (hardwareDecoder != null && softwareDecoder != null) {
// This is what usually gets returned to the native layer
// Both hardware and software supported, wrap it in a software fallback
return new VideoDecoderFallback(
softwareDecoder, hardwareDecoder);
}
return hardwareDecoder != null ? hardwareDecoder : softwareDecoder;
}
// Android Java --- MediaCodecVideoDecoderFactory, the base class of HardwareVideoDecoderFactory
public VideoDecoder MediaCodecVideoDecoderFactory::createDecoder(VideoCodecInfo codecType) {
VideoCodecType type = VideoCodecType.valueOf(codecType.getName());
MediaCodecInfo info = findCodecForType(type);
if (info == null) {
return null;
}
CodecCapabilities capabilities = info.getCapabilitiesForType(type.mimeType());
return new AndroidVideoDecoder(new MediaCodecWrapperFactoryImpl(), info.getName(), type,
MediaCodecUtils.selectColorFormat(MediaCodecUtils.DECODER_COLOR_FORMATS, capabilities),
sharedContext);
}
2.
./sdk/android/src/jni/video_decoder_wrapper.cc
std::unique_ptr<VideoDecoder> JavaToNativeVideoDecoder(
JNIEnv* jni,
const JavaRef<jobject>& j_decoder) {
// Ask the Java layer whether it provides a native decoder
const jlong native_decoder =
Java_VideoDecoder_createNativeVideoDecoder(jni, j_decoder);
VideoDecoder* decoder;
if (native_decoder == 0) {
decoder = new VideoDecoderWrapper(jni, j_decoder);
} else {
decoder = reinterpret_cast<VideoDecoder*>(native_decoder);
}
return std::unique_ptr<VideoDecoder>(decoder);
}
Java_VideoDecoder_createNativeVideoDecoder calls into the Java layer:
public long VideoDecoderFallback::createNativeVideoDecoder() {
return nativeCreateDecoder(fallback, primary);
}
JNI_GENERATOR_EXPORT jlong Java_org_webrtc_VideoDecoderFallback_nativeCreateDecoder(
JNIEnv* env,
jclass jcaller,
jobject fallback,
jobject primary) {
return JNI_VideoDecoderFallback_CreateDecoder(env, base::android::JavaParamRef<jobject>(env,
fallback), base::android::JavaParamRef<jobject>(env, primary));
}
./sdk/android/src/jni/video_decoder_fallback.cc
static jlong JNI_VideoDecoderFallback_CreateDecoder(
JNIEnv* jni,
const JavaParamRef<jobject>& j_fallback_decoder,
const JavaParamRef<jobject>& j_primary_decoder)
VideoDecoder* nativeWrapper =
CreateVideoDecoderSoftwareFallbackWrapper(std::move(fallback_decoder), std::move(primary_decoder))
./api/video_codecs/video_decoder_software_fallback_wrapper.cc
std::unique_ptr<VideoDecoder> CreateVideoDecoderSoftwareFallbackWrapper(
std::unique_ptr<VideoDecoder> sw_fallback_decoder,
std::unique_ptr<VideoDecoder> hw_decoder) {
return std::make_unique<VideoDecoderSoftwareFallbackWrapper>(
std::move(sw_fallback_decoder), std::move(hw_decoder));
}
// This VideoDecoderSoftwareFallbackWrapper is the decoder_ inside VCMGenericDecoder
10.2
./modules/video_coding/generic_decoder.cc
int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp", frame.Timestamp());
_frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
_frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
_frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
_frameInfos[_nextFrameInfoIdx].timing = frame.video_timing();
_frameInfos[_nextFrameInfoIdx].ntp_time_ms = frame.EncodedImage().ntp_time_ms_;
_frameInfos[_nextFrameInfoIdx].packet_infos = frame.PacketInfos();
// Set correctly only for key frames. Thus, use latest key frame
// content type. If the corresponding key frame was lost, decode will fail
// and content type will be ignored.
if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
_frameInfos[_nextFrameInfoIdx].content_type = frame.contentType();
_last_keyframe_content_type = frame.contentType();
} else {
_frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
}
_callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]);
_nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
// From the analysis in step 10.1 we know decoder_ is the VideoDecoderSoftwareFallbackWrapper
int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
frame.RenderTimeMs());
_callback->OnDecoderImplementationName(decoder_->ImplementationName());
if (ret < WEBRTC_VIDEO_CODEC_OK) {
RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
<< frame.Timestamp() << ", error code: " << ret;
_callback->Pop(frame.Timestamp());
return ret;
} else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT) {
// No output
_callback->Pop(frame.Timestamp());
}
return ret;
}
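The _frameInfos handling above is a small ring buffer: per-frame metadata is written into a fixed-size circular array before the frame goes to the decoder, and the decoded-frame callback later looks it up by RTP timestamp (the Map/Pop calls). A toy model of that buffer (size and names are illustrative, not the real kDecoderFrameMemoryLength):

#include <array>
#include <cstdint>
#include <iostream>

constexpr size_t kFrameMemoryLength = 4;

struct FrameInfo { uint32_t rtp_timestamp = 0; int64_t decode_start_ms = 0; };

int main() {
  std::array<FrameInfo, kFrameMemoryLength> frame_infos{};
  size_t next_idx = 0;

  auto on_frame_to_decoder = [&](uint32_t ts, int64_t now_ms) {
    frame_infos[next_idx] = {ts, now_ms};              // "Map" the timestamp to its info
    next_idx = (next_idx + 1) % kFrameMemoryLength;    // wraps, old entries are overwritten
  };

  for (uint32_t ts = 3000; ts < 3000 + 6 * 90; ts += 90)
    on_frame_to_decoder(ts, /*now_ms=*/ts / 90);

  for (const FrameInfo& fi : frame_infos)
    std::cout << fi.rtp_timestamp << " started at " << fi.decode_start_ms << "ms\n";
}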
./api/video_codecs/video_decoder_software_fallback_wrapper.cc
int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) {
switch (decoder_type_) {
case DecoderType::kNone:
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
case DecoderType::kHardware: {
int32_t ret = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
ret = hw_decoder_->Decode(input_image, missing_frames, render_time_ms);
if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) {
return ret;
}
// HW decoder returned WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE or
// initialization failed, fallback to software.
if (!InitFallbackDecoder()) {
return ret;
}
// Fallback decoder initialized, fall-through.
RTC_FALLTHROUGH();
}
case DecoderType::kFallback:
return fallback_decoder_->Decode(input_image, missing_frames, render_time_ms);
default:
RTC_NOTREACHED();
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
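The hardware-to-software fallback above boils down to: try the hardware decoder first, and if it asks for a software fallback, lazily create the software decoder and keep using it from then on. A self-contained toy sketch of that pattern (the enum and decoder classes are stand-ins, not the WebRTC types):

#include <iostream>
#include <memory>

enum class DecodeResult { kOk, kFallbackToSoftware, kError };

struct Decoder {
  virtual ~Decoder() = default;
  virtual DecodeResult Decode(int frame_id) = 0;
};

struct HwDecoder : Decoder {
  DecodeResult Decode(int frame_id) override {
    // Pretend the HW decoder gives up on frame 2.
    return frame_id == 2 ? DecodeResult::kFallbackToSoftware : DecodeResult::kOk;
  }
};

struct SwDecoder : Decoder {
  DecodeResult Decode(int) override { return DecodeResult::kOk; }
};

struct FallbackWrapper : Decoder {
  std::unique_ptr<Decoder> hw = std::make_unique<HwDecoder>();
  std::unique_ptr<Decoder> sw;                          // created only when needed

  DecodeResult Decode(int frame_id) override {
    if (!sw) {
      DecodeResult r = hw->Decode(frame_id);
      if (r != DecodeResult::kFallbackToSoftware) return r;
      sw = std::make_unique<SwDecoder>();               // role of InitFallbackDecoder()
      std::cout << "switching to software decoder\n";
    }
    return sw->Decode(frame_id);                        // the software path from now on
  }
};

int main() {
  FallbackWrapper decoder;
  for (int i = 1; i <= 3; ++i)
    std::cout << "frame " << i << " -> " << int(decoder.Decode(i)) << "\n";
}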
// The org.webrtc Java-layer hardware decode function
public VideoCodecStatus AndroidVideoDecoder::decode(EncodedImage frame, DecodeInfo info)
int index;
try {
index = codec.dequeueInputBuffer(DEQUEUE_INPUT_TIMEOUT_US);
} catch (IllegalStateException e) {
Logging.e(TAG, "dequeueInputBuffer failed", e);
return VideoCodecStatus.ERROR;
}
if (index < 0) {
// Decoder is falling behind. No input buffers available.
// The decoder can't simply drop frames; it might lose a key frame.
Logging.e(TAG, "decode() - no HW buffers available; decoder falling behind");
return VideoCodecStatus.ERROR;
}
ByteBuffer buffer;
try {
buffer = codec.getInputBuffers()[index];
} catch (IllegalStateException e) {
Logging.e(TAG, "getInputBuffers failed", e);
return VideoCodecStatus.ERROR;
}
if (buffer.capacity() < size) {
Logging.e(TAG, "decode() - HW buffer too small");
return VideoCodecStatus.ERROR;
}
buffer.put(frame.buffer);
frameInfos.offer(new FrameInfo(SystemClock.elapsedRealtime(), frame.rotation));
try {
codec.queueInputBuffer(index, 0, size,
TimeUnit.NANOSECONDS.toMicros(frame.captureTimeNs), 0);
} catch (IllegalStateException e) {
Logging.e(TAG, "queueInputBuffer failed", e);
frameInfos.pollLast();
return VideoCodecStatus.ERROR;
}
if (keyFrameRequired) {
keyFrameRequired = false;
}
return VideoCodecStatus.OK;
//-----------------------------------------------------------------------
// At this point, our analysis of the path from the jitterbuffer into the decoder is essentially complete!
//-----------------------------------------------------------------------