

WebRTC Audio


WebRTC voice: overall framework

Figure 1: Overall voice framework diagram

As the figure shows, apart from libjingle, which handles p2p data transport, the audio processing framework consists mainly of the VOE (Voice Engine) and the Channel adaptation layer.

Figure 2: Sequence diagram for creating the communication channels. The diagram shows the complete local-side setup: the VOE is created starting from CreateMediaEngine_w, and the Channel adaptation layer is created by SetLocalDescription according to the SDP. Both processes are analyzed below.

VOE creation process

/* src/talk/app/webrtc/peerconnectionfactory.cc */
bool PeerConnectionFactory::Initialize() {
  ...
  default_allocator_factory_ = PortAllocatorFactory::Create(worker_thread_);
  ...
  cricket::MediaEngineInterface* media_engine =
      worker_thread_->Invoke<cricket::MediaEngineInterface*>(rtc::Bind(
          &PeerConnectionFactory::CreateMediaEngine_w, this));
  // Invoke is a helper that runs CreateMediaEngine_w on worker_thread_.
  ...
  channel_manager_.reset(
      new cricket::ChannelManager(media_engine, worker_thread_));
  ...
}

cricket::MediaEngineInterface* PeerConnectionFactory::CreateMediaEngine_w() {
  ASSERT(worker_thread_ == rtc::Thread::Current());
  return cricket::WebRtcMediaEngineFactory::Create(
      default_adm_.get(), video_encoder_factory_.get(),
      video_decoder_factory_.get());
}

MediaEngineInterface* WebRtcMediaEngineFactory::Create(
    webrtc::AudioDeviceModule* adm,
    WebRtcVideoEncoderFactory* encoder_factory,
    WebRtcVideoDecoderFactory* decoder_factory) {
  return CreateWebRtcMediaEngine(adm, encoder_factory, decoder_factory);
}

// CreateWebRtcMediaEngine actually returns a WebRtcMediaEngine2, which derives from
// the CompositeMediaEngine template class; implemented in webrtcmediaengine.cc.
namespace cricket {
class WebRtcMediaEngine2
    : public CompositeMediaEngine<WebRtcVoiceEngine, WebRtcVideoEngine2> {
 public:
  WebRtcMediaEngine2(webrtc::AudioDeviceModule* adm,
                     WebRtcVideoEncoderFactory* encoder_factory,
                     WebRtcVideoDecoderFactory* decoder_factory) {
    voice_.SetAudioDeviceModule(adm);
    video_.SetExternalDecoderFactory(decoder_factory);
    video_.SetExternalEncoderFactory(encoder_factory);
  }
};
}  // namespace cricket

template <class VOICE, class VIDEO>
class CompositeMediaEngine : public MediaEngineInterface {
 public:
  virtual ~CompositeMediaEngine() {}
  virtual bool Init(rtc::Thread* worker_thread) {
    if (!voice_.Init(worker_thread))  // voice_ here is WebRtcVoiceEngine
      return false;
    video_.Init();  // video_ is WebRtcVideoEngine2, analyzed elsewhere
    return true;
  }
  ...
}
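The CompositeMediaEngine shown above is plain template composition: the composite owns one voice-engine member and one video-engine member and forwards lifecycle calls to both. A minimal, self-contained sketch of that idea, using hypothetical stand-in engine classes rather than the real WebRtcVoiceEngine/WebRtcVideoEngine2:

#include <iostream>

// Hypothetical stand-ins for the real voice/video engines.
struct FakeVoiceEngine {
  bool Init() { std::cout << "voice init\n"; return true; }
};
struct FakeVideoEngine {
  bool Init() { std::cout << "video init\n"; return true; }
};

// Same shape as cricket::CompositeMediaEngine: compose two engines and
// forward Init() to both behind one interface.
template <class VOICE, class VIDEO>
class SimpleCompositeEngine {
 public:
  bool Init() { return voice_.Init() && video_.Init(); }
 protected:
  VOICE voice_;
  VIDEO video_;
};

int main() {
  SimpleCompositeEngine<FakeVoiceEngine, FakeVideoEngine> engine;
  return engine.Init() ? 0 : 1;
}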

The related class diagram is shown in Figure 3: VOE engine class diagram.

WebRtcVoiceEngine::WebRtcVoiceEngine()
    : voe_wrapper_(new VoEWrapper()),   // proxy for the underlying Voice Engine; upper layers call through it
      tracing_(new VoETraceWrapper()),  // tracing/debugging helper
      adm_(NULL),
      log_filter_(SeverityToFilter(kDefaultLogSeverity)),
      is_dumping_aec_(false) {
  Construct();
}

The classes and methods involved in constructing WebRtcVoiceEngine are shown below:

// VoEWrapper is in effect a proxy for VoiceEngine --> voice_engine_impl.cc
/* webrtcvoe.h */
class VoEWrapper {
 public:
  VoEWrapper()
      : engine_(webrtc::VoiceEngine::Create()),
        processing_(engine_),
        base_(engine_),
        codec_(engine_),
        dtmf_(engine_),
        hw_(engine_),
        neteq_(engine_),
        network_(engine_),
        rtp_(engine_),
        sync_(engine_),
        volume_(engine_) {}
  ...
};

/* webrtcvoiceengine.cc */
void WebRtcVoiceEngine::Construct() {
  ...
  // Register the engine-state observer so that low-level errors are reported
  // back to WebRtcVoiceEngine.
  if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
    LOG_RTCERR0(RegisterVoiceEngineObserver);
  }
  ...
  // Load our audio codec list.
  ConstructCodecs();  // walk the kCodecPrefs table (highest quality first) and take the best codec the lower layer supports
  ...
  options_ = GetDefaultEngineOptions();  // default audio options: echo cancellation, noise suppression, AGC, dump flags, etc.
  ...
}

// WebRtcVoiceEngine initialization
bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
  ...
  bool res = InitInternal();
  ...
}

bool WebRtcVoiceEngine::InitInternal() {
  ...
  // Initialize the underlying AudioDeviceModule. In this WebRTC demo adm_ is NULL here.
  // voe_wrapper_ is the proxy for VoiceEngine, implemented in voice_engine_impl.cc.
  // VoiceEngineImpl derives from VoiceEngine, and Create() actually builds a VoiceEngineImpl
  // (its base interface is implemented in voe_base_impl.cc), which is handed to VoEWrapper.
  // So voe_wrapper_->base() is really the VoiceEngineImpl object; its Init is analyzed below.
  if (voe_wrapper_->base()->Init(adm_) == -1) {
    ...
  }
  ...
}

/* voe_base_impl.cc */
int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
                      AudioProcessing* audioproc) {
  ...
  if (external_adm == nullptr) {  // as noted above, the demo passes null
#if !defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
    return -1;
#else
    // Create the internal ADM implementation.
    // Builds the local AudioDeviceModuleImpl object, which captures and plays audio
    // through AudioRecord and AudioTrack on Android.
    shared_->set_audio_device(AudioDeviceModuleImpl::Create(
        VoEId(shared_->instance_id(), -1), shared_->audio_device_layer()));
    if (shared_->audio_device() == nullptr) {
      shared_->SetLastError(VE_NO_MEMORY, kTraceCritical,
                            "Init() failed to create the ADM");
      return -1;
    }
#endif  // WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE
  } else {
    // Use the already existing external ADM implementation.
    shared_->set_audio_device(external_adm);
    LOG_F(LS_INFO)
        << "An external ADM implementation will be used in VoiceEngine";
  }

  // Register the ADM to the process thread, which will drive the error
  // callback mechanism
  if (shared_->process_thread()) {
    shared_->process_thread()->RegisterModule(shared_->audio_device());
  }

  bool available = false;

  // --------------------
  // Reinitialize the ADM

  // Register an event observer for the audio device.
  if (shared_->audio_device()->RegisterEventObserver(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register event observer for the ADM");
  }

  // Register the AudioTransport implementation with the audio device; this is
  // what moves audio data between the device and the voice engine.
  if (shared_->audio_device()->RegisterAudioCallback(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register audio callback for the ADM");
  }

  // Initialize the audio device itself.
  if (shared_->audio_device()->Init() != 0) {
    shared_->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
                          "Init() failed to initialize the ADM");
    return -1;
  }
  ...
}

AudioDeviceModule* AudioDeviceModuleImpl::Create(const int32_t id,
                                                 const AudioLayer audioLayer) {
  ...
  RefCountImpl<AudioDeviceModuleImpl>* audioDevice =
      new RefCountImpl<AudioDeviceModuleImpl>(id, audioLayer);

  // Check whether the platform is supported.
  if (audioDevice->CheckPlatform() == -1) {
    delete audioDevice;
    return NULL;
  }

  // Pick the implementation for the current platform. On Android this goes through
  // JNI (audio_record_jni.cc, audio_track_jni.cc) and uses
  // org/webrtc/voiceengine/WebRtcAudioRecord.java and
  // org/webrtc/voiceengine/WebRtcAudioTrack.java for capture and playout.
  if (audioDevice->CreatePlatformSpecificObjects() == -1) {
    delete audioDevice;
    return NULL;
  }

  // Attach the shared audio buffer; audio data is then handed over through the
  // registered AudioTransport callback.
  if (audioDevice->AttachAudioBuffer() == -1) {
    delete audioDevice;
    return NULL;
  }
  ...
}
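The ordering in VoEBaseImpl::Init is the important part: create (or adopt) the ADM, register observers and the AudioTransport callback, then initialize the device. A minimal sketch of that lifecycle against a hypothetical device interface; the class names below are illustrative, not the real WebRTC API:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical mirror of the AudioTransport role: the engine side that
// receives captured audio (and, in the real stack, also supplies playout audio).
class FakeAudioTransport {
 public:
  virtual ~FakeAudioTransport() {}
  virtual void OnRecordedData(const int16_t* samples, size_t count) = 0;
};

// Hypothetical mirror of the ADM role: owns the platform device and pushes
// captured data into the registered callback.
class FakeAudioDevice {
 public:
  int RegisterAudioCallback(FakeAudioTransport* cb) { callback_ = cb; return 0; }
  int Init() { initialized_ = true; return 0; }
  // In the real ADM the platform recorder thread drives this.
  void SimulateCapture(const int16_t* samples, size_t count) {
    if (initialized_ && callback_) callback_->OnRecordedData(samples, count);
  }
 private:
  FakeAudioTransport* callback_ = nullptr;
  bool initialized_ = false;
};

class FakeVoiceEngineBase : public FakeAudioTransport {
 public:
  int Init(FakeAudioDevice* adm) {
    // Same order as VoEBaseImpl::Init: register the callback before Init().
    if (adm->RegisterAudioCallback(this) != 0) return -1;
    return adm->Init();
  }
  void OnRecordedData(const int16_t*, size_t count) override {
    std::printf("got %zu captured samples\n", count);
  }
};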

Channel creation process

In the sequence diagram of Figure 2, SetLocalDescription calls CreateChannels, which creates the channels required by the session according to the SDP. This opens the transport channels for audio, video, and user data. The audio channel creation is examined in detail below; the others are similar. The related class diagram accompanies the sequence diagram.

/* webrtcsession.cc */
bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
  // Creating the media channels and transport proxies.
  // Create the VoiceChannel according to the SDP.
  const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(desc);
  if (voice && !voice->rejected && !voice_channel_) {
    if (!CreateVoiceChannel(voice)) {
      LOG(LS_ERROR) << "Failed to create voice channel.";
      return false;
    }
  }
  // Create the VideoChannel according to the SDP.
  const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
  if (video && !video->rejected && !video_channel_) {
    if (!CreateVideoChannel(video)) {
      LOG(LS_ERROR) << "Failed to create video channel.";
      return false;
    }
  }
  // Create the DataChannel according to the SDP.
  const cricket::ContentInfo* data = cricket::GetFirstDataContent(desc);
  if (data_channel_type_ != cricket::DCT_NONE && data && !data->rejected &&
      !data_channel_) {
    if (!CreateDataChannel(data)) {
      LOG(LS_ERROR) << "Failed to create data channel.";
      return false;
    }
  }
  ...
  return true;
}

// Only the VoiceChannel creation is analyzed here.
bool WebRtcSession::CreateVoiceChannel(const cricket::ContentInfo* content) {
  // channel_manager_ is the ChannelManager created in peerconnectionfactory.cc Initialize().
  // media_controller_ is the MediaController created in WebRtcSession::Initialize();
  // it wraps the Call object so the Call can be shared.
  // transport_controller() returns the TransportController created in the constructor of
  // WebRtcSession's base class BaseSession; BaseSession is the part that talks to libjingle.
  voice_channel_.reset(channel_manager_->CreateVoiceChannel(
      media_controller_.get(), transport_controller(), content->name, true,
      audio_options_));
  if (!voice_channel_) {
    return false;
  }
  ...
  return true;
}

/* webrtc/src/talk/session/media/channelmanager.cc */
VoiceChannel* ChannelManager::CreateVoiceChannel(
    webrtc::MediaControllerInterface* media_controller,
    TransportController* transport_controller,
    const std::string& content_name,
    bool rtcp,
    const AudioOptions& options) {
  // Invoke effectively runs ChannelManager::CreateVoiceChannel_w on worker_thread_.
  return worker_thread_->Invoke<VoiceChannel*>(
      Bind(&ChannelManager::CreateVoiceChannel_w, this, media_controller,
           transport_controller, content_name, rtcp, options));
}

VoiceChannel* ChannelManager::CreateVoiceChannel_w(
    webrtc::MediaControllerInterface* media_controller,
    TransportController* transport_controller,
    const std::string& content_name,
    bool rtcp,
    const AudioOptions& options) {
  ...
  // media_engine_ is the WebRtcMediaEngine2 created in peerconnectionfactory.cc;
  // this ends up calling WebRtcVoiceEngine::CreateChannel.
  VoiceMediaChannel* media_channel =
      media_engine_->CreateChannel(media_controller->call_w(), options);
  if (!media_channel)
    return nullptr;
  // VoiceChannel derives from BaseChannel: it receives data from libjingle and
  // sends data to the remote side through libjingle.
  VoiceChannel* voice_channel =
      new VoiceChannel(worker_thread_, media_engine_.get(), media_channel,
                       transport_controller, content_name, rtcp);
  if (!voice_channel->Init()) {
    delete voice_channel;
    return nullptr;
  }
  voice_channels_.push_back(voice_channel);
  return voice_channel;
}

VoiceMediaChannel* WebRtcVoiceEngine::CreateChannel(webrtc::Call* call,
                                                    const AudioOptions& options) {
  WebRtcVoiceMediaChannel* ch = new WebRtcVoiceMediaChannel(this, options, call);
  if (!ch->valid()) {
    delete ch;
    return nullptr;
  }
  return ch;
}

WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine* engine,
                                                 const AudioOptions& options,
                                                 webrtc::Call* call)
    : engine_(engine),
      voe_channel_(engine->CreateMediaVoiceChannel()),  // calls WebRtcVoiceEngine::CreateMediaVoiceChannel()
      ...
{
  // Register this WebRtcVoiceMediaChannel with WebRtcVoiceEngine, which keeps it
  // in its channel list.
  engine->RegisterChannel(this);
  ...
  // Register this WebRtcVoiceMediaChannel with the newly created low-level channel.
  // WebRtcVoiceMediaChannel acts as the bridge: the low-level channel sends and
  // receives data through the Transport registered here.
  ConfigureSendChannel(voe_channel());
  SetOptions(options);
}

int WebRtcVoiceEngine::CreateVoiceChannel(VoEWrapper* voice_engine_wrapper) {
  // VoEWrapper wraps VoiceEngine and effectively acts as its proxy.
  // voice_engine_impl.cc shows that VoiceEngine is in turn a wrapper over
  // VoiceEngineImpl, so voice_engine_wrapper->base() yields the VoiceEngineImpl object.
  return voice_engine_wrapper->base()->CreateChannel(voe_config_);
}

/* voe_base_impl.cc */
int VoEBaseImpl::CreateChannel() {
  ...
  // Create the Channel object through the voice engine's internal ChannelManager.
  voe::ChannelOwner channel_owner = shared_->channel_manager().CreateChannel();
  return InitializeChannel(&channel_owner);
}

/* android/webrtc/src/webrtc/voice_engine/channel_manager.cc */
ChannelOwner ChannelManager::CreateChannel() {
  return CreateChannelInternal(config_);
}

ChannelOwner ChannelManager::CreateChannelInternal(const Config& config) {
  Channel* channel;
  // Create the new Channel object.
  Channel::CreateChannel(channel, ++last_channel_id_, instance_id_,
                         event_log_.get(), config);
  ChannelOwner channel_owner(channel);

  CriticalSectionScoped crit(lock_.get());
  // The ChannelManager keeps track of every channel it creates.
  channels_.push_back(channel_owner);
  // Return the wrapping ChannelOwner.
  return channel_owner;
}

Voice sending flow

Capture

In the Android WebRTC demo, audio is still captured through the system AudioRecord API, wrapped by org/webrtc/voiceengine/WebRtcAudioRecord.java. As described for VoEBaseImpl::Init above, a data-transport callback is registered with the AudioDeviceModule:

int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
                      AudioProcessing* audioproc) {
  ...
  // Register the AudioTransport implementation
  if (shared_->audio_device()->RegisterAudioCallback(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register audio callback for the ADM");
  }
  ...
}

int32_t AudioDeviceModuleImpl::RegisterAudioCallback(AudioTransport* audioCallback) {
  CriticalSectionScoped lock(&_critSectAudioCb);
  // The VoEBaseImpl implementation ends up registered in the device's AudioDeviceBuffer.
  _audioDeviceBuffer.RegisterAudioCallback(audioCallback);
  return 0;
}

So recorded audio flows along the following chain, ending in the AudioTransport callback implemented by VoEBaseImpl (the same callback also supplies playout data):

nativeDataIsRecorded (org/webrtc/voiceengine/WebRtcAudioRecord.java)
---> AudioRecordJni::DataIsRecorded (audio_record_jni.cc)
---> OnDataIsRecorded
---> AudioDeviceBuffer::DeliverRecordedData
---> AudioTransport::RecordedDataIsAvailable
---> VoEBaseImpl::RecordedDataIsAvailable (voe_base_impl.cc)
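As a rough illustration of the receiving end of that chain, the sketch below shows a hypothetical AudioTransport-style sink with the same RecordedDataIsAvailable signature that appears in the next section; it merely inspects the 10 ms block instead of running the real VoEBaseImpl/APM pipeline:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical sink mirroring the AudioTransport capture callback shown below.
// The real implementation (VoEBaseImpl) forwards the block to ProcessRecordedDataWithAPM.
class RecordedDataSink {
 public:
  int32_t RecordedDataIsAvailable(const void* audio_samples,
                                  size_t n_samples,
                                  size_t n_bytes_per_sample,
                                  uint8_t n_channels,
                                  uint32_t samples_per_sec,
                                  uint32_t total_delay_ms,
                                  int32_t clock_drift,
                                  uint32_t mic_level,
                                  bool key_pressed,
                                  uint32_t& new_mic_level) {
    (void)audio_samples; (void)clock_drift; (void)key_pressed; (void)samples_per_sec;
    // 10 ms cadence: n_samples should equal samples_per_sec / 100.
    std::printf("10 ms block: %zu samples x %u ch, %zu bytes/sample, delay %u ms\n",
                n_samples, static_cast<unsigned>(n_channels),
                n_bytes_per_sample, static_cast<unsigned>(total_delay_ms));
    new_mic_level = mic_level;  // report "no change" to AGC
    return 0;
  }
};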

Processing

/* voe_base_impl.cc */
int32_t VoEBaseImpl::RecordedDataIsAvailable(
    const void* audioSamples, size_t nSamples, size_t nBytesPerSample,
    uint8_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS,
    int32_t clockDrift, uint32_t micLevel, bool keyPressed,
    uint32_t& newMicLevel) {
  newMicLevel = static_cast<uint32_t>(ProcessRecordedDataWithAPM(
      nullptr, 0, audioSamples, samplesPerSec, nChannels, nSamples,
      totalDelayMS, clockDrift, micLevel, keyPressed));
  return 0;
}

// Data pulled up from the Java layer is fed straight into ProcessRecordedDataWithAPM.
int VoEBaseImpl::ProcessRecordedDataWithAPM(
    const int voe_channels[], int number_of_voe_channels,
    const void* audio_data, uint32_t sample_rate, uint8_t number_of_channels,
    size_t number_of_frames, uint32_t audio_delay_milliseconds,
    int32_t clock_drift, uint32_t volume, bool key_pressed) {
  ...
  // Adjust the microphone level.
  if (volume != 0) {
    // Scale from ADM to VoE level range
    if (shared_->audio_device()->MaxMicrophoneVolume(&max_volume) == 0) {
      if (max_volume) {
        voe_mic_level = static_cast<uint16_t>(
            (volume * kMaxVolumeLevel + static_cast<int>(max_volume / 2)) /
            max_volume);
      }
    }
    // We learned that on certain systems (e.g linux) the voe_mic_level
    // can be greater than the maxVolumeLevel therefore
    // we are going to cap the voe_mic_level to the maxVolumeLevel
    // and change the maxVolume to volume if it turns out that
    // the voe_mic_level is indeed greater than the maxVolumeLevel.
    if (voe_mic_level > kMaxVolumeLevel) {
      voe_mic_level = kMaxVolumeLevel;
      max_volume = volume;
    }
  }

  // A series of processing steps happens here: optional file recording,
  // resampling, echo cancellation, AGC, and so on.
  shared_->transmit_mixer()->PrepareDemux(
      audio_data, number_of_frames, number_of_channels, sample_rate,
      static_cast<uint16_t>(audio_delay_milliseconds), clock_drift,
      voe_mic_level, key_pressed);

  // Copy the audio frame to each sending channel and perform
  // channel-dependent operations (file mixing, mute, etc.), encode and
  // packetize+transmit the RTP packet. When |number_of_voe_channels| == 0,
  // do the operations on all the existing VoE channels; otherwise the
  // operations will be done on specific channels.
  if (number_of_voe_channels == 0) {
    shared_->transmit_mixer()->DemuxAndMix();
    shared_->transmit_mixer()->EncodeAndSend();
  } else {
    shared_->transmit_mixer()->DemuxAndMix(voe_channels, number_of_voe_channels);
    shared_->transmit_mixer()->EncodeAndSend(voe_channels,
                                             number_of_voe_channels);
  }
  ...
  // Return 0 to indicate no change on the volume.
  return 0;
}
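The ADM-to-VoE level scaling above is a simple proportional mapping with rounding. A standalone sketch of the same arithmetic; kMaxVolumeLevel is treated here as 255, which is an assumption about this codebase's constant:

#include <cstdint>
#include <cstdio>

// Mirrors the scaling in ProcessRecordedDataWithAPM:
//   voe_level = round(volume * kMaxVolumeLevel / max_volume)
// kMaxVolumeLevel is assumed to be 255 here.
uint16_t ScaleAdmToVoeLevel(uint32_t volume, uint32_t max_volume) {
  const uint32_t kMaxVolumeLevel = 255;
  if (max_volume == 0) return 0;
  uint32_t voe_level = (volume * kMaxVolumeLevel + max_volume / 2) / max_volume;
  if (voe_level > kMaxVolumeLevel) voe_level = kMaxVolumeLevel;  // cap, as in the original
  return static_cast<uint16_t>(voe_level);
}

int main() {
  // Example: a device reporting mic volume 60 out of a max of 100
  // maps to a VoE level of (60*255 + 50)/100 = 153.
  std::printf("%u\n", static_cast<unsigned>(ScaleAdmToVoeLevel(60, 100)));
  return 0;
}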

Encoding


// shared_->transmit_mixer()->EncodeAndSend()
// encodes the data; once encoded, packetization and sending are triggered.
void TransmitMixer::EncodeAndSend(const int voe_channels[],
                                  int number_of_voe_channels) {
  for (int i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr && channel_ptr->Sending())  // only channels currently in the sending state
      channel_ptr->EncodeAndSend();
  }
}

uint32_t Channel::EncodeAndSend() {
  ...
  // Encode (compress) the audio data.
  if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::EncodeAndSend() ACM encoding failed");
    return 0xFFFFFFFF;
  }
  ...
}

int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
  InputData input_data;
  CriticalSectionScoped lock(acm_crit_sect_.get());
  // Pre-encode processing: resample if needed and wrap the data in InputData.
  int r = Add10MsDataInternal(audio_frame, &input_data);
  // Then encode.
  return r < 0 ? r : Encode(input_data);
}

int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
  ...
  // Get the encoder currently selected by the CodecManager.
  AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
  ...
  // Encode.
  encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
  encoded_info = audio_encoder->Encode(
      rtp_timestamp, input_data.audio, input_data.length_per_channel,
      encode_buffer_.size(), encode_buffer_.data());
  encode_buffer_.SetSize(encoded_info.encoded_bytes);
  ...
  {
    CriticalSectionScoped lock(callback_crit_sect_.get());
    if (packetization_callback_) {
      // Trigger sending. packetization_callback_ is the Channel, which implements
      // AudioPacketizationCallback and registers itself in Channel::Init() via
      // audio_coding_->RegisterTransportCallback(this).
      packetization_callback_->SendData(
          frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
          encode_buffer_.data(), encode_buffer_.size(),
          my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
                                                       : nullptr);
    }
    if (vad_callback_) {
      // Voice-activity-detection callback.
      vad_callback_->InFrameType(frame_type);
    }
  }
}
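The hand-off from encoder to packetizer is a plain observer: the Channel registers itself as the transport callback, and the ACM invokes SendData whenever the encoder emits a packet's worth of payload. A compact, hypothetical sketch of that registration pattern; the types below are stand-ins, not the real ACM classes:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical mirror of AudioPacketizationCallback: whoever registers gets
// called with each encoded payload.
class PacketizationCallback {
 public:
  virtual ~PacketizationCallback() {}
  virtual int SendData(uint8_t payload_type, uint32_t timestamp,
                       const uint8_t* payload, size_t size) = 0;
};

// Hypothetical mirror of the ACM side: "encode" 10 ms blocks and notify the callback.
class TinyAcm {
 public:
  void RegisterTransportCallback(PacketizationCallback* cb) { callback_ = cb; }
  int Add10MsData(const int16_t* pcm, size_t samples, uint32_t timestamp) {
    (void)pcm;
    std::vector<uint8_t> encoded(samples / 8, 0);  // stand-in "codec": 1 byte per 8 samples
    if (callback_)
      return callback_->SendData(/*payload_type=*/111, timestamp,
                                 encoded.data(), encoded.size());
    return 0;
  }
 private:
  PacketizationCallback* callback_ = nullptr;
};

// The Channel role: receives encoded payloads and would hand them to RTP packetization.
class TinyChannel : public PacketizationCallback {
 public:
  int SendData(uint8_t pt, uint32_t ts, const uint8_t*, size_t size) override {
    std::printf("packetize pt=%u ts=%u bytes=%zu\n",
                static_cast<unsigned>(pt), static_cast<unsigned>(ts), size);
    return 0;
  }
};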

Packetization

After encoding, the audio data triggers packetization and sending through AudioPacketizationCallback::SendData, which is implemented by Channel. SendData looks like this:

/* android/webrtc/src/webrtc/voice_engine/channel.cc */
int32_t Channel::SendData(FrameType frameType, uint8_t payloadType,
                          uint32_t timeStamp, const uint8_t* payloadData,
                          size_t payloadSize,
                          const RTPFragmentationHeader* fragmentation) {
  ...
  // RTP packetization and sending.
  if (_rtpRtcpModule->SendOutgoingData(
          (FrameType&)frameType, payloadType, timeStamp,
          // Leaving the time when this frame was
          // received from the capture device as
          // undefined for voice for now.
          -1, payloadData, payloadSize, fragmentation) == -1) {
    _engineStatisticsPtr->SetLastError(
        VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
        "Channel::SendData() failed to send data to RTP/RTCP module");
    return -1;
  }
  ...
}

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc */
// RTP packetization and sending are ultimately done by RTPSender.
int32_t ModuleRtpRtcpImpl::SendOutgoingData(
    FrameType frame_type, int8_t payload_type, uint32_t time_stamp,
    int64_t capture_time_ms, const uint8_t* payload_data, size_t payload_size,
    const RTPFragmentationHeader* fragmentation,
    const RTPVideoHeader* rtp_video_hdr) {
  rtcp_sender_.SetLastRtpTime(time_stamp, capture_time_ms);
  if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
    rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
  }
  return rtp_sender_.SendOutgoingData(
      frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
      payload_size, fragmentation, rtp_video_hdr);
}

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_sender.cc */
int32_t RTPSender::SendOutgoingData(FrameType frame_type, int8_t payload_type,
                                    uint32_t capture_timestamp,
                                    int64_t capture_time_ms,
                                    const uint8_t* payload_data,
                                    size_t payload_size,
                                    const RTPFragmentationHeader* fragmentation,
                                    const RTPVideoHeader* rtp_hdr) {
  ...
  // Determine whether the payload is audio or video.
  if (CheckPayloadType(payload_type, &video_type) != 0) {
    LOG(LS_ERROR) << "Don't send data with unknown payload type.";
    return -1;
  }
  // Audio: audio_ is an RTPSenderAudio, implemented in
  // android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc.
  ret_val = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
                              payload_data, payload_size, fragmentation);
  // Video:
  ret_val = video_->SendVideo(video_type, frame_type, payload_type,
                              capture_timestamp, capture_time_ms, payload_data,
                              payload_size, fragmentation, rtp_hdr);
}

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc */
int32_t RTPSenderAudio::SendAudio(const FrameType frameType,
                                  const int8_t payloadType,
                                  const uint32_t captureTimeStamp,
                                  const uint8_t* payloadData,
                                  const size_t dataSize,
                                  const RTPFragmentationHeader* fragmentation) {
  ...
  // The encoded audio is packetized here according to the RTP payload format.
  // The full flow is fairly involved and not analyzed in detail; see the source.
  ...
  // Send.
  return _rtpSender->SendToNetwork(dataBuffer, payloadSize, rtpHeaderLength, -1,
                                   kAllowRetransmission,
                                   RtpPacketSender::kHighPriority);
}
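For orientation, the 12-byte RTP fixed header that precedes the encoded payload is standardized in RFC 3550. A minimal standalone writer for just that fixed header (no extensions, no CSRCs) is sketched below; it is an illustration, not the WebRTC serializer:

#include <cstddef>
#include <cstdint>

// Writes a 12-byte RTP fixed header (RFC 3550, version 2, no padding,
// no extension, no CSRCs) into |buf|. Returns the header length.
size_t WriteRtpFixedHeader(uint8_t* buf, bool marker, uint8_t payload_type,
                           uint16_t sequence_number, uint32_t timestamp,
                           uint32_t ssrc) {
  buf[0] = 0x80;                                            // V=2, P=0, X=0, CC=0
  buf[1] = (marker ? 0x80 : 0x00) | (payload_type & 0x7F);  // M bit + payload type
  buf[2] = static_cast<uint8_t>(sequence_number >> 8);
  buf[3] = static_cast<uint8_t>(sequence_number & 0xFF);
  buf[4] = static_cast<uint8_t>(timestamp >> 24);
  buf[5] = static_cast<uint8_t>(timestamp >> 16);
  buf[6] = static_cast<uint8_t>(timestamp >> 8);
  buf[7] = static_cast<uint8_t>(timestamp);
  buf[8] = static_cast<uint8_t>(ssrc >> 24);
  buf[9] = static_cast<uint8_t>(ssrc >> 16);
  buf[10] = static_cast<uint8_t>(ssrc >> 8);
  buf[11] = static_cast<uint8_t>(ssrc);
  return 12;
}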

Sending

As the flow above shows, once the RTP packet has been built, RTPSender carries out the actual sending:

int32_t RTPSender::SendToNetwork(uint8_t* buffer, size_t payload_length,
                                 size_t rtp_header_length,
                                 int64_t capture_time_ms, StorageType storage,
                                 RtpPacketSender::Priority priority) {
  ...
  // After some timing bookkeeping and retransmission handling, send the packet directly.
  bool sent = SendPacketToNetwork(buffer, length);
  ...
  // Update the send statistics.
  UpdateRtpStats(buffer, length, rtp_header, false, false);
  ...
}

bool RTPSender::SendPacketToNetwork(const uint8_t* packet, size_t size) {
  int bytes_sent = -1;
  if (transport_) {
    // transport_ here is actually the Channel; Channel derives from Transport.
    /* In the Channel constructor:
       Channel::Channel(int32_t channelId, uint32_t instanceId,
                        RtcEventLog* const event_log, const Config& config) {
         RtpRtcp::Configuration configuration;
         configuration.audio = true;
         configuration.outgoing_transport = this;  // set the Transport
         configuration.audio_messages = this;
         configuration.receive_statistics = rtp_receive_statistics_.get();
         configuration.bandwidth_callback = rtcp_observer_.get();
         _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
       }

       The ModuleRtpRtcpImpl constructor then passes the configuration on to RTPSender:
       ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
           : rtp_sender_(configuration.audio, configuration.clock,
                         configuration.outgoing_transport,
                         configuration.audio_messages,
                         configuration.paced_sender,
                         configuration.transport_sequence_number_allocator,
                         configuration.transport_feedback_callback,
                         configuration.send_bitrate_observer,
                         configuration.send_frame_count_observer,
                         configuration.send_side_delay_observer),
             rtcp_sender_(configuration.audio, configuration.clock,
                          configuration.receive_statistics,
                          configuration.rtcp_packet_type_counter_observer),
             rtcp_receiver_(configuration.clock, configuration.receiver_only,
                            configuration.rtcp_packet_type_counter_observer,
                            configuration.bandwidth_callback,
                            configuration.intra_frame_callback,
                            configuration.transport_feedback_callback, this)
             ...
    */
    bytes_sent =
        transport_->SendRtp(packet, size) ? static_cast<int>(size) : -1;
  }
  ...
  return true;
}

The analysis above shows that the final send is performed by SendRtp in Channel:

bool Channel::SendRtp(const uint8_t* data, size_t len) {
  ...
  // _transportPtr is registered through Channel::RegisterExternalTransport(Transport& transport).
  // Recalling the channel-creation flow above, the WebRtcVoiceMediaChannel constructor in
  // webrtcvoiceengine.cc calls ConfigureSendChannel(voe_channel()):
  /*
  void WebRtcVoiceMediaChannel::ConfigureSendChannel(int channel) {
    // VoENetworkImpl looks up the Channel through its ChannelOwner and registers the Transport.
    if (engine()->voe()->network()->RegisterExternalTransport(channel, *this) == -1) {
      LOG_RTCERR2(RegisterExternalTransport, channel, this);
    }
    // Enable RTCP (for quality stats and feedback messages)
    EnableRtcp(channel);
    // Reset all recv codecs; they will be enabled via SetRecvCodecs.
    ResetRecvCodecs(channel);
    // Set RTP header extension for the new channel.
    SetChannelSendRtpHeaderExtensions(channel, send_extensions_);
  }
  */
  if (!_transportPtr->SendRtp(bufferToSendPtr, bufferLength)) {
    std::string transport_name =
        _externalTransport ? "external transport" : "WebRtc sockets";
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::SendPacket() RTP transmission using %s failed",
                 transport_name.c_str());
    return false;
  }
  ...
}

So the Transport registered with the Channel is in fact the WebRtcVoiceMediaChannel:

/* android/webrtc/src/talk/media/webrtc/webrtcvoiceengine.h */
class WebRtcVoiceMediaChannel : public VoiceMediaChannel,
                                public webrtc::Transport {
  ...
  // implements Transport interface
  bool SendRtp(const uint8_t* data, size_t len) override {
    rtc::Buffer packet(reinterpret_cast<const uint8_t*>(data), len,
                       kMaxRtpPacketLen);
    return VoiceMediaChannel::SendPacket(&packet);
  }
  ...
};

/* android/webrtc/src/talk/media/base/mediachannel.h */
class VoiceMediaChannel : public MediaChannel {
  ...
  // Base method to send packet using NetworkInterface.
  bool SendPacket(rtc::Buffer* packet) { return DoSendPacket(packet, false); }
  bool SendRtcp(rtc::Buffer* packet) { return DoSendPacket(packet, true); }

  // Sets the abstract interface class for sending RTP/RTCP data.
  virtual void SetInterface(NetworkInterface* iface) {
    rtc::CritScope cs(&network_interface_crit_);
    network_interface_ = iface;
  }

 private:
  bool DoSendPacket(rtc::Buffer* packet, bool rtcp) {
    rtc::CritScope cs(&network_interface_crit_);
    if (!network_interface_)
      return false;
    // network_interface_ is set via SetInterface; it is implemented by BaseChannel
    // (android/webrtc/src/talk/session/media/channel.h) and registered in BaseChannel::Init().
    return (!rtcp) ? network_interface_->SendPacket(packet)
                   : network_interface_->SendRtcp(packet);
  }
  ...
};

/* android/webrtc/src/talk/session/media/channel.h */
class BaseChannel : public rtc::MessageHandler,
                    public sigslot::has_slots<>,
                    public MediaChannel::NetworkInterface,
                    public ConnectionStatsGetter {};

/* android/webrtc/src/talk/session/media/channel.cc */
bool BaseChannel::Init() {
  ...
  // Set the TransportChannel for this BaseChannel.
  if (!SetTransport(content_name())) {
    return false;
  }
  // Both RTP and RTCP channels are set, we can call SetInterface on
  // media channel and it can set network options.
  media_channel_->SetInterface(this);
  return true;
}

bool BaseChannel::SendPacket(rtc::Buffer* packet, rtc::DiffServCodePoint dscp) {
  return SendPacket(false, packet, dscp);
}

bool BaseChannel::SendPacket(bool rtcp, rtc::Buffer* packet,
                             rtc::DiffServCodePoint dscp) {
  ...
  // Pick the TransportChannel that carries the data; it was set up in Init()
  // through SetTransport.
  TransportChannel* channel = (!rtcp || rtcp_mux_filter_.IsActive())
                                  ? transport_channel_
                                  : rtcp_transport_channel_;
  if (!channel || !channel->writable()) {
    return false;
  }
  ...
  int ret = channel->SendPacket(packet->data<char>(), packet->size(), options,
                                (secure() && secure_dtls()) ? PF_SRTP_BYPASS : 0);
}

bool BaseChannel::SetTransport(const std::string& transport_name) {
  // In effect: run SetTransport_w on the worker thread.
  return worker_thread_->Invoke<bool>(
      Bind(&BaseChannel::SetTransport_w, this, transport_name));
}

bool BaseChannel::SetTransport_w(const std::string& transport_name) {
  ...
  // First create the TransportChannel through the TransportController.
  // (TransportChannelImpl derives from TransportChannel, P2PTransportChannel derives from
  // TransportChannelImpl; the concrete implementation is P2PTransportChannel.)
  set_transport_channel(transport_controller_->CreateTransportChannel_w(
      transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTP));
  if (!transport_channel()) {
    return false;
  }
  ...
}

void BaseChannel::set_transport_channel(TransportChannel* new_tc) {
  TransportChannel* old_tc = transport_channel_;
  if (old_tc) {
    // Disconnect the old channel's signal handlers first,
    DisconnectFromTransportChannel(old_tc);
    // then destroy it to free resources.
    transport_controller_->DestroyTransportChannel_w(
        transport_name_, cricket::ICE_CANDIDATE_COMPONENT_RTP);
  }
  transport_channel_ = new_tc;

  if (new_tc) {
    // Hook up the signal handlers for the new channel.
    ConnectToTransportChannel(new_tc);
    for (const auto& pair : socket_options_) {
      new_tc->SetOption(pair.first, pair.second);
    }
  }
  // Tell the MediaChannel that the TransportChannel is now in place.
  SetReadyToSend(false, new_tc && new_tc->writable());
}

P2PTransportChannel::SendPacket involves the libjingle p2p implementation and is not analyzed further here. Combining the analysis above with Figure 1 gives a good picture of the whole WebRTC audio send path.

Voice receiving and playback flow

Receiving

As the yellow arrows in Figure 1 show, network data enters BaseChannel from libjingle.

// VoiceChannel::Init() calls BaseChannel::Init()
// ---> BaseChannel::Init()
// ---> bool BaseChannel::SetTransport(const std::string& transport_name)
// ---> bool BaseChannel::SetTransport_w(const std::string& transport_name)
// ---> void BaseChannel::set_transport_channel(TransportChannel* new_tc)
// ---> void BaseChannel::ConnectToTransportChannel(TransportChannel* tc)
/* TransportChannel fires the SignalReadPacket signal for every packet it receives;
   signals and slots are how these classes communicate. */
void BaseChannel::ConnectToTransportChannel(TransportChannel* tc) {
  ASSERT(worker_thread_ == rtc::Thread::Current());
  tc->SignalWritableState.connect(this, &BaseChannel::OnWritableState);
  // Every packet received from libjingle triggers BaseChannel::OnChannelRead.
  tc->SignalReadPacket.connect(this, &BaseChannel::OnChannelRead);
  tc->SignalReadyToSend.connect(this, &BaseChannel::OnReadyToSend);
}

void BaseChannel::OnChannelRead(TransportChannel* channel, const char* data,
                                size_t len, const rtc::PacketTime& packet_time,
                                int flags) {
  // OnChannelRead gets called from P2PSocket; now pass data to MediaEngine
  ASSERT(worker_thread_ == rtc::Thread::Current());
  // When using RTCP multiplexing we might get RTCP packets on the RTP
  // transport. We feed RTP traffic into the demuxer to determine if it is RTCP.
  bool rtcp = PacketIsRtcp(channel, data, len);
  rtc::Buffer packet(data, len);
  HandlePacket(rtcp, &packet, packet_time);
}

void BaseChannel::HandlePacket(bool rtcp, rtc::Buffer* packet,
                               const rtc::PacketTime& packet_time) {
  ...
  if (!rtcp) {
    // RTP packet; media_channel_ here is obviously the WebRtcVoiceMediaChannel.
    media_channel_->OnPacketReceived(packet, packet_time);
  } else {
    // RTCP packet.
    media_channel_->OnRtcpReceived(packet, packet_time);
  }
  ...
}
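The SignalReadPacket wiring above uses the sigslot library that ships with libjingle: a class that wants callbacks inherits sigslot::has_slots<> and connects member functions to signals. A small, self-contained example of that pattern with standalone types (not the real TransportChannel); the include path for sigslot.h is an assumption about the build setup:

#include <cstddef>
#include <cstdio>
#include "sigslot.h"  // bundled with libjingle/WebRTC (webrtc/base/sigslot.h in this era)

// A sender side exposing a signal with the same shape as "packet received".
class FakeTransport {
 public:
  sigslot::signal2<const char*, size_t> SignalReadPacket;
  void InjectPacket(const char* data, size_t len) { SignalReadPacket(data, len); }
};

// A receiver side: inherit has_slots<> and connect a member function.
class FakeChannel : public sigslot::has_slots<> {
 public:
  explicit FakeChannel(FakeTransport* t) {
    t->SignalReadPacket.connect(this, &FakeChannel::OnChannelRead);
  }
  void OnChannelRead(const char* data, size_t len) {
    (void)data;
    std::printf("received %zu bytes\n", len);
  }
};

int main() {
  FakeTransport transport;
  FakeChannel channel(&transport);
  transport.InjectPacket("rtp", 3);  // fires the signal, which calls OnChannelRead
  return 0;
}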

Depacketization

/* android/webrtc/src/talk/media/webrtc/webrtcvoiceengine.cc */
void WebRtcVoiceMediaChannel::OnPacketReceived(
    rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  // Forward packet to Call as well.
  const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp,
                                              packet_time.not_before);
  // Call::DeliverPacket ---> Call::DeliverRtp
  // ---> AudioReceiveStream::DeliverRtp for audio (estimates delay and the
  //      remote bitrate and updates the related state)
  // ---> VideoReceiveStream::DeliverRtp for video.
  call_->Receiver()->DeliverPacket(
      webrtc::MediaType::AUDIO,
      reinterpret_cast<const uint8_t*>(packet->data()), packet->size(),
      webrtc_packet_time);

  // Pick which channel to send this packet to. If this packet doesn't match
  // any multiplexed streams, just send it to the default channel. Otherwise,
  // send it to the specific decoder instance for that stream.
  int which_channel =
      GetReceiveChannelNum(ParseSsrc(packet->data(), packet->size(), false));
  if (which_channel == -1) {
    which_channel = voe_channel();
  }

  // Pass it off to the decoder: depacketize and decode.
  engine()->voe()->network()->ReceivedRTPPacket(
      which_channel, packet->data(), packet->size(),
      webrtc::PacketTime(packet_time.timestamp, packet_time.not_before));
}

/* android/webrtc/src/webrtc/audio/audio_receive_stream.cc */
bool AudioReceiveStream::DeliverRtp(const uint8_t* packet, size_t length,
                                    const PacketTime& packet_time) {
  ...
  // Parse the RTP header.
  if (!rtp_header_parser_->Parse(packet, length, &header)) {
    return false;
  }
  ...
  // Estimate delay and bitrate.
  remote_bitrate_estimator_->IncomingPacket(arrival_time_ms, payload_size,
                                            header, false);
}

/* android/webrtc/src/webrtc/voice_engine/voe_network_impl.cc */
int VoENetworkImpl::ReceivedRTPPacket(int channel, const void* data,
                                      size_t length,
                                      const PacketTime& packet_time) {
  ...
  // As shown earlier, channelPtr is the Channel in
  // android/webrtc/src/webrtc/voice_engine/channel.cc.
  return channelPtr->ReceivedRTPPacket((const int8_t*)data, length, packet_time);
}

/* android/webrtc/src/webrtc/voice_engine/channel.cc */
int32_t Channel::ReceivedRTPPacket(const int8_t* data, size_t length,
                                   const PacketTime& packet_time) {
  ...
  const uint8_t* received_packet = reinterpret_cast<const uint8_t*>(data);
  RTPHeader header;
  // Parse the RTP header.
  if (!rtp_header_parser_->Parse(received_packet, length, &header)) {
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
                 "Incoming packet: invalid RTP header");
    return -1;
  }
  ...
  // Depacketize and decode.
  return ReceivePacket(received_packet, length, header, in_order) ? 0 : -1;
}

bool Channel::ReceivePacket(const uint8_t* packet, size_t packet_length,
                            const RTPHeader& header, bool in_order) {
  ...
  const uint8_t* payload = packet + header.headerLength;
  ...
  // Hand the payload to the decoder side.
  return rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
                                          payload_specific, in_order);
}

Decoding

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc */
bool RtpReceiverImpl::IncomingRtpPacket(const RTPHeader& rtp_header,
                                        const uint8_t* payload,
                                        size_t payload_length,
                                        PayloadUnion payload_specific,
                                        bool in_order) {
  // Trigger our callbacks.
  CheckSSRCChanged(rtp_header);
  ...
  // Pass the payload on to the decoder side through a callback.
  int32_t ret_val = rtp_media_receiver_->ParseRtpPacket(
      &webrtc_rtp_header, payload_specific, is_red, payload, payload_length,
      clock_->TimeInMilliseconds(), is_first_packet_in_frame);
}

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.cc */
int32_t RTPReceiverAudio::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
                                         const PayloadUnion& specific_payload,
                                         bool is_red, const uint8_t* payload,
                                         size_t payload_length,
                                         int64_t timestamp_ms,
                                         bool is_first_packet) {
  ...
  return ParseAudioCodecSpecific(rtp_header, payload, payload_length,
                                 specific_payload.Audio, is_red);
}

int32_t RTPReceiverAudio::ParseAudioCodecSpecific(
    WebRtcRTPHeader* rtp_header, const uint8_t* payload_data,
    size_t payload_length, const AudioPayload& audio_specific, bool is_red) {
  // DTMF (telephone-event) handling.
  bool telephone_event_packet =
      TelephoneEventPayloadType(rtp_header->header.payloadType);
  if (telephone_event_packet) {
    CriticalSectionScoped lock(crit_sect_.get());

    // RFC 4733 2.3
    //  0                   1                   2                   3
    //  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |     event     |E|R| volume    |          duration             |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    if (payload_length % 4 != 0) {
      return -1;
    }
    size_t number_of_events = payload_length / 4;

    // sanity
    if (number_of_events >= MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS) {
      number_of_events = MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS;
    }
    for (size_t n = 0; n < number_of_events; ++n) {
      bool end = (payload_data[(4 * n) + 1] & 0x80) ? true : false;
      std::set<uint8_t>::iterator event =
          telephone_event_reported_.find(payload_data[4 * n]);
      if (event != telephone_event_reported_.end()) {
        // we have already seen this event
        if (end) {
          telephone_event_reported_.erase(payload_data[4 * n]);
        }
      } else {
        if (end) {
          // don't add if it's a end of a tone
        } else {
          telephone_event_reported_.insert(payload_data[4 * n]);
        }
      }
    }
  ...
  // Feed the payload to the decoder.
  // TODO(holmer): Break this out to have RED parsing handled generically.
  if (is_red && !(payload_data[0] & 0x80)) {
    // we receive only one frame packed in a RED packet: remove the RED wrapper
    rtp_header->header.payloadType = payload_data[0];
    // only one frame in the RED; strip the one byte to help NetEq
    return data_callback_->OnReceivedPayloadData(payload_data + 1,
                                                 payload_length - 1, rtp_header);
  }
  rtp_header->type.Audio.channel = audio_specific.channels;
  return data_callback_->OnReceivedPayloadData(payload_data, payload_length,
                                               rtp_header);
  // data_callback_ above is of type RtpData and is implemented by Channel.
}

/* android/webrtc/src/webrtc/voice_engine/channel.cc */
int32_t Channel::OnReceivedPayloadData(const uint8_t* payloadData,
                                       size_t payloadSize,
                                       const WebRtcRTPHeader* rtpHeader) {
  ...
  if (audio_coding_->IncomingPacket(payloadData, payloadSize, *rtpHeader) != 0) {
    _engineStatisticsPtr->SetLastError(
        VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
        "Channel::OnReceivedPayloadData() unable to push data to the ACM");
    return -1;
  }
  ...
}

/* android/webrtc/src/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc */
int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
                                          const size_t payload_length,
                                          const WebRtcRTPHeader& rtp_header) {
  return receiver_.InsertPacket(rtp_header, incoming_payload, payload_length);
}

/* android/webrtc/src/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc */
int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
                              const uint8_t* incoming_payload,
                              size_t length_payload) {
  ...
  // Pick a suitable decoder from the DecoderDatabase based on the RTP header.
  const Decoder* decoder = RtpHeaderToDecoder(*header, incoming_payload);
  ...
  // Synchronization-related handling.
  ...
  // android/webrtc/src/webrtc/modules/audio_coding/neteq/neteq_impl.cc
  // NetEQ is the core audio (jitter buffer / concealment) technology from GIPS,
  // later acquired by Google; see dedicated NetEQ write-ups for details.
  if (neteq_->InsertPacket(rtp_header, incoming_payload, length_payload,
                           receive_timestamp) < 0) {
    LOG(LERROR) << "AcmReceiver::InsertPacket "
                << static_cast<int>(header->payloadType)
                << " Failed to insert packet";
    return -1;
  }
}

/* android/webrtc/src/webrtc/modules/audio_coding/neteq/neteq_impl.cc */
int NetEqImpl::InsertPacket(const WebRtcRTPHeader& rtp_header,
                            const uint8_t* payload, size_t length_bytes,
                            uint32_t receive_timestamp) {
  ...
  int error = InsertPacketInternal(rtp_header, payload, length_bytes,
                                   receive_timestamp, false);
  ...
}

int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
                                    const uint8_t* payload, size_t length_bytes,
                                    uint32_t receive_timestamp,
                                    bool is_sync_packet) {
  ...
  // Wrap the data in Packet objects managed through a PacketList.
  PacketList packet_list;
  RTPHeader main_header;
  {
    // Convert to Packet.
    // Create |packet| within this separate scope, since it should not be used
    // directly once it's been inserted in the packet list. This way, |packet|
    // is not defined outside of this block.
    Packet* packet = new Packet;
    packet->header.markerBit = false;
    packet->header.payloadType = rtp_header.header.payloadType;
    packet->header.sequenceNumber = rtp_header.header.sequenceNumber;
    packet->header.timestamp = rtp_header.header.timestamp;
    packet->header.ssrc = rtp_header.header.ssrc;
    packet->header.numCSRCs = 0;
    packet->payload_length = length_bytes;
    packet->primary = true;
    packet->waiting_time = 0;
    packet->payload = new uint8_t[packet->payload_length];
    packet->sync_packet = is_sync_packet;
    if (!packet->payload) {
      LOG_F(LS_ERROR) << "Payload pointer is NULL.";
    }
    assert(payload);  // Already checked above.
    memcpy(packet->payload, payload, packet->payload_length);
    // Insert packet in a packet list.
    packet_list.push_back(packet);
    // Save main payloads header for later.
    memcpy(&main_header, &packet->header, sizeof(main_header));
  }

  // Handle DTMF: parse telephone events and queue them as DtmfEvent objects.
  PacketList::iterator it = packet_list.begin();
  while (it != packet_list.end()) {
    Packet* current_packet = (*it);
    assert(current_packet);
    assert(current_packet->payload);
    if (decoder_database_->IsDtmf(current_packet->header.payloadType)) {
      assert(!current_packet->sync_packet);  // We had a sanity check for this.
      DtmfEvent event;
      int ret = DtmfBuffer::ParseEvent(current_packet->header.timestamp,
                                       current_packet->payload,
                                       current_packet->payload_length, &event);
      if (ret != DtmfBuffer::kOK) {
        PacketBuffer::DeleteAllPackets(&packet_list);
        return kDtmfParsingError;
      }
      if (dtmf_buffer_->InsertEvent(event) != DtmfBuffer::kOK) {
        PacketBuffer::DeleteAllPackets(&packet_list);
        return kDtmfInsertError;
      }
      // TODO(hlundin): Let the destructor of Packet handle the payload.
      delete[] current_packet->payload;
      delete current_packet;
      it = packet_list.erase(it);
    } else {
      ++it;
    }
  }
  ...
  // Update bandwidth estimate, if the packet is not sync-packet.
  if (!packet_list.empty() && !packet_list.front()->sync_packet) {
    // The list can be empty here if we got nothing but DTMF payloads.
    AudioDecoder* decoder =
        decoder_database_->GetDecoder(main_header.payloadType);
    assert(decoder);  // Should always get a valid object, since we have
                      // already checked that the payload types are known.
    // (Most concrete decoders appear not to implement IncomingPacket.)
    decoder->IncomingPacket(packet_list.front()->payload,
                            packet_list.front()->payload_length,
                            packet_list.front()->header.sequenceNumber,
                            packet_list.front()->header.timestamp,
                            receive_timestamp);

    // Packets waiting to be decoded are placed in the PacketBuffer.
    const size_t buffer_length_before_insert =
        packet_buffer_->NumPacketsInBuffer();
    ret = packet_buffer_->InsertPacketList(&packet_list, *decoder_database_,
                                           &current_rtp_payload_type_,
                                           &current_cng_rtp_payload_type_);
    if (ret == PacketBuffer::kFlushed) {
      // Reset DSP timestamp etc. if packet buffer flushed.
      new_codec_ = true;
      update_sample_rate_and_channels = true;
    } else if (ret != PacketBuffer::kOK) {
      PacketBuffer::DeleteAllPackets(&packet_list);
      return kOtherError;
    }
  ...
}
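The telephone-event branch above works on fixed 4-byte event blocks as defined in RFC 4733: an 8-bit event code, an end bit, a reserved bit, a 6-bit volume, and a 16-bit duration. A standalone parser for one such block, purely for illustration (the real parsing lives in DtmfBuffer::ParseEvent):

#include <cstdint>

// One RFC 4733 telephone-event block (4 bytes on the wire).
struct TelephoneEvent {
  uint8_t event;      // e.g. 0-11 for DTMF digits 0-9, * and #
  bool end;           // E bit: this block marks the end of the tone
  uint8_t volume;     // 6 bits, the tone level
  uint16_t duration;  // in RTP timestamp units
};

// Parses the 4-byte block starting at |data|; |data| must hold at least 4 bytes.
TelephoneEvent ParseTelephoneEvent(const uint8_t* data) {
  TelephoneEvent ev;
  ev.event = data[0];
  ev.end = (data[1] & 0x80) != 0;  // top bit of byte 1 (same test as the code above)
  ev.volume = data[1] & 0x3F;      // low 6 bits of byte 1 (R bit skipped)
  ev.duration = static_cast<uint16_t>((data[2] << 8) | data[3]);
  return ev;
}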

PCM data is then pulled out through NetEqImpl::GetAudio:

int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
                        size_t* samples_per_channel, int* num_channels,
                        NetEqOutputType* type) {
  ...
  int error = GetAudioInternal(max_length, output_audio, samples_per_channel,
                               num_channels);
  ...
}

int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
                                size_t* samples_per_channel, int* num_channels) {
  ...
  // Decode.
  int decode_return_value =
      Decode(&packet_list, &operation, &length, &speech_type);
  ...
}

int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
                      int* decoded_length,
                      AudioDecoder::SpeechType* speech_type) {
  ...
  // Get the currently active decoder.
  AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
  ...
  // Decode.
  if (*operation == kCodecInternalCng) {
    RTC_DCHECK(packet_list->empty());
    return_value = DecodeCng(decoder, decoded_length, speech_type);
  } else {
    return_value = DecodeLoop(packet_list, *operation, decoder, decoded_length,
                              speech_type);
  }
  ...
}

So what GetAudio ultimately returns is PCM data.

Playback

org/webrtc/voiceengine/WebRtcAudioTrack.java runs an AudioTrackThread playback thread that keeps pulling PCM data from native code and feeds it into an AudioTrack for playout.

nativeGetPlayoutData (WebRtcAudioTrack.java)
--> AudioTrackJni::GetPlayoutData (audio_track_jni.cc)
--> AudioTrackJni::OnGetPlayoutData(size_t length) (audio_track_jni.cc)

void AudioTrackJni::OnGetPlayoutData(size_t length) {
  ...
  // Pull decoded data (in 16-bit PCM format) from jitter buffer.
  int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
  if (samples <= 0) {
    ALOGE("AudioDeviceBuffer::RequestPlayoutData failed!");
    return;
  }
  RTC_DCHECK_EQ(static_cast<size_t>(samples), frames_per_buffer_);
  // Copy decoded data into common byte buffer to ensure that it can be
  // written to the Java based audio track.
  samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
  ...
}

int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
  ...
  if (_ptrCbAudioTransport) {
    uint32_t res(0);
    int64_t elapsed_time_ms = -1;
    int64_t ntp_time_ms = -1;
    res = _ptrCbAudioTransport->NeedMorePlayData(
        _playSamples, playBytesPerSample, playChannels, playSampleRate,
        &_playBuffer[0], nSamplesOut, &elapsed_time_ms, &ntp_time_ms);
    if (res != 0) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   "NeedMorePlayData() failed");
    }
  }
  ...
}
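The playout side is therefore a pull model: the device thread asks for exactly one buffer's worth of PCM every period, and the engine fills it via a NeedMorePlayData-style callback. A hypothetical sketch of that loop with simplified types and an assumed 10 ms period:

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <thread>
#include <vector>

// Hypothetical source role: fills a playout buffer on demand
// (what VoEBaseImpl::NeedMorePlayData does in the real stack).
class PlayoutSource {
 public:
  virtual ~PlayoutSource() {}
  virtual size_t NeedMorePlayData(size_t samples_per_channel, int channels,
                                  int sample_rate_hz, int16_t* out) = 0;
};

// Hypothetical device loop: every 10 ms pull one buffer and "write" it to the device.
void RunPlayoutLoop(PlayoutSource* source, int sample_rate_hz, int channels,
                    int iterations) {
  const size_t samples_per_channel = static_cast<size_t>(sample_rate_hz / 100);
  std::vector<int16_t> buffer(samples_per_channel * channels);
  for (int i = 0; i < iterations; ++i) {
    size_t got = source->NeedMorePlayData(samples_per_channel, channels,
                                          sample_rate_hz, buffer.data());
    (void)got;  // a real device would write |buffer| to the audio output here
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
}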

AudioTransport is implemented by VoEBaseImpl; the registration was covered in the analysis above.

/* android/webrtc/src/webrtc/voice_engine/voe_base_impl.cc */
int32_t VoEBaseImpl::NeedMorePlayData(size_t nSamples, size_t nBytesPerSample,
                                      uint8_t nChannels, uint32_t samplesPerSec,
                                      void* audioSamples, size_t& nSamplesOut,
                                      int64_t* elapsed_time_ms,
                                      int64_t* ntp_time_ms) {
  GetPlayoutData(static_cast<int>(samplesPerSec), static_cast<int>(nChannels),
                 nSamples, true, audioSamples, elapsed_time_ms, ntp_time_ms);
  nSamplesOut = audioFrame_.samples_per_channel_;
  return 0;
}

void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
                                 size_t number_of_frames, bool feed_data_to_apm,
                                 void* audio_data, int64_t* elapsed_time_ms,
                                 int64_t* ntp_time_ms) {
  // Pull and mix the data from the active channels (mixing and resampling).
  shared_->output_mixer()->MixActiveChannels();

  // Additional operations on the combined signal
  shared_->output_mixer()->DoOperationsOnCombinedSignal(feed_data_to_apm);

  // Retrieve the final output mix (resampled to match the ADM)
  shared_->output_mixer()->GetMixedAudio(sample_rate, number_of_channels,
                                         &audioFrame_);

  // Copy the PCM data out.
  memcpy(audio_data, audioFrame_.data_,
         sizeof(int16_t) * number_of_frames * number_of_channels);
}

shared_->output_mixer()->MixActiveChannels() pulls PCM data from the decoders through the channels:

/* android/webrtc/src/webrtc/voice_engine/output_mixer.cc */
int32_t OutputMixer::MixActiveChannels() {
  return _mixerModule.Process();
}

/* android/webrtc/src/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc */
int32_t AudioConferenceMixerImpl::Process() {
  ...
  UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
              &remainingParticipantsAllowedToMix);
  ...
}

void AudioConferenceMixerImpl::UpdateToMix(
    AudioFrameList* mixList, AudioFrameList* rampOutList,
    std::map<int, MixerParticipant*>* mixParticipantList,
    size_t* maxAudioFrameCounter) {
  ...
  for (MixerParticipantList::const_iterator participant =
           _participantList.begin();
       participant != _participantList.end(); ++participant) {
    ...
    // Pull PCM data from the MixerParticipant; MixerParticipant is implemented by Channel.
    if ((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "failed to GetAudioFrame() from participant");
      _audioFramePool->PushMemory(audioFrame);
      continue;
      ...
    }
  }
  ...
}

/* android/webrtc/src/webrtc/voice_engine/channel.cc */
int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame) {
  ...
  // Get decoded PCM data from the AudioCodingModule.
  if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_, audioFrame) ==
      -1) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
    // In all likelihood, the audio in this frame is garbage. We return an
    // error so that the audio mixer module doesn't add it to the mix. As
    // a result, it won't be played out and the actions skipped here are
    // irrelevant.
    return -1;
  }
  ...
}

/* android/webrtc/src/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc */
int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
                                           AudioFrame* audio_frame) {
  // GetAudio always returns 10 ms, at the requested sample rate.
  if (receiver_.GetAudio(desired_freq_hz, audio_frame) != 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "PlayoutData failed, RecOut Failed");
    return -1;
  }
  audio_frame->id_ = id_;
  return 0;
}

/* android/webrtc/src/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc */
int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
  ...
  // As shown in the decoding analysis, this pulls compressed audio from the
  // buffer, decodes it, and hands the result out.
  if (neteq_->GetAudio(AudioFrame::kMaxDataSizeSamples, audio_buffer_.get(),
                       &samples_per_channel, &num_channels, &type) != NetEq::kOK) {
    LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
    return -1;
  }
  ...
}
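Conceptually, mixing the participants' decoded frames comes down to summing the 16-bit PCM samples and saturating to the int16 range; the real AudioConferenceMixerImpl additionally ramps levels and limits how many participants are mixed. A minimal illustration of the sum-and-clamp step only:

#include <cstddef>
#include <cstdint>

// Mixes |count| samples from |a| and |b| into |out|, saturating to the int16 range.
// Illustrative only; the real mixer also applies ramping and a limiter.
void MixSaturated(const int16_t* a, const int16_t* b, int16_t* out, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    int32_t sum = static_cast<int32_t>(a[i]) + static_cast<int32_t>(b[i]);
    if (sum > 32767) sum = 32767;
    if (sum < -32768) sum = -32768;
    out[i] = static_cast<int16_t>(sum);
  }
}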

The analysis above shows that WebRTC's layering is very clean. With Figure 1 and the relevant code at hand, the whole framework is easy to follow.

