

WebRTC Audio

2019-11-09 15:29:30

WebRTC Voice: Overall Framework

Figure 1: Overall voice framework diagram (image not available)

As Figure 1 shows, apart from libjingle, which handles peer-to-peer data transport, the audio processing framework consists mainly of the VoE (Voice Engine) and the Channel adaptation layer.

Figure 2: Sequence diagram for creating the data-communication channels (image not available). The diagram shows the complete setup flow on the local side: the VoE is created starting from CreateMediaEngine_w, while the Channel adaptation layer is created from SetLocalDescription according to the SDP. These two processes are analyzed below.

VOE Creation Process

/* src/talk/app/webrtc/peerconnectionfactory.cc */
bool PeerConnectionFactory::Initialize() {
  ......
  default_allocator_factory_ = PortAllocatorFactory::Create(worker_thread_);
  .....
  cricket::MediaEngineInterface* media_engine =
      worker_thread_->Invoke<cricket::MediaEngineInterface*>(rtc::Bind(
          &PeerConnectionFactory::CreateMediaEngine_w, this));
  // The Invoke/Bind pair simply runs CreateMediaEngine_w on worker_thread_.
  .....
  channel_manager_.reset(
      new cricket::ChannelManager(media_engine, worker_thread_));
  ......
}

cricket::MediaEngineInterface* PeerConnectionFactory::CreateMediaEngine_w() {
  ASSERT(worker_thread_ == rtc::Thread::Current());
  return cricket::WebRtcMediaEngineFactory::Create(
      default_adm_.get(), video_encoder_factory_.get(),
      video_decoder_factory_.get());
}

MediaEngineInterface* WebRtcMediaEngineFactory::Create(
    webrtc::AudioDeviceModule* adm,
    WebRtcVideoEncoderFactory* encoder_factory,
    WebRtcVideoDecoderFactory* decoder_factory) {
  return CreateWebRtcMediaEngine(adm, encoder_factory, decoder_factory);
}

// CreateWebRtcMediaEngine actually returns a WebRtcMediaEngine2, which inherits
// from the CompositeMediaEngine template; implemented in webrtcmediaengine.cc.
namespace cricket {
class WebRtcMediaEngine2
    : public CompositeMediaEngine<WebRtcVoiceEngine, WebRtcVideoEngine2> {
 public:
  WebRtcMediaEngine2(webrtc::AudioDeviceModule* adm,
                     WebRtcVideoEncoderFactory* encoder_factory,
                     WebRtcVideoDecoderFactory* decoder_factory) {
    voice_.SetAudioDeviceModule(adm);
    video_.SetExternalDecoderFactory(decoder_factory);
    video_.SetExternalEncoderFactory(encoder_factory);
  }
};
}  // namespace cricket

template <class VOICE, class VIDEO>
class CompositeMediaEngine : public MediaEngineInterface {
 public:
  virtual ~CompositeMediaEngine() {}
  virtual bool Init(rtc::Thread* worker_thread) {
    if (!voice_.Init(worker_thread))  // voice_ here is WebRtcVoiceEngine.
      return false;
    video_.Init();  // video_ is WebRtcVideoEngine2, analyzed separately.
    return true;
  }
  ......
}
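The worker_thread_->Invoke<T>(rtc::Bind(...)) calls seen above marshal a call onto the worker thread and block until it returns. Below is a rough, self-contained illustration of that pattern in plain C++11; it is not WebRTC's actual rtc::Thread API, and the ToyWorkerThread names are invented for the sketch.

#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// A toy "worker thread" that runs posted tasks. Invoke() blocks the caller
// until the task has executed on the worker and returns its result.
class ToyWorkerThread {
 public:
  ToyWorkerThread() : running_(true), thread_([this] { Loop(); }) {}
  ~ToyWorkerThread() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      running_ = false;
    }
    cv_.notify_one();
    thread_.join();
  }

  template <typename R>
  R Invoke(std::function<R()> task) {
    std::packaged_task<R()> packaged(std::move(task));
    std::future<R> result = packaged.get_future();
    {
      std::lock_guard<std::mutex> lock(mutex_);
      queue_.emplace([&packaged] { packaged(); });
    }
    cv_.notify_one();
    return result.get();  // Block until the worker has run the task.
  }

 private:
  void Loop() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty() || !running_; });
        if (!running_ && queue_.empty()) return;
        task = std::move(queue_.front());
        queue_.pop();
      }
      task();
    }
  }

  bool running_;
  std::queue<std::function<void()>> queue_;
  std::mutex mutex_;
  std::condition_variable cv_;
  std::thread thread_;  // Declared last so Loop() sees initialized members.
};

int main() {
  ToyWorkerThread worker;
  // Analogous to worker_thread_->Invoke<MediaEngineInterface*>(...):
  int engine_id = worker.Invoke<int>([] {
    std::cout << "creating media engine on worker thread\n";
    return 42;
  });
  std::cout << "engine id = " << engine_id << "\n";
}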

The related class diagram: Figure 3, VoE engine class diagram (image not available).

WebRtcVoiceEngine::WebRtcVoiceEngine()
    : voe_wrapper_(new VoEWrapper()),   // Proxy for the lower-level VoiceEngine; upper-layer calls into the lower layer go through it.
      tracing_(new VoETraceWrapper()),  // Tracing/debug helper.
      adm_(NULL),
      log_filter_(SeverityToFilter(kDefaultLogSeverity)),
      is_dumping_aec_(false) {
  Construct();
}

The classes and methods involved in constructing WebRtcVoiceEngine are shown below:

// VoEWrapper is effectively a proxy for VoiceEngine --> voice_engine_impl.cc.
/* webrtcvoe.h */
class VoEWrapper {
 public:
  VoEWrapper()
      : engine_(webrtc::VoiceEngine::Create()),
        processing_(engine_),
        base_(engine_),
        codec_(engine_),
        dtmf_(engine_),
        hw_(engine_),
        neteq_(engine_),
        network_(engine_),
        rtp_(engine_),
        sync_(engine_),
        volume_(engine_) {}

/* webrtcvoiceengine.cc */
void WebRtcVoiceEngine::Construct() {
  ......
  // Register the engine state observer so that lower-level errors are
  // reported back to WebRtcVoiceEngine.
  if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
    LOG_RTCERR0(RegisterVoiceEngineObserver);
  }
  ....
  // Load our audio codec list. Walk the kCodecPrefs table (highest quality
  // first) and pick the best codec the lower layer supports.
  ConstructCodecs();
  .....
  // Default audio options: echo cancellation, noise suppression, AGC,
  // whether to dump, and so on.
  options_ = GetDefaultEngineOptions();
  ...
}

// WebRtcVoiceEngine initialization.
bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
  ......
  bool res = InitInternal();
  ......
}

bool WebRtcVoiceEngine::InitInternal() {
  ......
  // Initialize the lower-level AudioDeviceModule. In this WebRTC demo, adm_ is NULL here.
  // voe_wrapper_ is the VoiceEngine proxy implemented in voice_engine_impl.cc.
  // VoiceEngineImpl inherits from VoiceEngine; Create() returns a VoiceEngineImpl,
  // whose base part (VoEBaseImpl) is implemented in voe_base_impl.cc, and that
  // object is handed to VoEWrapper. voe_wrapper_->base() is therefore the
  // VoiceEngineImpl object; its Init is analyzed below.
  if (voe_wrapper_->base()->Init(adm_) == -1) {
    ......
  }
  ......
}

/* voe_base_impl.cc */
int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
                      AudioProcessing* audioproc) {
  ......
  if (external_adm == nullptr) {  // As noted above, the demo passes null.
#if !defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
    return -1;
#else
    // Create the internal ADM implementation (AudioDeviceModuleImpl), which
    // captures and plays audio via AudioRecord and AudioTrack.
    shared_->set_audio_device(AudioDeviceModuleImpl::Create(
        VoEId(shared_->instance_id(), -1), shared_->audio_device_layer()));
    if (shared_->audio_device() == nullptr) {
      shared_->SetLastError(VE_NO_MEMORY, kTraceCritical,
                            "Init() failed to create the ADM");
      return -1;
    }
#endif  // WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE
  } else {
    // Use the already existing external ADM implementation.
    shared_->set_audio_device(external_adm);
    LOG_F(LS_INFO)
        << "An external ADM implementation will be used in VoiceEngine";
  }

  // Register the ADM to the process thread, which will drive the error
  // callback mechanism
  if (shared_->process_thread()) {
    shared_->process_thread()->RegisterModule(shared_->audio_device());
  }

  bool available = false;

  // --------------------
  // Reinitialize the ADM

  // Register an event observer for the audio device.
  if (shared_->audio_device()->RegisterEventObserver(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register event observer for the ADM");
  }

  // Register the AudioTransport implementation with the audio device; this is
  // what moves audio data in and out.
  if (shared_->audio_device()->RegisterAudioCallback(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register audio callback for the ADM");
  }

  // Initialize the audio device itself.
  if (shared_->audio_device()->Init() != 0) {
    shared_->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
                          "Init() failed to initialize the ADM");
    return -1;
  }
  ......
}

AudioDeviceModule* AudioDeviceModuleImpl::Create(const int32_t id,
                                                 const AudioLayer audioLayer) {
  ......
  RefCountImpl<AudioDeviceModuleImpl>* audioDevice =
      new RefCountImpl<AudioDeviceModuleImpl>(id, audioLayer);

  // Check whether the platform is supported.
  if (audioDevice->CheckPlatform() == -1) {
    delete audioDevice;
    return NULL;
  }

  // Pick a platform-specific implementation. On Android this goes through JNI
  // (audio_record_jni.cc / audio_track_jni.cc) to
  // org/webrtc/voiceengine/WebRtcAudioRecord.java and
  // org/webrtc/voiceengine/WebRtcAudioTrack.java for capture and playback.
  if (audioDevice->CreatePlatformSpecificObjects() == -1) {
    delete audioDevice;
    return NULL;
  }

  // Attach the shared audio buffer; audio data is handed over through the
  // registered AudioTransport.
  if (audioDevice->AttachAudioBuffer() == -1) {
    delete audioDevice;
    return NULL;
  }
  ......
}
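CreatePlatformSpecificObjects() is essentially a compile-time factory: each platform contributes its own capture/playout implementation behind a common interface. A simplified sketch of that pattern follows; the interface and class names are invented for illustration and are not WebRTC's.

#include <cstdio>
#include <memory>

// Common interface the rest of the engine codes against.
struct AudioInput {
  virtual ~AudioInput() = default;
  virtual int InitRecording() = 0;
};

// One implementation per platform; on Android the real code wraps JNI calls
// into WebRtcAudioRecord.java instead of touching the hardware directly.
struct AndroidJniAudioInput : AudioInput {
  int InitRecording() override {
    std::puts("init capture via JNI AudioRecord");
    return 0;
  }
};

struct DummyAudioInput : AudioInput {
  int InitRecording() override {
    std::puts("init dummy capture");
    return 0;
  }
};

std::unique_ptr<AudioInput> CreatePlatformAudioInput() {
#if defined(__ANDROID__)
  return std::unique_ptr<AudioInput>(new AndroidJniAudioInput());
#else
  return std::unique_ptr<AudioInput>(new DummyAudioInput());
#endif
}

int main() {
  std::unique_ptr<AudioInput> input = CreatePlatformAudioInput();
  return input->InitRecording();
}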

Channel Creation Process

In the sequence diagram of Figure 2, SetLocalDescription calls CreateChannels to create the channels the session requires according to the SDP, which opens the transport channels for audio, video, and user data. The creation of the audio channel is examined in detail below; the others are similar. (Sequence diagram and related class diagram not available.)

/* webrtcsession.cc */
bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
  // Creating the media channels and transport proxies.
  // Create the VoiceChannel according to the SDP.
  const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(desc);
  if (voice && !voice->rejected && !voice_channel_) {
    if (!CreateVoiceChannel(voice)) {
      LOG(LS_ERROR) << "Failed to create voice channel.";
      return false;
    }
  }
  // Create the VideoChannel according to the SDP.
  const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
  if (video && !video->rejected && !video_channel_) {
    if (!CreateVideoChannel(video)) {
      LOG(LS_ERROR) << "Failed to create video channel.";
      return false;
    }
  }
  // Create the DataChannel according to the SDP.
  const cricket::ContentInfo* data = cricket::GetFirstDataContent(desc);
  if (data_channel_type_ != cricket::DCT_NONE && data && !data->rejected &&
      !data_channel_) {
    if (!CreateDataChannel(data)) {
      LOG(LS_ERROR) << "Failed to create data channel.";
      return false;
    }
  }
  ......
  return true;
}

// Here we only analyze how the VoiceChannel is created.
bool WebRtcSession::CreateVoiceChannel(const cricket::ContentInfo* content) {
  // channel_manager_ is the ChannelManager created in
  // PeerConnectionFactory::Initialize (peerconnectionfactory.cc).
  // media_controller_ is the MediaController created when WebRtcSession was
  // initialized; it wraps the Call object so that the Call can be shared.
  // transport_controller() returns the TransportController created in the
  // constructor of BaseSession, the base class of WebRtcSession.
  // BaseSession is the part that talks to libjingle.
  voice_channel_.reset(channel_manager_->CreateVoiceChannel(
      media_controller_.get(), transport_controller(), content->name, true,
      audio_options_));
  if (!voice_channel_) {
    return false;
  }
  ......
  return true;
}

/* webrtc/src/talk/session/media/channelmanager.cc */
VoiceChannel* ChannelManager::CreateVoiceChannel(
    webrtc::MediaControllerInterface* media_controller,
    TransportController* transport_controller,
    const std::string& content_name,
    bool rtcp,
    const AudioOptions& options) {
  // The Invoke/Bind pair runs ChannelManager::CreateVoiceChannel_w on
  // worker_thread_.
  return worker_thread_->Invoke<VoiceChannel*>(
      Bind(&ChannelManager::CreateVoiceChannel_w, this, media_controller,
           transport_controller, content_name, rtcp, options));
}

VoiceChannel* ChannelManager::CreateVoiceChannel_w(
    webrtc::MediaControllerInterface* media_controller,
    TransportController* transport_controller,
    const std::string& content_name,
    bool rtcp,
    const AudioOptions& options) {
  ......
  // media_engine_ is the WebRtcMediaEngine2 created in peerconnectionfactory.cc;
  // this ends up calling WebRtcVoiceEngine::CreateChannel.
  VoiceMediaChannel* media_channel =
      media_engine_->CreateChannel(media_controller->call_w(), options);
  if (!media_channel)
    return nullptr;
  // VoiceChannel inherits BaseChannel; it receives data from libjingle and
  // sends data to the remote side through libjingle.
  VoiceChannel* voice_channel =
      new VoiceChannel(worker_thread_, media_engine_.get(), media_channel,
                       transport_controller, content_name, rtcp);
  if (!voice_channel->Init()) {
    delete voice_channel;
    return nullptr;
  }
  voice_channels_.push_back(voice_channel);
  return voice_channel;
}

VoiceMediaChannel* WebRtcVoiceEngine::CreateChannel(webrtc::Call* call,
                                                    const AudioOptions& options) {
  WebRtcVoiceMediaChannel* ch = new WebRtcVoiceMediaChannel(this, options, call);
  if (!ch->valid()) {
    delete ch;
    return nullptr;
  }
  return ch;
}

WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine* engine,
                                                 const AudioOptions& options,
                                                 webrtc::Call* call)
    : engine_(engine),
      // Calls WebRtcVoiceEngine::CreateMediaVoiceChannel(), which creates the
      // low-level channel (see CreateVoiceChannel below).
      voe_channel_(engine->CreateMediaVoiceChannel()),
      ......
{
  // Register this WebRtcVoiceMediaChannel with WebRtcVoiceEngine, which keeps
  // it in its channel list.
  engine->RegisterChannel(this);
  ......
  // Register this WebRtcVoiceMediaChannel as the transport of the newly
  // created channel. WebRtcVoiceMediaChannel acts as a bridge: the low-level
  // channel sends and receives data streams through the registered Transport.
  ConfigureSendChannel(voe_channel());
  SetOptions(options);
}

int WebRtcVoiceEngine::CreateVoiceChannel(VoEWrapper* voice_engine_wrapper) {
  // VoEWrapper wraps VoiceEngine and can be viewed as its proxy. As the
  // implementation in voice_engine_impl.cc shows, VoiceEngine is itself a
  // wrapper around VoiceEngineImpl, so voice_engine_wrapper->base() is the
  // VoiceEngineImpl object.
  return voice_engine_wrapper->base()->CreateChannel(voe_config_);
}

/* voe_base_impl.cc */
int VoEBaseImpl::CreateChannel() {
  .....
  // Create the Channel object through the ChannelManager.
  voe::ChannelOwner channel_owner =
      shared_->channel_manager().CreateChannel();
  return InitializeChannel(&channel_owner);
}

/* android/webrtc/src/webrtc/voice_engine/channel_manager.cc */
ChannelOwner ChannelManager::CreateChannel() {
  return CreateChannelInternal(config_);
}

ChannelOwner ChannelManager::CreateChannelInternal(const Config& config) {
  Channel* channel;
  // Create the new Channel object.
  Channel::CreateChannel(channel, ++last_channel_id_, instance_id_,
                         event_log_.get(), config);
  ChannelOwner channel_owner(channel);

  CriticalSectionScoped crit(lock_.get());
  // ChannelManager keeps track of every channel it creates.
  channels_.push_back(channel_owner);
  // Return the wrapping ChannelOwner.
  return channel_owner;
}
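ChannelOwner is essentially a reference-counted handle that lets VoEBaseImpl, the voe::ChannelManager, and others share ownership of a voe::Channel. A rough equivalent using std::shared_ptr is sketched below; the real class does its own reference counting, and all "Toy" names here are invented.

#include <memory>
#include <mutex>
#include <vector>

// Stand-in for voe::Channel.
struct ToyChannel {
  explicit ToyChannel(int id) : id(id) {}
  int id;
};

// Stand-in for voe::ChannelOwner: a shared handle to a channel.
class ToyChannelOwner {
 public:
  explicit ToyChannelOwner(std::shared_ptr<ToyChannel> ch)
      : channel_(std::move(ch)) {}
  ToyChannel* channel() const { return channel_.get(); }

 private:
  std::shared_ptr<ToyChannel> channel_;
};

// Stand-in for voe::ChannelManager: creates channels and keeps them alive.
class ToyChannelManager {
 public:
  ToyChannelOwner CreateChannel() {
    std::lock_guard<std::mutex> lock(lock_);
    auto channel = std::make_shared<ToyChannel>(++last_channel_id_);
    channels_.emplace_back(channel);  // Manager keeps one reference...
    return ToyChannelOwner(channel);  // ...and the caller gets another.
  }

 private:
  std::mutex lock_;
  int last_channel_id_ = -1;
  std::vector<ToyChannelOwner> channels_;
};

int main() {
  ToyChannelManager manager;
  ToyChannelOwner owner = manager.CreateChannel();
  return owner.channel()->id;  // 0: the first channel id.
}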

Voice Sending Flow

Capture

In the Android WebRTC demo, voice is still captured through the system AudioRecord class, wrapped by org/webrtc/voiceengine/WebRtcAudioRecord.java. As described for the VoEBaseImpl::Init stage, a data-transport callback is registered with the AudioDeviceModule:

int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
                      AudioProcessing* audioproc) {
  ......
  // Register the AudioTransport implementation
  if (shared_->audio_device()->RegisterAudioCallback(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register audio callback for the ADM");
  }
  ......
}

int32_t AudioDeviceModuleImpl::RegisterAudioCallback(AudioTransport* audioCallback) {
  CriticalSectionScoped lock(&_critSectAudioCb);
  // The VoEBaseImpl implementation is ultimately registered with the device's
  // AudioDeviceBuffer.
  _audioDeviceBuffer.RegisterAudioCallback(audioCallback);
  return 0;
}

Overall, captured audio therefore flows as follows, ending up in the AudioTransport callback implemented by VoEBaseImpl (the same callback also supplies playout data):

nativeDataIsRecorded (org/webrtc/voiceengine/WebRtcAudioRecord.java)
---> AudioRecordJni::DataIsRecorded (audio_record_jni.cc)
---> OnDataIsRecorded
---> AudioDeviceBuffer::DeliverRecordedData
---> AudioTransport::RecordedDataIsAvailable
---> VoEBaseImpl::RecordedDataIsAvailable (voe_base_impl.cc)
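The key mechanism in this chain is the AudioTransport callback: the device side knows nothing about the engine, it only pushes recorded 10 ms frames into whatever callback was registered. Below is a stripped-down sketch of that contract; the interface is simplified and the Fake* names are invented, so this is illustrative rather than WebRTC's actual AudioTransport.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Simplified capture-side half of an AudioTransport-style interface.
struct RecordedDataSink {
  virtual ~RecordedDataSink() = default;
  virtual int RecordedDataIsAvailable(const int16_t* samples,
                                      size_t samples_per_channel,
                                      size_t channels,
                                      uint32_t sample_rate_hz) = 0;
};

// Device side: owns the hardware buffer, knows nothing about the VoE.
class FakeAudioDevice {
 public:
  void RegisterAudioCallback(RecordedDataSink* sink) { sink_ = sink; }

  // In the real ADM the platform recording thread drives this every 10 ms;
  // here we just deliver one synthetic silent frame.
  void DeliverOneFrame(uint32_t sample_rate_hz) {
    const size_t samples = sample_rate_hz / 100;  // 10 ms of mono audio.
    std::vector<int16_t> frame(samples, 0);
    if (sink_) {
      sink_->RecordedDataIsAvailable(frame.data(), samples, 1, sample_rate_hz);
    }
  }

 private:
  RecordedDataSink* sink_ = nullptr;
};

// Engine side: plays the role of VoEBaseImpl, which forwards to APM/mixer.
class FakeVoiceEngine : public RecordedDataSink {
 public:
  int RecordedDataIsAvailable(const int16_t*, size_t samples_per_channel,
                              size_t channels,
                              uint32_t sample_rate_hz) override {
    std::cout << "got " << samples_per_channel << " samples x " << channels
              << " ch @ " << sample_rate_hz << " Hz\n";
    return 0;
  }
};

int main() {
  FakeAudioDevice adm;
  FakeVoiceEngine voe;
  adm.RegisterAudioCallback(&voe);  // Mirrors RegisterAudioCallback(this).
  adm.DeliverOneFrame(48000);       // Prints: got 480 samples x 1 ch @ 48000 Hz
}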

Processing

/* voe_base_impl.cc */
int32_t VoEBaseImpl::RecordedDataIsAvailable(
    const void* audioSamples, size_t nSamples, size_t nBytesPerSample,
    uint8_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS,
    int32_t clockDrift, uint32_t micLevel, bool keyPressed,
    uint32_t& newMicLevel) {
  newMicLevel = static_cast<uint32_t>(ProcessRecordedDataWithAPM(
      nullptr, 0, audioSamples, samplesPerSec, nChannels, nSamples,
      totalDelayMS, clockDrift, micLevel, keyPressed));
  return 0;
}

// Data coming from the Java layer is fed straight into ProcessRecordedDataWithAPM.
int VoEBaseImpl::ProcessRecordedDataWithAPM(
    const int voe_channels[], int number_of_voe_channels,
    const void* audio_data, uint32_t sample_rate, uint8_t number_of_channels,
    size_t number_of_frames, uint32_t audio_delay_milliseconds,
    int32_t clock_drift, uint32_t volume, bool key_pressed) {
  ......
  // Adjust the microphone level.
  if (volume != 0) {
    // Scale from ADM to VoE level range
    if (shared_->audio_device()->MaxMicrophoneVolume(&max_volume) == 0) {
      if (max_volume) {
        voe_mic_level = static_cast<uint16_t>(
            (volume * kMaxVolumeLevel + static_cast<int>(max_volume / 2)) /
            max_volume);
      }
    }
    // We learned that on certain systems (e.g linux) the voe_mic_level
    // can be greater than the maxVolumeLevel therefore
    // we are going to cap the voe_mic_level to the maxVolumeLevel
    // and change the maxVolume to volume if it turns out that
    // the voe_mic_level is indeed greater than the maxVolumeLevel.
    if (voe_mic_level > kMaxVolumeLevel) {
      voe_mic_level = kMaxVolumeLevel;
      max_volume = volume;
    }
  }

  // A series of processing steps happen here: recording to file, resampling,
  // echo cancellation, AGC, etc.
  shared_->transmit_mixer()->PrepareDemux(
      audio_data, number_of_frames, number_of_channels, sample_rate,
      static_cast<uint16_t>(audio_delay_milliseconds), clock_drift,
      voe_mic_level, key_pressed);

  // Copy the audio frame to each sending channel and perform
  // channel-dependent operations (file mixing, mute, etc.), encode and
  // packetize+transmit the RTP packet. When |number_of_voe_channels| == 0,
  // do the operations on all the existing VoE channels; otherwise the
  // operations will be done on specific channels.
  if (number_of_voe_channels == 0) {
    shared_->transmit_mixer()->DemuxAndMix();
    shared_->transmit_mixer()->EncodeAndSend();
  } else {
    shared_->transmit_mixer()->DemuxAndMix(voe_channels,
                                           number_of_voe_channels);
    shared_->transmit_mixer()->EncodeAndSend(voe_channels,
                                             number_of_voe_channels);
  }
  ......

  // Return 0 to indicate no change on the volume.
  return 0;
}
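The level scaling in ProcessRecordedDataWithAPM is a simple proportional mapping from the device's native volume range onto the VoE's 0..255 range, with rounding and a cap. A small worked sketch of just that arithmetic follows; it assumes kMaxVolumeLevel is 255 as in the VoE, and the example device range is made up.

#include <cassert>
#include <cstdint>

// Map a device volume in [0, max_volume] onto the VoE's [0, 255] range,
// rounding to nearest and capping, mirroring the logic above.
uint16_t ScaleMicLevel(uint32_t volume, uint32_t max_volume) {
  const uint32_t kMaxVolumeLevel = 255;
  if (max_volume == 0) return 0;
  uint32_t level = (volume * kMaxVolumeLevel + max_volume / 2) / max_volume;
  if (level > kMaxVolumeLevel) level = kMaxVolumeLevel;  // Seen on some platforms.
  return static_cast<uint16_t>(level);
}

int main() {
  // E.g. a device whose mixer exposes a 0..15 volume range:
  assert(ScaleMicLevel(0, 15) == 0);
  assert(ScaleMicLevel(15, 15) == 255);
  assert(ScaleMicLevel(8, 15) == 136);   // (8*255 + 7) / 15 = 136
  assert(ScaleMicLevel(20, 15) == 255);  // Capped.
  return 0;
}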

Encoding


// shared_->transmit_mixer()->EncodeAndSend()
// Encodes the data; once encoded, packetization and sending are triggered.
void TransmitMixer::EncodeAndSend(const int voe_channels[],
                                  int number_of_voe_channels) {
  for (int i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr && channel_ptr->Sending())  // Only channels currently sending.
      channel_ptr->EncodeAndSend();
  }
}

uint32_t Channel::EncodeAndSend() {
  ......
  // Compress/encode the audio data.
  if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::EncodeAndSend() ACM encoding failed");
    return 0xFFFFFFFF;
  }
  ......
}

int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
  InputData input_data;
  CriticalSectionScoped lock(acm_crit_sect_.get());
  // Pre-encoding processing: resample if needed and wrap the data in InputData.
  int r = Add10MsDataInternal(audio_frame, &input_data);
  // Start encoding.
  return r < 0 ? r : Encode(input_data);
}

int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
  ......
  // Get the encoder currently in use from the CodecManager.
  AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
  ......
  // Encode.
  encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
  encoded_info = audio_encoder->Encode(
      rtp_timestamp, input_data.audio, input_data.length_per_channel,
      encode_buffer_.size(), encode_buffer_.data());
  encode_buffer_.SetSize(encoded_info.encoded_bytes);
  ......
  {
    CriticalSectionScoped lock(callback_crit_sect_.get());
    if (packetization_callback_) {
      // Trigger sending. packetization_callback_ is the Channel, which
      // implements AudioPacketizationCallback and registers itself in
      // Channel::Init() via audio_coding_->RegisterTransportCallback(this).
      packetization_callback_->SendData(
          frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
          encode_buffer_.data(), encode_buffer_.size(),
          my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
                                                       : nullptr);
    }
    if (vad_callback_) {
      // Voice-activity detection (VAD) callback.
      vad_callback_->InFrameType(frame_type);
    }
  }
}
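Everything the ACM accepts through Add10MsData is framed in 10 ms blocks, so the frame size in samples depends only on the sample rate and channel count. A small sketch of that bookkeeping follows; the codec/rate pairings shown are typical examples rather than an exhaustive list.

#include <cstddef>
#include <cstdio>

// Samples per channel in one 10 ms frame.
constexpr size_t SamplesPer10Ms(size_t sample_rate_hz) {
  return sample_rate_hz / 100;
}

// Total int16 samples (all channels) handed to Add10MsData per call.
constexpr size_t FrameSamples(size_t sample_rate_hz, size_t channels) {
  return SamplesPer10Ms(sample_rate_hz) * channels;
}

int main() {
  std::printf("PCMU  8 kHz mono  : %zu samples / 10 ms\n", FrameSamples(8000, 1));   // 80
  std::printf("iSAC 16 kHz mono  : %zu samples / 10 ms\n", FrameSamples(16000, 1));  // 160
  std::printf("Opus 48 kHz stereo: %zu samples / 10 ms\n", FrameSamples(48000, 2));  // 960
}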

Packetization

After encoding, the audio data triggers the packetize-and-send flow through AudioPacketizationCallback::SendData, which Channel implements. SendData looks like this:

/* android/webrtc/src/webrtc/voice_engine/channel.cc */
int32_t Channel::SendData(FrameType frameType, uint8_t payloadType,
                          uint32_t timeStamp, const uint8_t* payloadData,
                          size_t payloadSize,
                          const RTPFragmentationHeader* fragmentation) {
  ......
  // RTP packetization and sending.
  if (_rtpRtcpModule->SendOutgoingData(
          (FrameType&)frameType, payloadType, timeStamp,
          // Leaving the time when this frame was
          // received from the capture device as
          // undefined for voice for now.
          -1, payloadData, payloadSize, fragmentation) == -1) {
    _engineStatisticsPtr->SetLastError(
        VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
        "Channel::SendData() failed to send data to RTP/RTCP module");
    return -1;
  }
  ......
}

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc */
// RTP packetization and sending are ultimately handled by RTPSender.
int32_t ModuleRtpRtcpImpl::SendOutgoingData(
    FrameType frame_type, int8_t payload_type, uint32_t time_stamp,
    int64_t capture_time_ms, const uint8_t* payload_data, size_t payload_size,
    const RTPFragmentationHeader* fragmentation,
    const RTPVideoHeader* rtp_video_hdr) {
  rtcp_sender_.SetLastRtpTime(time_stamp, capture_time_ms);
  if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
    rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
  }
  return rtp_sender_.SendOutgoingData(
      frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
      payload_size, fragmentation, rtp_video_hdr);
}

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_sender.cc */
int32_t RTPSender::SendOutgoingData(FrameType frame_type, int8_t payload_type,
                                    uint32_t capture_timestamp,
                                    int64_t capture_time_ms,
                                    const uint8_t* payload_data,
                                    size_t payload_size,
                                    const RTPFragmentationHeader* fragmentation,
                                    const RTPVideoHeader* rtp_hdr) {
  ......
  // Decide whether the payload is audio or video.
  if (CheckPayloadType(payload_type, &video_type) != 0) {
    LOG(LS_ERROR) << "Don't send data with unknown payload type.";
    return -1;
  }
  // Audio: audio_ is an RTPSenderAudio, implemented in
  // android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc.
  ret_val = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
                              payload_data, payload_size, fragmentation);
  // Video:
  ret_val = video_->SendVideo(video_type, frame_type, payload_type,
                              capture_timestamp, capture_time_ms, payload_data,
                              payload_size, fragmentation, rtp_hdr);
}

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc */
int32_t RTPSenderAudio::SendAudio(const FrameType frameType,
                                  const int8_t payloadType,
                                  const uint32_t captureTimeStamp,
                                  const uint8_t* payloadData,
                                  const size_t dataSize,
                                  const RTPFragmentationHeader* fragmentation) {
  ......
  // Pack the encoded audio into RTP packets according to the protocol; the
  // full flow is fairly involved and is not analyzed here (see the source for
  // the details).
  ......
  // Send.
  return _rtpSender->SendToNetwork(dataBuffer, payloadSize, rtpHeaderLength,
                                   -1, kAllowRetransmission,
                                   RtpPacketSender::kHighPriority);
}
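The packetization done by RTPSenderAudio boils down to prepending a 12-byte RTP fixed header (RFC 3550) to the encoded payload. Below is a minimal, self-contained header writer that illustrates the layout; it ignores extensions and CSRCs and is not WebRTC's actual RTPSender code, and the payload type, sizes, and SSRC in main() are example values.

#include <cstdint>
#include <cstring>
#include <vector>

// Write a 12-byte RTP fixed header (RFC 3550); all multi-byte fields are big-endian.
std::vector<uint8_t> BuildRtpPacket(uint8_t payload_type, bool marker,
                                    uint16_t seq, uint32_t timestamp,
                                    uint32_t ssrc, const uint8_t* payload,
                                    size_t payload_size) {
  std::vector<uint8_t> packet(12 + payload_size);
  packet[0] = 0x80;  // V=2, P=0, X=0, CC=0
  packet[1] = (marker ? 0x80 : 0x00) | (payload_type & 0x7F);
  packet[2] = seq >> 8;
  packet[3] = seq & 0xFF;
  packet[4] = timestamp >> 24;
  packet[5] = (timestamp >> 16) & 0xFF;
  packet[6] = (timestamp >> 8) & 0xFF;
  packet[7] = timestamp & 0xFF;
  packet[8] = ssrc >> 24;
  packet[9] = (ssrc >> 16) & 0xFF;
  packet[10] = (ssrc >> 8) & 0xFF;
  packet[11] = ssrc & 0xFF;
  std::memcpy(packet.data() + 12, payload, payload_size);
  return packet;
}

int main() {
  // One 20 ms encoded audio frame (payload type and sizes are just examples).
  const uint8_t audio_frame[50] = {0};
  std::vector<uint8_t> pkt =
      BuildRtpPacket(/*payload_type=*/111, /*marker=*/false, /*seq=*/1234,
                     /*timestamp=*/960, /*ssrc=*/0x11223344, audio_frame,
                     sizeof(audio_frame));
  return pkt.size() == 62 ? 0 : 1;  // 12-byte header + 50-byte payload.
}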

Sending

From the flow above, once RTP packetization is done, RTPSender carries out the sending, as follows:

int32_t RTPSender::SendToNetwork(uint8_t* buffer, size_t payload_length,
                                 size_t rtp_header_length,
                                 int64_t capture_time_ms, StorageType storage,
                                 RtpPacketSender::Priority priority) {
  ......
  // After some timing handling and retransmission bookkeeping, the packet is
  // sent directly.
  bool sent = SendPacketToNetwork(buffer, length);
  .....
  // Update the send statistics.
  UpdateRtpStats(buffer, length, rtp_header, false, false);
  ......
}

bool RTPSender::SendPacketToNetwork(const uint8_t* packet, size_t size) {
  int bytes_sent = -1;
  if (transport_) {
    // transport_ here is actually the Channel; Channel inherits from Transport.
    //
    // In the Channel constructor:
    //   Channel::Channel(int32_t channelId, uint32_t instanceId,
    //                    RtcEventLog* const event_log, const Config& config) {
    //     RtpRtcp::Configuration configuration;
    //     configuration.audio = true;
    //     configuration.outgoing_transport = this;  // Set the Transport.
    //     configuration.audio_messages = this;
    //     configuration.receive_statistics = rtp_receive_statistics_.get();
    //     configuration.bandwidth_callback = rtcp_observer_.get();
    //     _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
    //   }
    //
    // The ModuleRtpRtcpImpl constructor then passes the configuration on to
    // RTPSender:
    //   ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
    //       : rtp_sender_(configuration.audio, configuration.clock,
    //                     configuration.outgoing_transport,
    //                     configuration.audio_messages,
    //                     configuration.paced_sender,
    //                     configuration.transport_sequence_number_allocator,
    //                     configuration.transport_feedback_callback,
    //                     configuration.send_bitrate_observer,
    //                     configuration.send_frame_count_observer,
    //                     configuration.send_side_delay_observer),
    //         rtcp_sender_(configuration.audio, configuration.clock,
    //                      configuration.receive_statistics,
    //                      configuration.rtcp_packet_type_counter_observer),
    //         rtcp_receiver_(configuration.clock, configuration.receiver_only,
    //                        configuration.rtcp_packet_type_counter_observer,
    //                        configuration.bandwidth_callback,
    //                        configuration.intra_frame_callback,
    //                        configuration.transport_feedback_callback, this)
    //         ...... {}
    bytes_sent =
        transport_->SendRtp(packet, size) ? static_cast<int>(size) : -1;
  }
  ......
  return true;
}

The analysis above shows that the final sending step is implemented by SendRtp in Channel:

bool Channel::SendRtp(const uint8_t* data, size_t len) {
  ......
  // _transportPtr is registered through
  // int32_t Channel::RegisterExternalTransport(Transport& transport).
  // Recalling the channel-creation flow: in webrtcvoiceengine.cc the
  // WebRtcVoiceMediaChannel constructor calls ConfigureSendChannel(voe_channel()):
  //
  //   void WebRtcVoiceMediaChannel::ConfigureSendChannel(int channel) {
  //     // VoENetworkImpl looks the Channel up via its ChannelOwner and
  //     // registers this object as the external Transport.
  //     if (engine()->voe()->network()->RegisterExternalTransport(
  //             channel, *this) == -1) {
  //       LOG_RTCERR2(RegisterExternalTransport, channel, this);
  //     }
  //     // Enable RTCP (for quality stats and feedback messages)
  //     EnableRtcp(channel);
  //     // Reset all recv codecs; they will be enabled via SetRecvCodecs.
  //     ResetRecvCodecs(channel);
  //     // Set RTP header extension for the new channel.
  //     SetChannelSendRtpHeaderExtensions(channel, send_extensions_);
  //   }
  if (!_transportPtr->SendRtp(bufferToSendPtr, bufferLength)) {
    std::string transport_name =
        _externalTransport ? "external transport" : "WebRtc sockets";
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::SendPacket() RTP transmission using %s failed",
                 transport_name.c_str());
    return false;
  }
  ......
}

This also shows that the Transport registered with Channel is actually WebRtcVoiceMediaChannel:

/* android/webrtc/src/talk/media/webrtc/webrtcvoiceengine.h */
class WebRtcVoiceMediaChannel : public VoiceMediaChannel,
                                public webrtc::Transport {
  ......
  // implements Transport interface
  bool SendRtp(const uint8_t* data, size_t len) override {
    rtc::Buffer packet(reinterpret_cast<const uint8_t*>(data), len,
                       kMaxRtpPacketLen);
    return VoiceMediaChannel::SendPacket(&packet);
  }
  ......
};

/* android/webrtc/src/talk/media/base/mediachannel.h */
class VoiceMediaChannel : public MediaChannel {
  ......
  // Base method to send packet using NetworkInterface.
  bool SendPacket(rtc::Buffer* packet) { return DoSendPacket(packet, false); }
  bool SendRtcp(rtc::Buffer* packet) { return DoSendPacket(packet, true); }
  // Sets the abstract interface class for sending RTP/RTCP data.
  virtual void SetInterface(NetworkInterface* iface) {
    rtc::CritScope cs(&network_interface_crit_);
    network_interface_ = iface;
  }

 private:
  bool DoSendPacket(rtc::Buffer* packet, bool rtcp) {
    rtc::CritScope cs(&network_interface_crit_);
    if (!network_interface_)
      return false;
    // network_interface_ is set via SetInterface; it is implemented by
    // BaseChannel (android/webrtc/src/talk/session/media/channel.h) and
    // registered in BaseChannel::Init() through SetInterface.
    return (!rtcp) ? network_interface_->SendPacket(packet)
                   : network_interface_->SendRtcp(packet);
  }
  ......
};

/* android/webrtc/src/talk/media/base/channel.h */
class BaseChannel : public rtc::MessageHandler,
                    public sigslot::has_slots<>,
                    public MediaChannel::NetworkInterface,
                    public ConnectionStatsGetter {};

/* android/webrtc/src/talk/media/base/channel.cc */
bool BaseChannel::Init() {
  ......
  // Set up the TransportChannel for this BaseChannel.
  if (!SetTransport(content_name())) {
    return false;
  }
  // Both RTP and RTCP channels are set, we can call SetInterface on
  // media channel and it can set network options.
  media_channel_->SetInterface(this);
  return true;
}

bool BaseChannel::SendPacket(rtc::Buffer* packet, rtc::DiffServCodePoint dscp) {
  return SendPacket(false, packet, dscp);
}

bool BaseChannel::SendPacket(bool rtcp, rtc::Buffer* packet,
                             rtc::DiffServCodePoint dscp) {
  ......
  // Get the TransportChannel that carries the data; it was set up by
  // SetTransport during Init().
  TransportChannel* channel = (!rtcp || rtcp_mux_filter_.IsActive())
                                  ? transport_channel_
                                  : rtcp_transport_channel_;
  if (!channel || !channel->writable()) {
    return false;
  }
  ......
  int ret = channel->SendPacket(packet->data<char>(), packet->size(), options,
                                (secure() && secure_dtls()) ? PF_SRTP_BYPASS : 0);
}

bool BaseChannel::SetTransport(const std::string& transport_name) {
  // Run SetTransport_w on the worker thread.
  return worker_thread_->Invoke<bool>(
      Bind(&BaseChannel::SetTransport_w, this, transport_name));
}

bool BaseChannel::SetTransport_w(const std::string& transport_name) {
  ......
  // Create the corresponding TransportChannel through the TransportController.
  // (TransportChannelImpl inherits TransportChannel, P2PTransportChannel
  // inherits TransportChannelImpl; P2PTransportChannel is the final
  // implementation.)
  set_transport_channel(transport_controller_->CreateTransportChannel_w(
      transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTP));
  if (!transport_channel()) {
    return false;
  }
  ......
}

void BaseChannel::set_transport_channel(TransportChannel* new_tc) {
  TransportChannel* old_tc = transport_channel_;
  if (old_tc) {
    // Disconnect the old channel's event listeners first,
    DisconnectFromTransportChannel(old_tc);
    // then destroy the unused channel to free resources.
    transport_controller_->DestroyTransportChannel_w(
        transport_name_, cricket::ICE_CANDIDATE_COMPONENT_RTP);
  }
  transport_channel_ = new_tc;
  if (new_tc) {
    // Hook up the event listeners.
    ConnectToTransportChannel(new_tc);
    for (const auto& pair : socket_options_) {
      new_tc->SetOption(pair.first, pair.second);
    }
  }
  // Tell the corresponding MediaChannel that the TransportChannel is ready.
  SetReadyToSend(false, new_tc && new_tc->writable());
}

P2PTransportChannel::SendPacket involves the libjingle P2P implementation and is not analyzed in depth here. Combining the analysis above with Figure 1 gives a good picture of the whole WebRTC audio sending framework.
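To summarize the send side: once the RTP module has a finished packet, it simply hands the raw bytes to whatever Transport was configured, and the chain WebRtcVoiceMediaChannel -> BaseChannel -> P2PTransportChannel is one particular Transport implementation sitting on top of libjingle. The sketch below shows that seam conceptually, with a plain UDP socket standing in for the libjingle/ICE path; apart from the idea of a SendRtp hook, the names and classes here are invented for illustration (POSIX sockets assumed).

#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdint>
#include <cstring>

// The seam the RTP sender talks to (same shape as a SendRtp-style Transport).
struct RtpTransport {
  virtual ~RtpTransport() = default;
  virtual bool SendRtp(const uint8_t* data, size_t len) = 0;
};

// A trivial transport that pushes packets straight out of a UDP socket,
// standing in for the ICE/P2P path used by the real stack.
class UdpTransport : public RtpTransport {
 public:
  UdpTransport(const char* ip, uint16_t port) {
    fd_ = socket(AF_INET, SOCK_DGRAM, 0);
    std::memset(&remote_, 0, sizeof(remote_));
    remote_.sin_family = AF_INET;
    remote_.sin_port = htons(port);
    inet_pton(AF_INET, ip, &remote_.sin_addr);
  }
  ~UdpTransport() override {
    if (fd_ >= 0) close(fd_);
  }

  bool SendRtp(const uint8_t* data, size_t len) override {
    return sendto(fd_, data, len, 0, reinterpret_cast<sockaddr*>(&remote_),
                  sizeof(remote_)) == static_cast<ssize_t>(len);
  }

 private:
  int fd_ = -1;
  sockaddr_in remote_{};
};

int main() {
  UdpTransport transport("127.0.0.1", 5004);
  const uint8_t fake_rtp[12] = {0x80};  // Header-only packet, just for the demo.
  return transport.SendRtp(fake_rtp, sizeof(fake_rtp)) ? 0 : 1;
}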

Voice Receiving and Playback Flow

Receiving

As shown by the yellow arrows in Figure 1, network data flows from libjingle into BaseChannel.

// VoiceChannel::Init() calls BaseChannel::Init()
// ---> BaseChannel::Init()
// ---> bool BaseChannel::SetTransport(const std::string& transport_name)
// ---> bool BaseChannel::SetTransport_w(const std::string& transport_name)
// ---> void BaseChannel::set_transport_channel(TransportChannel* new_tc)
// ---> void BaseChannel::ConnectToTransportChannel(TransportChannel* tc)
//
// In TransportChannel, every received packet fires the SignalReadPacket
// signal; classes communicate through these signals and slots.
void BaseChannel::ConnectToTransportChannel(TransportChannel* tc) {
  ASSERT(worker_thread_ == rtc::Thread::Current());
  tc->SignalWritableState.connect(this, &BaseChannel::OnWritableState);
  // Every packet received from libjingle triggers BaseChannel::OnChannelRead.
  tc->SignalReadPacket.connect(this, &BaseChannel::OnChannelRead);
  tc->SignalReadyToSend.connect(this, &BaseChannel::OnReadyToSend);
}

void BaseChannel::OnChannelRead(TransportChannel* channel, const char* data,
                                size_t len, const rtc::PacketTime& packet_time,
                                int flags) {
  // OnChannelRead gets called from P2PSocket; now pass data to MediaEngine
  ASSERT(worker_thread_ == rtc::Thread::Current());
  // When using RTCP multiplexing we might get RTCP packets on the RTP
  // transport. We feed RTP traffic into the demuxer to determine if it is RTCP.
  bool rtcp = PacketIsRtcp(channel, data, len);
  rtc::Buffer packet(data, len);
  HandlePacket(rtcp, &packet, packet_time);
}

void BaseChannel::HandlePacket(bool rtcp, rtc::Buffer* packet,
                               const rtc::PacketTime& packet_time) {
  ......
  // media_channel_ here is clearly the WebRtcVoiceMediaChannel.
  if (!rtcp) {
    // RTP packet.
    media_channel_->OnPacketReceived(packet, packet_time);
  } else {
    // RTCP packet.
    media_channel_->OnRtcpReceived(packet, packet_time);
  }
  ......
}
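When RTP and RTCP are multiplexed on one transport (RFC 5761), the receiver has to classify each packet before routing it, which is what PacketIsRtcp does above. The usual heuristic looks at the second header byte: RTCP packet types 192-223 occupy the same bit positions as RTP payload types 64-95, a range real audio/video payload types avoid. The sketch below shows that check in simplified form; it is not the exact WebRTC implementation.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Classify a multiplexed packet as RTCP or RTP (RFC 5761 heuristic):
// if the 7-bit payload-type field falls in 64..95, the byte is really an
// RTCP packet type in 192..223 (SR=200, RR=201, SDES=202, BYE=203, APP=204).
bool LooksLikeRtcp(const uint8_t* data, size_t len) {
  if (len < 2) return false;
  const uint8_t payload_type = data[1] & 0x7F;
  return payload_type >= 64 && payload_type < 96;
}

int main() {
  const uint8_t rtcp_sr[8] = {0x80, 200 /* SR */, 0x00, 0x06};
  const uint8_t rtp_audio[12] = {0x80, 111 /* dynamic PT */, 0x12, 0x34};
  assert(LooksLikeRtcp(rtcp_sr, sizeof(rtcp_sr)));
  assert(!LooksLikeRtcp(rtp_audio, sizeof(rtp_audio)));
  return 0;
}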

Depacketization

/* android/webrtc/src/talk/media/webrtc/webrtcvoiceengine.cc */
void WebRtcVoiceMediaChannel::OnPacketReceived(
    rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  // Forward packet to Call as well.
  const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp,
                                              packet_time.not_before);
  // Call::DeliverPacket ---> Call::DeliverRtp
  // ---> for audio, AudioReceiveStream::DeliverRtp, which estimates delay and
  //      the remote bitrate and updates the related state;
  // ---> for video, VideoReceiveStream::DeliverRtp.
  call_->Receiver()->DeliverPacket(
      webrtc::MediaType::AUDIO,
      reinterpret_cast<const uint8_t*>(packet->data()), packet->size(),
      webrtc_packet_time);

  // Pick which channel to send this packet to. If this packet doesn't match
  // any multiplexed streams, just send it to the default channel. Otherwise,
  // send it to the specific decoder instance for that stream.
  int which_channel =
      GetReceiveChannelNum(ParseSsrc(packet->data(), packet->size(), false));
  if (which_channel == -1) {
    which_channel = voe_channel();
  }

  // Pass it off to the decoder: depacketize and decode.
  engine()->voe()->network()->ReceivedRTPPacket(
      which_channel, packet->data(), packet->size(),
      webrtc::PacketTime(packet_time.timestamp, packet_time.not_before));
}

/* android/webrtc/src/webrtc/audio/audio_receive_stream.cc */
bool AudioReceiveStream::DeliverRtp(const uint8_t* packet, size_t length,
                                    const PacketTime& packet_time) {
  ......
  // Parse the RTP header.
  if (!rtp_header_parser_->Parse(packet, length, &header)) {
    return false;
  }
  ......
  // Estimate delay and bitrate.
  remote_bitrate_estimator_->IncomingPacket(arrival_time_ms, payload_size,
                                            header, false);
}

/* android/webrtc/src/webrtc/voice_engine/voe_network_impl.cc */
int VoENetworkImpl::ReceivedRTPPacket(int channel, const void* data,
                                      size_t length,
                                      const PacketTime& packet_time) {
  ......
  // As analyzed earlier, channelPtr here is the Channel implemented in
  // android/webrtc/src/webrtc/voice_engine/channel.cc.
  return channelPtr->ReceivedRTPPacket((const int8_t*)data, length,
                                       packet_time);
}

/* android/webrtc/src/webrtc/voice_engine/channel.cc */
int32_t Channel::ReceivedRTPPacket(const int8_t* data, size_t length,
                                   const PacketTime& packet_time) {
  ......
  const uint8_t* received_packet = reinterpret_cast<const uint8_t*>(data);
  RTPHeader header;
  // Parse the RTP header.
  if (!rtp_header_parser_->Parse(received_packet, length, &header)) {
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
                 "Incoming packet: invalid RTP header");
    return -1;
  }
  ......
  // Depacketize and decode.
  return ReceivePacket(received_packet, length, header, in_order) ? 0 : -1;
}

bool Channel::ReceivePacket(const uint8_t* packet, size_t packet_length,
                            const RTPHeader& header, bool in_order) {
  ......
  const uint8_t* payload = packet + header.headerLength;
  ......
  // Hand the payload to the receiver, which feeds the decoder.
  return rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
                                          payload_specific, in_order);
}

Decoding

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc */
bool RtpReceiverImpl::IncomingRtpPacket(const RTPHeader& rtp_header,
                                        const uint8_t* payload,
                                        size_t payload_length,
                                        PayloadUnion payload_specific,
                                        bool in_order) {
  // Trigger our callbacks.
  CheckSSRCChanged(rtp_header);
  ......
  // Hand the payload to the media-specific receiver, which feeds the decoder
  // through a callback.
  int32_t ret_val = rtp_media_receiver_->ParseRtpPacket(
      &webrtc_rtp_header, payload_specific, is_red, payload, payload_length,
      clock_->TimeInMilliseconds(), is_first_packet_in_frame);
}

/* android/webrtc/src/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.cc */
int32_t RTPReceiverAudio::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
                                         const PayloadUnion& specific_payload,
                                         bool is_red, const uint8_t* payload,
                                         size_t payload_length,
                                         int64_t timestamp_ms,
                                         bool is_first_packet) {
  ......
  return ParseAudioCodecSpecific(rtp_header, payload, payload_length,
                                 specific_payload.Audio, is_red);
}

int32_t RTPReceiverAudio::ParseAudioCodecSpecific(
    WebRtcRTPHeader* rtp_header, const uint8_t* payload_data,
    size_t payload_length, const AudioPayload& audio_specific, bool is_red) {
  // Handle DTMF (telephone-event) payloads.
  bool telephone_event_packet =
      TelephoneEventPayloadType(rtp_header->header.payloadType);
  if (telephone_event_packet) {
    CriticalSectionScoped lock(crit_sect_.get());

    // RFC 4733 2.3
    // 0                   1                   2                   3
    // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |     event     |E|R| volume    |          duration             |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    if (payload_length % 4 != 0) {
      return -1;
    }
    size_t number_of_events = payload_length / 4;

    // sanity
    if (number_of_events >= MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS) {
      number_of_events = MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS;
    }
    for (size_t n = 0; n < number_of_events; ++n) {
      bool end = (payload_data[(4 * n) + 1] & 0x80) ? true : false;
      std::set<uint8_t>::iterator event =
          telephone_event_reported_.find(payload_data[4 * n]);
      if (event != telephone_event_reported_.end()) {
        // we have already seen this event
        if (end) {
          telephone_event_reported_.erase(payload_data[4 * n]);
        }
      } else {
        if (end) {
          // don't add if it's a end of a tone
        } else {
          telephone_event_reported_.insert(payload_data[4 * n]);
        }
      }
    }
  ......

  // Feed the decoder.
  // TODO(holmer): Break this out to have RED parsing handled generically.
  if (is_red && !(payload_data[0] & 0x80)) {
    // we recive only one frame packed in a RED packet remove the RED wrapper
    rtp_header->header.payloadType = payload_data[0];
    // only one frame in the RED strip the one byte to help NetEq
    return data_callback_->OnReceivedPayloadData(
        payload_data + 1, payload_length - 1, rtp_header);
  }
  rtp_header->type.Audio.channel = audio_specific.channels;
  // data_callback_ is of type RtpData and is implemented by Channel.
  return data_callback_->OnReceivedPayloadData(payload_data, payload_length,
                                               rtp_header);
}

/* android/webrtc/src/webrtc/voice_engine/channel.cc */
int32_t Channel::OnReceivedPayloadData(const uint8_t* payloadData,
                                       size_t payloadSize,
                                       const WebRtcRTPHeader* rtpHeader) {
  ......
  if (audio_coding_->IncomingPacket(payloadData, payloadSize, *rtpHeader) != 0) {
    _engineStatisticsPtr->SetLastError(
        VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
        "Channel::OnReceivedPayloadData() unable to push data to the ACM");
    return -1;
  }
  ......
}

/* android/webrtc/src/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc */
int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
                                          const size_t payload_length,
                                          const WebRtcRTPHeader& rtp_header) {
  return receiver_.InsertPacket(rtp_header, incoming_payload, payload_length);
}

/* android/webrtc/src/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc */
int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
                              const uint8_t* incoming_payload,
                              size_t length_payload) {
  ......
  // Pick a suitable decoder from those managed by DecoderDatabase, based on
  // the RTP header.
  const Decoder* decoder = RtpHeaderToDecoder(*header, incoming_payload);
  .....
  // Synchronization-related handling.
  ......
  // NetEq (android/webrtc/src/webrtc/modules/audio_coding/neteq/neteq_impl.cc)
  // was GIPS' core audio processing technology, later acquired by Google; see
  // dedicated NetEq write-ups for details.
  if (neteq_->InsertPacket(rtp_header, incoming_payload, length_payload,
                           receive_timestamp) < 0) {
    LOG(LERROR) << "AcmReceiver::InsertPacket "
                << static_cast<int>(header->payloadType)
                << " Failed to insert packet";
    return -1;
  }
}

/* android/webrtc/src/webrtc/modules/audio_coding/neteq/neteq_impl.cc */
int NetEqImpl::InsertPacket(const WebRtcRTPHeader& rtp_header,
                            const uint8_t* payload, size_t length_bytes,
                            uint32_t receive_timestamp) {
  ......
  int error = InsertPacketInternal(rtp_header, payload, length_bytes,
                                   receive_timestamp, false);
  ......
}

int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
                                    const uint8_t* payload, size_t length_bytes,
                                    uint32_t receive_timestamp,
                                    bool is_sync_packet) {
  ......
  // Wrap the data in a Packet managed through a PacketList.
  PacketList packet_list;
  RTPHeader main_header;
  {
    // Convert to Packet.
    // Create |packet| within this separate scope, since it should not be used
    // directly once it's been inserted in the packet list. This way, |packet|
    // is not defined outside of this block.
    Packet* packet = new Packet;
    packet->header.markerBit = false;
    packet->header.payloadType = rtp_header.header.payloadType;
    packet->header.sequenceNumber = rtp_header.header.sequenceNumber;
    packet->header.timestamp = rtp_header.header.timestamp;
    packet->header.ssrc = rtp_header.header.ssrc;
    packet->header.numCSRCs = 0;
    packet->payload_length = length_bytes;
    packet->primary = true;
    packet->waiting_time = 0;
    packet->payload = new uint8_t[packet->payload_length];
    packet->sync_packet = is_sync_packet;
    if (!packet->payload) {
      LOG_F(LS_ERROR) << "Payload pointer is NULL.";
    }
    assert(payload);  // Already checked above.
    memcpy(packet->payload, payload, packet->payload_length);
    // Insert packet in a packet list.
    packet_list.push_back(packet);
    // Save main payloads header for later.
    memcpy(&main_header, &packet->header, sizeof(main_header));
  }

  // Handle DTMF-related events: parse them and put them into the DtmfEvent queue.
  PacketList::iterator it = packet_list.begin();
  while (it != packet_list.end()) {
    Packet* current_packet = (*it);
    assert(current_packet);
    assert(current_packet->payload);
    if (decoder_database_->IsDtmf(current_packet->header.payloadType)) {
      assert(!current_packet->sync_packet);  // We had a sanity check for this.
      DtmfEvent event;
      int ret = DtmfBuffer::ParseEvent(current_packet->header.timestamp,
                                       current_packet->payload,
                                       current_packet->payload_length, &event);
      if (ret != DtmfBuffer::kOK) {
        PacketBuffer::DeleteAllPackets(&packet_list);
        return kDtmfParsingError;
      }
      if (dtmf_buffer_->InsertEvent(event) != DtmfBuffer::kOK) {
        PacketBuffer::DeleteAllPackets(&packet_list);
        return kDtmfInsertError;
      }
      // TODO(hlundin): Let the destructor of Packet handle the payload.
      delete[] current_packet->payload;
      delete current_packet;
      it = packet_list.erase(it);
    } else {
      ++it;
    }
  }
  ......

  // Update bandwidth estimate, if the packet is not sync-packet.
  if (!packet_list.empty() && !packet_list.front()->sync_packet) {
    // The list can be empty here if we got nothing but DTMF payloads.
    AudioDecoder* decoder =
        decoder_database_->GetDecoder(main_header.payloadType);
    assert(decoder);  // Should always get a valid object, since we have
                      // already checked that the payload types are known.
    // (Most concrete decoders do not appear to implement IncomingPacket.)
    decoder->IncomingPacket(packet_list.front()->payload,
                            packet_list.front()->payload_length,
                            packet_list.front()->header.sequenceNumber,
                            packet_list.front()->header.timestamp,
                            receive_timestamp);

    // Put the packets that need decoding into the PacketBuffer list.
    const size_t buffer_length_before_insert =
        packet_buffer_->NumPacketsInBuffer();
    ret = packet_buffer_->InsertPacketList(&packet_list, *decoder_database_,
                                           &current_rtp_payload_type_,
                                           &current_cng_rtp_payload_type_);
    if (ret == PacketBuffer::kFlushed) {
      // Reset DSP timestamp etc. if packet buffer flushed.
      new_codec_ = true;
      update_sample_rate_and_channels = true;
    } else if (ret != PacketBuffer::kOK) {
      PacketBuffer::DeleteAllPackets(&packet_list);
      return kOtherError;
    }
  ......
}

PCM data is then retrieved through NetEqImpl::GetAudio:

int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
                        size_t* samples_per_channel, int* num_channels,
                        NetEqOutputType* type) {
  ......
  int error = GetAudioInternal(max_length, output_audio, samples_per_channel,
                               num_channels);
  ......
}

int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
                                size_t* samples_per_channel,
                                int* num_channels) {
  ......
  // Decode.
  int decode_return_value =
      Decode(&packet_list, &operation, &length, &speech_type);
  ......
}

int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
                      int* decoded_length,
                      AudioDecoder::SpeechType* speech_type) {
  ......
  // Get the currently active decoder.
  AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
  ......
  // Decode.
  if (*operation == kCodecInternalCng) {
    RTC_DCHECK(packet_list->empty());
    return_value = DecodeCng(decoder, decoded_length, speech_type);
  } else {
    return_value = DecodeLoop(packet_list, *operation, decoder, decoded_length,
                              speech_type);
  }
  ......
}

Ultimately, what GetAudio returns is PCM data.

Playback

org/webrtc/voiceengine/WebRtcAudioTrack.java uses its AudioTrackThread playback thread to keep fetching PCM data from the native layer and feeds it into AudioTrack for playback.

nativeGetPlayoutData (WebRtcAudioTrack.java)
--> AudioTrackJni::GetPlayoutData (audio_track_jni.cc)
--> AudioTrackJni::OnGetPlayoutData(size_t length) (audio_track_jni.cc)

void AudioTrackJni::OnGetPlayoutData(size_t length) {
  ......
  // Pull decoded data (in 16-bit PCM format) from jitter buffer.
  int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
  if (samples <= 0) {
    ALOGE("AudioDeviceBuffer::RequestPlayoutData failed!");
    return;
  }
  RTC_DCHECK_EQ(static_cast<size_t>(samples), frames_per_buffer_);
  // Copy decoded data into common byte buffer to ensure that it can be
  // written to the Java based audio track.
  samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
  ......
}

int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
  ......
  if (_ptrCbAudioTransport) {
    uint32_t res(0);
    int64_t elapsed_time_ms = -1;
    int64_t ntp_time_ms = -1;
    res = _ptrCbAudioTransport->NeedMorePlayData(
        _playSamples, playBytesPerSample, playChannels, playSampleRate,
        &_playBuffer[0], nSamplesOut, &elapsed_time_ms, &ntp_time_ms);
    if (res != 0) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   "NeedMorePlayData() failed");
    }
  }
  ......
}
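The playout side is pull-based: the platform playback thread asks the engine for the next 10 ms of PCM right before writing it to the device, rather than the engine pushing audio down. A bare-bones sketch of such a pull loop in C++ follows; the real loop lives in WebRtcAudioTrack.java's AudioTrackThread plus audio_track_jni.cc, and every name here is illustrative.

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>
#include <vector>

// Engine-side source of decoded audio (the role VoEBaseImpl/NetEq play).
struct PlayoutSource {
  virtual ~PlayoutSource() = default;
  // Fill |buffer| with samples_per_channel * channels int16 samples.
  virtual void GetPlayoutData(int16_t* buffer, size_t samples_per_channel,
                              size_t channels) = 0;
};

struct SilenceSource : PlayoutSource {
  void GetPlayoutData(int16_t* buffer, size_t samples_per_channel,
                      size_t channels) override {
    std::fill(buffer, buffer + samples_per_channel * channels, 0);
  }
};

// Device-side pull loop: every 10 ms, ask the source for a frame and "write"
// it to the audio device (here we only count frames).
void PlayoutLoop(PlayoutSource* source, uint32_t sample_rate_hz,
                 size_t channels, int frames_to_play) {
  const size_t samples_per_channel = sample_rate_hz / 100;  // 10 ms.
  std::vector<int16_t> frame(samples_per_channel * channels);
  for (int i = 0; i < frames_to_play; ++i) {
    source->GetPlayoutData(frame.data(), samples_per_channel, channels);
    // AudioTrack.write(...) would go here in the real Java thread.
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
  std::cout << "played " << frames_to_play << " frames of "
            << samples_per_channel << " samples\n";
}

int main() {
  SilenceSource source;
  PlayoutLoop(&source, 48000, 2, 5);
}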

AudioTransport is implemented by VoEBaseImpl; see the earlier analysis for how it is registered.

/* android/webrtc/src/webrtc/voice_engine/voe_base_impl.cc */
int32_t VoEBaseImpl::NeedMorePlayData(size_t nSamples, size_t nBytesPerSample,
                                      uint8_t nChannels, uint32_t samplesPerSec,
                                      void* audioSamples, size_t& nSamplesOut,
                                      int64_t* elapsed_time_ms,
                                      int64_t* ntp_time_ms) {
  GetPlayoutData(static_cast<int>(samplesPerSec), static_cast<int>(nChannels),
                 nSamples, true, audioSamples, elapsed_time_ms, ntp_time_ms);
  nSamplesOut = audioFrame_.samples_per_channel_;
  return 0;
}

void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
                                 size_t number_of_frames,
                                 bool feed_data_to_apm, void* audio_data,
                                 int64_t* elapsed_time_ms,
                                 int64_t* ntp_time_ms) {
  // Pull data from all active channels, mix it, and resample it.
  shared_->output_mixer()->MixActiveChannels();

  // Additional operations on the combined signal
  shared_->output_mixer()->DoOperationsOnCombinedSignal(feed_data_to_apm);

  // Retrieve the final output mix (resampled to match the ADM)
  shared_->output_mixer()->GetMixedAudio(sample_rate, number_of_channels,
                                         &audioFrame_);

  // Copy the PCM data out.
  memcpy(audio_data, audioFrame_.data_,
         sizeof(int16_t) * number_of_frames * number_of_channels);
}

shared_->output_mixer()->MixActiveChannels() pulls PCM data from the decoders through each channel:

/* android/webrtc/src/webrtc/voice_engine/output_mixer.cc */
int32_t OutputMixer::MixActiveChannels() {
  return _mixerModule.Process();
}

/* android/webrtc/src/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc */
int32_t AudioConferenceMixerImpl::Process() {
  ......
  UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
              &remainingParticipantsAllowedToMix);
  ......
}

void AudioConferenceMixerImpl::UpdateToMix(
    AudioFrameList* mixList, AudioFrameList* rampOutList,
    std::map<int, MixerParticipant*>* mixParticipantList,
    size_t* maxAudioFrameCounter) {
  ......
  for (MixerParticipantList::const_iterator participant =
           _participantList.begin();
       participant != _participantList.end(); ++participant) {
    ......
    // Fetch PCM data from each MixerParticipant; MixerParticipant is
    // implemented by Channel.
    if ((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "failed to GetAudioFrame() from participant");
      _audioFramePool->PushMemory(audioFrame);
      continue;
      ......
    }
  }
  ......
}

/* android/webrtc/src/webrtc/voice_engine/channel.cc */
int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame) {
  ......
  // Get decoded PCM data from the AudioCodingModule.
  if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_, audioFrame) ==
      -1) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
    // In all likelihood, the audio in this frame is garbage. We return an
    // error so that the audio mixer module doesn't add it to the mix. As
    // a result, it won't be played out and the actions skipped here are
    // irrelevant.
    return -1;
  }
  ......
}

/* android/webrtc/src/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc */
int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
                                           AudioFrame* audio_frame) {
  // GetAudio always returns 10 ms, at the requested sample rate.
  if (receiver_.GetAudio(desired_freq_hz, audio_frame) != 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "PlayoutData failed, RecOut Failed");
    return -1;
  }
  audio_frame->id_ = id_;
  return 0;
}

/* android/webrtc/src/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc */
int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
  ......
  // As the decoding analysis showed, this pulls compressed audio from the
  // buffer, decodes it, and hands back the result.
  if (neteq_->GetAudio(AudioFrame::kMaxDataSizeSamples, audio_buffer_.get(),
                       &samples_per_channel, &num_channels, &type) !=
      NetEq::kOK) {
    LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
    return -1;
  }
  ......
}
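Conceptually, what the conference mixer does with the per-channel frames it collects is sum the int16 samples and clamp the result back into the int16 range. The minimal mixing sketch below shows just that; the real AudioConferenceMixer additionally ramps levels, limits how many participants are mixed, and so on.

#include <cassert>
#include <cstdint>
#include <vector>

// Mix several equal-length 16-bit PCM frames into one, with saturation.
std::vector<int16_t> MixFrames(const std::vector<std::vector<int16_t>>& frames) {
  if (frames.empty()) return {};
  std::vector<int16_t> mixed(frames[0].size(), 0);
  for (size_t i = 0; i < mixed.size(); ++i) {
    int32_t sum = 0;
    for (const auto& frame : frames) sum += frame[i];
    if (sum > INT16_MAX) sum = INT16_MAX;  // Clamp instead of wrapping around.
    if (sum < INT16_MIN) sum = INT16_MIN;
    mixed[i] = static_cast<int16_t>(sum);
  }
  return mixed;
}

int main() {
  std::vector<int16_t> a = {1000, 20000, -30000};
  std::vector<int16_t> b = {500, 20000, -10000};
  std::vector<int16_t> mixed = MixFrames({a, b});
  assert(mixed[0] == 1500);
  assert(mixed[1] == 32767);   // Saturated high.
  assert(mixed[2] == -32768);  // Saturated low.
  return 0;
}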

The analysis above shows that WebRTC's layering is very clear. Combined with Figure 1 and the relevant source code, the overall audio framework is straightforward to understand.

