Created
March 10, 2017 00:22
-
-
Save mysteryjeans/dfddbf73ab232fd3ef17c51d3b38433d to your computer and use it in GitHub Desktop.
WebRTC AudioDeviceModule implementation for a custom audio source
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* | |
* libjingle | |
* Copyright 2012, Google Inc. | |
* | |
* Redistribution and use in source and binary forms, with or without | |
* modification, are permitted provided that the following conditions are met: | |
* | |
* 1. Redistributions of source code must retain the above copyright notice, | |
* this list of conditions and the following disclaimer. | |
* 2. Redistributions in binary form must reproduce the above copyright notice, | |
* this list of conditions and the following disclaimer in the documentation | |
* and/or other materials provided with the distribution. | |
* 3. The name of the author may not be used to endorse or promote products | |
* derived from this software without specific prior written permission. | |
* | |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | |
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | |
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO | |
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | |
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | |
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | |
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
*/ | |
#include "audiocapturemodule.h" | |
#include "webrtc/base/common.h" | |
#include "webrtc/base/refcount.h" | |
#include "webrtc/base/thread.h" | |
#include "webrtc/base/timeutils.h" | |
namespace nr_webrtc | |
{ | |
// Audio sample value that is high enough that it doesn't occur naturally when
// frames are being faked. E.g. NetEq will not generate this large sample value
// unless it has received an audio frame containing a sample of this value.
// Even simpler buffers would likely just contain audio sample values of 0.
static const int kHighSampleValue = 10000;
// Same value as src/modules/audio_device/main/source/audio_device_config.h in
// https://code.google.com/p/webrtc/
static const uint32_t kAdmMaxIdleTimeProcess = 1000;
// Constants here are derived by running VoE using a real ADM.
// The constants correspond to 10ms of mono audio at 8000 Hz.
static const int kTimePerFrameMs = 10;
static const int kNumberOfChannels = 1;
static const int kSamplesPerSecond = 8000;
// Fixed delay/drift reported to the audio transport: there is no real
// device, so both are zero.
static const int kTotalDelayMs = 0;
static const int kClockDriftMs = 0;
// Maximum microphone level reported back through the ADM volume API.
static const uint32_t kMaxVolume = 14392;
// Message ids for the (currently commented-out) process-thread loop.
enum {
MSG_RUN_PROCESS,
MSG_STOP_PROCESS,
};
// Constructs the module in a fully idle state: no audio callback registered,
// playout/recording neither initialized nor started, and the mic level set
// to its maximum (kMaxVolume).
AudioCaptureModule::AudioCaptureModule() :
last_process_time_ms_(0),
audio_callback_(NULL),
recording_(false),
playing_(false),
play_is_initialized_(false),
rec_is_initialized_(false),
current_mic_level_(kMaxVolume),
started_(false),
next_frame_time_(0),
frames_received_(0) {
}
// Members own no external resources, so there is nothing to release here.
AudioCaptureModule::~AudioCaptureModule() {
}
// Factory: builds a reference-counted AudioCaptureModule and runs its
// one-time Initialize() step. Returns NULL when initialization fails.
rtc::scoped_refptr<AudioCaptureModule> AudioCaptureModule::Create() {
rtc::scoped_refptr<AudioCaptureModule> module(
new rtc::RefCountedObject<AudioCaptureModule>());
if (module->Initialize()) {
return module;
}
return NULL;
}
// Number of genuine (non-faked) frames detected by CheckRecBuffer(). The
// only increment lives in the commented-out ReceiveFrameP(), so with the
// process loop disabled this currently stays at 0 after construction.
int AudioCaptureModule::frames_received() const {
return frames_received_;
}
// Legacy version query; not expected to be called by PeerConnection.
int32_t AudioCaptureModule::Version(char* /*version*/,
uint32_t& /*remaining_buffer_in_bytes*/,
uint32_t& /*position*/) const {
ASSERT(false);
return 0;
}
// Milliseconds remaining until Process() should run again, derived from
// kAdmMaxIdleTimeProcess and the time Process() last executed.
int64_t AudioCaptureModule::TimeUntilNextProcess() {
const uint64_t now = rtc::TimeMillis();
if (now < last_process_time_ms_) {
// TODO: wraparound could be handled more gracefully.
return 0;
}
const uint64_t elapsed = now - last_process_time_ms_;
// Once the idle budget is used up, Process() is due immediately.
return (elapsed >= kAdmMaxIdleTimeProcess)
? 0
: static_cast<int64_t>(kAdmMaxIdleTimeProcess - elapsed);
}
// Records the wall-clock time of this call; consumed by
// TimeUntilNextProcess() to schedule the next invocation.
void AudioCaptureModule::Process() {
last_process_time_ms_ = rtc::TimeMillis();
}
// Not expected to be called by PeerConnection; asserts if it is.
int32_t AudioCaptureModule::ActiveAudioLayer(
AudioLayer* /*audio_layer*/) const {
ASSERT(false);
return 0;
}
// Not expected to be called; reports "no error" if it ever is.
webrtc::AudioDeviceModule::ErrorCode AudioCaptureModule::LastError() const {
ASSERT(false);
return webrtc::AudioDeviceModule::kAdmErrNone;
}
int32_t AudioCaptureModule::RegisterEventObserver(
webrtc::AudioDeviceObserver* /*event_callback*/) {
// Only used to report warnings and errors. This fake implementation won't
// generate any so discard this callback.
return 0;
}
// Stores the sink that PushFrame() will deliver captured audio to.
// NOTE(review): no synchronization here -- presumably set once before
// streaming starts; confirm against the caller.
int32_t AudioCaptureModule::RegisterAudioCallback(
webrtc::AudioTransport* audio_callback) {
audio_callback_ = audio_callback;
return 0;
}
int32_t AudioCaptureModule::Init() {
// Initialize is called by the factory method. Safe to ignore this Init call.
return 0;
}
int32_t AudioCaptureModule::Terminate() {
// Clean up in the destructor. No action here, just success.
return 0;
}
bool AudioCaptureModule::Initialized() const {
ASSERT(false);
return 0;
}
// Device enumeration is meaningless without hardware; asserts if called.
int16_t AudioCaptureModule::PlayoutDevices() {
ASSERT(false);
return 0;
}
int16_t AudioCaptureModule::RecordingDevices() {
ASSERT(false);
return 0;
}
// Device-name queries are not expected without real hardware; assert.
int32_t AudioCaptureModule::PlayoutDeviceName(
uint16_t /*index*/,
char /*name*/[webrtc::kAdmMaxDeviceNameSize],
char /*guid*/[webrtc::kAdmMaxGuidSize]) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::RecordingDeviceName(
uint16_t /*index*/,
char /*name*/[webrtc::kAdmMaxDeviceNameSize],
char /*guid*/[webrtc::kAdmMaxGuidSize]) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SetPlayoutDevice(uint16_t /*index*/) {
// No playout device, just playing from file. Return success.
return 0;
}
// Device selection must happen before InitPlayout(); fails afterwards.
int32_t AudioCaptureModule::SetPlayoutDevice(WindowsDeviceType /*device*/) {
if (play_is_initialized_) {
return -1;
}
return 0;
}
int32_t AudioCaptureModule::SetRecordingDevice(uint16_t /*index*/) {
// No recording device, just dropping audio. Return success.
return 0;
}
// Device selection must happen before InitRecording(); fails afterwards.
int32_t AudioCaptureModule::SetRecordingDevice(
WindowsDeviceType /*device*/) {
if (rec_is_initialized_) {
return -1;
}
return 0;
}
int32_t AudioCaptureModule::PlayoutIsAvailable(bool* /*available*/) {
ASSERT(false);
return 0;
}
// "Initialization" is just a flag flip; there is no device to open.
int32_t AudioCaptureModule::InitPlayout() {
play_is_initialized_ = true;
return 0;
}
bool AudioCaptureModule::PlayoutIsInitialized() const {
return play_is_initialized_;
}
int32_t AudioCaptureModule::RecordingIsAvailable(bool* /*available*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::InitRecording() {
rec_is_initialized_ = true;
return 0;
}
bool AudioCaptureModule::RecordingIsInitialized() const {
return rec_is_initialized_;
}
// Start/stop only toggle state flags; the periodic frame pump that used to
// run on a process thread (UpdateProcessing) is commented out because audio
// is pushed in externally via PushFrame().
int32_t AudioCaptureModule::StartPlayout() {
if (!play_is_initialized_) {
return -1;
}
playing_ = true;
//UpdateProcessing();
return 0;
}
int32_t AudioCaptureModule::StopPlayout() {
playing_ = false;
//UpdateProcessing();
return 0;
}
bool AudioCaptureModule::Playing() const {
return playing_;
}
// Recording requires prior InitRecording(); otherwise fails with -1.
int32_t AudioCaptureModule::StartRecording() {
if (!rec_is_initialized_) {
return -1;
}
recording_ = true;
//UpdateProcessing();
return 0;
}
int32_t AudioCaptureModule::StopRecording() {
recording_ = false;
//UpdateProcessing();
return 0;
}
bool AudioCaptureModule::Recording() const {
return recording_;
}
int32_t AudioCaptureModule::SetAGC(bool /*enable*/) {
// No AGC but not needed since audio is pregenerated. Return success.
return 0;
}
bool AudioCaptureModule::AGC() const {
ASSERT(false);
return 0;
}
// Wave-out volume control is not expected to be called; assert.
int32_t AudioCaptureModule::SetWaveOutVolume(uint16_t /*volume_left*/,
uint16_t /*volume_right*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::WaveOutVolume(
uint16_t* /*volume_left*/,
uint16_t* /*volume_right*/) const {
ASSERT(false);
return 0;
}
// Speaker/microphone are always reported available so initialization can
// proceed even though no real hardware exists.
int32_t AudioCaptureModule::SpeakerIsAvailable(bool* available) {
// No speaker, just dropping audio. Return success.
*available = true;
return 0;
}
int32_t AudioCaptureModule::InitSpeaker() {
// No speaker, just playing from file. Return success.
return 0;
}
bool AudioCaptureModule::SpeakerIsInitialized() const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MicrophoneIsAvailable(bool* available) {
// No microphone, just playing from file. Return success.
*available = true;
return 0;
}
int32_t AudioCaptureModule::InitMicrophone() {
// No microphone, just playing from file. Return success.
return 0;
}
bool AudioCaptureModule::MicrophoneIsInitialized() const {
ASSERT(false);
return 0;
}
// Speaker volume control is not used by PeerConnection; assert if called.
int32_t AudioCaptureModule::SpeakerVolumeIsAvailable(bool* /*available*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SetSpeakerVolume(uint32_t /*volume*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SpeakerVolume(uint32_t* /*volume*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MaxSpeakerVolume(
uint32_t* /*max_volume*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MinSpeakerVolume(
uint32_t* /*min_volume*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SpeakerVolumeStepSize(
uint16_t* /*step_size*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MicrophoneVolumeIsAvailable(
bool* /*available*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SetMicrophoneVolume(uint32_t /*volume*/) {
ASSERT(false);
return 0;
}
// Microphone level is tracked in current_mic_level_ (also fed back through
// RecordedDataIsAvailable in PushFrame); these getters expose it.
int32_t AudioCaptureModule::MicrophoneVolume(uint32_t* volume) const {
*volume = current_mic_level_;
return 0;
}
int32_t AudioCaptureModule::MaxMicrophoneVolume(
uint32_t* max_volume) const {
*max_volume = kMaxVolume;
return 0;
}
int32_t AudioCaptureModule::MinMicrophoneVolume(
uint32_t* /*min_volume*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MicrophoneVolumeStepSize(
uint16_t* /*step_size*/) const {
ASSERT(false);
return 0;
}
// Mute and boost controls are not used by PeerConnection; every entry point
// asserts if it is ever reached.
int32_t AudioCaptureModule::SpeakerMuteIsAvailable(bool* /*available*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SetSpeakerMute(bool /*enable*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SpeakerMute(bool* /*enabled*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MicrophoneMuteIsAvailable(bool* /*available*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SetMicrophoneMute(bool /*enable*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MicrophoneMute(bool* /*enabled*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MicrophoneBoostIsAvailable(
bool* /*available*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SetMicrophoneBoost(bool /*enable*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::MicrophoneBoost(bool* /*enabled*/) const {
ASSERT(false);
return 0;
}
// Stereo playout is reported available (dropped audio can be any layout);
// stereo recording is not supported -- capture is mono only.
int32_t AudioCaptureModule::StereoPlayoutIsAvailable(
bool* available) const {
// No recording device, just dropping audio. Stereo can be dropped just
// as easily as mono.
*available = true;
return 0;
}
int32_t AudioCaptureModule::SetStereoPlayout(bool /*enable*/) {
// No recording device, just dropping audio. Stereo can be dropped just
// as easily as mono.
return 0;
}
int32_t AudioCaptureModule::StereoPlayout(bool* /*enabled*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::StereoRecordingIsAvailable(
bool* available) const {
// Keep thing simple. No stereo recording.
*available = false;
return 0;
}
// Capture is mono only: enabling stereo is rejected (-1), while disabling
// it is a successful no-op.
int32_t AudioCaptureModule::SetStereoRecording(bool enable) {
return enable ? -1 : 0;
}
int32_t AudioCaptureModule::StereoRecording(bool* /*enabled*/) const {
ASSERT(false);
return 0;
}
// Only kChannelBoth is accepted: mono capture has no separate left/right.
int32_t AudioCaptureModule::SetRecordingChannel(
const ChannelType channel) {
if (channel != AudioDeviceModule::kChannelBoth) {
// There is no right or left in mono. I.e. kChannelBoth should be used for
// mono.
ASSERT(false);
return -1;
}
return 0;
}
int32_t AudioCaptureModule::RecordingChannel(ChannelType* channel) const {
// Stereo recording not supported. However, WebRTC ADM returns kChannelBoth
// in that case. Do the same here.
*channel = AudioDeviceModule::kChannelBoth;
return 0;
}
// Playout-buffer tuning and raw-PCM file dumping are not expected to be
// used with this fake device; all entry points assert.
int32_t AudioCaptureModule::SetPlayoutBuffer(const BufferType /*type*/,
uint16_t /*size_ms*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::PlayoutBuffer(BufferType* /*type*/,
uint16_t* /*size_ms*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::PlayoutDelay(uint16_t* delay_ms) const {
// No delay since audio frames are dropped.
*delay_ms = 0;
return 0;
}
int32_t AudioCaptureModule::RecordingDelay(uint16_t* /*delay_ms*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::CPULoad(uint16_t* /*load*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::StartRawOutputFileRecording(
const char /*pcm_file_name_utf8*/[webrtc::kAdmMaxFileNameSize]) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::StopRawOutputFileRecording() {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::StartRawInputFileRecording(
const char /*pcm_file_name_utf8*/[webrtc::kAdmMaxFileNameSize]) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::StopRawInputFileRecording() {
ASSERT(false);
return 0;
}
// Sample-rate overrides, device reset and the mobile loudspeaker switch are
// unused by PeerConnection; assert on any call.
int32_t AudioCaptureModule::SetRecordingSampleRate(
const uint32_t /*samples_per_sec*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::RecordingSampleRate(
uint32_t* /*samples_per_sec*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SetPlayoutSampleRate(
const uint32_t /*samples_per_sec*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::PlayoutSampleRate(
uint32_t* /*samples_per_sec*/) const {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::ResetAudioDevice() {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::SetLoudspeakerStatus(bool /*enable*/) {
ASSERT(false);
return 0;
}
int32_t AudioCaptureModule::GetLoudspeakerStatus(bool* /*enabled*/) const {
ASSERT(false);
return 0;
}
//void AudioCaptureModule::OnMessage(rtc::Message* msg) { | |
// switch (msg->message_id) { | |
// case MSG_RUN_PROCESS: | |
// ProcessFrameP(); | |
// break; | |
// case MSG_STOP_PROCESS: | |
// StopProcessP(); | |
// break; | |
// default: | |
// // All existing messages should be caught. Getting here should never | |
// // happen. | |
// ASSERT(false); | |
// } | |
//} | |
// One-time setup invoked by Create(): seeds the Process() timestamp so
// TimeUntilNextProcess() has a sane reference point. The send-buffer
// priming is disabled along with the rest of the internal frame pump.
bool AudioCaptureModule::Initialize() {
// Set the send buffer samples high enough that it would not occur on the
// remote side unless a packet containing a sample of that magnitude has been
// sent to it. Note that the audio processing pipeline will likely distort the
// original signal.
//SetSendBuffer(kHighSampleValue);
last_process_time_ms_ = rtc::TimeMillis();
return true;
}
// Zeroes rec_buffer_ so stale samples can never count as received audio in
// CheckRecBuffer().
void AudioCaptureModule::ResetRecBuffer() {
memset(rec_buffer_, 0, sizeof(rec_buffer_));
}
bool AudioCaptureModule::CheckRecBuffer(int value) { | |
const Sample* buffer_ptr = reinterpret_cast<const Sample*>(rec_buffer_); | |
const int buffer_size_in_samples = sizeof(rec_buffer_) / | |
kNumberBytesPerSample; | |
for (int i = 0; i < buffer_size_in_samples; ++i) { | |
if (buffer_ptr[i] >= value) return true; | |
} | |
return false; | |
} | |
void AudioCaptureModule::PushFrame(const void* audio_data, | |
int bits_per_sample, | |
int sample_rate, | |
size_t number_of_channels, | |
size_t number_of_frames) | |
{ | |
if (!audio_callback_) return; | |
if (audio_callback_->RecordedDataIsAvailable( | |
audio_data, | |
number_of_frames, | |
bits_per_sample / 8, | |
number_of_channels, | |
sample_rate, 0, | |
kClockDriftMs, current_mic_level_, false, | |
current_mic_level_) != 0) | |
ASSERT(false); | |
} | |
//void AudioCaptureModule::UpdateProcessing() { | |
// const bool process = recording_ || playing_; | |
// if (process) { | |
// if (started_) { | |
// // Already started. | |
// return; | |
// } | |
// process_thread_->Post(rtc::Location(), this, MSG_RUN_PROCESS); | |
// } | |
// else { | |
// process_thread_->Send(rtc::Location(), this, MSG_STOP_PROCESS); | |
// } | |
//} | |
//void AudioCaptureModule::ProcessFrameP() { | |
// ASSERT(rtc::Thread::Current() == process_thread_); | |
// if (!started_) { | |
// next_frame_time_ = rtc::TimeMillis(); | |
// started_ = true; | |
// } | |
// // Receive and send frames every kTimePerFrameMs. | |
// if (audio_callback_ != NULL) { | |
// if (playing_) { | |
// ReceiveFrameP(); | |
// } | |
// if (recording_) { | |
// SendFrameP(); | |
// } | |
// } | |
// next_frame_time_ += kTimePerFrameMs; | |
// const uint64_t current_time = rtc::TimeMillis(); | |
// const uint64_t wait_time = (next_frame_time_ > current_time) ? | |
// next_frame_time_ - current_time : 0; | |
// process_thread_->PostDelayed(rtc::Location(), wait_time, this, MSG_RUN_PROCESS); | |
//} | |
//void AudioCaptureModule::ReceiveFrameP() { | |
// ASSERT(rtc::Thread::Current() == process_thread_); | |
// ResetRecBuffer(); | |
// uint32_t nSamplesOut = 0; | |
// //if (audio_callback_->NeedMorePlayData(kNumberSamples, kNumberBytesPerSample, | |
// // kNumberOfChannels, kSamplesPerSecond, | |
// // rec_buffer_, nSamplesOut,) != 0) { | |
// // ASSERT(false); | |
// //} | |
// //ASSERT(nSamplesOut == kNumberSamples); | |
// //// The SetBuffer() function ensures that after decoding, the audio buffer | |
// //// should contain samples of similar magnitude (there is likely to be some | |
// //// distortion due to the audio pipeline). If one sample is detected to | |
// //// have the same or greater magnitude somewhere in the frame, an actual frame | |
// //// has been received from the remote side (i.e. faked frames are not being | |
// //// pulled). | |
// //if (CheckRecBuffer(kHighSampleValue)) ++frames_received_; | |
//} | |
/*void AudioCaptureModule::SendFrameP() { | |
ASSERT(rtc::Thread::Current() == process_thread_); | |
if (audio_callback_->RecordedDataIsAvailable(send_buffer_, kNumberSamples, | |
kNumberBytesPerSample, | |
kNumberOfChannels, | |
kSamplesPerSecond, kTotalDelayMs, | |
kClockDriftMs, current_mic_level_, false, | |
current_mic_level_) != 0) { | |
ASSERT(false); | |
} | |
} | |
void AudioCaptureModule::StopProcessP() { | |
ASSERT(rtc::Thread::Current() == process_thread_); | |
started_ = false; | |
process_thread_->Clear(this); | |
}*/ | |
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* | |
* libjingle | |
* Copyright 2012, Google Inc. | |
* | |
* Redistribution and use in source and binary forms, with or without | |
* modification, are permitted provided that the following conditions are met: | |
* | |
* 1. Redistributions of source code must retain the above copyright notice, | |
* this list of conditions and the following disclaimer. | |
* 2. Redistributions in binary form must reproduce the above copyright notice, | |
* this list of conditions and the following disclaimer in the documentation | |
* and/or other materials provided with the distribution. | |
* 3. The name of the author may not be used to endorse or promote products | |
* derived from this software without specific prior written permission. | |
* | |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | |
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | |
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO | |
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | |
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | |
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | |
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
*/ | |
// This class implements an AudioCaptureModule that can be used to detect if | |
// audio is being received properly if it is fed by another AudioCaptureModule | |
// in some arbitrary audio pipeline where they are connected. It does not play | |
// out or record any audio so it does not need access to any hardware and can | |
// therefore be used in the gtest testing framework. | |
// Note P postfix of a function indicates that it should only be called by the | |
// processing thread. | |
#ifndef WEBRTC_NET_AUDIOCAPTUREMODULE_H_ | |
#define WEBRTC_NET_AUDIOCAPTUREMODULE_H_ | |
#pragma once | |
#include "webrtc/base/basictypes.h" | |
#include "webrtc/base/messagehandler.h" | |
#include "webrtc/base/scoped_ref_ptr.h" | |
#include "webrtc/common_types.h" | |
#include "webrtc/modules/audio_device/include/audio_device.h" | |
namespace rtc
{
// Forward declaration only: the process-thread plumbing that referenced
// rtc::Thread is currently commented out, so no definition is needed here.
class Thread;
}
namespace nr_webrtc | |
{ | |
// Fake/bridge AudioDeviceModule: implements webrtc::AudioDeviceModule
// without touching audio hardware. Captured PCM is injected by the owner
// through PushFrame(); functions PeerConnection never calls simply assert.
class AudioCaptureModule : public webrtc::AudioDeviceModule
{
public:
// NOTE(review): upstream fake ADMs declare Sample as signed int16_t;
// uint16_t makes CheckRecBuffer() treat negative PCM samples as large
// positive values -- confirm which is intended.
typedef uint16_t Sample;
// The value for the following constants have been derived by running VoE
// using a real ADM. The constants correspond to 10ms of mono audio at 8 kHz
// (80 samples per 10 ms frame).
enum { kNumberSamples = 80 };
enum { kNumberBytesPerSample = sizeof(Sample) };
// Creates a AudioCaptureModule or returns NULL on failure.
static rtc::scoped_refptr<AudioCaptureModule> Create();
// Returns the number of frames that have been successfully pulled by the
// instance. Note that correctly detecting success can only be done if the
// pulled frame was generated/pushed from a AudioCaptureModule.
int frames_received() const;
// Following functions are inherited from webrtc::AudioDeviceModule.
// Only functions called by PeerConnection are implemented, the rest do
// nothing and return success. If a function is not expected to be called by
// PeerConnection an assertion is triggered if it is in fact called.
virtual int32_t Version(char* version,
uint32_t& remaining_buffer_in_bytes,
uint32_t& position) const;
virtual int64_t TimeUntilNextProcess();
virtual void Process();
//virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
// Reference counting is supplied by rtc::RefCountedObject in Create();
// these stubs only satisfy the interface.
virtual int32_t AddRef() const { return 0; }
virtual int32_t Release() const { return 0; }
virtual int32_t ActiveAudioLayer(AudioLayer* audio_layer) const;
virtual ErrorCode LastError() const;
virtual int32_t RegisterEventObserver(
webrtc::AudioDeviceObserver* event_callback);
// Stores the sink that PushFrame() delivers recorded audio to.
virtual int32_t RegisterAudioCallback(webrtc::AudioTransport* audio_callback);
virtual int32_t Init();
virtual int32_t Terminate();
virtual bool Initialized() const;
virtual int16_t PlayoutDevices();
virtual int16_t RecordingDevices();
virtual int32_t PlayoutDeviceName(uint16_t index,
char name[webrtc::kAdmMaxDeviceNameSize],
char guid[webrtc::kAdmMaxGuidSize]);
virtual int32_t RecordingDeviceName(uint16_t index,
char name[webrtc::kAdmMaxDeviceNameSize],
char guid[webrtc::kAdmMaxGuidSize]);
virtual int32_t SetPlayoutDevice(uint16_t index);
virtual int32_t SetPlayoutDevice(WindowsDeviceType device);
virtual int32_t SetRecordingDevice(uint16_t index);
virtual int32_t SetRecordingDevice(WindowsDeviceType device);
virtual int32_t PlayoutIsAvailable(bool* available);
virtual int32_t InitPlayout();
virtual bool PlayoutIsInitialized() const;
virtual int32_t RecordingIsAvailable(bool* available);
virtual int32_t InitRecording();
virtual bool RecordingIsInitialized() const;
virtual int32_t StartPlayout();
virtual int32_t StopPlayout();
virtual bool Playing() const;
virtual int32_t StartRecording();
virtual int32_t StopRecording();
virtual bool Recording() const;
virtual int32_t SetAGC(bool enable);
virtual bool AGC() const;
virtual int32_t SetWaveOutVolume(uint16_t volume_left,
uint16_t volume_right);
virtual int32_t WaveOutVolume(uint16_t* volume_left,
uint16_t* volume_right) const;
virtual int32_t SpeakerIsAvailable(bool* available);
virtual int32_t InitSpeaker();
virtual bool SpeakerIsInitialized() const;
virtual int32_t MicrophoneIsAvailable(bool* available);
virtual int32_t InitMicrophone();
virtual bool MicrophoneIsInitialized() const;
virtual int32_t SpeakerVolumeIsAvailable(bool* available);
virtual int32_t SetSpeakerVolume(uint32_t volume);
virtual int32_t SpeakerVolume(uint32_t* volume) const;
virtual int32_t MaxSpeakerVolume(uint32_t* max_volume) const;
virtual int32_t MinSpeakerVolume(uint32_t* min_volume) const;
virtual int32_t SpeakerVolumeStepSize(uint16_t* step_size) const;
virtual int32_t MicrophoneVolumeIsAvailable(bool* available);
virtual int32_t SetMicrophoneVolume(uint32_t volume);
virtual int32_t MicrophoneVolume(uint32_t* volume) const;
virtual int32_t MaxMicrophoneVolume(uint32_t* max_volume) const;
virtual int32_t MinMicrophoneVolume(uint32_t* min_volume) const;
virtual int32_t MicrophoneVolumeStepSize(uint16_t* step_size) const;
virtual int32_t SpeakerMuteIsAvailable(bool* available);
virtual int32_t SetSpeakerMute(bool enable);
virtual int32_t SpeakerMute(bool* enabled) const;
virtual int32_t MicrophoneMuteIsAvailable(bool* available);
virtual int32_t SetMicrophoneMute(bool enable);
virtual int32_t MicrophoneMute(bool* enabled) const;
virtual int32_t MicrophoneBoostIsAvailable(bool* available);
virtual int32_t SetMicrophoneBoost(bool enable);
virtual int32_t MicrophoneBoost(bool* enabled) const;
virtual int32_t StereoPlayoutIsAvailable(bool* available) const;
virtual int32_t SetStereoPlayout(bool enable);
virtual int32_t StereoPlayout(bool* enabled) const;
virtual int32_t StereoRecordingIsAvailable(bool* available) const;
virtual int32_t SetStereoRecording(bool enable);
virtual int32_t StereoRecording(bool* enabled) const;
virtual int32_t SetRecordingChannel(const ChannelType channel);
virtual int32_t RecordingChannel(ChannelType* channel) const;
virtual int32_t SetPlayoutBuffer(const BufferType type,
uint16_t size_ms = 0);
virtual int32_t PlayoutBuffer(BufferType* type,
uint16_t* size_ms) const;
virtual int32_t PlayoutDelay(uint16_t* delay_ms) const;
virtual int32_t RecordingDelay(uint16_t* delay_ms) const;
virtual int32_t CPULoad(uint16_t* load) const;
virtual int32_t StartRawOutputFileRecording(
const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]);
virtual int32_t StopRawOutputFileRecording();
virtual int32_t StartRawInputFileRecording(
const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]);
virtual int32_t StopRawInputFileRecording();
virtual int32_t SetRecordingSampleRate(const uint32_t samples_per_sec);
virtual int32_t RecordingSampleRate(uint32_t* samples_per_sec) const;
virtual int32_t SetPlayoutSampleRate(const uint32_t samples_per_sec);
virtual int32_t PlayoutSampleRate(uint32_t* samples_per_sec) const;
virtual int32_t ResetAudioDevice();
virtual int32_t SetLoudspeakerStatus(bool enable);
virtual int32_t GetLoudspeakerStatus(bool* enabled) const;
// End of functions inherited from webrtc::AudioDeviceModule.
// The following function was inherited from talk_base::MessageHandler; it
// is disabled together with the internal process-thread pump.
//virtual void OnMessage(rtc::Message* msg);
// Built-in audio effects (AEC/AGC/NS) are not available on this fake device.
virtual bool BuiltInAECIsAvailable() const { return false; }
virtual int32_t EnableBuiltInAEC(bool enable) { return -1; }
virtual bool BuiltInAGCIsAvailable() const { return false; }
virtual int32_t EnableBuiltInAGC(bool enable) { return -1; }
virtual bool BuiltInNSIsAvailable() const { return false; }
virtual int32_t EnableBuiltInNS(bool enable) { return -1; }
// Entry point for the owner to inject externally captured PCM; the data is
// forwarded to the registered webrtc::AudioTransport as recorded audio.
void PushFrame(const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames);
// NOTE(review): an earlier comment here claimed the constructor and
// destructor are protected, but both are declared in the public section.
// Instances should still be created via Create() so they are reference
// counted (wrapped in rtc::RefCountedObject) -- confirm whether the access
// level should be tightened.
AudioCaptureModule();
// Should not be deleted directly; lifetime is managed by reference counting.
virtual ~AudioCaptureModule();
private:
// Initializes the state of the AudioCaptureModule. This API is called on
// creation by the Create() API.
bool Initialize();
// SetBuffer() sets all samples in send_buffer_ to |value|.
void SetSendBuffer(int value);
// Resets rec_buffer_. I.e., sets all rec_buffer_ samples to 0.
void ResetRecBuffer();
// Returns true if rec_buffer_ contains one or more sample greater than or
// equal to |value|.
bool CheckRecBuffer(int value);
//// Starts or stops the pushing and pulling of audio frames depending on if
//// recording or playback has been enabled/started.
//void UpdateProcessing();
//// Periodically called function that ensures that frames are pulled and
//// pushed periodically if enabled/started.
//void ProcessFrameP();
//// Pulls frames from the registered webrtc::AudioTransport.
//void ReceiveFrameP();
//// Pushes frames to the registered webrtc::AudioTransport.
//void SendFrameP();
//// Stops the periodic calling of ProcessFrame() in a thread safe way.
//void StopProcessP();
// The time in milliseconds when Process() was last called or 0 if no call
// has been made.
uint64_t last_process_time_ms_;
// Callback for playout and recording.
webrtc::AudioTransport* audio_callback_;
bool recording_; // True when audio is being pushed from the instance.
bool playing_; // True when audio is being pulled by the instance.
bool play_is_initialized_; // True when the instance is ready to pull audio.
bool rec_is_initialized_; // True when the instance is ready to push audio.
// Input to and output from RecordedDataIsAvailable(..) makes it possible to
// modify the current mic level. The implementation does not care about the
// mic level so it just feeds back what it receives.
uint32_t current_mic_level_;
// next_frame_time_ is updated in a non-drifting manner to indicate the next
// wall clock time the next frame should be generated and received. started_
// ensures that next_frame_time_ can be initialized properly on first call.
bool started_;
uint64_t next_frame_time_;
//// User provided thread context.
//rtc::Thread* process_thread_;
// Buffer for storing samples received from the webrtc::AudioTransport.
char rec_buffer_[kNumberSamples * kNumberBytesPerSample];
// Counter of frames received that have samples of high enough amplitude to
// indicate that the frames are not faked somewhere in the audio pipeline
// (e.g. by a jitter buffer).
int frames_received_;
};
} | |
#endif // WEBRTC_NET_AUDIOCAPTUREMODULE_H_ |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment