summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndroid Chromium Automerger <chromium-automerger@android>2014-09-24 16:39:47 +0000
committerAndroid Chromium Automerger <chromium-automerger@android>2014-09-24 16:39:47 +0000
commit8af00ea2e38cd722cdc20c41427cdab7e165591e (patch)
treeb14d4c5686ee0fc58d848cb6b00960c51595199d
parent81b7993e9ee6acc9e003ee943373c90221ef6f70 (diff)
parent291035ed1d8ec308ffbc81e9cd119e2f53f92f86 (diff)
downloadwebrtc-8af00ea2e38cd722cdc20c41427cdab7e165591e.tar.gz
Merge third_party/webrtc from https://chromium.googlesource.com/external/webrtc/trunk/webrtc.git at 291035ed1d8ec308ffbc81e9cd119e2f53f92f86
This commit was generated by merge_from_chromium.py. Change-Id: I8de5d3b4724dd14ebda167e51683b554ddb5e024
-rw-r--r--base/BUILD.gn1
-rw-r--r--base/asynchttprequest_unittest.cc13
-rw-r--r--base/autodetectproxy_unittest.cc11
-rw-r--r--base/base.gyp1
-rw-r--r--base/base_tests.gyp155
-rw-r--r--base/cpumonitor_unittest.cc3
-rw-r--r--base/criticalsection.h23
-rw-r--r--base/criticalsection_unittest.cc5
-rw-r--r--base/filelock_unittest.cc3
-rw-r--r--base/logging_unittest.cc3
-rw-r--r--base/macsocketserver_unittest.cc54
-rw-r--r--base/messagequeue_unittest.cc3
-rw-r--r--base/nat_unittest.cc9
-rw-r--r--base/nssstreamadapter.cc2
-rw-r--r--base/nullsocketserver_unittest.cc3
-rw-r--r--base/opensslstreamadapter.cc11
-rw-r--r--base/physicalsocketserver_unittest.cc13
-rw-r--r--base/proxy_unittest.cc7
-rw-r--r--base/sharedexclusivelock.h25
-rw-r--r--base/sharedexclusivelock_unittest.cc7
-rw-r--r--base/signalthread.h7
-rw-r--r--base/signalthread_unittest.cc11
-rw-r--r--base/ssladapter_unittest.cc342
-rw-r--r--base/sslstreamadapter.h12
-rw-r--r--base/sslstreamadapter_unittest.cc3
-rw-r--r--base/stream_unittest.cc3
-rw-r--r--base/task_unittest.cc15
-rw-r--r--base/testclient_unittest.cc9
-rw-r--r--base/thread_annotations.h (renamed from system_wrappers/interface/thread_annotations.h)0
-rw-r--r--base/thread_checker_unittest.cc7
-rw-r--r--base/thread_unittest.cc23
-rw-r--r--base/virtualsocket_unittest.cc59
-rw-r--r--build/common.gypi5
-rw-r--r--call.h3
-rw-r--r--config.h14
-rw-r--r--modules/audio_coding/codecs/isac/fix/interface/isacfix.h2
-rw-r--r--modules/audio_coding/codecs/isac/fix/source/isacfix.c25
-rw-r--r--modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc2
-rw-r--r--modules/audio_coding/codecs/isac/fix/test/kenny.cc2
-rw-r--r--modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c9
-rw-r--r--modules/audio_coding/codecs/isac/main/interface/isac.h2
-rw-r--r--modules/audio_coding/codecs/isac/main/source/isac.c41
-rw-r--r--modules/audio_coding/codecs/isac/main/source/isac_unittest.cc2
-rw-r--r--modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc4
-rw-r--r--modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc3
-rw-r--r--modules/audio_coding/codecs/isac/main/test/simpleKenny.c6
-rw-r--r--modules/audio_coding/main/acm2/acm_g722.h2
-rw-r--r--modules/audio_coding/main/acm2/acm_generic_codec.h2
-rw-r--r--modules/audio_coding/main/acm2/acm_isac.cc5
-rw-r--r--modules/audio_coding/main/acm2/acm_isac.h2
-rw-r--r--modules/audio_coding/main/acm2/acm_receive_test.cc160
-rw-r--r--modules/audio_coding/main/acm2/acm_receive_test.h4
-rw-r--r--modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc187
-rw-r--r--modules/audio_coding/main/acm2/acm_receive_test_oldapi.h63
-rw-r--r--modules/audio_coding/main/acm2/acm_receiver.h2
-rw-r--r--modules/audio_coding/main/acm2/acm_receiver_unittest.cc34
-rw-r--r--modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc364
-rw-r--r--modules/audio_coding/main/acm2/acm_send_test.cc20
-rw-r--r--modules/audio_coding/main/acm2/acm_send_test.h6
-rw-r--r--modules/audio_coding/main/acm2/acm_send_test_oldapi.cc145
-rw-r--r--modules/audio_coding/main/acm2/acm_send_test_oldapi.h86
-rw-r--r--modules/audio_coding/main/acm2/audio_coding_module.gypi12
-rw-r--r--modules/audio_coding/main/acm2/audio_coding_module_impl.cc13
-rw-r--r--modules/audio_coding/main/acm2/audio_coding_module_impl.h18
-rw-r--r--modules/audio_coding/main/acm2/audio_coding_module_unittest.cc162
-rw-r--r--modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc938
-rw-r--r--modules/audio_coding/main/interface/audio_coding_module.h27
-rw-r--r--modules/audio_coding/neteq/audio_decoder_unittest.cc27
-rw-r--r--modules/audio_coding/neteq/decision_logic_normal.cc6
-rw-r--r--modules/audio_coding/neteq/neteq_external_decoder_unittest.cc274
-rw-r--r--modules/audio_coding/neteq/neteq_impl.cc11
-rw-r--r--modules/audio_coding/neteq/neteq_impl.h2
-rw-r--r--modules/audio_coding/neteq/test/RTPencode.cc12
-rw-r--r--modules/audio_coding/neteq/test/neteq_isac_quality_test.cc3
-rw-r--r--modules/audio_coding/neteq/tools/rtp_generator.cc14
-rw-r--r--modules/audio_coding/neteq/tools/rtp_generator.h32
-rw-r--r--modules/audio_processing/agc/analog_agc.c145
-rw-r--r--modules/audio_processing/agc/analog_agc.h11
-rw-r--r--modules/audio_processing/agc/digital_agc.c26
-rw-r--r--modules/audio_processing/agc/digital_agc.h8
-rw-r--r--modules/audio_processing/audio_processing.gypi4
-rw-r--r--modules/audio_processing/ns/ns_core.c808
-rw-r--r--modules/audio_processing/ns/ns_core.h141
-rw-r--r--modules/audio_processing/test/audio_processing_unittest.cc39
-rw-r--r--modules/audio_processing/test/process_test.cc7
-rw-r--r--modules/modules.gyp2
-rw-r--r--modules/pacing/include/paced_sender.h2
-rw-r--r--modules/rtp_rtcp/source/rtcp_sender.h2
-rw-r--r--modules/rtp_rtcp/source/rtp_packet_history.h2
-rw-r--r--modules/rtp_rtcp/source/rtp_sender.h2
-rw-r--r--modules/video_capture/BUILD.gn5
-rw-r--r--modules/video_coding/codecs/vp8/vp8_impl.cc8
-rw-r--r--modules/video_coding/main/source/timing.h2
-rw-r--r--modules/video_coding/main/source/video_coding_impl.h2
-rw-r--r--modules/video_render/video_render.gypi18
-rw-r--r--system_wrappers/BUILD.gn1
-rw-r--r--system_wrappers/interface/critical_section_wrapper.h2
-rw-r--r--system_wrappers/interface/rw_lock_wrapper.h2
-rw-r--r--system_wrappers/source/system_wrappers.gyp1
-rw-r--r--test/call_test.cc18
-rw-r--r--test/call_test.h6
-rw-r--r--test/webrtc_test_common.gyp16
-rw-r--r--tools/rtcbot/bot/browser/bot.js91
-rw-r--r--tools/rtcbot/test.js10
-rw-r--r--tools/rtcbot/test/webrtc_video_streaming.js94
-rw-r--r--video/bitrate_estimator_tests.cc20
-rw-r--r--video/call.cc11
-rw-r--r--video/call_perf_tests.cc6
-rw-r--r--video/end_to_end_tests.cc96
-rw-r--r--video/full_stack.cc4
-rw-r--r--video/loopback.cc7
-rw-r--r--video/rampup_tests.cc10
-rw-r--r--video/receive_statistics_proxy.h2
-rw-r--r--video/send_statistics_proxy.h2
-rw-r--r--video/video_send_stream.cc31
-rw-r--r--video/video_send_stream.h7
-rw-r--r--video/video_send_stream_tests.cc130
-rw-r--r--video_engine/vie_capturer.h2
-rw-r--r--video_engine/vie_channel_group.cc2
-rw-r--r--video_engine/vie_encoder.h2
-rw-r--r--video_engine/vie_manager_base.h2
-rw-r--r--video_send_stream.h3
-rw-r--r--voice_engine/channel.cc55
-rw-r--r--voice_engine/channel.h2
-rw-r--r--voice_engine/include/voe_base.h3
125 files changed, 4134 insertions, 1334 deletions
diff --git a/base/BUILD.gn b/base/BUILD.gn
index 003fc1bc..fbed1a52 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -127,6 +127,7 @@ static_library("rtc_base_approved") {
"stringencode.h",
"stringutils.cc",
"stringutils.h",
+ "thread_annotations.h",
"timeutils.cc",
"timeutils.h",
]
diff --git a/base/asynchttprequest_unittest.cc b/base/asynchttprequest_unittest.cc
index 0bfd795b..4748de76 100644
--- a/base/asynchttprequest_unittest.cc
+++ b/base/asynchttprequest_unittest.cc
@@ -14,6 +14,7 @@
#include "webrtc/base/httpserver.h"
#include "webrtc/base/socketstream.h"
#include "webrtc/base/thread.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -126,7 +127,7 @@ class AsyncHttpRequestTest : public testing::Test,
TestHttpServer server_;
};
-TEST_F(AsyncHttpRequestTest, TestGetSuccess) {
+TEST_F(AsyncHttpRequestTest, DISABLED_ON_MAC(TestGetSuccess)) {
AsyncHttpRequest* req = CreateGetRequest(
kServerHostnameAddr.hostname(), server().address().port(),
kServerGetPath);
@@ -143,7 +144,7 @@ TEST_F(AsyncHttpRequestTest, TestGetSuccess) {
req->Release();
}
-TEST_F(AsyncHttpRequestTest, TestGetNotFound) {
+TEST_F(AsyncHttpRequestTest, DISABLED_ON_MAC(TestGetNotFound)) {
AsyncHttpRequest* req = CreateGetRequest(
kServerHostnameAddr.hostname(), server().address().port(),
"/bad");
@@ -157,7 +158,7 @@ TEST_F(AsyncHttpRequestTest, TestGetNotFound) {
req->Release();
}
-TEST_F(AsyncHttpRequestTest, TestGetToNonServer) {
+TEST_F(AsyncHttpRequestTest, DISABLED_ON_MAC(TestGetToNonServer)) {
AsyncHttpRequest* req = CreateGetRequest(
"127.0.0.1", server().address().port(),
kServerGetPath);
@@ -187,7 +188,7 @@ TEST_F(AsyncHttpRequestTest, DISABLED_TestGetToInvalidHostname) {
req->Release();
}
-TEST_F(AsyncHttpRequestTest, TestPostSuccess) {
+TEST_F(AsyncHttpRequestTest, DISABLED_ON_MAC(TestPostSuccess)) {
AsyncHttpRequest* req = CreatePostRequest(
kServerHostnameAddr.hostname(), server().address().port(),
kServerPostPath, "text/plain", new MemoryStream("abcd1234"));
@@ -203,7 +204,7 @@ TEST_F(AsyncHttpRequestTest, TestPostSuccess) {
}
// Ensure that we shut down properly even if work is outstanding.
-TEST_F(AsyncHttpRequestTest, TestCancel) {
+TEST_F(AsyncHttpRequestTest, DISABLED_ON_MAC(TestCancel)) {
AsyncHttpRequest* req = CreateGetRequest(
kServerHostnameAddr.hostname(), server().address().port(),
kServerGetPath);
@@ -211,7 +212,7 @@ TEST_F(AsyncHttpRequestTest, TestCancel) {
req->Destroy(true);
}
-TEST_F(AsyncHttpRequestTest, TestGetSuccessDelay) {
+TEST_F(AsyncHttpRequestTest, DISABLED_ON_MAC(TestGetSuccessDelay)) {
AsyncHttpRequest* req = CreateGetRequest(
kServerHostnameAddr.hostname(), server().address().port(),
kServerGetPath);
diff --git a/base/autodetectproxy_unittest.cc b/base/autodetectproxy_unittest.cc
index 80f220f2..782bc258 100644
--- a/base/autodetectproxy_unittest.cc
+++ b/base/autodetectproxy_unittest.cc
@@ -12,6 +12,7 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/httpcommon.h"
#include "webrtc/base/httpcommon-inl.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -99,26 +100,26 @@ class AutoDetectProxyTest : public testing::Test, public sigslot::has_slots<> {
bool done_;
};
-TEST_F(AutoDetectProxyTest, TestDetectUnresolvedProxy) {
+TEST_F(AutoDetectProxyTest, DISABLED_ON_MAC(TestDetectUnresolvedProxy)) {
TestCopesWithProxy(rtc::SocketAddress("localhost", 9999));
}
-TEST_F(AutoDetectProxyTest, TestDetectUnresolvableProxy) {
+TEST_F(AutoDetectProxyTest, DISABLED_ON_MAC(TestDetectUnresolvableProxy)) {
TestCopesWithProxy(rtc::SocketAddress("invalid", 9999));
}
-TEST_F(AutoDetectProxyTest, TestDetectIPv6Proxy) {
+TEST_F(AutoDetectProxyTest, DISABLED_ON_MAC(TestDetectIPv6Proxy)) {
TestCopesWithProxy(rtc::SocketAddress("::1", 9999));
}
-TEST_F(AutoDetectProxyTest, TestDetectIPv4Proxy) {
+TEST_F(AutoDetectProxyTest, DISABLED_ON_MAC(TestDetectIPv4Proxy)) {
TestCopesWithProxy(rtc::SocketAddress("127.0.0.1", 9999));
}
// Test that proxy detection completes successfully. (Does not actually verify
// the correct detection result since we don't know what proxy to expect on an
// arbitrary machine.)
-TEST_F(AutoDetectProxyTest, TestProxyDetection) {
+TEST_F(AutoDetectProxyTest, DISABLED_ON_MAC(TestProxyDetection)) {
ASSERT_TRUE(Create(kUserAgent,
kPath,
kHost,
diff --git a/base/base.gyp b/base/base.gyp
index f5fad01f..fa92707c 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -50,6 +50,7 @@
'stringencode.h',
'stringutils.cc',
'stringutils.h',
+ 'thread_annotations.h',
'timeutils.cc',
'timeutils.h',
],
diff --git a/base/base_tests.gyp b/base/base_tests.gyp
index 5ad3d2da..2d99e810 100644
--- a/base/base_tests.gyp
+++ b/base/base_tests.gyp
@@ -47,110 +47,111 @@
'direct_dependent_settings': {
'sources': [
'asynchttprequest_unittest.cc',
- 'atomicops_unittest.cc',
- 'autodetectproxy_unittest.cc',
- 'bandwidthsmoother_unittest.cc',
- 'base64_unittest.cc',
- 'basictypes_unittest.cc',
- 'bind_unittest.cc',
- 'buffer_unittest.cc',
- 'bytebuffer_unittest.cc',
- 'byteorder_unittest.cc',
- 'callback_unittest.cc',
- 'cpumonitor_unittest.cc',
- 'crc32_unittest.cc',
- 'criticalsection_unittest.cc',
- 'event_unittest.cc',
- 'exp_filter_unittest.cc',
- 'filelock_unittest.cc',
- 'fileutils_unittest.cc',
- 'helpers_unittest.cc',
- 'httpbase_unittest.cc',
- 'httpcommon_unittest.cc',
- 'httpserver_unittest.cc',
- 'ipaddress_unittest.cc',
- 'logging_unittest.cc',
- 'md5digest_unittest.cc',
- 'messagedigest_unittest.cc',
- 'messagequeue_unittest.cc',
- 'multipart_unittest.cc',
- 'nat_unittest.cc',
- 'network_unittest.cc',
- 'nullsocketserver_unittest.cc',
- 'optionsfile_unittest.cc',
- 'pathutils_unittest.cc',
- 'physicalsocketserver_unittest.cc',
- 'profiler_unittest.cc',
- 'proxy_unittest.cc',
- 'proxydetect_unittest.cc',
- 'ratelimiter_unittest.cc',
- 'ratetracker_unittest.cc',
- 'referencecountedsingletonfactory_unittest.cc',
- 'rollingaccumulator_unittest.cc',
- 'scopedptrcollection_unittest.cc',
- 'sha1digest_unittest.cc',
- 'sharedexclusivelock_unittest.cc',
- 'signalthread_unittest.cc',
- 'sigslot_unittest.cc',
- 'sigslottester.h',
- 'sigslottester.h.pump',
- 'socket_unittest.cc',
- 'socket_unittest.h',
- 'socketaddress_unittest.cc',
- 'stream_unittest.cc',
- 'stringencode_unittest.cc',
- 'stringutils_unittest.cc',
+ #'atomicops_unittest.cc',
+ #'autodetectproxy_unittest.cc',
+ #'bandwidthsmoother_unittest.cc',
+ #'base64_unittest.cc',
+ #'basictypes_unittest.cc',
+ #'bind_unittest.cc',
+ #'buffer_unittest.cc',
+ #'bytebuffer_unittest.cc',
+ #'byteorder_unittest.cc',
+ #'callback_unittest.cc',
+ #'cpumonitor_unittest.cc',
+ #'crc32_unittest.cc',
+ #'criticalsection_unittest.cc',
+ #'event_unittest.cc',
+ #'exp_filter_unittest.cc',
+ #'filelock_unittest.cc',
+ #'fileutils_unittest.cc',
+ #'helpers_unittest.cc',
+ #'httpbase_unittest.cc',
+ #'httpcommon_unittest.cc',
+ #'httpserver_unittest.cc',
+ #'ipaddress_unittest.cc',
+ #'logging_unittest.cc',
+ #'md5digest_unittest.cc',
+ #'messagedigest_unittest.cc',
+ #'messagequeue_unittest.cc',
+ #'multipart_unittest.cc',
+ #'nat_unittest.cc',
+ #'network_unittest.cc',
+ #'nullsocketserver_unittest.cc',
+ #'optionsfile_unittest.cc',
+ #'pathutils_unittest.cc',
+ #'physicalsocketserver_unittest.cc',
+ #'profiler_unittest.cc',
+ #'proxy_unittest.cc',
+ #'proxydetect_unittest.cc',
+ #'ratelimiter_unittest.cc',
+ #'ratetracker_unittest.cc',
+ #'referencecountedsingletonfactory_unittest.cc',
+ #'rollingaccumulator_unittest.cc',
+ #'scopedptrcollection_unittest.cc',
+ #'sha1digest_unittest.cc',
+ #'sharedexclusivelock_unittest.cc',
+ #'signalthread_unittest.cc',
+ #'sigslot_unittest.cc',
+ #'sigslottester.h',
+ #'sigslottester.h.pump',
+ #'socket_unittest.cc',
+ #'socket_unittest.h',
+ #'socketaddress_unittest.cc',
+ #'stream_unittest.cc',
+ #'stringencode_unittest.cc',
+ #'stringutils_unittest.cc',
# TODO(ronghuawu): Reenable this test.
# 'systeminfo_unittest.cc',
- 'task_unittest.cc',
- 'testclient_unittest.cc',
+ #'task_unittest.cc',
+ #'testclient_unittest.cc',
'thread_checker_unittest.cc',
- 'thread_unittest.cc',
- 'timeutils_unittest.cc',
- 'urlencode_unittest.cc',
- 'versionparsing_unittest.cc',
- 'virtualsocket_unittest.cc',
+ #'thread_unittest.cc',
+ #'timeutils_unittest.cc',
+ #'urlencode_unittest.cc',
+ #'versionparsing_unittest.cc',
+ #'virtualsocket_unittest.cc',
# TODO(ronghuawu): Reenable this test.
# 'windowpicker_unittest.cc',
],
'conditions': [
['OS=="linux"', {
'sources': [
- 'latebindingsymboltable_unittest.cc',
+ #'latebindingsymboltable_unittest.cc',
# TODO(ronghuawu): Reenable this test.
# 'linux_unittest.cc',
- 'linuxfdwalk_unittest.cc',
+ #'linuxfdwalk_unittest.cc',
],
}],
['OS=="win"', {
'sources': [
- 'win32_unittest.cc',
- 'win32regkey_unittest.cc',
- 'win32socketserver_unittest.cc',
- 'win32toolhelp_unittest.cc',
- 'win32window_unittest.cc',
- 'win32windowpicker_unittest.cc',
- 'winfirewall_unittest.cc',
+ #'win32_unittest.cc',
+ #'win32regkey_unittest.cc',
+ #'win32socketserver_unittest.cc',
+ #'win32toolhelp_unittest.cc',
+ #'win32window_unittest.cc',
+ #'win32windowpicker_unittest.cc',
+ #'winfirewall_unittest.cc',
],
'sources!': [
# TODO(ronghuawu): Fix TestUdpReadyToSendIPv6 on windows bot
# then reenable these tests.
- 'physicalsocketserver_unittest.cc',
- 'socket_unittest.cc',
- 'win32socketserver_unittest.cc',
- 'win32windowpicker_unittest.cc',
+ #'physicalsocketserver_unittest.cc',
+ #'socket_unittest.cc',
+ #'win32socketserver_unittest.cc',
+ #'win32windowpicker_unittest.cc',
],
}],
['OS=="mac"', {
'sources': [
- 'macsocketserver_unittest.cc',
- 'macutils_unittest.cc',
+ #'macsocketserver_unittest.cc',
+ #'macutils_unittest.cc',
],
}],
['os_posix==1', {
'sources': [
- 'sslidentity_unittest.cc',
- 'sslstreamadapter_unittest.cc',
+ #'ssladapter_unittest.cc',
+ #'sslidentity_unittest.cc',
+ #'sslstreamadapter_unittest.cc',
],
}],
['OS=="ios" or (OS=="mac" and target_arch!="ia32")', {
diff --git a/base/cpumonitor_unittest.cc b/base/cpumonitor_unittest.cc
index 6d9af5ae..1546f558 100644
--- a/base/cpumonitor_unittest.cc
+++ b/base/cpumonitor_unittest.cc
@@ -23,6 +23,7 @@
#include "webrtc/base/thread.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/base/timing.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -337,7 +338,7 @@ TEST(CpuMonitorTest, TestGetSystemLoadForceFallback) {
#endif
// Tests both process and system functions in use at same time.
-TEST(CpuMonitorTest, TestGetBothLoad) {
+TEST(CpuMonitorTest, DISABLED_ON_MAC(TestGetBothLoad)) {
TestCpuSampler(true, true, false);
}
diff --git a/base/criticalsection.h b/base/criticalsection.h
index a950a47f..a2d9bca0 100644
--- a/base/criticalsection.h
+++ b/base/criticalsection.h
@@ -12,6 +12,7 @@
#define WEBRTC_BASE_CRITICALSECTION_H__
#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/thread_annotations.h"
#if defined(WEBRTC_WIN)
#include "webrtc/base/win32.h"
@@ -34,7 +35,7 @@
namespace rtc {
#if defined(WEBRTC_WIN)
-class CriticalSection {
+class LOCKABLE CriticalSection {
public:
CriticalSection() {
InitializeCriticalSection(&crit_);
@@ -44,18 +45,18 @@ class CriticalSection {
~CriticalSection() {
DeleteCriticalSection(&crit_);
}
- void Enter() {
+ void Enter() EXCLUSIVE_LOCK_FUNCTION() {
EnterCriticalSection(&crit_);
TRACK_OWNER(thread_ = GetCurrentThreadId());
}
- bool TryEnter() {
+ bool TryEnter() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
if (TryEnterCriticalSection(&crit_) != FALSE) {
TRACK_OWNER(thread_ = GetCurrentThreadId());
return true;
}
return false;
}
- void Leave() {
+ void Leave() UNLOCK_FUNCTION() {
TRACK_OWNER(thread_ = 0);
LeaveCriticalSection(&crit_);
}
@@ -71,7 +72,7 @@ class CriticalSection {
#endif // WEBRTC_WIN
#if defined(WEBRTC_POSIX)
-class CriticalSection {
+class LOCKABLE CriticalSection {
public:
CriticalSection() {
pthread_mutexattr_t mutex_attribute;
@@ -84,18 +85,18 @@ class CriticalSection {
~CriticalSection() {
pthread_mutex_destroy(&mutex_);
}
- void Enter() {
+ void Enter() EXCLUSIVE_LOCK_FUNCTION() {
pthread_mutex_lock(&mutex_);
TRACK_OWNER(thread_ = pthread_self());
}
- bool TryEnter() {
+ bool TryEnter() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
if (pthread_mutex_trylock(&mutex_) == 0) {
TRACK_OWNER(thread_ = pthread_self());
return true;
}
return false;
}
- void Leave() {
+ void Leave() UNLOCK_FUNCTION() {
TRACK_OWNER(thread_ = 0);
pthread_mutex_unlock(&mutex_);
}
@@ -111,13 +112,13 @@ class CriticalSection {
#endif // WEBRTC_POSIX
// CritScope, for serializing execution through a scope.
-class CritScope {
+class SCOPED_LOCKABLE CritScope {
public:
- explicit CritScope(CriticalSection *pcrit) {
+ explicit CritScope(CriticalSection *pcrit) EXCLUSIVE_LOCK_FUNCTION(pcrit) {
pcrit_ = pcrit;
pcrit_->Enter();
}
- ~CritScope() {
+ ~CritScope() UNLOCK_FUNCTION() {
pcrit_->Leave();
}
private:
diff --git a/base/criticalsection_unittest.cc b/base/criticalsection_unittest.cc
index e1b05cb0..69f74fa2 100644
--- a/base/criticalsection_unittest.cc
+++ b/base/criticalsection_unittest.cc
@@ -16,6 +16,7 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/scopedptrcollection.h"
#include "webrtc/base/thread.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -119,7 +120,7 @@ TEST(AtomicOpsTest, Simple) {
EXPECT_EQ(0, value);
}
-TEST(AtomicOpsTest, Increment) {
+TEST(AtomicOpsTest, DISABLED_ON_MAC(Increment)) {
// Create and start lots of threads.
AtomicOpRunner<IncrementOp> runner(0);
ScopedPtrCollection<Thread> threads;
@@ -131,7 +132,7 @@ TEST(AtomicOpsTest, Increment) {
EXPECT_EQ(kOperationsToRun * kNumThreads, runner.value());
}
-TEST(AtomicOpsTest, Decrement) {
+TEST(AtomicOpsTest, DISABLED_ON_MAC(Decrement)) {
// Create and start lots of threads.
AtomicOpRunner<DecrementOp> runner(kOperationsToRun * kNumThreads);
ScopedPtrCollection<Thread> threads;
diff --git a/base/filelock_unittest.cc b/base/filelock_unittest.cc
index eecbf07d..536204ec 100644
--- a/base/filelock_unittest.cc
+++ b/base/filelock_unittest.cc
@@ -17,6 +17,7 @@
#include "webrtc/base/pathutils.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -76,7 +77,7 @@ TEST_F(FileLockTest, TestLockX2) {
EXPECT_TRUE(lock2.get() == NULL);
}
-TEST_F(FileLockTest, TestThreadedLock) {
+TEST_F(FileLockTest, DISABLED_ON_MAC(TestThreadedLock)) {
scoped_ptr<FileLock> lock(FileLock::TryLock(temp_file_.pathname()));
EXPECT_TRUE(lock.get() != NULL);
diff --git a/base/logging_unittest.cc b/base/logging_unittest.cc
index 59630d74..aa438376 100644
--- a/base/logging_unittest.cc
+++ b/base/logging_unittest.cc
@@ -14,6 +14,7 @@
#include "webrtc/base/pathutils.h"
#include "webrtc/base/stream.h"
#include "webrtc/base/thread.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -82,7 +83,7 @@ class LogThread : public Thread {
}
};
-TEST(LogTest, MultipleThreads) {
+TEST(LogTest, DISABLED_ON_MAC(MultipleThreads)) {
int sev = LogMessage::GetLogToStream(NULL);
LogThread thread1, thread2, thread3;
diff --git a/base/macsocketserver_unittest.cc b/base/macsocketserver_unittest.cc
index e98be918..4c0432af 100644
--- a/base/macsocketserver_unittest.cc
+++ b/base/macsocketserver_unittest.cc
@@ -33,7 +33,7 @@ class WakeThread : public Thread {
#ifndef CARBON_DEPRECATED
// Test that MacCFSocketServer::Wait works as expected.
-TEST(MacCFSocketServerTest, TestWait) {
+TEST(MacCFSocketServerTest, DISABLED_TestWait) {
MacCFSocketServer server;
uint32 start = Time();
server.Wait(1000, true);
@@ -41,7 +41,7 @@ TEST(MacCFSocketServerTest, TestWait) {
}
// Test that MacCFSocketServer::Wakeup works as expected.
-TEST(MacCFSocketServerTest, TestWakeup) {
+TEST(MacCFSocketServerTest, DISABLED_TestWakeup) {
MacCFSocketServer server;
WakeThread thread(&server);
uint32 start = Time();
@@ -51,7 +51,7 @@ TEST(MacCFSocketServerTest, TestWakeup) {
}
// Test that MacCarbonSocketServer::Wait works as expected.
-TEST(MacCarbonSocketServerTest, TestWait) {
+TEST(MacCarbonSocketServerTest, DISABLED_TestWait) {
MacCarbonSocketServer server;
uint32 start = Time();
server.Wait(1000, true);
@@ -59,7 +59,7 @@ TEST(MacCarbonSocketServerTest, TestWait) {
}
// Test that MacCarbonSocketServer::Wakeup works as expected.
-TEST(MacCarbonSocketServerTest, TestWakeup) {
+TEST(MacCarbonSocketServerTest, DISABLED_TestWakeup) {
MacCarbonSocketServer server;
WakeThread thread(&server);
uint32 start = Time();
@@ -69,7 +69,7 @@ TEST(MacCarbonSocketServerTest, TestWakeup) {
}
// Test that MacCarbonAppSocketServer::Wait works as expected.
-TEST(MacCarbonAppSocketServerTest, TestWait) {
+TEST(MacCarbonAppSocketServerTest, DISABLED_TestWait) {
MacCarbonAppSocketServer server;
uint32 start = Time();
server.Wait(1000, true);
@@ -77,7 +77,7 @@ TEST(MacCarbonAppSocketServerTest, TestWait) {
}
// Test that MacCarbonAppSocketServer::Wakeup works as expected.
-TEST(MacCarbonAppSocketServerTest, TestWakeup) {
+TEST(MacCarbonAppSocketServerTest, DISABLED_TestWakeup) {
MacCarbonAppSocketServer server;
WakeThread thread(&server);
uint32 start = Time();
@@ -102,19 +102,19 @@ class MacAsyncSocketTest : public SocketTest {
SocketServerScope scope_;
};
-TEST_F(MacAsyncSocketTest, TestConnectIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestConnectIPv4) {
SocketTest::TestConnectIPv4();
}
-TEST_F(MacAsyncSocketTest, TestConnectIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestConnectIPv6) {
SocketTest::TestConnectIPv6();
}
-TEST_F(MacAsyncSocketTest, TestConnectWithDnsLookupIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestConnectWithDnsLookupIPv4) {
SocketTest::TestConnectWithDnsLookupIPv4();
}
-TEST_F(MacAsyncSocketTest, TestConnectWithDnsLookupIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestConnectWithDnsLookupIPv6) {
SocketTest::TestConnectWithDnsLookupIPv6();
}
@@ -123,7 +123,7 @@ TEST_F(MacAsyncSocketTest, DISABLED_TestConnectFailIPv4) {
SocketTest::TestConnectFailIPv4();
}
-TEST_F(MacAsyncSocketTest, TestConnectFailIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestConnectFailIPv6) {
SocketTest::TestConnectFailIPv6();
}
@@ -136,11 +136,11 @@ TEST_F(MacAsyncSocketTest, DISABLED_TestConnectWithDnsLookupFailIPv6) {
SocketTest::TestConnectWithDnsLookupFailIPv6();
}
-TEST_F(MacAsyncSocketTest, TestConnectWithClosedSocketIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestConnectWithClosedSocketIPv4) {
SocketTest::TestConnectWithClosedSocketIPv4();
}
-TEST_F(MacAsyncSocketTest, TestConnectWithClosedSocketIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestConnectWithClosedSocketIPv6) {
SocketTest::TestConnectWithClosedSocketIPv6();
}
@@ -155,51 +155,51 @@ TEST_F(MacAsyncSocketTest, DISABLED_TestServerCloseDuringConnectIPv6) {
}
// Flaky at the moment (0.5% failure rate). Seems the client doesn't get
// signalled in a timely manner...
-TEST_F(MacAsyncSocketTest, TestClientCloseDuringConnectIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestClientCloseDuringConnectIPv4) {
SocketTest::TestClientCloseDuringConnectIPv4();
}
-TEST_F(MacAsyncSocketTest, TestClientCloseDuringConnectIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestClientCloseDuringConnectIPv6) {
SocketTest::TestClientCloseDuringConnectIPv6();
}
-TEST_F(MacAsyncSocketTest, TestServerCloseIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestServerCloseIPv4) {
SocketTest::TestServerCloseIPv4();
}
-TEST_F(MacAsyncSocketTest, TestServerCloseIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestServerCloseIPv6) {
SocketTest::TestServerCloseIPv6();
}
-TEST_F(MacAsyncSocketTest, TestCloseInClosedCallbackIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestCloseInClosedCallbackIPv4) {
SocketTest::TestCloseInClosedCallbackIPv4();
}
-TEST_F(MacAsyncSocketTest, TestCloseInClosedCallbackIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestCloseInClosedCallbackIPv6) {
SocketTest::TestCloseInClosedCallbackIPv6();
}
-TEST_F(MacAsyncSocketTest, TestSocketServerWaitIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestSocketServerWaitIPv4) {
SocketTest::TestSocketServerWaitIPv4();
}
-TEST_F(MacAsyncSocketTest, TestSocketServerWaitIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestSocketServerWaitIPv6) {
SocketTest::TestSocketServerWaitIPv6();
}
-TEST_F(MacAsyncSocketTest, TestTcpIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestTcpIPv4) {
SocketTest::TestTcpIPv4();
}
-TEST_F(MacAsyncSocketTest, TestTcpIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestTcpIPv6) {
SocketTest::TestTcpIPv6();
}
-TEST_F(MacAsyncSocketTest, TestSingleFlowControlCallbackIPv4) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestSingleFlowControlCallbackIPv4) {
SocketTest::TestSingleFlowControlCallbackIPv4();
}
-TEST_F(MacAsyncSocketTest, TestSingleFlowControlCallbackIPv6) {
+TEST_F(MacAsyncSocketTest, DISABLED_TestSingleFlowControlCallbackIPv6) {
SocketTest::TestSingleFlowControlCallbackIPv6();
}
@@ -226,11 +226,11 @@ class MacCarbonAppAsyncSocketTest : public MacAsyncSocketTest {
};
};
-TEST_F(MacCarbonAppAsyncSocketTest, TestSocketServerWaitIPv4) {
+TEST_F(MacCarbonAppAsyncSocketTest, DISABLED_TestSocketServerWaitIPv4) {
SocketTest::TestSocketServerWaitIPv4();
}
-TEST_F(MacCarbonAppAsyncSocketTest, TestSocketServerWaitIPv6) {
+TEST_F(MacCarbonAppAsyncSocketTest, DISABLED_TestSocketServerWaitIPv6) {
SocketTest::TestSocketServerWaitIPv6();
}
#endif
diff --git a/base/messagequeue_unittest.cc b/base/messagequeue_unittest.cc
index 78024e0b..2027d91e 100644
--- a/base/messagequeue_unittest.cc
+++ b/base/messagequeue_unittest.cc
@@ -16,6 +16,7 @@
#include "webrtc/base/thread.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/base/nullsocketserver.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
using namespace rtc;
@@ -79,7 +80,7 @@ TEST_F(MessageQueueTest,
DelayedPostsWithIdenticalTimesAreProcessedInFifoOrder(&q_nullss);
}
-TEST_F(MessageQueueTest, DisposeNotLocked) {
+TEST_F(MessageQueueTest, DISABLED_ON_MAC(DisposeNotLocked)) {
bool was_locked = true;
bool deleted = false;
DeletedLockChecker* d = new DeletedLockChecker(this, &was_locked, &deleted);
diff --git a/base/nat_unittest.cc b/base/nat_unittest.cc
index 0e16259a..19c1225c 100644
--- a/base/nat_unittest.cc
+++ b/base/nat_unittest.cc
@@ -19,6 +19,7 @@
#include "webrtc/base/physicalsocketserver.h"
#include "webrtc/base/testclient.h"
#include "webrtc/base/virtualsocketserver.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
using namespace rtc;
@@ -236,11 +237,11 @@ void TestPhysicalInternal(const SocketAddress& int_addr) {
TestFilters(int_pss.get(), int_addr, ext_pss.get(), ext_addrs);
}
-TEST(NatTest, TestPhysicalIPv4) {
+TEST(NatTest, DISABLED_ON_MAC(TestPhysicalIPv4)) {
TestPhysicalInternal(SocketAddress("127.0.0.1", 0));
}
-TEST(NatTest, TestPhysicalIPv6) {
+TEST(NatTest, DISABLED_ON_MAC(TestPhysicalIPv6)) {
if (HasIPv6Enabled()) {
TestPhysicalInternal(SocketAddress("::1", 0));
} else {
@@ -278,11 +279,11 @@ void TestVirtualInternal(int family) {
TestFilters(int_vss.get(), int_addr, ext_vss.get(), ext_addrs);
}
-TEST(NatTest, TestVirtualIPv4) {
+TEST(NatTest, DISABLED_ON_MAC(TestVirtualIPv4)) {
TestVirtualInternal(AF_INET);
}
-TEST(NatTest, TestVirtualIPv6) {
+TEST(NatTest, DISABLED_ON_MAC(TestVirtualIPv6)) {
if (HasIPv6Enabled()) {
TestVirtualInternal(AF_INET6);
} else {
diff --git a/base/nssstreamadapter.cc b/base/nssstreamadapter.cc
index f4b2d311..40c017fb 100644
--- a/base/nssstreamadapter.cc
+++ b/base/nssstreamadapter.cc
@@ -486,6 +486,8 @@ int NSSStreamAdapter::BeginSSL() {
return -1;
}
+ // TODO(juberti): Check for client_auth_enabled()
+
rv = SSL_OptionSet(ssl_fd_, SSL_REQUIRE_CERTIFICATE, PR_TRUE);
if (rv != SECSuccess) {
Error("BeginSSL", -1, false);
diff --git a/base/nullsocketserver_unittest.cc b/base/nullsocketserver_unittest.cc
index fe21f6ad..6d3eb52b 100644
--- a/base/nullsocketserver_unittest.cc
+++ b/base/nullsocketserver_unittest.cc
@@ -10,6 +10,7 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/nullsocketserver.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -27,7 +28,7 @@ class NullSocketServerTest
NullSocketServer ss_;
};
-TEST_F(NullSocketServerTest, WaitAndSet) {
+TEST_F(NullSocketServerTest, DISABLED_ON_MAC(WaitAndSet)) {
Thread thread;
EXPECT_TRUE(thread.Start());
thread.Post(this, 0);
diff --git a/base/opensslstreamadapter.cc b/base/opensslstreamadapter.cc
index ed5ac740..070a948b 100644
--- a/base/opensslstreamadapter.cc
+++ b/base/opensslstreamadapter.cc
@@ -743,8 +743,15 @@ SSL_CTX* OpenSSLStreamAdapter::SetupSSLContext() {
SSL_CTX_set_info_callback(ctx, OpenSSLAdapter::SSLInfoCallback);
#endif
- SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER |SSL_VERIFY_FAIL_IF_NO_PEER_CERT,
- SSLVerifyCallback);
+ int mode = SSL_VERIFY_PEER;
+ if (client_auth_enabled()) {
+ // Require a certificate from the client.
+ // Note: Normally this is always true in production, but it may be disabled
+ // for testing purposes (e.g. SSLAdapter unit tests).
+ mode |= SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
+ }
+
+ SSL_CTX_set_verify(ctx, mode, SSLVerifyCallback);
SSL_CTX_set_verify_depth(ctx, 4);
SSL_CTX_set_cipher_list(ctx, "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
diff --git a/base/physicalsocketserver_unittest.cc b/base/physicalsocketserver_unittest.cc
index df750adb..4d57e730 100644
--- a/base/physicalsocketserver_unittest.cc
+++ b/base/physicalsocketserver_unittest.cc
@@ -18,6 +18,7 @@
#include "webrtc/base/socket_unittest.h"
#include "webrtc/base/testutils.h"
#include "webrtc/base/thread.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -106,11 +107,11 @@ TEST_F(PhysicalSocketTest, TestCloseInClosedCallbackIPv6) {
SocketTest::TestCloseInClosedCallbackIPv6();
}
-TEST_F(PhysicalSocketTest, TestSocketServerWaitIPv4) {
+TEST_F(PhysicalSocketTest, DISABLED_ON_MAC(TestSocketServerWaitIPv4)) {
SocketTest::TestSocketServerWaitIPv4();
}
-TEST_F(PhysicalSocketTest, TestSocketServerWaitIPv6) {
+TEST_F(PhysicalSocketTest, DISABLED_ON_MAC(TestSocketServerWaitIPv6)) {
SocketTest::TestSocketServerWaitIPv6();
}
@@ -206,7 +207,7 @@ Thread *PosixSignalDeliveryTest::signaled_thread_ = NULL;
// Test receiving a synchronous signal while not in Wait() and then entering
// Wait() afterwards.
-TEST_F(PosixSignalDeliveryTest, RaiseThenWait) {
+TEST_F(PosixSignalDeliveryTest, DISABLED_ON_MAC(RaiseThenWait)) {
ASSERT_TRUE(ss_->SetPosixSignalHandler(SIGTERM, &RecordSignal));
raise(SIGTERM);
EXPECT_TRUE(ss_->Wait(0, true));
@@ -216,7 +217,7 @@ TEST_F(PosixSignalDeliveryTest, RaiseThenWait) {
// Test that we can handle getting tons of repeated signals and that we see all
// the different ones.
-TEST_F(PosixSignalDeliveryTest, InsanelyManySignals) {
+TEST_F(PosixSignalDeliveryTest, DISABLED_ON_MAC(InsanelyManySignals)) {
ss_->SetPosixSignalHandler(SIGTERM, &RecordSignal);
ss_->SetPosixSignalHandler(SIGINT, &RecordSignal);
for (int i = 0; i < 10000; ++i) {
@@ -231,7 +232,7 @@ TEST_F(PosixSignalDeliveryTest, InsanelyManySignals) {
}
// Test that a signal during a Wait() call is detected.
-TEST_F(PosixSignalDeliveryTest, SignalDuringWait) {
+TEST_F(PosixSignalDeliveryTest, DISABLED_ON_MAC(SignalDuringWait)) {
ss_->SetPosixSignalHandler(SIGALRM, &RecordSignal);
alarm(1);
EXPECT_TRUE(ss_->Wait(1500, true));
@@ -256,7 +257,7 @@ class RaiseSigTermRunnable : public Runnable {
// Test that it works no matter what thread the kernel chooses to give the
// signal to (since it's not guaranteed to be the one that Wait() runs on).
-TEST_F(PosixSignalDeliveryTest, SignalOnDifferentThread) {
+TEST_F(PosixSignalDeliveryTest, DISABLED_ON_MAC(SignalOnDifferentThread)) {
ss_->SetPosixSignalHandler(SIGTERM, &RecordSignal);
// Mask out SIGTERM so that it can't be delivered to this thread.
sigset_t mask;
diff --git a/base/proxy_unittest.cc b/base/proxy_unittest.cc
index d8a523fe..a1ade0c7 100644
--- a/base/proxy_unittest.cc
+++ b/base/proxy_unittest.cc
@@ -17,6 +17,7 @@
#include "webrtc/base/testclient.h"
#include "webrtc/base/testechoserver.h"
#include "webrtc/base/virtualsocketserver.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
using rtc::Socket;
using rtc::Thread;
@@ -74,7 +75,7 @@ class ProxyTest : public testing::Test {
};
// Tests whether we can use a SOCKS5 proxy to connect to a server.
-TEST_F(ProxyTest, TestSocks5Connect) {
+TEST_F(ProxyTest, DISABLED_ON_MAC(TestSocks5Connect)) {
rtc::AsyncSocket* socket =
ss()->CreateAsyncSocket(kSocksProxyIntAddr.family(), SOCK_STREAM);
rtc::AsyncSocksProxySocket* proxy_socket =
@@ -118,7 +119,7 @@ TEST_F(ProxyTest, TestHttpsConnect) {
*/
// Tests whether we can autodetect a SOCKS5 proxy.
-TEST_F(ProxyTest, TestAutoDetectSocks5) {
+TEST_F(ProxyTest, DISABLED_ON_MAC(TestAutoDetectSocks5)) {
EXPECT_EQ(rtc::PROXY_SOCKS5, DetectProxyType(kSocksProxyIntAddr));
}
@@ -130,6 +131,6 @@ TEST_F(ProxyTest, TestAutoDetectHttps) {
*/
// Tests whether we fail properly for no proxy.
-TEST_F(ProxyTest, TestAutoDetectBogus) {
+TEST_F(ProxyTest, DISABLED_ON_MAC(TestAutoDetectBogus)) {
EXPECT_EQ(rtc::PROXY_UNKNOWN, DetectProxyType(kBogusProxyIntAddr));
}
diff --git a/base/sharedexclusivelock.h b/base/sharedexclusivelock.h
index f64d7cf5..aaaba3b8 100644
--- a/base/sharedexclusivelock.h
+++ b/base/sharedexclusivelock.h
@@ -19,14 +19,14 @@ namespace rtc {
// This class provides shared-exclusive lock. It can be used in cases like
// multiple-readers/single-writer model.
-class SharedExclusiveLock {
+class LOCKABLE SharedExclusiveLock {
public:
SharedExclusiveLock();
// Locking/unlocking methods. It is encouraged to use SharedScope or
// ExclusiveScope for protection.
- void LockExclusive();
- void UnlockExclusive();
+ void LockExclusive() EXCLUSIVE_LOCK_FUNCTION();
+ void UnlockExclusive() UNLOCK_FUNCTION();
void LockShared();
void UnlockShared();
@@ -39,15 +39,14 @@ class SharedExclusiveLock {
DISALLOW_COPY_AND_ASSIGN(SharedExclusiveLock);
};
-class SharedScope {
+class SCOPED_LOCKABLE SharedScope {
public:
- explicit SharedScope(SharedExclusiveLock* lock) : lock_(lock) {
+ explicit SharedScope(SharedExclusiveLock* lock) SHARED_LOCK_FUNCTION(lock)
+ : lock_(lock) {
lock_->LockShared();
}
- ~SharedScope() {
- lock_->UnlockShared();
- }
+ ~SharedScope() UNLOCK_FUNCTION() { lock_->UnlockShared(); }
private:
SharedExclusiveLock* lock_;
@@ -55,15 +54,15 @@ class SharedScope {
DISALLOW_COPY_AND_ASSIGN(SharedScope);
};
-class ExclusiveScope {
+class SCOPED_LOCKABLE ExclusiveScope {
public:
- explicit ExclusiveScope(SharedExclusiveLock* lock) : lock_(lock) {
+ explicit ExclusiveScope(SharedExclusiveLock* lock)
+ EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
lock_->LockExclusive();
}
- ~ExclusiveScope() {
- lock_->UnlockExclusive();
- }
+ ~ExclusiveScope() UNLOCK_FUNCTION() { lock_->UnlockExclusive(); }
private:
SharedExclusiveLock* lock_;
diff --git a/base/sharedexclusivelock_unittest.cc b/base/sharedexclusivelock_unittest.cc
index 42334af7..ea406cf9 100644
--- a/base/sharedexclusivelock_unittest.cc
+++ b/base/sharedexclusivelock_unittest.cc
@@ -16,6 +16,7 @@
#include "webrtc/base/sharedexclusivelock.h"
#include "webrtc/base/thread.h"
#include "webrtc/base/timeutils.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -157,7 +158,7 @@ TEST_F(SharedExclusiveLockTest, DISABLED_TestSharedShared) {
EXPECT_LE(reader1.waiting_time_in_ms(), kNoWaitThresholdInMs);
}
-TEST_F(SharedExclusiveLockTest, TestSharedExclusive) {
+TEST_F(SharedExclusiveLockTest, DISABLED_ON_MAC(TestSharedExclusive)) {
bool done;
WriteTask writer(shared_exclusive_lock_.get(), &value_, &done);
@@ -176,7 +177,7 @@ TEST_F(SharedExclusiveLockTest, TestSharedExclusive) {
EXPECT_GE(writer.waiting_time_in_ms(), kWaitThresholdInMs);
}
-TEST_F(SharedExclusiveLockTest, TestExclusiveShared) {
+TEST_F(SharedExclusiveLockTest, DISABLED_ON_MAC(TestExclusiveShared)) {
int value;
bool done;
ReadTask reader(shared_exclusive_lock_.get(), &value_, &done);
@@ -196,7 +197,7 @@ TEST_F(SharedExclusiveLockTest, TestExclusiveShared) {
EXPECT_GE(reader.waiting_time_in_ms(), kWaitThresholdInMs);
}
-TEST_F(SharedExclusiveLockTest, TestExclusiveExclusive) {
+TEST_F(SharedExclusiveLockTest, DISABLED_ON_MAC(TestExclusiveExclusive)) {
bool done;
WriteTask writer(shared_exclusive_lock_.get(), &value_, &done);
diff --git a/base/signalthread.h b/base/signalthread.h
index a97bda1a..8e18be61 100644
--- a/base/signalthread.h
+++ b/base/signalthread.h
@@ -115,16 +115,17 @@ class SignalThread
DISALLOW_IMPLICIT_CONSTRUCTORS(Worker);
};
- class EnterExit {
+ class SCOPED_LOCKABLE EnterExit {
public:
- explicit EnterExit(SignalThread* t) : t_(t) {
+ explicit EnterExit(SignalThread* t) EXCLUSIVE_LOCK_FUNCTION(t->cs_)
+ : t_(t) {
t_->cs_.Enter();
// If refcount_ is zero then the object has already been deleted and we
// will be double-deleting it in ~EnterExit()! (shouldn't happen)
ASSERT(t_->refcount_ != 0);
++t_->refcount_;
}
- ~EnterExit() {
+ ~EnterExit() UNLOCK_FUNCTION() {
bool d = (0 == --t_->refcount_);
t_->cs_.Leave();
if (d)
diff --git a/base/signalthread_unittest.cc b/base/signalthread_unittest.cc
index e0ea54eb..3f534312 100644
--- a/base/signalthread_unittest.cc
+++ b/base/signalthread_unittest.cc
@@ -11,6 +11,7 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/signalthread.h"
#include "webrtc/base/thread.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
using namespace rtc;
@@ -133,7 +134,7 @@ class OwnerThread : public Thread, public sigslot::has_slots<> {
// Test for when the main thread goes away while the
// signal thread is still working. This may happen
// when shutting down the process.
-TEST_F(SignalThreadTest, OwnerThreadGoesAway) {
+TEST_F(SignalThreadTest, DISABLED_ON_MAC(OwnerThreadGoesAway)) {
{
scoped_ptr<OwnerThread> owner(new OwnerThread(this));
main_thread_ = owner.get();
@@ -156,7 +157,7 @@ TEST_F(SignalThreadTest, OwnerThreadGoesAway) {
EXPECT_EQ(stopped, thread_stopped_); \
EXPECT_EQ(deleted, thread_deleted_);
-TEST_F(SignalThreadTest, ThreadFinishes) {
+TEST_F(SignalThreadTest, DISABLED_ON_MAC(ThreadFinishes)) {
thread_->Start();
EXPECT_STATE(1, 0, 0, 0, 0);
Thread::SleepMs(500);
@@ -165,7 +166,7 @@ TEST_F(SignalThreadTest, ThreadFinishes) {
EXPECT_STATE(1, 1, 1, 0, 1);
}
-TEST_F(SignalThreadTest, ReleasedThreadFinishes) {
+TEST_F(SignalThreadTest, DISABLED_ON_MAC(ReleasedThreadFinishes)) {
thread_->Start();
EXPECT_STATE(1, 0, 0, 0, 0);
thread_->Release();
@@ -177,7 +178,7 @@ TEST_F(SignalThreadTest, ReleasedThreadFinishes) {
EXPECT_STATE(1, 1, 1, 0, 1);
}
-TEST_F(SignalThreadTest, DestroyedThreadCleansUp) {
+TEST_F(SignalThreadTest, DISABLED_ON_MAC(DestroyedThreadCleansUp)) {
thread_->Start();
EXPECT_STATE(1, 0, 0, 0, 0);
thread_->Destroy(true);
@@ -186,7 +187,7 @@ TEST_F(SignalThreadTest, DestroyedThreadCleansUp) {
EXPECT_STATE(1, 0, 0, 1, 1);
}
-TEST_F(SignalThreadTest, DeferredDestroyedThreadCleansUp) {
+TEST_F(SignalThreadTest, DISABLED_ON_MAC(DeferredDestroyedThreadCleansUp)) {
thread_->Start();
EXPECT_STATE(1, 0, 0, 0, 0);
thread_->Destroy(false);
diff --git a/base/ssladapter_unittest.cc b/base/ssladapter_unittest.cc
new file mode 100644
index 00000000..6d4536d2
--- /dev/null
+++ b/base/ssladapter_unittest.cc
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#include "webrtc/base/gunit.h"
+#include "webrtc/base/ipaddress.h"
+#include "webrtc/base/socketstream.h"
+#include "webrtc/base/ssladapter.h"
+#include "webrtc/base/sslstreamadapter.h"
+#include "webrtc/base/stream.h"
+#include "webrtc/base/virtualsocketserver.h"
+
+static const int kTimeout = 5000;
+
+static rtc::AsyncSocket* CreateSocket(const rtc::SSLMode& ssl_mode) {
+ rtc::SocketAddress address(rtc::IPAddress(INADDR_ANY), 0);
+
+ rtc::AsyncSocket* socket = rtc::Thread::Current()->
+ socketserver()->CreateAsyncSocket(
+ address.family(), (ssl_mode == rtc::SSL_MODE_DTLS) ?
+ SOCK_DGRAM : SOCK_STREAM);
+ socket->Bind(address);
+
+ return socket;
+}
+
+static std::string GetSSLProtocolName(const rtc::SSLMode& ssl_mode) {
+ return (ssl_mode == rtc::SSL_MODE_DTLS) ? "DTLS" : "TLS";
+}
+
+class SSLAdapterTestDummyClient : public sigslot::has_slots<> {
+ public:
+ explicit SSLAdapterTestDummyClient(const rtc::SSLMode& ssl_mode)
+ : ssl_mode_(ssl_mode) {
+ rtc::AsyncSocket* socket = CreateSocket(ssl_mode_);
+
+ ssl_adapter_.reset(rtc::SSLAdapter::Create(socket));
+
+ // Ignore any certificate errors for the purpose of testing.
+ // Note: We do this only because we don't have a real certificate.
+ // NEVER USE THIS IN PRODUCTION CODE!
+ ssl_adapter_->set_ignore_bad_cert(true);
+
+ ssl_adapter_->SignalReadEvent.connect(this,
+ &SSLAdapterTestDummyClient::OnSSLAdapterReadEvent);
+ ssl_adapter_->SignalCloseEvent.connect(this,
+ &SSLAdapterTestDummyClient::OnSSLAdapterCloseEvent);
+ }
+
+ rtc::AsyncSocket::ConnState GetState() const {
+ return ssl_adapter_->GetState();
+ }
+
+ const std::string& GetReceivedData() const {
+ return data_;
+ }
+
+ int Connect(const std::string& hostname, const rtc::SocketAddress& address) {
+ LOG(LS_INFO) << "Starting " << GetSSLProtocolName(ssl_mode_)
+ << " handshake with " << hostname;
+
+ if (ssl_adapter_->StartSSL(hostname.c_str(), false) != 0) {
+ return -1;
+ }
+
+ LOG(LS_INFO) << "Initiating connection with " << address;
+
+ return ssl_adapter_->Connect(address);
+ }
+
+ int Close() {
+ return ssl_adapter_->Close();
+ }
+
+ int Send(const std::string& message) {
+ LOG(LS_INFO) << "Client sending '" << message << "'";
+
+ return ssl_adapter_->Send(message.data(), message.length());
+ }
+
+ void OnSSLAdapterReadEvent(rtc::AsyncSocket* socket) {
+ char buffer[4096] = "";
+
+ // Read data received from the server and store it in our internal buffer.
+ int read = socket->Recv(buffer, sizeof(buffer) - 1);
+ if (read != -1) {
+ buffer[read] = '\0';
+
+ LOG(LS_INFO) << "Client received '" << buffer << "'";
+
+ data_ += buffer;
+ }
+ }
+
+ void OnSSLAdapterCloseEvent(rtc::AsyncSocket* socket, int error) {
+ // OpenSSLAdapter signals handshake failure with a close event, but without
+ // closing the socket! Let's close the socket here. This way GetState() can
+ // return CS_CLOSED after failure.
+ if (socket->GetState() != rtc::AsyncSocket::CS_CLOSED) {
+ socket->Close();
+ }
+ }
+
+ private:
+ const rtc::SSLMode ssl_mode_;
+
+ rtc::scoped_ptr<rtc::SSLAdapter> ssl_adapter_;
+
+ std::string data_;
+};
+
+class SSLAdapterTestDummyServer : public sigslot::has_slots<> {
+ public:
+ explicit SSLAdapterTestDummyServer(const rtc::SSLMode& ssl_mode)
+ : ssl_mode_(ssl_mode) {
+ // Generate a key pair and a certificate for this host.
+ ssl_identity_.reset(rtc::SSLIdentity::Generate(GetHostname()));
+
+ server_socket_.reset(CreateSocket(ssl_mode_));
+
+ server_socket_->SignalReadEvent.connect(this,
+ &SSLAdapterTestDummyServer::OnServerSocketReadEvent);
+
+ server_socket_->Listen(1);
+
+ LOG(LS_INFO) << ((ssl_mode_ == rtc::SSL_MODE_DTLS) ? "UDP" : "TCP")
+ << " server listening on " << server_socket_->GetLocalAddress();
+ }
+
+ rtc::SocketAddress GetAddress() const {
+ return server_socket_->GetLocalAddress();
+ }
+
+ std::string GetHostname() const {
+ // Since we don't have a real certificate anyway, the value here doesn't
+ // really matter.
+ return "example.com";
+ }
+
+ const std::string& GetReceivedData() const {
+ return data_;
+ }
+
+ int Send(const std::string& message) {
+ if (ssl_stream_adapter_ == NULL
+ || ssl_stream_adapter_->GetState() != rtc::SS_OPEN) {
+ // No connection yet.
+ return -1;
+ }
+
+ LOG(LS_INFO) << "Server sending '" << message << "'";
+
+ size_t written;
+ int error;
+
+ rtc::StreamResult r = ssl_stream_adapter_->Write(message.data(),
+ message.length(), &written, &error);
+ if (r == rtc::SR_SUCCESS) {
+ return written;
+ } else {
+ return -1;
+ }
+ }
+
+ void OnServerSocketReadEvent(rtc::AsyncSocket* socket) {
+ if (ssl_stream_adapter_ != NULL) {
+ // Only a single connection is supported.
+ return;
+ }
+
+ rtc::SocketAddress address;
+ rtc::AsyncSocket* new_socket = socket->Accept(&address);
+ rtc::SocketStream* stream = new rtc::SocketStream(new_socket);
+
+ ssl_stream_adapter_.reset(rtc::SSLStreamAdapter::Create(stream));
+ ssl_stream_adapter_->SetServerRole();
+
+ // SSLStreamAdapter is normally used for peer-to-peer communication, but
+ // here we're testing communication between a client and a server
+ // (e.g. a WebRTC-based application and an RFC 5766 TURN server), where
+ // clients are not required to provide a certificate during handshake.
+ // Accordingly, we must disable client authentication here.
+ ssl_stream_adapter_->set_client_auth_enabled(false);
+
+ ssl_stream_adapter_->SetIdentity(ssl_identity_->GetReference());
+
+ // Set a bogus peer certificate digest.
+ unsigned char digest[20];
+ size_t digest_len = sizeof(digest);
+ ssl_stream_adapter_->SetPeerCertificateDigest(rtc::DIGEST_SHA_1, digest,
+ digest_len);
+
+ ssl_stream_adapter_->StartSSLWithPeer();
+
+ ssl_stream_adapter_->SignalEvent.connect(this,
+ &SSLAdapterTestDummyServer::OnSSLStreamAdapterEvent);
+ }
+
+ void OnSSLStreamAdapterEvent(rtc::StreamInterface* stream, int sig, int err) {
+ if (sig & rtc::SE_READ) {
+ char buffer[4096] = "";
+
+ size_t read;
+ int error;
+
+ // Read data received from the client and store it in our internal
+ // buffer.
+ rtc::StreamResult r = stream->Read(buffer,
+ sizeof(buffer) - 1, &read, &error);
+ if (r == rtc::SR_SUCCESS) {
+ buffer[read] = '\0';
+
+ LOG(LS_INFO) << "Server received '" << buffer << "'";
+
+ data_ += buffer;
+ }
+ }
+ }
+
+ private:
+ const rtc::SSLMode ssl_mode_;
+
+ rtc::scoped_ptr<rtc::AsyncSocket> server_socket_;
+ rtc::scoped_ptr<rtc::SSLStreamAdapter> ssl_stream_adapter_;
+
+ rtc::scoped_ptr<rtc::SSLIdentity> ssl_identity_;
+
+ std::string data_;
+};
+
+class SSLAdapterTestBase : public testing::Test,
+ public sigslot::has_slots<> {
+ public:
+ explicit SSLAdapterTestBase(const rtc::SSLMode& ssl_mode)
+ : ssl_mode_(ssl_mode),
+ ss_scope_(new rtc::VirtualSocketServer(NULL)),
+ server_(new SSLAdapterTestDummyServer(ssl_mode_)),
+ client_(new SSLAdapterTestDummyClient(ssl_mode_)),
+ handshake_wait_(kTimeout) {
+ }
+
+ static void SetUpTestCase() {
+ rtc::InitializeSSL();
+ }
+
+ static void TearDownTestCase() {
+ rtc::CleanupSSL();
+ }
+
+ void SetHandshakeWait(int wait) {
+ handshake_wait_ = wait;
+ }
+
+ void TestHandshake(bool expect_success) {
+ int rv;
+
+ // The initial state is CS_CLOSED
+ ASSERT_EQ(rtc::AsyncSocket::CS_CLOSED, client_->GetState());
+
+ rv = client_->Connect(server_->GetHostname(), server_->GetAddress());
+ ASSERT_EQ(0, rv);
+
+ // Now the state should be CS_CONNECTING
+ ASSERT_EQ(rtc::AsyncSocket::CS_CONNECTING, client_->GetState());
+
+ if (expect_success) {
+ // If expecting success, the client should end up in the CS_CONNECTED
+ // state after handshake.
+ EXPECT_EQ_WAIT(rtc::AsyncSocket::CS_CONNECTED, client_->GetState(),
+ handshake_wait_);
+
+ LOG(LS_INFO) << GetSSLProtocolName(ssl_mode_) << " handshake complete.";
+
+ } else {
+ // On handshake failure the client should end up in the CS_CLOSED state.
+ EXPECT_EQ_WAIT(rtc::AsyncSocket::CS_CLOSED, client_->GetState(),
+ handshake_wait_);
+
+ LOG(LS_INFO) << GetSSLProtocolName(ssl_mode_) << " handshake failed.";
+ }
+ }
+
+ void TestTransfer(const std::string& message) {
+ int rv;
+
+ rv = client_->Send(message);
+ ASSERT_EQ(static_cast<int>(message.length()), rv);
+
+ // The server should have received the client's message.
+ EXPECT_EQ_WAIT(message, server_->GetReceivedData(), kTimeout);
+
+ rv = server_->Send(message);
+ ASSERT_EQ(static_cast<int>(message.length()), rv);
+
+ // The client should have received the server's message.
+ EXPECT_EQ_WAIT(message, client_->GetReceivedData(), kTimeout);
+
+ LOG(LS_INFO) << "Transfer complete.";
+ }
+
+ private:
+ const rtc::SSLMode ssl_mode_;
+
+ const rtc::SocketServerScope ss_scope_;
+
+ rtc::scoped_ptr<SSLAdapterTestDummyServer> server_;
+ rtc::scoped_ptr<SSLAdapterTestDummyClient> client_;
+
+ int handshake_wait_;
+};
+
+class SSLAdapterTestTLS : public SSLAdapterTestBase {
+ public:
+ SSLAdapterTestTLS() : SSLAdapterTestBase(rtc::SSL_MODE_TLS) {}
+};
+
+
+#if SSL_USE_OPENSSL
+
+// Basic tests: TLS
+
+// Test that handshake works
+TEST_F(SSLAdapterTestTLS, TestTLSConnect) {
+ TestHandshake(true);
+}
+
+// Test transfer between client and server
+TEST_F(SSLAdapterTestTLS, TestTLSTransfer) {
+ TestHandshake(true);
+ TestTransfer("Hello, world!");
+}
+
+#endif // SSL_USE_OPENSSL
+
diff --git a/base/sslstreamadapter.h b/base/sslstreamadapter.h
index ffe6b2f7..ea966c54 100644
--- a/base/sslstreamadapter.h
+++ b/base/sslstreamadapter.h
@@ -48,11 +48,15 @@ class SSLStreamAdapter : public StreamAdapterInterface {
static SSLStreamAdapter* Create(StreamInterface* stream);
explicit SSLStreamAdapter(StreamInterface* stream)
- : StreamAdapterInterface(stream), ignore_bad_cert_(false) { }
+ : StreamAdapterInterface(stream), ignore_bad_cert_(false),
+ client_auth_enabled_(true) { }
void set_ignore_bad_cert(bool ignore) { ignore_bad_cert_ = ignore; }
bool ignore_bad_cert() const { return ignore_bad_cert_; }
+ void set_client_auth_enabled(bool enabled) { client_auth_enabled_ = enabled; }
+ bool client_auth_enabled() const { return client_auth_enabled_; }
+
// Specify our SSL identity: key and certificate. Mostly this is
// only used in the peer-to-peer mode (unless we actually want to
// provide a client certificate to a server).
@@ -151,10 +155,16 @@ class SSLStreamAdapter : public StreamAdapterInterface {
static bool HaveDtlsSrtp();
static bool HaveExporter();
+ private:
// If true, the server certificate need not match the configured
// server_name, and in fact missing certificate authority and other
// verification errors are ignored.
bool ignore_bad_cert_;
+
+ // If true (default), the client is required to provide a certificate during
+ // handshake. If no certificate is given, handshake fails. This applies to
+ // server mode only.
+ bool client_auth_enabled_;
};
} // namespace rtc
diff --git a/base/sslstreamadapter_unittest.cc b/base/sslstreamadapter_unittest.cc
index af78bfff..f7dcb71e 100644
--- a/base/sslstreamadapter_unittest.cc
+++ b/base/sslstreamadapter_unittest.cc
@@ -21,6 +21,7 @@
#include "webrtc/base/sslidentity.h"
#include "webrtc/base/sslstreamadapter.h"
#include "webrtc/base/stream.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
static const int kBlockSize = 4096;
static const char kAES_CM_HMAC_SHA1_80[] = "AES_CM_128_HMAC_SHA1_80";
@@ -750,7 +751,7 @@ TEST_F(SSLStreamAdapterTestDTLS,
};
// Test a handshake with small MTU
-TEST_F(SSLStreamAdapterTestDTLS, TestDTLSConnectWithSmallMtu) {
+TEST_F(SSLStreamAdapterTestDTLS, DISABLED_ON_MAC(TestDTLSConnectWithSmallMtu)) {
MAYBE_SKIP_TEST(HaveDtls);
SetMtu(700);
SetHandshakeWait(20000);
diff --git a/base/stream_unittest.cc b/base/stream_unittest.cc
index a6664d71..5c2cb576 100644
--- a/base/stream_unittest.cc
+++ b/base/stream_unittest.cc
@@ -10,6 +10,7 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/stream.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -434,7 +435,7 @@ TEST(FifoBufferTest, WriteOffsetAndReadOffset) {
EXPECT_EQ(SR_BLOCK, buf.ReadOffset(out, 10, 16, NULL));
}
-TEST(AsyncWriteTest, TestWrite) {
+TEST(AsyncWriteTest, DISABLED_ON_MAC(TestWrite)) {
FifoBuffer* buf = new FifoBuffer(100);
AsyncWriteStream stream(buf, Thread::Current());
EXPECT_EQ(SS_OPEN, stream.GetState());
diff --git a/base/task_unittest.cc b/base/task_unittest.cc
index 8831259c..b74c29fe 100644
--- a/base/task_unittest.cc
+++ b/base/task_unittest.cc
@@ -27,6 +27,7 @@
#include "webrtc/base/taskrunner.h"
#include "webrtc/base/thread.h"
#include "webrtc/base/timeutils.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace rtc {
@@ -288,7 +289,7 @@ class TaskTest : public sigslot::has_slots<> {
TaskInfo happy_[HAPPY_TASK_COUNT];
};
-TEST(start_task_test, Timeout) {
+TEST(start_task_test, DISABLED_ON_MAC(Timeout)) {
TaskTest task_test;
task_test.Start();
task_test.check_passed();
@@ -335,7 +336,7 @@ class TaskAbortTest : public sigslot::has_slots<> {
DISALLOW_EVIL_CONSTRUCTORS(TaskAbortTest);
};
-TEST(start_task_test, Abort) {
+TEST(start_task_test, DISABLED_ON_MAC(Abort)) {
TaskAbortTest abort_test;
abort_test.Start();
}
@@ -398,7 +399,7 @@ class AbortShouldWakeTest : public sigslot::has_slots<> {
DISALLOW_EVIL_CONSTRUCTORS(AbortShouldWakeTest);
};
-TEST(start_task_test, AbortShouldWake) {
+TEST(start_task_test, DISABLED_ON_MAC(AbortShouldWake)) {
AbortShouldWakeTest abort_should_wake_test;
abort_should_wake_test.Start();
}
@@ -479,7 +480,7 @@ class TimeoutChangeTest : public sigslot::has_slots<> {
DISALLOW_EVIL_CONSTRUCTORS(TimeoutChangeTest);
};
-TEST(start_task_test, TimeoutChange) {
+TEST(start_task_test, DISABLED_ON_MAC(TimeoutChange)) {
TimeoutChangeTest timeout_change_test;
timeout_change_test.Start();
}
@@ -496,7 +497,7 @@ class DeleteTestTaskRunner : public TaskRunner {
DISALLOW_EVIL_CONSTRUCTORS(DeleteTestTaskRunner);
};
-TEST(unstarted_task_test, DeleteTask) {
+TEST(unstarted_task_test, DISABLED_ON_MAC(DeleteTask)) {
// This test ensures that we don't
// crash if a task is deleted without running it.
DeleteTestTaskRunner task_runner;
@@ -511,7 +512,7 @@ TEST(unstarted_task_test, DeleteTask) {
task_runner.RunTasks();
}
-TEST(unstarted_task_test, DoNotDeleteTask1) {
+TEST(unstarted_task_test, DISABLED_ON_MAC(DoNotDeleteTask1)) {
// This test ensures that we don't
// crash if a task runner is deleted without
// running a certain task.
@@ -525,7 +526,7 @@ TEST(unstarted_task_test, DoNotDeleteTask1) {
// Never run the tasks
}
-TEST(unstarted_task_test, DoNotDeleteTask2) {
+TEST(unstarted_task_test, DISABLED_ON_MAC(DoNotDeleteTask2)) {
// This test ensures that we don't
// crash if a taskrunner is delete with a
// task that has never been started.
diff --git a/base/testclient_unittest.cc b/base/testclient_unittest.cc
index c2826686..21a70555 100644
--- a/base/testclient_unittest.cc
+++ b/base/testclient_unittest.cc
@@ -14,6 +14,7 @@
#include "webrtc/base/testclient.h"
#include "webrtc/base/testechoserver.h"
#include "webrtc/base/thread.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
using namespace rtc;
@@ -51,11 +52,11 @@ void TestTcpInternal(const SocketAddress& loopback) {
}
// Tests whether the TestClient can send UDP to itself.
-TEST(TestClientTest, TestUdpIPv4) {
+TEST(TestClientTest, DISABLED_ON_MAC(TestUdpIPv4)) {
TestUdpInternal(SocketAddress("127.0.0.1", 0));
}
-TEST(TestClientTest, TestUdpIPv6) {
+TEST(TestClientTest, DISABLED_ON_MAC(TestUdpIPv6)) {
if (HasIPv6Enabled()) {
TestUdpInternal(SocketAddress("::1", 0));
} else {
@@ -64,11 +65,11 @@ TEST(TestClientTest, TestUdpIPv6) {
}
// Tests whether the TestClient can connect to a server and exchange data.
-TEST(TestClientTest, TestTcpIPv4) {
+TEST(TestClientTest, DISABLED_ON_MAC(TestTcpIPv4)) {
TestTcpInternal(SocketAddress("127.0.0.1", 0));
}
-TEST(TestClientTest, TestTcpIPv6) {
+TEST(TestClientTest, DISABLED_ON_MAC(TestTcpIPv6)) {
if (HasIPv6Enabled()) {
TestTcpInternal(SocketAddress("::1", 0));
} else {
diff --git a/system_wrappers/interface/thread_annotations.h b/base/thread_annotations.h
index 612242d6..612242d6 100644
--- a/system_wrappers/interface/thread_annotations.h
+++ b/base/thread_annotations.h
diff --git a/base/thread_checker_unittest.cc b/base/thread_checker_unittest.cc
index 87e69c03..25b9f503 100644
--- a/base/thread_checker_unittest.cc
+++ b/base/thread_checker_unittest.cc
@@ -15,6 +15,7 @@
#include "webrtc/base/thread.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
// Duplicated from base/threading/thread_checker.h so that we can be
// good citizens there and undef the macro.
@@ -104,7 +105,7 @@ class DeleteThreadCheckerClassOnThread : public Thread {
} // namespace
-TEST(ThreadCheckerTest, CallsAllowedOnSameThread) {
+TEST(ThreadCheckerTest, DISABLED_ON_MAC(CallsAllowedOnSameThread)) {
scoped_ptr<ThreadCheckerClass> thread_checker_class(
new ThreadCheckerClass);
@@ -115,7 +116,7 @@ TEST(ThreadCheckerTest, CallsAllowedOnSameThread) {
thread_checker_class.reset();
}
-TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) {
+TEST(ThreadCheckerTest, DISABLED_ON_MAC(DestructorAllowedOnDifferentThread)) {
scoped_ptr<ThreadCheckerClass> thread_checker_class(
new ThreadCheckerClass);
@@ -128,7 +129,7 @@ TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) {
delete_on_thread.Join();
}
-TEST(ThreadCheckerTest, DetachFromThread) {
+TEST(ThreadCheckerTest, DISABLED_ON_MAC(DetachFromThread)) {
scoped_ptr<ThreadCheckerClass> thread_checker_class(
new ThreadCheckerClass);
diff --git a/base/thread_unittest.cc b/base/thread_unittest.cc
index 6a687574..4229df28 100644
--- a/base/thread_unittest.cc
+++ b/base/thread_unittest.cc
@@ -15,6 +15,7 @@
#include "webrtc/base/physicalsocketserver.h"
#include "webrtc/base/socketaddress.h"
#include "webrtc/base/thread.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
#if defined(WEBRTC_WIN)
#include <comdef.h> // NOLINT
@@ -198,7 +199,7 @@ TEST(ThreadTest, DISABLED_Main) {
// Test that setting thread names doesn't cause a malfunction.
// There's no easy way to verify the name was set properly at this time.
-TEST(ThreadTest, Names) {
+TEST(ThreadTest, DISABLED_ON_MAC(Names)) {
// Default name
Thread *thread;
thread = new Thread();
@@ -221,7 +222,7 @@ TEST(ThreadTest, Names) {
// Test that setting thread priorities doesn't cause a malfunction.
// There's no easy way to verify the priority was set properly at this time.
-TEST(ThreadTest, Priorities) {
+TEST(ThreadTest, DISABLED_ON_MAC(Priorities)) {
Thread *thread;
thread = new Thread();
EXPECT_TRUE(thread->SetPriority(PRIORITY_HIGH));
@@ -246,7 +247,7 @@ TEST(ThreadTest, Priorities) {
}
-TEST(ThreadTest, Wrap) {
+TEST(ThreadTest, DISABLED_ON_MAC(Wrap)) {
CustomThread* cthread = new CustomThread();
EXPECT_TRUE(cthread->WrapCurrent());
EXPECT_TRUE(cthread->RunningForTest());
@@ -256,7 +257,7 @@ TEST(ThreadTest, Wrap) {
delete cthread;
}
-TEST(ThreadTest, Invoke) {
+TEST(ThreadTest, DISABLED_ON_MAC(Invoke)) {
// Create and start the thread.
Thread thread;
thread.Start();
@@ -304,7 +305,7 @@ class AsyncInvokeTest : public testing::Test {
Thread* expected_thread_;
};
-TEST_F(AsyncInvokeTest, FireAndForget) {
+TEST_F(AsyncInvokeTest, DISABLED_FireAndForget) {
AsyncInvoker invoker;
// Create and start the thread.
Thread thread;
@@ -315,7 +316,7 @@ TEST_F(AsyncInvokeTest, FireAndForget) {
EXPECT_TRUE_WAIT(called, kWaitTimeout);
}
-TEST_F(AsyncInvokeTest, WithCallback) {
+TEST_F(AsyncInvokeTest, DISABLED_WithCallback) {
AsyncInvoker invoker;
// Create and start the thread.
Thread thread;
@@ -328,7 +329,7 @@ TEST_F(AsyncInvokeTest, WithCallback) {
EXPECT_EQ_WAIT(42, int_value_, kWaitTimeout);
}
-TEST_F(AsyncInvokeTest, CancelInvoker) {
+TEST_F(AsyncInvokeTest, DISABLED_CancelInvoker) {
// Create and start the thread.
Thread thread;
thread.Start();
@@ -344,7 +345,7 @@ TEST_F(AsyncInvokeTest, CancelInvoker) {
EXPECT_EQ(0, int_value_);
}
-TEST_F(AsyncInvokeTest, CancelCallingThread) {
+TEST_F(AsyncInvokeTest, DISABLED_CancelCallingThread) {
AsyncInvoker invoker;
{ // Create and start the thread.
Thread thread;
@@ -361,7 +362,7 @@ TEST_F(AsyncInvokeTest, CancelCallingThread) {
EXPECT_EQ(0, int_value_);
}
-TEST_F(AsyncInvokeTest, KillInvokerBeforeExecute) {
+TEST_F(AsyncInvokeTest, DISABLED_KillInvokerBeforeExecute) {
Thread thread;
thread.Start();
{
@@ -378,7 +379,7 @@ TEST_F(AsyncInvokeTest, KillInvokerBeforeExecute) {
EXPECT_EQ(0, int_value_);
}
-TEST_F(AsyncInvokeTest, Flush) {
+TEST_F(AsyncInvokeTest, DISABLED_Flush) {
AsyncInvoker invoker;
bool flag1 = false;
bool flag2 = false;
@@ -396,7 +397,7 @@ TEST_F(AsyncInvokeTest, Flush) {
EXPECT_TRUE(flag2);
}
-TEST_F(AsyncInvokeTest, FlushWithIds) {
+TEST_F(AsyncInvokeTest, DISABLED_FlushWithIds) {
AsyncInvoker invoker;
bool flag1 = false;
bool flag2 = false;
diff --git a/base/virtualsocket_unittest.cc b/base/virtualsocket_unittest.cc
index 253d2c5b..0a26cc9d 100644
--- a/base/virtualsocket_unittest.cc
+++ b/base/virtualsocket_unittest.cc
@@ -21,6 +21,7 @@
#include "webrtc/base/thread.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/base/virtualsocketserver.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
using namespace rtc;
@@ -775,37 +776,37 @@ class VirtualSocketServerTest : public testing::Test {
const SocketAddress kIPv6AnyAddress;
};
-TEST_F(VirtualSocketServerTest, basic_v4) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(basic_v4)) {
SocketAddress ipv4_test_addr(IPAddress(INADDR_ANY), 5000);
BasicTest(ipv4_test_addr);
}
-TEST_F(VirtualSocketServerTest, basic_v6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(basic_v6)) {
SocketAddress ipv6_test_addr(IPAddress(in6addr_any), 5000);
BasicTest(ipv6_test_addr);
}
-TEST_F(VirtualSocketServerTest, connect_v4) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(connect_v4)) {
ConnectTest(kIPv4AnyAddress);
}
-TEST_F(VirtualSocketServerTest, connect_v6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(connect_v6)) {
ConnectTest(kIPv6AnyAddress);
}
-TEST_F(VirtualSocketServerTest, connect_to_non_listener_v4) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(connect_to_non_listener_v4)) {
ConnectToNonListenerTest(kIPv4AnyAddress);
}
-TEST_F(VirtualSocketServerTest, connect_to_non_listener_v6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(connect_to_non_listener_v6)) {
ConnectToNonListenerTest(kIPv6AnyAddress);
}
-TEST_F(VirtualSocketServerTest, close_during_connect_v4) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(close_during_connect_v4)) {
CloseDuringConnectTest(kIPv4AnyAddress);
}
-TEST_F(VirtualSocketServerTest, close_during_connect_v6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(close_during_connect_v6)) {
CloseDuringConnectTest(kIPv6AnyAddress);
}
@@ -817,11 +818,11 @@ TEST_F(VirtualSocketServerTest, close_v6) {
CloseTest(kIPv6AnyAddress);
}
-TEST_F(VirtualSocketServerTest, tcp_send_v4) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(tcp_send_v4)) {
TcpSendTest(kIPv4AnyAddress);
}
-TEST_F(VirtualSocketServerTest, tcp_send_v6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(tcp_send_v6)) {
TcpSendTest(kIPv6AnyAddress);
}
@@ -833,17 +834,17 @@ TEST_F(VirtualSocketServerTest, TcpSendsPacketsInOrder_v6) {
TcpSendsPacketsInOrderTest(kIPv6AnyAddress);
}
-TEST_F(VirtualSocketServerTest, bandwidth_v4) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(bandwidth_v4)) {
SocketAddress ipv4_test_addr(IPAddress(INADDR_ANY), 1000);
BandwidthTest(ipv4_test_addr);
}
-TEST_F(VirtualSocketServerTest, bandwidth_v6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(bandwidth_v6)) {
SocketAddress ipv6_test_addr(IPAddress(in6addr_any), 1000);
BandwidthTest(ipv6_test_addr);
}
-TEST_F(VirtualSocketServerTest, delay_v4) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(delay_v4)) {
SocketAddress ipv4_test_addr(IPAddress(INADDR_ANY), 1000);
DelayTest(ipv4_test_addr);
}
@@ -855,104 +856,104 @@ TEST_F(VirtualSocketServerTest, DISABLED_delay_v6) {
}
// Works, receiving socket sees 127.0.0.2.
-TEST_F(VirtualSocketServerTest, CanConnectFromMappedIPv6ToIPv4Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanConnectFromMappedIPv6ToIPv4Any)) {
CrossFamilyConnectionTest(SocketAddress("::ffff:127.0.0.2", 0),
SocketAddress("0.0.0.0", 5000),
true);
}
// Fails.
-TEST_F(VirtualSocketServerTest, CantConnectFromUnMappedIPv6ToIPv4Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CantConnectFromUnMappedIPv6ToIPv4Any)) {
CrossFamilyConnectionTest(SocketAddress("::2", 0),
SocketAddress("0.0.0.0", 5000),
false);
}
// Fails.
-TEST_F(VirtualSocketServerTest, CantConnectFromUnMappedIPv6ToMappedIPv6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CantConnectFromUnMappedIPv6ToMappedIPv6)) {
CrossFamilyConnectionTest(SocketAddress("::2", 0),
SocketAddress("::ffff:127.0.0.1", 5000),
false);
}
// Works. receiving socket sees ::ffff:127.0.0.2.
-TEST_F(VirtualSocketServerTest, CanConnectFromIPv4ToIPv6Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanConnectFromIPv4ToIPv6Any)) {
CrossFamilyConnectionTest(SocketAddress("127.0.0.2", 0),
SocketAddress("::", 5000),
true);
}
// Fails.
-TEST_F(VirtualSocketServerTest, CantConnectFromIPv4ToUnMappedIPv6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CantConnectFromIPv4ToUnMappedIPv6)) {
CrossFamilyConnectionTest(SocketAddress("127.0.0.2", 0),
SocketAddress("::1", 5000),
false);
}
// Works. Receiving socket sees ::ffff:127.0.0.1.
-TEST_F(VirtualSocketServerTest, CanConnectFromIPv4ToMappedIPv6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanConnectFromIPv4ToMappedIPv6)) {
CrossFamilyConnectionTest(SocketAddress("127.0.0.1", 0),
SocketAddress("::ffff:127.0.0.2", 5000),
true);
}
// Works, receiving socket sees a result from GetNextIP.
-TEST_F(VirtualSocketServerTest, CanConnectFromUnboundIPv6ToIPv4Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanConnectFromUnboundIPv6ToIPv4Any)) {
CrossFamilyConnectionTest(SocketAddress("::", 0),
SocketAddress("0.0.0.0", 5000),
true);
}
// Works, receiving socket sees whatever GetNextIP gave the client.
-TEST_F(VirtualSocketServerTest, CanConnectFromUnboundIPv4ToIPv6Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanConnectFromUnboundIPv4ToIPv6Any)) {
CrossFamilyConnectionTest(SocketAddress("0.0.0.0", 0),
SocketAddress("::", 5000),
true);
}
-TEST_F(VirtualSocketServerTest, CanSendDatagramFromUnboundIPv4ToIPv6Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanSendDatagramFromUnboundIPv4ToIPv6Any)) {
CrossFamilyDatagramTest(SocketAddress("0.0.0.0", 0),
SocketAddress("::", 5000),
true);
}
-TEST_F(VirtualSocketServerTest, CanSendDatagramFromMappedIPv6ToIPv4Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanSendDatagramFromMappedIPv6ToIPv4Any)) {
CrossFamilyDatagramTest(SocketAddress("::ffff:127.0.0.1", 0),
SocketAddress("0.0.0.0", 5000),
true);
}
-TEST_F(VirtualSocketServerTest, CantSendDatagramFromUnMappedIPv6ToIPv4Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CantSendDatagramFromUnMappedIPv6ToIPv4Any)) {
CrossFamilyDatagramTest(SocketAddress("::2", 0),
SocketAddress("0.0.0.0", 5000),
false);
}
-TEST_F(VirtualSocketServerTest, CantSendDatagramFromUnMappedIPv6ToMappedIPv6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CantSendDatagramFromUnMappedIPv6ToMappedIPv6)) {
CrossFamilyDatagramTest(SocketAddress("::2", 0),
SocketAddress("::ffff:127.0.0.1", 5000),
false);
}
-TEST_F(VirtualSocketServerTest, CanSendDatagramFromIPv4ToIPv6Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanSendDatagramFromIPv4ToIPv6Any)) {
CrossFamilyDatagramTest(SocketAddress("127.0.0.2", 0),
SocketAddress("::", 5000),
true);
}
-TEST_F(VirtualSocketServerTest, CantSendDatagramFromIPv4ToUnMappedIPv6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CantSendDatagramFromIPv4ToUnMappedIPv6)) {
CrossFamilyDatagramTest(SocketAddress("127.0.0.2", 0),
SocketAddress("::1", 5000),
false);
}
-TEST_F(VirtualSocketServerTest, CanSendDatagramFromIPv4ToMappedIPv6) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanSendDatagramFromIPv4ToMappedIPv6)) {
CrossFamilyDatagramTest(SocketAddress("127.0.0.1", 0),
SocketAddress("::ffff:127.0.0.2", 5000),
true);
}
-TEST_F(VirtualSocketServerTest, CanSendDatagramFromUnboundIPv6ToIPv4Any) {
+TEST_F(VirtualSocketServerTest, DISABLED_ON_MAC(CanSendDatagramFromUnboundIPv6ToIPv4Any)) {
CrossFamilyDatagramTest(SocketAddress("::", 0),
SocketAddress("0.0.0.0", 5000),
true);
diff --git a/build/common.gypi b/build/common.gypi
index 92404fde..fdf1abe1 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -148,11 +148,6 @@
'include_tests%': 1,
'restrict_webrtc_logging%': 0,
}],
- ['OS=="mac" or OS=="ios"', {
- # Set the minimum SDK level to 10.7 since we need AVFoundation.
- # See ../build/common.gypi for more info about this variable.
- 'mac_sdk_min%': '10.7',
- }],
['OS=="ios"', {
'build_libjpeg%': 0,
'enable_protobuf%': 0,
diff --git a/call.h b/call.h
index 30300740..bb1dca14 100644
--- a/call.h
+++ b/call.h
@@ -92,8 +92,7 @@ class Call {
virtual VideoSendStream* CreateVideoSendStream(
const VideoSendStream::Config& config,
- const std::vector<VideoStream>& video_streams,
- const void* encoder_settings) = 0;
+ const VideoEncoderConfig& encoder_config) = 0;
virtual void DestroyVideoSendStream(VideoSendStream* send_stream) = 0;
diff --git a/config.h b/config.h
index e4bccf90..6f3fb1d6 100644
--- a/config.h
+++ b/config.h
@@ -108,6 +108,20 @@ struct VideoStream {
std::vector<int> temporal_layers;
};
+struct VideoEncoderConfig {
+ enum ContentType {
+ kRealtimeVideo,
+ kScreenshare,
+ };
+
+ VideoEncoderConfig()
+ : content_type(kRealtimeVideo), encoder_specific_settings(NULL) {}
+
+ std::vector<VideoStream> streams;
+ ContentType content_type;
+ void* encoder_specific_settings;
+};
+
} // namespace webrtc
#endif // WEBRTC_CONFIG_H_
diff --git a/modules/audio_coding/codecs/isac/fix/interface/isacfix.h b/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
index 7cd29544..942727ab 100644
--- a/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
+++ b/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
@@ -130,7 +130,7 @@ extern "C" {
int16_t WebRtcIsacfix_Encode(ISACFIX_MainStruct *ISAC_main_inst,
const int16_t *speechIn,
- int16_t *encoded);
+ uint8_t* encoded);
diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index e855daea..118fd2fe 100644
--- a/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -17,6 +17,7 @@
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h"
+#include <assert.h>
#include <stdlib.h>
#include "webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
@@ -355,10 +356,10 @@ int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst,
int16_t WebRtcIsacfix_Encode(ISACFIX_MainStruct *ISAC_main_inst,
const int16_t *speechIn,
- int16_t *encoded)
+ uint8_t* encoded)
{
ISACFIX_SubStruct *ISAC_inst;
- int16_t stream_len;
+ int16_t stream_len, stream_len_even;
#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
@@ -382,16 +383,24 @@ int16_t WebRtcIsacfix_Encode(ISACFIX_MainStruct *ISAC_main_inst,
return -1;
}
+ /* One would think that only even stream lengths would make sense here. We do
+ in fact observe odd lengths, however, and in those cases we copy an extra
+ byte. */
+ stream_len_even = stream_len % 2 == 0 ? stream_len : stream_len + 1;
- /* convert from bytes to int16_t */
#ifndef WEBRTC_ARCH_BIG_ENDIAN
- for (k=0;k<(stream_len+1)>>1;k++) {
- encoded[k] = (int16_t)( ( (uint16_t)(ISAC_inst->ISACenc_obj.bitstr_obj).stream[k] >> 8 )
- | (((ISAC_inst->ISACenc_obj.bitstr_obj).stream[k] & 0x00FF) << 8));
+ /* The encoded data vector is supposesd to be big-endian, but our internal
+ representation is little-endian. So byteswap. */
+ for (k = 0; k < stream_len_even / 2; ++k) {
+ uint16_t s = ISAC_inst->ISACenc_obj.bitstr_obj.stream[k];
+ /* In big-endian, we have... */
+ encoded[2 * k] = s >> 8; /* ...most significant byte at low address... */
+ encoded[2 * k + 1] = s; /* ...least significant byte at high address. */
}
-
#else
- WEBRTC_SPL_MEMCPY_W16(encoded, (ISAC_inst->ISACenc_obj.bitstr_obj).stream, (stream_len + 1)>>1);
+ /* The encoded data vector and our internal representation are both
+ big-endian. */
+ memcpy(encoded, ISAC_inst->ISACenc_obj.bitstr_obj.stream, stream_len_even);
#endif
diff --git a/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc b/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
index 35827511..207ee8c3 100644
--- a/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
+++ b/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
@@ -71,7 +71,7 @@ float IsacSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
size_t pointer = 0;
for (int idx = 0; idx < subblocks; idx++, pointer += subblock_length) {
value = WebRtcIsacfix_Encode(ISACFIX_main_inst_, &in_data[pointer],
- reinterpret_cast<int16_t*>(bit_stream));
+ bit_stream);
}
clocks = clock() - clocks;
EXPECT_GT(value, 0);
diff --git a/modules/audio_coding/codecs/isac/fix/test/kenny.cc b/modules/audio_coding/codecs/isac/fix/test/kenny.cc
index c3c6f135..91c4d76a 100644
--- a/modules/audio_coding/codecs/isac/fix/test/kenny.cc
+++ b/modules/audio_coding/codecs/isac/fix/test/kenny.cc
@@ -565,7 +565,7 @@ int main(int argc, char* argv[])
/* Encode */
stream_len = WebRtcIsacfix_Encode(ISAC_main_inst,
shortdata,
- (int16_t*)streamdata);
+ (uint8_t*)streamdata);
/* If packet is ready, and CE testing, call the different API
functions from the internal API. */
diff --git a/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c b/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
index d5682b21..965f2bc1 100644
--- a/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
+++ b/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
@@ -439,7 +439,9 @@ int main(int argc, char* argv[])
/* iSAC encoding */
if (mode==0 || mode ==1) {
- stream_len = WebRtcIsac_Encode(ISAC_main_inst, shortdata, streamdata);
+ stream_len = WebRtcIsac_Encode(ISAC_main_inst,
+ shortdata,
+ (uint8_t*)streamdata);
if (stream_len < 0) {
/* exit if returned with error */
errtype=WebRtcIsac_GetErrorCode(ISAC_main_inst);
@@ -449,7 +451,10 @@ int main(int argc, char* argv[])
} else if (mode==2 || mode==3) {
/* iSAC encoding */
if (nbTest != 1)
- stream_len = WebRtcIsacfix_Encode(ISACFIX_main_inst, shortdata, streamdata);
+ stream_len = WebRtcIsacfix_Encode(
+ ISACFIX_main_inst,
+ shortdata,
+ (uint8_t*)streamdata);
else
stream_len = WebRtcIsacfix_EncodeNb(ISACFIX_main_inst, shortdata, streamdata);
diff --git a/modules/audio_coding/codecs/isac/main/interface/isac.h b/modules/audio_coding/codecs/isac/main/interface/isac.h
index 76a61e6d..40670580 100644
--- a/modules/audio_coding/codecs/isac/main/interface/isac.h
+++ b/modules/audio_coding/codecs/isac/main/interface/isac.h
@@ -147,7 +147,7 @@ extern "C" {
int16_t WebRtcIsac_Encode(
ISACStruct* ISAC_main_inst,
const int16_t* speechIn,
- int16_t* encoded);
+ uint8_t* encoded);
/******************************************************************************
diff --git a/modules/audio_coding/codecs/isac/main/source/isac.c b/modules/audio_coding/codecs/isac/main/source/isac.c
index d47eb80b..13170a08 100644
--- a/modules/audio_coding/codecs/isac/main/source/isac.c
+++ b/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -496,7 +496,7 @@ int16_t WebRtcIsac_EncoderInit(ISACStruct* ISAC_main_inst,
*/
int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
const int16_t* speechIn,
- int16_t* encoded) {
+ uint8_t* encoded) {
float inFrame[FRAMESAMPLES_10ms];
int16_t speechInLB[FRAMESAMPLES_10ms];
int16_t speechInUB[FRAMESAMPLES_10ms];
@@ -504,7 +504,6 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
int16_t streamLenUB = 0;
int16_t streamLen = 0;
int16_t k = 0;
- uint8_t* ptrEncodedUW8 = (uint8_t*)encoded;
int garbageLen = 0;
int32_t bottleneck = 0;
int16_t bottleneckIdx = 0;
@@ -643,23 +642,22 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
streamLenUB = 0;
}
- memcpy(ptrEncodedUW8, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
+ memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
streamLen = streamLenLB;
if (streamLenUB > 0) {
- ptrEncodedUW8[streamLenLB] = (uint8_t)(streamLenUB + 1 +
- LEN_CHECK_SUM_WORD8);
- memcpy(&ptrEncodedUW8[streamLenLB + 1],
- instUB->ISACencUB_obj.bitstr_obj.stream, streamLenUB);
- streamLen += ptrEncodedUW8[streamLenLB];
+ encoded[streamLenLB] = streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
+ memcpy(&encoded[streamLenLB + 1],
+ instUB->ISACencUB_obj.bitstr_obj.stream,
+ streamLenUB);
+ streamLen += encoded[streamLenLB];
} else {
- ptrEncodedUW8[streamLenLB] = 0;
+ encoded[streamLenLB] = 0;
}
} else {
if (streamLenLB == 0) {
return 0;
}
- memcpy(ptrEncodedUW8, instLB->ISACencLB_obj.bitstr_obj.stream,
- streamLenLB);
+ memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
streamLenUB = 0;
streamLen = streamLenLB;
}
@@ -697,11 +695,11 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
* 255 is the max garbage length we can signal using 8 bits. */
if ((instISAC->bandwidthKHz == isac8kHz) ||
(streamLenUB == 0)) {
- ptrGarbage = &ptrEncodedUW8[streamLenLB];
+ ptrGarbage = &encoded[streamLenLB];
limit = streamLen + 255;
} else {
- ptrGarbage = &ptrEncodedUW8[streamLenLB + 1 + streamLenUB];
- limit = streamLen + (255 - ptrEncodedUW8[streamLenLB]);
+ ptrGarbage = &encoded[streamLenLB + 1 + streamLenUB];
+ limit = streamLen + (255 - encoded[streamLenLB]);
}
minBytes = (minBytes > limit) ? limit : minBytes;
@@ -718,13 +716,12 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
* That is the only way to preserve backward compatibility. */
if ((instISAC->bandwidthKHz == isac8kHz) ||
(streamLenUB == 0)) {
- ptrEncodedUW8[streamLenLB] = (uint8_t)garbageLen;
+ encoded[streamLenLB] = garbageLen;
} else {
- ptrEncodedUW8[streamLenLB] += (uint8_t)garbageLen;
+ encoded[streamLenLB] += garbageLen;
/* Write the length of the garbage at the end of the upper-band
* bit-stream, if exists. This helps for sanity check. */
- ptrEncodedUW8[streamLenLB + 1 + streamLenUB] =
- (uint8_t)garbageLen;
+ encoded[streamLenLB + 1 + streamLenUB] = garbageLen;
}
streamLen += garbageLen;
@@ -741,16 +738,14 @@ int16_t WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
if ((instISAC->bandwidthKHz != isac8kHz) && (streamLenUB > 0)) {
uint32_t crc;
- WebRtcIsac_GetCrc((int16_t*)(&(ptrEncodedUW8[streamLenLB + 1])),
+ WebRtcIsac_GetCrc((int16_t*)(&(encoded[streamLenLB + 1])),
streamLenUB + garbageLen, &crc);
#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
- ptrEncodedUW8[streamLen - LEN_CHECK_SUM_WORD8 + k] =
- (uint8_t)((crc >> (24 - k * 8)) & 0xFF);
+ encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] = crc >> (24 - k * 8);
}
#else
- memcpy(&ptrEncodedUW8[streamLenLB + streamLenUB + 1], &crc,
- LEN_CHECK_SUM_WORD8);
+ memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc, LEN_CHECK_SUM_WORD8);
#endif
}
return streamLen;
diff --git a/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc b/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
index 3c55bd3d..1d653731 100644
--- a/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
+++ b/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
@@ -31,7 +31,7 @@ class IsacTest : public ::testing::Test {
int16_t speech_data_[kIsacNumberOfSamples];
int16_t output_data_[kIsacNumberOfSamples];
- int16_t bitstream_[kMaxBytes / 2];
+ uint8_t bitstream_[kMaxBytes];
uint8_t bitstream_small_[7]; // Simulate sync packets.
};
diff --git a/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc b/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
index c5f9561b..8af4e6f9 100644
--- a/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
+++ b/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
@@ -662,8 +662,8 @@ int main(int argc, char* argv[])
if(!(testNum == 3 && framecnt == 0))
{
stream_len = WebRtcIsac_Encode(ISAC_main_inst,
- shortdata,
- (int16_t*)streamdata);
+ shortdata,
+ (uint8_t*)streamdata);
if((payloadSize != 0) && (stream_len > payloadSize))
{
if(testNum == 0)
diff --git a/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc b/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
index fd70eca7..72d3fe86 100644
--- a/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
+++ b/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
@@ -283,7 +283,8 @@ int main(int argc, char* argv[])
streamLen = WebRtcIsac_Encode(codecInstance[senderIdx],
- audioBuff10ms, (short*)bitStream);
+ audioBuff10ms,
+ (uint8_t*)bitStream);
int16_t ggg;
if (streamLen > 0) {
if(( WebRtcIsac_ReadFrameLen(codecInstance[receiverIdx],
diff --git a/modules/audio_coding/codecs/isac/main/test/simpleKenny.c b/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
index 980465d6..2df5a84e 100644
--- a/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
+++ b/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
@@ -373,8 +373,10 @@ valid values are 8 and 16.\n", sampFreqKHz);
cur_framesmpls += samplesIn10Ms;
//-------- iSAC encoding ---------
- stream_len = WebRtcIsac_Encode(ISAC_main_inst, shortdata,
- (int16_t*)payload);
+ stream_len = WebRtcIsac_Encode(
+ ISAC_main_inst,
+ shortdata,
+ (uint8_t*)payload);
if(stream_len < 0)
{
diff --git a/modules/audio_coding/main/acm2/acm_g722.h b/modules/audio_coding/main/acm2/acm_g722.h
index 6717d2c4..dd3b7669 100644
--- a/modules/audio_coding/main/acm2/acm_g722.h
+++ b/modules/audio_coding/main/acm2/acm_g722.h
@@ -11,8 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G722_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G722_H_
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
typedef struct WebRtcG722EncInst G722EncInst;
typedef struct WebRtcG722DecInst G722DecInst;
diff --git a/modules/audio_coding/main/acm2/acm_generic_codec.h b/modules/audio_coding/main/acm2/acm_generic_codec.h
index adca299c..ad6f412a 100644
--- a/modules/audio_coding/main/acm2/acm_generic_codec.h
+++ b/modules/audio_coding/main/acm2/acm_generic_codec.h
@@ -11,12 +11,12 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GENERIC_CODEC_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GENERIC_CODEC_H_
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
#include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/system_wrappers/interface/trace.h"
#define MAX_FRAME_SIZE_10MSEC 6
diff --git a/modules/audio_coding/main/acm2/acm_isac.cc b/modules/audio_coding/main/acm2/acm_isac.cc
index 850204f3..f3682f1e 100644
--- a/modules/audio_coding/main/acm2/acm_isac.cc
+++ b/modules/audio_coding/main/acm2/acm_isac.cc
@@ -347,8 +347,9 @@ int16_t ACMISAC::InternalEncode(uint8_t* bitstream,
return -1;
}
*bitstream_len_byte = ACM_ISAC_ENCODE(
- codec_inst_ptr_->inst, &in_audio_[in_audio_ix_read_],
- reinterpret_cast<int16_t*>(bitstream));
+ codec_inst_ptr_->inst,
+ &in_audio_[in_audio_ix_read_],
+ bitstream);
// increment the read index this tell the caller that how far
// we have gone forward in reading the audio buffer
in_audio_ix_read_ += samples_in_10ms_audio_;
diff --git a/modules/audio_coding/main/acm2/acm_isac.h b/modules/audio_coding/main/acm2/acm_isac.h
index 23548229..486313a0 100644
--- a/modules/audio_coding/main/acm2/acm_isac.h
+++ b/modules/audio_coding/main/acm2/acm_isac.h
@@ -11,10 +11,10 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ISAC_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ISAC_H_
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
#include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
namespace webrtc {
diff --git a/modules/audio_coding/main/acm2/acm_receive_test.cc b/modules/audio_coding/main/acm2/acm_receive_test.cc
index 79c9adf0..7e41328e 100644
--- a/modules/audio_coding/main/acm2/acm_receive_test.cc
+++ b/modules/audio_coding/main/acm2/acm_receive_test.cc
@@ -22,124 +22,67 @@
namespace webrtc {
namespace test {
-namespace {
-// Returns true if the codec should be registered, otherwise false. Changes
-// the number of channels for the Opus codec to always be 1.
-bool ModifyAndUseThisCodec(CodecInst* codec_param) {
- if (STR_CASE_CMP(codec_param->plname, "CN") == 0 &&
- codec_param->plfreq == 48000)
- return false; // Skip 48 kHz comfort noise.
-
- if (STR_CASE_CMP(codec_param->plname, "telephone-event") == 0)
- return false; // Skip DTFM.
-
- return true;
-}
-
-// Remaps payload types from ACM's default to those used in the resource file
-// neteq_universal_new.rtp. Returns true if the codec should be registered,
-// otherwise false. The payload types are set as follows (all are mono codecs):
-// PCMu = 0;
-// PCMa = 8;
-// Comfort noise 8 kHz = 13
-// Comfort noise 16 kHz = 98
-// Comfort noise 32 kHz = 99
-// iLBC = 102
-// iSAC wideband = 103
-// iSAC super-wideband = 104
-// iSAC fullband = 124
-// AVT/DTMF = 106
-// RED = 117
-// PCM16b 8 kHz = 93
-// PCM16b 16 kHz = 94
-// PCM16b 32 kHz = 95
-// G.722 = 94
-bool RemapPltypeAndUseThisCodec(const char* plname,
- int plfreq,
- int channels,
- int* pltype) {
- if (channels != 1)
- return false; // Don't use non-mono codecs.
-
- // Re-map pltypes to those used in the NetEq test files.
- if (STR_CASE_CMP(plname, "PCMU") == 0 && plfreq == 8000) {
- *pltype = 0;
- } else if (STR_CASE_CMP(plname, "PCMA") == 0 && plfreq == 8000) {
- *pltype = 8;
- } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 8000) {
- *pltype = 13;
- } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 16000) {
- *pltype = 98;
- } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 32000) {
- *pltype = 99;
- } else if (STR_CASE_CMP(plname, "ILBC") == 0) {
- *pltype = 102;
- } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 16000) {
- *pltype = 103;
- } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 32000) {
- *pltype = 104;
- } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 48000) {
- *pltype = 124;
- } else if (STR_CASE_CMP(plname, "telephone-event") == 0) {
- *pltype = 106;
- } else if (STR_CASE_CMP(plname, "red") == 0) {
- *pltype = 117;
- } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 8000) {
- *pltype = 93;
- } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 16000) {
- *pltype = 94;
- } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 32000) {
- *pltype = 95;
- } else if (STR_CASE_CMP(plname, "G722") == 0) {
- *pltype = 9;
- } else {
- // Don't use any other codecs.
- return false;
- }
- return true;
-}
-} // namespace
-
AcmReceiveTest::AcmReceiveTest(PacketSource* packet_source,
AudioSink* audio_sink,
int output_freq_hz,
NumOutputChannels exptected_output_channels)
: clock_(0),
- acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
packet_source_(packet_source),
audio_sink_(audio_sink),
output_freq_hz_(output_freq_hz),
exptected_output_channels_(exptected_output_channels) {
+ webrtc::AudioCoding::Config config;
+ config.clock = &clock_;
+ config.playout_frequency_hz = output_freq_hz_;
+ acm_.reset(webrtc::AudioCoding::Create(config));
}
void AcmReceiveTest::RegisterDefaultCodecs() {
- CodecInst my_codec_param;
- for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
- ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
- if (ModifyAndUseThisCodec(&my_codec_param)) {
- ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
- << "Couldn't register receive codec.\n";
- }
- }
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kOpus, 120));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISAC, 103));
+#ifndef WEBRTC_ANDROID
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISACSWB, 104));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISACFB, 105));
+#endif
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16B, 107));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16Bwb, 108));
+ ASSERT_TRUE(
+ acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16Bswb32kHz, 109));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16B_2ch, 111));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16Bwb_2ch, 112));
+ ASSERT_TRUE(
+ acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16Bswb32kHz_2ch, 113));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMU, 0));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMA, 8));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMU_2ch, 110));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMA_2ch, 118));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kILBC, 102));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kG722, 9));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kG722_2ch, 119));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNNB, 13));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNWB, 98));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNSWB, 99));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kRED, 127));
}
void AcmReceiveTest::RegisterNetEqTestCodecs() {
- CodecInst my_codec_param;
- for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
- ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
- if (!ModifyAndUseThisCodec(&my_codec_param)) {
- // Skip this codec.
- continue;
- }
-
- if (RemapPltypeAndUseThisCodec(my_codec_param.plname,
- my_codec_param.plfreq,
- my_codec_param.channels,
- &my_codec_param.pltype)) {
- ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
- << "Couldn't register receive codec.\n";
- }
- }
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISAC, 103));
+#ifndef WEBRTC_ANDROID
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISACSWB, 104));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISACFB, 124));
+#endif
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16B, 93));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16Bwb, 94));
+ ASSERT_TRUE(
+ acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCM16Bswb32kHz, 95));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMU, 0));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMA, 8));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kILBC, 102));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kG722, 9));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNNB, 13));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNWB, 98));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNSWB, 99));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kRED, 117));
}
void AcmReceiveTest::Run() {
@@ -148,7 +91,7 @@ void AcmReceiveTest::Run() {
// Pull audio until time to insert packet.
while (clock_.TimeInMilliseconds() < packet->time_ms()) {
AudioFrame output_frame;
- EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
+ EXPECT_TRUE(acm_->Get10MsAudio(&output_frame));
EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
const int samples_per_block = output_freq_hz_ * 10 / 1000;
EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
@@ -170,11 +113,10 @@ void AcmReceiveTest::Run() {
header.header = packet->header();
header.frameType = kAudioFrameSpeech;
memset(&header.type.Audio, 0, sizeof(RTPAudioHeader));
- EXPECT_EQ(0,
- acm_->IncomingPacket(
- packet->payload(),
- static_cast<int32_t>(packet->payload_length_bytes()),
- header))
+ EXPECT_TRUE(
+ acm_->InsertPacket(packet->payload(),
+ static_cast<int32_t>(packet->payload_length_bytes()),
+ header))
<< "Failure when inserting packet:" << std::endl
<< " PT = " << static_cast<int>(header.header.payloadType) << std::endl
<< " TS = " << header.header.timestamp << std::endl
diff --git a/modules/audio_coding/main/acm2/acm_receive_test.h b/modules/audio_coding/main/acm2/acm_receive_test.h
index c454020d..19fe4c53 100644
--- a/modules/audio_coding/main/acm2/acm_receive_test.h
+++ b/modules/audio_coding/main/acm2/acm_receive_test.h
@@ -16,7 +16,7 @@
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
-class AudioCodingModule;
+class AudioCoding;
struct CodecInst;
namespace test {
@@ -50,7 +50,7 @@ class AcmReceiveTest {
private:
SimulatedClock clock_;
- scoped_ptr<AudioCodingModule> acm_;
+ scoped_ptr<AudioCoding> acm_;
PacketSource* packet_source_;
AudioSink* audio_sink_;
const int output_freq_hz_;
diff --git a/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc b/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
new file mode 100644
index 00000000..b0c9af13
--- /dev/null
+++ b/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+// Returns true if the codec should be registered, otherwise false. Changes
+// the number of channels for the Opus codec to always be 1.
+bool ModifyAndUseThisCodec(CodecInst* codec_param) {
+ if (STR_CASE_CMP(codec_param->plname, "CN") == 0 &&
+ codec_param->plfreq == 48000)
+ return false; // Skip 48 kHz comfort noise.
+
+ if (STR_CASE_CMP(codec_param->plname, "telephone-event") == 0)
+ return false; // Skip DTFM.
+
+ return true;
+}
+
+// Remaps payload types from ACM's default to those used in the resource file
+// neteq_universal_new.rtp. Returns true if the codec should be registered,
+// otherwise false. The payload types are set as follows (all are mono codecs):
+// PCMu = 0;
+// PCMa = 8;
+// Comfort noise 8 kHz = 13
+// Comfort noise 16 kHz = 98
+// Comfort noise 32 kHz = 99
+// iLBC = 102
+// iSAC wideband = 103
+// iSAC super-wideband = 104
+// iSAC fullband = 124
+// AVT/DTMF = 106
+// RED = 117
+// PCM16b 8 kHz = 93
+// PCM16b 16 kHz = 94
+// PCM16b 32 kHz = 95
+// G.722 = 94
+bool RemapPltypeAndUseThisCodec(const char* plname,
+ int plfreq,
+ int channels,
+ int* pltype) {
+ if (channels != 1)
+ return false; // Don't use non-mono codecs.
+
+ // Re-map pltypes to those used in the NetEq test files.
+ if (STR_CASE_CMP(plname, "PCMU") == 0 && plfreq == 8000) {
+ *pltype = 0;
+ } else if (STR_CASE_CMP(plname, "PCMA") == 0 && plfreq == 8000) {
+ *pltype = 8;
+ } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 8000) {
+ *pltype = 13;
+ } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 16000) {
+ *pltype = 98;
+ } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 32000) {
+ *pltype = 99;
+ } else if (STR_CASE_CMP(plname, "ILBC") == 0) {
+ *pltype = 102;
+ } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 16000) {
+ *pltype = 103;
+ } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 32000) {
+ *pltype = 104;
+ } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 48000) {
+ *pltype = 124;
+ } else if (STR_CASE_CMP(plname, "telephone-event") == 0) {
+ *pltype = 106;
+ } else if (STR_CASE_CMP(plname, "red") == 0) {
+ *pltype = 117;
+ } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 8000) {
+ *pltype = 93;
+ } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 16000) {
+ *pltype = 94;
+ } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 32000) {
+ *pltype = 95;
+ } else if (STR_CASE_CMP(plname, "G722") == 0) {
+ *pltype = 9;
+ } else {
+ // Don't use any other codecs.
+ return false;
+ }
+ return true;
+}
+} // namespace
+
+AcmReceiveTestOldApi::AcmReceiveTestOldApi(
+ PacketSource* packet_source,
+ AudioSink* audio_sink,
+ int output_freq_hz,
+ NumOutputChannels exptected_output_channels)
+ : clock_(0),
+ acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
+ packet_source_(packet_source),
+ audio_sink_(audio_sink),
+ output_freq_hz_(output_freq_hz),
+ exptected_output_channels_(exptected_output_channels) {
+}
+
+void AcmReceiveTestOldApi::RegisterDefaultCodecs() {
+ CodecInst my_codec_param;
+ for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
+ ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
+ if (ModifyAndUseThisCodec(&my_codec_param)) {
+ ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
+ << "Couldn't register receive codec.\n";
+ }
+ }
+}
+
+void AcmReceiveTestOldApi::RegisterNetEqTestCodecs() {
+ CodecInst my_codec_param;
+ for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
+ ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
+ if (!ModifyAndUseThisCodec(&my_codec_param)) {
+ // Skip this codec.
+ continue;
+ }
+
+ if (RemapPltypeAndUseThisCodec(my_codec_param.plname,
+ my_codec_param.plfreq,
+ my_codec_param.channels,
+ &my_codec_param.pltype)) {
+ ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
+ << "Couldn't register receive codec.\n";
+ }
+ }
+}
+
+void AcmReceiveTestOldApi::Run() {
+ for (scoped_ptr<Packet> packet(packet_source_->NextPacket()); packet;
+ packet.reset(packet_source_->NextPacket())) {
+ // Pull audio until time to insert packet.
+ while (clock_.TimeInMilliseconds() < packet->time_ms()) {
+ AudioFrame output_frame;
+ EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
+ EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
+ const int samples_per_block = output_freq_hz_ * 10 / 1000;
+ EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
+ if (exptected_output_channels_ != kArbitraryChannels) {
+ if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
+ // Don't check number of channels for PLC output, since each test run
+ // usually starts with a short period of mono PLC before decoding the
+ // first packet.
+ } else {
+ EXPECT_EQ(exptected_output_channels_, output_frame.num_channels_);
+ }
+ }
+ ASSERT_TRUE(audio_sink_->WriteAudioFrame(output_frame));
+ clock_.AdvanceTimeMilliseconds(10);
+ }
+
+ // Insert packet after converting from RTPHeader to WebRtcRTPHeader.
+ WebRtcRTPHeader header;
+ header.header = packet->header();
+ header.frameType = kAudioFrameSpeech;
+ memset(&header.type.Audio, 0, sizeof(RTPAudioHeader));
+ EXPECT_EQ(0,
+ acm_->IncomingPacket(
+ packet->payload(),
+ static_cast<int32_t>(packet->payload_length_bytes()),
+ header))
+ << "Failure when inserting packet:" << std::endl
+ << " PT = " << static_cast<int>(header.header.payloadType) << std::endl
+ << " TS = " << header.header.timestamp << std::endl
+ << " SN = " << header.header.sequenceNumber;
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h b/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h
new file mode 100644
index 00000000..795893c2
--- /dev/null
+++ b/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+class AudioCodingModule;
+struct CodecInst;
+
+namespace test {
+class AudioSink;
+class PacketSource;
+
+class AcmReceiveTestOldApi {
+ public:
+ enum NumOutputChannels {
+ kArbitraryChannels = 0,
+ kMonoOutput = 1,
+ kStereoOutput = 2
+ };
+
+ AcmReceiveTestOldApi(PacketSource* packet_source,
+ AudioSink* audio_sink,
+ int output_freq_hz,
+ NumOutputChannels exptected_output_channels);
+ virtual ~AcmReceiveTestOldApi() {}
+
+ // Registers the codecs with default parameters from ACM.
+ void RegisterDefaultCodecs();
+
+ // Registers codecs with payload types matching the pre-encoded NetEq test
+ // files.
+ void RegisterNetEqTestCodecs();
+
+ // Runs the test and returns true if successful.
+ void Run();
+
+ private:
+ SimulatedClock clock_;
+ scoped_ptr<AudioCodingModule> acm_;
+ PacketSource* packet_source_;
+ AudioSink* audio_sink_;
+ const int output_freq_hz_;
+ NumOutputChannels exptected_output_channels_;
+
+ DISALLOW_COPY_AND_ASSIGN(AcmReceiveTestOldApi);
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
diff --git a/modules/audio_coding/main/acm2/acm_receiver.h b/modules/audio_coding/main/acm2/acm_receiver.h
index 180b4ba6..94ea5b01 100644
--- a/modules/audio_coding/main/acm2/acm_receiver.h
+++ b/modules/audio_coding/main/acm2/acm_receiver.h
@@ -13,6 +13,7 @@
#include <vector>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_audio/vad/include/webrtc_vad.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
@@ -23,7 +24,6 @@
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/modules/audio_coding/main/acm2/acm_receiver_unittest.cc b/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
index 94d51b77..9cfef3a8 100644
--- a/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
+++ b/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
@@ -47,9 +47,10 @@ class AcmReceiverTest : public AudioPacketizationCallback,
packet_sent_(false),
last_packet_send_timestamp_(timestamp_),
last_frame_type_(kFrameEmpty) {
- AudioCodingModule::Config config;
- acm_.reset(new AudioCodingModuleImpl(config));
- receiver_.reset(new AcmReceiver(config));
+ AudioCoding::Config config;
+ config.transport = this;
+ acm_.reset(new AudioCodingImpl(config));
+ receiver_.reset(new AcmReceiver(config.ToOldConfig()));
}
~AcmReceiverTest() {}
@@ -61,10 +62,6 @@ class AcmReceiverTest : public AudioPacketizationCallback,
ASSERT_EQ(0, ACMCodecDB::Codec(n, &codecs_[n]));
}
- acm_->InitializeReceiver();
- acm_->InitializeSender();
- acm_->RegisterTransportCallback(this);
-
rtp_header_.header.sequenceNumber = 0;
rtp_header_.header.timestamp = 0;
rtp_header_.header.markerBit = false;
@@ -82,12 +79,12 @@ class AcmReceiverTest : public AudioPacketizationCallback,
CodecInst codec;
ACMCodecDB::Codec(codec_id, &codec);
if (timestamp_ == 0) { // This is the first time inserting audio.
- ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
+ ASSERT_TRUE(acm_->RegisterSendCodec(codec_id, codec.pltype));
} else {
- CodecInst current_codec;
- ASSERT_EQ(0, acm_->SendCodec(&current_codec));
- if (!CodecsEqual(codec, current_codec))
- ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
+ const CodecInst* current_codec = acm_->GetSenderCodecInst();
+ ASSERT_TRUE(current_codec);
+ if (!CodecsEqual(codec, *current_codec))
+ ASSERT_TRUE(acm_->RegisterSendCodec(codec_id, codec.pltype));
}
AudioFrame frame;
// Frame setup according to the codec.
@@ -102,8 +99,7 @@ class AcmReceiverTest : public AudioPacketizationCallback,
while (num_bytes == 0) {
frame.timestamp_ = timestamp_;
timestamp_ += frame.samples_per_channel_;
- ASSERT_EQ(0, acm_->Add10MsData(frame));
- num_bytes = acm_->Process();
+ num_bytes = acm_->Add10MsAudio(frame);
ASSERT_GE(num_bytes, 0);
}
ASSERT_TRUE(packet_sent_); // Sanity check.
@@ -151,7 +147,7 @@ class AcmReceiverTest : public AudioPacketizationCallback,
scoped_ptr<AcmReceiver> receiver_;
CodecInst codecs_[ACMCodecDB::kMaxNumCodecs];
- scoped_ptr<AudioCodingModule> acm_;
+ scoped_ptr<AudioCoding> acm_;
WebRtcRTPHeader rtp_header_;
uint32_t timestamp_;
bool packet_sent_; // Set when SendData is called reset when inserting audio.
@@ -307,7 +303,7 @@ TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(LastAudioCodec)) {
// Register CNG at sender side.
int n = 0;
while (kCngId[n] > 0) {
- ASSERT_EQ(0, acm_->RegisterSendCodec(codecs_[kCngId[n]]));
+ ASSERT_TRUE(acm_->RegisterSendCodec(kCngId[n], codecs_[kCngId[n]].pltype));
++n;
}
@@ -316,7 +312,7 @@ TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(LastAudioCodec)) {
EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));
// Start with sending DTX.
- ASSERT_EQ(0, acm_->SetVAD(true, true, VADVeryAggr));
+ ASSERT_TRUE(acm_->SetVad(true, true, VADVeryAggr));
packet_sent_ = false;
InsertOnePacketOfSilence(kCodecId[0]); // Enough to test with one codec.
ASSERT_TRUE(packet_sent_);
@@ -330,7 +326,7 @@ TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(LastAudioCodec)) {
n = 0;
while (kCodecId[n] >= 0) { // Loop over codecs.
// Set DTX off to send audio payload.
- acm_->SetVAD(false, false, VADAggr);
+ acm_->SetVad(false, false, VADAggr);
packet_sent_ = false;
InsertOnePacketOfSilence(kCodecId[n]);
@@ -342,7 +338,7 @@ TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(LastAudioCodec)) {
// Set VAD on to send DTX. Then check if the "Last Audio codec" returns
// the expected codec.
- acm_->SetVAD(true, true, VADAggr);
+ acm_->SetVad(true, true, VADAggr);
// Do as many encoding until a DTX is sent.
while (last_frame_type_ != kAudioFrameCN) {
diff --git a/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc b/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
new file mode 100644
index 00000000..ef890ecb
--- /dev/null
+++ b/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
+
+#include <algorithm> // std::min
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
+#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
+#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/test/test_suite.h"
+#include "webrtc/test/testsupport/fileutils.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
+
+namespace webrtc {
+
+namespace acm2 {
+namespace {
+
+bool CodecsEqual(const CodecInst& codec_a, const CodecInst& codec_b) {
+ if (strcmp(codec_a.plname, codec_b.plname) != 0 ||
+ codec_a.plfreq != codec_b.plfreq ||
+ codec_a.pltype != codec_b.pltype ||
+ codec_b.channels != codec_a.channels)
+ return false;
+ return true;
+}
+
+} // namespace
+
+class AcmReceiverTestOldApi : public AudioPacketizationCallback,
+ public ::testing::Test {
+ protected:
+ AcmReceiverTestOldApi()
+ : timestamp_(0),
+ packet_sent_(false),
+ last_packet_send_timestamp_(timestamp_),
+ last_frame_type_(kFrameEmpty) {
+ AudioCodingModule::Config config;
+ acm_.reset(new AudioCodingModuleImpl(config));
+ receiver_.reset(new AcmReceiver(config));
+ }
+
+ ~AcmReceiverTestOldApi() {}
+
+ virtual void SetUp() OVERRIDE {
+ ASSERT_TRUE(receiver_.get() != NULL);
+ ASSERT_TRUE(acm_.get() != NULL);
+ for (int n = 0; n < ACMCodecDB::kNumCodecs; n++) {
+ ASSERT_EQ(0, ACMCodecDB::Codec(n, &codecs_[n]));
+ }
+
+ acm_->InitializeReceiver();
+ acm_->InitializeSender();
+ acm_->RegisterTransportCallback(this);
+
+ rtp_header_.header.sequenceNumber = 0;
+ rtp_header_.header.timestamp = 0;
+ rtp_header_.header.markerBit = false;
+ rtp_header_.header.ssrc = 0x12345678; // Arbitrary.
+ rtp_header_.header.numCSRCs = 0;
+ rtp_header_.header.payloadType = 0;
+ rtp_header_.frameType = kAudioFrameSpeech;
+ rtp_header_.type.Audio.isCNG = false;
+ }
+
+ virtual void TearDown() OVERRIDE {
+ }
+
+ void InsertOnePacketOfSilence(int codec_id) {
+ CodecInst codec;
+ ACMCodecDB::Codec(codec_id, &codec);
+ if (timestamp_ == 0) { // This is the first time inserting audio.
+ ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
+ } else {
+ CodecInst current_codec;
+ ASSERT_EQ(0, acm_->SendCodec(&current_codec));
+ if (!CodecsEqual(codec, current_codec))
+ ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
+ }
+ AudioFrame frame;
+ // Frame setup according to the codec.
+ frame.sample_rate_hz_ = codec.plfreq;
+ frame.samples_per_channel_ = codec.plfreq / 100; // 10 ms.
+ frame.num_channels_ = codec.channels;
+ memset(frame.data_, 0, frame.samples_per_channel_ * frame.num_channels_ *
+ sizeof(int16_t));
+ int num_bytes = 0;
+ packet_sent_ = false;
+ last_packet_send_timestamp_ = timestamp_;
+ while (num_bytes == 0) {
+ frame.timestamp_ = timestamp_;
+ timestamp_ += frame.samples_per_channel_;
+ ASSERT_EQ(0, acm_->Add10MsData(frame));
+ num_bytes = acm_->Process();
+ ASSERT_GE(num_bytes, 0);
+ }
+ ASSERT_TRUE(packet_sent_); // Sanity check.
+ }
+
+ // Last element of id should be negative.
+ void AddSetOfCodecs(const int* id) {
+ int n = 0;
+ while (id[n] >= 0) {
+ ASSERT_EQ(0, receiver_->AddCodec(id[n], codecs_[id[n]].pltype,
+ codecs_[id[n]].channels, NULL));
+ ++n;
+ }
+ }
+
+ virtual int SendData(
+ FrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ uint16_t payload_len_bytes,
+ const RTPFragmentationHeader* fragmentation) OVERRIDE {
+ if (frame_type == kFrameEmpty)
+ return 0;
+
+ rtp_header_.header.payloadType = payload_type;
+ rtp_header_.frameType = frame_type;
+ if (frame_type == kAudioFrameSpeech)
+ rtp_header_.type.Audio.isCNG = false;
+ else
+ rtp_header_.type.Audio.isCNG = true;
+ rtp_header_.header.timestamp = timestamp;
+
+ int ret_val = receiver_->InsertPacket(rtp_header_, payload_data,
+ payload_len_bytes);
+ if (ret_val < 0) {
+ assert(false);
+ return -1;
+ }
+ rtp_header_.header.sequenceNumber++;
+ packet_sent_ = true;
+ last_frame_type_ = frame_type;
+ return 0;
+ }
+
+ scoped_ptr<AcmReceiver> receiver_;
+ CodecInst codecs_[ACMCodecDB::kMaxNumCodecs];
+ scoped_ptr<AudioCodingModule> acm_;
+ WebRtcRTPHeader rtp_header_;
+ uint32_t timestamp_;
+ bool packet_sent_; // Set when SendData is called reset when inserting audio.
+ uint32_t last_packet_send_timestamp_;
+ FrameType last_frame_type_;
+};
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecGetCodec)) {
+ // Add codec.
+ for (int n = 0; n < ACMCodecDB::kNumCodecs; ++n) {
+ if (n & 0x1) // Just add codecs with odd index.
+ EXPECT_EQ(0, receiver_->AddCodec(n, codecs_[n].pltype,
+ codecs_[n].channels, NULL));
+ }
+ // Get codec and compare.
+ for (int n = 0; n < ACMCodecDB::kNumCodecs; ++n) {
+ CodecInst my_codec;
+ if (n & 0x1) {
+ // Codecs with odd index should match the reference.
+ EXPECT_EQ(0, receiver_->DecoderByPayloadType(codecs_[n].pltype,
+ &my_codec));
+ EXPECT_TRUE(CodecsEqual(codecs_[n], my_codec));
+ } else {
+ // Codecs with even index are not registered.
+ EXPECT_EQ(-1, receiver_->DecoderByPayloadType(codecs_[n].pltype,
+ &my_codec));
+ }
+ }
+}
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangePayloadType)) {
+ CodecInst ref_codec;
+ const int codec_id = ACMCodecDB::kPCMA;
+ EXPECT_EQ(0, ACMCodecDB::Codec(codec_id, &ref_codec));
+ const int payload_type = ref_codec.pltype;
+ EXPECT_EQ(0, receiver_->AddCodec(codec_id, ref_codec.pltype,
+ ref_codec.channels, NULL));
+ CodecInst test_codec;
+ EXPECT_EQ(0, receiver_->DecoderByPayloadType(payload_type, &test_codec));
+ EXPECT_EQ(true, CodecsEqual(ref_codec, test_codec));
+
+ // Re-register the same codec with different payload.
+ ref_codec.pltype = payload_type + 1;
+ EXPECT_EQ(0, receiver_->AddCodec(codec_id, ref_codec.pltype,
+ ref_codec.channels, NULL));
+
+ // Payload type |payload_type| should not exist.
+ EXPECT_EQ(-1, receiver_->DecoderByPayloadType(payload_type, &test_codec));
+
+ // Payload type |payload_type + 1| should exist.
+ EXPECT_EQ(0, receiver_->DecoderByPayloadType(payload_type + 1, &test_codec));
+ EXPECT_TRUE(CodecsEqual(test_codec, ref_codec));
+}
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecRemoveCodec)) {
+ CodecInst codec;
+ const int codec_id = ACMCodecDB::kPCMA;
+ EXPECT_EQ(0, ACMCodecDB::Codec(codec_id, &codec));
+ const int payload_type = codec.pltype;
+ EXPECT_EQ(0, receiver_->AddCodec(codec_id, codec.pltype,
+ codec.channels, NULL));
+
+ // Remove non-existing codec should not fail. ACM1 legacy.
+ EXPECT_EQ(0, receiver_->RemoveCodec(payload_type + 1));
+
+ // Remove an existing codec.
+ EXPECT_EQ(0, receiver_->RemoveCodec(payload_type));
+
+ // Ask for the removed codec, must fail.
+ EXPECT_EQ(-1, receiver_->DecoderByPayloadType(payload_type, &codec));
+}
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(SampleRate)) {
+ const int kCodecId[] = {
+ ACMCodecDB::kISAC, ACMCodecDB::kISACSWB, ACMCodecDB::kISACFB,
+ -1 // Terminator.
+ };
+ AddSetOfCodecs(kCodecId);
+
+ AudioFrame frame;
+ const int kOutSampleRateHz = 8000; // Different than codec sample rate.
+ int n = 0;
+ while (kCodecId[n] >= 0) {
+ const int num_10ms_frames = codecs_[kCodecId[n]].pacsize /
+ (codecs_[kCodecId[n]].plfreq / 100);
+ InsertOnePacketOfSilence(kCodecId[n]);
+ for (int k = 0; k < num_10ms_frames; ++k) {
+ EXPECT_EQ(0, receiver_->GetAudio(kOutSampleRateHz, &frame));
+ }
+ EXPECT_EQ(std::min(32000, codecs_[kCodecId[n]].plfreq),
+ receiver_->current_sample_rate_hz());
+ ++n;
+ }
+}
+
+// Verify that the playout mode is set correctly.
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(PlayoutMode)) {
+ receiver_->SetPlayoutMode(voice);
+ EXPECT_EQ(voice, receiver_->PlayoutMode());
+
+ receiver_->SetPlayoutMode(streaming);
+ EXPECT_EQ(streaming, receiver_->PlayoutMode());
+
+ receiver_->SetPlayoutMode(fax);
+ EXPECT_EQ(fax, receiver_->PlayoutMode());
+
+ receiver_->SetPlayoutMode(off);
+ EXPECT_EQ(off, receiver_->PlayoutMode());
+}
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(PostdecodingVad)) {
+ receiver_->EnableVad();
+ EXPECT_TRUE(receiver_->vad_enabled());
+
+ const int id = ACMCodecDB::kPCM16Bwb;
+ ASSERT_EQ(0, receiver_->AddCodec(id, codecs_[id].pltype, codecs_[id].channels,
+ NULL));
+ const int kNumPackets = 5;
+ const int num_10ms_frames = codecs_[id].pacsize / (codecs_[id].plfreq / 100);
+ AudioFrame frame;
+ for (int n = 0; n < kNumPackets; ++n) {
+ InsertOnePacketOfSilence(id);
+ for (int k = 0; k < num_10ms_frames; ++k)
+ ASSERT_EQ(0, receiver_->GetAudio(codecs_[id].plfreq, &frame));
+ }
+ EXPECT_EQ(AudioFrame::kVadPassive, frame.vad_activity_);
+
+ receiver_->DisableVad();
+ EXPECT_FALSE(receiver_->vad_enabled());
+
+ for (int n = 0; n < kNumPackets; ++n) {
+ InsertOnePacketOfSilence(id);
+ for (int k = 0; k < num_10ms_frames; ++k)
+ ASSERT_EQ(0, receiver_->GetAudio(codecs_[id].plfreq, &frame));
+ }
+ EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
+}
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(LastAudioCodec)) {
+ const int kCodecId[] = {
+ ACMCodecDB::kISAC, ACMCodecDB::kPCMA, ACMCodecDB::kISACSWB,
+ ACMCodecDB::kPCM16Bswb32kHz, ACMCodecDB::kG722_1C_48,
+ -1 // Terminator.
+ };
+ AddSetOfCodecs(kCodecId);
+
+ const int kCngId[] = { // Not including full-band.
+ ACMCodecDB::kCNNB, ACMCodecDB::kCNWB, ACMCodecDB::kCNSWB,
+ -1 // Terminator.
+ };
+ AddSetOfCodecs(kCngId);
+
+ // Register CNG at sender side.
+ int n = 0;
+ while (kCngId[n] > 0) {
+ ASSERT_EQ(0, acm_->RegisterSendCodec(codecs_[kCngId[n]]));
+ ++n;
+ }
+
+ CodecInst codec;
+ // No audio payload is received.
+ EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));
+
+ // Start with sending DTX.
+ ASSERT_EQ(0, acm_->SetVAD(true, true, VADVeryAggr));
+ packet_sent_ = false;
+ InsertOnePacketOfSilence(kCodecId[0]); // Enough to test with one codec.
+ ASSERT_TRUE(packet_sent_);
+ EXPECT_EQ(kAudioFrameCN, last_frame_type_);
+
+ // Has received, only, DTX. Last Audio codec is undefined.
+ EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));
+ EXPECT_EQ(-1, receiver_->last_audio_codec_id());
+ EXPECT_EQ(-1, receiver_->last_audio_payload_type());
+
+ n = 0;
+ while (kCodecId[n] >= 0) { // Loop over codecs.
+ // Set DTX off to send audio payload.
+ acm_->SetVAD(false, false, VADAggr);
+ packet_sent_ = false;
+ InsertOnePacketOfSilence(kCodecId[n]);
+
+ // Sanity check if Actually an audio payload received, and it should be
+ // of type "speech."
+ ASSERT_TRUE(packet_sent_);
+ ASSERT_EQ(kAudioFrameSpeech, last_frame_type_);
+ EXPECT_EQ(kCodecId[n], receiver_->last_audio_codec_id());
+
+ // Set VAD on to send DTX. Then check if the "Last Audio codec" returns
+ // the expected codec.
+ acm_->SetVAD(true, true, VADAggr);
+
+ // Do as many encoding until a DTX is sent.
+ while (last_frame_type_ != kAudioFrameCN) {
+ packet_sent_ = false;
+ InsertOnePacketOfSilence(kCodecId[n]);
+ ASSERT_TRUE(packet_sent_);
+ }
+ EXPECT_EQ(kCodecId[n], receiver_->last_audio_codec_id());
+ EXPECT_EQ(codecs_[kCodecId[n]].pltype,
+ receiver_->last_audio_payload_type());
+ EXPECT_EQ(0, receiver_->LastAudioCodec(&codec));
+ EXPECT_TRUE(CodecsEqual(codecs_[kCodecId[n]], codec));
+ ++n;
+ }
+}
+
+} // namespace acm2
+
+} // namespace webrtc
diff --git a/modules/audio_coding/main/acm2/acm_send_test.cc b/modules/audio_coding/main/acm2/acm_send_test.cc
index 30bf2fd3..ec3c2540 100644
--- a/modules/audio_coding/main/acm2/acm_send_test.cc
+++ b/modules/audio_coding/main/acm2/acm_send_test.cc
@@ -27,7 +27,6 @@ AcmSendTest::AcmSendTest(InputAudioFile* audio_source,
int source_rate_hz,
int test_duration_ms)
: clock_(0),
- acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
audio_source_(audio_source),
source_rate_hz_(source_rate_hz),
input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
@@ -37,24 +36,23 @@ AcmSendTest::AcmSendTest(InputAudioFile* audio_source,
payload_type_(0),
timestamp_(0),
sequence_number_(0) {
+ webrtc::AudioCoding::Config config;
+ config.clock = &clock_;
+ config.transport = this;
+ acm_.reset(webrtc::AudioCoding::Create(config));
input_frame_.sample_rate_hz_ = source_rate_hz_;
input_frame_.num_channels_ = 1;
input_frame_.samples_per_channel_ = input_block_size_samples_;
assert(input_block_size_samples_ * input_frame_.num_channels_ <=
AudioFrame::kMaxDataSizeSamples);
- acm_->RegisterTransportCallback(this);
}
-bool AcmSendTest::RegisterCodec(const char* payload_name,
- int sampling_freq_hz,
+bool AcmSendTest::RegisterCodec(int codec_type,
int channels,
int payload_type,
int frame_size_samples) {
- CHECK_EQ(0, AudioCodingModule::Codec(payload_name, &codec_, sampling_freq_hz,
- channels));
- codec_.pltype = payload_type;
- codec_.pacsize = frame_size_samples;
- codec_registered_ = (acm_->RegisterSendCodec(codec_) == 0);
+ codec_registered_ =
+ acm_->RegisterSendCodec(codec_type, payload_type, frame_size_samples);
input_frame_.num_channels_ = channels;
assert(input_block_size_samples_ * input_frame_.num_channels_ <=
AudioFrame::kMaxDataSizeSamples);
@@ -79,9 +77,9 @@ Packet* AcmSendTest::NextPacket() {
input_frame_.num_channels_,
input_frame_.data_);
}
- CHECK_EQ(0, acm_->Add10MsData(input_frame_));
+ int32_t encoded_bytes = acm_->Add10MsAudio(input_frame_);
+ EXPECT_GE(encoded_bytes, 0);
input_frame_.timestamp_ += input_block_size_samples_;
- int32_t encoded_bytes = acm_->Process();
if (encoded_bytes > 0) {
// Encoded packet received.
return CreatePacket();
diff --git a/modules/audio_coding/main/acm2/acm_send_test.h b/modules/audio_coding/main/acm2/acm_send_test.h
index db5d9e55..8bc0cde0 100644
--- a/modules/audio_coding/main/acm2/acm_send_test.h
+++ b/modules/audio_coding/main/acm2/acm_send_test.h
@@ -33,8 +33,7 @@ class AcmSendTest : public AudioPacketizationCallback, public PacketSource {
virtual ~AcmSendTest() {}
// Registers the send codec. Returns true on success, false otherwise.
- bool RegisterCodec(const char* payload_name,
- int sampling_freq_hz,
+ bool RegisterCodec(int codec_type,
int channels,
int payload_type,
int frame_size_samples);
@@ -62,12 +61,11 @@ class AcmSendTest : public AudioPacketizationCallback, public PacketSource {
Packet* CreatePacket();
SimulatedClock clock_;
- scoped_ptr<AudioCodingModule> acm_;
+ scoped_ptr<AudioCoding> acm_;
InputAudioFile* audio_source_;
int source_rate_hz_;
const int input_block_size_samples_;
AudioFrame input_frame_;
- CodecInst codec_;
bool codec_registered_;
int test_duration_ms_;
// The following member variables are set whenever SendData() is called.
diff --git a/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc b/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
new file mode 100644
index 00000000..2f5178e0
--- /dev/null
+++ b/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+
+namespace webrtc {
+namespace test {
+
+AcmSendTestOldApi::AcmSendTestOldApi(InputAudioFile* audio_source,
+ int source_rate_hz,
+ int test_duration_ms)
+ : clock_(0),
+ acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
+ audio_source_(audio_source),
+ source_rate_hz_(source_rate_hz),
+ input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+ codec_registered_(false),
+ test_duration_ms_(test_duration_ms),
+ frame_type_(kAudioFrameSpeech),
+ payload_type_(0),
+ timestamp_(0),
+ sequence_number_(0) {
+ input_frame_.sample_rate_hz_ = source_rate_hz_;
+ input_frame_.num_channels_ = 1;
+ input_frame_.samples_per_channel_ = input_block_size_samples_;
+ assert(input_block_size_samples_ * input_frame_.num_channels_ <=
+ AudioFrame::kMaxDataSizeSamples);
+ acm_->RegisterTransportCallback(this);
+}
+
+bool AcmSendTestOldApi::RegisterCodec(const char* payload_name,
+ int sampling_freq_hz,
+ int channels,
+ int payload_type,
+ int frame_size_samples) {
+ CHECK_EQ(0,
+ AudioCodingModule::Codec(
+ payload_name, &codec_, sampling_freq_hz, channels));
+ codec_.pltype = payload_type;
+ codec_.pacsize = frame_size_samples;
+ codec_registered_ = (acm_->RegisterSendCodec(codec_) == 0);
+ input_frame_.num_channels_ = channels;
+ assert(input_block_size_samples_ * input_frame_.num_channels_ <=
+ AudioFrame::kMaxDataSizeSamples);
+ return codec_registered_;
+}
+
+Packet* AcmSendTestOldApi::NextPacket() {
+ assert(codec_registered_);
+ if (filter_.test(payload_type_)) {
+ // This payload type should be filtered out. Since the payload type is the
+ // same throughout the whole test run, no packet at all will be delivered.
+ // We can just as well signal that the test is over by returning NULL.
+ return NULL;
+ }
+ // Insert audio and process until one packet is produced.
+ while (clock_.TimeInMilliseconds() < test_duration_ms_) {
+ clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
+ CHECK(audio_source_->Read(input_block_size_samples_, input_frame_.data_));
+ if (input_frame_.num_channels_ > 1) {
+ InputAudioFile::DuplicateInterleaved(input_frame_.data_,
+ input_block_size_samples_,
+ input_frame_.num_channels_,
+ input_frame_.data_);
+ }
+ CHECK_EQ(0, acm_->Add10MsData(input_frame_));
+ input_frame_.timestamp_ += input_block_size_samples_;
+ int32_t encoded_bytes = acm_->Process();
+ if (encoded_bytes > 0) {
+ // Encoded packet received.
+ return CreatePacket();
+ }
+ }
+ // Test ended.
+ return NULL;
+}
+
+// This method receives the callback from ACM when a new packet is produced.
+int32_t AcmSendTestOldApi::SendData(
+ FrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ uint16_t payload_len_bytes,
+ const RTPFragmentationHeader* fragmentation) {
+ // Store the packet locally.
+ frame_type_ = frame_type;
+ payload_type_ = payload_type;
+ timestamp_ = timestamp;
+ last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
+ assert(last_payload_vec_.size() == payload_len_bytes);
+ return 0;
+}
+
+Packet* AcmSendTestOldApi::CreatePacket() {
+ const size_t kRtpHeaderSize = 12;
+ size_t allocated_bytes = last_payload_vec_.size() + kRtpHeaderSize;
+ uint8_t* packet_memory = new uint8_t[allocated_bytes];
+ // Populate the header bytes.
+ packet_memory[0] = 0x80;
+ packet_memory[1] = payload_type_;
+ packet_memory[2] = (sequence_number_ >> 8) & 0xFF;
+ packet_memory[3] = (sequence_number_) & 0xFF;
+ packet_memory[4] = (timestamp_ >> 24) & 0xFF;
+ packet_memory[5] = (timestamp_ >> 16) & 0xFF;
+ packet_memory[6] = (timestamp_ >> 8) & 0xFF;
+ packet_memory[7] = timestamp_ & 0xFF;
+ // Set SSRC to 0x12345678.
+ packet_memory[8] = 0x12;
+ packet_memory[9] = 0x34;
+ packet_memory[10] = 0x56;
+ packet_memory[11] = 0x78;
+
+ ++sequence_number_;
+
+ // Copy the payload data.
+ memcpy(packet_memory + kRtpHeaderSize,
+ &last_payload_vec_[0],
+ last_payload_vec_.size());
+ Packet* packet =
+ new Packet(packet_memory, allocated_bytes, clock_.TimeInMilliseconds());
+ assert(packet);
+ assert(packet->valid_header());
+ return packet;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/modules/audio_coding/main/acm2/acm_send_test_oldapi.h b/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
new file mode 100644
index 00000000..ff229a0a
--- /dev/null
+++ b/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_OLDAPI_H_
+#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_OLDAPI_H_
+
+#include <vector>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+namespace test {
+class InputAudioFile;
+class Packet;
+
+class AcmSendTestOldApi : public AudioPacketizationCallback,
+ public PacketSource {
+ public:
+ AcmSendTestOldApi(InputAudioFile* audio_source,
+ int source_rate_hz,
+ int test_duration_ms);
+ virtual ~AcmSendTestOldApi() {}
+
+ // Registers the send codec. Returns true on success, false otherwise.
+ bool RegisterCodec(const char* payload_name,
+ int sampling_freq_hz,
+ int channels,
+ int payload_type,
+ int frame_size_samples);
+
+ // Returns the next encoded packet. Returns NULL if the test duration was
+ // exceeded. Ownership of the packet is handed over to the caller.
+ // Inherited from PacketSource.
+ Packet* NextPacket();
+
+ // Inherited from AudioPacketizationCallback.
+ virtual int32_t SendData(
+ FrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ uint16_t payload_len_bytes,
+ const RTPFragmentationHeader* fragmentation) OVERRIDE;
+
+ private:
+ static const int kBlockSizeMs = 10;
+
+ // Creates a Packet object from the last packet produced by ACM (and received
+ // through the SendData method as a callback). Ownership of the new Packet
+ // object is transferred to the caller.
+ Packet* CreatePacket();
+
+ SimulatedClock clock_;
+ scoped_ptr<AudioCodingModule> acm_;
+ InputAudioFile* audio_source_;
+ int source_rate_hz_;
+ const int input_block_size_samples_;
+ AudioFrame input_frame_;
+ CodecInst codec_;
+ bool codec_registered_;
+ int test_duration_ms_;
+ // The following member variables are set whenever SendData() is called.
+ FrameType frame_type_;
+ int payload_type_;
+ uint32_t timestamp_;
+ uint16_t sequence_number_;
+ std::vector<uint8_t> last_payload_vec_;
+
+ DISALLOW_COPY_AND_ASSIGN(AcmSendTestOldApi);
+};
+
+} // namespace test
+} // namespace webrtc
+#endif  // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_OLDAPI_H_
diff --git a/modules/audio_coding/main/acm2/audio_coding_module.gypi b/modules/audio_coding/main/acm2/audio_coding_module.gypi
index f88dbd37..d746a80b 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module.gypi
+++ b/modules/audio_coding/main/acm2/audio_coding_module.gypi
@@ -119,7 +119,11 @@
{
'target_name': 'acm_receive_test',
'type': 'static_library',
+ 'defines': [
+ '<@(audio_coding_defines)',
+ ],
'dependencies': [
+ '<@(audio_coding_dependencies)',
'audio_coding_module',
'neteq_unittest_tools',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -127,12 +131,18 @@
'sources': [
'acm_receive_test.cc',
'acm_receive_test.h',
+ 'acm_receive_test_oldapi.cc',
+ 'acm_receive_test_oldapi.h',
],
}, # acm_receive_test
{
'target_name': 'acm_send_test',
'type': 'static_library',
+ 'defines': [
+ '<@(audio_coding_defines)',
+ ],
'dependencies': [
+ '<@(audio_coding_dependencies)',
'audio_coding_module',
'neteq_unittest_tools',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -140,6 +150,8 @@
'sources': [
'acm_send_test.cc',
'acm_send_test.h',
+ 'acm_send_test_oldapi.cc',
+ 'acm_send_test_oldapi.h',
],
}, # acm_send_test
{
diff --git a/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index 2212f83c..687c5b81 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -2073,6 +2073,13 @@ const AudioEncoder* AudioCodingImpl::GetSenderInfo() const {
FATAL() << "Not implemented yet.";
}
+const CodecInst* AudioCodingImpl::GetSenderCodecInst() {
+ if (acm_old_->SendCodec(&current_send_codec_) != 0) {
+ return NULL;
+ }
+ return &current_send_codec_;
+}
+
int AudioCodingImpl::Add10MsAudio(const AudioFrame& audio_frame) {
if (acm_old_->Add10MsData(audio_frame) != 0) {
return -1;
@@ -2151,6 +2158,12 @@ void AudioCodingImpl::DisableNack() {
FATAL() << "Not implemented yet.";
}
+bool AudioCodingImpl::SetVad(bool enable_dtx,
+ bool enable_vad,
+ ACMVADMode vad_mode) {
+ return acm_old_->SetVAD(enable_dtx, enable_vad, vad_mode) == 0;
+}
+
std::vector<uint16_t> AudioCodingImpl::GetNackList(
int round_trip_time_ms) const {
return acm_old_->GetNackList(round_trip_time_ms);
diff --git a/modules/audio_coding/main/acm2/audio_coding_module_impl.h b/modules/audio_coding/main/acm2/audio_coding_module_impl.h
index 93fd96be..b8d128fe 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module_impl.h
+++ b/modules/audio_coding/main/acm2/audio_coding_module_impl.h
@@ -13,13 +13,13 @@
#include <vector>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
namespace webrtc {
@@ -390,10 +390,7 @@ class AudioCodingModuleImpl : public AudioCodingModule {
class AudioCodingImpl : public AudioCoding {
public:
AudioCodingImpl(const Config& config) {
- AudioCodingModule::Config config_old;
- config_old.id = 0;
- config_old.neteq_config = config.neteq_config;
- config_old.clock = config.clock;
+ AudioCodingModule::Config config_old = config.ToOldConfig();
acm_old_.reset(new acm2::AudioCodingModuleImpl(config_old));
acm_old_->RegisterTransportCallback(config.transport);
acm_old_->RegisterVADCallback(config.vad_callback);
@@ -414,6 +411,8 @@ class AudioCodingImpl : public AudioCoding {
virtual const AudioEncoder* GetSenderInfo() const OVERRIDE;
+ virtual const CodecInst* GetSenderCodecInst() OVERRIDE;
+
virtual int Add10MsAudio(const AudioFrame& audio_frame) OVERRIDE;
virtual const ReceiverInfo* GetReceiverInfo() const OVERRIDE;
@@ -449,6 +448,10 @@ class AudioCodingImpl : public AudioCoding {
virtual void DisableNack() OVERRIDE;
+ virtual bool SetVad(bool enable_dtx,
+ bool enable_vad,
+ ACMVADMode vad_mode) OVERRIDE;
+
virtual std::vector<uint16_t> GetNackList(
int round_trip_time_ms) const OVERRIDE;
@@ -465,8 +468,11 @@ class AudioCodingImpl : public AudioCoding {
int* sample_rate_hz,
int* channels);
- scoped_ptr<acm2::AudioCodingModuleImpl> acm_old_;
int playout_frequency_hz_;
+ // TODO(henrik.lundin): All members below this line are temporary and should
+ // be removed after refactoring is completed.
+ scoped_ptr<acm2::AudioCodingModuleImpl> acm_old_;
+ CodecInst current_send_codec_;
};
} // namespace webrtc
diff --git a/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
index 009218d1..828b772e 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
+++ b/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
@@ -12,7 +12,9 @@
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/md5digest.h"
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_send_test.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
@@ -30,7 +32,6 @@
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/sleep.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/system_wrappers/interface/thread_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
@@ -118,19 +119,15 @@ class PacketizationCallbackStub : public AudioPacketizationCallback {
class AudioCodingModuleTest : public ::testing::Test {
protected:
AudioCodingModuleTest()
- : id_(1),
- rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
- clock_(Clock::GetRealTimeClock()) {}
+ : rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)) {
+ config_.transport = &packet_cb_;
+ }
~AudioCodingModuleTest() {}
void TearDown() OVERRIDE {}
void SetUp() OVERRIDE {
- acm_.reset(AudioCodingModule::Create(id_, clock_));
-
- RegisterCodec();
-
rtp_utility_->Populate(&rtp_header_);
input_frame_.sample_rate_hz_ = kSampleRateHz;
@@ -141,17 +138,32 @@ class AudioCodingModuleTest : public ::testing::Test {
memset(input_frame_.data_,
0,
input_frame_.samples_per_channel_ * sizeof(input_frame_.data_[0]));
+ }
- ASSERT_EQ(0, acm_->RegisterTransportCallback(&packet_cb_));
+ void CreateAcm() {
+ acm_.reset(AudioCoding::Create(config_));
+ ASSERT_TRUE(acm_.get() != NULL);
+ RegisterCodec();
}
virtual void RegisterCodec() {
- AudioCodingModule::Codec("L16", &codec_, kSampleRateHz, 1);
- codec_.pltype = kPayloadType;
-
// Register L16 codec in ACM.
- ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
- ASSERT_EQ(0, acm_->RegisterSendCodec(codec_));
+ int codec_type = acm2::ACMCodecDB::kNone;
+ switch (kSampleRateHz) {
+ case 8000:
+ codec_type = acm2::ACMCodecDB::kPCM16B;
+ break;
+ case 16000:
+ codec_type = acm2::ACMCodecDB::kPCM16Bwb;
+ break;
+ case 32000:
+ codec_type = acm2::ACMCodecDB::kPCM16Bswb32kHz;
+ break;
+ default:
+ FATAL() << "Sample rate not supported in this test.";
+ }
+ ASSERT_TRUE(acm_->RegisterSendCodec(codec_type, kPayloadType));
+ ASSERT_TRUE(acm_->RegisterReceiveCodec(codec_type, kPayloadType));
}
virtual void InsertPacketAndPullAudio() {
@@ -161,41 +173,33 @@ class AudioCodingModuleTest : public ::testing::Test {
virtual void InsertPacket() {
const uint8_t kPayload[kPayloadSizeBytes] = {0};
- ASSERT_EQ(0,
- acm_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
+ ASSERT_TRUE(acm_->InsertPacket(kPayload, kPayloadSizeBytes, rtp_header_));
rtp_utility_->Forward(&rtp_header_);
}
virtual void PullAudio() {
AudioFrame audio_frame;
- ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &audio_frame));
+ ASSERT_TRUE(acm_->Get10MsAudio(&audio_frame));
}
virtual void InsertAudio() {
- ASSERT_EQ(0, acm_->Add10MsData(input_frame_));
+ int encoded_bytes = acm_->Add10MsAudio(input_frame_);
+ ASSERT_GE(encoded_bytes, 0);
input_frame_.timestamp_ += kNumSamples10ms;
}
- virtual void Encode() {
- int32_t encoded_bytes = acm_->Process();
- // Expect to get one packet with two bytes per sample, or no packet at all,
- // depending on how many 10 ms blocks go into |codec_.pacsize|.
- EXPECT_TRUE(encoded_bytes == 2 * codec_.pacsize || encoded_bytes == 0);
- }
-
- const int id_;
+ AudioCoding::Config config_;
scoped_ptr<RtpUtility> rtp_utility_;
- scoped_ptr<AudioCodingModule> acm_;
+ scoped_ptr<AudioCoding> acm_;
PacketizationCallbackStub packet_cb_;
WebRtcRTPHeader rtp_header_;
AudioFrame input_frame_;
- CodecInst codec_;
- Clock* clock_;
};
// Check if the statistics are initialized correctly. Before any call to ACM
// all fields have to be zero.
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(InitializedToZero)) {
+ CreateAcm();
AudioDecodingCallStats stats;
acm_->GetDecodingCallStatistics(&stats);
EXPECT_EQ(0, stats.calls_to_neteq);
@@ -209,10 +213,10 @@ TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(InitializedToZero)) {
// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
// should result in generating silence, check the associated field.
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
- AudioDecodingCallStats stats;
const int kInitialDelay = 100;
-
- acm_->SetInitialPlayoutDelay(kInitialDelay);
+ config_.initial_playout_delay_ms = kInitialDelay;
+ CreateAcm();
+ AudioDecodingCallStats stats;
int num_calls = 0;
for (int time_ms = 0; time_ms < kInitialDelay;
@@ -232,6 +236,7 @@ TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
// simulate packet loss and check if PLC and PLC-to-CNG statistics are
// correctly updated.
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(NetEqCalls)) {
+ CreateAcm();
AudioDecodingCallStats stats;
const int kNumNormalCalls = 10;
@@ -263,21 +268,16 @@ TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(NetEqCalls)) {
}
TEST_F(AudioCodingModuleTest, VerifyOutputFrame) {
+ CreateAcm();
AudioFrame audio_frame;
const int kSampleRateHz = 32000;
- EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame));
- EXPECT_EQ(id_, audio_frame.id_);
+ EXPECT_TRUE(acm_->Get10MsAudio(&audio_frame));
EXPECT_EQ(0u, audio_frame.timestamp_);
EXPECT_GT(audio_frame.num_channels_, 0);
EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
}
-TEST_F(AudioCodingModuleTest, FailOnZeroDesiredFrequency) {
- AudioFrame audio_frame;
- EXPECT_EQ(-1, acm_->PlayoutData10Ms(0, &audio_frame));
-}
-
// A multi-threaded test for ACM. This base class is using the PCM16b 16 kHz
// codec, while the derive class AcmIsacMtTest is using iSAC.
class AudioCodingModuleMtTest : public AudioCodingModuleTest {
@@ -306,11 +306,12 @@ class AudioCodingModuleMtTest : public AudioCodingModuleTest {
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
next_insert_packet_time_ms_(0),
fake_clock_(new SimulatedClock(0)) {
- clock_ = fake_clock_.get();
+ config_.clock = fake_clock_.get();
}
virtual void SetUp() OVERRIDE {
AudioCodingModuleTest::SetUp();
+ CreateAcm();
StartThreads();
}
@@ -357,7 +358,6 @@ class AudioCodingModuleMtTest : public AudioCodingModuleTest {
}
++send_count_;
InsertAudio();
- Encode();
if (TestDone()) {
test_complete_->Set();
}
@@ -373,7 +373,7 @@ class AudioCodingModuleMtTest : public AudioCodingModuleTest {
SleepMs(1);
{
CriticalSectionScoped lock(crit_sect_.get());
- if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
+ if (fake_clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
return true;
}
next_insert_packet_time_ms_ += 10;
@@ -394,7 +394,7 @@ class AudioCodingModuleMtTest : public AudioCodingModuleTest {
{
CriticalSectionScoped lock(crit_sect_.get());
// Don't let the insert thread fall behind.
- if (next_insert_packet_time_ms_ < clock_->TimeInMilliseconds()) {
+ if (next_insert_packet_time_ms_ < fake_clock_->TimeInMilliseconds()) {
return true;
}
++pull_audio_count_;
@@ -439,6 +439,7 @@ class AcmIsacMtTest : public AudioCodingModuleMtTest {
virtual void SetUp() OVERRIDE {
AudioCodingModuleTest::SetUp();
+ CreateAcm();
// Set up input audio source to read from specified file, loop after 5
// seconds, and deliver blocks of 10 ms.
@@ -450,7 +451,6 @@ class AcmIsacMtTest : public AudioCodingModuleMtTest {
int loop_counter = 0;
while (packet_cb_.last_payload_len_bytes() == 0) {
InsertAudio();
- Encode();
ASSERT_LT(loop_counter++, 10);
}
// Set |last_packet_number_| to one less that |num_calls| so that the packet
@@ -462,13 +462,12 @@ class AcmIsacMtTest : public AudioCodingModuleMtTest {
virtual void RegisterCodec() OVERRIDE {
COMPILE_ASSERT(kSampleRateHz == 16000, test_designed_for_isac_16khz);
- AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1);
- codec_.pltype = kPayloadType;
// Register iSAC codec in ACM, effectively unregistering the PCM16B codec
// registered in AudioCodingModuleTest::SetUp();
- ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
- ASSERT_EQ(0, acm_->RegisterSendCodec(codec_));
+ ASSERT_TRUE(acm_->RegisterSendCodec(acm2::ACMCodecDB::kISAC, kPayloadType));
+ ASSERT_TRUE(
+ acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISAC, kPayloadType));
}
virtual void InsertPacket() OVERRIDE {
@@ -484,10 +483,8 @@ class AcmIsacMtTest : public AudioCodingModuleMtTest {
last_packet_number_ = num_calls;
}
ASSERT_GT(last_payload_vec_.size(), 0u);
- ASSERT_EQ(
- 0,
- acm_->IncomingPacket(
- &last_payload_vec_[0], last_payload_vec_.size(), rtp_header_));
+ ASSERT_TRUE(acm_->InsertPacket(
+ &last_payload_vec_[0], last_payload_vec_.size(), rtp_header_));
}
virtual void InsertAudio() OVERRIDE {
@@ -495,8 +492,6 @@ class AcmIsacMtTest : public AudioCodingModuleMtTest {
AudioCodingModuleTest::InsertAudio();
}
- virtual void Encode() OVERRIDE { ASSERT_GE(acm_->Process(), 0); }
-
// This method is the same as AudioCodingModuleMtTest::TestDone(), but here
// it is using the constants defined in this class (i.e., shorter test run).
virtual bool TestDone() OVERRIDE {
@@ -634,19 +629,15 @@ class AcmSenderBitExactness : public ::testing::Test,
// Registers a send codec in the test::AcmSendTest object. Returns true on
// success, false on failure.
- bool RegisterSendCodec(const char* payload_name,
- int sampling_freq_hz,
+ bool RegisterSendCodec(int codec_type,
int channels,
int payload_type,
int frame_size_samples,
int frame_size_rtp_timestamps) {
payload_type_ = payload_type;
frame_size_rtp_timestamps_ = frame_size_rtp_timestamps;
- return send_test_->RegisterCodec(payload_name,
- sampling_freq_hz,
- channels,
- payload_type,
- frame_size_samples);
+ return send_test_->RegisterCodec(
+ codec_type, channels, payload_type, frame_size_samples);
}
// Runs the test. SetUpSender() and RegisterSendCodec() must have been called
@@ -728,15 +719,13 @@ class AcmSenderBitExactness : public ::testing::Test,
payload_checksum_.Update(packet->payload(), packet->payload_length_bytes());
}
- void SetUpTest(const char* codec_name,
- int codec_sample_rate_hz,
+ void SetUpTest(int codec_type,
int channels,
int payload_type,
int codec_frame_size_samples,
int codec_frame_size_rtp_timestamps) {
ASSERT_TRUE(SetUpSender());
- ASSERT_TRUE(RegisterSendCodec(codec_name,
- codec_sample_rate_hz,
+ ASSERT_TRUE(RegisterSendCodec(codec_type,
channels,
payload_type,
codec_frame_size_samples,
@@ -754,7 +743,7 @@ class AcmSenderBitExactness : public ::testing::Test,
};
TEST_F(AcmSenderBitExactness, IsacWb30ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
+ ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kISAC, 1, 103, 480, 480));
Run(AcmReceiverBitExactness::PlatformChecksum(
"c7e5bdadfa2871df95639fcc297cf23d",
"0499ca260390769b3172136faad925b9",
@@ -768,7 +757,7 @@ TEST_F(AcmSenderBitExactness, IsacWb30ms) {
}
TEST_F(AcmSenderBitExactness, IsacWb60ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
+ ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kISAC, 1, 103, 960, 960));
Run(AcmReceiverBitExactness::PlatformChecksum(
"14d63c5f08127d280e722e3191b73bdd",
"8da003e16c5371af2dc2be79a50f9076",
@@ -782,7 +771,8 @@ TEST_F(AcmSenderBitExactness, IsacWb60ms) {
}
TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(IsacSwb30ms)) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kISACSWB, 1, 104, 960, 960));
Run(AcmReceiverBitExactness::PlatformChecksum(
"98d960600eb4ddb3fcbe11f5057ddfd7",
"",
@@ -796,7 +786,7 @@ TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(IsacSwb30ms)) {
}
TEST_F(AcmSenderBitExactness, Pcm16_8000khz_10ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+ ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kPCM16B, 1, 107, 80, 80));
Run("de4a98e1406f8b798d99cd0704e862e2",
"c1edd36339ce0326cc4550041ad719a0",
100,
@@ -804,7 +794,8 @@ TEST_F(AcmSenderBitExactness, Pcm16_8000khz_10ms) {
}
TEST_F(AcmSenderBitExactness, Pcm16_16000khz_10ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 1, 108, 160, 160));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kPCM16Bwb, 1, 108, 160, 160));
Run("ae646d7b68384a1269cc080dd4501916",
"ad786526383178b08d80d6eee06e9bad",
100,
@@ -812,7 +803,8 @@ TEST_F(AcmSenderBitExactness, Pcm16_16000khz_10ms) {
}
TEST_F(AcmSenderBitExactness, Pcm16_32000khz_10ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 1, 109, 320, 320));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kPCM16Bswb32kHz, 1, 109, 320, 320));
Run("7fe325e8fbaf755e3c5df0b11a4774fb",
"5ef82ea885e922263606c6fdbc49f651",
100,
@@ -820,7 +812,8 @@ TEST_F(AcmSenderBitExactness, Pcm16_32000khz_10ms) {
}
TEST_F(AcmSenderBitExactness, Pcm16_stereo_8000khz_10ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 2, 111, 80, 80));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kPCM16B_2ch, 2, 111, 80, 80));
Run("fb263b74e7ac3de915474d77e4744ceb",
"62ce5adb0d4965d0a52ec98ae7f98974",
100,
@@ -828,7 +821,8 @@ TEST_F(AcmSenderBitExactness, Pcm16_stereo_8000khz_10ms) {
}
TEST_F(AcmSenderBitExactness, Pcm16_stereo_16000khz_10ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 2, 112, 160, 160));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kPCM16Bwb_2ch, 2, 112, 160, 160));
Run("d09e9239553649d7ac93e19d304281fd",
"41ca8edac4b8c71cd54fd9f25ec14870",
100,
@@ -836,7 +830,8 @@ TEST_F(AcmSenderBitExactness, Pcm16_stereo_16000khz_10ms) {
}
TEST_F(AcmSenderBitExactness, Pcm16_stereo_32000khz_10ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 2, 113, 320, 320));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kPCM16Bswb32kHz_2ch, 2, 113, 320, 320));
Run("5f025d4f390982cc26b3d92fe02e3044",
"50e58502fb04421bf5b857dda4c96879",
100,
@@ -844,7 +839,7 @@ TEST_F(AcmSenderBitExactness, Pcm16_stereo_32000khz_10ms) {
}
TEST_F(AcmSenderBitExactness, Pcmu_20ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 1, 0, 160, 160));
+ ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kPCMU, 1, 0, 160, 160));
Run("81a9d4c0bb72e9becc43aef124c981e9",
"8f9b8750bd80fe26b6cbf6659b89f0f9",
50,
@@ -852,7 +847,7 @@ TEST_F(AcmSenderBitExactness, Pcmu_20ms) {
}
TEST_F(AcmSenderBitExactness, Pcma_20ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 1, 8, 160, 160));
+ ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kPCMA, 1, 8, 160, 160));
Run("39611f798969053925a49dc06d08de29",
"6ad745e55aa48981bfc790d0eeef2dd1",
50,
@@ -860,7 +855,8 @@ TEST_F(AcmSenderBitExactness, Pcma_20ms) {
}
TEST_F(AcmSenderBitExactness, Pcmu_stereo_20ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 2, 110, 160, 160));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kPCMU_2ch, 2, 110, 160, 160));
Run("437bec032fdc5cbaa0d5175430af7b18",
"60b6f25e8d1e74cb679cfe756dd9bca5",
50,
@@ -868,7 +864,8 @@ TEST_F(AcmSenderBitExactness, Pcmu_stereo_20ms) {
}
TEST_F(AcmSenderBitExactness, Pcma_stereo_20ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 2, 118, 160, 160));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kPCMA_2ch, 2, 118, 160, 160));
Run("a5c6d83c5b7cedbeff734238220a4b0c",
"92b282c83efd20e7eeef52ba40842cf7",
50,
@@ -876,7 +873,7 @@ TEST_F(AcmSenderBitExactness, Pcma_stereo_20ms) {
}
TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(Ilbc_30ms)) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("ILBC", 8000, 1, 102, 240, 240));
+ ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kILBC, 1, 102, 240, 240));
Run(AcmReceiverBitExactness::PlatformChecksum(
"7b6ec10910debd9af08011d3ed5249f7",
"android_audio",
@@ -890,7 +887,7 @@ TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(Ilbc_30ms)) {
}
TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(G722_20ms)) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 1, 9, 320, 160));
+ ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kG722, 1, 9, 320, 160));
Run(AcmReceiverBitExactness::PlatformChecksum(
"7d759436f2533582950d148b5161a36c",
"android_audio",
@@ -904,7 +901,8 @@ TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(G722_20ms)) {
}
TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(G722_stereo_20ms)) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 2, 119, 320, 160));
+ ASSERT_NO_FATAL_FAILURE(
+ SetUpTest(acm2::ACMCodecDB::kG722_2ch, 2, 119, 320, 160));
Run(AcmReceiverBitExactness::PlatformChecksum(
"7190ee718ab3d80eca181e5f7140c210",
"android_audio",
@@ -918,7 +916,7 @@ TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(G722_stereo_20ms)) {
}
TEST_F(AcmSenderBitExactness, Opus_stereo_20ms) {
- ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
+ ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kOpus, 2, 120, 960, 960));
Run(AcmReceiverBitExactness::PlatformChecksum(
"855041f2490b887302bce9d544731849",
"1e1a0fce893fef2d66886a7f09e2ebce",
diff --git a/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc b/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
new file mode 100644
index 00000000..0e9d2736
--- /dev/null
+++ b/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
@@ -0,0 +1,938 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/md5digest.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h"
+#include "webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/neteq/tools/audio_checksum.h"
+#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
+#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/compile_assert.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/interface/event_wrapper.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/system_wrappers/interface/sleep.h"
+#include "webrtc/system_wrappers/interface/thread_wrapper.h"
+#include "webrtc/test/testsupport/fileutils.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
+
+namespace webrtc {
+
+const int kSampleRateHz = 16000;
+const int kNumSamples10ms = kSampleRateHz / 100;
+const int kFrameSizeMs = 10; // Multiple of 10.
+const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
+const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
+const uint8_t kPayloadType = 111;
+
+class RtpUtility {
+ public:
+ RtpUtility(int samples_per_packet, uint8_t payload_type)
+ : samples_per_packet_(samples_per_packet), payload_type_(payload_type) {}
+
+ virtual ~RtpUtility() {}
+
+ void Populate(WebRtcRTPHeader* rtp_header) {
+ rtp_header->header.sequenceNumber = 0xABCD;
+ rtp_header->header.timestamp = 0xABCDEF01;
+ rtp_header->header.payloadType = payload_type_;
+ rtp_header->header.markerBit = false;
+ rtp_header->header.ssrc = 0x1234;
+ rtp_header->header.numCSRCs = 0;
+ rtp_header->frameType = kAudioFrameSpeech;
+
+ rtp_header->header.payload_type_frequency = kSampleRateHz;
+ rtp_header->type.Audio.channel = 1;
+ rtp_header->type.Audio.isCNG = false;
+ }
+
+ void Forward(WebRtcRTPHeader* rtp_header) {
+ ++rtp_header->header.sequenceNumber;
+ rtp_header->header.timestamp += samples_per_packet_;
+ }
+
+ private:
+ int samples_per_packet_;
+ uint8_t payload_type_;
+};
+
+class PacketizationCallbackStub : public AudioPacketizationCallback {
+ public:
+ PacketizationCallbackStub()
+ : num_calls_(0),
+ crit_sect_(CriticalSectionWrapper::CreateCriticalSection()) {}
+
+ virtual int32_t SendData(
+ FrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ uint16_t payload_len_bytes,
+ const RTPFragmentationHeader* fragmentation) OVERRIDE {
+ CriticalSectionScoped lock(crit_sect_.get());
+ ++num_calls_;
+ last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
+ return 0;
+ }
+
+ int num_calls() const {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return num_calls_;
+ }
+
+ int last_payload_len_bytes() const {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return last_payload_vec_.size();
+ }
+
+ void SwapBuffers(std::vector<uint8_t>* payload) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ last_payload_vec_.swap(*payload);
+ }
+
+ private:
+ int num_calls_ GUARDED_BY(crit_sect_);
+ std::vector<uint8_t> last_payload_vec_ GUARDED_BY(crit_sect_);
+ const scoped_ptr<CriticalSectionWrapper> crit_sect_;
+};
+
+class AudioCodingModuleTestOldApi : public ::testing::Test {
+ protected:
+ AudioCodingModuleTestOldApi()
+ : id_(1),
+ rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
+ clock_(Clock::GetRealTimeClock()) {}
+
+ ~AudioCodingModuleTestOldApi() {}
+
+ void TearDown() {}
+
+ void SetUp() {
+ acm_.reset(AudioCodingModule::Create(id_, clock_));
+
+ RegisterCodec();
+
+ rtp_utility_->Populate(&rtp_header_);
+
+ input_frame_.sample_rate_hz_ = kSampleRateHz;
+ input_frame_.num_channels_ = 1;
+ input_frame_.samples_per_channel_ = kSampleRateHz * 10 / 1000; // 10 ms.
+ COMPILE_ASSERT(kSampleRateHz * 10 / 1000 <= AudioFrame::kMaxDataSizeSamples,
+ audio_frame_too_small);
+ memset(input_frame_.data_,
+ 0,
+ input_frame_.samples_per_channel_ * sizeof(input_frame_.data_[0]));
+
+ ASSERT_EQ(0, acm_->RegisterTransportCallback(&packet_cb_));
+ }
+
+ virtual void RegisterCodec() {
+ AudioCodingModule::Codec("L16", &codec_, kSampleRateHz, 1);
+ codec_.pltype = kPayloadType;
+
+ // Register L16 codec in ACM.
+ ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
+ ASSERT_EQ(0, acm_->RegisterSendCodec(codec_));
+ }
+
+ virtual void InsertPacketAndPullAudio() {
+ InsertPacket();
+ PullAudio();
+ }
+
+ virtual void InsertPacket() {
+ const uint8_t kPayload[kPayloadSizeBytes] = {0};
+ ASSERT_EQ(0,
+ acm_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
+ rtp_utility_->Forward(&rtp_header_);
+ }
+
+ virtual void PullAudio() {
+ AudioFrame audio_frame;
+ ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &audio_frame));
+ }
+
+ virtual void InsertAudio() {
+ ASSERT_EQ(0, acm_->Add10MsData(input_frame_));
+ input_frame_.timestamp_ += kNumSamples10ms;
+ }
+
+ virtual void Encode() {
+ int32_t encoded_bytes = acm_->Process();
+ // Expect to get one packet with two bytes per sample, or no packet at all,
+ // depending on how many 10 ms blocks go into |codec_.pacsize|.
+ EXPECT_TRUE(encoded_bytes == 2 * codec_.pacsize || encoded_bytes == 0);
+ }
+
+ const int id_;
+ scoped_ptr<RtpUtility> rtp_utility_;
+ scoped_ptr<AudioCodingModule> acm_;
+ PacketizationCallbackStub packet_cb_;
+ WebRtcRTPHeader rtp_header_;
+ AudioFrame input_frame_;
+ CodecInst codec_;
+ Clock* clock_;
+};
+
+// Check if the statistics are initialized correctly. Before any call to ACM
+// all fields have to be zero.
+TEST_F(AudioCodingModuleTestOldApi, DISABLED_ON_ANDROID(InitializedToZero)) {
+ AudioDecodingCallStats stats;
+ acm_->GetDecodingCallStatistics(&stats);
+ EXPECT_EQ(0, stats.calls_to_neteq);
+ EXPECT_EQ(0, stats.calls_to_silence_generator);
+ EXPECT_EQ(0, stats.decoded_normal);
+ EXPECT_EQ(0, stats.decoded_cng);
+ EXPECT_EQ(0, stats.decoded_plc);
+ EXPECT_EQ(0, stats.decoded_plc_cng);
+}
+
+// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
+// should result in generating silence, check the associated field.
+TEST_F(AudioCodingModuleTestOldApi,
+ DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
+ AudioDecodingCallStats stats;
+ const int kInitialDelay = 100;
+
+ acm_->SetInitialPlayoutDelay(kInitialDelay);
+
+ int num_calls = 0;
+ for (int time_ms = 0; time_ms < kInitialDelay;
+ time_ms += kFrameSizeMs, ++num_calls) {
+ InsertPacketAndPullAudio();
+ }
+ acm_->GetDecodingCallStatistics(&stats);
+ EXPECT_EQ(0, stats.calls_to_neteq);
+ EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
+ EXPECT_EQ(0, stats.decoded_normal);
+ EXPECT_EQ(0, stats.decoded_cng);
+ EXPECT_EQ(0, stats.decoded_plc);
+ EXPECT_EQ(0, stats.decoded_plc_cng);
+}
+
+// Insert some packets and pull audio. Check statistics are valid. Then,
+// simulate packet loss and check if PLC and PLC-to-CNG statistics are
+// correctly updated.
+TEST_F(AudioCodingModuleTestOldApi, DISABLED_ON_ANDROID(NetEqCalls)) {
+ AudioDecodingCallStats stats;
+ const int kNumNormalCalls = 10;
+
+ for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
+ InsertPacketAndPullAudio();
+ }
+ acm_->GetDecodingCallStatistics(&stats);
+ EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
+ EXPECT_EQ(0, stats.calls_to_silence_generator);
+ EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
+ EXPECT_EQ(0, stats.decoded_cng);
+ EXPECT_EQ(0, stats.decoded_plc);
+ EXPECT_EQ(0, stats.decoded_plc_cng);
+
+ const int kNumPlc = 3;
+ const int kNumPlcCng = 5;
+
+ // Simulate packet-loss. NetEq first performs PLC then PLC fades to CNG.
+ for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
+ PullAudio();
+ }
+ acm_->GetDecodingCallStatistics(&stats);
+ EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
+ EXPECT_EQ(0, stats.calls_to_silence_generator);
+ EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
+ EXPECT_EQ(0, stats.decoded_cng);
+ EXPECT_EQ(kNumPlc, stats.decoded_plc);
+ EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
+}
+
+TEST_F(AudioCodingModuleTestOldApi, VerifyOutputFrame) {
+ AudioFrame audio_frame;
+ const int kSampleRateHz = 32000;
+ EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame));
+ EXPECT_EQ(id_, audio_frame.id_);
+ EXPECT_EQ(0u, audio_frame.timestamp_);
+ EXPECT_GT(audio_frame.num_channels_, 0);
+ EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+ EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
+}
+
+TEST_F(AudioCodingModuleTestOldApi, FailOnZeroDesiredFrequency) {
+ AudioFrame audio_frame;
+ EXPECT_EQ(-1, acm_->PlayoutData10Ms(0, &audio_frame));
+}
+
+// A multi-threaded test for ACM. This base class is using the PCM16b 16 kHz
+// codec, while the derive class AcmIsacMtTest is using iSAC.
+class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
+ protected:
+ static const int kNumPackets = 500;
+ static const int kNumPullCalls = 500;
+
+ AudioCodingModuleMtTestOldApi()
+ : AudioCodingModuleTestOldApi(),
+ send_thread_(ThreadWrapper::CreateThread(CbSendThread,
+ this,
+ kRealtimePriority,
+ "send")),
+ insert_packet_thread_(ThreadWrapper::CreateThread(CbInsertPacketThread,
+ this,
+ kRealtimePriority,
+ "insert_packet")),
+ pull_audio_thread_(ThreadWrapper::CreateThread(CbPullAudioThread,
+ this,
+ kRealtimePriority,
+ "pull_audio")),
+ test_complete_(EventWrapper::Create()),
+ send_count_(0),
+ insert_packet_count_(0),
+ pull_audio_count_(0),
+ crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ next_insert_packet_time_ms_(0),
+ fake_clock_(new SimulatedClock(0)) {
+ clock_ = fake_clock_.get();
+ }
+
+ void SetUp() {
+ AudioCodingModuleTestOldApi::SetUp();
+ StartThreads();
+ }
+
+ void StartThreads() {
+ unsigned int thread_id = 0;
+ ASSERT_TRUE(send_thread_->Start(thread_id));
+ ASSERT_TRUE(insert_packet_thread_->Start(thread_id));
+ ASSERT_TRUE(pull_audio_thread_->Start(thread_id));
+ }
+
+ void TearDown() {
+ AudioCodingModuleTestOldApi::TearDown();
+ pull_audio_thread_->Stop();
+ send_thread_->Stop();
+ insert_packet_thread_->Stop();
+ }
+
+ EventTypeWrapper RunTest() {
+ return test_complete_->Wait(10 * 60 * 1000); // 10 minutes' timeout.
+ }
+
+ virtual bool TestDone() {
+ if (packet_cb_.num_calls() > kNumPackets) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ if (pull_audio_count_ > kNumPullCalls) {
+ // Both conditions for completion are met. End the test.
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static bool CbSendThread(void* context) {
+ return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+ ->CbSendImpl();
+ }
+
+ // The send thread doesn't have to care about the current simulated time,
+ // since only the AcmReceiver is using the clock.
+ bool CbSendImpl() {
+ SleepMs(1);
+ if (HasFatalFailure()) {
+ // End the test early if a fatal failure (ASSERT_*) has occurred.
+ test_complete_->Set();
+ }
+ ++send_count_;
+ InsertAudio();
+ Encode();
+ if (TestDone()) {
+ test_complete_->Set();
+ }
+ return true;
+ }
+
+ static bool CbInsertPacketThread(void* context) {
+ return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+ ->CbInsertPacketImpl();
+ }
+
+ bool CbInsertPacketImpl() {
+ SleepMs(1);
+ {
+ CriticalSectionScoped lock(crit_sect_.get());
+ if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
+ return true;
+ }
+ next_insert_packet_time_ms_ += 10;
+ }
+ // Now we're not holding the crit sect when calling ACM.
+ ++insert_packet_count_;
+ InsertPacket();
+ return true;
+ }
+
+ static bool CbPullAudioThread(void* context) {
+ return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+ ->CbPullAudioImpl();
+ }
+
+ bool CbPullAudioImpl() {
+ SleepMs(1);
+ {
+ CriticalSectionScoped lock(crit_sect_.get());
+ // Don't let the insert thread fall behind.
+ if (next_insert_packet_time_ms_ < clock_->TimeInMilliseconds()) {
+ return true;
+ }
+ ++pull_audio_count_;
+ }
+ // Now we're not holding the crit sect when calling ACM.
+ PullAudio();
+ fake_clock_->AdvanceTimeMilliseconds(10);
+ return true;
+ }
+
+ scoped_ptr<ThreadWrapper> send_thread_;
+ scoped_ptr<ThreadWrapper> insert_packet_thread_;
+ scoped_ptr<ThreadWrapper> pull_audio_thread_;
+ const scoped_ptr<EventWrapper> test_complete_;
+ int send_count_;
+ int insert_packet_count_;
+ int pull_audio_count_ GUARDED_BY(crit_sect_);
+ const scoped_ptr<CriticalSectionWrapper> crit_sect_;
+ int64_t next_insert_packet_time_ms_ GUARDED_BY(crit_sect_);
+ scoped_ptr<SimulatedClock> fake_clock_;
+};
+
+TEST_F(AudioCodingModuleMtTestOldApi, DoTest) {
+ EXPECT_EQ(kEventSignaled, RunTest());
+}
+
+// This is a multi-threaded ACM test using iSAC. The test encodes audio
+// from a PCM file. The most recent encoded frame is used as input to the
+// receiving part. Depending on timing, it may happen that the same RTP packet
+// is inserted into the receiver multiple times, but this is a valid use-case,
+// and simplifies the test code a lot.
+class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
+ protected:
+ static const int kNumPackets = 500;
+ static const int kNumPullCalls = 500;
+
+ AcmIsacMtTestOldApi()
+ : AudioCodingModuleMtTestOldApi(), last_packet_number_(0) {}
+
+ ~AcmIsacMtTestOldApi() {}
+
+ void SetUp() {
+ AudioCodingModuleTestOldApi::SetUp();
+
+ // Set up input audio source to read from specified file, loop after 5
+ // seconds, and deliver blocks of 10 ms.
+ const std::string input_file_name =
+ webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+ audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
+
+ // Generate one packet to have something to insert.
+ int loop_counter = 0;
+ while (packet_cb_.last_payload_len_bytes() == 0) {
+ InsertAudio();
+ Encode();
+ ASSERT_LT(loop_counter++, 10);
+ }
+ // Set |last_packet_number_| to one less that |num_calls| so that the packet
+ // will be fetched in the next InsertPacket() call.
+ last_packet_number_ = packet_cb_.num_calls() - 1;
+
+ StartThreads();
+ }
+
+ virtual void RegisterCodec() {
+ COMPILE_ASSERT(kSampleRateHz == 16000, test_designed_for_isac_16khz);
+ AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1);
+ codec_.pltype = kPayloadType;
+
+ // Register iSAC codec in ACM, effectively unregistering the PCM16B codec
+ // registered in AudioCodingModuleTestOldApi::SetUp();
+ ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
+ ASSERT_EQ(0, acm_->RegisterSendCodec(codec_));
+ }
+
+ void InsertPacket() {
+ int num_calls = packet_cb_.num_calls(); // Store locally for thread safety.
+ if (num_calls > last_packet_number_) {
+ // Get the new payload out from the callback handler.
+ // Note that since we swap buffers here instead of directly inserting
+ // a pointer to the data in |packet_cb_|, we avoid locking the callback
+ // for the duration of the IncomingPacket() call.
+ packet_cb_.SwapBuffers(&last_payload_vec_);
+ ASSERT_GT(last_payload_vec_.size(), 0u);
+ rtp_utility_->Forward(&rtp_header_);
+ last_packet_number_ = num_calls;
+ }
+ ASSERT_GT(last_payload_vec_.size(), 0u);
+ ASSERT_EQ(
+ 0,
+ acm_->IncomingPacket(
+ &last_payload_vec_[0], last_payload_vec_.size(), rtp_header_));
+ }
+
+ void InsertAudio() {
+ memcpy(input_frame_.data_, audio_loop_.GetNextBlock(), kNumSamples10ms);
+ AudioCodingModuleTestOldApi::InsertAudio();
+ }
+
+ void Encode() { ASSERT_GE(acm_->Process(), 0); }
+
+ // This method is the same as AudioCodingModuleMtTestOldApi::TestDone(), but
+ // here it is using the constants defined in this class (i.e., shorter test
+ // run).
+ virtual bool TestDone() {
+ if (packet_cb_.num_calls() > kNumPackets) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ if (pull_audio_count_ > kNumPullCalls) {
+ // Both conditions for completion are met. End the test.
+ return true;
+ }
+ }
+ return false;
+ }
+
+ int last_packet_number_;
+ std::vector<uint8_t> last_payload_vec_;
+ test::AudioLoop audio_loop_;
+};
+
+TEST_F(AcmIsacMtTestOldApi, DoTest) {
+ EXPECT_EQ(kEventSignaled, RunTest());
+}
+
+class AcmReceiverBitExactnessOldApi : public ::testing::Test {
+ public:
+ static std::string PlatformChecksum(std::string win64,
+ std::string android,
+ std::string others) {
+#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
+ return win64;
+#elif defined(WEBRTC_ANDROID)
+ return android;
+#else
+ return others;
+#endif
+ }
+
+ protected:
+ void Run(int output_freq_hz, const std::string& checksum_ref) {
+ const std::string input_file_name =
+ webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
+ scoped_ptr<test::RtpFileSource> packet_source(
+ test::RtpFileSource::Create(input_file_name));
+#ifdef WEBRTC_ANDROID
+ // Filter out iLBC and iSAC-swb since they are not supported on Android.
+ packet_source->FilterOutPayloadType(102); // iLBC.
+ packet_source->FilterOutPayloadType(104); // iSAC-swb.
+#endif
+
+ test::AudioChecksum checksum;
+ const std::string output_file_name =
+ webrtc::test::OutputPath() +
+ ::testing::UnitTest::GetInstance()
+ ->current_test_info()
+ ->test_case_name() +
+ "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+ "_output.pcm";
+ test::OutputAudioFile output_file(output_file_name);
+ test::AudioSinkFork output(&checksum, &output_file);
+
+ test::AcmReceiveTestOldApi test(
+ packet_source.get(),
+ &output,
+ output_freq_hz,
+ test::AcmReceiveTestOldApi::kArbitraryChannels);
+ ASSERT_NO_FATAL_FAILURE(test.RegisterNetEqTestCodecs());
+ test.Run();
+
+ std::string checksum_string = checksum.Finish();
+ EXPECT_EQ(checksum_ref, checksum_string);
+ }
+};
+
+TEST_F(AcmReceiverBitExactnessOldApi, 8kHzOutput) {
+ Run(8000,
+ PlatformChecksum("bd6f8d9602cd82444ea2539e674df747",
+ "6ac89c7145072c26bfeba602cd661afb",
+ "8a8440f5511eb729221b9aac25cda3a0"));
+}
+
+TEST_F(AcmReceiverBitExactnessOldApi, 16kHzOutput) {
+ Run(16000,
+ PlatformChecksum("a39bc6ee0c4eb15f3ad2f43cebcc571d",
+ "3e888eb04f57db2c6ef952fe64f17fe6",
+ "7be583092c5adbcb0f6cd66eca20ea63"));
+}
+
+TEST_F(AcmReceiverBitExactnessOldApi, 32kHzOutput) {
+ Run(32000,
+ PlatformChecksum("80964572aaa2dc92f9e34896dd3802b3",
+ "aeca37e963310f5b6552b7edea23c2f1",
+ "3a84188abe9fca25fedd6034760f3e22"));
+}
+
+TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutput) {
+ Run(48000,
+ PlatformChecksum("8aacde91f390e0d5a9c2ed571a25fd37",
+ "76b9e99e0a3998aa28355e7a2bd836f7",
+ "89b4b19bdb4de40f1d88302ef8cb9f9b"));
+}
+
+// This test verifies bit exactness for the send-side of ACM. The test setup is
+// a chain of three different test classes:
+//
+// test::AcmSendTest -> AcmSenderBitExactness -> test::AcmReceiveTest
+//
+// The receiver side is driving the test by requesting new packets from
+// AcmSenderBitExactness::NextPacket(). This method, in turn, asks for the
+// packet from test::AcmSendTest::NextPacket, which inserts audio from the
+// input file until one packet is produced. (The input file loops indefinitely.)
+// Before passing the packet to the receiver, this test class verifies the
+// packet header and updates a payload checksum with the new payload. The
+// decoded output from the receiver is also verified with a (separate) checksum.
+class AcmSenderBitExactnessOldApi : public ::testing::Test,
+ public test::PacketSource {
+ protected:
+ static const int kTestDurationMs = 1000;
+
+ AcmSenderBitExactnessOldApi()
+ : frame_size_rtp_timestamps_(0),
+ packet_count_(0),
+ payload_type_(0),
+ last_sequence_number_(0),
+ last_timestamp_(0) {}
+
+ // Sets up the test::AcmSendTest object. Returns true on success, otherwise
+ // false.
+ bool SetUpSender() {
+ const std::string input_file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ // Note that |audio_source_| will loop forever. The test duration is set
+ // explicitly by |kTestDurationMs|.
+ audio_source_.reset(new test::InputAudioFile(input_file_name));
+ static const int kSourceRateHz = 32000;
+ send_test_.reset(new test::AcmSendTestOldApi(
+ audio_source_.get(), kSourceRateHz, kTestDurationMs));
+ return send_test_.get() != NULL;
+ }
+
+ // Registers a send codec in the test::AcmSendTest object. Returns true on
+ // success, false on failure.
+ bool RegisterSendCodec(const char* payload_name,
+ int sampling_freq_hz,
+ int channels,
+ int payload_type,
+ int frame_size_samples,
+ int frame_size_rtp_timestamps) {
+ payload_type_ = payload_type;
+ frame_size_rtp_timestamps_ = frame_size_rtp_timestamps;
+ return send_test_->RegisterCodec(payload_name,
+ sampling_freq_hz,
+ channels,
+ payload_type,
+ frame_size_samples);
+ }
+
+ // Runs the test. SetUpSender() and RegisterSendCodec() must have been called
+ // before calling this method.
+ void Run(const std::string& audio_checksum_ref,
+ const std::string& payload_checksum_ref,
+ int expected_packets,
+ test::AcmReceiveTestOldApi::NumOutputChannels expected_channels) {
+ // Set up the receiver used to decode the packets and verify the decoded
+ // output.
+ test::AudioChecksum audio_checksum;
+ const std::string output_file_name =
+ webrtc::test::OutputPath() +
+ ::testing::UnitTest::GetInstance()
+ ->current_test_info()
+ ->test_case_name() +
+ "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+ "_output.pcm";
+ test::OutputAudioFile output_file(output_file_name);
+ // Have the output audio sent both to file and to the checksum calculator.
+ test::AudioSinkFork output(&audio_checksum, &output_file);
+ const int kOutputFreqHz = 8000;
+ test::AcmReceiveTestOldApi receive_test(
+ this, &output, kOutputFreqHz, expected_channels);
+ ASSERT_NO_FATAL_FAILURE(receive_test.RegisterDefaultCodecs());
+
+ // This is where the actual test is executed.
+ receive_test.Run();
+
+ // Extract and verify the audio checksum.
+ std::string checksum_string = audio_checksum.Finish();
+ EXPECT_EQ(audio_checksum_ref, checksum_string);
+
+ // Extract and verify the payload checksum.
+ char checksum_result[rtc::Md5Digest::kSize];
+ payload_checksum_.Finish(checksum_result, rtc::Md5Digest::kSize);
+ checksum_string = rtc::hex_encode(checksum_result, rtc::Md5Digest::kSize);
+ EXPECT_EQ(payload_checksum_ref, checksum_string);
+
+ // Verify number of packets produced.
+ EXPECT_EQ(expected_packets, packet_count_);
+ }
+
+ // Returns a pointer to the next packet. Returns NULL if the source is
+ // depleted (i.e., the test duration is exceeded), or if an error occurred.
+ // Inherited from test::PacketSource.
+ test::Packet* NextPacket() OVERRIDE {
+ // Get the next packet from AcmSendTest. Ownership of |packet| is
+ // transferred to this method.
+ test::Packet* packet = send_test_->NextPacket();
+ if (!packet)
+ return NULL;
+
+ VerifyPacket(packet);
+ // TODO(henrik.lundin) Save the packet to file as well.
+
+ // Pass it on to the caller. The caller becomes the owner of |packet|.
+ return packet;
+ }
+
+ // Verifies the packet.
+ void VerifyPacket(const test::Packet* packet) {
+ EXPECT_TRUE(packet->valid_header());
+ // (We can check the header fields even if valid_header() is false.)
+ EXPECT_EQ(payload_type_, packet->header().payloadType);
+ if (packet_count_ > 0) {
+ // This is not the first packet.
+ uint16_t sequence_number_diff =
+ packet->header().sequenceNumber - last_sequence_number_;
+ EXPECT_EQ(1, sequence_number_diff);
+ uint32_t timestamp_diff = packet->header().timestamp - last_timestamp_;
+ EXPECT_EQ(frame_size_rtp_timestamps_, timestamp_diff);
+ }
+ ++packet_count_;
+ last_sequence_number_ = packet->header().sequenceNumber;
+ last_timestamp_ = packet->header().timestamp;
+ // Update the checksum.
+ payload_checksum_.Update(packet->payload(), packet->payload_length_bytes());
+ }
+
+ void SetUpTest(const char* codec_name,
+ int codec_sample_rate_hz,
+ int channels,
+ int payload_type,
+ int codec_frame_size_samples,
+ int codec_frame_size_rtp_timestamps) {
+ ASSERT_TRUE(SetUpSender());
+ ASSERT_TRUE(RegisterSendCodec(codec_name,
+ codec_sample_rate_hz,
+ channels,
+ payload_type,
+ codec_frame_size_samples,
+ codec_frame_size_rtp_timestamps));
+ }
+
+ scoped_ptr<test::AcmSendTestOldApi> send_test_;
+ scoped_ptr<test::InputAudioFile> audio_source_;
+ uint32_t frame_size_rtp_timestamps_;
+ int packet_count_;
+ uint8_t payload_type_;
+ uint16_t last_sequence_number_;
+ uint32_t last_timestamp_;
+ rtc::Md5Digest payload_checksum_;
+};
+
+TEST_F(AcmSenderBitExactnessOldApi, IsacWb30ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
+ Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "c7e5bdadfa2871df95639fcc297cf23d",
+ "0499ca260390769b3172136faad925b9",
+ "0b58f9eeee43d5891f5f6c75e77984a3"),
+ AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "d42cb5195463da26c8129bbfe73a22e6",
+ "83de248aea9c3c2bd680b6952401b4ca",
+ "3c79f16f34218271f3dca4e2b1dfe1bb"),
+ 33,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, IsacWb60ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
+ Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "14d63c5f08127d280e722e3191b73bdd",
+ "8da003e16c5371af2dc2be79a50f9076",
+ "1ad29139a04782a33daad8c2b9b35875"),
+ AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "ebe04a819d3a9d83a83a17f271e1139a",
+ "97aeef98553b5a4b5a68f8b716e8eaf0",
+ "9e0a0ab743ad987b55b8e14802769c56"),
+ 16,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(IsacSwb30ms)) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960));
+ Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "98d960600eb4ddb3fcbe11f5057ddfd7",
+ "",
+ "2f6dfe142f735f1d96f6bd86d2526f42"),
+ AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "cc9d2d86a71d6f99f97680a5c27e2762",
+ "",
+ "7b214fc3a5e33d68bf30e77969371f31"),
+ 33,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_8000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+ Run("de4a98e1406f8b798d99cd0704e862e2",
+ "c1edd36339ce0326cc4550041ad719a0",
+ 100,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_16000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 1, 108, 160, 160));
+ Run("ae646d7b68384a1269cc080dd4501916",
+ "ad786526383178b08d80d6eee06e9bad",
+ 100,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_32000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 1, 109, 320, 320));
+ Run("7fe325e8fbaf755e3c5df0b11a4774fb",
+ "5ef82ea885e922263606c6fdbc49f651",
+ 100,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_8000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 2, 111, 80, 80));
+ Run("fb263b74e7ac3de915474d77e4744ceb",
+ "62ce5adb0d4965d0a52ec98ae7f98974",
+ 100,
+ test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_16000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 2, 112, 160, 160));
+ Run("d09e9239553649d7ac93e19d304281fd",
+ "41ca8edac4b8c71cd54fd9f25ec14870",
+ 100,
+ test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_32000khz_10ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 2, 113, 320, 320));
+ Run("5f025d4f390982cc26b3d92fe02e3044",
+ "50e58502fb04421bf5b857dda4c96879",
+ 100,
+ test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcmu_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 1, 0, 160, 160));
+ Run("81a9d4c0bb72e9becc43aef124c981e9",
+ "8f9b8750bd80fe26b6cbf6659b89f0f9",
+ 50,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcma_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 1, 8, 160, 160));
+ Run("39611f798969053925a49dc06d08de29",
+ "6ad745e55aa48981bfc790d0eeef2dd1",
+ 50,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcmu_stereo_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 2, 110, 160, 160));
+ Run("437bec032fdc5cbaa0d5175430af7b18",
+ "60b6f25e8d1e74cb679cfe756dd9bca5",
+ 50,
+ test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcma_stereo_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 2, 118, 160, 160));
+ Run("a5c6d83c5b7cedbeff734238220a4b0c",
+ "92b282c83efd20e7eeef52ba40842cf7",
+ 50,
+ test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(Ilbc_30ms)) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("ILBC", 8000, 1, 102, 240, 240));
+ Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "7b6ec10910debd9af08011d3ed5249f7",
+ "android_audio",
+ "7b6ec10910debd9af08011d3ed5249f7"),
+ AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "cfae2e9f6aba96e145f2bcdd5050ce78",
+ "android_payload",
+ "cfae2e9f6aba96e145f2bcdd5050ce78"),
+ 33,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(G722_20ms)) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 1, 9, 320, 160));
+ Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "7d759436f2533582950d148b5161a36c",
+ "android_audio",
+ "7d759436f2533582950d148b5161a36c"),
+ AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "fc68a87e1380614e658087cb35d5ca10",
+ "android_payload",
+ "fc68a87e1380614e658087cb35d5ca10"),
+ 50,
+ test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(G722_stereo_20ms)) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 2, 119, 320, 160));
+ Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "7190ee718ab3d80eca181e5f7140c210",
+ "android_audio",
+ "7190ee718ab3d80eca181e5f7140c210"),
+ AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "66516152eeaa1e650ad94ff85f668dac",
+ "android_payload",
+ "66516152eeaa1e650ad94ff85f668dac"),
+ 50,
+ test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms) {
+ ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
+ Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "855041f2490b887302bce9d544731849",
+ "1e1a0fce893fef2d66886a7f09e2ebce",
+ "855041f2490b887302bce9d544731849"),
+ AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "d781cce1ab986b618d0da87226cdde30",
+ "1a1fe04dd12e755949987c8d729fb3e0",
+ "d781cce1ab986b618d0da87226cdde30"),
+ 50,
+ test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+} // namespace webrtc
diff --git a/modules/audio_coding/main/interface/audio_coding_module.h b/modules/audio_coding/main/interface/audio_coding_module.h
index 389b93fe..8d73285a 100644
--- a/modules/audio_coding/main/interface/audio_coding_module.h
+++ b/modules/audio_coding/main/interface/audio_coding_module.h
@@ -1015,6 +1015,14 @@ class AudioCoding {
playout_channels(1),
playout_frequency_hz(32000) {}
+ AudioCodingModule::Config ToOldConfig() const {
+ AudioCodingModule::Config old_config;
+ old_config.id = 0;
+ old_config.neteq_config = neteq_config;
+ old_config.clock = clock;
+ return old_config;
+ }
+
NetEq::Config neteq_config;
Clock* clock;
AudioPacketizationCallback* transport;
@@ -1046,6 +1054,9 @@ class AudioCoding {
// codec that was registered in the latest call to RegisterSendCodec().
virtual const AudioEncoder* GetSenderInfo() const = 0;
+ // Temporary solution to be used during refactoring.
+ virtual const CodecInst* GetSenderCodecInst() = 0;
+
// Adds 10 ms of raw (PCM) audio data to the encoder. If the sampling
// frequency of the audio does not match the sampling frequency of the
// current encoder, ACM will resample the audio.
@@ -1139,6 +1150,22 @@ class AudioCoding {
// Disables NACK.
virtual void DisableNack() = 0;
+
+ // Temporary solution to be used during refactoring.
+ // If DTX is enabled and the codec does not have internal DTX/VAD
+ // WebRtc VAD will be automatically enabled and |enable_vad| is ignored.
+ //
+ // If DTX is disabled but VAD is enabled no DTX packets are sent,
+ // regardless of whether the codec has internal DTX/VAD or not. In this
+ // case, WebRtc VAD is running to label frames as active/in-active.
+ //
+ // NOTE! VAD/DTX is not supported when sending stereo.
+ //
+ // Return true if successful, false otherwise.
+ virtual bool SetVad(bool enable_dtx,
+ bool enable_vad,
+ ACMVADMode vad_mode) = 0;
+
// Returns a list of packets to request retransmission of.
// |round_trip_time_ms| is an estimate of the round-trip-time (in
// milliseconds). Missing packets which will be decoded sooner than the
diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc
index d33c8f23..624e6a4d 100644
--- a/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -348,14 +348,11 @@ class AudioDecoderIsacFloatTest : public AudioDecoderTest {
virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
uint8_t* output) {
// Insert 3 * 10 ms. Expect non-zero output on third call.
- EXPECT_EQ(0, WebRtcIsac_Encode(encoder_, input,
- reinterpret_cast<int16_t*>(output)));
+ EXPECT_EQ(0, WebRtcIsac_Encode(encoder_, input, output));
input += input_size_;
- EXPECT_EQ(0, WebRtcIsac_Encode(encoder_, input,
- reinterpret_cast<int16_t*>(output)));
+ EXPECT_EQ(0, WebRtcIsac_Encode(encoder_, input, output));
input += input_size_;
- int enc_len_bytes =
- WebRtcIsac_Encode(encoder_, input, reinterpret_cast<int16_t*>(output));
+ int enc_len_bytes = WebRtcIsac_Encode(encoder_, input, output);
EXPECT_GT(enc_len_bytes, 0);
return enc_len_bytes;
}
@@ -388,14 +385,11 @@ class AudioDecoderIsacSwbTest : public AudioDecoderTest {
virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
uint8_t* output) {
// Insert 3 * 10 ms. Expect non-zero output on third call.
- EXPECT_EQ(0, WebRtcIsac_Encode(encoder_, input,
- reinterpret_cast<int16_t*>(output)));
+ EXPECT_EQ(0, WebRtcIsac_Encode(encoder_, input, output));
input += input_size_;
- EXPECT_EQ(0, WebRtcIsac_Encode(encoder_, input,
- reinterpret_cast<int16_t*>(output)));
+ EXPECT_EQ(0, WebRtcIsac_Encode(encoder_, input, output));
input += input_size_;
- int enc_len_bytes =
- WebRtcIsac_Encode(encoder_, input, reinterpret_cast<int16_t*>(output));
+ int enc_len_bytes = WebRtcIsac_Encode(encoder_, input, output);
EXPECT_GT(enc_len_bytes, 0);
return enc_len_bytes;
}
@@ -441,14 +435,11 @@ class AudioDecoderIsacFixTest : public AudioDecoderTest {
virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
uint8_t* output) {
// Insert 3 * 10 ms. Expect non-zero output on third call.
- EXPECT_EQ(0, WebRtcIsacfix_Encode(encoder_, input,
- reinterpret_cast<int16_t*>(output)));
+ EXPECT_EQ(0, WebRtcIsacfix_Encode(encoder_, input, output));
input += input_size_;
- EXPECT_EQ(0, WebRtcIsacfix_Encode(encoder_, input,
- reinterpret_cast<int16_t*>(output)));
+ EXPECT_EQ(0, WebRtcIsacfix_Encode(encoder_, input, output));
input += input_size_;
- int enc_len_bytes = WebRtcIsacfix_Encode(
- encoder_, input, reinterpret_cast<int16_t*>(output));
+ int enc_len_bytes = WebRtcIsacfix_Encode(encoder_, input, output);
EXPECT_GT(enc_len_bytes, 0);
return enc_len_bytes;
}
diff --git a/modules/audio_coding/neteq/decision_logic_normal.cc b/modules/audio_coding/neteq/decision_logic_normal.cc
index 97a8843a..9e422041 100644
--- a/modules/audio_coding/neteq/decision_logic_normal.cc
+++ b/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -76,8 +76,10 @@ Operations DecisionLogicNormal::GetDecisionSpecialized(
available_timestamp, play_dtmf);
} else {
// This implies that available_timestamp < target_timestamp, which can
- // happen when a new stream or codec is received. Signal for a reset.
- return kUndefined;
+ // happen when a new stream or codec is received. Do Expand instead, and
+ // wait for a newer packet to arrive, or for the buffer to flush (resulting
+ // in a master reset).
+ return kExpand;
}
}
diff --git a/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 77615257..6a8eafa1 100644
--- a/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -19,6 +19,7 @@
#include "webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "webrtc/system_wrappers/interface/compile_assert.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
@@ -26,6 +27,7 @@
namespace webrtc {
using ::testing::_;
+using ::testing::Return;
// This test encodes a few packets of PCM16b 32 kHz data and inserts it into two
// different NetEq instances. The first instance uses the internal version of
@@ -50,10 +52,9 @@ class NetEqExternalDecoderTest : public ::testing::Test {
payload_size_bytes_(0),
last_send_time_(0),
last_arrival_time_(0) {
- NetEq::Config config;
- config.sample_rate_hz = sample_rate_hz_;
- neteq_external_ = NetEq::Create(config);
- neteq_ = NetEq::Create(config);
+ config_.sample_rate_hz = sample_rate_hz_;
+ neteq_external_ = NetEq::Create(config_);
+ neteq_ = NetEq::Create(config_);
input_ = new int16_t[frame_size_samples_];
encoded_ = new uint8_t[2 * frame_size_samples_];
}
@@ -158,6 +159,8 @@ class NetEqExternalDecoderTest : public ::testing::Test {
EXPECT_EQ(output_size_samples_, samples_per_channel);
}
+ virtual int NumExpectedDecodeCalls(int num_loops) const { return num_loops; }
+
void RunTest(int num_loops) {
// Get next input packets (mono and multi-channel).
int next_send_time;
@@ -169,7 +172,7 @@ class NetEqExternalDecoderTest : public ::testing::Test {
} while (Lost()); // If lost, immediately read the next packet.
EXPECT_CALL(*external_decoder_, Decode(_, payload_size_bytes_, _, _))
- .Times(num_loops);
+ .Times(NumExpectedDecodeCalls(num_loops));
int time_now = 0;
for (int k = 0; k < num_loops; ++k) {
@@ -196,11 +199,12 @@ class NetEqExternalDecoderTest : public ::testing::Test {
}
}
- const int sample_rate_hz_;
- const int samples_per_ms_;
+ NetEq::Config config_;
+ int sample_rate_hz_;
+ int samples_per_ms_;
const int frame_size_ms_;
- const int frame_size_samples_;
- const int output_size_samples_;
+ int frame_size_samples_;
+ int output_size_samples_;
NetEq* neteq_external_;
NetEq* neteq_;
scoped_ptr<MockExternalPcm16B> external_decoder_;
@@ -220,4 +224,256 @@ TEST_F(NetEqExternalDecoderTest, RunTest) {
RunTest(100); // Run 100 laps @ 10 ms each in the test loop.
}
+class LargeTimestampJumpTest : public NetEqExternalDecoderTest {
+ protected:
+ enum TestStates {
+ kInitialPhase,
+ kNormalPhase,
+ kExpandPhase,
+ kFadedExpandPhase,
+ kRecovered
+ };
+
+ LargeTimestampJumpTest()
+ : NetEqExternalDecoderTest(), test_state_(kInitialPhase) {
+ sample_rate_hz_ = 8000;
+ samples_per_ms_ = sample_rate_hz_ / 1000;
+ frame_size_samples_ = frame_size_ms_ * samples_per_ms_;
+ output_size_samples_ = frame_size_ms_ * samples_per_ms_;
+ EXPECT_CALL(*external_decoder_, Die()).Times(1);
+ external_decoder_.reset(new MockExternalPcm16B(kDecoderPCM16B));
+ }
+
+ void SetUp() OVERRIDE {
+ const std::string file_name =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ input_file_.reset(new test::InputAudioFile(file_name));
+ assert(sample_rate_hz_ == 8000);
+ NetEqDecoder decoder = kDecoderPCM16B;
+ EXPECT_CALL(*external_decoder_, Init());
+ EXPECT_CALL(*external_decoder_, HasDecodePlc())
+ .WillRepeatedly(Return(false));
+ // NetEq is not allowed to delete the external decoder (hence Times(0)).
+ EXPECT_CALL(*external_decoder_, Die()).Times(0);
+ ASSERT_EQ(NetEq::kOK,
+ neteq_external_->RegisterExternalDecoder(
+ external_decoder_.get(), decoder, kPayloadType));
+ ASSERT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(decoder, kPayloadType));
+ }
+
+ void InsertPackets(int next_arrival_time) OVERRIDE {
+ // Insert packet in external decoder instance.
+ EXPECT_CALL(*external_decoder_,
+ IncomingPacket(_,
+ payload_size_bytes_,
+ rtp_header_.header.sequenceNumber,
+ rtp_header_.header.timestamp,
+ next_arrival_time));
+ ASSERT_EQ(
+ NetEq::kOK,
+ neteq_external_->InsertPacket(
+ rtp_header_, encoded_, payload_size_bytes_, next_arrival_time));
+ }
+
+ void GetOutputAudio() OVERRIDE {
+ NetEqOutputType output_type;
+ int samples_per_channel;
+ int num_channels;
+ // Get audio from external decoder instance.
+ ASSERT_EQ(NetEq::kOK,
+ neteq_external_->GetAudio(kMaxBlockSize,
+ output_external_,
+ &samples_per_channel,
+ &num_channels,
+ &output_type));
+ EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(output_size_samples_, samples_per_channel);
+ UpdateState(output_type);
+ }
+
+ virtual void UpdateState(NetEqOutputType output_type) {
+ switch (test_state_) {
+ case kInitialPhase: {
+ if (output_type == kOutputNormal) {
+ test_state_ = kNormalPhase;
+ }
+ break;
+ }
+ case kNormalPhase: {
+ if (output_type == kOutputPLC) {
+ test_state_ = kExpandPhase;
+ }
+ break;
+ }
+ case kExpandPhase: {
+ if (output_type == kOutputPLCtoCNG) {
+ test_state_ = kFadedExpandPhase;
+ }
+ break;
+ }
+ case kFadedExpandPhase: {
+ if (output_type == kOutputNormal) {
+ test_state_ = kRecovered;
+ }
+ break;
+ }
+ case kRecovered: {
+ break;
+ }
+ }
+ }
+
+ void VerifyOutput(size_t num_samples) const OVERRIDE {
+ if (test_state_ == kExpandPhase || test_state_ == kFadedExpandPhase) {
+ // Don't verify the output in this phase of the test.
+ return;
+ }
+ for (size_t i = 0; i < num_samples; ++i) {
+ if (output_external_[i] != 0)
+ return;
+ }
+ EXPECT_TRUE(false)
+ << "Expected at least one non-zero sample in each output block.";
+ }
+
+ int NumExpectedDecodeCalls(int num_loops) const OVERRIDE {
+ // Some packets won't be decoded because of the buffer being flushed after
+ // the timestamp jump.
+ return num_loops - (config_.max_packets_in_buffer + 1);
+ }
+
+ TestStates test_state_;
+};
+
+TEST_F(LargeTimestampJumpTest, JumpLongerThanHalfRange) {
+ // Set the timestamp series to start at 2880, increase to 7200, then jump to
+ // 2869342376. The sequence numbers start at 42076 and increase by 1 for each
+ // packet, also when the timestamp jumps.
+ static const uint16_t kStartSeqeunceNumber = 42076;
+ static const uint32_t kStartTimestamp = 2880;
+ static const uint32_t kJumpFromTimestamp = 7200;
+ static const uint32_t kJumpToTimestamp = 2869342376;
+ COMPILE_ASSERT(kJumpFromTimestamp < kJumpToTimestamp,
+ timestamp_jump_should_not_result_in_wrap);
+ COMPILE_ASSERT(
+ static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) > 0x7FFFFFFF,
+ jump_should_be_larger_than_half_range);
+ // Replace the default RTP generator with one that jumps in timestamp.
+ rtp_generator_.reset(new test::TimestampJumpRtpGenerator(samples_per_ms_,
+ kStartSeqeunceNumber,
+ kStartTimestamp,
+ kJumpFromTimestamp,
+ kJumpToTimestamp));
+
+ RunTest(130); // Run 130 laps @ 10 ms each in the test loop.
+ EXPECT_EQ(kRecovered, test_state_);
+}
+
+TEST_F(LargeTimestampJumpTest, JumpLongerThanHalfRangeAndWrap) {
+ // Make a jump larger than half the 32-bit timestamp range. Set the start
+ // timestamp such that the jump will result in a wrap around.
+ static const uint16_t kStartSeqeunceNumber = 42076;
+ // Set the jump length slightly larger than 2^31.
+ static const uint32_t kStartTimestamp = 3221223116;
+ static const uint32_t kJumpFromTimestamp = 3221223216;
+ static const uint32_t kJumpToTimestamp = 1073744278;
+ COMPILE_ASSERT(kJumpToTimestamp < kJumpFromTimestamp,
+ timestamp_jump_should_result_in_wrap);
+ COMPILE_ASSERT(
+ static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) > 0x7FFFFFFF,
+ jump_should_be_larger_than_half_range);
+ // Replace the default RTP generator with one that jumps in timestamp.
+ rtp_generator_.reset(new test::TimestampJumpRtpGenerator(samples_per_ms_,
+ kStartSeqeunceNumber,
+ kStartTimestamp,
+ kJumpFromTimestamp,
+ kJumpToTimestamp));
+
+ RunTest(130); // Run 130 laps @ 10 ms each in the test loop.
+ EXPECT_EQ(kRecovered, test_state_);
+}
+
+class ShortTimestampJumpTest : public LargeTimestampJumpTest {
+ protected:
+ void UpdateState(NetEqOutputType output_type) OVERRIDE {
+ switch (test_state_) {
+ case kInitialPhase: {
+ if (output_type == kOutputNormal) {
+ test_state_ = kNormalPhase;
+ }
+ break;
+ }
+ case kNormalPhase: {
+ if (output_type == kOutputPLC) {
+ test_state_ = kExpandPhase;
+ }
+ break;
+ }
+ case kExpandPhase: {
+ if (output_type == kOutputNormal) {
+ test_state_ = kRecovered;
+ }
+ break;
+ }
+ case kRecovered: {
+ break;
+ }
+ default: { FAIL(); }
+ }
+ }
+
+ int NumExpectedDecodeCalls(int num_loops) const OVERRIDE {
+ // Some packets won't be decoded because of the timestamp jump.
+ return num_loops - 2;
+ }
+};
+
+TEST_F(ShortTimestampJumpTest, JumpShorterThanHalfRange) {
+ // Make a jump shorter than half the 32-bit timestamp range. Set the start
+ // timestamp such that the jump will not result in a wrap around.
+ static const uint16_t kStartSeqeunceNumber = 42076;
+ // Set the jump length slightly smaller than 2^31.
+ static const uint32_t kStartTimestamp = 4711;
+ static const uint32_t kJumpFromTimestamp = 4811;
+ static const uint32_t kJumpToTimestamp = 2147483747;
+ COMPILE_ASSERT(kJumpFromTimestamp < kJumpToTimestamp,
+ timestamp_jump_should_not_result_in_wrap);
+ COMPILE_ASSERT(
+ static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) < 0x7FFFFFFF,
+ jump_should_be_smaller_than_half_range);
+ // Replace the default RTP generator with one that jumps in timestamp.
+ rtp_generator_.reset(new test::TimestampJumpRtpGenerator(samples_per_ms_,
+ kStartSeqeunceNumber,
+ kStartTimestamp,
+ kJumpFromTimestamp,
+ kJumpToTimestamp));
+
+ RunTest(130); // Run 130 laps @ 10 ms each in the test loop.
+ EXPECT_EQ(kRecovered, test_state_);
+}
+
+TEST_F(ShortTimestampJumpTest, JumpShorterThanHalfRangeAndWrap) {
+ // Make a jump shorter than half the 32-bit timestamp range. Set the start
+ // timestamp such that the jump will result in a wrap around.
+ static const uint16_t kStartSeqeunceNumber = 42076;
+ // Set the jump length slightly smaller than 2^31.
+ static const uint32_t kStartTimestamp = 3221227827;
+ static const uint32_t kJumpFromTimestamp = 3221227927;
+ static const uint32_t kJumpToTimestamp = 1073739567;
+ COMPILE_ASSERT(kJumpToTimestamp < kJumpFromTimestamp,
+ timestamp_jump_should_result_in_wrap);
+ COMPILE_ASSERT(
+ static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) < 0x7FFFFFFF,
+ jump_should_be_smaller_than_half_range);
+ // Replace the default RTP generator with one that jumps in timestamp.
+ rtp_generator_.reset(new test::TimestampJumpRtpGenerator(samples_per_ms_,
+ kStartSeqeunceNumber,
+ kStartTimestamp,
+ kJumpFromTimestamp,
+ kJumpToTimestamp));
+
+ RunTest(130); // Run 130 laps @ 10 ms each in the test loop.
+ EXPECT_EQ(kRecovered, test_state_);
+}
+
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_impl.cc b/modules/audio_coding/neteq/neteq_impl.cc
index a184fc33..d7147335 100644
--- a/modules/audio_coding/neteq/neteq_impl.cc
+++ b/modules/audio_coding/neteq/neteq_impl.cc
@@ -864,9 +864,6 @@ int NetEqImpl::GetDecision(Operations* operation,
assert(sync_buffer_.get());
uint32_t end_timestamp = sync_buffer_->end_timestamp();
- if (!new_codec_) {
- packet_buffer_->DiscardOldPackets(end_timestamp);
- }
const RTPHeader* header = packet_buffer_->NextRtpHeader();
if (decision_logic_->CngRfc3389On() || last_mode_ == kModeRfc3389Cng) {
@@ -1817,6 +1814,14 @@ int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
}
} while (extracted_samples < required_samples && next_packet_available);
+ if (extracted_samples > 0) {
+ // Delete old packets only when we are going to decode something. Otherwise,
+ // we could end up in the situation where we never decode anything, since
+ // all incoming packets are considered too old but the buffer will also
+ // never be flooded and flushed.
+ packet_buffer_->DiscardOldPackets(timestamp_);
+ }
+
return extracted_samples;
}
diff --git a/modules/audio_coding/neteq/neteq_impl.h b/modules/audio_coding/neteq/neteq_impl.h
index d3dff185..cfe6a402 100644
--- a/modules/audio_coding/neteq/neteq_impl.h
+++ b/modules/audio_coding/neteq/neteq_impl.h
@@ -14,6 +14,7 @@
#include <vector>
#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
#include "webrtc/modules/audio_coding/neteq/defines.h"
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
@@ -22,7 +23,6 @@
#include "webrtc/modules/audio_coding/neteq/rtcp.h"
#include "webrtc/modules/audio_coding/neteq/statistics_calculator.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/modules/audio_coding/neteq/test/RTPencode.cc b/modules/audio_coding/neteq/test/RTPencode.cc
index 93b366b9..92bccee2 100644
--- a/modules/audio_coding/neteq/test/RTPencode.cc
+++ b/modules/audio_coding/neteq/test/RTPencode.cc
@@ -1632,9 +1632,13 @@ int NetEQTest_encode(int coder, int16_t *indata, int frameLen, unsigned char * e
cdlen=0;
while (cdlen<=0) {
#ifdef CODEC_ISAC /* floating point */
- cdlen=WebRtcIsac_Encode(ISAC_inst[k],&indata[noOfCalls*160],(int16_t*)encoded);
+ cdlen = WebRtcIsac_Encode(ISAC_inst[k],
+ &indata[noOfCalls * 160],
+ encoded);
#else /* fixed point */
- cdlen=WebRtcIsacfix_Encode(ISAC_inst[k],&indata[noOfCalls*160],(int16_t*)encoded);
+ cdlen = WebRtcIsacfix_Encode(ISAC_inst[k],
+ &indata[noOfCalls * 160],
+ encoded);
#endif
noOfCalls++;
}
@@ -1645,7 +1649,9 @@ int NetEQTest_encode(int coder, int16_t *indata, int frameLen, unsigned char * e
int noOfCalls=0;
cdlen=0;
while (cdlen<=0) {
- cdlen=WebRtcIsac_Encode(ISACSWB_inst[k],&indata[noOfCalls*320],(int16_t*)encoded);
+ cdlen = WebRtcIsac_Encode(ISACSWB_inst[k],
+ &indata[noOfCalls * 320],
+ encoded);
noOfCalls++;
}
}
diff --git a/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
index 6b0f4828..b672a0c2 100644
--- a/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -138,8 +138,7 @@ int NetEqIsacQualityTest::EncodeBlock(int16_t* in_data,
// The Isac encoder does not perform encoding (and returns 0) until it
// receives a sequence of sub-blocks that amount to the frame duration.
EXPECT_EQ(0, value);
- value = WebRtcIsacfix_Encode(isac_encoder_, &in_data[pointer],
- reinterpret_cast<int16_t*>(payload));
+ value = WebRtcIsacfix_Encode(isac_encoder_, &in_data[pointer], payload);
}
EXPECT_GT(value, 0);
return value;
diff --git a/modules/audio_coding/neteq/tools/rtp_generator.cc b/modules/audio_coding/neteq/tools/rtp_generator.cc
index 17ac209f..db9988d9 100644
--- a/modules/audio_coding/neteq/tools/rtp_generator.cc
+++ b/modules/audio_coding/neteq/tools/rtp_generator.cc
@@ -44,5 +44,19 @@ void RtpGenerator::set_drift_factor(double factor) {
}
}
+uint32_t TimestampJumpRtpGenerator::GetRtpHeader(uint8_t payload_type,
+ size_t payload_length_samples,
+ WebRtcRTPHeader* rtp_header) {
+ uint32_t ret = RtpGenerator::GetRtpHeader(
+ payload_type, payload_length_samples, rtp_header);
+ if (timestamp_ - static_cast<uint32_t>(payload_length_samples) <=
+ jump_from_timestamp_ &&
+ timestamp_ > jump_from_timestamp_) {
+ // We just moved across the |jump_from_timestamp_| timestamp. Do the jump.
+ timestamp_ = jump_to_timestamp_;
+ }
+ return ret;
+}
+
} // namespace test
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/rtp_generator.h b/modules/audio_coding/neteq/tools/rtp_generator.h
index d3824c8d..22804361 100644
--- a/modules/audio_coding/neteq/tools/rtp_generator.h
+++ b/modules/audio_coding/neteq/tools/rtp_generator.h
@@ -34,24 +34,50 @@ class RtpGenerator {
drift_factor_(0.0) {
}
+ virtual ~RtpGenerator() {}
+
// Writes the next RTP header to |rtp_header|, which will be of type
// |payload_type|. Returns the send time for this packet (in ms). The value of
// |payload_length_samples| determines the send time for the next packet.
- uint32_t GetRtpHeader(uint8_t payload_type, size_t payload_length_samples,
- WebRtcRTPHeader* rtp_header);
+ virtual uint32_t GetRtpHeader(uint8_t payload_type,
+ size_t payload_length_samples,
+ WebRtcRTPHeader* rtp_header);
void set_drift_factor(double factor);
- private:
+ protected:
uint16_t seq_number_;
uint32_t timestamp_;
uint32_t next_send_time_ms_;
const uint32_t ssrc_;
const int samples_per_ms_;
double drift_factor_;
+
+ private:
DISALLOW_COPY_AND_ASSIGN(RtpGenerator);
};
+class TimestampJumpRtpGenerator : public RtpGenerator {
+ public:
+ TimestampJumpRtpGenerator(int samples_per_ms,
+ uint16_t start_seq_number,
+ uint32_t start_timestamp,
+ uint32_t jump_from_timestamp,
+ uint32_t jump_to_timestamp)
+ : RtpGenerator(samples_per_ms, start_seq_number, start_timestamp),
+ jump_from_timestamp_(jump_from_timestamp),
+ jump_to_timestamp_(jump_to_timestamp) {}
+
+ uint32_t GetRtpHeader(uint8_t payload_type,
+ size_t payload_length_samples,
+ WebRtcRTPHeader* rtp_header) OVERRIDE;
+
+ private:
+ uint32_t jump_from_timestamp_;
+ uint32_t jump_to_timestamp_;
+ DISALLOW_COPY_AND_ASSIGN(TimestampJumpRtpGenerator);
+};
+
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
diff --git a/modules/audio_processing/agc/analog_agc.c b/modules/audio_processing/agc/analog_agc.c
index 0376dae5..32929dda 100644
--- a/modules/audio_processing/agc/analog_agc.c
+++ b/modules/audio_processing/agc/analog_agc.c
@@ -19,7 +19,7 @@
#include <assert.h>
#include <stdlib.h>
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
#include <stdio.h>
#endif
#include "webrtc/modules/audio_processing/agc/analog_agc.h"
@@ -139,10 +139,10 @@ int WebRtcAgc_AddMic(void *state, int16_t *in_mic, int16_t *in_mic_H,
L = 8;
} else
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"AGC->add_mic, frame %d: Invalid number of samples\n\n",
- (stt->fcount + 1));
+ stt->fcount + 1);
#endif
return -1;
}
@@ -160,10 +160,10 @@ int WebRtcAgc_AddMic(void *state, int16_t *in_mic, int16_t *in_mic_H,
L = 16;
} else
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"AGC->add_mic, frame %d: Invalid number of samples\n\n",
- (stt->fcount + 1));
+ stt->fcount + 1);
#endif
return -1;
}
@@ -177,10 +177,10 @@ int WebRtcAgc_AddMic(void *state, int16_t *in_mic, int16_t *in_mic_H,
L = 16;
} else
{
-#ifdef AGC_DEBUG
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"AGC->add_mic, frame %d: Invalid sample rate\n\n",
- (stt->fcount + 1));
+ stt->fcount + 1);
#endif
return -1;
}
@@ -343,7 +343,7 @@ int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, int16_t samples)
{
if ((samples != 80) && (samples != 160))
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"AGC->add_far_end, frame %d: Invalid number of samples\n\n",
stt->fcount);
@@ -355,7 +355,7 @@ int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, int16_t samples)
{
if ((samples != 160) && (samples != 320))
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"AGC->add_far_end, frame %d: Invalid number of samples\n\n",
stt->fcount);
@@ -367,7 +367,7 @@ int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, int16_t samples)
{
if ((samples != 160) && (samples != 320))
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"AGC->add_far_end, frame %d: Invalid number of samples\n\n",
stt->fcount);
@@ -377,7 +377,7 @@ int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, int16_t samples)
subFrames = 160;
} else
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"AGC->add_far_end, frame %d: Invalid sample rate\n\n",
stt->fcount + 1);
@@ -657,10 +657,12 @@ void WebRtcAgc_ZeroCtrl(Agc_t *stt, int32_t *inMicLevel, int32_t *env)
stt->micVol = *inMicLevel;
}
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "\t\tAGC->zeroCntrl, frame %d: 500 ms under threshold, micVol:\n",
- stt->fcount, stt->micVol);
+ "\t\tAGC->zeroCntrl, frame %d: 500 ms under threshold,"
+ " micVol: %d\n",
+ stt->fcount,
+ stt->micVol);
#endif
stt->activeSpeech = 0;
@@ -771,14 +773,18 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
if (inMicLevelTmp > stt->maxAnalog)
{
-#ifdef AGC_DEBUG //test log
- fprintf(stt->fpt, "\tAGC->ProcessAnalog, frame %d: micLvl > maxAnalog\n", stt->fcount);
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ fprintf(stt->fpt,
+ "\tAGC->ProcessAnalog, frame %d: micLvl > maxAnalog\n",
+ stt->fcount);
#endif
return -1;
} else if (inMicLevelTmp < stt->minLevel)
{
-#ifdef AGC_DEBUG //test log
- fprintf(stt->fpt, "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel\n", stt->fcount);
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ fprintf(stt->fpt,
+ "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel\n",
+ stt->fcount);
#endif
return -1;
}
@@ -813,9 +819,10 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
#ifdef MIC_LEVEL_FEEDBACK
//stt->numBlocksMicLvlSat = 0;
#endif
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel by manual decrease, raise vol\n",
+ "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel by manual"
+ " decrease, raise vol\n",
stt->fcount);
#endif
}
@@ -871,10 +878,11 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
}
inMicLevelTmp = stt->micVol;
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"\tAGC->ProcessAnalog, frame %d: saturated, micVol = %d\n",
- stt->fcount, stt->micVol);
+ stt->fcount,
+ stt->micVol);
#endif
if (stt->micVol < stt->minOutput)
@@ -1011,10 +1019,13 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
#ifdef MIC_LEVEL_FEEDBACK
//stt->numBlocksMicLvlSat = 0;
#endif
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "\tAGC->ProcessAnalog, frame %d: measure > 2ndUpperLim, micVol = %d, maxLevel = %d\n",
- stt->fcount, stt->micVol, stt->maxLevel);
+ "\tAGC->ProcessAnalog, frame %d: measure >"
+ " 2ndUpperLim, micVol = %d, maxLevel = %d\n",
+ stt->fcount,
+ stt->micVol,
+ stt->maxLevel);
#endif
}
} else if (stt->Rxx160_LPw32 > stt->upperLimit)
@@ -1054,10 +1065,13 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
#ifdef MIC_LEVEL_FEEDBACK
//stt->numBlocksMicLvlSat = 0;
#endif
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "\tAGC->ProcessAnalog, frame %d: measure > UpperLim, micVol = %d, maxLevel = %d\n",
- stt->fcount, stt->micVol, stt->maxLevel);
+ "\tAGC->ProcessAnalog, frame %d: measure >"
+ " UpperLim, micVol = %d, maxLevel = %d\n",
+ stt->fcount,
+ stt->micVol,
+ stt->maxLevel);
#endif
}
} else if (stt->Rxx160_LPw32 < stt->lowerSecondaryLimit)
@@ -1113,10 +1127,12 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
fprintf(stderr, "Sat mic Level: %d\n", stt->numBlocksMicLvlSat);
}
#endif
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "\tAGC->ProcessAnalog, frame %d: measure < 2ndLowerLim, micVol = %d\n",
- stt->fcount, stt->micVol);
+ "\tAGC->ProcessAnalog, frame %d: measure <"
+ " 2ndLowerLim, micVol = %d\n",
+ stt->fcount,
+ stt->micVol);
#endif
}
} else if (stt->Rxx160_LPw32 < stt->lowerLimit)
@@ -1172,10 +1188,11 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
fprintf(stderr, "Sat mic Level: %d\n", stt->numBlocksMicLvlSat);
}
#endif
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"\tAGC->ProcessAnalog, frame %d: measure < LowerLim, micVol = %d\n",
- stt->fcount, stt->micVol);
+ stt->fcount,
+ stt->micVol);
#endif
}
@@ -1272,9 +1289,10 @@ int WebRtcAgc_Process(void *agcInst, const int16_t *in_near,
{
if ((samples != 80) && (samples != 160))
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "AGC->Process, frame %d: Invalid number of samples\n\n", stt->fcount);
+ "AGC->Process, frame %d: Invalid number of samples\n\n",
+ stt->fcount);
#endif
return -1;
}
@@ -1283,9 +1301,10 @@ int WebRtcAgc_Process(void *agcInst, const int16_t *in_near,
{
if ((samples != 160) && (samples != 320))
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "AGC->Process, frame %d: Invalid number of samples\n\n", stt->fcount);
+ "AGC->Process, frame %d: Invalid number of samples\n\n",
+ stt->fcount);
#endif
return -1;
}
@@ -1294,18 +1313,20 @@ int WebRtcAgc_Process(void *agcInst, const int16_t *in_near,
{
if ((samples != 160) && (samples != 320))
{
-#ifdef AGC_DEBUG //test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "AGC->Process, frame %d: Invalid number of samples\n\n", stt->fcount);
+ "AGC->Process, frame %d: Invalid number of samples\n\n",
+ stt->fcount);
#endif
return -1;
}
subFrames = 160;
} else
{
-#ifdef AGC_DEBUG// test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
- "AGC->Process, frame %d: Invalid sample rate\n\n", stt->fcount);
+ "AGC->Process, frame %d: Invalid sample rate\n\n",
+ stt->fcount);
#endif
return -1;
}
@@ -1341,7 +1362,7 @@ int WebRtcAgc_Process(void *agcInst, const int16_t *in_near,
}
}
-#ifdef AGC_DEBUG//test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
stt->fcount++;
#endif
@@ -1350,8 +1371,10 @@ int WebRtcAgc_Process(void *agcInst, const int16_t *in_near,
if (WebRtcAgc_ProcessDigital(&stt->digitalAgc, &in_near[i], &in_near_H[i], &out[i], &out_H[i],
stt->fs, stt->lowLevelSignal) == -1)
{
-#ifdef AGC_DEBUG//test log
- fprintf(stt->fpt, "AGC->Process, frame %d: Error from DigAGC\n\n", stt->fcount);
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ fprintf(stt->fpt,
+ "AGC->Process, frame %d: Error from DigAGC\n\n",
+ stt->fcount);
#endif
return -1;
}
@@ -1364,8 +1387,14 @@ int WebRtcAgc_Process(void *agcInst, const int16_t *in_near,
return -1;
}
}
-#ifdef AGC_DEBUG//test log
- fprintf(stt->agcLog, "%5d\t%d\t%d\t%d\n", stt->fcount, inMicLevelTmp, *outMicLevel, stt->maxLevel, stt->micVol);
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ fprintf(stt->agcLog,
+ "%5d\t%d\t%d\t%d\t%d\n",
+ stt->fcount,
+ inMicLevelTmp,
+ *outMicLevel,
+ stt->maxLevel,
+ stt->micVol);
#endif
/* update queue */
@@ -1441,8 +1470,10 @@ int WebRtcAgc_set_config(void *agcInst, WebRtcAgc_config_t agcConfig)
if (WebRtcAgc_CalculateGainTable(&(stt->digitalAgc.gainTable[0]), stt->compressionGaindB,
stt->targetLevelDbfs, stt->limiterEnable, stt->analogTarget) == -1)
{
-#ifdef AGC_DEBUG//test log
- fprintf(stt->fpt, "AGC->set_config, frame %d: Error from calcGainTable\n\n", stt->fcount);
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ fprintf(stt->fpt,
+ "AGC->set_config, frame %d: Error from calcGainTable\n\n",
+ stt->fcount);
#endif
return -1;
}
@@ -1498,7 +1529,7 @@ int WebRtcAgc_Create(void **agcInst)
return -1;
}
-#ifdef AGC_DEBUG
+#ifdef WEBRTC_AGC_DEBUG_DUMP
stt->fpt = fopen("./agc_test_log.txt", "wt");
stt->agcLog = fopen("./agc_debug_log.txt", "wt");
stt->digitalAgc.logFile = fopen("./agc_log.txt", "wt");
@@ -1515,7 +1546,7 @@ int WebRtcAgc_Free(void *state)
Agc_t *stt;
stt = (Agc_t *)state;
-#ifdef AGC_DEBUG
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fclose(stt->fpt);
fclose(stt->agcLog);
fclose(stt->digitalAgc.logFile);
@@ -1553,13 +1584,13 @@ int WebRtcAgc_Init(void *agcInst, int32_t minLevel, int32_t maxLevel,
* 2 - Digital Automatic Gain Control [-targetLevelDbfs (default -3 dBOv)]
* 3 - Fixed Digital Gain [compressionGaindB (default 8 dB)]
*/
-#ifdef AGC_DEBUG//test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
stt->fcount = 0;
fprintf(stt->fpt, "AGC->Init\n");
#endif
if (agcMode < kAgcModeUnchanged || agcMode > kAgcModeFixedDigital)
{
-#ifdef AGC_DEBUG//test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt, "AGC->Init: error, incorrect mode\n\n");
#endif
return -1;
@@ -1616,10 +1647,12 @@ int WebRtcAgc_Init(void *agcInst, int32_t minLevel, int32_t maxLevel,
stt->numBlocksMicLvlSat = 0;
stt->micLvlSat = 0;
#endif
-#ifdef AGC_DEBUG//test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt,
"AGC->Init: minLevel = %d, maxAnalog = %d, maxLevel = %d\n",
- stt->minLevel, stt->maxAnalog, stt->maxLevel);
+ stt->minLevel,
+ stt->maxAnalog,
+ stt->maxLevel);
#endif
/* Minimum output volume is 4% higher than the available lowest volume level */
@@ -1687,13 +1720,13 @@ int WebRtcAgc_Init(void *agcInst, int32_t minLevel, int32_t maxLevel,
/* Only positive values are allowed that are not too large */
if ((minLevel >= maxLevel) || (maxLevel & 0xFC000000))
{
-#ifdef AGC_DEBUG//test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt, "minLevel, maxLevel value(s) are invalid\n\n");
#endif
return -1;
} else
{
-#ifdef AGC_DEBUG//test log
+#ifdef WEBRTC_AGC_DEBUG_DUMP
fprintf(stt->fpt, "\n");
#endif
return 0;
diff --git a/modules/audio_processing/agc/analog_agc.h b/modules/audio_processing/agc/analog_agc.h
index b036f449..19b324f1 100644
--- a/modules/audio_processing/agc/analog_agc.h
+++ b/modules/audio_processing/agc/analog_agc.h
@@ -15,9 +15,8 @@
#include "webrtc/modules/audio_processing/agc/include/gain_control.h"
#include "webrtc/typedefs.h"
-//#define AGC_DEBUG
//#define MIC_LEVEL_FEEDBACK
-#ifdef AGC_DEBUG
+#ifdef WEBRTC_AGC_DEBUG_DUMP
#include <stdio.h>
#endif
@@ -122,10 +121,10 @@ typedef struct
AgcVad_t vadMic;
DigitalAgc_t digitalAgc;
-#ifdef AGC_DEBUG
- FILE* fpt;
- FILE* agcLog;
- int32_t fcount;
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ FILE* fpt;
+ FILE* agcLog;
+ int32_t fcount;
#endif
int16_t lowLevelSignal;
diff --git a/modules/audio_processing/agc/digital_agc.c b/modules/audio_processing/agc/digital_agc.c
index e74bb4c0..da087ca3 100644
--- a/modules/audio_processing/agc/digital_agc.c
+++ b/modules/audio_processing/agc/digital_agc.c
@@ -16,7 +16,7 @@
#include <assert.h>
#include <string.h>
-#ifdef AGC_DEBUG
+#ifdef WEBRTC_AGC_DEBUG_DUMP
#include <stdio.h>
#endif
@@ -274,7 +274,7 @@ int32_t WebRtcAgc_InitDigital(DigitalAgc_t *stt, int16_t agcMode)
stt->gain = 65536;
stt->gatePrevious = 0;
stt->agcMode = agcMode;
-#ifdef AGC_DEBUG
+#ifdef WEBRTC_AGC_DEBUG_DUMP
stt->frameCounter = 0;
#endif
@@ -397,9 +397,14 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
decay = 0;
}
}
-#ifdef AGC_DEBUG
+#ifdef WEBRTC_AGC_DEBUG_DUMP
stt->frameCounter++;
- fprintf(stt->logFile, "%5.2f\t%d\t%d\t%d\t", (float)(stt->frameCounter) / 100, logratio, decay, stt->vadNearend.stdLongTerm);
+ fprintf(stt->logFile,
+ "%5.2f\t%d\t%d\t%d\t",
+ (float)(stt->frameCounter) / 100,
+ logratio,
+ decay,
+ stt->vadNearend.stdLongTerm);
#endif
// Find max amplitude per sub frame
// iterate over sub frames
@@ -461,10 +466,15 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
frac = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 19); // Q12
tmp32 = WEBRTC_SPL_MUL((stt->gainTable[zeros-1] - stt->gainTable[zeros]), frac);
gains[k + 1] = stt->gainTable[zeros] + WEBRTC_SPL_RSHIFT_W32(tmp32, 12);
-#ifdef AGC_DEBUG
- if (k == 0)
- {
- fprintf(stt->logFile, "%d\t%d\t%d\t%d\t%d\n", env[0], cur_level, stt->capacitorFast, stt->capacitorSlow, zeros);
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ if (k == 0) {
+ fprintf(stt->logFile,
+ "%d\t%d\t%d\t%d\t%d\n",
+ env[0],
+ cur_level,
+ stt->capacitorFast,
+ stt->capacitorSlow,
+ zeros);
}
#endif
}
diff --git a/modules/audio_processing/agc/digital_agc.h b/modules/audio_processing/agc/digital_agc.h
index 6bd086fc..3dcd9375 100644
--- a/modules/audio_processing/agc/digital_agc.h
+++ b/modules/audio_processing/agc/digital_agc.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_DIGITAL_AGC_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_DIGITAL_AGC_H_
-#ifdef AGC_DEBUG
+#ifdef WEBRTC_AGC_DEBUG_DUMP
#include <stdio.h>
#endif
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
@@ -46,9 +46,9 @@ typedef struct
int16_t agcMode;
AgcVad_t vadNearend;
AgcVad_t vadFarend;
-#ifdef AGC_DEBUG
- FILE* logFile;
- int frameCounter;
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ FILE* logFile;
+ int frameCounter;
#endif
} DigitalAgc_t;
diff --git a/modules/audio_processing/audio_processing.gypi b/modules/audio_processing/audio_processing.gypi
index 9298f118..9bbcfae7 100644
--- a/modules/audio_processing/audio_processing.gypi
+++ b/modules/audio_processing/audio_processing.gypi
@@ -21,6 +21,7 @@
'variables': {
# Outputs some low-level debug files.
'aec_debug_dump%': 0,
+ 'agc_debug_dump%': 0,
# Disables the usual mode where we trust the reported system delay
# values the AEC receives. The corresponding define is set appropriately
@@ -93,6 +94,9 @@
['aec_untrusted_delay_for_testing==1', {
'defines': ['WEBRTC_UNTRUSTED_DELAY',],
}],
+ ['agc_debug_dump==1', {
+ 'defines': ['WEBRTC_AGC_DEBUG_DUMP',],
+ }],
['enable_protobuf==1', {
'dependencies': ['audioproc_debug_proto'],
'defines': ['WEBRTC_AUDIOPROC_DEBUG_DUMP'],
diff --git a/modules/audio_processing/ns/ns_core.c b/modules/audio_processing/ns/ns_core.c
index 0d60aafc..285e4048 100644
--- a/modules/audio_processing/ns/ns_core.c
+++ b/modules/audio_processing/ns/ns_core.c
@@ -10,8 +10,8 @@
#include <math.h>
#include <string.h>
-//#include <stdio.h>
#include <stdlib.h>
+
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/ns/include/noise_suppression.h"
#include "webrtc/modules/audio_processing/ns/ns_core.h"
@@ -20,37 +20,40 @@
// Set Feature Extraction Parameters
void WebRtcNs_set_feature_extraction_parameters(NSinst_t* inst) {
- //bin size of histogram
- inst->featureExtractionParams.binSizeLrt = (float)0.1;
+ // bin size of histogram
+ inst->featureExtractionParams.binSizeLrt = (float)0.1;
inst->featureExtractionParams.binSizeSpecFlat = (float)0.05;
inst->featureExtractionParams.binSizeSpecDiff = (float)0.1;
- //range of histogram over which lrt threshold is computed
+ // range of histogram over which lrt threshold is computed
inst->featureExtractionParams.rangeAvgHistLrt = (float)1.0;
- //scale parameters: multiply dominant peaks of the histograms by scale factor to obtain
- // thresholds for prior model
- inst->featureExtractionParams.factor1ModelPars = (float)1.20; //for lrt and spectral diff
- inst->featureExtractionParams.factor2ModelPars = (float)0.9; //for spectral_flatness:
+ // scale parameters: multiply dominant peaks of the histograms by scale factor
+ // to obtain thresholds for prior model
+ inst->featureExtractionParams.factor1ModelPars =
+ (float)1.20; // for lrt and spectral diff
+ inst->featureExtractionParams.factor2ModelPars =
+ (float)0.9; // for spectral_flatness:
// used when noise is flatter than speech
- //peak limit for spectral flatness (varies between 0 and 1)
+ // peak limit for spectral flatness (varies between 0 and 1)
inst->featureExtractionParams.thresPosSpecFlat = (float)0.6;
- //limit on spacing of two highest peaks in histogram: spacing determined by bin size
+ // limit on spacing of two highest peaks in histogram: spacing determined by
+ // bin size
inst->featureExtractionParams.limitPeakSpacingSpecFlat =
2 * inst->featureExtractionParams.binSizeSpecFlat;
inst->featureExtractionParams.limitPeakSpacingSpecDiff =
2 * inst->featureExtractionParams.binSizeSpecDiff;
- //limit on relevance of second peak:
+ // limit on relevance of second peak:
inst->featureExtractionParams.limitPeakWeightsSpecFlat = (float)0.5;
inst->featureExtractionParams.limitPeakWeightsSpecDiff = (float)0.5;
// fluctuation limit of lrt feature
inst->featureExtractionParams.thresFluctLrt = (float)0.05;
- //limit on the max and min values for the feature thresholds
+ // limit on the max and min values for the feature thresholds
inst->featureExtractionParams.maxLrt = (float)1.0;
inst->featureExtractionParams.minLrt = (float)0.20;
@@ -60,19 +63,19 @@ void WebRtcNs_set_feature_extraction_parameters(NSinst_t* inst) {
inst->featureExtractionParams.maxSpecDiff = (float)1.0;
inst->featureExtractionParams.minSpecDiff = (float)0.16;
- //criteria of weight of histogram peak to accept/reject feature
- inst->featureExtractionParams.thresWeightSpecFlat = (int)(0.3
- * (inst->modelUpdatePars[1])); //for spectral flatness
- inst->featureExtractionParams.thresWeightSpecDiff = (int)(0.3
- * (inst->modelUpdatePars[1])); //for spectral difference
+ // criteria of weight of histogram peak to accept/reject feature
+ inst->featureExtractionParams.thresWeightSpecFlat =
+ (int)(0.3 * (inst->modelUpdatePars[1])); // for spectral flatness
+ inst->featureExtractionParams.thresWeightSpecDiff =
+ (int)(0.3 * (inst->modelUpdatePars[1])); // for spectral difference
}
// Initialize state
int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs) {
int i;
- //We only support 10ms frames
+ // We only support 10ms frames
- //check for valid pointer
+ // check for valid pointer
if (inst == NULL) {
return -1;
}
@@ -106,20 +109,21 @@ int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs) {
inst->window = kBlocks160w256;
inst->outLen = 0;
}
- inst->magnLen = inst->anaLen / 2 + 1; // Number of frequency bins
+ inst->magnLen = inst->anaLen / 2 + 1; // Number of frequency bins
// Initialize fft work arrays.
- inst->ip[0] = 0; // Setting this triggers initialization.
+ inst->ip[0] = 0; // Setting this triggers initialization.
memset(inst->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
WebRtc_rdft(inst->anaLen, 1, inst->dataBuf, inst->ip, inst->wfft);
+ memset(inst->analyzeBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
memset(inst->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
memset(inst->syntBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
- //for HB processing
+ // for HB processing
memset(inst->dataBufHB, 0, sizeof(float) * ANAL_BLOCKL_MAX);
- //for quantile noise estimation
+ // for quantile noise estimation
memset(inst->quantile, 0, sizeof(float) * HALF_ANAL_BLOCKL);
for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++) {
inst->lquantile[i] = (float)8.0;
@@ -127,7 +131,8 @@ int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs) {
}
for (i = 0; i < SIMULT; i++) {
- inst->counter[i] = (int)floor((float)(END_STARTUP_LONG * (i + 1)) / (float)SIMULT);
+ inst->counter[i] =
+ (int)floor((float)(END_STARTUP_LONG * (i + 1)) / (float)SIMULT);
}
inst->updates = 0;
@@ -140,52 +145,63 @@ int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs) {
// Set the aggressiveness: default
inst->aggrMode = 0;
- //initialize variables for new method
- inst->priorSpeechProb = (float)0.5; //prior prob for speech/noise
+ // initialize variables for new method
+ inst->priorSpeechProb = (float)0.5; // prior prob for speech/noise
for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
- inst->magnPrev[i] = (float)0.0; //previous mag spectrum
- inst->noisePrev[i] = (float)0.0; //previous noise-spectrum
- inst->logLrtTimeAvg[i] = LRT_FEATURE_THR; //smooth LR ratio (same as threshold)
- inst->magnAvgPause[i] = (float)0.0; //conservative noise spectrum estimate
- inst->speechProbHB[i] = (float)0.0; //for estimation of HB in second pass
- inst->initMagnEst[i] = (float)0.0; //initial average mag spectrum
+ inst->magnPrev[i] = (float)0.0; // previous mag spectrum
+ inst->noisePrev[i] = (float)0.0; // previous noise-spectrum
+ inst->logLrtTimeAvg[i] =
+ LRT_FEATURE_THR; // smooth LR ratio (same as threshold)
+ inst->magnAvgPause[i] = (float)0.0; // conservative noise spectrum estimate
+ inst->speechProb[i] = (float)0.0; // for estimation of HB in second pass
+ inst->initMagnEst[i] = (float)0.0; // initial average mag spectrum
}
- //feature quantities
- inst->featureData[0] = SF_FEATURE_THR; //spectral flatness (start on threshold)
- inst->featureData[1] = (float)0.0; //spectral entropy: not used in this version
- inst->featureData[2] = (float)0.0; //spectral variance: not used in this version
- inst->featureData[3] = LRT_FEATURE_THR; //average lrt factor (start on threshold)
- inst->featureData[4] = SF_FEATURE_THR; //spectral template diff (start on threshold)
- inst->featureData[5] = (float)0.0; //normalization for spectral-diff
- inst->featureData[6] = (float)0.0; //window time-average of input magnitude spectrum
-
- //histogram quantities: used to estimate/update thresholds for features
+ // feature quantities
+ inst->featureData[0] =
+ SF_FEATURE_THR; // spectral flatness (start on threshold)
+ inst->featureData[1] =
+ (float)0.0; // spectral entropy: not used in this version
+ inst->featureData[2] =
+ (float)0.0; // spectral variance: not used in this version
+ inst->featureData[3] =
+ LRT_FEATURE_THR; // average lrt factor (start on threshold)
+ inst->featureData[4] =
+ SF_FEATURE_THR; // spectral template diff (start on threshold)
+ inst->featureData[5] = (float)0.0; // normalization for spectral-diff
+ inst->featureData[6] =
+ (float)0.0; // window time-average of input magnitude spectrum
+
+ // histogram quantities: used to estimate/update thresholds for features
for (i = 0; i < HIST_PAR_EST; i++) {
inst->histLrt[i] = 0;
inst->histSpecFlat[i] = 0;
inst->histSpecDiff[i] = 0;
}
- inst->blockInd = -1; //frame counter
- inst->priorModelPars[0] = LRT_FEATURE_THR; //default threshold for lrt feature
- inst->priorModelPars[1] = (float)0.5; //threshold for spectral flatness:
+ inst->blockInd = -1; // frame counter
+ inst->priorModelPars[0] =
+ LRT_FEATURE_THR; // default threshold for lrt feature
+ inst->priorModelPars[1] = (float)0.5; // threshold for spectral flatness:
// determined on-line
- inst->priorModelPars[2] = (float)1.0; //sgn_map par for spectral measure:
+ inst->priorModelPars[2] = (float)1.0; // sgn_map par for spectral measure:
// 1 for flatness measure
- inst->priorModelPars[3] = (float)0.5; //threshold for template-difference feature:
+ inst->priorModelPars[3] =
+ (float)0.5; // threshold for template-difference feature:
// determined on-line
- inst->priorModelPars[4] = (float)1.0; //default weighting parameter for lrt feature
- inst->priorModelPars[5] = (float)0.0; //default weighting parameter for
+ inst->priorModelPars[4] =
+ (float)1.0; // default weighting parameter for lrt feature
+ inst->priorModelPars[5] = (float)0.0; // default weighting parameter for
// spectral flatness feature
- inst->priorModelPars[6] = (float)0.0; //default weighting parameter for
+ inst->priorModelPars[6] = (float)0.0; // default weighting parameter for
// spectral difference feature
- inst->modelUpdatePars[0] = 2; //update flag for parameters:
+ inst->modelUpdatePars[0] = 2; // update flag for parameters:
// 0 no update, 1=update once, 2=update every window
- inst->modelUpdatePars[1] = 500; //window for update
- inst->modelUpdatePars[2] = 0; //counter for update of conservative noise spectrum
- //counter if the feature thresholds are updated during the sequence
+ inst->modelUpdatePars[1] = 500; // window for update
+ inst->modelUpdatePars[2] =
+ 0; // counter for update of conservative noise spectrum
+ // counter if the feature thresholds are updated during the sequence
inst->modelUpdatePars[3] = inst->modelUpdatePars[1];
inst->signalEnergy = 0.0;
@@ -194,12 +210,11 @@ int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs) {
inst->pinkNoiseNumerator = 0.0;
inst->pinkNoiseExp = 0.0;
- WebRtcNs_set_feature_extraction_parameters(inst); // Set feature configuration
+ WebRtcNs_set_feature_extraction_parameters(inst);
- //default mode
+ // default mode
WebRtcNs_set_policy_core(inst, 0);
-
memset(inst->outBuf, 0, sizeof(float) * 3 * BLOCKL_MAX);
inst->initFlag = 1;
@@ -218,17 +233,17 @@ int WebRtcNs_set_policy_core(NSinst_t* inst, int mode) {
inst->denoiseBound = (float)0.5;
inst->gainmap = 0;
} else if (mode == 1) {
- //inst->overdrive = (float)1.25;
+ // inst->overdrive = (float)1.25;
inst->overdrive = (float)1.0;
inst->denoiseBound = (float)0.25;
inst->gainmap = 1;
} else if (mode == 2) {
- //inst->overdrive = (float)1.25;
+ // inst->overdrive = (float)1.25;
inst->overdrive = (float)1.1;
inst->denoiseBound = (float)0.125;
inst->gainmap = 1;
} else if (mode == 3) {
- //inst->overdrive = (float)1.30;
+ // inst->overdrive = (float)1.30;
inst->overdrive = (float)1.25;
inst->denoiseBound = (float)0.09;
inst->gainmap = 1;
@@ -264,17 +279,19 @@ void WebRtcNs_NoiseEstimation(NSinst_t* inst, float* magn, float* noise) {
// update log quantile estimate
if (lmagn[i] > inst->lquantile[offset + i]) {
- inst->lquantile[offset + i] += QUANTILE * delta
- / (float)(inst->counter[s] + 1);
+ inst->lquantile[offset + i] +=
+ QUANTILE * delta / (float)(inst->counter[s] + 1);
} else {
- inst->lquantile[offset + i] -= ((float)1.0 - QUANTILE) * delta
- / (float)(inst->counter[s] + 1);
+ inst->lquantile[offset + i] -=
+ ((float)1.0 - QUANTILE) * delta / (float)(inst->counter[s] + 1);
}
// update density estimate
if (fabs(lmagn[i] - inst->lquantile[offset + i]) < WIDTH) {
- inst->density[offset + i] = ((float)inst->counter[s] * inst->density[offset
- + i] + (float)1.0 / ((float)2.0 * WIDTH)) / (float)(inst->counter[s] + 1);
+ inst->density[offset + i] =
+ ((float)inst->counter[s] * inst->density[offset + i] +
+ (float)1.0 / ((float)2.0 * WIDTH)) /
+ (float)(inst->counter[s] + 1);
}
} // end loop over magnitude spectrum
@@ -304,57 +321,66 @@ void WebRtcNs_NoiseEstimation(NSinst_t* inst, float* magn, float* noise) {
}
// Extract thresholds for feature parameters
-// histograms are computed over some window_size (given by inst->modelUpdatePars[1])
+// histograms are computed over some window_size (given by
+// inst->modelUpdatePars[1])
// thresholds and weights are extracted every window
-// flag 0 means update histogram only, flag 1 means compute the thresholds/weights
+// flag 0 means update histogram only, flag 1 means compute the
+// thresholds/weights
// threshold and weights are returned in: inst->priorModelPars
void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
int i, useFeatureSpecFlat, useFeatureSpecDiff, numHistLrt;
int maxPeak1, maxPeak2;
- int weightPeak1SpecFlat, weightPeak2SpecFlat, weightPeak1SpecDiff, weightPeak2SpecDiff;
+ int weightPeak1SpecFlat, weightPeak2SpecFlat, weightPeak1SpecDiff,
+ weightPeak2SpecDiff;
float binMid, featureSum;
float posPeak1SpecFlat, posPeak2SpecFlat, posPeak1SpecDiff, posPeak2SpecDiff;
float fluctLrt, avgHistLrt, avgSquareHistLrt, avgHistLrtCompl;
- //3 features: lrt, flatness, difference
- //lrt_feature = inst->featureData[3];
- //flat_feature = inst->featureData[0];
- //diff_feature = inst->featureData[4];
+ // 3 features: lrt, flatness, difference
+ // lrt_feature = inst->featureData[3];
+ // flat_feature = inst->featureData[0];
+ // diff_feature = inst->featureData[4];
- //update histograms
+ // update histograms
if (flag == 0) {
// LRT
- if ((inst->featureData[3] < HIST_PAR_EST * inst->featureExtractionParams.binSizeLrt)
- && (inst->featureData[3] >= 0.0)) {
- i = (int)(inst->featureData[3] / inst->featureExtractionParams.binSizeLrt);
+ if ((inst->featureData[3] <
+ HIST_PAR_EST * inst->featureExtractionParams.binSizeLrt) &&
+ (inst->featureData[3] >= 0.0)) {
+ i = (int)(inst->featureData[3] /
+ inst->featureExtractionParams.binSizeLrt);
inst->histLrt[i]++;
}
// Spectral flatness
- if ((inst->featureData[0] < HIST_PAR_EST
- * inst->featureExtractionParams.binSizeSpecFlat)
- && (inst->featureData[0] >= 0.0)) {
- i = (int)(inst->featureData[0] / inst->featureExtractionParams.binSizeSpecFlat);
+ if ((inst->featureData[0] <
+ HIST_PAR_EST * inst->featureExtractionParams.binSizeSpecFlat) &&
+ (inst->featureData[0] >= 0.0)) {
+ i = (int)(inst->featureData[0] /
+ inst->featureExtractionParams.binSizeSpecFlat);
inst->histSpecFlat[i]++;
}
// Spectral difference
- if ((inst->featureData[4] < HIST_PAR_EST
- * inst->featureExtractionParams.binSizeSpecDiff)
- && (inst->featureData[4] >= 0.0)) {
- i = (int)(inst->featureData[4] / inst->featureExtractionParams.binSizeSpecDiff);
+ if ((inst->featureData[4] <
+ HIST_PAR_EST * inst->featureExtractionParams.binSizeSpecDiff) &&
+ (inst->featureData[4] >= 0.0)) {
+ i = (int)(inst->featureData[4] /
+ inst->featureExtractionParams.binSizeSpecDiff);
inst->histSpecDiff[i]++;
}
}
// extract parameters for speech/noise probability
if (flag == 1) {
- //lrt feature: compute the average over inst->featureExtractionParams.rangeAvgHistLrt
+ // lrt feature: compute the average over
+ // inst->featureExtractionParams.rangeAvgHistLrt
avgHistLrt = 0.0;
avgHistLrtCompl = 0.0;
avgSquareHistLrt = 0.0;
numHistLrt = 0;
for (i = 0; i < HIST_PAR_EST; i++) {
- binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeLrt;
+ binMid =
+ ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeLrt;
if (binMid <= inst->featureExtractionParams.rangeAvgHistLrt) {
avgHistLrt += inst->histLrt[i] * binMid;
numHistLrt += inst->histLrt[i];
@@ -370,11 +396,11 @@ void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
fluctLrt = avgSquareHistLrt - avgHistLrt * avgHistLrtCompl;
// get threshold for lrt feature:
if (fluctLrt < inst->featureExtractionParams.thresFluctLrt) {
- //very low fluct, so likely noise
+ // very low fluct, so likely noise
inst->priorModelPars[0] = inst->featureExtractionParams.maxLrt;
} else {
- inst->priorModelPars[0] = inst->featureExtractionParams.factor1ModelPars
- * avgHistLrt;
+ inst->priorModelPars[0] =
+ inst->featureExtractionParams.factor1ModelPars * avgHistLrt;
// check if value is within min/max range
if (inst->priorModelPars[0] < inst->featureExtractionParams.minLrt) {
inst->priorModelPars[0] = inst->featureExtractionParams.minLrt;
@@ -385,8 +411,8 @@ void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
}
// done with lrt feature
- //
- // for spectral flatness and spectral difference: compute the main peaks of histogram
+ // for spectral flatness and spectral difference: compute the main peaks of
+ // histogram
maxPeak1 = 0;
maxPeak2 = 0;
posPeak1SpecFlat = 0.0;
@@ -396,7 +422,8 @@ void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
// peaks for flatness
for (i = 0; i < HIST_PAR_EST; i++) {
- binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeSpecFlat;
+ binMid = ((float)i + (float)0.5) *
+ inst->featureExtractionParams.binSizeSpecFlat;
if (inst->histSpecFlat[i] > maxPeak1) {
// Found new "first" peak
maxPeak2 = maxPeak1;
@@ -414,7 +441,7 @@ void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
}
}
- //compute two peaks for spectral difference
+ // compute two peaks for spectral difference
maxPeak1 = 0;
maxPeak2 = 0;
posPeak1SpecDiff = 0.0;
@@ -423,7 +450,8 @@ void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
weightPeak2SpecDiff = 0;
// peaks for spectral difference
for (i = 0; i < HIST_PAR_EST; i++) {
- binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeSpecDiff;
+ binMid = ((float)i + (float)0.5) *
+ inst->featureExtractionParams.binSizeSpecDiff;
if (inst->histSpecDiff[i] > maxPeak1) {
// Found new "first" peak
maxPeak2 = maxPeak1;
@@ -444,25 +472,26 @@ void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
// for spectrum flatness feature
useFeatureSpecFlat = 1;
// merge the two peaks if they are close
- if ((fabs(posPeak2SpecFlat - posPeak1SpecFlat)
- < inst->featureExtractionParams.limitPeakSpacingSpecFlat)
- && (weightPeak2SpecFlat
- > inst->featureExtractionParams.limitPeakWeightsSpecFlat
- * weightPeak1SpecFlat)) {
+ if ((fabs(posPeak2SpecFlat - posPeak1SpecFlat) <
+ inst->featureExtractionParams.limitPeakSpacingSpecFlat) &&
+ (weightPeak2SpecFlat >
+ inst->featureExtractionParams.limitPeakWeightsSpecFlat *
+ weightPeak1SpecFlat)) {
weightPeak1SpecFlat += weightPeak2SpecFlat;
posPeak1SpecFlat = (float)0.5 * (posPeak1SpecFlat + posPeak2SpecFlat);
}
- //reject if weight of peaks is not large enough, or peak value too small
- if (weightPeak1SpecFlat < inst->featureExtractionParams.thresWeightSpecFlat
- || posPeak1SpecFlat < inst->featureExtractionParams.thresPosSpecFlat) {
+ // reject if weight of peaks is not large enough, or peak value too small
+ if (weightPeak1SpecFlat <
+ inst->featureExtractionParams.thresWeightSpecFlat ||
+ posPeak1SpecFlat < inst->featureExtractionParams.thresPosSpecFlat) {
useFeatureSpecFlat = 0;
}
// if selected, get the threshold
if (useFeatureSpecFlat == 1) {
// compute the threshold
- inst->priorModelPars[1] = inst->featureExtractionParams.factor2ModelPars
- * posPeak1SpecFlat;
- //check if value is within min/max range
+ inst->priorModelPars[1] =
+ inst->featureExtractionParams.factor2ModelPars * posPeak1SpecFlat;
+ // check if value is within min/max range
if (inst->priorModelPars[1] < inst->featureExtractionParams.minSpecFlat) {
inst->priorModelPars[1] = inst->featureExtractionParams.minSpecFlat;
}
@@ -475,22 +504,23 @@ void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
// for template feature
useFeatureSpecDiff = 1;
// merge the two peaks if they are close
- if ((fabs(posPeak2SpecDiff - posPeak1SpecDiff)
- < inst->featureExtractionParams.limitPeakSpacingSpecDiff)
- && (weightPeak2SpecDiff
- > inst->featureExtractionParams.limitPeakWeightsSpecDiff
- * weightPeak1SpecDiff)) {
+ if ((fabs(posPeak2SpecDiff - posPeak1SpecDiff) <
+ inst->featureExtractionParams.limitPeakSpacingSpecDiff) &&
+ (weightPeak2SpecDiff >
+ inst->featureExtractionParams.limitPeakWeightsSpecDiff *
+ weightPeak1SpecDiff)) {
weightPeak1SpecDiff += weightPeak2SpecDiff;
posPeak1SpecDiff = (float)0.5 * (posPeak1SpecDiff + posPeak2SpecDiff);
}
// get the threshold value
- inst->priorModelPars[3] = inst->featureExtractionParams.factor1ModelPars
- * posPeak1SpecDiff;
- //reject if weight of peaks is not large enough
- if (weightPeak1SpecDiff < inst->featureExtractionParams.thresWeightSpecDiff) {
+ inst->priorModelPars[3] =
+ inst->featureExtractionParams.factor1ModelPars * posPeak1SpecDiff;
+ // reject if weight of peaks is not large enough
+ if (weightPeak1SpecDiff <
+ inst->featureExtractionParams.thresWeightSpecDiff) {
useFeatureSpecDiff = 0;
}
- //check if value is within min/max range
+ // check if value is within min/max range
if (inst->priorModelPars[3] < inst->featureExtractionParams.minSpecDiff) {
inst->priorModelPars[3] = inst->featureExtractionParams.minSpecDiff;
}
@@ -530,7 +560,7 @@ void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
// spectral flatness is returned in inst->featureData[0]
void WebRtcNs_ComputeSpectralFlatness(NSinst_t* inst, float* magnIn) {
int i;
- int shiftLP = 1; //option to remove first bin(s) from spectral measures
+ int shiftLP = 1; // option to remove first bin(s) from spectral measures
float avgSpectralFlatnessNum, avgSpectralFlatnessDen, spectralTmp;
// comute spectral measures
@@ -540,7 +570,8 @@ void WebRtcNs_ComputeSpectralFlatness(NSinst_t* inst, float* magnIn) {
for (i = 0; i < shiftLP; i++) {
avgSpectralFlatnessDen -= magnIn[i];
}
- // compute log of ratio of the geometric to arithmetic mean: check for log(0) case
+ // compute log of ratio of the geometric to arithmetic mean: check for log(0)
+ // case
for (i = shiftLP; i < inst->magnLen; i++) {
if (magnIn[i] > 0.0) {
avgSpectralFlatnessNum += (float)log(magnIn[i]);
@@ -549,24 +580,26 @@ void WebRtcNs_ComputeSpectralFlatness(NSinst_t* inst, float* magnIn) {
return;
}
}
- //normalize
+ // normalize
avgSpectralFlatnessDen = avgSpectralFlatnessDen / inst->magnLen;
avgSpectralFlatnessNum = avgSpectralFlatnessNum / inst->magnLen;
- //ratio and inverse log: check for case of log(0)
+ // ratio and inverse log: check for case of log(0)
spectralTmp = (float)exp(avgSpectralFlatnessNum) / avgSpectralFlatnessDen;
- //time-avg update of spectral flatness feature
+ // time-avg update of spectral flatness feature
inst->featureData[0] += SPECT_FL_TAVG * (spectralTmp - inst->featureData[0]);
// done with flatness feature
}
-// Compute the difference measure between input spectrum and a template/learned noise spectrum
+// Compute the difference measure between input spectrum and a template/learned
+// noise spectrum
// magnIn is the input spectrum
// the reference/template spectrum is inst->magnAvgPause[i]
// returns (normalized) spectral difference in inst->featureData[4]
void WebRtcNs_ComputeSpectralDifference(NSinst_t* inst, float* magnIn) {
- // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 / var(magnAvgPause)
+ // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 /
+ // var(magnAvgPause)
int i;
float avgPause, avgMagn, covMagnPause, varPause, varMagn, avgDiffNormMagn;
@@ -574,7 +607,7 @@ void WebRtcNs_ComputeSpectralDifference(NSinst_t* inst, float* magnIn) {
avgMagn = inst->sumMagn;
// compute average quantities
for (i = 0; i < inst->magnLen; i++) {
- //conservative smooth noise spectrum from pause frames
+ // conservative smooth noise spectrum from pause frames
avgPause += inst->magnAvgPause[i];
}
avgPause = avgPause / ((float)inst->magnLen);
@@ -586,7 +619,8 @@ void WebRtcNs_ComputeSpectralDifference(NSinst_t* inst, float* magnIn) {
// compute variance and covariance quantities
for (i = 0; i < inst->magnLen; i++) {
covMagnPause += (magnIn[i] - avgMagn) * (inst->magnAvgPause[i] - avgPause);
- varPause += (inst->magnAvgPause[i] - avgPause) * (inst->magnAvgPause[i] - avgPause);
+ varPause +=
+ (inst->magnAvgPause[i] - avgPause) * (inst->magnAvgPause[i] - avgPause);
varMagn += (magnIn[i] - avgMagn) * (magnIn[i] - avgMagn);
}
covMagnPause = covMagnPause / ((float)inst->magnLen);
@@ -595,19 +629,24 @@ void WebRtcNs_ComputeSpectralDifference(NSinst_t* inst, float* magnIn) {
// update of average magnitude spectrum
inst->featureData[6] += inst->signalEnergy;
- avgDiffNormMagn = varMagn - (covMagnPause * covMagnPause) / (varPause + (float)0.0001);
+ avgDiffNormMagn =
+ varMagn - (covMagnPause * covMagnPause) / (varPause + (float)0.0001);
// normalize and compute time-avg update of difference feature
- avgDiffNormMagn = (float)(avgDiffNormMagn / (inst->featureData[5] + (float)0.0001));
- inst->featureData[4] += SPECT_DIFF_TAVG * (avgDiffNormMagn - inst->featureData[4]);
+ avgDiffNormMagn =
+ (float)(avgDiffNormMagn / (inst->featureData[5] + (float)0.0001));
+ inst->featureData[4] +=
+ SPECT_DIFF_TAVG * (avgDiffNormMagn - inst->featureData[4]);
}
// Compute speech/noise probability
// speech/noise probability is returned in: probSpeechFinal
-//magn is the input magnitude spectrum
-//noise is the noise spectrum
-//snrLocPrior is the prior snr for each freq.
-//snr loc_post is the post snr for each freq.
-void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snrLocPrior,
+// magn is the input magnitude spectrum
+// noise is the noise spectrum
+// snrLocPrior is the prior snr for each freq.
+// snr loc_post is the post snr for each freq.
+void WebRtcNs_SpeechNoiseProb(NSinst_t* inst,
+ float* probSpeechFinal,
+ float* snrLocPrior,
float* snrLocPost) {
int i, sgnMap;
float invLrt, gainPrior, indPrior;
@@ -619,19 +658,19 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
float widthPrior, widthPrior0, widthPrior1, widthPrior2;
widthPrior0 = WIDTH_PR_MAP;
- widthPrior1 = (float)2.0 * WIDTH_PR_MAP; //width for pause region:
+ widthPrior1 = (float)2.0 * WIDTH_PR_MAP; // width for pause region:
// lower range, so increase width in tanh map
- widthPrior2 = (float)2.0 * WIDTH_PR_MAP; //for spectral-difference measure
+ widthPrior2 = (float)2.0 * WIDTH_PR_MAP; // for spectral-difference measure
- //threshold parameters for features
+ // threshold parameters for features
threshPrior0 = inst->priorModelPars[0];
threshPrior1 = inst->priorModelPars[1];
threshPrior2 = inst->priorModelPars[3];
- //sign for flatness feature
+ // sign for flatness feature
sgnMap = (int)(inst->priorModelPars[2]);
- //weight parameters for features
+ // weight parameters for features
weightIndPrior0 = inst->priorModelPars[4];
weightIndPrior1 = inst->priorModelPars[5];
weightIndPrior2 = inst->priorModelPars[6];
@@ -643,8 +682,8 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
tmpFloat1 = (float)1.0 + (float)2.0 * snrLocPrior[i];
tmpFloat2 = (float)2.0 * snrLocPrior[i] / (tmpFloat1 + (float)0.0001);
besselTmp = (snrLocPost[i] + (float)1.0) * tmpFloat2;
- inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - (float)log(tmpFloat1)
- - inst->logLrtTimeAvg[i]);
+ inst->logLrtTimeAvg[i] +=
+ LRT_TAVG * (besselTmp - (float)log(tmpFloat1) - inst->logLrtTimeAvg[i]);
logLrtTimeAvgKsum += inst->logLrtTimeAvg[i];
}
logLrtTimeAvgKsum = (float)logLrtTimeAvgKsum / (inst->magnLen);
@@ -652,23 +691,24 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
// done with computation of LR factor
//
- //compute the indicator functions
+ // compute the indicator functions
//
// average lrt feature
widthPrior = widthPrior0;
- //use larger width in tanh map for pause regions
+ // use larger width in tanh map for pause regions
if (logLrtTimeAvgKsum < threshPrior0) {
widthPrior = widthPrior1;
}
// compute indicator function: sigmoid map
- indicator0 = (float)0.5 * ((float)tanh(widthPrior *
- (logLrtTimeAvgKsum - threshPrior0)) + (float)1.0);
+ indicator0 = (float)0.5 *
+ ((float)tanh(widthPrior * (logLrtTimeAvgKsum - threshPrior0)) +
+ (float)1.0);
- //spectral flatness feature
+ // spectral flatness feature
tmpFloat1 = inst->featureData[0];
widthPrior = widthPrior0;
- //use larger width in tanh map for pause regions
+ // use larger width in tanh map for pause regions
if (sgnMap == 1 && (tmpFloat1 > threshPrior1)) {
widthPrior = widthPrior1;
}
@@ -676,26 +716,29 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
widthPrior = widthPrior1;
}
// compute indicator function: sigmoid map
- indicator1 = (float)0.5 * ((float)tanh((float)sgnMap *
- widthPrior * (threshPrior1 - tmpFloat1)) + (float)1.0);
+ indicator1 =
+ (float)0.5 *
+ ((float)tanh((float)sgnMap * widthPrior * (threshPrior1 - tmpFloat1)) +
+ (float)1.0);
- //for template spectrum-difference
+ // for template spectrum-difference
tmpFloat1 = inst->featureData[4];
widthPrior = widthPrior0;
- //use larger width in tanh map for pause regions
+ // use larger width in tanh map for pause regions
if (tmpFloat1 < threshPrior2) {
widthPrior = widthPrior2;
}
// compute indicator function: sigmoid map
- indicator2 = (float)0.5 * ((float)tanh(widthPrior * (tmpFloat1 - threshPrior2))
- + (float)1.0);
+ indicator2 =
+ (float)0.5 *
+ ((float)tanh(widthPrior * (tmpFloat1 - threshPrior2)) + (float)1.0);
- //combine the indicator function with the feature weights
- indPrior = weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 + weightIndPrior2
- * indicator2;
+ // combine the indicator function with the feature weights
+ indPrior = weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 +
+ weightIndPrior2 * indicator2;
// done with computing indicator function
- //compute the prior probability
+ // compute the prior probability
inst->priorSpeechProb += PRIOR_UPDATE * (indPrior - inst->priorSpeechProb);
// make sure probabilities are within range: keep floor to 0.01
if (inst->priorSpeechProb > 1.0) {
@@ -705,8 +748,9 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
inst->priorSpeechProb = (float)0.01;
}
- //final speech probability: combine prior model with LR factor:
- gainPrior = ((float)1.0 - inst->priorSpeechProb) / (inst->priorSpeechProb + (float)0.0001);
+ // final speech probability: combine prior model with LR factor:
+ gainPrior = ((float)1.0 - inst->priorSpeechProb) /
+ (inst->priorSpeechProb + (float)0.0001);
for (i = 0; i < inst->magnLen; i++) {
invLrt = (float)exp(-inst->logLrtTimeAvg[i]);
invLrt = (float)gainPrior * invLrt;
@@ -714,135 +758,65 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
}
}
-int WebRtcNs_AnalyzeCore(NSinst_t* inst, float* inFrame) {
- return 0;
-}
-
-int WebRtcNs_ProcessCore(NSinst_t* inst,
- float* speechFrame,
- float* speechFrameHB,
- float* outFrame,
- float* outFrameHB) {
- // main routine for noise reduction
-
- int flagHB = 0;
- int i;
- const int kStartBand = 5; // Skip first frequency bins during estimation.
- int updateParsFlag;
-
- float energy1, energy2, gain, factor, factor1, factor2;
- float signalEnergy, sumMagn;
- float snrPrior, currentEstimateStsa;
- float tmpFloat1, tmpFloat2, tmpFloat3, probSpeech, probNonSpeech;
- float gammaNoiseTmp, gammaNoiseOld;
- float noiseUpdateTmp, fTmp;
- float fout[BLOCKL_MAX];
- float winData[ANAL_BLOCKL_MAX];
- float magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL];
- float theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL];
- float snrLocPost[HALF_ANAL_BLOCKL], snrLocPrior[HALF_ANAL_BLOCKL];
- float probSpeechFinal[HALF_ANAL_BLOCKL] = { 0 };
- float previousEstimateStsa[HALF_ANAL_BLOCKL];
- float real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL];
+int WebRtcNs_AnalyzeCore(NSinst_t* inst, float* speechFrame) {
+ int i;
+ const int kStartBand = 5; // Skip first frequency bins during estimation.
+ int updateParsFlag;
+ float energy;
+ float signalEnergy, sumMagn;
+ float tmpFloat1, tmpFloat2, tmpFloat3, probSpeech, probNonSpeech;
+ float gammaNoiseTmp, gammaNoiseOld;
+ float noiseUpdateTmp, fTmp;
+ float winData[ANAL_BLOCKL_MAX];
+ float magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL];
+ float snrLocPost[HALF_ANAL_BLOCKL], snrLocPrior[HALF_ANAL_BLOCKL];
+ float real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL];
// Variables during startup
- float sum_log_i = 0.0;
- float sum_log_i_square = 0.0;
- float sum_log_magn = 0.0;
- float sum_log_i_log_magn = 0.0;
- float parametric_noise = 0.0;
- float parametric_exp = 0.0;
- float parametric_num = 0.0;
-
- // SWB variables
- int deltaBweHB = 1;
- int deltaGainHB = 1;
- float decayBweHB = 1.0;
- float gainMapParHB = 1.0;
- float gainTimeDomainHB = 1.0;
- float avgProbSpeechHB, avgProbSpeechHBTmp, avgFilterGainHB, gainModHB;
+ float sum_log_i = 0.0;
+ float sum_log_i_square = 0.0;
+ float sum_log_magn = 0.0;
+ float sum_log_i_log_magn = 0.0;
+ float parametric_exp = 0.0;
+ float parametric_num = 0.0;
// Check that initiation has been done
if (inst->initFlag != 1) {
return (-1);
}
- // Check for valid pointers based on sampling rate
- if (inst->fs == 32000) {
- if (speechFrameHB == NULL) {
- return -1;
- }
- flagHB = 1;
- // range for averaging low band quantities for H band gain
- deltaBweHB = (int)inst->magnLen / 4;
- deltaGainHB = deltaBweHB;
- }
//
updateParsFlag = inst->modelUpdatePars[0];
//
// update analysis buffer for L band
- memcpy(inst->dataBuf, inst->dataBuf + inst->blockLen10ms,
+ memcpy(inst->analyzeBuf,
+ inst->analyzeBuf + inst->blockLen10ms,
sizeof(float) * (inst->anaLen - inst->blockLen10ms));
- memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms, speechFrame,
+ memcpy(inst->analyzeBuf + inst->anaLen - inst->blockLen10ms,
+ speechFrame,
sizeof(float) * inst->blockLen10ms);
- if (flagHB == 1) {
- // update analysis buffer for H band
- memcpy(inst->dataBufHB, inst->dataBufHB + inst->blockLen10ms,
- sizeof(float) * (inst->anaLen - inst->blockLen10ms));
- memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms, speechFrameHB,
- sizeof(float) * inst->blockLen10ms);
- }
-
// check if processing needed
if (inst->outLen == 0) {
// windowing
- energy1 = 0.0;
+ energy = 0.0;
for (i = 0; i < inst->anaLen; i++) {
- winData[i] = inst->window[i] * inst->dataBuf[i];
- energy1 += winData[i] * winData[i];
+ winData[i] = inst->window[i] * inst->analyzeBuf[i];
+ energy += winData[i] * winData[i];
}
- if (energy1 == 0.0) {
- // synthesize the special case of zero input
+ if (energy == 0.0) {
// we want to avoid updating statistics in this case:
- // Updating feature statistics when we have zeros only will cause thresholds to
- // move towards zero signal situations. This in turn has the effect that once the
- // signal is "turned on" (non-zero values) everything will be treated as speech
- // and there is no noise suppression effect. Depending on the duration of the
- // inactive signal it takes a considerable amount of time for the system to learn
- // what is noise and what is speech.
-
- // read out fully processed segment
- for (i = inst->windShift; i < inst->blockLen + inst->windShift; i++) {
- fout[i - inst->windShift] = inst->syntBuf[i];
- }
- // update synthesis buffer
- memcpy(inst->syntBuf, inst->syntBuf + inst->blockLen,
- sizeof(float) * (inst->anaLen - inst->blockLen));
- memset(inst->syntBuf + inst->anaLen - inst->blockLen, 0,
- sizeof(float) * inst->blockLen);
-
- // out buffer
- inst->outLen = inst->blockLen - inst->blockLen10ms;
- if (inst->blockLen > inst->blockLen10ms) {
- for (i = 0; i < inst->outLen; i++) {
- inst->outBuf[i] = fout[i + inst->blockLen10ms];
- }
- }
- for (i = 0; i < inst->blockLen10ms; ++i)
- outFrame[i] = WEBRTC_SPL_SAT(
- WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
-
- // for time-domain gain of HB
- if (flagHB == 1)
- for (i = 0; i < inst->blockLen10ms; ++i)
- outFrameHB[i] = WEBRTC_SPL_SAT(
- WEBRTC_SPL_WORD16_MAX, inst->dataBufHB[i], WEBRTC_SPL_WORD16_MIN);
-
+ // Updating feature statistics when we have zeros only will cause
+ // thresholds to move towards zero signal situations. This in turn has the
+ // effect that once the signal is "turned on" (non-zero values) everything
+ // will be treated as speech and there is no noise suppression effect.
+ // Depending on the duration of the inactive signal it takes a
+ // considerable amount of time for the system to learn what is noise and
+ // what is speech.
return 0;
}
//
- inst->blockInd++; // Update the block index only when we process a block.
+ inst->blockInd++; // Update the block index only when we process a block.
// FFT
WebRtc_rdft(inst->anaLen, 1, winData, inst->ip, inst->wfft);
@@ -856,8 +830,6 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
(float)(real[inst->magnLen - 1] * real[inst->magnLen - 1]);
sumMagn = magn[0] + magn[inst->magnLen - 1];
if (inst->blockInd < END_STARTUP_SHORT) {
- inst->initMagnEst[0] += magn[0];
- inst->initMagnEst[inst->magnLen - 1] += magn[inst->magnLen - 1];
tmpFloat2 = log((float)(inst->magnLen - 1));
sum_log_i = tmpFloat2;
sum_log_i_square = tmpFloat2 * tmpFloat2;
@@ -875,7 +847,6 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
magn[i] = ((float)sqrt(fTmp)) + 1.0f;
sumMagn += magn[i];
if (inst->blockInd < END_STARTUP_SHORT) {
- inst->initMagnEst[i] += magn[i];
if (i >= kStartBand) {
tmpFloat2 = log((float)i);
sum_log_i += tmpFloat2;
@@ -890,18 +861,20 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
inst->signalEnergy = signalEnergy;
inst->sumMagn = sumMagn;
- //compute spectral flatness on input spectrum
+ // compute spectral flatness on input spectrum
WebRtcNs_ComputeSpectralFlatness(inst, magn);
// quantile noise estimate
WebRtcNs_NoiseEstimation(inst, magn, noise);
- //compute simplified noise model during startup
+ // compute simplified noise model during startup
if (inst->blockInd < END_STARTUP_SHORT) {
// Estimate White noise
- inst->whiteNoiseLevel += sumMagn / ((float)inst->magnLen) * inst->overdrive;
+ inst->whiteNoiseLevel +=
+ sumMagn / ((float)inst->magnLen) * inst->overdrive;
// Estimate Pink noise parameters
tmpFloat1 = sum_log_i_square * ((float)(inst->magnLen - kStartBand));
tmpFloat1 -= (sum_log_i * sum_log_i);
- tmpFloat2 = (sum_log_i_square * sum_log_magn - sum_log_i * sum_log_i_log_magn);
+ tmpFloat2 =
+ (sum_log_i_square * sum_log_magn - sum_log_i * sum_log_i_log_magn);
tmpFloat3 = tmpFloat2 / tmpFloat1;
// Constrain the estimated spectrum to be positive
if (tmpFloat3 < 0.0f) {
@@ -921,32 +894,34 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
inst->pinkNoiseExp += tmpFloat3;
// Calculate frequency independent parts of parametric noise estimate.
- if (inst->pinkNoiseExp == 0.0f) {
- // Use white noise estimate
- parametric_noise = inst->whiteNoiseLevel;
- } else {
+ if (inst->pinkNoiseExp > 0.0f) {
// Use pink noise estimate
- parametric_num = exp(inst->pinkNoiseNumerator / (float)(inst->blockInd + 1));
+ parametric_num =
+ exp(inst->pinkNoiseNumerator / (float)(inst->blockInd + 1));
parametric_num *= (float)(inst->blockInd + 1);
parametric_exp = inst->pinkNoiseExp / (float)(inst->blockInd + 1);
- parametric_noise = parametric_num / pow((float)kStartBand, parametric_exp);
}
for (i = 0; i < inst->magnLen; i++) {
- // Estimate the background noise using the white and pink noise parameters
- if ((inst->pinkNoiseExp > 0.0f) && (i >= kStartBand)) {
+ // Estimate the background noise using the white and pink noise
+ // parameters
+ if (inst->pinkNoiseExp == 0.0f) {
+ // Use white noise estimate
+ inst->parametricNoise[i] = inst->whiteNoiseLevel;
+ } else {
// Use pink noise estimate
- parametric_noise = parametric_num / pow((float)i, parametric_exp);
+ float use_band = (float)(i < kStartBand ? kStartBand : i);
+ inst->parametricNoise[i] =
+ parametric_num / pow(use_band, parametric_exp);
}
- theFilterTmp[i] = (inst->initMagnEst[i] - inst->overdrive * parametric_noise);
- theFilterTmp[i] /= (inst->initMagnEst[i] + (float)0.0001);
// Weight quantile noise with modeled noise
noise[i] *= (inst->blockInd);
- tmpFloat2 = parametric_noise * (END_STARTUP_SHORT - inst->blockInd);
+ tmpFloat2 =
+ inst->parametricNoise[i] * (END_STARTUP_SHORT - inst->blockInd);
noise[i] += (tmpFloat2 / (float)(inst->blockInd + 1));
noise[i] /= END_STARTUP_SHORT;
}
}
- //compute average signal during END_STARTUP_LONG time:
+ // compute average signal during END_STARTUP_LONG time:
// used to normalize spectral difference measure
if (inst->blockInd < END_STARTUP_LONG) {
inst->featureData[5] *= inst->blockInd;
@@ -954,11 +929,8 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
inst->featureData[5] /= (inst->blockInd + 1);
}
- //start processing at frames == converged+1
- //
+ // start processing at frames == converged+1
// STEP 1: compute prior and post snr based on quantile noise est
- //
-
// compute DD estimate of prior SNR: needed for new method
for (i = 0; i < inst->magnLen; i++) {
// post snr
@@ -968,23 +940,25 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
}
// previous post snr
// previous estimate: based on previous frame with gain filter
- previousEstimateStsa[i] = inst->magnPrev[i] / (inst->noisePrev[i] + (float)0.0001)
- * (inst->smooth[i]);
+ inst->previousEstimateStsa[i] = inst->magnPrev[i] /
+ (inst->noisePrev[i] + (float)0.0001) *
+ (inst->smooth[i]);
// DD estimate is sum of two terms: current estimate and previous estimate
// directed decision update of snrPrior
- snrLocPrior[i] = DD_PR_SNR * previousEstimateStsa[i] + ((float)1.0 - DD_PR_SNR)
- * snrLocPost[i];
+ snrLocPrior[i] = DD_PR_SNR * inst->previousEstimateStsa[i] +
+ ((float)1.0 - DD_PR_SNR) * snrLocPost[i];
// post and prior snr needed for step 2
} // end of loop over freqs
- // done with step 1: dd computation of prior and post snr
+ // done with step 1: dd computation of prior and post snr
- //
- //STEP 2: compute speech/noise likelihood
- //
- // compute difference of input spectrum with learned/estimated noise spectrum
+ // STEP 2: compute speech/noise likelihood
+ // compute difference of input spectrum with learned/estimated noise
+ // spectrum
WebRtcNs_ComputeSpectralDifference(inst, magn);
- // compute histograms for parameter decisions (thresholds and weights for features)
- // parameters are extracted once every window time (=inst->modelUpdatePars[1])
+ // compute histograms for parameter decisions (thresholds and weights for
+ // features)
+ // parameters are extracted once every window time
+ // (=inst->modelUpdatePars[1])
if (updateParsFlag >= 1) {
// counter update
inst->modelUpdatePars[3]--;
@@ -1002,25 +976,27 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
} else {
// update every window:
// get normalization for spectral difference for next window estimate
- inst->featureData[6] = inst->featureData[6]
- / ((float)inst->modelUpdatePars[1]);
- inst->featureData[5] = (float)0.5 * (inst->featureData[6]
- + inst->featureData[5]);
+ inst->featureData[6] =
+ inst->featureData[6] / ((float)inst->modelUpdatePars[1]);
+ inst->featureData[5] =
+ (float)0.5 * (inst->featureData[6] + inst->featureData[5]);
inst->featureData[6] = (float)0.0;
}
}
}
// compute speech/noise probability
- WebRtcNs_SpeechNoiseProb(inst, probSpeechFinal, snrLocPrior, snrLocPost);
+ WebRtcNs_SpeechNoiseProb(inst, inst->speechProb, snrLocPrior, snrLocPost);
// time-avg parameter for noise update
gammaNoiseTmp = NOISE_UPDATE;
for (i = 0; i < inst->magnLen; i++) {
- probSpeech = probSpeechFinal[i];
+ probSpeech = inst->speechProb[i];
probNonSpeech = (float)1.0 - probSpeech;
// temporary noise update:
// use it for speech frames if update value is less than previous
- noiseUpdateTmp = gammaNoiseTmp * inst->noisePrev[i] + ((float)1.0 - gammaNoiseTmp)
- * (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
+ noiseUpdateTmp =
+ gammaNoiseTmp * inst->noisePrev[i] +
+ ((float)1.0 - gammaNoiseTmp) *
+ (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
//
// time-constant based on speech/noise state
gammaNoiseOld = gammaNoiseTmp;
@@ -1031,16 +1007,20 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
}
// conservative noise update
if (probSpeech < PROB_RANGE) {
- inst->magnAvgPause[i] += GAMMA_PAUSE * (magn[i] - inst->magnAvgPause[i]);
+ inst->magnAvgPause[i] +=
+ GAMMA_PAUSE * (magn[i] - inst->magnAvgPause[i]);
}
// noise update
if (gammaNoiseTmp == gammaNoiseOld) {
noise[i] = noiseUpdateTmp;
} else {
- noise[i] = gammaNoiseTmp * inst->noisePrev[i] + ((float)1.0 - gammaNoiseTmp)
- * (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
+ noise[i] =
+ gammaNoiseTmp * inst->noisePrev[i] +
+ ((float)1.0 - gammaNoiseTmp) *
+ (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
// allow for noise update downwards:
- // if noise update decreases the noise, it is safe, so allow it to happen
+ // if noise update decreases the noise, it is safe, so allow it to
+ // happen
if (noiseUpdateTmp < noise[i]) {
noise[i] = noiseUpdateTmp;
}
@@ -1048,25 +1028,160 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
} // end of freq loop
// done with step 2: noise update
- //
- // STEP 3: compute dd update of prior snr and post snr based on new noise estimate
- //
+ // keep track of noise spectrum for next frame
+ for (i = 0; i < inst->magnLen; i++) {
+ inst->noisePrev[i] = noise[i];
+ }
+ } // end of if inst->outLen == 0
+
+ return 0;
+}
+
+int WebRtcNs_ProcessCore(NSinst_t* inst,
+ float* speechFrame,
+ float* speechFrameHB,
+ float* outFrame,
+ float* outFrameHB) {
+ // main routine for noise reduction
+ int flagHB = 0;
+ int i;
+
+ float energy1, energy2, gain, factor, factor1, factor2;
+ float snrPrior, currentEstimateStsa;
+ float tmpFloat1, tmpFloat2;
+ float fTmp;
+ float fout[BLOCKL_MAX];
+ float winData[ANAL_BLOCKL_MAX];
+ float magn[HALF_ANAL_BLOCKL];
+ float theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL];
+ float real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL];
+
+ // SWB variables
+ int deltaBweHB = 1;
+ int deltaGainHB = 1;
+ float decayBweHB = 1.0;
+ float gainMapParHB = 1.0;
+ float gainTimeDomainHB = 1.0;
+ float avgProbSpeechHB, avgProbSpeechHBTmp, avgFilterGainHB, gainModHB;
+
+ // Check that initiation has been done
+ if (inst->initFlag != 1) {
+ return (-1);
+ }
+ // Check for valid pointers based on sampling rate
+ if (inst->fs == 32000) {
+ if (speechFrameHB == NULL) {
+ return -1;
+ }
+ flagHB = 1;
+ // range for averaging low band quantities for H band gain
+ deltaBweHB = (int)inst->magnLen / 4;
+ deltaGainHB = deltaBweHB;
+ }
+
+ // update analysis buffer for L band
+ memcpy(inst->dataBuf,
+ inst->dataBuf + inst->blockLen10ms,
+ sizeof(float) * (inst->anaLen - inst->blockLen10ms));
+ memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms,
+ speechFrame,
+ sizeof(float) * inst->blockLen10ms);
+
+ if (flagHB == 1) {
+ // update analysis buffer for H band
+ memcpy(inst->dataBufHB,
+ inst->dataBufHB + inst->blockLen10ms,
+ sizeof(float) * (inst->anaLen - inst->blockLen10ms));
+ memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms,
+ speechFrameHB,
+ sizeof(float) * inst->blockLen10ms);
+ }
+
+ // check if processing needed
+ if (inst->outLen == 0) {
+ // windowing
+ energy1 = 0.0;
+ for (i = 0; i < inst->anaLen; i++) {
+ winData[i] = inst->window[i] * inst->dataBuf[i];
+ energy1 += winData[i] * winData[i];
+ }
+ if (energy1 == 0.0) {
+ // synthesize the special case of zero input
+ // read out fully processed segment
+ for (i = inst->windShift; i < inst->blockLen + inst->windShift; i++) {
+ fout[i - inst->windShift] = inst->syntBuf[i];
+ }
+ // update synthesis buffer
+ memcpy(inst->syntBuf,
+ inst->syntBuf + inst->blockLen,
+ sizeof(float) * (inst->anaLen - inst->blockLen));
+ memset(inst->syntBuf + inst->anaLen - inst->blockLen,
+ 0,
+ sizeof(float) * inst->blockLen);
+
+ // out buffer
+ inst->outLen = inst->blockLen - inst->blockLen10ms;
+ if (inst->blockLen > inst->blockLen10ms) {
+ for (i = 0; i < inst->outLen; i++) {
+ inst->outBuf[i] = fout[i + inst->blockLen10ms];
+ }
+ }
+ for (i = 0; i < inst->blockLen10ms; ++i)
+ outFrame[i] = WEBRTC_SPL_SAT(
+ WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
+
+ // for time-domain gain of HB
+ if (flagHB == 1)
+ for (i = 0; i < inst->blockLen10ms; ++i)
+ outFrameHB[i] = WEBRTC_SPL_SAT(
+ WEBRTC_SPL_WORD16_MAX, inst->dataBufHB[i], WEBRTC_SPL_WORD16_MIN);
+
+ return 0;
+ }
+
+ // FFT
+ WebRtc_rdft(inst->anaLen, 1, winData, inst->ip, inst->wfft);
+
+ imag[0] = 0;
+ real[0] = winData[0];
+ magn[0] = (float)(fabs(real[0]) + 1.0f);
+ imag[inst->magnLen - 1] = 0;
+ real[inst->magnLen - 1] = winData[1];
+ magn[inst->magnLen - 1] = (float)(fabs(real[inst->magnLen - 1]) + 1.0f);
+ if (inst->blockInd < END_STARTUP_SHORT) {
+ inst->initMagnEst[0] += magn[0];
+ inst->initMagnEst[inst->magnLen - 1] += magn[inst->magnLen - 1];
+ }
+ for (i = 1; i < inst->magnLen - 1; i++) {
+ real[i] = winData[2 * i];
+ imag[i] = winData[2 * i + 1];
+ // magnitude spectrum
+ fTmp = real[i] * real[i];
+ fTmp += imag[i] * imag[i];
+ magn[i] = ((float)sqrt(fTmp)) + 1.0f;
+ if (inst->blockInd < END_STARTUP_SHORT) {
+ inst->initMagnEst[i] += magn[i];
+ }
+ }
+
+ // Compute dd update of prior snr and post snr based on new noise estimate
for (i = 0; i < inst->magnLen; i++) {
// post and prior snr
currentEstimateStsa = (float)0.0;
- if (magn[i] > noise[i]) {
- currentEstimateStsa = magn[i] / (noise[i] + (float)0.0001) - (float)1.0;
+ if (magn[i] > inst->noisePrev[i]) {
+ currentEstimateStsa =
+ magn[i] / (inst->noisePrev[i] + (float)0.0001) - (float)1.0;
}
- // DD estimate is sume of two terms: current estimate and previous estimate
+ // DD estimate is sume of two terms: current estimate and previous
+ // estimate
// directed decision update of snrPrior
- snrPrior = DD_PR_SNR * previousEstimateStsa[i] + ((float)1.0 - DD_PR_SNR)
- * currentEstimateStsa;
+ snrPrior = DD_PR_SNR * inst->previousEstimateStsa[i] +
+ ((float)1.0 - DD_PR_SNR) * currentEstimateStsa;
// gain filter
tmpFloat1 = inst->overdrive + snrPrior;
tmpFloat2 = (float)snrPrior / tmpFloat1;
theFilter[i] = (float)tmpFloat2;
} // end of loop over freqs
- // done with step3
for (i = 0; i < inst->magnLen; i++) {
// flooring bottom
@@ -1078,6 +1193,9 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
theFilter[i] = 1.0;
}
if (inst->blockInd < END_STARTUP_SHORT) {
+ theFilterTmp[i] =
+ (inst->initMagnEst[i] - inst->overdrive * inst->parametricNoise[i]);
+ theFilterTmp[i] /= (inst->initMagnEst[i] + (float)0.0001);
// flooring bottom
if (theFilterTmp[i] < inst->denoiseBound) {
theFilterTmp[i] = inst->denoiseBound;
@@ -1097,9 +1215,8 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
real[i] *= inst->smooth[i];
imag[i] *= inst->smooth[i];
}
- // keep track of noise and magn spectrum for next frame
+ // keep track of magn spectrum for next frame
for (i = 0; i < inst->magnLen; i++) {
- inst->noisePrev[i] = noise[i];
inst->magnPrev[i] = magn[i];
}
// back to time domain
@@ -1112,10 +1229,10 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
WebRtc_rdft(inst->anaLen, -1, winData, inst->ip, inst->wfft);
for (i = 0; i < inst->anaLen; i++) {
- real[i] = 2.0f * winData[i] / inst->anaLen; // fft scaling
+ real[i] = 2.0f * winData[i] / inst->anaLen; // fft scaling
}
- //scale factor: only do it after END_STARTUP_LONG time
+ // scale factor: only do it after END_STARTUP_LONG time
factor = (float)1.0;
if (inst->gainmap == 1 && inst->blockInd > END_STARTUP_LONG) {
factor1 = (float)1.0;
@@ -1135,17 +1252,17 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
}
}
if (gain < B_LIM) {
- //don't reduce scale too much for pause regions:
+ // don't reduce scale too much for pause regions:
// attenuation here should be controlled by flooring
if (gain <= inst->denoiseBound) {
gain = inst->denoiseBound;
}
factor2 = (float)1.0 - (float)0.3 * (B_LIM - gain);
}
- //combine both scales with speech/noise prob:
+ // combine both scales with speech/noise prob:
// note prior (priorSpeechProb) is not frequency dependent
- factor = inst->priorSpeechProb * factor1 + ((float)1.0 - inst->priorSpeechProb)
- * factor2;
+ factor = inst->priorSpeechProb * factor1 +
+ ((float)1.0 - inst->priorSpeechProb) * factor2;
} // out of inst->gainmap==1
// synthesis
@@ -1157,9 +1274,11 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
fout[i - inst->windShift] = inst->syntBuf[i];
}
// update synthesis buffer
- memcpy(inst->syntBuf, inst->syntBuf + inst->blockLen,
+ memcpy(inst->syntBuf,
+ inst->syntBuf + inst->blockLen,
sizeof(float) * (inst->anaLen - inst->blockLen));
- memset(inst->syntBuf + inst->anaLen - inst->blockLen, 0,
+ memset(inst->syntBuf + inst->anaLen - inst->blockLen,
+ 0,
sizeof(float) * inst->blockLen);
// out buffer
@@ -1174,27 +1293,26 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
for (i = 0; i < inst->blockLen10ms; i++) {
fout[i] = inst->outBuf[i];
}
- memcpy(inst->outBuf, inst->outBuf + inst->blockLen10ms,
+ memcpy(inst->outBuf,
+ inst->outBuf + inst->blockLen10ms,
sizeof(float) * (inst->outLen - inst->blockLen10ms));
- memset(inst->outBuf + inst->outLen - inst->blockLen10ms, 0,
+ memset(inst->outBuf + inst->outLen - inst->blockLen10ms,
+ 0,
sizeof(float) * inst->blockLen10ms);
inst->outLen -= inst->blockLen10ms;
}
for (i = 0; i < inst->blockLen10ms; ++i)
- outFrame[i] = WEBRTC_SPL_SAT(
- WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
+ outFrame[i] =
+ WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
// for time-domain gain of HB
if (flagHB == 1) {
- for (i = 0; i < inst->magnLen; i++) {
- inst->speechProbHB[i] = probSpeechFinal[i];
- }
// average speech prob from low band
// avg over second half (i.e., 4->8kHz) of freq. spectrum
avgProbSpeechHB = 0.0;
for (i = inst->magnLen - deltaBweHB - 1; i < inst->magnLen - 1; i++) {
- avgProbSpeechHB += inst->speechProbHB[i];
+ avgProbSpeechHB += inst->speechProb[i];
}
avgProbSpeechHB = avgProbSpeechHB / ((float)deltaBweHB);
// average filter gain from low band
@@ -1206,14 +1324,16 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
avgFilterGainHB = avgFilterGainHB / ((float)(deltaGainHB));
avgProbSpeechHBTmp = (float)2.0 * avgProbSpeechHB - (float)1.0;
// gain based on speech prob:
- gainModHB = (float)0.5 * ((float)1.0 + (float)tanh(gainMapParHB * avgProbSpeechHBTmp));
- //combine gain with low band gain
+ gainModHB = (float)0.5 *
+ ((float)1.0 + (float)tanh(gainMapParHB * avgProbSpeechHBTmp));
+ // combine gain with low band gain
gainTimeDomainHB = (float)0.5 * gainModHB + (float)0.5 * avgFilterGainHB;
if (avgProbSpeechHB >= (float)0.5) {
- gainTimeDomainHB = (float)0.25 * gainModHB + (float)0.75 * avgFilterGainHB;
+ gainTimeDomainHB =
+ (float)0.25 * gainModHB + (float)0.75 * avgFilterGainHB;
}
gainTimeDomainHB = gainTimeDomainHB * decayBweHB;
- //make sure gain is within flooring range
+ // make sure gain is within flooring range
// flooring bottom
if (gainTimeDomainHB < inst->denoiseBound) {
gainTimeDomainHB = inst->denoiseBound;
@@ -1222,11 +1342,11 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
if (gainTimeDomainHB > (float)1.0) {
gainTimeDomainHB = 1.0;
}
- //apply gain
+ // apply gain
for (i = 0; i < inst->blockLen10ms; i++) {
float o = gainTimeDomainHB * inst->dataBufHB[i];
- outFrameHB[i] = WEBRTC_SPL_SAT(
- WEBRTC_SPL_WORD16_MAX, o, WEBRTC_SPL_WORD16_MIN);
+ outFrameHB[i] =
+ WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, o, WEBRTC_SPL_WORD16_MIN);
}
} // end of H band gain computation
//
diff --git a/modules/audio_processing/ns/ns_core.h b/modules/audio_processing/ns/ns_core.h
index 2d4b23cf..c5ca13f0 100644
--- a/modules/audio_processing/ns/ns_core.h
+++ b/modules/audio_processing/ns/ns_core.h
@@ -8,106 +8,108 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_
#include "webrtc/modules/audio_processing/ns/defines.h"
typedef struct NSParaExtract_t_ {
-
- //bin size of histogram
+ // bin size of histogram
float binSizeLrt;
float binSizeSpecFlat;
float binSizeSpecDiff;
- //range of histogram over which lrt threshold is computed
+ // range of histogram over which lrt threshold is computed
float rangeAvgHistLrt;
- //scale parameters: multiply dominant peaks of the histograms by scale factor to obtain
- //thresholds for prior model
- float factor1ModelPars; //for lrt and spectral difference
- float factor2ModelPars; //for spectral_flatness: used when noise is flatter than speech
- //peak limit for spectral flatness (varies between 0 and 1)
+ // scale parameters: multiply dominant peaks of the histograms by scale factor
+ // to obtain thresholds for prior model
+ float factor1ModelPars; // for lrt and spectral difference
+ float factor2ModelPars; // for spectral_flatness: used when noise is flatter
+ // than speech
+ // peak limit for spectral flatness (varies between 0 and 1)
float thresPosSpecFlat;
- //limit on spacing of two highest peaks in histogram: spacing determined by bin size
+ // limit on spacing of two highest peaks in histogram: spacing determined by
+ // bin size
float limitPeakSpacingSpecFlat;
float limitPeakSpacingSpecDiff;
- //limit on relevance of second peak:
+ // limit on relevance of second peak:
float limitPeakWeightsSpecFlat;
float limitPeakWeightsSpecDiff;
- //limit on fluctuation of lrt feature
+ // limit on fluctuation of lrt feature
float thresFluctLrt;
- //limit on the max and min values for the feature thresholds
+ // limit on the max and min values for the feature thresholds
float maxLrt;
float minLrt;
float maxSpecFlat;
float minSpecFlat;
float maxSpecDiff;
float minSpecDiff;
- //criteria of weight of histogram peak to accept/reject feature
+ // criteria of weight of histogram peak to accept/reject feature
int thresWeightSpecFlat;
int thresWeightSpecDiff;
} NSParaExtract_t;
typedef struct NSinst_t_ {
-
- uint32_t fs;
- int blockLen;
- int blockLen10ms;
- int windShift;
- int outLen;
- int anaLen;
- int magnLen;
- int aggrMode;
- const float* window;
- float dataBuf[ANAL_BLOCKL_MAX];
- float syntBuf[ANAL_BLOCKL_MAX];
- float outBuf[3 * BLOCKL_MAX];
-
- int initFlag;
+ uint32_t fs;
+ int blockLen;
+ int blockLen10ms;
+ int windShift;
+ int outLen;
+ int anaLen;
+ int magnLen;
+ int aggrMode;
+ const float* window;
+ float analyzeBuf[ANAL_BLOCKL_MAX];
+ float dataBuf[ANAL_BLOCKL_MAX];
+ float syntBuf[ANAL_BLOCKL_MAX];
+ float outBuf[3 * BLOCKL_MAX];
+
+ int initFlag;
// parameters for quantile noise estimation
- float density[SIMULT* HALF_ANAL_BLOCKL];
- float lquantile[SIMULT* HALF_ANAL_BLOCKL];
- float quantile[HALF_ANAL_BLOCKL];
- int counter[SIMULT];
- int updates;
+ float density[SIMULT * HALF_ANAL_BLOCKL];
+ float lquantile[SIMULT * HALF_ANAL_BLOCKL];
+ float quantile[HALF_ANAL_BLOCKL];
+ int counter[SIMULT];
+ int updates;
// parameters for Wiener filter
- float smooth[HALF_ANAL_BLOCKL];
- float overdrive;
- float denoiseBound;
- int gainmap;
+ float previousEstimateStsa[HALF_ANAL_BLOCKL];
+ float smooth[HALF_ANAL_BLOCKL];
+ float overdrive;
+ float denoiseBound;
+ int gainmap;
// fft work arrays.
- int ip[IP_LENGTH];
- float wfft[W_LENGTH];
+ int ip[IP_LENGTH];
+ float wfft[W_LENGTH];
// parameters for new method: some not needed, will reduce/cleanup later
- int32_t blockInd; //frame index counter
- int modelUpdatePars[4]; //parameters for updating or estimating
+ int32_t blockInd; // frame index counter
+ int modelUpdatePars[4]; // parameters for updating or estimating
// thresholds/weights for prior model
- float priorModelPars[7]; //parameters for prior model
- float noisePrev[HALF_ANAL_BLOCKL]; //noise spectrum from previous frame
- float magnPrev[HALF_ANAL_BLOCKL]; //magnitude spectrum of previous frame
- float logLrtTimeAvg[HALF_ANAL_BLOCKL]; //log lrt factor with time-smoothing
- float priorSpeechProb; //prior speech/noise probability
- float featureData[7]; //data for features
- float magnAvgPause[HALF_ANAL_BLOCKL]; //conservative noise spectrum estimate
- float signalEnergy; //energy of magn
- float sumMagn; //sum of magn
- float whiteNoiseLevel; //initial noise estimate
- float initMagnEst[HALF_ANAL_BLOCKL]; //initial magnitude spectrum estimate
- float pinkNoiseNumerator; //pink noise parameter: numerator
- float pinkNoiseExp; //pink noise parameter: power of freq
- NSParaExtract_t featureExtractionParams; //parameters for feature extraction
- //histograms for parameter estimation
- int histLrt[HIST_PAR_EST];
- int histSpecFlat[HIST_PAR_EST];
- int histSpecDiff[HIST_PAR_EST];
- //quantities for high band estimate
- float speechProbHB[HALF_ANAL_BLOCKL]; //final speech/noise prob: prior + LRT
- float dataBufHB[ANAL_BLOCKL_MAX]; //buffering data for HB
+ float priorModelPars[7]; // parameters for prior model
+ float noisePrev[HALF_ANAL_BLOCKL]; // noise spectrum from previous frame
+ float magnPrev[HALF_ANAL_BLOCKL]; // magnitude spectrum of previous frame
+ float logLrtTimeAvg[HALF_ANAL_BLOCKL]; // log lrt factor with time-smoothing
+ float priorSpeechProb; // prior speech/noise probability
+ float featureData[7]; // data for features
+ float magnAvgPause[HALF_ANAL_BLOCKL]; // conservative noise spectrum estimate
+ float signalEnergy; // energy of magn
+ float sumMagn; // sum of magn
+ float whiteNoiseLevel; // initial noise estimate
+ float initMagnEst[HALF_ANAL_BLOCKL]; // initial magnitude spectrum estimate
+ float pinkNoiseNumerator; // pink noise parameter: numerator
+ float pinkNoiseExp; // pink noise parameter: power of freq
+ float parametricNoise[HALF_ANAL_BLOCKL];
+ NSParaExtract_t featureExtractionParams; // parameters for feature extraction
+ // histograms for parameter estimation
+ int histLrt[HIST_PAR_EST];
+ int histSpecFlat[HIST_PAR_EST];
+ int histSpecDiff[HIST_PAR_EST];
+ // quantities for high band estimate
+ float speechProb[HALF_ANAL_BLOCKL]; // final speech/noise prob: prior + LRT
+ float dataBufHB[ANAL_BLOCKL_MAX]; // buffering data for HB
} NSinst_t;
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -136,7 +138,7 @@ int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs);
*
* Input:
* - inst : Instance that should be initialized
- * - mode : 0: Mild (6 dB), 1: Medium (10 dB), 2: Aggressive (15 dB)
+ * - mode : 0: Mild (6dB), 1: Medium (10dB), 2: Aggressive (15dB)
*
* Output:
* - NS_inst : Initialized instance
@@ -153,7 +155,7 @@ int WebRtcNs_set_policy_core(NSinst_t* inst, int mode);
*
* Input:
* - inst : Instance that should be initialized
- * - inFrame : Input speech frame for lower band
+ * - speechFrame : Input speech frame for lower band
*
* Output:
* - inst : Updated instance
@@ -161,7 +163,7 @@ int WebRtcNs_set_policy_core(NSinst_t* inst, int mode);
* Return value : 0 - OK
* -1 - Error
*/
-int WebRtcNs_AnalyzeCore(NSinst_t* inst, float* inFrame);
+int WebRtcNs_AnalyzeCore(NSinst_t* inst, float* speechFrame);
/****************************************************************************
* WebRtcNs_ProcessCore
@@ -187,8 +189,7 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
float* outFrameLow,
float* outFrameHigh);
-
#ifdef __cplusplus
}
#endif
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_
diff --git a/modules/audio_processing/test/audio_processing_unittest.cc b/modules/audio_processing/test/audio_processing_unittest.cc
index d7f7cf4c..af63bde3 100644
--- a/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/modules/audio_processing/test/audio_processing_unittest.cc
@@ -214,6 +214,7 @@ void WriteStatsMessage(const AudioProcessing::Statistic& output,
void OpenFileAndWriteMessage(const std::string filename,
const ::google::protobuf::MessageLite& msg) {
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
FILE* file = fopen(filename.c_str(), "wb");
ASSERT_TRUE(file != NULL);
@@ -226,6 +227,10 @@ void OpenFileAndWriteMessage(const std::string filename,
ASSERT_EQ(static_cast<size_t>(size),
fwrite(array.get(), sizeof(array[0]), size, file));
fclose(file);
+#else
+ std::cout << "Warning: Writing new reference is only allowed on Linux!"
+ << std::endl;
+#endif
}
std::string ResourceFilePath(std::string name, int sample_rate_hz) {
@@ -1735,14 +1740,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveIdenticalResults) {
// TODO(andrew): Add a test to process a few frames with different combinations
// of enabled components.
-// TODO(bjornv): Investigate if simply increasing the slack is a good way to
-// make this test work on Android. When running the test on a N7 we get a {2, 6}
-// difference of |has_voice_count| and |max_output_average| is up to 18 higher.
-// All numbers being consistently higher on N7 compare to ref_data, evaluated on
-// linux. Simply increasing the slack is one way forward. Adding an offset to
-// the metrics mentioned above, but keeping the same slack, is also an
-// alternative.
-TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
+TEST_F(ApmTest, Process) {
GOOGLE_PROTOBUF_VERIFY_VERSION;
audioproc::OutputData ref_data;
@@ -1857,12 +1855,33 @@ TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
if (!write_ref_data) {
const int kIntNear = 1;
+ // When running the test on a N7 we get a {2, 6} difference of
+ // |has_voice_count| and |max_output_average| is up to 18 higher.
+ // All numbers being consistently higher on N7 compare to ref_data.
+ // TODO(bjornv): If we start getting more of these offsets on Android we
+ // should consider a different approach. Either using one slack for all,
+ // or generate a separate android reference.
+#if defined(WEBRTC_ANDROID)
+ const int kHasVoiceCountOffset = 3;
+ const int kHasVoiceCountNear = 3;
+ const int kMaxOutputAverageOffset = 9;
+ const int kMaxOutputAverageNear = 9;
+#else
+ const int kHasVoiceCountOffset = 0;
+ const int kHasVoiceCountNear = kIntNear;
+ const int kMaxOutputAverageOffset = 0;
+ const int kMaxOutputAverageNear = kIntNear;
+#endif
EXPECT_NEAR(test->has_echo_count(), has_echo_count, kIntNear);
- EXPECT_NEAR(test->has_voice_count(), has_voice_count, kIntNear);
+ EXPECT_NEAR(test->has_voice_count(),
+ has_voice_count - kHasVoiceCountOffset,
+ kHasVoiceCountNear);
EXPECT_NEAR(test->is_saturated_count(), is_saturated_count, kIntNear);
EXPECT_NEAR(test->analog_level_average(), analog_level_average, kIntNear);
- EXPECT_NEAR(test->max_output_average(), max_output_average, kIntNear);
+ EXPECT_NEAR(test->max_output_average(),
+ max_output_average - kMaxOutputAverageOffset,
+ kMaxOutputAverageNear);
#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
audioproc::Test::EchoMetrics reference = test->echo_metrics();
diff --git a/modules/audio_processing/test/process_test.cc b/modules/audio_processing/test/process_test.cc
index b3f6a775..b6d51e47 100644
--- a/modules/audio_processing/test/process_test.cc
+++ b/modules/audio_processing/test/process_test.cc
@@ -874,7 +874,7 @@ void void_main(int argc, char* argv[]) {
near_frame.samples_per_channel_ = samples_per_channel;
if (!raw_output) {
- // The WAV file needs to be reset every time, because it cant change
+ // The WAV file needs to be reset every time, because it can't change
// it's sample rate or number of channels.
output_wav_file.reset(new WavFile(out_filename + ".wav",
sample_rate_hz,
@@ -1028,6 +1028,11 @@ void void_main(int argc, char* argv[]) {
if (raw_output && !output_raw_file) {
output_raw_file.reset(new RawFile(out_filename + ".pcm"));
}
+ if (!raw_output && !output_wav_file) {
+ output_wav_file.reset(new WavFile(out_filename + ".wav",
+ sample_rate_hz,
+ num_capture_output_channels));
+ }
WriteIntData(near_frame.data_,
size,
output_wav_file.get(),
diff --git a/modules/modules.gyp b/modules/modules.gyp
index 4086299a..777523ab 100644
--- a/modules/modules.gyp
+++ b/modules/modules.gyp
@@ -104,7 +104,9 @@
'sources': [
'audio_coding/main/acm2/acm_opus_unittest.cc',
'audio_coding/main/acm2/acm_receiver_unittest.cc',
+ 'audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc',
'audio_coding/main/acm2/audio_coding_module_unittest.cc',
+ 'audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc',
'audio_coding/main/acm2/call_statistics_unittest.cc',
'audio_coding/main/acm2/initial_delay_manager_unittest.cc',
'audio_coding/main/acm2/nack_unittest.cc',
diff --git a/modules/pacing/include/paced_sender.h b/modules/pacing/include/paced_sender.h
index ddd8e53b..14a3a3d4 100644
--- a/modules/pacing/include/paced_sender.h
+++ b/modules/pacing/include/paced_sender.h
@@ -14,9 +14,9 @@
#include <list>
#include <set>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/interface/module.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/modules/rtp_rtcp/source/rtcp_sender.h b/modules/rtp_rtcp/source/rtcp_sender.h
index 26c44b04..c0b8ebdb 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.h
+++ b/modules/rtp_rtcp/source/rtcp_sender.h
@@ -15,6 +15,7 @@
#include <sstream>
#include <string>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
@@ -23,7 +24,6 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/tmmbr_help.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/modules/rtp_rtcp/source/rtp_packet_history.h b/modules/rtp_rtcp/source/rtp_packet_history.h
index 190e5057..253f6d0a 100644
--- a/modules/rtp_rtcp/source/rtp_packet_history.h
+++ b/modules/rtp_rtcp/source/rtp_packet_history.h
@@ -15,10 +15,10 @@
#include <vector>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
namespace webrtc {
diff --git a/modules/rtp_rtcp/source/rtp_sender.h b/modules/rtp_rtcp/source/rtp_sender.h
index e4d4fca3..b2f2e0c4 100644
--- a/modules/rtp_rtcp/source/rtp_sender.h
+++ b/modules/rtp_rtcp/source/rtp_sender.h
@@ -16,6 +16,7 @@
#include <map>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/pacing/include/paced_sender.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
@@ -25,7 +26,6 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h"
#include "webrtc/modules/rtp_rtcp/source/ssrc_database.h"
#include "webrtc/modules/rtp_rtcp/source/video_codec_information.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#define MAX_INIT_RTP_SEQ_NUMBER 32767 // 2^15 -1.
diff --git a/modules/video_capture/BUILD.gn b/modules/video_capture/BUILD.gn
index 773708bc..b9f3d2f3 100644
--- a/modules/video_capture/BUILD.gn
+++ b/modules/video_capture/BUILD.gn
@@ -100,7 +100,10 @@ source_set("video_capture_internal_impl") {
"QTKit.framework",
]
}
- if (is_win) {
+ # winsdk_samples isn't pulled into Chromium, so it is disabled for Chromium
+ # builds. This is not a problem since the internal video capture
+ # implementation should not be used in chrome - issue 3831.
+ if (is_win && !build_with_chromium) {
sources = [
"windows/device_info_ds.cc",
"windows/device_info_ds.h",
diff --git a/modules/video_coding/codecs/vp8/vp8_impl.cc b/modules/video_coding/codecs/vp8/vp8_impl.cc
index 16d105d1..2a2a9d04 100644
--- a/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -175,7 +175,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
// Creating a wrapper to the image - setting image data to NULL. Actual
// pointer will be set in encode. Setting align to 1, as it is meaningless
// (actual memory is not allocated).
- raw_ = vpx_img_wrap(NULL, IMG_FMT_I420, codec_.width, codec_.height,
+ raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height,
1, NULL);
// populate encoder configuration with default values
if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), config_, 0)) {
@@ -356,9 +356,9 @@ int VP8EncoderImpl::Encode(const I420VideoFrame& input_frame,
}
// Image in vpx_image_t format.
// Input frame is const. VP8's raw frame is not defined as const.
- raw_->planes[PLANE_Y] = const_cast<uint8_t*>(frame.buffer(kYPlane));
- raw_->planes[PLANE_U] = const_cast<uint8_t*>(frame.buffer(kUPlane));
- raw_->planes[PLANE_V] = const_cast<uint8_t*>(frame.buffer(kVPlane));
+ raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(frame.buffer(kYPlane));
+ raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(frame.buffer(kUPlane));
+ raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(frame.buffer(kVPlane));
// TODO(mikhal): Stride should be set in initialization.
raw_->stride[VPX_PLANE_Y] = frame.stride(kYPlane);
raw_->stride[VPX_PLANE_U] = frame.stride(kUPlane);
diff --git a/modules/video_coding/main/source/timing.h b/modules/video_coding/main/source/timing.h
index 116639eb..bb44a93b 100644
--- a/modules/video_coding/main/source/timing.h
+++ b/modules/video_coding/main/source/timing.h
@@ -11,9 +11,9 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/video_coding/main/source/codec_timer.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/modules/video_coding/main/source/video_coding_impl.h b/modules/video_coding/main/source/video_coding_impl.h
index 816552fe..5b3fe2eb 100644
--- a/modules/video_coding/main/source/video_coding_impl.h
+++ b/modules/video_coding/main/source/video_coding_impl.h
@@ -15,6 +15,7 @@
#include <vector>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/video_coding/main/source/codec_database.h"
#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
@@ -25,7 +26,6 @@
#include "webrtc/modules/video_coding/main/source/timing.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
namespace webrtc {
diff --git a/modules/video_render/video_render.gypi b/modules/video_render/video_render.gypi
index da51588a..5db851d9 100644
--- a/modules/video_render/video_render.gypi
+++ b/modules/video_render/video_render.gypi
@@ -141,23 +141,7 @@
'windows/video_render_windows_impl.h',
'windows/video_render_direct3d9.cc',
'windows/video_render_windows_impl.cc',
- ],
- 'variables': {
- # 'directx_sdk_path' will be overridden in the condition block
- # below, but it must not be declared as empty here since gyp
- # will check if the first character is '/' for some reason.
- # If it's empty, we'll get an out-of-bounds error.
- 'directx_sdk_path': 'will_be_overridden',
- 'directx_sdk_default_path': '<(DEPTH)/third_party/directxsdk/files',
- 'conditions': [
- ['"<!(python <(DEPTH)/build/dir_exists.py <(directx_sdk_default_path))"=="True"', {
- 'directx_sdk_path': '<(DEPTH)/third_party/directxsdk/files',
- }, {
- 'directx_sdk_path': '$(DXSDK_DIR)',
- }],
- ],
- },
-
+ ],
'include_dirs': [
'<(directx_sdk_path)/Include',
],
diff --git a/system_wrappers/BUILD.gn b/system_wrappers/BUILD.gn
index 76a0f340..262189c1 100644
--- a/system_wrappers/BUILD.gn
+++ b/system_wrappers/BUILD.gn
@@ -45,7 +45,6 @@ static_library("system_wrappers") {
"interface/static_instance.h",
"interface/stl_util.h",
"interface/stringize_macros.h",
- "interface/thread_annotations.h",
"interface/thread_wrapper.h",
"interface/tick_util.h",
"interface/timestamp_extrapolator.h",
diff --git a/system_wrappers/interface/critical_section_wrapper.h b/system_wrappers/interface/critical_section_wrapper.h
index 4979b5c7..e93a249e 100644
--- a/system_wrappers/interface/critical_section_wrapper.h
+++ b/system_wrappers/interface/critical_section_wrapper.h
@@ -14,8 +14,8 @@
// If the critical section is heavily contended it may be beneficial to use
// read/write locks instead.
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
namespace webrtc {
class LOCKABLE CriticalSectionWrapper {
diff --git a/system_wrappers/interface/rw_lock_wrapper.h b/system_wrappers/interface/rw_lock_wrapper.h
index 91126e5d..dbe6d6c7 100644
--- a/system_wrappers/interface/rw_lock_wrapper.h
+++ b/system_wrappers/interface/rw_lock_wrapper.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_RW_LOCK_WRAPPER_H_
#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_RW_LOCK_WRAPPER_H_
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
+#include "webrtc/base/thread_annotations.h"
// Note, Windows pre-Vista version of RW locks are not supported natively. For
// these OSs regular critical sections have been used to approximate RW lock
diff --git a/system_wrappers/source/system_wrappers.gyp b/system_wrappers/source/system_wrappers.gyp
index 82addeb5..e6c4216d 100644
--- a/system_wrappers/source/system_wrappers.gyp
+++ b/system_wrappers/source/system_wrappers.gyp
@@ -54,7 +54,6 @@
'../interface/static_instance.h',
'../interface/stl_util.h',
'../interface/stringize_macros.h',
- '../interface/thread_annotations.h',
'../interface/thread_wrapper.h',
'../interface/tick_util.h',
'../interface/timestamp_extrapolator.h',
diff --git a/test/call_test.cc b/test/call_test.cc
index ccad7551..d49f6e3f 100644
--- a/test/call_test.cc
+++ b/test/call_test.cc
@@ -16,7 +16,6 @@ namespace test {
CallTest::CallTest()
: clock_(Clock::GetRealTimeClock()),
- encoder_settings_(NULL),
send_stream_(NULL),
fake_encoder_(clock_) {
}
@@ -40,8 +39,7 @@ void CallTest::RunBaseTest(BaseTest* test) {
if (test->ShouldCreateReceivers()) {
CreateMatchingReceiveConfigs();
}
- encoder_settings_ = test->GetEncoderSettings();
- test->ModifyConfigs(&send_config_, &receive_configs_, &video_streams_);
+ test->ModifyConfigs(&send_config_, &receive_configs_, &encoder_config_);
CreateStreams();
test->OnStreamsCreated(send_stream_, receive_streams_);
@@ -92,7 +90,7 @@ void CallTest::CreateSendConfig(size_t num_streams) {
send_config_.encoder_settings.encoder = &fake_encoder_;
send_config_.encoder_settings.payload_name = "FAKE";
send_config_.encoder_settings.payload_type = kFakeSendPayloadType;
- video_streams_ = test::CreateVideoStreams(num_streams);
+ encoder_config_.streams = test::CreateVideoStreams(num_streams);
for (size_t i = 0; i < num_streams; ++i)
send_config_.rtp.ssrcs.push_back(kSendSsrcs[i]);
}
@@ -123,7 +121,7 @@ void CallTest::CreateMatchingReceiveConfigs() {
}
void CallTest::CreateFrameGeneratorCapturer() {
- VideoStream stream = video_streams_.back();
+ VideoStream stream = encoder_config_.streams.back();
frame_generator_capturer_.reset(
test::FrameGeneratorCapturer::Create(send_stream_->Input(),
stream.width,
@@ -135,8 +133,8 @@ void CallTest::CreateStreams() {
assert(send_stream_ == NULL);
assert(receive_streams_.empty());
- send_stream_ = sender_call_->CreateVideoSendStream(
- send_config_, video_streams_, encoder_settings_);
+ send_stream_ =
+ sender_call_->CreateVideoSendStream(send_config_, encoder_config_);
for (size_t i = 0; i < receive_configs_.size(); ++i) {
receive_streams_.push_back(
@@ -193,14 +191,10 @@ size_t BaseTest::GetNumStreams() const {
return 1;
}
-const void* BaseTest::GetEncoderSettings() {
- return NULL;
-}
-
void BaseTest::ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) {
+ VideoEncoderConfig* encoder_config) {
}
void BaseTest::OnStreamsCreated(
diff --git a/test/call_test.h b/test/call_test.h
index a03c3bbe..695fb2ac 100644
--- a/test/call_test.h
+++ b/test/call_test.h
@@ -65,8 +65,7 @@ class CallTest : public ::testing::Test {
scoped_ptr<Call> sender_call_;
VideoSendStream::Config send_config_;
- std::vector<VideoStream> video_streams_;
- const void* encoder_settings_;
+ VideoEncoderConfig encoder_config_;
VideoSendStream* send_stream_;
scoped_ptr<Call> receiver_call_;
@@ -93,11 +92,10 @@ class BaseTest : public RtpRtcpObserver {
virtual Call::Config GetReceiverCallConfig();
virtual void OnCallsCreated(Call* sender_call, Call* receiver_call);
- virtual const void* GetEncoderSettings();
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams);
+ VideoEncoderConfig* encoder_config);
virtual void OnStreamsCreated(
VideoSendStream* send_stream,
const std::vector<VideoReceiveStream*>& receive_streams);
diff --git a/test/webrtc_test_common.gyp b/test/webrtc_test_common.gyp
index 71b875d1..d422a4b0 100644
--- a/test/webrtc_test_common.gyp
+++ b/test/webrtc_test_common.gyp
@@ -104,22 +104,6 @@
'sources!': [
'null_platform_renderer.cc',
],
- 'variables': {
- # 'directx_sdk_path' will be overridden in the condition block
- # below, but it must not be declared as empty here since gyp
- # will check if the first character is '/' for some reason.
- # If it's empty, we'll get an out-of-bounds error.
- 'directx_sdk_path': 'will_be_overridden',
- 'directx_sdk_default_path': '<(DEPTH)/third_party/directxsdk/files',
- 'conditions': [
- ['"<!(python <(DEPTH)/build/dir_exists.py <(directx_sdk_default_path))"=="True"', {
- 'directx_sdk_path': '<(DEPTH)/third_party/directxsdk/files',
- }, {
- 'directx_sdk_path': '$(DXSDK_DIR)',
- }],
- ],
- },
-
'include_dirs': [
'<(directx_sdk_path)/Include',
],
diff --git a/tools/rtcbot/bot/browser/bot.js b/tools/rtcbot/bot/browser/bot.js
index 130d006e..88398021 100644
--- a/tools/rtcbot/bot/browser/bot.js
+++ b/tools/rtcbot/bot/browser/bot.js
@@ -5,23 +5,80 @@
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
+//
+var localStreams = [];
+var remoteStreams = [];
+
+function ping(callback) {
+ callback("pong");
+}
+
+function getUserMedia(constraints, onSuccessCallback, onFailCallback){
+ console.log("Getting user media.");
+ navigator.webkitGetUserMedia(constraints,
+ onSuccessCallbackWraper, onFailCallback);
+
+ function onSuccessCallbackWraper(stream) {
+ console.log("GetUserMedia success.");
+ localStreams[stream.id] = stream;
+ onSuccessCallback(stream);
+ }
+}
+
+function createPeerConnection(doneCallback, failCallback) {
+ console.log("Creating peer connection");
+ var obj = {};
+ var pc = new webkitRTCPeerConnection(null);
-var botExposedApi = {
- ping: function (callback) {
- callback("pong");
- },
-
- createPeerConnection: function (doneCallback) {
- console.log("Creating peer connection");
- var pc = new webkitRTCPeerConnection(null);
- var obj = {};
- expose(obj, pc, "close");
- expose(obj, pc, "createOffer");
- expose(obj, pc, "createAnswer");
- expose(obj, pc, "setRemoteDescription", { 0: RTCSessionDescription });
- expose(obj, pc, "setLocalDescription", { 0: RTCSessionDescription });
- doneCallback(obj);
- },
+ expose(obj, pc, "close");
+ expose(obj, pc, "createOffer");
+ expose(obj, pc, "createAnswer");
+ expose(obj, pc, "addEventListener");
+ expose(obj, pc, "addIceCandidate", { 0: RTCIceCandidate});
+ expose(obj, pc, "setRemoteDescription", { 0: RTCSessionDescription });
+ expose(obj, pc, "setLocalDescription", { 0: RTCSessionDescription });
+
+ obj.addStream = function(stream) {
+ console.log("Adding local stream.");
+ var tempStream = localStreams[stream.id];
+ if (!tempStream) {
+ console.log("Undefined stream!");
+ return;
+ }
+ pc.addStream(tempStream);
+ };
+
+ pc.addEventListener('addstream', function(event) {
+ remoteStreams[event.stream.id] = event.stream;
+ });
+
+ doneCallback(obj);
+}
+
+function showStream(streamId, autoplay, muted) {
+ var stream = getStreamFromIdentifier_(streamId);
+ var video = document.createElement('video');
+ video.autoplay = autoplay;
+ video.muted = muted;
+ document.body.appendChild(video);
+ video.src = URL.createObjectURL(stream);
+ console.log("Stream " + stream.id + " attached to video element");
};
-connectToServer(botExposedApi);
+function getStreamFromIdentifier_(id) {
+ var tempStream = localStreams[id];
+ if (tempStream)
+ return tempStream;
+ tempStream = remoteStreams[id];
+ if (tempStream)
+ return tempStream;
+ console.log(id + " is not id for stream.");
+ return null;
+}
+
+connectToServer({
+ ping: ping,
+ getUserMedia: getUserMedia,
+ createPeerConnection: createPeerConnection,
+ showStream: showStream,
+});
diff --git a/tools/rtcbot/test.js b/tools/rtcbot/test.js
index c1db4fc0..36675ccb 100644
--- a/tools/rtcbot/test.js
+++ b/tools/rtcbot/test.js
@@ -17,10 +17,10 @@ var vm = require('vm');
var BotManager = require('./botmanager.js');
function Test(botType) {
- // Make the test fail if not completed in 3 seconds.
+ // TODO(houssainy): set the timeout.
this.timeout_ = setTimeout(
this.fail.bind(this, "Test timeout!"),
- 5000);
+ 10000);
this.botType_ = botType;
}
@@ -76,10 +76,10 @@ Test.prototype = {
},
}
-function runTest(testfile) {
+function runTest(botType, testfile) {
console.log("Running test: " + testfile);
var script = vm.createScript(fs.readFileSync(testfile), testfile);
- script.runInNewContext({ test: new Test(process.argv[2]) });
+ script.runInNewContext({ test: new Test(botType) });
}
-runTest("./test/simple_offer_answer.js");
+runTest(process.argv[2], process.argv[3]);
diff --git a/tools/rtcbot/test/webrtc_video_streaming.js b/tools/rtcbot/test/webrtc_video_streaming.js
new file mode 100644
index 00000000..a9079763
--- /dev/null
+++ b/tools/rtcbot/test/webrtc_video_streaming.js
@@ -0,0 +1,94 @@
+// Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+//
+// A unidirectional video and audio streaming test from bot 1 to bot 2.
+//
+// Note: the source of the video and audio stream is getUserMedia().
+//
+// TODO(houssainy): get a condition to terminate the test.
+//
+function testVideoStreaming(bot1, bot2) {
+ var pc1 = null;
+ var pc2 = null;
+
+ test.wait([
+ createPeerConnection.bind(bot1),
+ createPeerConnection.bind(bot2) ],
+ onPeerConnectionCreated);
+
+ function createPeerConnection(done) {
+ this.createPeerConnection(done, test.fail);
+ }
+
+ function onPeerConnectionCreated(peer1, peer2) {
+ test.log("RTC Peers created.");
+ pc1 = peer1;
+ pc2 = peer2;
+ pc1.addEventListener('addstream', test.fail);
+ pc2.addEventListener('addstream', onAddStream);
+ pc1.addEventListener('icecandidate', onIceCandidate.bind(pc2));
+ pc2.addEventListener('icecandidate', onIceCandidate.bind(pc1));
+
+ bot1.getUserMedia({video:true, audio:true}, onUserMediaSuccess, test.fail);
+
+ function onUserMediaSuccess(stream) {
+ test.log("User has granted access to local media.");
+ pc1.addStream(stream);
+ bot1.showStream(stream.id, true, true);
+
+ createOfferAndAnswer();
+ }
+ }
+
+ function onAddStream(event) {
+ test.log("On Add stream.");
+ bot2.showStream(event.stream.id, true, false);
+ }
+
+ function onIceCandidate(event) {
+ if(event.candidate){
+ test.log(event.candidate.candidate);
+ this.addIceCandidate(event.candidate,
+ onAddIceCandidateSuccess, test.fail);
+ };
+
+ function onAddIceCandidateSuccess() {
+ test.log("Candidate added successfully");
+ };
+ }
+
+ function createOfferAndAnswer() {
+ test.log("Creating offer.");
+ pc1.createOffer(gotOffer, test.fail);
+
+ function gotOffer(offer) {
+ test.log("Got offer");
+ pc1.setLocalDescription(offer, onSetSessionDescriptionSuccess, test.fail);
+ pc2.setRemoteDescription(offer, onSetSessionDescriptionSuccess,
+ test.fail);
+ test.log("Creating answer");
+ pc2.createAnswer(gotAnswer, test.fail);
+ }
+
+ function gotAnswer(answer) {
+ test.log("Got answer");
+ pc2.setLocalDescription(answer, onSetSessionDescriptionSuccess,
+ test.fail);
+ pc1.setRemoteDescription(answer, onSetSessionDescriptionSuccess,
+ test.fail);
+ }
+
+ function onSetSessionDescriptionSuccess() {
+ test.log("Set session description success.");
+ }
+ }
+}
+
+test.wait( [ test.spawnBot.bind(test, "alice"),
+ test.spawnBot.bind(test, "bob") ],
+ testVideoStreaming);
diff --git a/video/bitrate_estimator_tests.cc b/video/bitrate_estimator_tests.cc
index 40c1ed68..cb7c391e 100644
--- a/video/bitrate_estimator_tests.cc
+++ b/video/bitrate_estimator_tests.cc
@@ -13,11 +13,11 @@
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/call.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/call_test.h"
#include "webrtc/test/direct_transport.h"
@@ -148,7 +148,7 @@ class BitrateEstimatorTest : public test::CallTest {
send_config_.encoder_settings.encoder = NULL;
send_config_.encoder_settings.payload_name = "FAKE";
send_config_.encoder_settings.payload_type = kFakeSendPayloadType;
- video_streams_ = test::CreateVideoStreams(1);
+ encoder_config_.streams = test::CreateVideoStreams(1);
receive_config_ = VideoReceiveStream::Config();
assert(receive_config_.codecs.empty());
@@ -195,14 +195,14 @@ class BitrateEstimatorTest : public test::CallTest {
test_->send_config_.rtp.ssrcs[0]++;
test_->send_config_.encoder_settings.encoder = &fake_encoder_;
send_stream_ = test_->sender_call_->CreateVideoSendStream(
- test_->send_config_, test_->video_streams_, NULL);
- assert(test_->video_streams_.size() == 1);
- frame_generator_capturer_.reset(
- test::FrameGeneratorCapturer::Create(send_stream_->Input(),
- test_->video_streams_[0].width,
- test_->video_streams_[0].height,
- 30,
- Clock::GetRealTimeClock()));
+ test_->send_config_, test_->encoder_config_);
+ assert(test_->encoder_config_.streams.size() == 1);
+ frame_generator_capturer_.reset(test::FrameGeneratorCapturer::Create(
+ send_stream_->Input(),
+ test_->encoder_config_.streams[0].width,
+ test_->encoder_config_.streams[0].height,
+ 30,
+ Clock::GetRealTimeClock()));
send_stream_->Start();
frame_generator_capturer_->Start();
diff --git a/video/call.cc b/video/call.cc
index b4adafd7..25853f8b 100644
--- a/video/call.cc
+++ b/video/call.cc
@@ -14,6 +14,7 @@
#include <map>
#include <vector>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/call.h"
#include "webrtc/common.h"
#include "webrtc/config.h"
@@ -22,7 +23,6 @@
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/video/video_receive_stream.h"
#include "webrtc/video/video_send_stream.h"
@@ -87,8 +87,7 @@ class Call : public webrtc::Call, public PacketReceiver {
virtual VideoSendStream* CreateVideoSendStream(
const VideoSendStream::Config& config,
- const std::vector<VideoStream>& video_streams,
- const void* encoder_settings) OVERRIDE;
+ const VideoEncoderConfig& encoder_config) OVERRIDE;
virtual void DestroyVideoSendStream(webrtc::VideoSendStream* send_stream)
OVERRIDE;
@@ -196,8 +195,7 @@ PacketReceiver* Call::Receiver() { return this; }
VideoSendStream* Call::CreateVideoSendStream(
const VideoSendStream::Config& config,
- const std::vector<VideoStream>& video_streams,
- const void* encoder_settings) {
+ const VideoEncoderConfig& encoder_config) {
assert(config.rtp.ssrcs.size() > 0);
// TODO(mflodman): Base the start bitrate on a current bandwidth estimate, if
@@ -207,8 +205,7 @@ VideoSendStream* Call::CreateVideoSendStream(
overuse_observer_proxy_.get(),
video_engine_,
config,
- video_streams,
- encoder_settings,
+ encoder_config,
suspended_send_ssrcs_,
base_channel_id_,
config_.start_bitrate_bps != -1 ? config_.start_bitrate_bps
diff --git a/video/call_perf_tests.cc b/video/call_perf_tests.cc
index 557c5149..cd83698f 100644
--- a/video/call_perf_tests.cc
+++ b/video/call_perf_tests.cc
@@ -15,6 +15,7 @@
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/call.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
@@ -22,7 +23,6 @@
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/rtp_to_ntp.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/test/call_test.h"
#include "webrtc/test/direct_transport.h"
#include "webrtc/test/encoder_settings.h"
@@ -399,7 +399,7 @@ void CallPerfTest::TestCaptureNtpTime(const FakeNetworkPipe::Config& net_config,
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
(*receive_configs)[0].renderer = this;
// Enable the receiver side rtt calculation.
(*receive_configs)[0].rtp.rtcp_xr.receiver_reference_time_report = true;
@@ -546,7 +546,7 @@ void CallPerfTest::TestMinTransmitBitrate(bool pad_to_min_bitrate) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
if (pad_to_min_bitrate_) {
send_config->rtp.min_transmit_bitrate_bps = kMinTransmitBitrateBps;
} else {
diff --git a/video/end_to_end_tests.cc b/video/end_to_end_tests.cc
index 46420ecc..0e87a033 100644
--- a/video/end_to_end_tests.cc
+++ b/video/end_to_end_tests.cc
@@ -208,7 +208,7 @@ TEST_F(EndToEndTest, TransmitsFirstFrame) {
Start();
scoped_ptr<test::FrameGenerator> frame_generator(test::FrameGenerator::Create(
- video_streams_[0].width, video_streams_[0].height));
+ encoder_config_.streams[0].width, encoder_config_.streams[0].height));
send_stream_->Input()->SwapFrame(frame_generator->NextFrame());
EXPECT_EQ(kEventSignaled, renderer.Wait())
@@ -238,13 +238,13 @@ TEST_F(EndToEndTest, SendsAndReceivesH264) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->encoder_settings.encoder = &fake_encoder_;
send_config->encoder_settings.payload_name = "H264";
send_config->encoder_settings.payload_type = kFakeSendPayloadType;
- (*video_streams)[0].min_bitrate_bps = 50000;
- (*video_streams)[0].target_bitrate_bps =
- (*video_streams)[0].max_bitrate_bps = 2000000;
+ encoder_config->streams[0].min_bitrate_bps = 50000;
+ encoder_config->streams[0].target_bitrate_bps =
+ encoder_config->streams[0].max_bitrate_bps = 2000000;
(*receive_configs)[0].renderer = this;
VideoCodec codec =
@@ -369,7 +369,7 @@ TEST_F(EndToEndTest, ReceivesAndRetransmitsNack) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
(*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
}
@@ -463,7 +463,7 @@ TEST_F(EndToEndTest, DISABLED_CanReceiveFec) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
// TODO(pbos): Run this test with combined NACK/FEC enabled as well.
// int rtp_history_ms = 1000;
// (*receive_configs)[0].rtp.nack.rtp_history_ms = rtp_history_ms;
@@ -541,7 +541,7 @@ void EndToEndTest::DecodesRetransmittedFrame(bool retransmit_over_rtx) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
(*receive_configs)[0].pre_render_callback = this;
(*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
@@ -578,8 +578,7 @@ TEST_F(EndToEndTest, DecodesRetransmittedFrameOverRtx) {
DecodesRetransmittedFrame(true);
}
-// Disabled due to: https://code.google.com/p/webrtc/issues/detail?id=3770
-TEST_F(EndToEndTest, DISABLED_ON_ANDROID(UsesFrameCallbacks)) {
+TEST_F(EndToEndTest, UsesFrameCallbacks) {
static const int kWidth = 320;
static const int kHeight = 240;
@@ -648,9 +647,9 @@ TEST_F(EndToEndTest, DISABLED_ON_ANDROID(UsesFrameCallbacks)) {
VideoEncoder::Create(VideoEncoder::kVp8));
send_config_.encoder_settings.encoder = encoder.get();
send_config_.encoder_settings.payload_name = "VP8";
- ASSERT_EQ(1u, video_streams_.size()) << "Test setup error.";
- video_streams_[0].width = kWidth;
- video_streams_[0].height = kHeight;
+ ASSERT_EQ(1u, encoder_config_.streams.size()) << "Test setup error.";
+ encoder_config_.streams[0].width = kWidth;
+ encoder_config_.streams[0].height = kHeight;
send_config_.pre_encode_callback = &pre_encode_callback;
CreateMatchingReceiveConfigs();
@@ -745,7 +744,7 @@ void EndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.nack.rtp_history_ms = rtp_history_ms_;
(*receive_configs)[0].rtp.nack.rtp_history_ms = rtp_history_ms_;
(*receive_configs)[0].renderer = this;
@@ -892,7 +891,7 @@ void EndToEndTest::RespectsRtcpMode(newapi::RtcpMode rtcp_mode) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
(*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
(*receive_configs)[0].rtp.rtcp_mode = rtcp_mode_;
@@ -925,9 +924,7 @@ TEST_F(EndToEndTest, UsesRtcpReducedSizeMode) {
// Another is set up to receive all three of these with different renderers.
// Each renderer verifies that it receives the expected resolution, and as soon
// as every renderer has received a frame, the test finishes.
-//
-// Disabled due to: https://code.google.com/p/webrtc/issues/detail?id=3770
-TEST_F(EndToEndTest, DISABLED_ON_ANDROID(SendsAndReceivesMultipleStreams)) {
+TEST_F(EndToEndTest, SendsAndReceivesMultipleStreams) {
static const size_t kNumStreams = 3;
class VideoOutputObserver : public VideoRenderer {
@@ -991,15 +988,16 @@ TEST_F(EndToEndTest, DISABLED_ON_ANDROID(SendsAndReceivesMultipleStreams)) {
send_config.encoder_settings.encoder = encoders[i].get();
send_config.encoder_settings.payload_name = "VP8";
send_config.encoder_settings.payload_type = 124;
- std::vector<VideoStream> video_streams = test::CreateVideoStreams(1);
- VideoStream* stream = &video_streams[0];
+ VideoEncoderConfig encoder_config;
+ encoder_config.streams = test::CreateVideoStreams(1);
+ VideoStream* stream = &encoder_config.streams[0];
stream->width = width;
stream->height = height;
stream->max_framerate = 5;
stream->min_bitrate_bps = stream->target_bitrate_bps =
stream->max_bitrate_bps = 100000;
send_streams[i] =
- sender_call->CreateVideoSendStream(send_config, video_streams, NULL);
+ sender_call->CreateVideoSendStream(send_config, encoder_config);
send_streams[i]->Start();
VideoReceiveStream::Config receive_config;
@@ -1090,7 +1088,7 @@ TEST_F(EndToEndTest, ObserversEncodedFrames) {
Start();
scoped_ptr<test::FrameGenerator> frame_generator(test::FrameGenerator::Create(
- video_streams_[0].width, video_streams_[0].height));
+ encoder_config_.streams[0].width, encoder_config_.streams[0].height));
send_stream_->Input()->SwapFrame(frame_generator->NextFrame());
EXPECT_EQ(kEventSignaled, post_encode_observer.Wait())
@@ -1215,7 +1213,7 @@ void EndToEndTest::TestXrReceiverReferenceTimeReport(bool enable_rrtr) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
(*receive_configs)[0].rtp.rtcp_mode = newapi::kRtcpReducedSize;
(*receive_configs)[0].rtp.rtcp_xr.receiver_reference_time_report =
enable_rrtr_;
@@ -1283,19 +1281,19 @@ void EndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs,
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
if (num_ssrcs_ > 1) {
// Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
- for (size_t i = 0; i < video_streams->size(); ++i) {
- (*video_streams)[i].min_bitrate_bps = 10000;
- (*video_streams)[i].target_bitrate_bps = 15000;
- (*video_streams)[i].max_bitrate_bps = 20000;
+ for (size_t i = 0; i < encoder_config->streams.size(); ++i) {
+ encoder_config->streams[i].min_bitrate_bps = 10000;
+ encoder_config->streams[i].target_bitrate_bps = 15000;
+ encoder_config->streams[i].max_bitrate_bps = 20000;
}
}
- all_streams_ = *video_streams;
+ encoder_config_all_streams_ = *encoder_config;
if (send_single_ssrc_first_)
- video_streams->resize(1);
+ encoder_config->streams.resize(1);
}
virtual void OnStreamsCreated(
@@ -1311,7 +1309,7 @@ void EndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs,
if (send_single_ssrc_first_) {
// Set full simulcast and continue with the rest of the SSRCs.
- send_stream_->ReconfigureVideoEncoder(all_streams_, NULL);
+ send_stream_->ReconfigureVideoEncoder(encoder_config_all_streams_);
EXPECT_EQ(kEventSignaled, Wait())
<< "Timed out while waiting on additional SSRCs.";
}
@@ -1328,7 +1326,7 @@ void EndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs,
bool expect_single_ssrc_;
VideoSendStream* send_stream_;
- std::vector<VideoStream> all_streams_;
+ VideoEncoderConfig encoder_config_all_streams_;
} test(kSendSsrcs, num_ssrcs, send_single_ssrc_first);
RunBaseTest(&test);
@@ -1472,7 +1470,7 @@ TEST_F(EndToEndTest, GetStats) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->pre_encode_callback = this; // Used to inject delay.
send_config->rtp.c_name = "SomeCName";
@@ -1648,12 +1646,12 @@ TEST_F(EndToEndTest, DISABLED_RedundantPayloadsTransmittedOnAllSsrcs) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
// Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
- for (size_t i = 0; i < video_streams->size(); ++i) {
- (*video_streams)[i].min_bitrate_bps = 10000;
- (*video_streams)[i].target_bitrate_bps = 15000;
- (*video_streams)[i].max_bitrate_bps = 20000;
+ for (size_t i = 0; i < encoder_config->streams.size(); ++i) {
+ encoder_config->streams[i].min_bitrate_bps = 10000;
+ encoder_config->streams[i].target_bitrate_bps = 15000;
+ encoder_config->streams[i].max_bitrate_bps = 20000;
}
// Significantly higher than max bitrates for all video streams -> forcing
// padding to trigger redundant padding on all RTX SSRCs.
@@ -1787,10 +1785,10 @@ void EndToEndTest::TestRtpStatePreservation(bool use_rtx) {
}
// Lower bitrates so that all streams send initially.
- for (size_t i = 0; i < video_streams_.size(); ++i) {
- video_streams_[i].min_bitrate_bps = 10000;
- video_streams_[i].target_bitrate_bps = 15000;
- video_streams_[i].max_bitrate_bps = 20000;
+ for (size_t i = 0; i < encoder_config_.streams.size(); ++i) {
+ encoder_config_.streams[i].min_bitrate_bps = 10000;
+ encoder_config_.streams[i].target_bitrate_bps = 15000;
+ encoder_config_.streams[i].max_bitrate_bps = 20000;
}
CreateMatchingReceiveConfigs();
@@ -1809,10 +1807,10 @@ void EndToEndTest::TestRtpStatePreservation(bool use_rtx) {
sender_call_->DestroyVideoSendStream(send_stream_);
// Re-create VideoSendStream with only one stream.
- std::vector<VideoStream> one_stream = video_streams_;
- one_stream.resize(1);
+ VideoEncoderConfig one_stream = encoder_config_;
+ one_stream.streams.resize(1);
send_stream_ =
- sender_call_->CreateVideoSendStream(send_config_, one_stream, NULL);
+ sender_call_->CreateVideoSendStream(send_config_, one_stream);
send_stream_->Start();
CreateFrameGeneratorCapturer();
frame_generator_capturer_->Start();
@@ -1822,19 +1820,19 @@ void EndToEndTest::TestRtpStatePreservation(bool use_rtx) {
<< "Timed out waiting for single RTP packet.";
// Reconfigure back to use all streams.
- send_stream_->ReconfigureVideoEncoder(video_streams_, NULL);
+ send_stream_->ReconfigureVideoEncoder(encoder_config_);
observer.ResetExpectedSsrcs(kNumSsrcs);
EXPECT_EQ(kEventSignaled, observer.Wait())
<< "Timed out waiting for all SSRCs to send packets.";
// Reconfigure down to one stream.
- send_stream_->ReconfigureVideoEncoder(one_stream, NULL);
+ send_stream_->ReconfigureVideoEncoder(one_stream);
observer.ResetExpectedSsrcs(1);
EXPECT_EQ(kEventSignaled, observer.Wait())
<< "Timed out waiting for single RTP packet.";
// Reconfigure back to use all streams.
- send_stream_->ReconfigureVideoEncoder(video_streams_, NULL);
+ send_stream_->ReconfigureVideoEncoder(encoder_config_);
observer.ResetExpectedSsrcs(kNumSsrcs);
EXPECT_EQ(kEventSignaled, observer.Wait())
<< "Timed out waiting for all SSRCs to send packets.";
@@ -1937,7 +1935,7 @@ TEST_F(EndToEndTest, RespectsNetworkState) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->encoder_settings.encoder = this;
}
diff --git a/video/full_stack.cc b/video/full_stack.cc
index cd1190cc..f2d71a97 100644
--- a/video/full_stack.cc
+++ b/video/full_stack.cc
@@ -14,6 +14,7 @@
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/call.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
@@ -22,7 +23,6 @@
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/sleep.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/test/call_test.h"
#include "webrtc/test/direct_transport.h"
#include "webrtc/test/encoder_settings.h"
@@ -393,7 +393,7 @@ void FullStackTest::RunTest(const FullStackTestParams& params) {
send_config_.encoder_settings.payload_name = "VP8";
send_config_.encoder_settings.payload_type = 124;
- VideoStream* stream = &video_streams_[0];
+ VideoStream* stream = &encoder_config_.streams[0];
stream->width = params.clip.width;
stream->height = params.clip.height;
stream->min_bitrate_bps = params.min_bitrate_bps;
diff --git a/video/loopback.cc b/video/loopback.cc
index 29a3c782..488adf41 100644
--- a/video/loopback.cc
+++ b/video/loopback.cc
@@ -129,8 +129,9 @@ void Loopback() {
send_config.encoder_settings.encoder = encoder.get();
send_config.encoder_settings.payload_name = flags::Codec();
send_config.encoder_settings.payload_type = 124;
- std::vector<VideoStream> video_streams = test::CreateVideoStreams(1);
- VideoStream* stream = &video_streams[0];
+ VideoEncoderConfig encoder_config;
+ encoder_config.streams = test::CreateVideoStreams(1);
+ VideoStream* stream = &encoder_config.streams[0];
stream->width = flags::Width();
stream->height = flags::Height();
stream->min_bitrate_bps = static_cast<int>(flags::MinBitrate()) * 1000;
@@ -140,7 +141,7 @@ void Loopback() {
stream->max_qp = 56;
VideoSendStream* send_stream =
- call->CreateVideoSendStream(send_config, video_streams, NULL);
+ call->CreateVideoSendStream(send_config, encoder_config);
Clock* test_clock = Clock::GetRealTimeClock();
diff --git a/video/rampup_tests.cc b/video/rampup_tests.cc
index e1dd95a6..10bcb7f1 100644
--- a/video/rampup_tests.cc
+++ b/video/rampup_tests.cc
@@ -411,8 +411,8 @@ void RampUpTest::RunRampUpTest(bool rtx,
receiver_transport.SetReceiver(sender_call_->Receiver());
if (num_streams == 1) {
- video_streams_[0].target_bitrate_bps = 2000000;
- video_streams_[0].max_bitrate_bps = 2000000;
+ encoder_config_.streams[0].target_bitrate_bps = 2000000;
+ encoder_config_.streams[0].max_bitrate_bps = 2000000;
}
send_config_.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
@@ -430,9 +430,9 @@ void RampUpTest::RunRampUpTest(bool rtx,
// For multi stream rampup until all streams are being sent. That means
// enough birate to send all the target streams plus the min bitrate of
// the last one.
- int expected_bitrate_bps = video_streams_.back().min_bitrate_bps;
- for (size_t i = 0; i < video_streams_.size() - 1; ++i) {
- expected_bitrate_bps += video_streams_[i].target_bitrate_bps;
+ int expected_bitrate_bps = encoder_config_.streams.back().min_bitrate_bps;
+ for (size_t i = 0; i < encoder_config_.streams.size() - 1; ++i) {
+ expected_bitrate_bps += encoder_config_.streams[i].target_bitrate_bps;
}
stream_observer.set_expected_bitrate_bps(expected_bitrate_bps);
}
diff --git a/video/receive_statistics_proxy.h b/video/receive_statistics_proxy.h
index 35e5cc3e..b5fbf860 100644
--- a/video/receive_statistics_proxy.h
+++ b/video/receive_statistics_proxy.h
@@ -13,10 +13,10 @@
#include <string>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/frame_callback.h"
#include "webrtc/modules/remote_bitrate_estimator/rate_statistics.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_rtp_rtcp.h"
#include "webrtc/video_receive_stream.h"
diff --git a/video/send_statistics_proxy.h b/video/send_statistics_proxy.h
index 1b888b03..ef459dab 100644
--- a/video/send_statistics_proxy.h
+++ b/video/send_statistics_proxy.h
@@ -13,12 +13,12 @@
#include <string>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_capture.h"
#include "webrtc/video_send_stream.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
namespace webrtc {
diff --git a/video/video_send_stream.cc b/video/video_send_stream.cc
index 7d64755d..6e8f2381 100644
--- a/video/video_send_stream.cc
+++ b/video/video_send_stream.cc
@@ -114,8 +114,7 @@ VideoSendStream::VideoSendStream(
CpuOveruseObserver* overuse_observer,
webrtc::VideoEngine* video_engine,
const VideoSendStream::Config& config,
- const std::vector<VideoStream> video_streams,
- const void* encoder_settings,
+ const VideoEncoderConfig& encoder_config,
const std::map<uint32_t, RtpState>& suspended_ssrcs,
int base_channel,
int start_bitrate_bps)
@@ -211,7 +210,7 @@ VideoSendStream::VideoSendStream(
}
codec_ = ViECodec::GetInterface(video_engine);
- if (!ReconfigureVideoEncoder(video_streams, encoder_settings))
+ if (!ReconfigureVideoEncoder(encoder_config))
abort();
if (overuse_observer)
@@ -297,8 +296,8 @@ void VideoSendStream::Stop() {
}
bool VideoSendStream::ReconfigureVideoEncoder(
- const std::vector<VideoStream>& streams,
- const void* encoder_settings) {
+ const VideoEncoderConfig& config) {
+ const std::vector<VideoStream>& streams = config.streams;
assert(!streams.empty());
assert(config_.rtp.ssrcs.size() >= streams.size());
@@ -311,6 +310,14 @@ bool VideoSendStream::ReconfigureVideoEncoder(
} else {
video_codec.codecType = kVideoCodecGeneric;
}
+ switch (config.content_type) {
+ case VideoEncoderConfig::kRealtimeVideo:
+ video_codec.mode = kRealtimeVideo;
+ break;
+ case VideoEncoderConfig::kScreenshare:
+ video_codec.mode = kScreensharing;
+ break;
+ }
if (video_codec.codecType == kVideoCodecVP8) {
video_codec.codecSpecific.VP8 = VideoEncoder::GetDefaultVp8Settings();
@@ -319,13 +326,15 @@ bool VideoSendStream::ReconfigureVideoEncoder(
}
if (video_codec.codecType == kVideoCodecVP8) {
- if (encoder_settings != NULL) {
- video_codec.codecSpecific.VP8 =
- *reinterpret_cast<const VideoCodecVP8*>(encoder_settings);
+ if (config.encoder_specific_settings != NULL) {
+ video_codec.codecSpecific.VP8 = *reinterpret_cast<const VideoCodecVP8*>(
+ config.encoder_specific_settings);
}
+ video_codec.codecSpecific.VP8.numberOfTemporalLayers =
+ static_cast<unsigned char>(streams.back().temporal_layers.size());
} else {
// TODO(pbos): Support encoder_settings codec-agnostically.
- assert(encoder_settings == NULL);
+ assert(config.encoder_specific_settings == NULL);
}
strncpy(video_codec.plName,
@@ -355,8 +364,8 @@ bool VideoSendStream::ReconfigureVideoEncoder(
sim_stream->targetBitrate = streams[i].target_bitrate_bps / 1000;
sim_stream->maxBitrate = streams[i].max_bitrate_bps / 1000;
sim_stream->qpMax = streams[i].max_qp;
- // TODO(pbos): Implement mapping for temporal layers.
- assert(streams[i].temporal_layers.empty());
+ sim_stream->numberOfTemporalLayers =
+ static_cast<unsigned char>(streams[i].temporal_layers.size());
video_codec.width = std::max(video_codec.width,
static_cast<unsigned short>(streams[i].width));
diff --git a/video/video_send_stream.h b/video/video_send_stream.h
index 130c1c5c..8a77852e 100644
--- a/video/video_send_stream.h
+++ b/video/video_send_stream.h
@@ -46,8 +46,7 @@ class VideoSendStream : public webrtc::VideoSendStream,
CpuOveruseObserver* overuse_observer,
webrtc::VideoEngine* video_engine,
const VideoSendStream::Config& config,
- const std::vector<VideoStream> video_streams,
- const void* encoder_settings,
+ const VideoEncoderConfig& encoder_config,
const std::map<uint32_t, RtpState>& suspended_ssrcs,
int base_channel,
int start_bitrate);
@@ -57,8 +56,8 @@ class VideoSendStream : public webrtc::VideoSendStream,
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
- virtual bool ReconfigureVideoEncoder(const std::vector<VideoStream>& streams,
- const void* encoder_settings) OVERRIDE;
+ virtual bool ReconfigureVideoEncoder(
+ const VideoEncoderConfig& config) OVERRIDE;
virtual Stats GetStats() const OVERRIDE;
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index 7a641bd4..6c2fa39f 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -114,7 +114,7 @@ TEST_F(VideoSendStreamTest, SupportsCName) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.c_name = kCName;
}
@@ -152,7 +152,7 @@ TEST_F(VideoSendStreamTest, SupportsAbsoluteSendTime) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.extensions.push_back(
RtpExtension(RtpExtension::kAbsSendTime, kAbsSendTimeExtensionId));
}
@@ -193,7 +193,7 @@ TEST_F(VideoSendStreamTest, SupportsTransmissionTimeOffset) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->encoder_settings.encoder = &encoder_;
send_config->rtp.extensions.push_back(
RtpExtension(RtpExtension::kTOffset, kTOffsetExtensionId));
@@ -360,7 +360,7 @@ TEST_F(VideoSendStreamTest, SupportsFec) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.fec.red_payload_type = kRedPayloadType;
send_config->rtp.fec.ulpfec_payload_type = kUlpfecPayloadType;
}
@@ -440,7 +440,7 @@ void VideoSendStreamTest::TestNackRetransmission(
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
send_config->rtp.rtx.payload_type = retransmit_payload_type_;
if (retransmit_ssrc_ != kSendSsrcs[0])
@@ -613,7 +613,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
if (use_fec_) {
send_config->rtp.fec.red_payload_type = kRedPayloadType;
send_config->rtp.fec.ulpfec_payload_type = kUlpfecPayloadType;
@@ -788,14 +788,14 @@ TEST_F(VideoSendStreamTest, SuspendBelowMinBitrate) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
send_config->pre_encode_callback = this;
send_config->suspend_below_min_bitrate = true;
- int min_bitrate_bps = (*video_streams)[0].min_bitrate_bps;
+ int min_bitrate_bps = encoder_config->streams[0].min_bitrate_bps;
set_low_remb_bps(min_bitrate_bps - 10000);
int threshold_window = std::max(min_bitrate_bps / 10, 10000);
- ASSERT_GT((*video_streams)[0].max_bitrate_bps,
+ ASSERT_GT(encoder_config->streams[0].max_bitrate_bps,
min_bitrate_bps + threshold_window + 5000);
set_high_remb_bps(min_bitrate_bps + threshold_window + 5000);
}
@@ -977,7 +977,7 @@ TEST_F(VideoSendStreamTest, ProducesStats) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
SetConfig(*send_config);
}
@@ -1074,7 +1074,7 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->rtp.min_transmit_bitrate_bps = kMinTransmitBitrateBps;
}
@@ -1133,8 +1133,8 @@ TEST_F(VideoSendStreamTest, CapturesTextureAndI420VideoFrames) {
// Prepare five input frames. Send I420VideoFrame and TextureVideoFrame
// alternatively.
ScopedVector<I420VideoFrame> input_frames;
- int width = static_cast<int>(video_streams_[0].width);
- int height = static_cast<int>(video_streams_[0].height);
+ int width = static_cast<int>(encoder_config_.streams[0].width);
+ int height = static_cast<int>(encoder_config_.streams[0].height);
webrtc::RefCountImpl<FakeNativeHandle>* handle1 =
new webrtc::RefCountImpl<FakeNativeHandle>();
webrtc::RefCountImpl<FakeNativeHandle>* handle2 =
@@ -1154,7 +1154,7 @@ TEST_F(VideoSendStreamTest, CapturesTextureAndI420VideoFrames) {
send_stream_->Input()->SwapFrame(frame.get());
// Do not send the next frame too fast, so the frame dropper won't drop it.
if (i < input_frames.size() - 1)
- SleepMs(1000 / video_streams_[0].max_framerate);
+ SleepMs(1000 / encoder_config_.streams[0].max_framerate);
// Wait until the output frame is received before sending the next input
// frame. Or the previous input frame may be replaced without delivering.
observer.WaitOutputFrame();
@@ -1327,16 +1327,16 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->encoder_settings.encoder = this;
- video_streams_ = *video_streams;
+ encoder_config_ = *encoder_config;
}
virtual void PerformTest() OVERRIDE {
EXPECT_EQ(kEventSignaled, Wait())
<< "Timed out while waiting for Encode.";
EXPECT_EQ(0u, num_releases());
- stream_->ReconfigureVideoEncoder(video_streams_, NULL);
+ stream_->ReconfigureVideoEncoder(encoder_config_);
EXPECT_EQ(0u, num_releases());
stream_->Stop();
// Encoder should not be released before destroying the VideoSendStream.
@@ -1354,7 +1354,7 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
bool callback_registered_ GUARDED_BY(crit_);
size_t num_releases_ GUARDED_BY(crit_);
bool released_ GUARDED_BY(crit_);
- std::vector<VideoStream> video_streams_;
+ VideoEncoderConfig encoder_config_;
} test_encoder;
RunBaseTest(&test_encoder);
@@ -1363,7 +1363,64 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
EXPECT_EQ(1u, test_encoder.num_releases());
}
+TEST_F(VideoSendStreamTest, EncoderSetupPropagatesCommonEncoderConfigValues) {
+ class VideoCodecConfigObserver : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ VideoCodecConfigObserver()
+ : SendTest(kDefaultTimeoutMs),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ num_initializations_(0) {}
+
+ private:
+ virtual void ModifyConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) OVERRIDE {
+ send_config->encoder_settings.encoder = this;
+ encoder_config_ = *encoder_config;
+ }
+
+ virtual void OnStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) OVERRIDE {
+ stream_ = send_stream;
+ }
+
+ virtual int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ uint32_t max_payload_size) OVERRIDE {
+ if (num_initializations_ == 0) {
+ // Verify default values.
+ EXPECT_EQ(kRealtimeVideo, config->mode);
+ } else {
+ // Verify that changed values are propagated.
+ EXPECT_EQ(kScreensharing, config->mode);
+ }
+ ++num_initializations_;
+ return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
+ }
+
+ virtual void PerformTest() OVERRIDE {
+ EXPECT_EQ(1u, num_initializations_) << "VideoEncoder not initialized.";
+
+ encoder_config_.content_type = VideoEncoderConfig::kScreenshare;
+ stream_->ReconfigureVideoEncoder(encoder_config_);
+ EXPECT_EQ(2u, num_initializations_)
+ << "ReconfigureVideoEncoder did not reinitialize the encoder with "
+ "new encoder settings.";
+ }
+
+ size_t num_initializations_;
+ VideoSendStream* stream_;
+ VideoEncoderConfig encoder_config_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) {
+ static const size_t kNumberOfTemporalLayers = 4;
class VideoCodecConfigObserver : public test::SendTest,
public test::FakeEncoder {
public:
@@ -1378,11 +1435,17 @@ TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) {
virtual void ModifyConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
- std::vector<VideoStream>* video_streams) OVERRIDE {
+ VideoEncoderConfig* encoder_config) OVERRIDE {
send_config->encoder_settings.encoder = this;
send_config->encoder_settings.payload_name = "VP8";
- video_streams_ = *video_streams;
+ for (size_t i = 0; i < encoder_config->streams.size(); ++i) {
+ encoder_config->streams[i].temporal_layers.resize(
+ kNumberOfTemporalLayers);
+ }
+
+ encoder_config->encoder_specific_settings = &vp8_settings_;
+ encoder_config_ = *encoder_config;
}
virtual void OnStreamsCreated(
@@ -1395,6 +1458,20 @@ TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) {
int32_t number_of_cores,
uint32_t max_payload_size) OVERRIDE {
EXPECT_EQ(kVideoCodecVP8, config->codecType);
+
+ // Check that the number of temporal layers has propagated properly to
+ // VideoCodec.
+ EXPECT_EQ(kNumberOfTemporalLayers,
+ config->codecSpecific.VP8.numberOfTemporalLayers);
+
+ for (unsigned char i = 0; i < config->numberOfSimulcastStreams; ++i) {
+ EXPECT_EQ(kNumberOfTemporalLayers,
+ config->simulcastStream[i].numberOfTemporalLayers);
+ }
+
+ // Set expected temporal layers as they should have been set when
+ // reconfiguring the encoder and not match the set config.
+ vp8_settings_.numberOfTemporalLayers = kNumberOfTemporalLayers;
EXPECT_EQ(0,
memcmp(&config->codecSpecific.VP8,
&vp8_settings_,
@@ -1407,26 +1484,23 @@ TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) {
EXPECT_EQ(1u, num_initializations_) << "VideoEncoder not initialized.";
vp8_settings_.denoisingOn = true;
- stream_->ReconfigureVideoEncoder(video_streams_, &vp8_settings_);
+ stream_->ReconfigureVideoEncoder(encoder_config_);
EXPECT_EQ(2u, num_initializations_)
<< "ReconfigureVideoEncoder did not reinitialize the encoder with "
"new encoder settings.";
}
- int32_t Encode(
- const I420VideoFrame& input_image,
- const CodecSpecificInfo* codec_specific_info,
- const std::vector<VideoFrameType>* frame_types) {
+ int32_t Encode(const I420VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<VideoFrameType>* frame_types) OVERRIDE {
// Silently skip the encode, FakeEncoder::Encode doesn't produce VP8.
return 0;
}
- virtual const void* GetEncoderSettings() OVERRIDE { return &vp8_settings_; }
-
VideoCodecVP8 vp8_settings_;
size_t num_initializations_;
VideoSendStream* stream_;
- std::vector<VideoStream> video_streams_;
+ VideoEncoderConfig encoder_config_;
} test;
RunBaseTest(&test);
diff --git a/video_engine/vie_capturer.h b/video_engine/vie_capturer.h
index 2464ca04..fdee5d88 100644
--- a/video_engine/vie_capturer.h
+++ b/video_engine/vie_capturer.h
@@ -13,6 +13,7 @@
#include <vector>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/video_capture/include/video_capture.h"
@@ -20,7 +21,6 @@
#include "webrtc/modules/video_coding/main/interface/video_coding.h"
#include "webrtc/modules/video_processing/main/interface/video_processing.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_engine/include/vie_base.h"
#include "webrtc/video_engine/include/vie_capture.h"
diff --git a/video_engine/vie_channel_group.cc b/video_engine/vie_channel_group.cc
index 6b2d3570..c6b3f741 100644
--- a/video_engine/vie_channel_group.cc
+++ b/video_engine/vie_channel_group.cc
@@ -10,6 +10,7 @@
#include "webrtc/video_engine/vie_channel_group.h"
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common.h"
#include "webrtc/experiments.h"
#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
@@ -18,7 +19,6 @@
#include "webrtc/modules/utility/interface/process_thread.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/video_engine/call_stats.h"
#include "webrtc/video_engine/encoder_state_feedback.h"
#include "webrtc/video_engine/vie_channel.h"
diff --git a/video_engine/vie_encoder.h b/video_engine/vie_encoder.h
index e2e691e0..51c1d016 100644
--- a/video_engine/vie_encoder.h
+++ b/video_engine/vie_encoder.h
@@ -14,13 +14,13 @@
#include <list>
#include <map>
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
#include "webrtc/modules/video_processing/main/interface/video_processing.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
#include "webrtc/frame_callback.h"
#include "webrtc/video_engine/vie_defines.h"
diff --git a/video_engine/vie_manager_base.h b/video_engine/vie_manager_base.h
index f9f008f3..c5e92106 100644
--- a/video_engine/vie_manager_base.h
+++ b/video_engine/vie_manager_base.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_VIDEO_ENGINE_VIE_MANAGER_BASE_H_
#define WEBRTC_VIDEO_ENGINE_VIE_MANAGER_BASE_H_
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
+#include "webrtc/base/thread_annotations.h"
namespace webrtc {
diff --git a/video_send_stream.h b/video_send_stream.h
index 8c9d5b7f..dd2bec12 100644
--- a/video_send_stream.h
+++ b/video_send_stream.h
@@ -153,8 +153,7 @@ class VideoSendStream {
// Set which streams to send. Must have at least as many SSRCs as configured
// in the config. Encoder settings are passed on to the encoder instance along
// with the VideoStream settings.
- virtual bool ReconfigureVideoEncoder(const std::vector<VideoStream>& streams,
- const void* encoder_settings) = 0;
+ virtual bool ReconfigureVideoEncoder(const VideoEncoderConfig& config) = 0;
virtual Stats GetStats() const = 0;
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
index 6d6b099c..93a241be 100644
--- a/voice_engine/channel.cc
+++ b/voice_engine/channel.cc
@@ -1392,61 +1392,6 @@ Channel::StopReceiving()
}
int32_t
-Channel::SetNetEQPlayoutMode(NetEqModes mode)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
- "Channel::SetNetEQPlayoutMode()");
- AudioPlayoutMode playoutMode(voice);
- switch (mode)
- {
- case kNetEqDefault:
- playoutMode = voice;
- break;
- case kNetEqStreaming:
- playoutMode = streaming;
- break;
- case kNetEqFax:
- playoutMode = fax;
- break;
- case kNetEqOff:
- playoutMode = off;
- break;
- }
- if (audio_coding_->SetPlayoutMode(playoutMode) != 0)
- {
- _engineStatisticsPtr->SetLastError(
- VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
- "SetNetEQPlayoutMode() failed to set playout mode");
- return -1;
- }
- return 0;
-}
-
-int32_t
-Channel::GetNetEQPlayoutMode(NetEqModes& mode)
-{
- const AudioPlayoutMode playoutMode = audio_coding_->PlayoutMode();
- switch (playoutMode)
- {
- case voice:
- mode = kNetEqDefault;
- break;
- case streaming:
- mode = kNetEqStreaming;
- break;
- case fax:
- mode = kNetEqFax;
- break;
- case off:
- mode = kNetEqOff;
- }
- WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
- VoEId(_instanceId,_channelId),
- "Channel::GetNetEQPlayoutMode() => mode=%u", mode);
- return 0;
-}
-
-int32_t
Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
diff --git a/voice_engine/channel.h b/voice_engine/channel.h
index d030daf3..535922fa 100644
--- a/voice_engine/channel.h
+++ b/voice_engine/channel.h
@@ -194,8 +194,6 @@ public:
int32_t StartReceiving();
int32_t StopReceiving();
- int32_t SetNetEQPlayoutMode(NetEqModes mode);
- int32_t GetNetEQPlayoutMode(NetEqModes& mode);
int32_t RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
int32_t DeRegisterVoiceEngineObserver();
diff --git a/voice_engine/include/voe_base.h b/voice_engine/include/voe_base.h
index 3a78f7b6..65a71758 100644
--- a/voice_engine/include/voe_base.h
+++ b/voice_engine/include/voe_base.h
@@ -181,9 +181,6 @@ public:
OnHoldModes mode = kHoldSendAndPlay) { return -1; }
virtual int GetOnHoldStatus(int channel, bool& enabled,
OnHoldModes& mode) { return -1; }
- virtual int SetNetEQPlayoutMode(int channel, NetEqModes mode) { return -1; }
- virtual int GetNetEQPlayoutMode(int channel,
- NetEqModes& mode) { return -1; }
protected:
VoEBase() {}